From 938e7b61f443dba1af8f78d0ca75476b51d814ac Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Thu, 26 Sep 2024 04:54:11 +0000 Subject: [PATCH] treewide: rename Component arg size -> input_shapes --- ..._Shallice_debugging_Interactive_activation | 10 +- Scripts/Debug/Hebbian_Simon.py | 2 +- ...on_Reward_rate_with_penalty_with_inputs.py | 12 +- Scripts/Debug/Markus Stroop.py | 18 ++- .../Debug/Predator-Prey Sebastian REDUCED.py | 6 +- Scripts/Debug/Predator-Prey Sebastian.py | 6 +- Scripts/Debug/StabilityFlexibility.py | 9 +- Scripts/Debug/Yotam LCA Model LLVM.py | 29 ++-- Scripts/Debug/Yotam LCA Model.py | 26 ++-- Scripts/Debug/bryant_lca_with_termination.py | 6 +- .../laura_test_no_noise_stroop_09_11_2018.py | 20 +-- Scripts/Debug/lca/pytorch_lca.py | 2 +- Scripts/Debug/markus_test_umemoto.py | 3 +- .../predator_prey_opt/predator_prey_dmt.py | 8 +- .../stability_flexibility.py | 24 +-- .../stability_flexibility_nn.py | 26 ++-- Scripts/Debug/stability_flexibility_simple.py | 9 +- .../Basics And Primer/Stroop Model - Basic.py | 12 +- .../Stroop Model - Conflict Monitoring.py | 17 ++- Scripts/Examples/Basics And Primer/XOR Model | 6 +- .../Examples/Botvinick Model Composition.py | 21 ++- .../Examples/Gating-Mechanism. with UDF.py | 2 +- .../Gilbert_Shallice_Composition_Model.py | 21 ++- Scripts/Examples/Lena Rumelhart script.py | 12 +- Scripts/Examples/RL-DDM.py | 2 +- .../Examples/Rumelhart Semantic Network.py | 16 +- Scripts/Examples/StabilityFlexibility.py | 9 +- Scripts/Examples/Stroop Model.py | 14 +- .../Rumelhart Semantic Network (autodiff).py | 12 +- .../Examples/Tutorial/Stroop Model - EVC.py | 14 +- Scripts/Examples/_Gating-Mechanism.py | 2 +- Scripts/Examples/_Leabra-Demo.py | 4 +- Scripts/Examples/_Leabra-Learning-Demo.py | 4 +- .../Examples/_Reinforcement-Learning REV.py | 4 +- .../Adaptive Replay Model.py | 22 +-- .../Bustamante_Stroop_XOR_LVOC_Model.py | 6 +- .../Bustamante_Stroop_XOR_LVOC_Model_VZ.py | 6 +- .../EGO Model - CSW with RNN.py | 10 +- .../EGO Model - CSW with Simple Integrator.py | 10 +- .../EGO Model - Revaluation.py | 16 +- .../EGO Model - MDP.py | 24 +-- .../GreedyAgentInteractiveInputs.py | 10 +- .../GreedyAgentModel.py | 10 +- .../GreedyAgentModel_LLVM_TEST.py | 6 +- .../PanickyAgentModel.py | 8 +- .../Predator-Prey Model DEMO.py | 8 +- .../Predator-Prey Model DQN LVOC.py | 6 +- .../Predator-Prey Model DQN [ORIG].py | 8 +- .../Predator-Prey Model DQN.py | 8 +- .../Predator-Prey Model INPUT LAYER.py | 20 +-- .../Predator-Prey Model I_0 Nested Comp.py | 20 +-- .../Predator-Prey Model.py | 8 +- .../Models (Under Development)/nback/nback.py | 28 ++-- .../nback/nback_og_pnl.py | 26 ++-- docs/source/BasicsAndPrimer.rst | 38 ++--- .../BotvinickConflictMonitoringModel.rst | 14 +- docs/source/Cohen_HustonModel.rst | 12 +- docs/source/NieuwenhuisModel.rst | 6 +- docs/source/PCTC_model.rst | 16 +- docs/source/RefactoredLearningGuide.rst | 8 +- docs/source/UserGuide_TBD.rst | 2 +- psyneulink/core/components/component.py | 142 +++++++++--------- .../nonstateful/learningfunctions.py | 6 +- .../nonstateful/objectivefunctions.py | 30 ++-- .../nonstateful/transferfunctions.py | 18 +-- .../functions/userdefinedfunction.py | 8 +- .../core/components/mechanisms/mechanism.py | 44 +++--- .../modulatory/control/controlmechanism.py | 12 +- .../control/gating/gatingmechanism.py | 12 +- .../modulatory/learning/learningmechanism.py | 4 +- .../modulatory/modulatorymechanism.py | 4 +- .../compositioninterfacemechanism.py | 4 +- .../processing/defaultprocessingmechanism.py | 6 +- 
.../processing/integratormechanism.py | 14 +- .../processing/objectivemechanism.py | 8 +- .../processing/processingmechanism.py | 12 +- .../processing/transfermechanism.py | 34 ++--- psyneulink/core/components/ports/inputport.py | 34 ++--- .../ports/modulatorysignals/controlsignal.py | 4 +- .../ports/modulatorysignals/gatingsignal.py | 10 +- .../ports/modulatorysignals/learningsignal.py | 4 +- .../modulatorysignals/modulatorysignal.py | 4 +- .../core/components/ports/outputport.py | 4 +- .../core/components/ports/parameterport.py | 8 +- psyneulink/core/components/ports/port.py | 8 +- psyneulink/core/components/shellclasses.py | 4 +- psyneulink/core/compositions/composition.py | 6 +- psyneulink/core/compositions/showgraph.py | 6 +- psyneulink/core/globals/keywords.py | 4 +- psyneulink/core/globals/log.py | 4 +- psyneulink/core/globals/parameters.py | 2 +- .../autoassociativelearningmechanism.py | 4 +- .../learning/kohonenlearningmechanism.py | 4 +- .../mechanisms/processing/integrator/ddm.py | 12 +- .../integrator/episodicmemorymechanism.py | 44 +++--- .../mechanisms/processing/leabramechanism.py | 6 +- .../objective/comparatormechanism.py | 2 +- .../objective/predictionerrormechanism.py | 4 +- .../transfer/contrastivehebbianmechanism.py | 6 +- .../processing/transfer/kohonenmechanism.py | 4 +- .../processing/transfer/kwtamechanism.py | 8 +- .../processing/transfer/lcamechanism.py | 4 +- .../transfer/recurrenttransfermechanism.py | 14 +- .../compositions/autodiffcomposition.py | 4 +- .../library/compositions/emcomposition.py | 24 +-- psyneulink/library/models/Cohen_Huston1994.py | 14 +- .../models/Cohen_Huston1994_horse_race.py | 21 ++- psyneulink/library/models/GilzenratModel.py | 6 +- .../library/models/Kalanthroff_PCTC_2018.py | 16 +- .../library/models/Nieuwenhuis2005Model.py | 6 +- tests/components/test_component.py | 6 +- tests/composition/test_autodiffcomposition.py | 82 +++++----- tests/composition/test_composition.py | 67 +++++---- tests/composition/test_control.py | 33 ++-- tests/composition/test_gating.py | 4 +- tests/composition/test_interfaces.py | 2 +- tests/composition/test_learning.py | 125 ++++++++------- tests/composition/test_models.py | 32 ++-- .../test_parameterestimationcomposition.py | 8 +- tests/composition/test_show_graph.py | 10 +- tests/control/test_gilzenrat.py | 4 +- .../functions/test_accumulator_integrator.py | 4 +- tests/functions/test_combination.py | 2 +- tests/functions/test_memory.py | 12 +- tests/functions/test_user_defined_func.py | 12 +- tests/log/test_log.py | 32 ++-- tests/log/test_rpc.py | 24 +-- tests/mdf/model_varied_matrix_sizes.py | 8 +- tests/mdf/stroop_conflict_monitoring.py | 16 +- tests/mechanisms/test_control_mechanism.py | 17 ++- tests/mechanisms/test_ddm_mechanism.py | 24 +-- tests/mechanisms/test_episodic_memory.py | 10 +- tests/mechanisms/test_gating_mechanism.py | 12 +- tests/mechanisms/test_input_output_labels.py | 12 +- tests/mechanisms/test_input_port_spec.py | 64 ++++---- tests/mechanisms/test_kwta.py | 78 +++++----- tests/mechanisms/test_lca.py | 53 ++++--- tests/mechanisms/test_leabra_mechanism.py | 30 ++-- tests/mechanisms/test_mechanisms.py | 10 +- tests/mechanisms/test_processing_mechanism.py | 4 +- .../test_recurrent_transfer_mechanism.py | 122 +++++++-------- tests/mechanisms/test_transfer_mechanism.py | 86 +++++------ tests/models/test_botvinick.py | 21 ++- tests/models/test_greedy_agent.py | 24 +-- tests/ports/test_input_ports.py | 18 +-- .../test_projection_specifications.py | 24 +-- tests/scheduling/test_scheduler.py | 8 
+- 147 files changed, 1300 insertions(+), 1207 deletions(-) diff --git a/Scripts/Debug/Gilbert_Shallice_debugging_Interactive_activation b/Scripts/Debug/Gilbert_Shallice_debugging_Interactive_activation index e01144bdcf3..c40a12ff480 100644 --- a/Scripts/Debug/Gilbert_Shallice_debugging_Interactive_activation +++ b/Scripts/Debug/Gilbert_Shallice_debugging_Interactive_activation @@ -5,16 +5,16 @@ import psyneulink as pnl ### LAYERS -WORD_INPUT_LAYER = pnl.TransferMechanism(size = 3, +WORD_INPUT_LAYER = pnl.TransferMechanism(input_shapes = 3, function=pnl.Linear, name='WORD INPUT LAYER') -COLOR_INPUT_LAYER = pnl.TransferMechanism(size = 3, +COLOR_INPUT_LAYER = pnl.TransferMechanism(input_shapes = 3, function=pnl.Linear, name='COLOR INPUT LAYER') -WORD_OUTPUT_LAYER = pnl.RecurrentTransferMechanism(size = 3, +WORD_OUTPUT_LAYER = pnl.RecurrentTransferMechanism(input_shapes = 3, auto=0.0, hetero=0.0,#-2.0, function=pnl.Linear(), @@ -25,7 +25,7 @@ WORD_OUTPUT_LAYER.set_log_conditions('InputPort-0') -COLOR_OUTPUT_LAYER = pnl.RecurrentTransferMechanism(size = 3, +COLOR_OUTPUT_LAYER = pnl.RecurrentTransferMechanism(input_shapes = 3, auto=0.0, hetero=0.0,#-2.0, function=pnl.Linear(), @@ -35,7 +35,7 @@ COLOR_OUTPUT_LAYER = pnl.RecurrentTransferMechanism(size = 3, COLOR_OUTPUT_LAYER.set_log_conditions('value') -TASK_DEMAND_LAYER = pnl.RecurrentTransferMechanism(size = 2, +TASK_DEMAND_LAYER = pnl.RecurrentTransferMechanism(input_shapes = 2, auto=0.0, hetero=0.0,#-2.0, function=pnl.Linear(), diff --git a/Scripts/Debug/Hebbian_Simon.py b/Scripts/Debug/Hebbian_Simon.py index f2722741205..6037fa14155 100644 --- a/Scripts/Debug/Hebbian_Simon.py +++ b/Scripts/Debug/Hebbian_Simon.py @@ -14,7 +14,7 @@ Hebb_comp = pnl.Composition() Hebb_mech=pnl.RecurrentTransferMechanism( - size=sizeF, + input_shapes=sizeF, function=pnl.Linear, #integrator_mode = True, #integration_rate = 0.5, diff --git a/Scripts/Debug/Jason_Reward_rate_with_penalty_with_inputs.py b/Scripts/Debug/Jason_Reward_rate_with_penalty_with_inputs.py index 759f1a1c708..3fba66424af 100644 --- a/Scripts/Debug/Jason_Reward_rate_with_penalty_with_inputs.py +++ b/Scripts/Debug/Jason_Reward_rate_with_penalty_with_inputs.py @@ -72,18 +72,18 @@ def get_stroop_model(unit_noise_std=.01, dec_noise_std=.1): punish = pnl.TransferMechanism(name='punish') inp_clr = pnl.TransferMechanism( - size=N_UNITS, function=pnl.Linear, name='COLOR INPUT' + input_shapes=N_UNITS, function=pnl.Linear, name='COLOR INPUT' ) inp_wrd = pnl.TransferMechanism( - size=N_UNITS, function=pnl.Linear, name='WORD INPUT' + input_shapes=N_UNITS, function=pnl.Linear, name='WORD INPUT' ) # task layer, represent the task instruction; color naming / word reading inp_task = pnl.TransferMechanism( - size=N_UNITS, function=pnl.Linear, name='TASK' + input_shapes=N_UNITS, function=pnl.Linear, name='TASK' ) # hidden layer for color and word hid_clr = pnl.TransferMechanism( - size=N_UNITS, + input_shapes=N_UNITS, function=hidden_func, integrator_mode=True, integration_rate=integration_rate, @@ -92,7 +92,7 @@ def get_stroop_model(unit_noise_std=.01, dec_noise_std=.1): name='COLORS HIDDEN' ) hid_wrd = pnl.TransferMechanism( - size=N_UNITS, + input_shapes=N_UNITS, function=hidden_func, integrator_mode=True, integration_rate=integration_rate, @@ -102,7 +102,7 @@ def get_stroop_model(unit_noise_std=.01, dec_noise_std=.1): ) # output layer output = pnl.TransferMechanism( - size=N_UNITS, + input_shapes=N_UNITS, function=pnl.Logistic, integrator_mode=True, integration_rate=integration_rate, diff --git 
a/Scripts/Debug/Markus Stroop.py b/Scripts/Debug/Markus Stroop.py index afc339a101f..1eda026211b 100644 --- a/Scripts/Debug/Markus Stroop.py +++ b/Scripts/Debug/Markus Stroop.py @@ -7,16 +7,19 @@ import psyneulink.core.components.functions.stateful.integratorfunctions import psyneulink.core.components.functions.nonstateful.transferfunctions -colors_input_layer = pnl.TransferMechanism(size=2, +colors_input_layer = pnl.TransferMechanism( + input_shapes=2, function=psyneulink.core.components.functions.nonstateful.transferfunctions.Linear, name='COLORS_INPUT') -words_input_layer = pnl.TransferMechanism(size=2, +words_input_layer = pnl.TransferMechanism( + input_shapes=2, function=psyneulink.core.components.functions.nonstateful.transferfunctions.Linear, name='WORDS_INPUT') # Task layer, tasks: ('name the color', 'read the word') -task_layer = pnl.TransferMechanism(size=2, +task_layer = pnl.TransferMechanism( + input_shapes=2, function=psyneulink.core.components.functions.nonstateful.transferfunctions.Linear, name='TASK') @@ -26,14 +29,16 @@ # randomly distributed noise to the net input # time averaging = integration_rate = 0.1 unit_noise = 0.001 -colors_hidden_layer = pnl.TransferMechanism(size=2, +colors_hidden_layer = pnl.TransferMechanism( + input_shapes=2, function=psyneulink.core.components.functions.nonstateful.transferfunctions.Logistic(gain=1.0, x_0=4.0), #should be able to get same result with offset = -4.0 integrator_mode=True, noise=psyneulink.core.components.functions.nonstateful.distributionfunctions.NormalDist(mean=0, standard_deviation=unit_noise).function, integration_rate=0.1, name='COLORS HIDDEN') # words_hidden: ('RED','GREEN') -words_hidden_layer = pnl.TransferMechanism(size=2, +words_hidden_layer = pnl.TransferMechanism( + input_shapes=2, function=psyneulink.core.components.functions.nonstateful.transferfunctions.Logistic(gain=1.0, x_0=4.0), integrator_mode=True, noise=psyneulink.core.components.functions.nonstateful.distributionfunctions.NormalDist(mean=0, standard_deviation=unit_noise).function, @@ -43,7 +48,8 @@ # OUTPUT UNITS # Response layer, provide input to accumulator, responses: ('red', 'green') -response_layer = pnl.TransferMechanism(size=2, +response_layer = pnl.TransferMechanism( + input_shapes=2, function=psyneulink.core.components.functions.nonstateful.transferfunctions.Logistic, integrator_mode=True, noise=psyneulink.core.components.functions.nonstateful.distributionfunctions.NormalDist(mean=0, standard_deviation=unit_noise).function, diff --git a/Scripts/Debug/Predator-Prey Sebastian REDUCED.py b/Scripts/Debug/Predator-Prey Sebastian REDUCED.py index 15ec7e4928b..54348a7050f 100644 --- a/Scripts/Debug/Predator-Prey Sebastian REDUCED.py +++ b/Scripts/Debug/Predator-Prey Sebastian REDUCED.py @@ -36,9 +36,9 @@ def get_new_episode_flag(): # ************************************** PROCESSING MECHANISMS ******************************************************** # Perceptual Mechanisms -player_percept = ProcessingMechanism(size=2, function=GaussianDistort(), name="PLAYER PERCEPT") -predator_percept = ProcessingMechanism(size=2, function=GaussianDistort(), name="PREDATOR PERCEPT") -prey_percept = ProcessingMechanism(size=2, function=GaussianDistort(), name="PREY PERCEPT") +player_percept = ProcessingMechanism(input_shapes=2, function=GaussianDistort(), name="PLAYER PERCEPT") +predator_percept = ProcessingMechanism(input_shapes=2, function=GaussianDistort(), name="PREDATOR PERCEPT") +prey_percept = ProcessingMechanism(input_shapes=2, function=GaussianDistort(), 
name="PREY PERCEPT") # Mechanism used to encode trialtype from environment trial_type_input_mech = ProcessingMechanism(name="TRIAL TYPE INPUT") diff --git a/Scripts/Debug/Predator-Prey Sebastian.py b/Scripts/Debug/Predator-Prey Sebastian.py index 491cb9f5a63..711631b4c77 100644 --- a/Scripts/Debug/Predator-Prey Sebastian.py +++ b/Scripts/Debug/Predator-Prey Sebastian.py @@ -105,9 +105,9 @@ def get_optimal_action(observation): # ************************************** PROCESSING MECHANISMS ******************************************************** # Perceptual Mechanisms -player_percept = ProcessingMechanism(size=prey_len, function=GaussianDistort(), name="PLAYER PERCEPT") -predator_percept = ProcessingMechanism(size=predator_len, function=GaussianDistort(), name="PREDATOR PERCEPT") -prey_percept = ProcessingMechanism(size=prey_len, function=GaussianDistort(), name="PREY PERCEPT") +player_percept = ProcessingMechanism(input_shapes=prey_len, function=GaussianDistort(), name="PLAYER PERCEPT") +predator_percept = ProcessingMechanism(input_shapes=predator_len, function=GaussianDistort(), name="PREDATOR PERCEPT") +prey_percept = ProcessingMechanism(input_shapes=prey_len, function=GaussianDistort(), name="PREY PERCEPT") # Mechanism used to encode trialtype from environment trial_type_input_mech = ProcessingMechanism(name="TRIAL TYPE INPUT") diff --git a/Scripts/Debug/StabilityFlexibility.py b/Scripts/Debug/StabilityFlexibility.py index 3bc8d4bdf45..3035eeb921d 100644 --- a/Scripts/Debug/StabilityFlexibility.py +++ b/Scripts/Debug/StabilityFlexibility.py @@ -77,7 +77,7 @@ def computeAccuracy(variable): # first element is color task attendance, second element is motion task attendance inputLayer = pnl.TransferMechanism(#default_variable=[[0.0, 0.0]], - size=2, + input_shapes=2, function=pnl.Linear(slope=1, intercept=0), output_ports = [pnl.RESULT], name='Input') @@ -100,7 +100,7 @@ def computeAccuracy(variable): stimulusInfo = pnl.TransferMechanism(default_variable=[[0.0, 0.0]], - size = 2, + input_shapes= 2, function = pnl.Linear(slope=1, intercept=0), output_ports = [pnl.RESULT], name = "Stimulus Info") @@ -108,7 +108,7 @@ def computeAccuracy(variable): stimulusInfo.set_log_conditions([pnl.RESULT]) controlledElement = pnl.TransferMechanism(default_variable=[[0.0, 0.0]], - size = 2, + input_shapes= 2, function=pnl.Linear(slope=1, intercept= 0), input_ports=pnl.InputPort(combine=pnl.PRODUCT), output_ports = [pnl.RESULT], @@ -116,7 +116,8 @@ def computeAccuracy(variable): controlledElement.set_log_conditions([pnl.RESULT]) -ddmCombination = pnl.TransferMechanism(size = 1, +ddmCombination = pnl.TransferMechanism( + input_shapes= 1, function = pnl.Linear(slope=1, intercept=0), output_ports = [pnl.RESULT], name = "DDM Integrator") diff --git a/Scripts/Debug/Yotam LCA Model LLVM.py b/Scripts/Debug/Yotam LCA Model LLVM.py index a947ad0509d..bea30c52943 100644 --- a/Scripts/Debug/Yotam LCA Model LLVM.py +++ b/Scripts/Debug/Yotam LCA Model LLVM.py @@ -132,13 +132,15 @@ def get_trained_network(bipartite_graph, num_features=3, num_hidden=200, epochs= lr = learning_rate # Instantiate layers and projections - il = pnl.TransferMechanism(size=D_i, name='input') - cl = pnl.TransferMechanism(size=D_c, name='control') + il = pnl.TransferMechanism(input_shapes=D_i, name='input') + cl = pnl.TransferMechanism(input_shapes=D_c, name='control') - hl = pnl.TransferMechanism(size=D_h, name='hidden', + hl = pnl.TransferMechanism( + input_shapes=D_h, name='hidden', function=pnl.Logistic(bias=-2)) - ol = 
pnl.TransferMechanism(size=D_o, name='output', + ol = pnl.TransferMechanism( + input_shapes=D_o, name='output', function=pnl.Logistic(bias=-2)) pih = pnl.MappingProjection(matrix=wih) @@ -190,7 +192,8 @@ def get_trained_network(bipartite_graph, num_features=3, num_hidden=200, epochs= # Apply LCA transform (values from Sebastian's code -- supposedly taken from the original LCA paper from Marius & Jay) if attach_LCA: - lca = pnl.LCAMechanism(size=D_o, + lca = pnl.LCAMechanism( + input_shapes=D_o, leak=leak, competition=competition, self_excitation=self_excitation, @@ -251,14 +254,16 @@ def get_trained_network_multLCA(bipartite_graph, num_features=3, num_hidden=200, lr = learning_rate # Instantiate layers and projections - il = pnl.TransferMechanism(size=D_i, name='input') - cl = pnl.TransferMechanism(size=D_c, name='control') + il = pnl.TransferMechanism(input_shapes=D_i, name='input') + cl = pnl.TransferMechanism(input_shapes=D_c, name='control') - hl = pnl.TransferMechanism(size=D_h, + hl = pnl.TransferMechanism( + input_shapes=D_h, name='hidden', function=pnl.Logistic(bias=-2)) - ol = pnl.TransferMechanism(size=D_o, + ol = pnl.TransferMechanism( + input_shapes=D_o, name='output', function=pnl.Logistic(bias=-2)) @@ -323,7 +328,8 @@ def get_trained_network_multLCA(bipartite_graph, num_features=3, num_hidden=200, lca_matrix = get_LCA_matrix(output_dims, num_features, self_excitation, competition) - lca = pnl.RecurrentTransferMechanism(size=D_o, + lca = pnl.RecurrentTransferMechanism( + input_shapes=D_o, matrix=lca_matrix, integrator_mode=True, integrator_function=lci, @@ -339,7 +345,8 @@ def get_trained_network_multLCA(bipartite_graph, num_features=3, num_hidden=200, # Dummy to save mnet results if str(LCA_BIN_EXECUTE).startswith("LLVM"): - dummy = pnl.TransferMechanism(size=D_o, + dummy = pnl.TransferMechanism( + input_shapes=D_o, name="MNET_OUT") wrapper_composition.add_linear_processing_pathway([mnet, dummy]) diff --git a/Scripts/Debug/Yotam LCA Model.py b/Scripts/Debug/Yotam LCA Model.py index 812a49a88d5..d1fec4ab013 100644 --- a/Scripts/Debug/Yotam LCA Model.py +++ b/Scripts/Debug/Yotam LCA Model.py @@ -117,13 +117,15 @@ def get_trained_network(bipartite_graph, num_features=3, num_hidden=200, epochs= lr = learning_rate # Instantiate layers and projections - il = pnl.TransferMechanism(size=D_i, name='input') - cl = pnl.TransferMechanism(size=D_c, name='control') + il = pnl.TransferMechanism(input_shapes=D_i, name='input') + cl = pnl.TransferMechanism(input_shapes=D_c, name='control') - hl = pnl.TransferMechanism(size=D_h, name='hidden', + hl = pnl.TransferMechanism( + input_shapes=D_h, name='hidden', function=pnl.Logistic(bias=-2)) - ol = pnl.TransferMechanism(size=D_o, name='output', + ol = pnl.TransferMechanism( + input_shapes=D_o, name='output', function=pnl.Logistic(bias=-2)) pih = pnl.MappingProjection(matrix=wih) @@ -174,7 +176,8 @@ def get_trained_network(bipartite_graph, num_features=3, num_hidden=200, epochs= # Apply LCA transform (values from Sebastian's code -- supposedly taken from the original LCA paper from Marius & Jay) if attach_LCA: - lca = pnl.LCAMechanism(size=D_o, + lca = pnl.LCAMechanism( + input_shapes=D_o, leak=leak, competition=competition, self_excitation=self_excitation, @@ -237,14 +240,16 @@ def get_trained_network_multLCA(bipartite_graph, num_features=3, num_hidden=200, lr = learning_rate # Instantiate layers and projections - il = pnl.TransferMechanism(size=D_i, name='input') - cl = pnl.TransferMechanism(size=D_c, name='control') + il = 
pnl.TransferMechanism(input_shapes=D_i, name='input') + cl = pnl.TransferMechanism(input_shapes=D_c, name='control') - hl = pnl.TransferMechanism(size=D_h, + hl = pnl.TransferMechanism( + input_shapes=D_h, name='hidden', function=pnl.Logistic(bias=-2)) - ol = pnl.TransferMechanism(size=D_o, + ol = pnl.TransferMechanism( + input_shapes=D_o, name='output', function=pnl.Logistic(bias=-2)) @@ -304,7 +309,8 @@ def get_trained_network_multLCA(bipartite_graph, num_features=3, num_hidden=200, lca_matrix = get_LCA_matrix(output_dims, num_features, self_excitation, competition) - lca = pnl.RecurrentTransferMechanism(size=D_o, + lca = pnl.RecurrentTransferMechanism( + input_shapes=D_o, matrix=lca_matrix, integrator_mode=True, integrator_function=lci, diff --git a/Scripts/Debug/bryant_lca_with_termination.py b/Scripts/Debug/bryant_lca_with_termination.py index 610e2a76769..0399473ac29 100644 --- a/Scripts/Debug/bryant_lca_with_termination.py +++ b/Scripts/Debug/bryant_lca_with_termination.py @@ -1,19 +1,19 @@ import psyneulink as pnl cueInterval = pnl.TransferMechanism(default_variable=[[0.0]], - size=1, + input_shapes=1, function=pnl.Linear(slope=1, intercept=0), output_ports=[pnl.RESULT], name='Cue-Stimulus Interval') taskLayer = pnl.TransferMechanism(default_variable=[[0.0, 0.0]], - size=2, + input_shapes=2, function=pnl.Linear(slope=1, intercept=0), output_ports=[pnl.RESULT], name='Task Input [I1, I2]') activation = pnl.LCAMechanism(default_variable=[[0.0, 0.0]], - size=2, + input_shapes=2, function=pnl.Logistic(gain=1), leak=.5, competition=2, diff --git a/Scripts/Debug/laura_test_no_noise_stroop_09_11_2018.py b/Scripts/Debug/laura_test_no_noise_stroop_09_11_2018.py index c8c23a59b93..d234874b81e 100644 --- a/Scripts/Debug/laura_test_no_noise_stroop_09_11_2018.py +++ b/Scripts/Debug/laura_test_no_noise_stroop_09_11_2018.py @@ -8,16 +8,16 @@ # # INPUT UNITS # # # colors: ('red', 'green'), words: ('RED','GREEN') -# colors_input_layer = pnl.TransferMechanism(size=2, +# colors_input_layer = pnl.TransferMechanism(input_shapes=2, # function=pnl.Linear, # name='COLORS_INPUT') # -# words_input_layer = pnl.TransferMechanism(size=2, +# words_input_layer = pnl.TransferMechanism(input_shapes=2, # function=pnl.Linear, # name='WORDS_INPUT') # # # Task layer, tasks: ('name the color', 'read the word') -# task_layer = pnl.TransferMechanism(size=2, +# task_layer = pnl.TransferMechanism(input_shapes=2, # function=pnl.Linear, # name='TASK') # @@ -28,7 +28,7 @@ # # randomly distributed noise to the net input # # time averaging = integration_rate = 0.1 # unit_noise = 0.005 -# # colors_hidden_layer = pnl.TransferMechanism(size=2, +# # colors_hidden_layer = pnl.TransferMechanism(input_shapes=2, # # function=pnl.Logistic(gain=1.0, bias=4.0), # # # should be able to get same result with offset = -4.0 # # integrator_mode=True, @@ -36,7 +36,7 @@ # # integration_rate=0.1, # # name='COLORS HIDDEN') # -# colors_hidden_layer = pnl.TransferMechanism(size=2, +# colors_hidden_layer = pnl.TransferMechanism(input_shapes=2, # function=pnl.Logistic(gain=1.0, x_0=4.0), # # should be able to get same result with offset = -4.0 # integrator_mode=True, @@ -44,13 +44,13 @@ # integration_rate=0.1, # name='COLORS HIDDEN') # # words_hidden: ('RED','GREEN') -# # words_hidden_layer = pnl.TransferMechanism(size=2, +# # words_hidden_layer = pnl.TransferMechanism(input_shapes=2, # # function=pnl.Logistic(gain=1.0, bias=4.0), # # integrator_mode=True, # # noise=pnl.NormalDist(mean=0, standard_deviation=unit_noise).function, # # 
integration_rate=0.1, # # name='WORDS HIDDEN') -# words_hidden_layer = pnl.TransferMechanism(size=2, +# words_hidden_layer = pnl.TransferMechanism(input_shapes=2, # function=pnl.Logistic(gain=1.0, x_0=4.0), # integrator_mode=True, # noise=0.0, @@ -62,13 +62,13 @@ # # Response layer, provide input to accumulator, responses: ('red', 'green') # # time averaging = tau = 0.1 # # randomly distributed noise to the net input -# # response_layer = pnl.TransferMechanism(size=2, +# # response_layer = pnl.TransferMechanism(input_shapes=2, # # function=pnl.Logistic, # # name='RESPONSE', # # integrator_mode=True, # # noise=pnl.NormalDist(mean=0, standard_deviation=unit_noise).function, # # integration_rate=0.1) -# response_layer = pnl.TransferMechanism(size=2, +# response_layer = pnl.TransferMechanism(input_shapes=2, # function=pnl.Logistic, # name='RESPONSE', # integrator_mode=True, @@ -295,7 +295,7 @@ # dataframes = [] # first = True # for log_layer in mechanism_list: -# layer_size = log_layer.size[0] +# layer_size = log_layer.input_shapes[0] # log_dict = log_layer.log.nparray_dictionary() # # # Extract out all keys, treating value specially since it's already an np array diff --git a/Scripts/Debug/lca/pytorch_lca.py b/Scripts/Debug/lca/pytorch_lca.py index cb70c6639ad..56e4ebbdbd4 100644 --- a/Scripts/Debug/lca/pytorch_lca.py +++ b/Scripts/Debug/lca/pytorch_lca.py @@ -275,7 +275,7 @@ def make_pnl_lca( lca = pnl.LCAMechanism( default_variable=[[0.0 for _ in range(num_lca_dim)]], - size=num_lca_dim, + input_shapes=num_lca_dim, threshold=threshold, function=activation_function, leak=leak, diff --git a/Scripts/Debug/markus_test_umemoto.py b/Scripts/Debug/markus_test_umemoto.py index 973c7d1b3c6..03fd510c0cf 100644 --- a/Scripts/Debug/markus_test_umemoto.py +++ b/Scripts/Debug/markus_test_umemoto.py @@ -99,7 +99,8 @@ # Decision.loggable_items # Outcome Mechanisms: -Reward = pnl.TransferMechanism(size = 1, +Reward = pnl.TransferMechanism( + input_shapes= 1, name='Reward') # Processes: diff --git a/Scripts/Debug/predator_prey_opt/predator_prey_dmt.py b/Scripts/Debug/predator_prey_opt/predator_prey_dmt.py index 9d862001c2e..9977211a108 100644 --- a/Scripts/Debug/predator_prey_opt/predator_prey_dmt.py +++ b/Scripts/Debug/predator_prey_opt/predator_prey_dmt.py @@ -134,9 +134,11 @@ def get_new_episode_flag(): # ************************************** PROCESSING MECHANISMS ******************************************************** # Perceptual Mechanisms - self.player_percept = ProcessingMechanism(size=prey_len, function=GaussianDistort(), name="PLAYER PERCEPT") - self.predator_percept = ProcessingMechanism(size=predator_len, function=GaussianDistort(), name="PREDATOR PERCEPT") - self.prey_percept = ProcessingMechanism(size=prey_len, function=GaussianDistort(), name="PREY PERCEPT") + self.player_percept = ProcessingMechanism( + input_shapes=prey_len, function=GaussianDistort(), name="PLAYER PERCEPT") + self.predator_percept = ProcessingMechanism( + input_shapes=predator_len, function=GaussianDistort(), name="PREDATOR PERCEPT") + self.prey_percept = ProcessingMechanism(input_shapes=prey_len, function=GaussianDistort(), name="PREY PERCEPT") # Mechanism used to encode trialtype from environment self.prey_pred_trial_input_mech = ProcessingMechanism(name="PREY PREDATOR TRIAL") diff --git a/Scripts/Debug/stability_flexibility/stability_flexibility.py b/Scripts/Debug/stability_flexibility/stability_flexibility.py index 38420d88380..4d70590d29d 100644 --- a/Scripts/Debug/stability_flexibility/stability_flexibility.py +++ 
b/Scripts/Debug/stability_flexibility/stability_flexibility.py @@ -118,7 +118,7 @@ def make_stab_flex( # Task Layer: [Color, Motion] {0, 1} Mutually Exclusive # Origin Node taskLayer = pnl.TransferMechanism( - size=2, + input_shapes=2, function=pnl.Linear(slope=1, intercept=0), output_ports=[pnl.RESULT], name="Task Input [I1, I2]", @@ -127,7 +127,7 @@ def make_stab_flex( # Stimulus Layer: [Color Stimulus, Motion Stimulus] # Origin Node stimulusInfo = pnl.TransferMechanism( - size=2, + input_shapes=2, function=pnl.Linear(slope=1, intercept=0), output_ports=[pnl.RESULT], name="Stimulus Input [S1, S2]", @@ -136,7 +136,7 @@ def make_stab_flex( # Cue-To-Stimulus Interval Layer # Origin Node cueInterval = pnl.TransferMechanism( - size=1, + input_shapes=1, function=pnl.Linear(slope=1, intercept=0), output_ports=[pnl.RESULT], name="Cue-Stimulus Interval", @@ -145,7 +145,7 @@ def make_stab_flex( # Correct Response Info # Origin Node correctResponseInfo = pnl.TransferMechanism( - size=1, + input_shapes=1, function=pnl.Linear(slope=1, intercept=0), output_ports=[pnl.RESULT], name="Correct Response Info", @@ -153,7 +153,7 @@ def make_stab_flex( # Control Module Layer: [Color Activation, Motion Activation] controlModule = pnl.LCAMechanism( - size=2, + input_shapes=2, function=pnl.Logistic(gain=GAIN), leak=LEAK, competition=COMP, @@ -174,7 +174,7 @@ def make_stab_flex( # Hadamard product of controlModule and Stimulus Information nonAutomaticComponent = pnl.TransferMechanism( - size=2, + input_shapes=2, function=pnl.Linear(slope=1, intercept=0), input_ports=pnl.InputPort(combine=pnl.PRODUCT), output_ports=[pnl.RESULT], @@ -183,7 +183,7 @@ def make_stab_flex( # Multiply Stimulus Input by the automaticity weight congruenceWeighting = pnl.TransferMechanism( - size=2, + input_shapes=2, function=pnl.Linear(slope=AUTOMATICITY, intercept=0), output_ports=[pnl.RESULT], name="Automaticity-weighted Stimulus Input [w*S1, w*S2]", @@ -191,7 +191,7 @@ def make_stab_flex( # Summation of nonAutomatic and Automatic Components ddmCombination = pnl.TransferMechanism( - size=1, + input_shapes=1, function=pnl.Linear(slope=1, intercept=0), input_ports=pnl.InputPort(combine=pnl.SUM), output_ports=[pnl.RESULT], @@ -200,7 +200,7 @@ def make_stab_flex( # Ensure upper boundary of DDM is always correct response by multiplying DDM input by correctResponseInfo ddmRecodeDrift = pnl.TransferMechanism( - size=1, + input_shapes=1, function=pnl.Linear(slope=1, intercept=0), input_ports=pnl.InputPort(combine=pnl.PRODUCT), output_ports=[pnl.RESULT], @@ -209,7 +209,7 @@ def make_stab_flex( # Scale DDM inputs ddmInputScale = pnl.TransferMechanism( - size=1, + input_shapes=1, function=pnl.Linear(slope=SCALE, intercept=0), output_ports=[pnl.RESULT], name="Scaled DDM Input", @@ -284,10 +284,10 @@ def make_stab_flex( # Hot-fix currently necessary to allow control module and DDM to execute in parallel in compiled mode # We need two gates in order to output both values (decision and response) from the ddm - decisionGate = pnl.ProcessingMechanism(size=1, name="DECISION_GATE") + decisionGate = pnl.ProcessingMechanism(input_shapes=1, name="DECISION_GATE") stabilityFlexibility.add_node(decisionGate) - responseGate = pnl.ProcessingMechanism(size=1, name="RESPONSE_GATE") + responseGate = pnl.ProcessingMechanism(input_shapes=1, name="RESPONSE_GATE") stabilityFlexibility.add_node(responseGate) stabilityFlexibility.add_projection( diff --git a/Scripts/Debug/stability_flexibility/stability_flexibility_nn.py 
b/Scripts/Debug/stability_flexibility/stability_flexibility_nn.py index c0ae1c70f64..f564f6b2bfb 100644 --- a/Scripts/Debug/stability_flexibility/stability_flexibility_nn.py +++ b/Scripts/Debug/stability_flexibility/stability_flexibility_nn.py @@ -105,24 +105,24 @@ def make_stab_flex( # Task Input: [Parity, Magnitude] {0, 1} Mutually Exclusive # Origin Node - taskInput = pnl.TransferMechanism(name="Task Input", size=2) # Note default function is linear + taskInput = pnl.TransferMechanism(name="Task Input", input_shapes=2) # Note default function is linear # Stimulus Input: [Odd, Even, Small, Large] {0, 1} # Origin Node - stimulusInput = pnl.TransferMechanism(name="Stimulus Input", size=4) + stimulusInput = pnl.TransferMechanism(name="Stimulus Input", input_shapes=4) # Cue-To-Stimulus Interval Input # Origin Node - cueInterval = pnl.TransferMechanism(name="Cue-Stimulus Interval", size=1) + cueInterval = pnl.TransferMechanism(name="Cue-Stimulus Interval", input_shapes=1) # Correct Response Info {1, -1} # Origin Node - correctResponseInfo = pnl.TransferMechanism(name="Correct Response Info", size=1) + correctResponseInfo = pnl.TransferMechanism(name="Correct Response Info", input_shapes=1) # Control Units: [Parity Activation, Magnitude Activation] controlModule = pnl.LCAMechanism( name="Task Activations [C1, C2]", - size=2, + input_shapes=2, function=pnl.Logistic(gain=GAIN), leak=LEAK, competition=COMP, @@ -143,14 +143,14 @@ def make_stab_flex( # Stimulus Input to Hidden Weighting stimulusWeighting = pnl.TransferMechanism( name="Stimulus Input to Hidden Weighting", - size=4, + input_shapes=4, function=pnl.Linear(slope=STIM_HIDDEN_WT, intercept=0), ) # Hidden Units [Odd, Even, Small, Large] hiddenLayer = pnl.TransferMechanism( name="Hidden Units", - size=4, + input_shapes=4, function=pnl.Logistic(gain=1, bias=-4), input_ports=pnl.InputPort(combine=pnl.SUM) ) @@ -158,14 +158,14 @@ def make_stab_flex( # Hidden to Response Weighting hiddenWeighting = pnl.TransferMechanism( name="Hidden Unit to Response Weighting", - size=4, + input_shapes=4, function=pnl.Linear(slope=HIDDEN_RESP_WT, intercept=0) ) # Response Units [Left, Right] responseLayer = pnl.TransferMechanism( name="Response Units", - size=2, + input_shapes=2, function=pnl.Logistic(gain=1), input_ports=pnl.InputPort(combine=pnl.SUM) ) @@ -173,14 +173,14 @@ def make_stab_flex( # Difference in activation of response units ddmCombination = pnl.TransferMechanism( name="Drift", - size=1, + input_shapes=1, input_ports=pnl.InputPort(combine=pnl.SUM) ) # Ensure upper boundary of DDM is always correct response by multiplying DDM input by correctResponseInfo ddmRecodeDrift = pnl.TransferMechanism( name="Recoded Drift = Drift * correctResponseInfo", - size=1, + input_shapes=1, input_ports=pnl.InputPort(combine=pnl.PRODUCT) ) @@ -270,10 +270,10 @@ def make_stab_flex( # Hot-fix currently necessary to allow control module and DDM to execute in parallel in compiled mode # We need two gates in order to output both values (decision and response) from the ddm - decisionGate = pnl.ProcessingMechanism(size=1, name="DECISION_GATE") + decisionGate = pnl.ProcessingMechanism(input_shapes=1, name="DECISION_GATE") stabilityFlexibility.add_node(decisionGate) - responseGate = pnl.ProcessingMechanism(size=1, name="RESPONSE_GATE") + responseGate = pnl.ProcessingMechanism(input_shapes=1, name="RESPONSE_GATE") stabilityFlexibility.add_node(responseGate) stabilityFlexibility.add_projection( diff --git a/Scripts/Debug/stability_flexibility_simple.py 
b/Scripts/Debug/stability_flexibility_simple.py index d8a6ed9d30b..ade1f99d491 100644 --- a/Scripts/Debug/stability_flexibility_simple.py +++ b/Scripts/Debug/stability_flexibility_simple.py @@ -71,7 +71,7 @@ def computeAccuracy(variable): # first element is color task attendance, second element is motion task attendance inputLayer = pnl.TransferMechanism( # default_variable=[[0.0, 0.0]], - size=2, + input_shapes=2, function=pnl.Linear(slope=1, intercept=0), output_ports=[pnl.RESULT], name='Input') @@ -93,7 +93,7 @@ def computeAccuracy(variable): activation.set_log_conditions([pnl.RESULT, "mod_gain"]) stimulusInfo = pnl.TransferMechanism(default_variable=[[0.0, 0.0]], - size=2, + input_shapes=2, function=pnl.Linear(slope=1, intercept=0), output_ports=[pnl.RESULT], name="Stimulus Info") @@ -101,7 +101,7 @@ def computeAccuracy(variable): stimulusInfo.set_log_conditions([pnl.RESULT]) controlledElement = pnl.TransferMechanism(default_variable=[[0.0, 0.0]], - size=2, + input_shapes=2, function=pnl.Linear(slope=1, intercept=0), input_ports=pnl.InputPort(combine=pnl.PRODUCT), output_ports=[pnl.RESULT], @@ -109,7 +109,8 @@ def computeAccuracy(variable): controlledElement.set_log_conditions([pnl.RESULT]) -ddmCombination = pnl.TransferMechanism(size=1, +ddmCombination = pnl.TransferMechanism( + input_shapes=1, function=pnl.Linear(slope=1, intercept=0), output_ports=[pnl.RESULT], name="DDM Integrator") diff --git a/Scripts/Examples/Basics And Primer/Stroop Model - Basic.py b/Scripts/Examples/Basics And Primer/Stroop Model - Basic.py index 234bc6b36d9..948ce72b10e 100644 --- a/Scripts/Examples/Basics And Primer/Stroop Model - Basic.py +++ b/Scripts/Examples/Basics And Primer/Stroop Model - Basic.py @@ -2,22 +2,22 @@ import numpy as np # Construct the color naming pathway: -color_input = ProcessingMechanism(name='COLOR INPUT', size=2) # Note: default function is Linear +color_input = ProcessingMechanism(name='COLOR INPUT', input_shapes=2) # Note: default function is Linear color_input_to_hidden_wts = np.array([[1, -1], [-1, 1]]) -color_hidden = ProcessingMechanism(name='COLOR HIDDEN', size=2, function=Logistic(bias=-4)) +color_hidden = ProcessingMechanism(name='COLOR HIDDEN', input_shapes=2, function=Logistic(bias=-4)) color_hidden_to_output_wts = np.array([[1, -1], [-1, 1]]) -output = ProcessingMechanism(name='OUTPUT', size=2, function=Logistic) +output = ProcessingMechanism(name='OUTPUT', input_shapes=2, function=Logistic) color_pathway = [color_input, color_input_to_hidden_wts, color_hidden, color_hidden_to_output_wts, output] # Construct the word reading pathway (using the same output_layer) -word_input = ProcessingMechanism(name='WORD INPUT', size=2) +word_input = ProcessingMechanism(name='WORD INPUT', input_shapes=2) word_input_to_hidden_wts = np.array([[2, -2], [-2, 2]]) -word_hidden = ProcessingMechanism(name='WORD HIDDEN', size=2, function=Logistic(bias=-4)) +word_hidden = ProcessingMechanism(name='WORD HIDDEN', input_shapes=2, function=Logistic(bias=-4)) word_hidden_to_output_wts = np.array([[2, -2], [-2, 2]]) word_pathway = [word_input, word_input_to_hidden_wts, word_hidden, word_hidden_to_output_wts, output] # Construct the task specification pathways -task_input = ProcessingMechanism(name='TASK INPUT', size=2) +task_input = ProcessingMechanism(name='TASK INPUT', input_shapes=2) task_color_wts = np.array([[4,4],[0,0]]) task_word_wts = np.array([[0,0],[4,4]]) task_color_pathway = [task_input, task_color_wts, color_hidden] diff --git a/Scripts/Examples/Basics And Primer/Stroop Model - Conflict 
Monitoring.py b/Scripts/Examples/Basics And Primer/Stroop Model - Conflict Monitoring.py index 7b24cee5b19..f4e9656118b 100644 --- a/Scripts/Examples/Basics And Primer/Stroop Model - Conflict Monitoring.py +++ b/Scripts/Examples/Basics And Primer/Stroop Model - Conflict Monitoring.py @@ -4,23 +4,23 @@ # CONSTRUCT THE MODEL *********************************** # Construct the color naming pathway: -color_input = ProcessingMechanism(name='COLOR INPUT', size=2) # Note: default function is Linear +color_input = ProcessingMechanism(name='COLOR INPUT', input_shapes=2) # Note: default function is Linear color_input_to_hidden_wts = np.array([[2, -2], [-2, 2]]) -color_hidden = ProcessingMechanism(name='COLOR HIDDEN', size=2, function=Logistic(bias=-4)) +color_hidden = ProcessingMechanism(name='COLOR HIDDEN', input_shapes=2, function=Logistic(bias=-4)) color_hidden_to_output_wts = np.array([[2, -2], [-2, 2]]) -output = ProcessingMechanism(name='OUTPUT', size=2, function=Logistic) +output = ProcessingMechanism(name='OUTPUT', input_shapes=2, function=Logistic) color_pathway = [color_input, color_input_to_hidden_wts, color_hidden, color_hidden_to_output_wts, output] # Construct the word reading pathway (using the same output_layer) -word_input = ProcessingMechanism(name='WORD INPUT', size=2) +word_input = ProcessingMechanism(name='WORD INPUT', input_shapes=2) word_input_to_hidden_wts = np.array([[3, -3], [-3, 3]]) -word_hidden = ProcessingMechanism(name='WORD HIDDEN', size=2, function=Logistic(bias=-4)) +word_hidden = ProcessingMechanism(name='WORD HIDDEN', input_shapes=2, function=Logistic(bias=-4)) word_hidden_to_output_wts = np.array([[3, -3], [-3, 3]]) word_pathway = [word_input, word_input_to_hidden_wts, word_hidden, word_hidden_to_output_wts, output] # Construct the task specification pathways -task_input = ProcessingMechanism(name='TASK INPUT', size=2) -task = LCAMechanism(name='TASK', size=2, initial_value=[0.5,0.5]) +task_input = ProcessingMechanism(name='TASK INPUT', input_shapes=2) +task = LCAMechanism(name='TASK', input_shapes=2, initial_value=[0.5, 0.5]) task_color_wts = np.array([[4,4],[0,0]]) task_word_wts = np.array([[0,0],[4,4]]) task_color_pathway = [task_input, task, task_color_wts, color_hidden] @@ -33,7 +33,8 @@ # Construct control mechanism control = ControlMechanism(name='CONTROL', objective_mechanism=ObjectiveMechanism(name='Conflict Monitor', - function=Energy(size=2, + function=Energy( + input_shapes=2, matrix=[[0,-2.5],[-2.5,0]]), monitor=output), default_allocation=[0.5], diff --git a/Scripts/Examples/Basics And Primer/XOR Model b/Scripts/Examples/Basics And Primer/XOR Model index f67a654a839..b85e9863bf9 100644 --- a/Scripts/Examples/Basics And Primer/XOR Model +++ b/Scripts/Examples/Basics And Primer/XOR Model @@ -1,9 +1,9 @@ from psyneulink import * import numpy as np -input_mech = TransferMechanism(name='INPUT', size=2) -hidden_mech = TransferMechanism(name='HIDDEN', size=10, function=Logistic) -output_mech = TransferMechanism(name='OUTPUT', size=1, function=Logistic) +input_mech = TransferMechanism(name='INPUT', input_shapes=2) +hidden_mech = TransferMechanism(name='HIDDEN', input_shapes=10, function=Logistic) +output_mech = TransferMechanism(name='OUTPUT', input_shapes=1, function=Logistic) input_to_hidden_projection = MappingProjection(name='INPUT_TO_HIDDEN', matrix=np.random.rand(2,10), sender=input_mech, diff --git a/Scripts/Examples/Botvinick Model Composition.py b/Scripts/Examples/Botvinick Model Composition.py index 0c0c4540f4b..b188e00cee0 100644 --- 
a/Scripts/Examples/Botvinick Model Composition.py +++ b/Scripts/Examples/Botvinick Model Composition.py @@ -2,20 +2,24 @@ import numpy as np -colors_input_layer = pnl.TransferMechanism(size=3, +colors_input_layer = pnl.TransferMechanism( + input_shapes=3, function=pnl.Linear, name='COLORS_INPUT') -words_input_layer = pnl.TransferMechanism(size=3, +words_input_layer = pnl.TransferMechanism( + input_shapes=3, function=pnl.Linear, name='WORDS_INPUT') -task_input_layer = pnl.TransferMechanism(size=2, +task_input_layer = pnl.TransferMechanism( + input_shapes=2, function=pnl.Linear, name='TASK_INPUT') # Task layer, tasks: ('name the color', 'read the word') -task_layer = pnl.RecurrentTransferMechanism(size=2, +task_layer = pnl.RecurrentTransferMechanism( + input_shapes=2, function=pnl.Logistic(), hetero=-2, integrator_mode=True, @@ -24,14 +28,16 @@ # Hidden layer # colors: ('red','green', 'neutral') words: ('RED','GREEN', 'NEUTRAL') -colors_hidden_layer = pnl.RecurrentTransferMechanism(size=3, +colors_hidden_layer = pnl.RecurrentTransferMechanism( + input_shapes=3, function=pnl.Logistic(x_0=4.0), # bias 4.0 is -4.0 in the paper see Docs for description integrator_mode=True, hetero=-2, integration_rate=0.01, # cohen-huston text says 0.01 name='COLORS_HIDDEN') -words_hidden_layer = pnl.RecurrentTransferMechanism(size=3, +words_hidden_layer = pnl.RecurrentTransferMechanism( + input_shapes=3, function=pnl.Logistic(x_0=4.0), integrator_mode=True, hetero=-2, @@ -39,7 +45,8 @@ name='WORDS_HIDDEN') # Response layer, responses: ('red', 'green') -response_layer = pnl.RecurrentTransferMechanism(size=2, +response_layer = pnl.RecurrentTransferMechanism( + input_shapes=2, function=pnl.Logistic(), hetero=-2.0, integrator_mode=True, diff --git a/Scripts/Examples/Gating-Mechanism. with UDF.py b/Scripts/Examples/Gating-Mechanism. with UDF.py index 91d51d3fc83..a7c22e3a94f 100644 --- a/Scripts/Examples/Gating-Mechanism. with UDF.py +++ b/Scripts/Examples/Gating-Mechanism. 
with UDF.py @@ -69,7 +69,7 @@ def my_sinusoidal_fct(input, Gating_Mechanism = pnl.GatingMechanism( # default_gating_allocation=0.0, - size=[1], + input_shapes=[1], gating_signals=[ # Output_Layer Output_Layer.output_port, diff --git a/Scripts/Examples/Gilbert_Shallice_Composition_Model.py b/Scripts/Examples/Gilbert_Shallice_Composition_Model.py index 285e758b7f4..babe1d1db90 100644 --- a/Scripts/Examples/Gilbert_Shallice_Composition_Model.py +++ b/Scripts/Examples/Gilbert_Shallice_Composition_Model.py @@ -5,15 +5,18 @@ ### LAYERS -WORD_INPUT_LAYER = pnl.TransferMechanism(size = 3, +WORD_INPUT_LAYER = pnl.TransferMechanism( + input_shapes= 3, function=pnl.Linear, name='WORD INPUT LAYER') -COLOR_INPUT_LAYER = pnl.TransferMechanism(size = 3, +COLOR_INPUT_LAYER = pnl.TransferMechanism( + input_shapes= 3, function=pnl.Linear, name='COLOR INPUT LAYER') -WORD_OUTPUT_LAYER = pnl.IntegratorMechanism(size = 3, +WORD_OUTPUT_LAYER = pnl.IntegratorMechanism( + input_shapes= 3, # auto= 0.0, # hetero= -2.0, function= pnl.InteractiveActivationIntegrator(decay= 0.0015, rest=-6), @@ -21,7 +24,8 @@ WORD_OUTPUT_LAYER.set_log_conditions('value') -COLOR_OUTPUT_LAYER = pnl.IntegratorMechanism(size = 3, +COLOR_OUTPUT_LAYER = pnl.IntegratorMechanism( + input_shapes= 3, # auto= 0.0, # hetero= -2.0, function= pnl.InteractiveActivationIntegrator(decay= 0.0015, rest=-6, ), @@ -31,18 +35,21 @@ COLOR_OUTPUT_LAYER.set_log_conditions('value') -TASK_DEMAND_LAYER = pnl.IntegratorMechanism(size = 2, +TASK_DEMAND_LAYER = pnl.IntegratorMechanism( + input_shapes= 2, # auto= 0.0, # hetero= -2.0, function= pnl.InteractiveActivationIntegrator(decay= 0.0015, max_val=1, min_val= 1, rest= -4), name='TASK DEMAND LAYER') -WORD_RECURRENT_LAYER = pnl.TransferMechanism(size = 3, +WORD_RECURRENT_LAYER = pnl.TransferMechanism( + input_shapes= 3, function=pnl.Linear, name = 'WORD RECURRENT LAYER') -COLOR_RECURRENT_LAYER = pnl.TransferMechanism(size = 3, +COLOR_RECURRENT_LAYER = pnl.TransferMechanism( + input_shapes= 3, function=pnl.Linear, name = 'COLOR RECURRENT LAYER') diff --git a/Scripts/Examples/Lena Rumelhart script.py b/Scripts/Examples/Lena Rumelhart script.py index e7b78640f86..6d837743148 100644 --- a/Scripts/Examples/Lena Rumelhart script.py +++ b/Scripts/Examples/Lena Rumelhart script.py @@ -79,32 +79,32 @@ def gen_input_vals(nouns, relations): ) h1 = pnl.TransferMechanism(name="hidden_nouns", - size=9, + input_shapes=9, function=psyneulink.core.components.functions.nonstateful.transferfunctions.Logistic() ) h2 = pnl.TransferMechanism(name="hidden_mixed", - size=16, + input_shapes=16, function=psyneulink.core.components.functions.nonstateful.transferfunctions.Logistic() ) out_sig_I = pnl.TransferMechanism(name="sig_outs_I", - size=len(nouns), + input_shapes=len(nouns), function=psyneulink.core.components.functions.nonstateful.transferfunctions.Logistic() ) out_sig_is = pnl.TransferMechanism(name="sig_outs_is", - size=len(is_list), + input_shapes=len(is_list), function=psyneulink.core.components.functions.nonstateful.transferfunctions.Logistic() ) out_sig_has = pnl.TransferMechanism(name="sig_outs_has", - size=len(has_list), + input_shapes=len(has_list), function=psyneulink.core.components.functions.nonstateful.transferfunctions.Logistic() ) out_sig_can = pnl.TransferMechanism(name="sig_outs_can", - size=len(can_list), + input_shapes=len(can_list), function=psyneulink.core.components.functions.nonstateful.transferfunctions.Logistic() ) diff --git a/Scripts/Examples/RL-DDM.py b/Scripts/Examples/RL-DDM.py index 
254dfecbe29..2e11215c490 100644 --- a/Scripts/Examples/RL-DDM.py +++ b/Scripts/Examples/RL-DDM.py @@ -11,7 +11,7 @@ import psyneulink.core.components.functions.nonstateful.learningfunctions input_layer = pnl.TransferMechanism( - size=2, + input_shapes=2, name='Input Layer' ) diff --git a/Scripts/Examples/Rumelhart Semantic Network.py b/Scripts/Examples/Rumelhart Semantic Network.py index 2fcf81b1c8e..7eab228f8bc 100644 --- a/Scripts/Examples/Rumelhart Semantic Network.py +++ b/Scripts/Examples/Rumelhart Semantic Network.py @@ -19,14 +19,14 @@ # Representation_Input (REP_IN) # Construct Mechanisms -rep_in = TransferMechanism(size=10, name='REP_IN') -rel_in = TransferMechanism(size=11, name='REL_IN') -rep_hidden = TransferMechanism(size=4, function=Logistic, name='REP_HIDDEN') -rel_hidden = TransferMechanism(size=5, function=Logistic, name='REL_HIDDEN') -rep_out = TransferMechanism(size=10, function=Logistic, name='REP_OUT') -prop_out = TransferMechanism(size=12, function=Logistic, name='PROP_OUT') -qual_out = TransferMechanism(size=13, function=Logistic, name='QUAL_OUT') -act_out = TransferMechanism(size=14, function=Logistic, name='ACT_OUT') +rep_in = TransferMechanism(input_shapes=10, name='REP_IN') +rel_in = TransferMechanism(input_shapes=11, name='REL_IN') +rep_hidden = TransferMechanism(input_shapes=4, function=Logistic, name='REP_HIDDEN') +rel_hidden = TransferMechanism(input_shapes=5, function=Logistic, name='REL_HIDDEN') +rep_out = TransferMechanism(input_shapes=10, function=Logistic, name='REP_OUT') +prop_out = TransferMechanism(input_shapes=12, function=Logistic, name='PROP_OUT') +qual_out = TransferMechanism(input_shapes=13, function=Logistic, name='QUAL_OUT') +act_out = TransferMechanism(input_shapes=14, function=Logistic, name='ACT_OUT') # Construct Composition comp = Composition(name='Rumelhart Semantic Network') diff --git a/Scripts/Examples/StabilityFlexibility.py b/Scripts/Examples/StabilityFlexibility.py index 8de929a983f..49a9ed63d50 100644 --- a/Scripts/Examples/StabilityFlexibility.py +++ b/Scripts/Examples/StabilityFlexibility.py @@ -78,7 +78,7 @@ def computeAccuracy(variable): # first element is color task attendance, second element is motion task attendance inputLayer = pnl.TransferMechanism(#default_variable=[[0.0, 0.0]], - size=2, + input_shapes=2, function=pnl.Linear(slope=1, intercept=0), output_ports = [pnl.RESULT], name='Input') @@ -101,7 +101,7 @@ def computeAccuracy(variable): stimulusInfo = pnl.TransferMechanism(default_variable=[[0.0, 0.0]], - size = 2, + input_shapes= 2, function = pnl.Linear(slope=1, intercept=0), output_ports = [pnl.RESULT], name = "Stimulus Info") @@ -109,7 +109,7 @@ def computeAccuracy(variable): stimulusInfo.set_log_conditions([pnl.RESULT]) controlledElement = pnl.TransferMechanism(default_variable=[[0.0, 0.0]], - size = 2, + input_shapes= 2, function=pnl.Linear(slope=1, intercept= 0), input_ports=pnl.InputPort(combine=pnl.PRODUCT), output_ports = [pnl.RESULT], @@ -117,7 +117,8 @@ def computeAccuracy(variable): controlledElement.set_log_conditions([pnl.RESULT]) -ddmCombination = pnl.TransferMechanism(size = 1, +ddmCombination = pnl.TransferMechanism( + input_shapes= 1, function = pnl.Linear(slope=1, intercept=0), output_ports = [pnl.RESULT], name = "DDM Integrator") diff --git a/Scripts/Examples/Stroop Model.py b/Scripts/Examples/Stroop Model.py index 982fb67cec1..720b38efc0c 100644 --- a/Scripts/Examples/Stroop Model.py +++ b/Scripts/Examples/Stroop Model.py @@ -4,23 +4,23 @@ # CONSTRUCT THE MODEL *********************************** 
# Construct the color naming pathway: -color_input = ProcessingMechanism(name='COLOR INPUT', size=2) # Note: default function is Linear +color_input = ProcessingMechanism(name='COLOR INPUT', input_shapes=2) # Note: default function is Linear color_input_to_hidden_wts = np.array([[2, -2], [-2, 2]]) -color_hidden = ProcessingMechanism(name='COLOR HIDDEN', size=2, function=Logistic(bias=-4)) +color_hidden = ProcessingMechanism(name='COLOR HIDDEN', input_shapes=2, function=Logistic(bias=-4)) color_hidden_to_output_wts = np.array([[2, -2], [-2, 2]]) -output = ProcessingMechanism(name='OUTPUT', size=2, function=Logistic) +output = ProcessingMechanism(name='OUTPUT', input_shapes=2, function=Logistic) color_pathway = [color_input, color_input_to_hidden_wts, color_hidden, color_hidden_to_output_wts, output] # Construct the word reading pathway (using the same output_layer) -word_input = ProcessingMechanism(name='WORD INPUT', size=2) +word_input = ProcessingMechanism(name='WORD INPUT', input_shapes=2) word_input_to_hidden_wts = np.array([[3, -3], [-3, 3]]) -word_hidden = ProcessingMechanism(name='WORD HIDDEN', size=2, function=Logistic(bias=-4)) +word_hidden = ProcessingMechanism(name='WORD HIDDEN', input_shapes=2, function=Logistic(bias=-4)) word_hidden_to_output_wts = np.array([[3, -3], [-3, 3]]) word_pathway = [word_input, word_input_to_hidden_wts, word_hidden, word_hidden_to_output_wts, output] # Construct the task specification pathways -task_input = ProcessingMechanism(name='TASK INPUT', size=2) -task = LCAMechanism(name='TASK', size=2, initial_value=[0.5,0.5]) +task_input = ProcessingMechanism(name='TASK INPUT', input_shapes=2) +task = LCAMechanism(name='TASK', input_shapes=2, initial_value=[0.5, 0.5]) task_color_wts = np.array([[4,4],[0,0]]) task_word_wts = np.array([[0,0],[4,4]]) task_color_pathway = [task_input, task, task_color_wts, color_hidden] diff --git a/Scripts/Examples/Tutorial/Rumelhart Semantic Network (autodiff).py b/Scripts/Examples/Tutorial/Rumelhart Semantic Network (autodiff).py index 653601b6918..291b97f2ad1 100644 --- a/Scripts/Examples/Tutorial/Rumelhart Semantic Network (autodiff).py +++ b/Scripts/Examples/Tutorial/Rumelhart Semantic Network (autodiff).py @@ -96,32 +96,32 @@ def gen_input_vals(nouns, relations): #For the hidden layers, we will be using logistic functions hn = pnl.TransferMechanism(name="hidden_nouns", - size=9, + input_shapes=9, function=pnl.Logistic() ) hm = pnl.TransferMechanism(name="hidden_mixed", - size=n_units, + input_shapes=n_units, function=pnl.Logistic() ) out_sig_I = pnl.TransferMechanism(name="sig_outs_I", - size=len(nouns), + input_shapes=len(nouns), function=pnl.Logistic() ) out_sig_is = pnl.TransferMechanism(name="sig_outs_is", - size=len(is_list), + input_shapes=len(is_list), function=pnl.Logistic() ) out_sig_has = pnl.TransferMechanism(name="sig_outs_has", - size=len(has_list), + input_shapes=len(has_list), function=pnl.Logistic() ) out_sig_can = pnl.TransferMechanism(name="sig_outs_can", - size=len(can_list), + input_shapes=len(can_list), function=pnl.Logistic() ) diff --git a/Scripts/Examples/Tutorial/Stroop Model - EVC.py b/Scripts/Examples/Tutorial/Stroop Model - EVC.py index e4ab6f08344..de6b28b659b 100644 --- a/Scripts/Examples/Tutorial/Stroop Model - EVC.py +++ b/Scripts/Examples/Tutorial/Stroop Model - EVC.py @@ -4,23 +4,23 @@ # CONSTRUCT THE MODEL *********************************** # Construct the color naming pathway: -color_input = ProcessingMechanism(name='COLOR INPUT', size=2) # Note: default function is Linear +color_input 
= ProcessingMechanism(name='COLOR INPUT', input_shapes=2) # Note: default function is Linear color_input_to_hidden_wts = np.array([[2, -2], [-2, 2]]) -color_hidden = ProcessingMechanism(name='COLOR HIDDEN', size=2, function=Logistic(bias=-4)) +color_hidden = ProcessingMechanism(name='COLOR HIDDEN', input_shapes=2, function=Logistic(bias=-4)) color_hidden_to_output_wts = np.array([[2, -2], [-2, 2]]) -output = ProcessingMechanism(name='OUTPUT', size=2, function=Logistic) +output = ProcessingMechanism(name='OUTPUT', input_shapes=2, function=Logistic) color_pathway = [color_input, color_input_to_hidden_wts, color_hidden, color_hidden_to_output_wts, output] # Construct the word reading pathway (using the same output_layer) -word_input = ProcessingMechanism(name='WORD INPUT', size=2) +word_input = ProcessingMechanism(name='WORD INPUT', input_shapes=2) word_input_to_hidden_wts = np.array([[3, -3], [-3, 3]]) -word_hidden = ProcessingMechanism(name='WORD HIDDEN', size=2, function=Logistic(bias=-4)) +word_hidden = ProcessingMechanism(name='WORD HIDDEN', input_shapes=2, function=Logistic(bias=-4)) word_hidden_to_output_wts = np.array([[3, -3], [-3, 3]]) word_pathway = [word_input, word_input_to_hidden_wts, word_hidden, word_hidden_to_output_wts, output] # Construct the task specification pathways -task_input = ProcessingMechanism(name='TASK INPUT', size=2) -task = LCAMechanism(name='TASK', size=2, initial_value=[0.5,0.5]) +task_input = ProcessingMechanism(name='TASK INPUT', input_shapes=2) +task = LCAMechanism(name='TASK', input_shapes=2, initial_value=[0.5, 0.5]) task_color_wts = np.array([[4,4],[0,0]]) task_word_wts = np.array([[0,0],[4,4]]) task_color_pathway = [task_input, task, task_color_wts, color_hidden] diff --git a/Scripts/Examples/_Gating-Mechanism.py b/Scripts/Examples/_Gating-Mechanism.py index 3391fb3a5c7..ff6ca5e6c72 100644 --- a/Scripts/Examples/_Gating-Mechanism.py +++ b/Scripts/Examples/_Gating-Mechanism.py @@ -29,7 +29,7 @@ Gating_Mechanism = pnl.GatingMechanism( # default_gating_allocation=0.0, - size=[1], + input_shapes=[1], gating_signals=[ Hidden_Layer_1, Hidden_Layer_2, diff --git a/Scripts/Examples/_Leabra-Demo.py b/Scripts/Examples/_Leabra-Demo.py index 1db82c5bd4f..0bef2cef590 100644 --- a/Scripts/Examples/_Leabra-Demo.py +++ b/Scripts/Examples/_Leabra-Demo.py @@ -62,8 +62,8 @@ ) -T1 = pnl.TransferMechanism(name='T1', size=input_size, function=psyneulink.core.components.functions.nonstateful.transferfunctions.Linear) -T2 = pnl.TransferMechanism(name='T2', size=output_size, function=psyneulink.core.components.functions.nonstateful.transferfunctions.Linear) +T1 = pnl.TransferMechanism(name='T1', input_shapes=input_size, function=psyneulink.core.components.functions.nonstateful.transferfunctions.Linear) +T2 = pnl.TransferMechanism(name='T2', input_shapes=output_size, function=psyneulink.core.components.functions.nonstateful.transferfunctions.Linear) proj = pnl.MappingProjection(sender=T2, receiver=L.input_ports[1]) comp = pnl.Composition(pathways=[[T1, L], [T2, proj, L]]) diff --git a/Scripts/Examples/_Leabra-Learning-Demo.py b/Scripts/Examples/_Leabra-Learning-Demo.py index 7a8fce2b26c..17ad59d0d2a 100644 --- a/Scripts/Examples/_Leabra-Learning-Demo.py +++ b/Scripts/Examples/_Leabra-Learning-Demo.py @@ -26,8 +26,8 @@ hidden_sizes=None, training_flag=True, quarter_size=20) ### building the PsyNeuLink network -T_input = pnl.TransferMechanism(size=n_input) -T_target = pnl.TransferMechanism(size=n_output) +T_input = pnl.TransferMechanism(input_shapes=n_input) +T_target = 
pnl.TransferMechanism(input_shapes=n_output) # target_projection connects T_target to the TARGET InputPort of Leab target_projection = pnl.MappingProjection(sender=T_target, receiver = Leab.input_ports[1]) comp = pnl.Composition(pathways=[[T_input, Leab], [T_target, target_projection, Leab]]) diff --git a/Scripts/Examples/_Reinforcement-Learning REV.py b/Scripts/Examples/_Reinforcement-Learning REV.py index f351c63e716..a00f6652145 100644 --- a/Scripts/Examples/_Reinforcement-Learning REV.py +++ b/Scripts/Examples/_Reinforcement-Learning REV.py @@ -4,12 +4,12 @@ import psyneulink.core.components.functions.nonstateful.transferfunctions input_layer = pnl.TransferMechanism( - size=3, + input_shapes=3, name='Input Layer' ) action_selection = pnl.TransferMechanism( - size=3, + input_shapes=3, function=psyneulink.core.components.functions.nonstateful.transferfunctions.SoftMax( output=pnl.ALL, gain=1.0), diff --git a/Scripts/Models (Under Development)/Adaptive Replay Model.py b/Scripts/Models (Under Development)/Adaptive Replay Model.py index 069dbb00d75..e1d6d1a9a27 100644 --- a/Scripts/Models (Under Development)/Adaptive Replay Model.py +++ b/Scripts/Models (Under Development)/Adaptive Replay Model.py @@ -14,35 +14,35 @@ # PERCEPTUAL AND ACTION MECHANISMS # ********************************************************************************************* stim_in = ProcessingMechanism(name='Stimulus', - size=stim_size) + input_shapes=stim_size) context_in = ProcessingMechanism(name='Context', - size=context_size) + input_shapes=context_size) reward_in = ProcessingMechanism(name='Reward', - size=1) + input_shapes=1) perceptual_state = ProcessingMechanism(name='Current Port', function=Concatenate, input_ports=[{NAME:'STIM', - SIZE:stim_size, - PROJECTIONS:stim_in}, + INPUT_SHAPES:stim_size, + PROJECTIONS:stim_in}, {NAME:'CONTEXT', - SIZE:context_size, + INPUT_SHAPES:context_size, PROJECTIONS:context_in}]) # action = ProcessingMechanism(name='Action', -# size=num_actions, +# input_shapes=num_actions, # input_ports={NAME: 'Q values', # PROJECTIONS:perceptual_state}) action = ProcessingMechanism(name='Action', - size=num_actions) + input_shapes=num_actions) # ********************************************************************************************* # RL AGENT NESTED COMPOSITION # ********************************************************************************************* -rl_agent_state = ProcessingMechanism(name='RL Agent Port', size=5) -rl_agent_action = ProcessingMechanism(name='RL Agent Action', size=5) +rl_agent_state = ProcessingMechanism(name='RL Agent Port', input_shapes=5) +rl_agent_action = ProcessingMechanism(name='RL Agent Action', input_shapes=5) rl_agent = Composition(name='RL Agent') rl_learning_components = rl_agent.add_reinforcement_learning_pathway([rl_agent_state, rl_agent_action]) # rl_agent.add_required_node_role(rl_agent_action, NodeRole.OUTPUT) @@ -52,7 +52,7 @@ # MEMORY AND CONTROL MECHANISMS # ********************************************************************************************* # q_rep = ProcessingMechanism(name='Q rep', -# size=num_actions*stim_size, +# input_shapes=num_actions*stim_size, # function=SoftMax(output=PROB, gain=1.0)) # # em = EpisodicMemoryMechanism(name='Episodic Memory', diff --git a/Scripts/Models (Under Development)/Bustamante_Stroop_XOR_LVOC_Model.py b/Scripts/Models (Under Development)/Bustamante_Stroop_XOR_LVOC_Model.py index db888ec5ebf..32e325600c8 100644 --- a/Scripts/Models (Under Development)/Bustamante_Stroop_XOR_LVOC_Model.py +++ 
b/Scripts/Models (Under Development)/Bustamante_Stroop_XOR_LVOC_Model.py @@ -49,13 +49,13 @@ def objective_function(v): # return np.sum(v[0] * v[1]) -color_stim = pnl.TransferMechanism(name='Color Stimulus', size=8) -word_stim = pnl.TransferMechanism(name='Word Stimulus', size=8) +color_stim = pnl.TransferMechanism(name='Color Stimulus', input_shapes=8) +word_stim = pnl.TransferMechanism(name='Word Stimulus', input_shapes=8) color_task = pnl.TransferMechanism(name='Color Task') word_task = pnl.ProcessingMechanism(name='Word Task', function=w_fct_UDF) -reward = pnl.TransferMechanism(name='Reward', size=2) +reward = pnl.TransferMechanism(name='Reward', input_shapes=2) task_decision = pnl.DDM( name='Task Decision', diff --git a/Scripts/Models (Under Development)/Bustamante_Stroop_XOR_LVOC_Model_VZ.py b/Scripts/Models (Under Development)/Bustamante_Stroop_XOR_LVOC_Model_VZ.py index 4bc078ea5f5..f247f16a261 100644 --- a/Scripts/Models (Under Development)/Bustamante_Stroop_XOR_LVOC_Model_VZ.py +++ b/Scripts/Models (Under Development)/Bustamante_Stroop_XOR_LVOC_Model_VZ.py @@ -71,13 +71,13 @@ def adj_cost_fct(v): from math import e return e**(.25 * np.abs(v) - 1) -color_stim = pnl.TransferMechanism(name='Color Stimulus', size=8) -word_stim = pnl.TransferMechanism(name='Word Stimulus', size=8) +color_stim = pnl.TransferMechanism(name='Color Stimulus', input_shapes=8) +word_stim = pnl.TransferMechanism(name='Word Stimulus', input_shapes=8) color_task = pnl.TransferMechanism(name='Color Task') word_task = pnl.ProcessingMechanism(name='Word Task', function=w_fct_UDF) -reward = pnl.TransferMechanism(name='Reward', size=2) +reward = pnl.TransferMechanism(name='Reward', input_shapes=2) task_decision = pnl.DDM( name='Task Decision', diff --git a/Scripts/Models (Under Development)/EGO/Using EMComposition/EGO Model - CSW with RNN.py b/Scripts/Models (Under Development)/EGO/Using EMComposition/EGO Model - CSW with RNN.py index 8a45bb6ab14..c0ed1f5e408 100644 --- a/Scripts/Models (Under Development)/EGO/Using EMComposition/EGO Model - CSW with RNN.py +++ b/Scripts/Models (Under Development)/EGO/Using EMComposition/EGO Model - CSW with RNN.py @@ -276,14 +276,14 @@ def construct_model(model_name:str=MODEL_NAME, # ------------------------------------------------- Nodes ------------------------------------------------------ # ---------------------------------------------------------------------------------------------------------------- - state_input_layer = ProcessingMechanism(name=state_input_name, size=state_size) - previous_state_layer = ProcessingMechanism(name=previous_state_input_name, size=state_size) + state_input_layer = ProcessingMechanism(name=state_input_name, input_shapes=state_size) + previous_state_layer = ProcessingMechanism(name=previous_state_input_name, input_shapes=state_size) integrator_layer = RecurrentTransferMechanism(name=integrator_name, function=Tanh, - size=integrator_size, + input_shapes=integrator_size, auto=1-integration_rate, hetero=0.0) - context_layer = ProcessingMechanism(name=context_name, size=context_size) + context_layer = ProcessingMechanism(name=context_name, input_shapes=context_size) em = EMComposition(name=em_name, memory_template=[[0] * state_size, # state @@ -305,7 +305,7 @@ def construct_model(model_name:str=MODEL_NAME, ) prediction_layer = ProcessingMechanism(name=prediction_layer_name, - size=state_size) + input_shapes=state_size) # ---------------------------------------------------------------------------------------------------------------- diff --git 
a/Scripts/Models (Under Development)/EGO/Using EMComposition/EGO Model - CSW with Simple Integrator.py b/Scripts/Models (Under Development)/EGO/Using EMComposition/EGO Model - CSW with Simple Integrator.py index 5cb51d00181..93390245c6a 100644 --- a/Scripts/Models (Under Development)/EGO/Using EMComposition/EGO Model - CSW with Simple Integrator.py +++ b/Scripts/Models (Under Development)/EGO/Using EMComposition/EGO Model - CSW with Simple Integrator.py @@ -233,11 +233,11 @@ def construct_model(model_name:str=model_params['name'], # ------------------------------------------------- Nodes ------------------------------------------------------ # ---------------------------------------------------------------------------------------------------------------- - state_input_layer = ProcessingMechanism(name=state_input_name, size=state_size) - previous_state_layer = ProcessingMechanism(name=previous_state_input_name, size=state_size) - # context_layer = ProcessingMechanism(name=context_name, size=context_size) + state_input_layer = ProcessingMechanism(name=state_input_name, input_shapes=state_size) + previous_state_layer = ProcessingMechanism(name=previous_state_input_name, input_shapes=state_size) + # context_layer = ProcessingMechanism(name=context_name, input_shapes=context_size) context_layer = TransferMechanism(name=context_name, - size=context_size, + input_shapes=context_size, function=Tanh, integrator_mode=True, integration_rate=integration_rate) @@ -268,7 +268,7 @@ def construct_model(model_name:str=model_params['name'], device=device ) - prediction_layer = ProcessingMechanism(name=prediction_layer_name, size=state_size) + prediction_layer = ProcessingMechanism(name=prediction_layer_name, input_shapes=state_size) # ---------------------------------------------------------------------------------------------------------------- diff --git a/Scripts/Models (Under Development)/EGO/Using EMComposition/EGO Model - Revaluation.py b/Scripts/Models (Under Development)/EGO/Using EMComposition/EGO Model - Revaluation.py index c9e827cf197..deced4903f2 100644 --- a/Scripts/Models (Under Development)/EGO/Using EMComposition/EGO Model - Revaluation.py +++ b/Scripts/Models (Under Development)/EGO/Using EMComposition/EGO Model - Revaluation.py @@ -468,15 +468,15 @@ def construct_model(model_name:str=MODEL_NAME, # ------------------------------------------------- Nodes ------------------------------------------------------ # ---------------------------------------------------------------------------------------------------------------- - task_input_layer = ProcessingMechanism(name=task_input_name, size=task_size) - state_input_layer = ProcessingMechanism(name=state_input_name, size=state_size) - time_input_layer = ProcessingMechanism(name=time_input_name, size=time_size) - reward_input_layer = ProcessingMechanism(name=reward_input_name, size=reward_size) - attend_external_layer = ProcessingMechanism(name=attend_external_layer_name, size=state_size) - attend_memory_layer = ProcessingMechanism(name=attend_memory_layer_name, size=state_size) - retrieved_reward_layer = TransferMechanism(name=retrieved_reward_name, size=reward_size) + task_input_layer = ProcessingMechanism(name=task_input_name, input_shapes=task_size) + state_input_layer = ProcessingMechanism(name=state_input_name, input_shapes=state_size) + time_input_layer = ProcessingMechanism(name=time_input_name, input_shapes=time_size) + reward_input_layer = ProcessingMechanism(name=reward_input_name, input_shapes=reward_size) + attend_external_layer = 
ProcessingMechanism(name=attend_external_layer_name, input_shapes=state_size) + attend_memory_layer = ProcessingMechanism(name=attend_memory_layer_name, input_shapes=state_size) + retrieved_reward_layer = TransferMechanism(name=retrieved_reward_name, input_shapes=reward_size) context_layer = RecurrentTransferMechanism(name=context_name, - size=state_size, + input_shapes=state_size, auto=1-context_integration_rate, hetero=0.0) em = EMComposition(name=em_name, diff --git a/Scripts/Models (Under Development)/EGO/Using EpisodicMemoryMechanism/EGO Model - MDP.py b/Scripts/Models (Under Development)/EGO/Using EpisodicMemoryMechanism/EGO Model - MDP.py index e6c7b4bd368..cd1507b3a26 100644 --- a/Scripts/Models (Under Development)/EGO/Using EpisodicMemoryMechanism/EGO Model - MDP.py +++ b/Scripts/Models (Under Development)/EGO/Using EpisodicMemoryMechanism/EGO Model - MDP.py @@ -485,15 +485,15 @@ def construct_model(model_name:str=MODEL_NAME, # ------------------------------------------------- Mechanisms ------------------------------------------------- # ---------------------------------------------------------------------------------------------------------------- - task_input_layer = ProcessingMechanism(name=task_input_name, size=task_size) - state_input_layer = ProcessingMechanism(name=state_input_name, size=state_size) - time_input_layer = ProcessingMechanism(name=time_input_name, size=time_size) - reward_input_layer = ProcessingMechanism(name=reward_input_name, size=reward_size) - attend_external_layer = ProcessingMechanism(name=attend_external_layer_name, size=state_size) - attend_memory_layer = ProcessingMechanism(name=attend_memory_layer_name, size=state_size) - retrieved_reward_layer = TransferMechanism(name=retrieved_reward_name, size=reward_size) + task_input_layer = ProcessingMechanism(name=task_input_name, input_shapes=task_size) + state_input_layer = ProcessingMechanism(name=state_input_name, input_shapes=state_size) + time_input_layer = ProcessingMechanism(name=time_input_name, input_shapes=time_size) + reward_input_layer = ProcessingMechanism(name=reward_input_name, input_shapes=reward_size) + attend_external_layer = ProcessingMechanism(name=attend_external_layer_name, input_shapes=state_size) + attend_memory_layer = ProcessingMechanism(name=attend_memory_layer_name, input_shapes=state_size) + retrieved_reward_layer = TransferMechanism(name=retrieved_reward_name, input_shapes=reward_size) context_layer = RecurrentTransferMechanism(name=context_name, - size=state_size, + input_shapes=state_size, auto=1-context_integration_rate, hetero=0.0) em = EpisodicMemoryMechanism(name=em_name, @@ -501,10 +501,10 @@ def construct_model(model_name:str=MODEL_NAME, [0] * time_size, # time [0] * state_size, # context [0] * reward_size], # reward - input_ports=[{NAME:state_input_name, SIZE:state_size}, - {NAME:time_input_name, SIZE:time_size}, - {NAME:context_name, SIZE:state_size}, - {NAME:reward_input_name, SIZE:reward_size}], + input_ports=[{NAME:state_input_name, INPUT_SHAPES:state_size}, + {NAME:time_input_name, INPUT_SHAPES:time_size}, + {NAME:context_name, INPUT_SHAPES:state_size}, + {NAME:reward_input_name, INPUT_SHAPES:reward_size}], function=ContentAddressableMemory( # selection_function=SoftMax(gain=retrieval_softmax_gain), distance_field_weights=[state_retrieval_weight, diff --git a/Scripts/Models (Under Development)/GreedyAgentInteractiveInputs.py b/Scripts/Models (Under Development)/GreedyAgentInteractiveInputs.py index 6fa96600729..4100ec0a3c1 100644 --- a/Scripts/Models (Under 
Development)/GreedyAgentInteractiveInputs.py +++ b/Scripts/Models (Under Development)/GreedyAgentInteractiveInputs.py @@ -28,14 +28,14 @@ # ********************************************************************************************************************* if PERCEPT_DISTORT: - player = ProcessingMechanism(size=prey_len, function=GaussianDistort(variance=0), name="PLAYER OBS") - prey = ProcessingMechanism(size=prey_len, function=GaussianDistort(variance=0), name="PREY OBS") + player = ProcessingMechanism(input_shapes=prey_len, function=GaussianDistort(variance=0), name="PLAYER OBS") + prey = ProcessingMechanism(input_shapes=prey_len, function=GaussianDistort(variance=0), name="PREY OBS") else: - player = TransferMechanism(size=prey_len, name="PLAYER OBS") - prey = TransferMechanism(size=prey_len, name="PREY OBS") + player = TransferMechanism(input_shapes=prey_len, name="PLAYER OBS") + prey = TransferMechanism(input_shapes=prey_len, name="PREY OBS") # For future use: -values = TransferMechanism(size=3, name="AGENT VALUES") +values = TransferMechanism(input_shapes=3, name="AGENT VALUES") reward = TransferMechanism(name="REWARD") # Use ComparatorMechanism to compute direction of action as difference of coordinates between player and prey: diff --git a/Scripts/Models (Under Development)/GreedyAgentModel.py b/Scripts/Models (Under Development)/GreedyAgentModel.py index 2855548e350..adab17a1fd3 100644 --- a/Scripts/Models (Under Development)/GreedyAgentModel.py +++ b/Scripts/Models (Under Development)/GreedyAgentModel.py @@ -32,14 +32,14 @@ # ********************************************************************************************************************* if PERCEPT_DISTORT: - player = ProcessingMechanism(size=prey_len, function=GaussianDistort(variance=0), name="PLAYER OBS") - prey = ProcessingMechanism(size=prey_len, function=GaussianDistort(variance=0), name="PREY OBS") + player = ProcessingMechanism(input_shapes=prey_len, function=GaussianDistort(variance=0), name="PLAYER OBS") + prey = ProcessingMechanism(input_shapes=prey_len, function=GaussianDistort(variance=0), name="PREY OBS") else: - player = TransferMechanism(size=prey_len, name="PLAYER OBS") - prey = TransferMechanism(size=prey_len, name="PREY OBS") + player = TransferMechanism(input_shapes=prey_len, name="PLAYER OBS") + prey = TransferMechanism(input_shapes=prey_len, name="PREY OBS") # For future use: -values = TransferMechanism(size=3, name="AGENT VALUES") +values = TransferMechanism(input_shapes=3, name="AGENT VALUES") reward = TransferMechanism(name="REWARD") # Use ComparatorMechanism to compute direction of action as difference of coordinates between player and prey: diff --git a/Scripts/Models (Under Development)/GreedyAgentModel_LLVM_TEST.py b/Scripts/Models (Under Development)/GreedyAgentModel_LLVM_TEST.py index b20c5718370..9f28b4ef672 100644 --- a/Scripts/Models (Under Development)/GreedyAgentModel_LLVM_TEST.py +++ b/Scripts/Models (Under Development)/GreedyAgentModel_LLVM_TEST.py @@ -18,11 +18,11 @@ player_len = prey_len = predator_len = obs_len -player = ProcessingMechanism(size=prey_len, function=GaussianDistort, name="PLAYER OBS") -prey = ProcessingMechanism(size=prey_len, function=GaussianDistort, name="PREY OBS") +player = ProcessingMechanism(input_shapes=prey_len, function=GaussianDistort, name="PLAYER OBS") +prey = ProcessingMechanism(input_shapes=prey_len, function=GaussianDistort, name="PREY OBS") # For future use: -values = TransferMechanism(size=3, name="AGENT VALUES") +values = 
TransferMechanism(input_shapes=3, name="AGENT VALUES") reward = TransferMechanism(name="REWARD") # Use ComparatorMechanism to compute direction of action as difference of coordinates between player and prey: diff --git a/Scripts/Models (Under Development)/PanickyAgentModel.py b/Scripts/Models (Under Development)/PanickyAgentModel.py index df3d0942a00..f1c692f774b 100644 --- a/Scripts/Models (Under Development)/PanickyAgentModel.py +++ b/Scripts/Models (Under Development)/PanickyAgentModel.py @@ -78,12 +78,12 @@ def control_allocation_function(variable): # ********************************************************************************************************************* # Perceptual Mechanisms -player_obs = ProcessingMechanism(size=prey_len, function=GaussianDistort, name="PLAYER OBS") -prey_obs = ProcessingMechanism(size=prey_len, function=GaussianDistort, name="PREY OBS") -predator_obs = TransferMechanism(size=predator_len, function=GaussianDistort, name="PREDATOR OBS") +player_obs = ProcessingMechanism(input_shapes=prey_len, function=GaussianDistort, name="PLAYER OBS") +prey_obs = ProcessingMechanism(input_shapes=prey_len, function=GaussianDistort, name="PREY OBS") +predator_obs = TransferMechanism(input_shapes=predator_len, function=GaussianDistort, name="PREDATOR OBS") # Value and Reward Mechanisms (not yet used; for future use) -values = TransferMechanism(size=3, name="AGENT VALUES") +values = TransferMechanism(input_shapes=3, name="AGENT VALUES") reward = TransferMechanism(name="REWARD") # Action Mechanism diff --git a/Scripts/Models (Under Development)/Predator-Prey Model DEMO.py b/Scripts/Models (Under Development)/Predator-Prey Model DEMO.py index 7a30b14d0bd..c565dd4c8a9 100644 --- a/Scripts/Models (Under Development)/Predator-Prey Model DEMO.py +++ b/Scripts/Models (Under Development)/Predator-Prey Model DEMO.py @@ -96,12 +96,12 @@ def get_optimal_action(observation): # ************************************** PROCESSING MECHANISMS ******************************************************** # Perceptual Mechanisms -player_percept = ProcessingMechanism(size=prey_len, function=GaussianDistort(), name="PLAYER PERCEPT") -predator_percept = ProcessingMechanism(size=predator_len, function=GaussianDistort(), name="PREDATOR PERCEPT") -prey_percept = ProcessingMechanism(size=prey_len, function=GaussianDistort(), name="PREY PERCEPT") +player_percept = ProcessingMechanism(input_shapes=prey_len, function=GaussianDistort(), name="PLAYER PERCEPT") +predator_percept = ProcessingMechanism(input_shapes=predator_len, function=GaussianDistort(), name="PREDATOR PERCEPT") +prey_percept = ProcessingMechanism(input_shapes=prey_len, function=GaussianDistort(), name="PREY PERCEPT") # Mechanism used to encode optimal action from call to Run -optimal_action_mech = ProcessingMechanism(size=action_len, name="OPTIMAL ACTION") +optimal_action_mech = ProcessingMechanism(input_shapes=action_len, name="OPTIMAL ACTION") actual_agent_frame_buffer = None diff --git a/Scripts/Models (Under Development)/Predator-Prey Model DQN LVOC.py b/Scripts/Models (Under Development)/Predator-Prey Model DQN LVOC.py index 52db2a7a4fa..6a6926f10fc 100644 --- a/Scripts/Models (Under Development)/Predator-Prey Model DQN LVOC.py +++ b/Scripts/Models (Under Development)/Predator-Prey Model DQN LVOC.py @@ -104,9 +104,9 @@ def get_optimal_action(observation): # ************************************** PROCESSING MECHANISMS ******************************************************** # Perceptual Mechanisms -player_percept = 
ProcessingMechanism(size=prey_len, function=GaussianDistort(), name="PLAYER PERCEPT") -predator_percept = ProcessingMechanism(size=predator_len, function=GaussianDistort(), name="PREDATOR PERCEPT") -prey_percept = ProcessingMechanism(size=prey_len, function=GaussianDistort(), name="PREY PERCEPT") +player_percept = ProcessingMechanism(input_shapes=prey_len, function=GaussianDistort(), name="PLAYER PERCEPT") +predator_percept = ProcessingMechanism(input_shapes=predator_len, function=GaussianDistort(), name="PREDATOR PERCEPT") +prey_percept = ProcessingMechanism(input_shapes=prey_len, function=GaussianDistort(), name="PREY PERCEPT") # Mechanism used to encode trialtype from environment trial_type_input_mech = ProcessingMechanism(name="TRIAL TYPE INPUT") diff --git a/Scripts/Models (Under Development)/Predator-Prey Model DQN [ORIG].py b/Scripts/Models (Under Development)/Predator-Prey Model DQN [ORIG].py index 9e9630238d8..2e1b5ea13d7 100644 --- a/Scripts/Models (Under Development)/Predator-Prey Model DQN [ORIG].py +++ b/Scripts/Models (Under Development)/Predator-Prey Model DQN [ORIG].py @@ -54,12 +54,12 @@ # ************************************** PROCESSING MECHANISMS ******************************************************** # Perceptual Mechanisms -player_percept = ProcessingMechanism(size=prey_len, function=GaussianDistort, name="PLAYER PERCEPT") -predator_percept = ProcessingMechanism(size=predator_len, function=GaussianDistort, name="PREDATOR PERCEPT") -prey_percept = ProcessingMechanism(size=prey_len, function=GaussianDistort, name="PREY PERCEPT") +player_percept = ProcessingMechanism(input_shapes=prey_len, function=GaussianDistort, name="PLAYER PERCEPT") +predator_percept = ProcessingMechanism(input_shapes=predator_len, function=GaussianDistort, name="PREDATOR PERCEPT") +prey_percept = ProcessingMechanism(input_shapes=prey_len, function=GaussianDistort, name="PREY PERCEPT") # Value and Reward Mechanisms (not yet used; for future use) -values = TransferMechanism(size=3, name="AGENT VALUES") +values = TransferMechanism(input_shapes=3, name="AGENT VALUES") reward = TransferMechanism(name="REWARD") # env = ForagerEnv() diff --git a/Scripts/Models (Under Development)/Predator-Prey Model DQN.py b/Scripts/Models (Under Development)/Predator-Prey Model DQN.py index 3354bd89e72..2c95015f2e0 100644 --- a/Scripts/Models (Under Development)/Predator-Prey Model DQN.py +++ b/Scripts/Models (Under Development)/Predator-Prey Model DQN.py @@ -89,12 +89,12 @@ def get_optimal_action(observation): # ************************************** PROCESSING MECHANISMS ******************************************************** # Perceptual Mechanisms -player_percept = ProcessingMechanism(size=prey_len, function=GaussianDistort(), name="PLAYER PERCEPT") -predator_percept = ProcessingMechanism(size=predator_len, function=GaussianDistort(), name="PREDATOR PERCEPT") -prey_percept = ProcessingMechanism(size=prey_len, function=GaussianDistort(), name="PREY PERCEPT") +player_percept = ProcessingMechanism(input_shapes=prey_len, function=GaussianDistort(), name="PLAYER PERCEPT") +predator_percept = ProcessingMechanism(input_shapes=predator_len, function=GaussianDistort(), name="PREDATOR PERCEPT") +prey_percept = ProcessingMechanism(input_shapes=prey_len, function=GaussianDistort(), name="PREY PERCEPT") # Mechanism used to encode optimal action from call to Run -optimal_action_mech = ProcessingMechanism(size=action_len, name="OPTIMAL ACTION") +optimal_action_mech = ProcessingMechanism(input_shapes=action_len, name="OPTIMAL 
ACTION") actual_agent_frame_buffer = None diff --git a/Scripts/Models (Under Development)/Predator-Prey Model INPUT LAYER.py b/Scripts/Models (Under Development)/Predator-Prey Model INPUT LAYER.py index 4649a0c0bd4..095f78ef527 100644 --- a/Scripts/Models (Under Development)/Predator-Prey Model INPUT LAYER.py +++ b/Scripts/Models (Under Development)/Predator-Prey Model INPUT LAYER.py @@ -40,23 +40,23 @@ # ********************************************************************************************************************* # Input Mechanisms -player_input = ProcessingMechanism(size=prey_len, name="PLAYER INPUT") -prey_input = ProcessingMechanism(size=prey_len, name="PREY INPUT") -predator_input = TransferMechanism(size=predator_len, name="PREDATOR INPUT") +player_input = ProcessingMechanism(input_shapes=prey_len, name="PLAYER INPUT") +prey_input = ProcessingMechanism(input_shapes=prey_len, name="PREY INPUT") +predator_input = TransferMechanism(input_shapes=predator_len, name="PREDATOR INPUT") # Perceptual Mechanisms if PERCEPTUAL_DISTORT: - player_obs = ProcessingMechanism(size=prey_len, function=GaussianDistort, name="PLAYER OBS") - prey_obs = ProcessingMechanism(size=prey_len, function=GaussianDistort, name="PREY OBS") - predator_obs = TransferMechanism(size=predator_len, function=GaussianDistort, name="PREDATOR OBS") + player_obs = ProcessingMechanism(input_shapes=prey_len, function=GaussianDistort, name="PLAYER OBS") + prey_obs = ProcessingMechanism(input_shapes=prey_len, function=GaussianDistort, name="PREY OBS") + predator_obs = TransferMechanism(input_shapes=predator_len, function=GaussianDistort, name="PREDATOR OBS") else: - player_obs = ProcessingMechanism(size=prey_len, name="PLAYER OBS") - prey_obs = ProcessingMechanism(size=prey_len, name="PREY OBS") - predator_obs = TransferMechanism(size=predator_len, name="PREDATOR OBS") + player_obs = ProcessingMechanism(input_shapes=prey_len, name="PLAYER OBS") + prey_obs = ProcessingMechanism(input_shapes=prey_len, name="PREY OBS") + predator_obs = TransferMechanism(input_shapes=predator_len, name="PREDATOR OBS") # Value and Reward Mechanisms (not yet used; for future use) -values = TransferMechanism(size=3, name="AGENT VALUES") +values = TransferMechanism(input_shapes=3, name="AGENT VALUES") reward = TransferMechanism(name="REWARD") # Action Mechanism diff --git a/Scripts/Models (Under Development)/Predator-Prey Model I_0 Nested Comp.py b/Scripts/Models (Under Development)/Predator-Prey Model I_0 Nested Comp.py index cce5386ad72..82d1596dfb8 100644 --- a/Scripts/Models (Under Development)/Predator-Prey Model I_0 Nested Comp.py +++ b/Scripts/Models (Under Development)/Predator-Prey Model I_0 Nested Comp.py @@ -41,23 +41,23 @@ # ********************************************************************************************************************* # Input Mechanisms -player_input = ProcessingMechanism(size=prey_len, name="PLAYER INPUT") -prey_input = ProcessingMechanism(size=prey_len, name="PREY INPUT") -predator_input = TransferMechanism(size=predator_len, name="PREDATOR INPUT") +player_input = ProcessingMechanism(input_shapes=prey_len, name="PLAYER INPUT") +prey_input = ProcessingMechanism(input_shapes=prey_len, name="PREY INPUT") +predator_input = TransferMechanism(input_shapes=predator_len, name="PREDATOR INPUT") # Perceptual Mechanisms if PERCEPTUAL_DISTORT: - player_obs = ProcessingMechanism(size=prey_len, function=GaussianDistort, name="PLAYER OBS") - prey_obs = ProcessingMechanism(size=prey_len, function=GaussianDistort, name="PREY 
OBS") - predator_obs = TransferMechanism(size=predator_len, function=GaussianDistort, name="PREDATOR OBS") + player_obs = ProcessingMechanism(input_shapes=prey_len, function=GaussianDistort, name="PLAYER OBS") + prey_obs = ProcessingMechanism(input_shapes=prey_len, function=GaussianDistort, name="PREY OBS") + predator_obs = TransferMechanism(input_shapes=predator_len, function=GaussianDistort, name="PREDATOR OBS") else: - player_obs = ProcessingMechanism(size=prey_len, name="PLAYER OBS") - prey_obs = ProcessingMechanism(size=prey_len, name="PREY OBS") - predator_obs = TransferMechanism(size=predator_len, name="PREDATOR OBS") + player_obs = ProcessingMechanism(input_shapes=prey_len, name="PLAYER OBS") + prey_obs = ProcessingMechanism(input_shapes=prey_len, name="PREY OBS") + predator_obs = TransferMechanism(input_shapes=predator_len, name="PREDATOR OBS") # Value and Reward Mechanisms (not yet used; for future use) -values = TransferMechanism(size=3, name="AGENT VALUES") +values = TransferMechanism(input_shapes=3, name="AGENT VALUES") reward = TransferMechanism(name="REWARD") # Action Mechanism diff --git a/Scripts/Models (Under Development)/Predator-Prey Model.py b/Scripts/Models (Under Development)/Predator-Prey Model.py index dd2c13682af..2eee31e1742 100644 --- a/Scripts/Models (Under Development)/Predator-Prey Model.py +++ b/Scripts/Models (Under Development)/Predator-Prey Model.py @@ -40,11 +40,11 @@ # ********************************************************************************************************************* # Perceptual Mechanisms -player_obs = ProcessingMechanism(size=prey_len, function=GaussianDistort, name="PLAYER OBS") -prey_obs = ProcessingMechanism(size=prey_len, function=GaussianDistort, name="PREY OBS") -predator_obs = TransferMechanism(size=predator_len, function=GaussianDistort, name="PREDATOR OBS") +player_obs = ProcessingMechanism(input_shapes=prey_len, function=GaussianDistort, name="PLAYER OBS") +prey_obs = ProcessingMechanism(input_shapes=prey_len, function=GaussianDistort, name="PREY OBS") +predator_obs = TransferMechanism(input_shapes=predator_len, function=GaussianDistort, name="PREDATOR OBS") # Value and Reward Mechanisms (not yet used; for future use) -values = TransferMechanism(size=3, name="AGENT VALUES") +values = TransferMechanism(input_shapes=3, name="AGENT VALUES") reward = TransferMechanism(name="REWARD") # Action Mechanism diff --git a/Scripts/Models (Under Development)/nback/nback.py b/Scripts/Models (Under Development)/nback/nback.py index c1513137d36..1453e5576c5 100644 --- a/Scripts/Models (Under Development)/nback/nback.py +++ b/Scripts/Models (Under Development)/nback/nback.py @@ -305,28 +305,28 @@ def construct_model(stim_size:int = STIM_SIZE, # output: match [1,0] or non-match [0,1] # Must be trained to detect match for specified task (1-back, 2-back, etc.) 
input_current_stim = TransferMechanism(name=FFN_STIMULUS_INPUT, - size=stim_size, + input_shapes=stim_size, function=FFN_TRANSFER_FUNCTION) input_current_context = TransferMechanism(name=FFN_CONTEXT_INPUT, - size=context_size, + input_shapes=context_size, function=FFN_TRANSFER_FUNCTION) input_retrieved_stim = TransferMechanism(name=FFN_STIMULUS_RETRIEVED, - size=stim_size, + input_shapes=stim_size, function=FFN_TRANSFER_FUNCTION) input_retrieved_context = TransferMechanism(name=FFN_CONTEXT_RETRIEVED, - size=context_size, + input_shapes=context_size, function=FFN_TRANSFER_FUNCTION) input_task = TransferMechanism(name=FFN_TASK, - size=num_nback_levels, + input_shapes=num_nback_levels, function=FFN_TRANSFER_FUNCTION) hidden = TransferMechanism(name=FFN_HIDDEN, - size=hidden_size, + input_shapes=hidden_size, function=FFN_TRANSFER_FUNCTION) dropout = TransferMechanism(name=FFN_DROPOUT, - size=hidden_size, + input_shapes=hidden_size, function=Dropout(p=DROPOUT_PROB)) output = ProcessingMechanism(name=FFN_OUTPUT, - size=2, + input_shapes=2, # function=ReLU ) @@ -354,7 +354,7 @@ def construct_model(stim_size:int = STIM_SIZE, print(f"constructing '{NBACK_MODEL}'...") # Stimulus Encoding: takes STIM_SIZE vector as input - stim = TransferMechanism(name=MODEL_STIMULUS_INPUT, size=stim_size) + stim = TransferMechanism(name=MODEL_STIMULUS_INPUT, input_shapes=stim_size) # Context Encoding: takes scalar as drift step for current trial context = ProcessingMechanism(name=MODEL_CONTEXT_INPUT, @@ -365,16 +365,16 @@ def construct_model(stim_size:int = STIM_SIZE, # Task: task one-hot indicating n-back (1, 2, 3 etc.) - must correspond to what ffn has been trained to do task = ProcessingMechanism(name=MODEL_TASK_INPUT, - size=num_nback_levels) + input_shapes=num_nback_levels) # Episodic Memory: # - entries: stimulus (field[0]) and context (field[1]); randomly initialized # - uses Softmax to retrieve best matching input, subject to weighting of stimulus and context by STIM_WEIGHT em = EpisodicMemoryMechanism(name=EM, input_ports=[{NAME:"STIMULUS_FIELD", - SIZE:stim_size}, + INPUT_SHAPES:stim_size}, {NAME:"CONTEXT_FIELD", - SIZE:context_size}], + INPUT_SHAPES:context_size}], function=ContentAddressableMemory( initializer=[[[0] * stim_size, [0] * context_size]], distance_field_weights=[retrieval_stimulus_weight, @@ -385,13 +385,13 @@ def construct_model(stim_size:int = STIM_SIZE, ) logit = TransferMechanism(name='LOGIT', - size=2, + input_shapes=2, # output_ports=[{VARIABLE: (OWNER_VALUE,0), # FUNCTION: lambda x : np.log(x)}], function=Logistic) decision = TransferMechanism(name=DECISION, - size=2, + input_shapes=2, function=SoftMax(output=MAX_INDICATOR)) # Control Mechanism diff --git a/Scripts/Models (Under Development)/nback/nback_og_pnl.py b/Scripts/Models (Under Development)/nback/nback_og_pnl.py index fcbab06dc66..18b8e77413f 100644 --- a/Scripts/Models (Under Development)/nback/nback_og_pnl.py +++ b/Scripts/Models (Under Development)/nback/nback_og_pnl.py @@ -317,24 +317,24 @@ def construct_model(stim_size:int = STIM_SIZE, # output: match [1,0] or non-match [0,1] # Must be trained to detect match for specified task (1-back, 2-back, etc.) 
stim_context_input = TransferMechanism(name=FFN_INPUT, - size=ffn_input_size) + input_shapes=ffn_input_size) task_input = ProcessingMechanism(name=FFN_TASK, - size=task_size) + input_shapes=task_size) task_embedding = ProcessingMechanism(name=FFN_TASK, - size=h1_size) + input_shapes=h1_size) h1 = ProcessingMechanism(name=FFN_H1, - size=h1_size, + input_shapes=h1_size, function=FFN_TRANSFER_FUNCTION) add_layer = ProcessingMechanism(name=FFN_ADD_LAYER, - size=h1_size) + input_shapes=h1_size) dropout = ProcessingMechanism(name=FFN_DROPOUT, - size=h1_size, + input_shapes=h1_size, function=Dropout(p=DROPOUT_PROB)) h2 = ProcessingMechanism(name=FFN_H2, - size=h2_size, + input_shapes=h2_size, function=FFN_TRANSFER_FUNCTION) output = ProcessingMechanism(name=FFN_OUTPUT, - size=2, + input_shapes=2, function = Linear # function=ReLU ) @@ -358,7 +358,7 @@ def construct_model(stim_size:int = STIM_SIZE, print(f"constructing '{NBACK_MODEL}'...") # Stimulus Encoding: takes stim_size vector as input - stim = TransferMechanism(name=MODEL_STIMULUS_INPUT, size=stim_size) + stim = TransferMechanism(name=MODEL_STIMULUS_INPUT, input_shapes=stim_size) # Context Encoding: takes scalar as drift step for current trial context = ProcessingMechanism(name=MODEL_CONTEXT_INPUT, @@ -369,16 +369,16 @@ def construct_model(stim_size:int = STIM_SIZE, # Task: task one-hot indicating n-back (1, 2, 3 etc.) - must correspond to what ffn has been trained to do task = ProcessingMechanism(name=MODEL_TASK_INPUT, - size=task_size) + input_shapes=task_size) # Episodic Memory: # - entries: stimulus (field[0]) and context (field[1]); randomly initialized # - uses Softmax to retrieve best matching input, subject to weighting of stimulus and context by STIM_WEIGHT em = EpisodicMemoryMechanism(name=EM, input_ports=[{NAME:"STIMULUS_FIELD", - SIZE:stim_size}, + INPUT_SHAPES:stim_size}, {NAME:"CONTEXT_FIELD", - SIZE:context_size}], + INPUT_SHAPES:context_size}], function=ContentAddressableMemory( initializer=[[[0] * stim_size, [0] * context_size]], distance_field_weights=[retrieval_stimulus_weight, @@ -395,7 +395,7 @@ def construct_model(stim_size:int = STIM_SIZE, function=Concatenate) decision = TransferMechanism(name=DECISION, - size=2, + input_shapes=2, function=SoftMax(output=MAX_INDICATOR)) # Control Mechanism diff --git a/docs/source/BasicsAndPrimer.rst b/docs/source/BasicsAndPrimer.rst index 445148d20ba..b35f4d75fe9 100644 --- a/docs/source/BasicsAndPrimer.rst +++ b/docs/source/BasicsAndPrimer.rst @@ -95,9 +95,9 @@ encoder network, the first layer of which takes an an array of length 5 as its i `Logistic` function:: # Construct the Mechanisms: - input_layer = ProcessingMechanism(size=5, name='Input') - hidden_layer = ProcessingMechanism(size=2, function=Logistic, name='hidden') - output_layer = ProcessingMechanism(size=5, function=Logistic, name='output') + input_layer = ProcessingMechanism(input_shapes=5, name='Input') + hidden_layer = ProcessingMechanism(input_shapes=2, function=Logistic, name='hidden') + output_layer = ProcessingMechanism(input_shapes=5, function=Logistic, name='output') # Construct the Composition: my_encoder = Composition(pathways=[[input_layer, hidden_layer, output_layer]]) @@ -188,22 +188,22 @@ of those to perform based on a task instruction. 
These all converge on a common drift diffusion (DDM) decision mechanism responsible for determining the response:: # Construct the color naming pathway: - color_input = ProcessingMechanism(name='COLOR INPUT', size=2) # note: default function is Linear + color_input = ProcessingMechanism(name='COLOR INPUT', input_shapes=2) # note: default function is Linear color_input_to_hidden_wts = np.array([[2, -2], [-2, 2]]) - color_hidden = ProcessingMechanism(name='COLOR HIDDEN', size=2, function=Logistic(bias=-4)) + color_hidden = ProcessingMechanism(name='COLOR HIDDEN', input_shapes=2, function=Logistic(bias=-4)) color_hidden_to_output_wts = np.array([[2, -2], [-2, 2]]) - output = ProcessingMechanism(name='OUTPUT', size=2 , function=Logistic) + output = ProcessingMechanism(name='OUTPUT', input_shapes=2, function=Logistic) color_pathway = [color_input, color_input_to_hidden_wts, color_hidden, color_hidden_to_output_wts, output] # Construct the word reading pathway (using the same output_layer) - word_input = ProcessingMechanism(name='WORD INPUT', size=2) + word_input = ProcessingMechanism(name='WORD INPUT', input_shapes=2) word_input_to_hidden_wts = np.array([[3, -3], [-3, 3]]) - word_hidden = ProcessingMechanism(name='WORD HIDDEN', size=2, function=Logistic(bias=-4)) + word_hidden = ProcessingMechanism(name='WORD HIDDEN', input_shapes=2, function=Logistic(bias=-4)) word_hidden_to_output_wts = np.array([[3, -3], [-3, 3]]) word_pathway = [word_input, word_input_to_hidden_wts, word_hidden, word_hidden_to_output_wts, output] # Construct the task specification pathways - task_input = ProcessingMechanism(name='TASK INPUT', size=2) + task_input = ProcessingMechanism(name='TASK INPUT', input_shapes=2) task_color_wts = np.array([[4,4],[0,0]]) task_word_wts = np.array([[0,0],[4,4]]) task_color_pathway = [task_input, task_color_wts, color_hidden] @@ -324,7 +324,7 @@ that uses a `leaky competing accumulator `, and use control = ControlMechanism(name='CONTROL', objective_mechanism=ObjectiveMechanism(name='Conflict Monitor', monitor=output, - function=Energy(size=2, + function=Energy(input_shapes=2, matrix=[[0,-2.5],[-2.5,0]])), default_allocation=[0.5], control_signals=[(GAIN, task)]) @@ -936,14 +936,14 @@ For example, the following implements a network for learning semantic representa # Representation_Input # Construct Mechanisms - rep_in = pnl.ProcessingMechanism(size=10, name='REP_IN') - rel_in = pnl.ProcessingMechanism(size=11, name='REL_IN') - rep_hidden = pnl.ProcessingMechanism(size=4, function=Logistic, name='REP_HIDDEN') - rel_hidden = pnl.ProcessingMechanism(size=5, function=Logistic, name='REL_HIDDEN') - rep_out = pnl.ProcessingMechanism(size=10, function=Logistic, name='REP_OUT') - prop_out = pnl.ProcessingMechanism(size=12, function=Logistic, name='PROP_OUT') - qual_out = pnl.ProcessingMechanism(size=13, function=Logistic, name='QUAL_OUT') - act_out = pnl.ProcessingMechanism(size=14, function=Logistic, name='ACT_OUT') + rep_in = pnl.ProcessingMechanism(input_shapes=10, name='REP_IN') + rel_in = pnl.ProcessingMechanism(input_shapes=11, name='REL_IN') + rep_hidden = pnl.ProcessingMechanism(input_shapes=4, function=Logistic, name='REP_HIDDEN') + rel_hidden = pnl.ProcessingMechanism(input_shapes=5, function=Logistic, name='REL_HIDDEN') + rep_out = pnl.ProcessingMechanism(input_shapes=10, function=Logistic, name='REP_OUT') + prop_out = pnl.ProcessingMechanism(input_shapes=12, function=Logistic, name='PROP_OUT') + qual_out = pnl.ProcessingMechanism(input_shapes=13, function=Logistic, name='QUAL_OUT') +
act_out = pnl.ProcessingMechanism(input_shapes=14, function=Logistic, name='ACT_OUT') # Construct Composition comp = Composition(name='Rumelhart Semantic Network') diff --git a/docs/source/BotvinickConflictMonitoringModel.rst b/docs/source/BotvinickConflictMonitoringModel.rst index 5473b967595..61a3e5db4f8 100644 --- a/docs/source/BotvinickConflictMonitoringModel.rst +++ b/docs/source/BotvinickConflictMonitoringModel.rst @@ -43,30 +43,30 @@ bidirectional way. The response layer receives inputs from both hidden layers. A Network System ~~~~~~~~~~~~~~ -**COLOR INPUT LAYER**: a `TransferMechanism` with **size** = 3 (one unit for the input of one color, respectively +**COLOR INPUT LAYER**: a `TransferMechanism` with **input_shapes** = 3 (one unit for the input of one color, respectively here blue & green), and assigned a `Linear` function with **slope** = 1.0 and **intercept** = 0.0. -**WORD INPUT LAYER**: a `TransferMechanism` with **size** = 3 (one unit for the input of one word, respectively, +**WORD INPUT LAYER**: a `TransferMechanism` with **input_shapes** = 3 (one unit for the input of one word, respectively, here blue & green), and assigned a `Linear` function with **slope** = 1.0 and **intercept** = 0.0. -**TASK INPUT LAYER**: a `TransferMechanism` with **size** = 2 (one unit specified with a task +**TASK INPUT LAYER**: a `TransferMechanism` with **input_shapes** = 2 (one unit specified with a task value of one, the other element set to zero), and assigned a `Linear` function with **slope** = 1.0 and **intercept** = 0.0. -**COLOR HIDDEN LAYER**: a `RecurrentTransferMechanism` with **size** = 3 (one element for each of the two colors, one +**COLOR HIDDEN LAYER**: a `RecurrentTransferMechanism` with **input_shapes** = 3 (one element for each of the two colors, one element for the neutral color and assigned a `Logistic` function with **gain** = 4.0 and **bias** = 1.0. The **integrator_mode** = `True` and **smoothing_factor** = 0.01. Both units receive mutually inhibitory weights (**hetero** = -2). -**WORD HIDDEN LAYER**: a `RecurrentTransferMechanism` with **size** = 3 (one element for each of the two colors, one +**WORD HIDDEN LAYER**: a `RecurrentTransferMechanism` with **input_shapes** = 3 (one element for each of the two colors, one element for the neutral color and assigned a `Logistic` function with **gain** = 4.0 and **bias** = 1.0. The **integrator_mode** = `True` and **smoothing_factor** = 0.01. Both units receive mutually inhibitory weights (**hetero** = -2). -**TASK DEMAND LAYER**: a `RecurrentTransferMechanism` with **size** = 2 (one element for each of the two tasks, and +**TASK DEMAND LAYER**: a `RecurrentTransferMechanism` with **input_shapes** = 2 (one element for each of the two tasks, and assigned a `Logistic` function with **gain** = 1.0 and **bias** = 0.0. The **integrator_mode** = `True` and **smoothing_factor** = 0.01. Both units receive mutually inhibitory weights (**hetero** = -2). -**RESPONSE LAYER**: a `RecurrentTransferMechanism` with **size** = 2 (one element for each of the two responses, and +**RESPONSE LAYER**: a `RecurrentTransferMechanism` with **input_shapes** = 2 (one element for each of the two responses, and assigned a `Logistic` function with **gain** = 1.0 and **bias** = 0.0. The **integrator_mode** = `True` and **smoothing_factor** = 0.01. Both units receive mutually inhibitory weights (**hetero** = -2). 
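To make the renamed argument concrete, the *RESPONSE LAYER* described above would now be constructed roughly as follows. This is a minimal sketch, assuming a PsyNeuLink build that includes this patch, and showing only the parameters named in the description (the integration rate is left at its default)::

    import psyneulink as pnl

    # RESPONSE LAYER: two mutually inhibitory logistic units,
    # sized with input_shapes (formerly size)
    response_layer = pnl.RecurrentTransferMechanism(
        name='RESPONSE',
        input_shapes=2,                              # was: size=2
        function=pnl.Logistic(gain=1.0, bias=0.0),
        hetero=-2,                                   # mutual inhibition
        integrator_mode=True,
    )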
diff --git a/docs/source/Cohen_HustonModel.rst b/docs/source/Cohen_HustonModel.rst index f949fd55dd7..909b58df9e5 100644 --- a/docs/source/Cohen_HustonModel.rst +++ b/docs/source/Cohen_HustonModel.rst @@ -58,26 +58,26 @@ Below the Graph of the model is shown. Composition ~~~~~~~~~~~ -**COLOR INPUT LAYER**: a `TransferMechanism` with **size** = 3 (one element for the input to each color in the +**COLOR INPUT LAYER**: a `TransferMechanism` with **input_shapes** = 3 (one element for the input to each color in the *HIDDEN COLOR LAYER*, respectively), and assigned a `Linear` function with **slope** = 1.0 and **intercept** = 0.0. -**WORD INPUT LAYER**: a `TransferMechanism` with **size** = 3 (one element for the input to each word in the +**WORD INPUT LAYER**: a `TransferMechanism` with **input_shapes** = 3 (one element for the input to each word in the *HIDDEN WORD LAYER*, respectively), and assigned a `Linear` function with **slope** = 1.0 and **intercept** = 0.0. -**TASK INPUT LAYER**: a `TransferMechanism` with **size** = 2 (one element for the input to each task in the +**TASK INPUT LAYER**: a `TransferMechanism` with **input_shapes** = 2 (one element for the input to each task in the *TASK LAYER*, respectively), and assigned a `Linear` function with **slope** = 1.0 and **intercept** = 0.0. -**HIDDEN COLOR LAYER**: a `RecurrentTransferMechanism` Mechanism of **size** = 3 (one element each for the color units), +**HIDDEN COLOR LAYER**: a `RecurrentTransferMechanism` Mechanism of **input_shapes** = 3 (one element each for the color units), and assigned a `Logistic` Function with a bias = 4.0 and intercept = 0.0. Each element is connected to every other element by mutually inhibitory connections with a weight specified by **hetero** = -2.0. An integrator mechanism is specified by setting the **integrator_mode** = `True` and **smoothing_factor** = 0.1. -**HIDDEN WORD LAYER**: a `RecurrentTransferMechanism` specified as the *HIDDEN COLOR LAYER* with **size** = 3, +**HIDDEN WORD LAYER**: a `RecurrentTransferMechanism` specified as the *HIDDEN COLOR LAYER* with **input_shapes** = 3, a `Logistic` Function with a **bias** = 4.0 and **intercept** = 0.0, mutually inhibitory connections with a weight specified by **hetero** = -2.0, **integrator_mode** = `True` and **smoothing_factor** = 0.1.. **RESPONSE LAYER**: a `RecurrentTransferMechanism` specified as the *HIDDEN COLOR LAYER* with the only difference of -changing the bias to 0 in the `Logistic` Function, and the size of 2. +changing the bias to 0 in the `Logistic` Function, and **input_shapes** = 2. **TASK LAYER**: a `RecurrentTransferMechanism` specified as the *RESPONSE LAYER*. diff --git a/docs/source/NieuwenhuisModel.rst b/docs/source/NieuwenhuisModel.rst index 7c4c07d33f4..e4faf3a8510 100644 --- a/docs/source/NieuwenhuisModel.rst +++ b/docs/source/NieuwenhuisModel.rst @@ -65,17 +65,17 @@ associated `ObjectiveMechanism`, as shown in the figure below: Behavioral Network Subsystem ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -**INPUT LAYER**: a `TransferMechanism` with **size**\ =3 (one element for the input to the T1, T2 and distractor units +**INPUT LAYER**: a `TransferMechanism` with **input_shapes**\ =3 (one element for the input to the T1, T2 and distractor units of the *DECISION LAYER*, respectively), and assigned a `Linear` function with **slope**\ =1.0 and **intercept**\ =0.0.
-**DECISION LAYER**: an `LCAMechanism` Mechanism of **size**\ =3 (one element each for the T1, T2 and distractor units), +**DECISION LAYER**: an `LCAMechanism` Mechanism of **input_shapes**\ =3 (one element each for the T1, T2 and distractor units), and assigned a `Logistic` Function with a slope=1.0 and intercept=0.0. Each element has a self-excitatory connection with a weight specified by **self_excitation**\ =2.5, a **leak**\ =-1.0, and every element is connected to every other element by mutually inhibitory connections with a weight specified by **competition** =1.0. An ordinary differential equation describes the change in state over time, implemented in the LCAMechanism mechanism by setting **integrator_mode** = `True` and **time_step_size**\ =0.02. -**RESPONSE LAYER**: an `LCAMechanism` Mechanism of **size**\ =2, with one element each for the response to T1 and T2, +**RESPONSE LAYER**: an `LCAMechanism` Mechanism of **input_shapes**\ =2, with one element each for the response to T1 and T2, respectively, **self_excitation**\ =2.0, **leak**\ =-1.0, and no mutually inhibitory weights (**competition**\ =0). **PROJECTIONS**: The weights of the behavioral network are implemented as `MappingProjections `. diff --git a/docs/source/PCTC_model.rst b/docs/source/PCTC_model.rst index 058bc81331f..c07996d78de 100644 --- a/docs/source/PCTC_model.rst +++ b/docs/source/PCTC_model.rst @@ -56,33 +56,33 @@ model. A graph of the model is shown below. Network System ~~~~~~~~~~~~~~ -**COLOR INPUT LAYER**: a `TransferMechanism` with **size**\ =2 (one unit for the input of one color, respectively +**COLOR INPUT LAYER**: a `TransferMechanism` with **input_shapes**\ =2 (one unit for the input of one color, respectively here blue & green), and assigned a `Linear` function with **slope**\ =1.0 and **intercept**\ =0.0. -**WORD INPUT LAYER**: a `TransferMechanism` with **size**\ =2 (one unit for the input of one word, respectively, +**WORD INPUT LAYER**: a `TransferMechanism` with **input_shapes**\ =2 (one unit for the input of one word, respectively, here blue & green), and assigned a `Linear` function with **slope**\ =1.0 and **intercept**\ =0.0. -**BIAS INPUT LAYER**: a `TransferMechanism` with **size**\ =2 (one unit for the bias of one of the hidden layers, +**BIAS INPUT LAYER**: a `TransferMechanism` with **input_shapes**\ =2 (one unit for the bias of one of the hidden layers, which is the same in this model), and assigned a `Linear` function with **slope**\ =1.0 and **intercept**\ =0.0. -**PROACTIVE CONTROL INPUT LAYER**: a `TransferMechanism` with **size**\ =2 (one unit specified with a proactive control +**PROACTIVE CONTROL INPUT LAYER**: a `TransferMechanism` with **input_shapes**\ =2 (one unit specified with a proactive control value, the other one set to zero), and assigned a `Linear` function with **slope**\ =1.0 and **intercept**\ =0.0. -**COLOR HIDDEN LAYER**: a `RecurrentTransferMechanism` with **size**\ =2 (one element for each of the two colors, and +**COLOR HIDDEN LAYER**: a `RecurrentTransferMechanism` with **input_shapes**\ =2 (one element for each of the two colors, and assigned a `Logistic` function with **gain**\ =4.0 and **bias**\ =1.0. The **integrator_mode**\ =\ `True` and **smoothing_factor**\ =0.03. Both units receive mutually inhibitory weights (**hetero**\ =-2). A python function that sets the output of the `Logistic` function to 0 when it receives 0 as an input is specified on the `output_ports`. 
It simply subtracts 0.018 from the output of a logistic function and if this leads to a value below 0, outputs a 0 as a minimum value. -**WORD HIDDEN LAYER**: a `RecurrentTransferMechanism` with **size**\ =2 (one element for each of the two words, and +**WORD HIDDEN LAYER**: a `RecurrentTransferMechanism` with **input_shapes**\ =2 (one element for each of the two words, and assigned a `Logistic` function with **gain**\ =4.0 and **bias**\ =1.0. The **integrator_mode**\ =\ `True` and **smoothing_factor**\ =0.03. Both units receive mutually inhibitory weights (**hetero**\ =-2). A python function that sets the output of the `Logistic` function to 0 when it receives 0 as an input is specified on the `output_ports`. It simply subtracts 0.018 from the output of a logistic function and if this leads to a value below 0, outputs a 0 as a minimum value. -**TASK DEMAND LAYER**: a `RecurrentTransferMechanism` with **size**\ =2 (one element for each of the two tasks, and +**TASK DEMAND LAYER**: a `RecurrentTransferMechanism` with **input_shapes**\ =2 (one element for each of the two tasks, and assigned a `Logistic` function with **gain**\ =4.0 and **bias**\ =1.0. The **integrator_mode**\ =\ `True` and **smoothing_factor**\ =0.03. Both units receive mutually inhibitory weights (**hetero**\ =-2). A python function that sets the output of the `Logistic` function to 0 when it receives 0 as an input is specified on the `output_ports`. It @@ -90,7 +90,7 @@ simply subtracts 0.018 from the output of a logistic function and if this leads outputs a 0 as a minimum value. A second OutputPort is specified that computes the conflict between the two task units. -**WORD HIDDEN LAYER**: a `RecurrentTransferMechanism` with **size**\ =2 (one element for each of the two responses, and +**RESPONSE LAYER**: a `RecurrentTransferMechanism` with **input_shapes**\ =2 (one element for each of the two responses, and assigned a `Logistic` function with **gain**\ =4.0 and **bias**\ =1.0. The **integrator_mode**\ =\ `True` and **smoothing_factor**\ =0.03. Both units receive mutually inhibitory weights (**hetero**\ =-2). A python function that sets the output of the `Logistic` function to 0 when it receives 0 as an input is specified on the `output_ports`. It diff --git a/docs/source/RefactoredLearningGuide.rst b/docs/source/RefactoredLearningGuide.rst index 5b0cfa840c4..40d47c2a805 100644 --- a/docs/source/RefactoredLearningGuide.rst +++ b/docs/source/RefactoredLearningGuide.rst @@ -100,8 +100,8 @@ This is demonstrated in the following codeblocks: This is the OLD code: ->>> my_mech_1 = pnl.TransferMechanism(function=pnl.Linear, size = 3) -... my_mech_2 = pnl.TransferMechanism(function=pnl.Linear, size = 2) +>>> my_mech_1 = pnl.TransferMechanism(function=pnl.Linear, input_shapes = 3) +... my_mech_2 = pnl.TransferMechanism(function=pnl.Linear, input_shapes = 2) ... my_projection = pnl.MappingProjection(matrix=np.random.randn(3,2), ... sender=my_mech_1, ... receiver=my_mech_2) @@ -124,8 +124,8 @@ This is the OLD code: And this is equivalent code AFTER the changes: ->>> my_mech_1 = pnl.TransferMechanism(function=pnl.Linear, size = 3) -... my_mech_2 = pnl.TransferMechanism(function=pnl.Linear, size = 2) +>>> my_mech_1 = pnl.TransferMechanism(function=pnl.Linear, input_shapes = 3) +... my_mech_2 = pnl.TransferMechanism(function=pnl.Linear, input_shapes = 2) ... my_projection = pnl.MappingProjection(matrix=np.random.randn(3,2), ... sender=my_mech_1, ...
receiver=my_mech_2) diff --git a/psyneulink/core/components/component.py b/psyneulink/core/components/component.py index e0c57db6496..36cc0402340 100644 --- a/psyneulink/core/components/component.py +++ b/psyneulink/core/components/component.py @@ -95,7 +95,7 @@ argument in the constructor for a Component determines both its format (e.g., whether its value is numeric, its dimensionality and shape if it is an array, etc.) as well as its `default_value ` (the value used when the Component is executed and no input is provided). - It may alternatively be specified by `size `. + It may alternatively be specified by `input_shapes `. .. technical_note:: Internally, the attribute **variable** is not directly used as input to functions, to allow for parallelization. During parallelization however, the attribute may not accurately represent the most current value of variable being used, due to asynchrony inherent to parallelization. -.. _Component_Size: +.. _Component_Input_Shapes: -* **size** - the numpy shape or iterable of shapes matching the - `variable ` attribute. The **size** argument of +* **input_shapes** - the numpy shape or iterable of shapes matching the + `variable ` attribute. The **input_shapes** argument of the constructor for a Component can be used as a convenient method for specifying the `variable `, attribute in which case it will be assigned as an array of zeros of - the specified shape. When **size** is an iterable, each item in the + the specified shape. When **input_shapes** is an iterable, each item in the iterable is treated as a single shape, and the entire iterable is then - assigned as an array. When **size** is an integer, it is treated the + assigned as an array. When **input_shapes** is an integer, it is treated the same as a one-item iterable containing that integer. For example, - setting **size** = 3 is equivalent to setting - **variable** = [[0, 0, 0]] and setting **size** = [4, 3] is equivalent + setting **input_shapes** = 3 is equivalent to setting + **variable** = [[0, 0, 0]] and setting **input_shapes** = [4, 3] is equivalent to setting **variable** = [[0, 0, 0, 0], [0, 0, 0]]. .. note:: - The size attribute serves a role similar to + The input_shapes attribute serves a role similar to `shape in Numpy `_, with the difference that - size permits the specification of `ragged arrays `_ -- that is, ones + input_shapes permits the specification of `ragged arrays `_ -- that is, ones that have elements of varying lengths, such as [[1,2],[3,4,5]]. ..
.. _Component_Function:

@@ -331,10 +331,10 @@
     _instantiate_function method checks that the input of the Component's
     `function ` is compatible with its `variable `).
-  * `_handle_size ` attempts to infer
-    `variable ` from the **size** argument if
+  * `_handle_input_shapes ` attempts to infer
+    `variable ` from the **input_shapes** argument if
     **variable** is not passed as an argument.
-    The _handle_size method then checks that the **size** and **variable** arguments are compatible.
+    The _handle_input_shapes method then checks that the **input_shapes** and **variable** arguments are compatible.
   * `_instantiate_defaults ` first calls the validation methods, and then assigns the default
     values for all of the attributes of the instance of the Component being created.
@@ -535,7 +535,7 @@
     MODEL_SPEC_ID_INPUT_PORTS, MODEL_SPEC_ID_OUTPUT_PORTS, \
     MODEL_SPEC_ID_MDF_VARIABLE, \
     MODULATORY_SPEC_KEYWORDS, NAME, OUTPUT_PORTS, OWNER, PARAMS, PREFS_ARG, \
-    RESET_STATEFUL_FUNCTION_WHEN, SIZE, VALUE, VARIABLE, SHARED_COMPONENT_TYPES
+    RESET_STATEFUL_FUNCTION_WHEN, INPUT_SHAPES, VALUE, VARIABLE, SHARED_COMPONENT_TYPES
 from psyneulink.core.globals.log import LogCondition
 from psyneulink.core.globals.parameters import \
     Defaults, SharedParameter, Parameter, ParameterAlias, ParameterError, ParametersBase, check_user_specified, copy_parameter_value, is_array_like
@@ -750,7 +750,7 @@ class Component(MDFSerializable, metaclass=ComponentsMeta):
     """
     Component(                 \
     default_variable=None,     \
-    size=None,                 \
+    input_shapes=None,         \
     params=None,               \
     name=None,                 \
     prefs=None,                \
@@ -772,7 +772,7 @@
     The variable(s) can be a function reference, in which case the function is called to resolve the value;
         however:  it must be "wrapped" as an item in a list, so that it is not called before being passed
                   it must of course return a variable of the type expected for the variable
-    The size argument is an int or array of ints, which specify the size of variable and set variable to be array(s)
+    The input_shapes argument is an int or array of ints, which specifies the shape(s) of variable and sets variable to be array(s)
         of zeros.
     The default variableList is a list of default values, one for each of the variables defined in the child class
     The params argument is a dictionary; the key for each entry is the parameter name, associated with its value.
@@ -815,11 +815,11 @@
         specifies template for the input to the Component's `function `, and the value used as the input to the
         Component if none is provided on execution (see `Component_Variable` for additional information).
-    size : int, or Iterable of tuple or int : default None
+    input_shapes : int, or Iterable of tuple or int : default None
         specifies default_variable as array(s) of zeros if **default_variable**
         is not passed as an argument; if **default_variable** is specified, it is checked for
-        compatibility against **size** (see
-        `size ` for additonal details).
+        compatibility against **input_shapes** (see
+        `input_shapes ` for additional details).
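For reference while reviewing the parsing code in the hunks that follow, the rule stated in the docstring above can be restated as a standalone numpy sketch (a mimic of the behavior, not the function itself)::

    import numpy as np

    def parse_input_shapes(input_shapes):
        # a non-iterable (or a string) is treated as a single shape
        if isinstance(input_shapes, str) or not hasattr(input_shapes, '__iter__'):
            return np.asarray([np.zeros(input_shapes)])
        if len(input_shapes) == 0:
            raise ValueError('input_shapes must not be an empty list')
        # otherwise each item is one shape; the result may be ragged
        return [np.zeros(s) for s in input_shapes]

    parse_input_shapes(3)          # [array([0., 0., 0.])]
    parse_input_shapes([4, 3])     # items of length 4 and 3
    parse_input_shapes([(2, 2)])   # a single 2x2 item, since tuples are full shapes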
COMMENT: param_defaults : : default None, @@ -847,8 +847,8 @@ class Component(MDFSerializable, metaclass=ComponentsMeta): variable : 2d np.array see `variable ` - size : Union[int, Iterable[Union[int, tuple]]] - see `size ` + input_shapes : Union[int, Iterable[Union[int, tuple]]] + see `input_shapes ` function : Function, function or method see `function ` @@ -930,7 +930,7 @@ class Component(MDFSerializable, metaclass=ComponentsMeta): componentCategory = None componentType = None - standard_constructor_args = {EXECUTE_UNTIL_FINISHED, FUNCTION_PARAMS, MAX_EXECUTIONS_BEFORE_FINISHED, RESET_STATEFUL_FUNCTION_WHEN, SIZE} + standard_constructor_args = {EXECUTE_UNTIL_FINISHED, FUNCTION_PARAMS, MAX_EXECUTIONS_BEFORE_FINISHED, RESET_STATEFUL_FUNCTION_WHEN, INPUT_SHAPES} # helper attributes for MDF model spec _model_spec_id_parameters = 'parameters' @@ -1113,7 +1113,7 @@ def _parse_modulable(self, param_name, param_value): def __init__(self, default_variable, param_defaults, - size=None, + input_shapes=None, function=None, name=None, reset_stateful_function_when=None, @@ -1124,7 +1124,7 @@ def __init__(self, Initialization arguments: - default_variable (anything): establishes type for the variable, used for validation - - size (int or list/array of ints): if specified, establishes variable if variable was not already specified + - input_shapes (int or list/array of ints): if specified, establishes variable if variable was not already specified - params_default (dict): assigned as default Note: if parameter_validation is off, validation is suppressed (for efficiency) (Component class default = on) @@ -1141,7 +1141,7 @@ def __init__(self, self.reset_stateful_function_when = Never() parameter_values, function_params = self._parse_arguments( - default_variable, param_defaults, size, function, function_params, kwargs + default_variable, param_defaults, input_shapes, function, function_params, kwargs ) self._initialize_parameters( @@ -1643,9 +1643,9 @@ def _gen_llvm_function(self, *, ctx:pnlvm.LLVMBuilderContext, # Handlers # ------------------------------------------------------------------------------------------------------------------ - def _handle_default_variable(self, default_variable=None, size=None): + def _handle_default_variable(self, default_variable=None, input_shapes=None): """ - Finds whether default_variable can be determined using **default_variable** and **size** + Finds whether default_variable can be determined using **default_variable** and **input_shapes** arguments. 
Returns @@ -1654,7 +1654,7 @@ def _handle_default_variable(self, default_variable=None, size=None): None otherwise """ default_variable = self._parse_arg_variable(default_variable) - default_variable = self._handle_size(size, default_variable) + default_variable = self._handle_input_shapes(input_shapes, default_variable) if default_variable is None or default_variable is NotImplemented: return None @@ -1663,19 +1663,19 @@ def _handle_default_variable(self, default_variable=None, size=None): return convert_to_np_array(default_variable, dimension=1) - def _parse_size( - self, size: Union[int, Iterable[Union[int, tuple]]] + def _parse_input_shapes( + self, input_shapes: Union[int, Iterable[Union[int, tuple]]] ) -> np.ndarray: """ - Returns the equivalent 'variable' array specified by **size** + Returns the equivalent 'variable' array specified by **input_shapes** Args: - size (Union[int, Iterable[Union[int, tuple]]]) + input_shapes (Union[int, Iterable[Union[int, tuple]]]) Returns: np.ndarray """ - def get_size_elem(s, idx=None): + def get_input_shapes_elem(s, idx=None): try: return np.zeros(s) except (TypeError, ValueError) as e: @@ -1685,43 +1685,43 @@ def get_size_elem(s, idx=None): idx_str = '' raise ComponentError( - f'Invalid size argument of {self}{idx_str}. size must be a' + f'Invalid input_shapes argument of {self}{idx_str}. input_shapes must be a' ' valid numpy shape or a list of shapes for use with' f' numpy.zeros: {e}' ) from e - if not is_iterable(size, exclude_str=True): - variable_from_size = np.asarray([get_size_elem(size)]) + if not is_iterable(input_shapes, exclude_str=True): + variable_from_input_shapes = np.asarray([get_input_shapes_elem(input_shapes)]) else: - if len(size) == 0: + if len(input_shapes) == 0: raise ComponentError( - f'Invalid size argument of {self}. size must not be an empty list' + f'Invalid input_shapes argument of {self}. input_shapes must not be an empty list' ) - variable_from_size = [] - for i, s in enumerate(size): - variable_from_size.append(get_size_elem(s, i)) - variable_from_size = convert_all_elements_to_np_array(variable_from_size) + variable_from_input_shapes = [] + for i, s in enumerate(input_shapes): + variable_from_input_shapes.append(get_input_shapes_elem(s, i)) + variable_from_input_shapes = convert_all_elements_to_np_array(variable_from_input_shapes) - return variable_from_size + return variable_from_input_shapes # ELIMINATE SYSTEM - # IMPLEMENTATION NOTE: (7/7/17 CW) Due to System and Process being initialized with size at the moment (which will - # be removed later), I’m keeping _handle_size in Component.py. I’ll move the bulk of the function to Mechanism - # through an override, when Composition is done. For now, only Port.py overwrites _handle_size(). - def _handle_size(self, size, variable): - """If variable is None, _handle_size tries to infer variable based on the **size** argument to the - __init__() function. If size is None (usually in the case of + # IMPLEMENTATION NOTE: (7/7/17 CW) Due to System and Process being initialized with input_shapes at the moment (which will + # be removed later), I’m keeping _handle_input_shapes in Component.py. I’ll move the bulk of the function to Mechanism + # through an override, when Composition is done. For now, only Port.py overwrites _handle_input_shapes(). + def _handle_input_shapes(self, input_shapes, variable): + """If variable is None, _handle_input_shapes tries to infer variable based on the **input_shapes** argument to the + __init__() function. 
If input_shapes is None (usually in the case of Projections/Functions), then this function passes without - doing anything. If both size and variable are not None, a + doing anything. If both input_shapes and variable are not None, a ComponentError is thrown if they are not compatible. """ - if size is not None: + if input_shapes is not None: self._variable_shape_flexibility = self._specified_variable_shape_flexibility - # region Fill in and infer variable and size if they aren't specified in args - # if variable is None and size is None: + # region Fill in and infer variable and input_shapes if they aren't specified in args + # if variable is None and input_shapes is None: # variable = self.class_defaults.variable # 6/30/17 now handled in the individual subclasses' __init__() methods because each subclass has different - # expected behavior when variable is None and size is None. + # expected behavior when variable is None and input_shapes is None. # implementation note: for good coding practices, perhaps add setting to enable easy change of the default # value of variable (though it's an unlikely use case), which is an array of zeros at the moment @@ -1733,41 +1733,41 @@ def conflict_error(reason=None): reason_str = '' return ComponentError( - f'size and default_variable arguments of {self} conflict{reason_str}' + f'input_shapes and default_variable arguments of {self} conflict{reason_str}' ) - variable_from_size = self._parse_size(size) + variable_from_input_shapes = self._parse_input_shapes(input_shapes) if variable is None: - return variable_from_size + return variable_from_input_shapes - if is_iterable(size, exclude_str=True): - assert len(size) == len(variable_from_size) + if is_iterable(input_shapes, exclude_str=True): + assert len(input_shapes) == len(variable_from_input_shapes) if variable.ndim == 0: raise conflict_error( - 'size gives a list of items but default_variable is 0d' + 'input_shapes gives a list of items but default_variable is 0d' ) - elif len(size) != len(variable): + elif len(input_shapes) != len(variable): raise conflict_error( - f'len(size) is {len(size)};' + f'len(input_shapes) is {len(input_shapes)};' f' len(default_variable) is {len(variable)}' ) else: - for i in range(len(size)): - if variable_from_size[i].shape != variable[i].shape: + for i in range(len(input_shapes)): + if variable_from_input_shapes[i].shape != variable[i].shape: raise conflict_error( - f'size[{i}].shape: {variable_from_size[i].shape};' + f'input_shapes[{i}].shape: {variable_from_input_shapes[i].shape};' f' default_variable[{i}].shape: {variable[i].shape}' ) else: - if variable_from_size.shape != variable.shape: + if variable_from_input_shapes.shape != variable.shape: raise conflict_error( - f'size.shape: {variable_from_size.shape};' + f'input_shapes.shape: {variable_from_input_shapes.shape};' f' default_variable.shape: {variable.shape}' ) - # if variable_from_size is created an error has not been thrown + # if variable_from_input_shapes is created an error has not been thrown # so far, variable is equal return variable @@ -2185,7 +2185,7 @@ def alias_conflicts(alias, passed_name): ) def _parse_arguments( - self, default_variable, param_defaults, size, function, function_params, kwargs + self, default_variable, param_defaults, input_shapes, function, function_params, kwargs ): if function_params is None: function_params = {} @@ -2198,7 +2198,7 @@ def _parse_arguments( parameter_values = { **{ 'function': function, - 'size': size, + 'input_shapes': input_shapes, 'default_variable': 
default_variable, 'function_params': function_params }, @@ -3722,7 +3722,7 @@ def name(self, value): self._name = value @property - def size(self): + def input_shapes(self): s = [] try: diff --git a/psyneulink/core/components/functions/nonstateful/learningfunctions.py b/psyneulink/core/components/functions/nonstateful/learningfunctions.py index 96b4b3c3085..3a890c42d47 100644 --- a/psyneulink/core/components/functions/nonstateful/learningfunctions.py +++ b/psyneulink/core/components/functions/nonstateful/learningfunctions.py @@ -822,10 +822,10 @@ def __init__(self, prefs=prefs, ) - def _handle_default_variable(self, default_variable=None, size=None): + def _handle_default_variable(self, default_variable=None, input_shapes=None): # If default_variable was not specified by user... - if default_variable is None and size in {None, NotImplemented}: + if default_variable is None and input_shapes in {None, NotImplemented}: # but mu_0 and/or sigma_0 was specified as an array... if isinstance(self.mu_0, (list, np.ndarray)) or isinstance(self.sigma_0, (list, np.ndarray)): # if both are specified, make sure they are the same size @@ -842,7 +842,7 @@ def _handle_default_variable(self, default_variable=None, size=None): else: default_variable = [np.zeros_like(self.sigma_0), np.zeros((1,1))] - return super()._handle_default_variable(default_variable=default_variable, size=size) + return super()._handle_default_variable(default_variable=default_variable, input_shapes=input_shapes) def initialize_priors(self): """Set the prior parameters (`mu_prior `, `Lamba_prior `, diff --git a/psyneulink/core/components/functions/nonstateful/objectivefunctions.py b/psyneulink/core/components/functions/nonstateful/objectivefunctions.py index 66d45844e32..4a7d890028c 100644 --- a/psyneulink/core/components/functions/nonstateful/objectivefunctions.py +++ b/psyneulink/core/components/functions/nonstateful/objectivefunctions.py @@ -33,7 +33,7 @@ CORRELATION, COSINE, COSINE_SIMILARITY, CROSS_ENTROPY, \ DEFAULT_VARIABLE, DIFFERENCE, DISTANCE_FUNCTION, DISTANCE_METRICS, DOT_PRODUCT, \ ENERGY, ENTROPY, EUCLIDEAN, HOLLOW_MATRIX, MATRIX, MAX_ABS_DIFF, NORMALIZE, \ - NORMED_L0_SIMILARITY, OBJECTIVE_FUNCTION_TYPE, SIZE, STABILITY_FUNCTION + NORMED_L0_SIMILARITY, OBJECTIVE_FUNCTION_TYPE, INPUT_SHAPES, STABILITY_FUNCTION from psyneulink.core.globals.parameters import FunctionParameter, Parameter, check_user_specified, copy_parameter_value from psyneulink.core.globals.preferences.basepreferenceset import ValidPrefSet from psyneulink.core.globals.utilities import DistanceMetricLiteral, safe_len, convert_to_np_array, convert_all_elements_to_np_array @@ -100,7 +100,7 @@ class Stability(ObjectiveFunction): variable : list or 1d array of numbers: Default class_defaults.variable specifies shape and default value of the array for which stability is calculated. - size : int : None + input_shapes : int : None specifies length of the array over which stability is calculated; can be used in place of default_value, in which case zeros are assigned as the value(s). An error is generated if both are specified but size != len(default_value). 
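The same convention now applies to Functions such as Stability; a brief sketch, using its default metric and matrix::

    import psyneulink as pnl

    # input_shapes=3 simply builds a default_variable of np.zeros(3)
    stab = pnl.Stability(input_shapes=3)
    print(stab([0.2, 0.3, 0.5]))

    # per the constructor check in the next hunk, specifying both arguments
    # raises a FunctionError unless input_shapes == len(default_variable)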
@@ -211,7 +211,7 @@ class Parameters(ObjectiveFunction.Parameters): @beartype def __init__(self, default_variable=None, - size=None, + input_shapes=None, matrix=None, # metric:is_distance_metric=None, metric: Optional[DistanceMetricLiteral] = None, @@ -221,12 +221,12 @@ def __init__(self, owner=None, prefs: Optional[ValidPrefSet] = None): - if size: + if input_shapes: if default_variable is None: - default_variable = np.zeros(size) - elif size != len(default_variable): - raise FunctionError(f"Both {repr(DEFAULT_VARIABLE)} ({default_variable}) and {repr(SIZE)} ({size}) " - f"are specified for {self.name} but are {SIZE}!=len({DEFAULT_VARIABLE}).") + default_variable = np.zeros(input_shapes) + elif input_shapes != len(default_variable): + raise FunctionError(f"Both {repr(DEFAULT_VARIABLE)} ({default_variable}) and {repr(INPUT_SHAPES)} ({input_shapes}) " + f"are specified for {self.name} but are {INPUT_SHAPES}!=len({DEFAULT_VARIABLE}).") super().__init__( default_variable=default_variable, @@ -497,7 +497,7 @@ class Energy(Stability): variable : list or 1d array of numbers: Default class_defaults.variable specifies shape and default value of the array for which energy is calculated. - size : int : None + input_shapes : int : None specifies length of the array over which energy is calculated; can be used in place of default_value, in which case zeros are assigned as the value(s). An error is generated if both are specified but size != len(default_value). @@ -564,7 +564,7 @@ class Energy(Stability): @check_user_specified def __init__(self, default_variable=None, - size=None, + input_shapes=None, normalize:bool=None, # transfer_fct=None, matrix=None, @@ -574,7 +574,7 @@ def __init__(self, super().__init__( default_variable=default_variable, - size=size, + input_shapes=input_shapes, metric=ENERGY, matrix=matrix, # transfer_fct=transfer_fct, @@ -588,7 +588,7 @@ class Entropy(Stability): """ Entropy( \ default_variable=None, \ - size=None, \ + input_shapes=None, \ matrix=INVERSE_HOLLOW_MATRIX, \ transfer_fct=None \ normalize=False, \ @@ -607,10 +607,10 @@ class Entropy(Stability): variable : list or 1d array of numbers: Default class_defaults.variable specifies shape and default value of the array for which entropy is calculated. - size : int : None + input_shapes : int : None specifies length of the array over which entropy is calculated; can be used in place of default_value, in which case zeros are assigned as the value(s). An error is generated if both are specified but - size != len(default_value). + input_shapes != len(default_value). matrix : list, np.ndarray, or matrix keyword : default INVERSE_HOLLOW_MATRIX specifies the matrix of recurrent weights; must be a square matrix with the same width as the @@ -644,7 +644,7 @@ class Entropy(Stability): variable : 1d array array for which entropy is calculated. - size : int + input_shapes : int length of array for which energy is calculated. 
matrix : list, np.ndarray, or matrix keyword diff --git a/psyneulink/core/components/functions/nonstateful/transferfunctions.py b/psyneulink/core/components/functions/nonstateful/transferfunctions.py index 66ef69aadfc..5fdb539246b 100644 --- a/psyneulink/core/components/functions/nonstateful/transferfunctions.py +++ b/psyneulink/core/components/functions/nonstateful/transferfunctions.py @@ -4333,7 +4333,7 @@ class TransferWithCosts(TransferFunction): """ TransferWithCosts( \ default_variable=None, \ - size=None, \ + input_shapes=None, \ transfer_fct=Line \ enabled_cost_functions=None, \ intensity_fct=Exponential \ @@ -4407,11 +4407,11 @@ class TransferWithCosts(TransferFunction): ` on which costs are calculated. - size : int : None + input_shapes : int : None specifies length of the array for `variable ` used by `function ` and on which costs are calculated; can be used in place of default_value, in which case zeros are assigned as the value(s). An error is generated if both are - specified but size != len(default_value). + specified but input_shapes != len(default_value). transfer_fct : TransferFunction : Linear specifies the primary function, used to generate the value it returns. @@ -4454,7 +4454,7 @@ class TransferWithCosts(TransferFunction): value used by `function `, and on which `intensity ` and associated costs are calculated. - size : int + input_shapes : int length of array for `variable `. intensity : 1 array @@ -4809,7 +4809,7 @@ class Parameters(TransferFunction.Parameters): @beartype def __init__(self, default_variable=None, - size=None, + input_shapes=None, transfer_fct: Optional[Callable] = None, enabled_cost_functions: Optional[Union[CostFunctions, list]] = None, intensity_cost_fct: Optional[Callable] = None, @@ -4820,11 +4820,11 @@ def __init__(self, owner=None, prefs: Optional[ValidPrefSet] = None): - # if size: + # if input_shapes: # if default_variable is None: - # default_variable = np.zeros(size) - # elif size != len(default_variable): - # raise FunctionError(f"Both {repr(DEFAULT_VARIABLE)} ({default_variable}) and {repr(SIZE)} ({size}) " + # default_variable = np.zeros(input_shapes) + # elif input_shapes != len(default_variable): + # raise FunctionError(f"Both {repr(DEFAULT_VARIABLE)} ({default_variable}) and {repr(SIZE)} ({input_shapes}) " # f"are specified for {self.name} but are {SIZE}!=len({DEFAULT_VARIABLE}).") super().__init__( diff --git a/psyneulink/core/components/functions/userdefinedfunction.py b/psyneulink/core/components/functions/userdefinedfunction.py index 383a0380988..39f10221485 100644 --- a/psyneulink/core/components/functions/userdefinedfunction.py +++ b/psyneulink/core/components/functions/userdefinedfunction.py @@ -144,8 +144,8 @@ class UserDefinedFunction(Function_Base): array([[6]]) Note that the function treats its argument, x, as a 2d array, and accesses its first item for the calculation. - This is because the `variable ` of ``my_mech`` is defined in the **size** argument of - its constructor as having a single item (a 1d array of length 3; (see `size `). In the + This is because the `variable ` of ``my_mech`` is defined in the **input_shapes** argument of + its constructor as having a single item (a 1d array of length 3; (see `input_shapes `). In the following example, a function is defined for a Mechanism in which the variable has two items, that are summed by the function:: @@ -267,7 +267,7 @@ class UserDefinedFunction(Function_Base): >>> L = pnl.Logistic(gain = 2) >>> def my_fct(variable): ... 
return L(variable) + 2 - >>> my_mech = pnl.ProcessingMechanism(size = 3, function = my_fct) + >>> my_mech = pnl.ProcessingMechanism(input_shapes = 3, function = my_fct) >>> my_mech.execute(input = [1, 2, 3]) #doctest: +SKIP array([[2.88079708, 2.98201379, 2.99752738]]) @@ -280,7 +280,7 @@ class UserDefinedFunction(Function_Base): For example, the following assigns ``my_sinusoidal_fct`` to the `function ` of an OutputPort of ``my_mech``, rather the Mechanism's `function `:: - >>> my_wave_mech = pnl.ProcessingMechanism(size=1, + >>> my_wave_mech = pnl.ProcessingMechanism(input_shapes=1, ... function=pnl.Linear, ... output_ports=[{pnl.NAME: 'SINUSOIDAL OUTPUT', ... pnl.VARIABLE: [(pnl.OWNER_VALUE, 0),pnl.EXECUTION_COUNT], diff --git a/psyneulink/core/components/mechanisms/mechanism.py b/psyneulink/core/components/mechanisms/mechanism.py index 79caf6c97d4..e261ce5eb32 100644 --- a/psyneulink/core/components/mechanisms/mechanism.py +++ b/psyneulink/core/components/mechanisms/mechanism.py @@ -419,7 +419,7 @@ a Mechanism's InputPorts and the items of its `variable `, their size along their outermost dimension (axis 0) must be equal; that is, the number of items in the Mechanism's `variable ` attribute must equal the number of InputPorts in its `input_ports ` attribute. A -Mechanism's constructor does its best to insure this: if its **default_variable** and/or its **size** argument is +Mechanism's constructor does its best to insure this: if its **default_variable** and/or its **input_shapes** argument is specified, it constructs a number of InputPorts (and each with a `value `) corresponding to the items specified for the Mechanism's `variable `, as in the examples below:: @@ -444,7 +444,7 @@ print(my_mech_C.variable) > [array([0, 0]) array([0])] -If both the **default_variable** (or **size**) and **input_ports** arguments are specified, then the number and format +If both the **default_variable** (or **input_shapes**) and **input_ports** arguments are specified, then the number and format of their respective items must be the same (see `Port ` for additional examples of specifying Ports). If InputPorts are added using the Mechanism's `add_ports ` method, then its @@ -478,9 +478,9 @@ ` of the corresponding InputPorts for any that are not explicitly specified in the **input_ports** argument or *INPUT_PORTS* entry (see below). .. -* **size** (int, list or ndarray) -- specifies the number and length of items in the Mechanism's variable, +* **input_shapes** (int, list or ndarray) -- specifies the number and length of items in the Mechanism's variable, if **default_variable** is not specified. For example, the following mechanisms are equivalent:: - T1 = TransferMechanism(size = [3, 2]) + T1 = TransferMechanism(input_shapes = [3, 2]) T2 = TransferMechanism(default_variable = [[0, 0, 0], [0, 0]]) The relationship to any specifications in the **input_ports** argument or *INPUT_PORTS* entry of a **params** dictionary is the same as for the **default_variable** argument, @@ -488,7 +488,7 @@ .. * **input_ports** (list) -- this can be used to explicitly `specify the InputPorts ` created for the Mechanism. Each item must be an `InputPort specification `, and the number - of items must match the number of items in the **default_variable** argument or **size** argument + of items must match the number of items in the **default_variable** argument or **input_shapes** argument if either of those is specified. 
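The InputPort bookkeeping described above can be verified directly; a small sketch::

    import psyneulink as pnl

    T1 = pnl.TransferMechanism(input_shapes=[3, 2])
    T2 = pnl.TransferMechanism(default_variable=[[0, 0, 0], [0, 0]])

    # each item of variable gets its own InputPort
    assert len(T1.input_ports) == len(T2.input_ports) == 2
    print(T1.variable)   # [array([0., 0., 0.]) array([0., 0.])]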
If the `variable ` and/or `value ` is `explicitly specified for an InputPort ` in the **input_ports** argument or *INPUT_PORTS* entry of a **params** dictionary, it must be compatible with the value of the corresponding @@ -1148,7 +1148,7 @@ class Mechanism_Base(Mechanism): """ Mechanism_Base( \ default_variable=None, \ - size=None, \ + input_shapes=None, \ input_ports, \ function, \ output_ports, \ @@ -1222,21 +1222,21 @@ class Mechanism_Base(Mechanism): of its `function ` if those are not specified. If it is not specified, then a subclass-specific default is assigned (usually [[0]]). - size : int, or Iterable of tuples or ints : default None + input_shapes : int, or Iterable of tuples or ints : default None specifies default_variable as array(s) of zeros if **default_variable** is not passed as an argument; if **default_variable** is specified, it must be equivalent to - **size**. + **input_shapes**. For example, the following Mechanisms are equivalent:: - my_mech = ProcessingMechanism(size = [3, 2]) + my_mech = ProcessingMechanism(input_shapes = [3, 2]) my_mech = ProcessingMechanism(default_variable = [[0, 0, 0], [0, 0]]) - When specified as an iterable, each element of **size** is used + When specified as an iterable, each element of **input_shapes** is used as the size of the corresponding InputPort. input_ports : str, list, dict, or np.ndarray : default None specifies the InputPorts for the Mechanism; if it is not specified, a single InputPort is created using the value of default_variable as its `variable `; if more than one is specified, the number and, if specified, their values must be compatible with any specifications made for - **default_variable** or **size** (see `Mechanism_InputPorts` for additional details). + **default_variable** or **input_shapes** (see `Mechanism_InputPorts` for additional details). input_labels : dict specifies labels (strings) that can be used to specify numeric values as input to the Mechanism; @@ -1698,7 +1698,7 @@ def _parse_output_ports(self, output_ports): @abc.abstractmethod def __init__(self, default_variable=None, - size=None, + input_shapes=None, input_ports=None, input_labels=None, function=None, @@ -1717,7 +1717,7 @@ def __init__(self, NOTES: * Since Mechanism is a subclass of Component, it calls super.__init__ - to validate size and default_variable and param_defaults; + to validate input_shapes and default_variable and param_defaults; it uses INPUT_PORT as the default_variable * registers Mechanism with MechanismRegistry @@ -1761,7 +1761,7 @@ def __init__(self, super(Mechanism_Base, self).__init__( default_variable=default_variable, - size=size, + input_shapes=input_shapes, function=function, param_defaults=params, prefs=prefs, @@ -1794,9 +1794,9 @@ def _parse_arg_variable(self, variable): # Handlers # ------------------------------------------------------------------------------------------------------------------ - def _handle_default_variable(self, default_variable=None, size=None, input_ports=None, function=None, params=None): + def _handle_default_variable(self, default_variable=None, input_shapes=None, input_ports=None, function=None, params=None): """ - Finds whether default_variable can be determined using **default_variable** and **size** + Finds whether default_variable can be determined using **default_variable** and **input_shapes** arguments. 
Returns @@ -1827,20 +1827,20 @@ def _handle_default_variable(self, default_variable=None, size=None, input_ports if default_variable_from_input_ports is not None: if default_variable is None: - if size is None: + if input_shapes is None: default_variable = default_variable_from_input_ports else: if input_ports_variable_was_specified: - size_variable = self._handle_size(size, None) - if iscompatible(size_variable, default_variable_from_input_ports): + input_shapes_variable = self._handle_input_shapes(input_shapes, None) + if iscompatible(input_shapes_variable, default_variable_from_input_ports): default_variable = default_variable_from_input_ports else: raise MechanismError( f'Default variable for {self.name} determined from the specified input_ports spec ' f'({default_variable_from_input_ports}) is not compatible with the default variable ' - f'determined from size parameter ({size_variable}).') + f'determined from input_shapes parameter ({input_shapes_variable}).') else: - # do not pass input_ports variable as default_variable, fall back to size specification + # do not pass input_ports variable as default_variable, fall back to input_shapes specification pass else: if input_ports_variable_was_specified: @@ -1853,7 +1853,7 @@ def _handle_default_variable(self, default_variable=None, size=None, input_ports # do not pass input_ports variable as default_variable, fall back to default_variable specification pass - return super()._handle_default_variable(default_variable=default_variable, size=size) + return super()._handle_default_variable(default_variable=default_variable, input_shapes=input_shapes) def _handle_arg_input_ports(self, input_ports): """ diff --git a/psyneulink/core/components/mechanisms/modulatory/control/controlmechanism.py b/psyneulink/core/components/mechanisms/modulatory/control/controlmechanism.py index 4f77c962ef0..549489c5ce6 100644 --- a/psyneulink/core/components/mechanisms/modulatory/control/controlmechanism.py +++ b/psyneulink/core/components/mechanisms/modulatory/control/controlmechanism.py @@ -630,7 +630,7 @@ EID_SIMULATION, FEEDBACK, FUNCTION, GATING_SIGNAL, INIT_EXECUTE_METHOD_ONLY, INTERNAL_ONLY, NAME, \ MECHANISM, MULTIPLICATIVE, MODULATORY_SIGNALS, MONITOR_FOR_CONTROL, MONITOR_FOR_MODULATION, \ OBJECTIVE_MECHANISM, OUTCOME, OWNER_VALUE, PARAMS, PORT_TYPE, PRODUCT, PROJECTION_TYPE, PROJECTIONS, \ - REFERENCE_VALUE, SEPARATE, SIZE, VALUE + REFERENCE_VALUE, SEPARATE, INPUT_SHAPES, VALUE from psyneulink.core.globals.parameters import Parameter, check_user_specified from psyneulink.core.globals.context import Context from psyneulink.core.globals.preferences.basepreferenceset import ValidPrefSet @@ -1276,7 +1276,7 @@ def _validate_output_ports(self, control): @beartype def __init__(self, default_variable=None, - size=None, + input_shapes=None, monitor_for_control: Optional[Union[Iterable, Mechanism, OutputPort]] = None, objective_mechanism=None, allow_probes: bool = False, @@ -1341,7 +1341,7 @@ def __init__(self, super(ControlMechanism, self).__init__( default_variable=default_variable, - size=size, + input_shapes=input_shapes, modulation=modulation, params=params, name=name, @@ -1498,9 +1498,9 @@ def _instantiate_objective_mechanism(self, input_ports=None, context=None): # Get size of ObjectiveMechanism's OUTCOME OutputPort, and then append sizes of other any InputPorts passed in outcome_input_port_size = self.objective_mechanism.output_ports[OUTCOME].value.size - outcome_input_port = {SIZE:outcome_input_port_size, - NAME:OUTCOME, - PARAMS:{INTERNAL_ONLY:True}} + 
outcome_input_port = {INPUT_SHAPES:outcome_input_port_size, + NAME:OUTCOME, + PARAMS:{INTERNAL_ONLY:True}} other_input_port_value_sizes, _ = self._handle_arg_input_ports(other_input_ports) input_port_value_sizes = [outcome_input_port_size] + other_input_port_value_sizes input_ports = [outcome_input_port] + other_input_ports diff --git a/psyneulink/core/components/mechanisms/modulatory/control/gating/gatingmechanism.py b/psyneulink/core/components/mechanisms/modulatory/control/gating/gatingmechanism.py index f76fbb90981..c5900868420 100644 --- a/psyneulink/core/components/mechanisms/modulatory/control/gating/gatingmechanism.py +++ b/psyneulink/core/components/mechanisms/modulatory/control/gating/gatingmechanism.py @@ -257,11 +257,11 @@ class GatingMechanism(ControlMechanism): the default value for each of the GatingMechanism's GatingSignals; its length must equal the number of items specified in the **gate** argument. - size : int, list or 1d np.array of ints + input_shapes : int, list or 1d np.array of ints specifies default_gating_allocation as an array of zeros if **default_gating_allocation** is not passed as an - argument; if **default_gating_allocation** is specified, it takes precedence over the specification of **size**. + argument; if **default_gating_allocation** is specified, it takes precedence over the specification of **input_shapes**. As an example, the following mechanisms are equivalent:: - T1 = TransferMechanism(size = [3, 2]) + T1 = TransferMechanism(input_shapes = [3, 2]) T2 = TransferMechanism(default_variable = [[0, 0, 0], [0, 0]]) monitor_for_gating : List[OutputPort or Mechanism] : default None @@ -308,7 +308,7 @@ class GatingMechanism(ControlMechanism): variable : value, list or ndarray used as the input to the GatingMechanism's `function `. Its format is determined - by the **default_gating_allocation** or **size** argument of the GatingMechanism's constructor (see above), + by the **default_gating_allocation** or **input_shapes** argument of the GatingMechanism's constructor (see above), and is the same format as its `gating_allocation ` (unless a custom `function ` has been assigned). 
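The dict-style specification used for outcome_input_port above also works in user code; a sketch (treating pnl.INPUT_SHAPES as the package-level export of the renamed keyword, which is an assumption)::

    import psyneulink as pnl

    # INPUT_SHAPES in a port specification dict stands in for an explicit
    # VARIABLE; per the InputPort handling later in this patch, it is
    # converted to np.zeros(3)
    mech = pnl.ProcessingMechanism(
        input_ports=[{pnl.NAME: 'OUTCOME', pnl.INPUT_SHAPES: 3}])
    print(mech.input_ports['OUTCOME'].variable)   # [0. 0. 0.]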
@@ -442,7 +442,7 @@ class Parameters(ControlMechanism.Parameters): @beartype def __init__(self, default_gating_allocation=None, - size=None, + input_shapes=None, monitor_for_gating=None, function=None, default_allocation: Optional[Union[int, float, list, np.ndarray]] = None, @@ -470,7 +470,7 @@ def __init__(self, f"'default_gating_allocation'.") super().__init__(default_variable=default_gating_allocation, - size=size, + input_shapes=input_shapes, monitor_for_control=monitor_for_gating, function=function, default_allocation=default_allocation, diff --git a/psyneulink/core/components/mechanisms/modulatory/learning/learningmechanism.py b/psyneulink/core/components/mechanisms/modulatory/learning/learningmechanism.py index 7512d990c32..5ad1a3cac55 100644 --- a/psyneulink/core/components/mechanisms/modulatory/learning/learningmechanism.py +++ b/psyneulink/core/components/mechanisms/modulatory/learning/learningmechanism.py @@ -1091,7 +1091,7 @@ def _parse_error_sources(self, error_sources): def __init__(self, # default_variable:Union[list, np.ndarray], default_variable=None, - size=None, + input_shapes=None, covariates_sources: Optional[Union[InputPort, list]] = None, error_sources: Optional[Union[Mechanism, list]] = None, function=None, @@ -1121,7 +1121,7 @@ def __init__(self, super().__init__( default_variable=default_variable, - size=size, + input_shapes=input_shapes, covariates_sources=covariates_sources, error_sources=error_sources, function=function, diff --git a/psyneulink/core/components/mechanisms/modulatory/modulatorymechanism.py b/psyneulink/core/components/mechanisms/modulatory/modulatorymechanism.py index 2c3a634c416..093c4143d4a 100644 --- a/psyneulink/core/components/mechanisms/modulatory/modulatorymechanism.py +++ b/psyneulink/core/components/mechanisms/modulatory/modulatorymechanism.py @@ -194,7 +194,7 @@ class Parameters(Mechanism_Base.Parameters): @check_user_specified def __init__(self, default_variable, - size, + input_shapes, modulation, params, name, @@ -208,7 +208,7 @@ def __init__(self, super().__init__( default_variable=default_variable, - size=size, + input_shapes=input_shapes, modulation=modulation, params=params, name=name, diff --git a/psyneulink/core/components/mechanisms/processing/compositioninterfacemechanism.py b/psyneulink/core/components/mechanisms/processing/compositioninterfacemechanism.py index 8176a6913f3..d6cbd10861f 100644 --- a/psyneulink/core/components/mechanisms/processing/compositioninterfacemechanism.py +++ b/psyneulink/core/components/mechanisms/processing/compositioninterfacemechanism.py @@ -180,7 +180,7 @@ class Parameters(ProcessingMechanism_Base.Parameters): @beartype def __init__(self, default_variable=None, - size=None, + input_shapes=None, input_ports: Optional[Union[Iterable, Mechanism, OutputPort, InputPort]] = None, function=None, composition=None, @@ -197,7 +197,7 @@ def __init__(self, OUTPUT_PORTS: set() } super(CompositionInterfaceMechanism, self).__init__(default_variable=default_variable, - size=size, + input_shapes=input_shapes, input_ports=input_ports, function=function, params=params, diff --git a/psyneulink/core/components/mechanisms/processing/defaultprocessingmechanism.py b/psyneulink/core/components/mechanisms/processing/defaultprocessingmechanism.py index 3b4255bdfe8..88e3c29769e 100644 --- a/psyneulink/core/components/mechanisms/processing/defaultprocessingmechanism.py +++ b/psyneulink/core/components/mechanisms/processing/defaultprocessingmechanism.py @@ -59,7 +59,7 @@ class Parameters(Mechanism_Base.Parameters): @beartype 
def __init__(self, default_variable=None, - size=None, + input_shapes=None, params=None, name=None, prefs: Optional[ValidPrefSet] = None, @@ -69,14 +69,14 @@ def __init__(self, """Add Linear as default function, assign default name, and call super.__init__ :param default_variable: (value) - :param size: (int or list/array of ints) + :param input_shapes: (int or list/array of ints) :param params: (dict) :param name: (str) :param prefs: (PreferenceSet) """ super(DefaultProcessingMechanism_Base, self).__init__(default_variable=default_variable, - size=size, + input_shapes=input_shapes, function=function, params=params, name=name, diff --git a/psyneulink/core/components/mechanisms/processing/integratormechanism.py b/psyneulink/core/components/mechanisms/processing/integratormechanism.py index e6cf9a5e20c..e87174cd023 100644 --- a/psyneulink/core/components/mechanisms/processing/integratormechanism.py +++ b/psyneulink/core/components/mechanisms/processing/integratormechanism.py @@ -44,7 +44,7 @@ >>> my_time_averaging_mechanism = pnl.IntegratorMechanism(function=pnl.AdaptiveIntegrator(rate=0.5)) The **default_variable** argument specifies the format of its input (i.e., whether it is a single scalar or an -array), as well as the value to use if none is provided when Mechanism is executed. Alternatively, the **size** +array), as well as the value to use if none is provided when Mechanism is executed. Alternatively, the **input_shapes** argument can be used to specify the length of the array, in which case it will be initialized with all zeros. .. _IntegratorMechanism_Structure: @@ -67,11 +67,11 @@ When an IntegratorMechanism is executed, it carries out the specified integration, and assigns the result to the `value ` of its `primary OutputPort `. For the default function -(`IntegratorFunction`), if the value specified for **default_variable** is a list or array, or **size** is greater +(`IntegratorFunction`), if the value specified for **default_variable** is a list or array, or **input_shapes** is greater than 1, each element of the array is independently integrated. If its `rate ` parameter is a single value, that rate is used for integrating each element. If the `rate ` parameter is a list or array, then each element is used as the rate for the corresponding element of the input (in this case, `rate -` must be the same length as the value specified for **default_variable** or **size**). +` must be the same length as the value specified for **default_variable** or **input_shapes**). Integration can be reset to the value of its `function `\\s `initializer by setting its `reset ` parameter to a non-zero value, as described below. 
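A sketch of the element-wise integration described above, with rate given per element (the values are illustrative)::

    import psyneulink as pnl

    m = pnl.IntegratorMechanism(
        input_shapes=3,
        function=pnl.AdaptiveIntegrator(rate=[0.1, 0.5, 0.9]))

    # each element integrates independently toward its input; starting
    # from zeros, the first call returns approximately [[0.1, 0.5, 0.9]]
    print(m.execute([1.0, 1.0, 1.0]))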
@@ -204,7 +204,7 @@ class Parameters(ProcessingMechanism_Base.Parameters): @beartype def __init__(self, default_variable=None, - size=None, + input_shapes=None, input_ports:Optional[Union[list, dict]]=None, function=None, reset_default=0, @@ -217,7 +217,7 @@ def __init__(self, """ super(IntegratorMechanism, self).__init__(default_variable=default_variable, - size=size, + input_shapes=input_shapes, function=function, reset_default=reset_default, params=params, @@ -232,7 +232,7 @@ def __init__(self, # def _parse_function_variable(self, variable, context=None, context=None): # super()._parse_function_variable(variable, context, context) - def _handle_default_variable(self, default_variable=None, size=None, input_ports=None, function=None, params=None): + def _handle_default_variable(self, default_variable=None, input_shapes=None, input_ports=None, function=None, params=None): """If any parameters with len>1 have been specified for the Mechanism's function, and Mechanism's default_variable has not been specified, reshape Mechanism's variable to match function's, but make sure function's has the same outer dimensionality as the Mechanism's @@ -281,7 +281,7 @@ def _handle_default_variable(self, default_variable=None, size=None, input_ports # as the reshaping of the function's variable will be taken care of in _instantiate_function return super()._handle_default_variable(default_variable=variable, - size=size, + input_shapes=input_shapes, input_ports=input_ports, function=function, params=params) diff --git a/psyneulink/core/components/mechanisms/processing/objectivemechanism.py b/psyneulink/core/components/mechanisms/processing/objectivemechanism.py index 1599b540756..44abba283cf 100644 --- a/psyneulink/core/components/mechanisms/processing/objectivemechanism.py +++ b/psyneulink/core/components/mechanisms/processing/objectivemechanism.py @@ -73,7 +73,7 @@ monitor the specified OutputPort. In general, the `value ` of each specified OutputPort determines the format of the `variable ` of the InputPort that is created for it by the ObjectiveMechanism. However, this can be overridden using the ObjectiveMechanism's `default_variable ` -or `size ` attributes (see `Mechanism InputPort specification +or `input_shapes ` attributes (see `Mechanism InputPort specification `), or by specifying a Projection from the OutputPort to the InputPort (see `Input Source Specification `). If an item in the **monitor** argument specifies an InputPort for the ObjectiveMechanism, but not the OutputPort to @@ -154,7 +154,7 @@ By default, the format of the `variable ` for each InputPort is determined by the `value ` of the monitored OutputPort(s) to which it corresponds. However, if either the -**default_variable** or **size** argument is specified in an Objective Mechanism's constructor, or a `variable +**default_variable** or **input_shapes** argument is specified in an Objective Mechanism's constructor, or a `variable ` is `specified for an InputPort ` for one or more of the items in its **monitor** argument, then that is used as the format for the corresponding InputPort(s). 
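The monitor-driven defaults described above can be seen in a two-source sketch::

    import psyneulink as pnl

    a = pnl.TransferMechanism(input_shapes=2)
    b = pnl.TransferMechanism(input_shapes=2)

    # one InputPort per monitored OutputPort, each matching the format
    # of the monitored value
    om = pnl.ObjectiveMechanism(monitor=[a, b])
    print(len(om.input_ports))   # 2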
This can be used to transform the `value ` of a monitored OutputPort into different form for the `variable @@ -567,7 +567,7 @@ class Parameters(ProcessingMechanism_Base.Parameters): def __init__(self, monitor=None, default_variable=None, - size=None, + input_shapes=None, function=None, output_ports: Optional[Union[str, Iterable]] = None, params=None, @@ -590,7 +590,7 @@ def __init__(self, super().__init__( default_variable=default_variable, - size=size, + input_shapes=input_shapes, monitor=monitor, output_ports=output_ports, function=function, diff --git a/psyneulink/core/components/mechanisms/processing/processingmechanism.py b/psyneulink/core/components/mechanisms/processing/processingmechanism.py index c2ccbcecab4..58a3c649ffb 100644 --- a/psyneulink/core/components/mechanisms/processing/processingmechanism.py +++ b/psyneulink/core/components/mechanisms/processing/processingmechanism.py @@ -64,7 +64,7 @@ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ As with any `Mechanism`, the number of InputPorts can be specified using the **input_ports**, **default_variable** or -**size** arguments of the constructor (see `Mechanism_InputPorts`), and OutputPorts can be specified using the +**input_shapes** arguments of the constructor (see `Mechanism_InputPorts`), and OutputPorts can be specified using the **output_ports** argument (see `Mechanism_OutputPorts`). These can be used to configure processing in a variety of ways. Some common ones are described below (also see `ProcessingMechanism_Examples`). @@ -269,7 +269,7 @@ class ProcessingMechanism_Base(Mechanism_Base): @check_user_specified def __init__(self, default_variable=None, - size=None, + input_shapes=None, input_ports=None, function=None, output_ports=None, @@ -282,7 +282,7 @@ def __init__(self, """Abstract class for processing mechanisms :param variable: (value) - :param size: (int or list/array of ints) + :param input_shapes: (int or list/array of ints) :param params: (dict) :param name: (str) :param prefs: (PreferenceSet) @@ -290,7 +290,7 @@ def __init__(self, """ super().__init__(default_variable=default_variable, - size=size, + input_shapes=input_shapes, input_ports=input_ports, function=function, output_ports=output_ports, @@ -378,7 +378,7 @@ class ProcessingMechanism(ProcessingMechanism_Base): @beartype def __init__(self, default_variable=None, - size=None, + input_shapes=None, input_ports:Optional[Union[Iterable, Mechanism, OutputPort, InputPort]]=None, output_ports:Optional[Union[str, Iterable]]=None, function=None, @@ -387,7 +387,7 @@ def __init__(self, prefs: Optional[ValidPrefSet] = None, **kwargs): super(ProcessingMechanism, self).__init__(default_variable=default_variable, - size=size, + input_shapes=input_shapes, input_ports=input_ports, function=function, output_ports=output_ports, diff --git a/psyneulink/core/components/mechanisms/processing/transfermechanism.py b/psyneulink/core/components/mechanisms/processing/transfermechanism.py index 61268e3d066..842e72e40c3 100644 --- a/psyneulink/core/components/mechanisms/processing/transfermechanism.py +++ b/psyneulink/core/components/mechanisms/processing/transfermechanism.py @@ -110,7 +110,7 @@ ~~~~~~~~~~~~~ By default, a TransferMechanism has a single `InputPort`; however, more than one can be specified -using the **default_variable** or **size** arguments of its constructor (see `Mechanism`). The `value +using the **default_variable** or **input_shapes** arguments of its constructor (see `Mechanism`). 
The `value ` of each InputPort is used as a separate item of the Mechanism's `variable `, and transformed independently by its `function `. @@ -417,12 +417,12 @@ `value ` and the `value ` of its `output_ports ` without using its `integrator_function `, as in the following example:: - # >>> my_mech = pnl.TransferMechanism(size=2) + # >>> my_mech = pnl.TransferMechanism(input_shapes=2) # >>> my_mech.execute([0.5, 1]) # array([[0.5, 1. ]]) >>> my_logistic_tm = pnl.TransferMechanism(function=pnl.Logistic, - ... size=3) + ... input_shapes=3) >>> my_logistic_tm.execute([-2.0, 0, 2.0]) array([[0.11920292, 0.5 , 0.88079708]]) @@ -431,7 +431,7 @@ the value is simply added to the result, as shown in the example below, that uses the TransferMechanism's default `function `, `Linear`:: - >>> my_linear_tm = pnl.TransferMechanism(size=3, + >>> my_linear_tm = pnl.TransferMechanism(input_shapes=3, ... noise=2.0) >>> my_linear_tm.execute([1.0, 1.0, 1.0]) array([[3., 3., 3.]]) @@ -452,7 +452,7 @@ is specified, it is applied to all elements; however, on each execution, the function is executed indpendently for each element. This is shown below using the `NormalDist` function:: - >>> my_linear_tm = pnl.TransferMechanism(size=3, + >>> my_linear_tm = pnl.TransferMechanism(input_shapes=3, ... noise=pnl.NormalDist) >>> my_linear_tm.execute([1.0, 1.0, 1.0]) array([[2.1576537 , 1.60782117, 0.75840058]]) @@ -466,7 +466,7 @@ can also be used in a list to specify **noise**, together with other functions or with numeric values; however, when used in a list, functions must be instances, as shown below:: - >>> my_linear_tm = pnl.TransferMechanism(size=3, + >>> my_linear_tm = pnl.TransferMechanism(input_shapes=3, ... noise=[pnl.NormalDist(), pnl.UniformDist(), 3.0]) >>> my_linear_tm.execute([1.0, 1.0, 1.0]) array([[-0.22503678, 1.36995517, 4. ]]) @@ -509,7 +509,7 @@ results that begin close to its `initializer ` and asymptotically approach the value of the current input, which in this example is [1.0, 1.0, 1,0] for each execution:: - >>> my_linear_tm = pnl.TransferMechanism(size=3, + >>> my_linear_tm = pnl.TransferMechanism(input_shapes=3, ... function=pnl.Linear, ... integrator_mode=True, ... initial_value=np.array([[0.1, 0.5, 0.9]]), @@ -662,7 +662,7 @@ and the scalar returned is compared to **termination_threshold** using the comparison operator specified by **termination_comparison_op**. Execution continues until this returns True, as in the following example:: - >>> my_mech = pnl.TransferMechanism(size=2, + >>> my_mech = pnl.TransferMechanism(input_shapes=2, ... integrator_mode=True, ... termination_measure=max, ... termination_threshold=0.9, @@ -687,7 +687,7 @@ ` is automatically set to *GREATER_THAN_OR_EQUAL*). For example, ``my_mech`` is configured below to execute at least twice per trial:: - >>> my_mech = pnl.TransferMechanism(size=2, + >>> my_mech = pnl.TransferMechanism(input_shapes=2, ... integrator_mode=True, ... termination_measure=TimeScale.TRIAL, ... 
termination_threshold=2) @@ -713,20 +713,20 @@ which feature of the stimulus should be attended) before a stimulus is presented, and then allowing that Mechanism to continue to integrate the instruction and impact stimulus processing once the stimulus is presented:: - >>> stim_input = pnl.ProcessingMechanism(size=2) - >>> stim_percept = pnl.TransferMechanism(size=2, function=pnl.Logistic) - >>> decision = pnl.TransferMechanism(name='Decision', size=2, + >>> stim_input = pnl.ProcessingMechanism(input_shapes=2) + >>> stim_percept = pnl.TransferMechanism(input_shapes=2, function=pnl.Logistic) + >>> decision = pnl.TransferMechanism(name='Decision', input_shapes=2, ... integrator_mode=True, ... execute_until_finished=False, ... termination_threshold=0.65, ... termination_measure=max, ... termination_comparison_op=pnl.GREATER_THAN) - >>> instruction_input = pnl.ProcessingMechanism(size=2, function=pnl.Linear(slope=10)) - >>> attention = pnl.LCAMechanism(name='Attention', size=2, function=pnl.Logistic, + >>> instruction_input = pnl.ProcessingMechanism(input_shapes=2, function=pnl.Linear(slope=10)) + >>> attention = pnl.LCAMechanism(name='Attention', input_shapes=2, function=pnl.Logistic, ... leak=8, competition=8, self_excitation=0, time_step_size=.1, ... termination_threshold=3, ... termination_measure = pnl.TimeScale.TRIAL) - >>> response = pnl.ProcessingMechanism(name='Response', size=2) + >>> response = pnl.ProcessingMechanism(name='Response', input_shapes=2) ... >>> comp = pnl.Composition() >>> comp.add_linear_processing_pathway([stim_input, [[1,-1],[-1,1]], stim_percept, decision, response]) #doctest: +SKIP @@ -1287,7 +1287,7 @@ def _validate_termination_comparison_op(self, termination_comparison_op): @beartype def __init__(self, default_variable=None, - size=None, + input_shapes=None, input_ports: Optional[Union[Iterable, Mechanism, OutputPort, InputPort]] = None, function=None, noise=None, @@ -1320,7 +1320,7 @@ def __init__(self, super(TransferMechanism, self).__init__( default_variable=default_variable, - size=size, + input_shapes=input_shapes, input_ports=input_ports, output_ports=output_ports, initial_value=initial_value, diff --git a/psyneulink/core/components/ports/inputport.py b/psyneulink/core/components/ports/inputport.py index 07b90a33684..f226a1bdfdb 100644 --- a/psyneulink/core/components/ports/inputport.py +++ b/psyneulink/core/components/ports/inputport.py @@ -59,10 +59,10 @@ `. InputPorts can also be specified in the **input_ports** argument of a Mechanism's constructor (see `below `). -The `variable ` of an InputPort can be specified using the **variable** or **size** arguments of -its constructor. It can also be specified using the **projections** argument, if neither **variable** nor **size** is +The `variable ` of an InputPort can be specified using the **variable** or **input_shapes** arguments of +its constructor. It can also be specified using the **projections** argument, if neither **variable** nor **input_shapes** is specified. The **projections** argument is used to `specify Projections ` to the InputPort. If -neither the **variable** nor **size** arguments is specified, then the value of the `Projections(s) ` or +neither the **variable** nor **input_shapes** arguments is specified, then the value of the `Projections(s) ` or their `sender `\\s (all of which must be the same length) is used to determine the `variable ` of the InputPort. 
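Tying together the integrator examples above: repeated execution in integrator_mode moves the value from initial_value toward the current input. A sketch (integration_rate as the name of the smoothing parameter is an assumption)::

    import numpy as np
    import psyneulink as pnl

    tm = pnl.TransferMechanism(input_shapes=3,
                               integrator_mode=True,
                               initial_value=np.array([[0.1, 0.5, 0.9]]),
                               integration_rate=0.5)

    for _ in range(3):
        print(tm.execute([1.0, 1.0, 1.0]))
    # successive values close half the remaining distance to [1., 1., 1.]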
@@ -590,7 +590,7 @@ INPUT_PORT, INPUT_PORTS, INPUT_PORT_PARAMS, \ LEARNING_SIGNAL, MAPPING_PROJECTION, MATRIX, NAME, OPERATION, OUTPUT_PORT, OUTPUT_PORTS, OWNER, \ PARAMS, PROJECTIONS, REFERENCE_VALUE, \ - SENDER, SHADOW_INPUTS, SHADOW_INPUT_NAME, SIZE, PORT_TYPE, SUM, VALUE, VARIABLE, WEIGHT + SENDER, SHADOW_INPUTS, SHADOW_INPUT_NAME, INPUT_SHAPES, PORT_TYPE, SUM, VALUE, VARIABLE, WEIGHT from psyneulink.core.globals.parameters import Parameter, check_user_specified from psyneulink.core.globals.preferences.basepreferenceset import ValidPrefSet from psyneulink.core.globals.preferences.preferenceset import PreferenceLevel @@ -681,7 +681,7 @@ class InputPort(Port_Base): `GatingProjection(s) ` to be received by the InputPort, and that are listed in its `path_afferents ` and `mod_afferents ` attributes, respectively (see `InputPort_Compatability_and_Constraints` for additional details). If **projections** but - neither **variable** nor **size** are specified, then the `value ` of the Projection(s) + neither **variable** nor **input_shapes** are specified, then the `value ` of the Projection(s) or their `senders ` specified in **projections** argument are used to determine the InputPort's `variable `. @@ -701,7 +701,7 @@ class InputPort(Port_Base): variable : value, list or np.ndarray the template for the `value ` of each Projection that the InputPort receives, each of which must match the format (number and types of elements) of the InputPort's - `variable `. If neither the **variable** or **size** argument is specified, and + `variable `. If neither the **variable** or **input_shapes** argument is specified, and **projections** is specified, then `variable ` is assigned the `value ` of the Projection(s) or its `sender `. @@ -878,7 +878,7 @@ def __init__(self, owner=None, reference_value=None, variable=None, - size=None, + input_shapes=None, default_input=None, function=None, projections=None, @@ -892,8 +892,8 @@ def __init__(self, context=None, **kwargs): - if variable is None and size is None and projections is not None: - variable = self._assign_variable_from_projection(variable, size, projections) + if variable is None and input_shapes is None and projections is not None: + variable = self._assign_variable_from_projection(variable, input_shapes, projections) # If combine argument is specified, save it along with any user-specified function for _validate_params() if combine: @@ -922,7 +922,7 @@ def __init__(self, super(InputPort, self).__init__( owner, variable=variable, - size=size, + input_shapes=input_shapes, projections=projections, function=function, weight=weight, @@ -938,7 +938,7 @@ def __init__(self, if self.name is self.componentName or self.componentName + '-' in self.name: self._assign_default_port_Name() - def _assign_variable_from_projection(self, variable, size, projections): + def _assign_variable_from_projection(self, variable, input_shapes, projections): """Assign variable to value of Projection in projections """ from psyneulink.core.components.projections.projection import \ @@ -1153,16 +1153,16 @@ def _parse_port_specific_specs(self, owner, port_dict, port_specific_spec, conte # if MECHANISM in port_specific_spec: # if OUTPUT_PORTS in port_specific_spec - if any(spec in port_specific_spec for spec in {SIZE, COMBINE}): + if any(spec in port_specific_spec for spec in {INPUT_SHAPES, COMBINE}): - if SIZE in port_specific_spec: + if INPUT_SHAPES in port_specific_spec: if (VARIABLE in port_specific_spec or - any(key in port_dict and port_dict[key] is not None for key in 
{VARIABLE, SIZE})):
+                            any(key in port_dict and port_dict[key] is not None for key in {VARIABLE, INPUT_SHAPES})):
-                        raise InputPortError(f"PROGRAM ERROR: SIZE specification found in port_specific_spec dict "
-                                             f"for {self.__name__} specification of {owner.name} when SIZE or VARIABLE "
-                                             f"is already present in its port_specific_spec dict or port_dict.")
+                        raise InputPortError(f"PROGRAM ERROR: INPUT_SHAPES specification found in port_specific_spec dict "
+                                             f"for {self.__name__} specification of {owner.name} when INPUT_SHAPES or VARIABLE "
+                                             f"is already present in its port_specific_spec dict or port_dict.")
-                port_dict.update({VARIABLE:np.zeros(port_specific_spec[SIZE])})
-                del port_specific_spec[SIZE]
+                port_dict.update({VARIABLE:np.zeros(port_specific_spec[INPUT_SHAPES])})
+                del port_specific_spec[INPUT_SHAPES]

             if COMBINE in port_specific_spec:
                 fct_err = None
@@ -1395,7 +1395,7 @@ def _port_spec_allows_override_variable(spec):
     Returns
     -------
         True - if **spec** outlines a spec for creating an InputPort whose variable can be
-            overridden by a default_variable or size argument
+            overridden by a default_variable or input_shapes argument
         False - otherwise

         ex: specifying an InputPort with a Mechanism allows overriding
diff --git a/psyneulink/core/components/ports/modulatorysignals/controlsignal.py b/psyneulink/core/components/ports/modulatorysignals/controlsignal.py
index 6a6513a9901..fbe1e4d5c9e 100644
--- a/psyneulink/core/components/ports/modulatorysignals/controlsignal.py
+++ b/psyneulink/core/components/ports/modulatorysignals/controlsignal.py
@@ -795,7 +795,7 @@ def __init__(self,
                  owner=None,
                  reference_value=None,
                  default_allocation=None,
-                 size=None,
+                 input_shapes=None,
                  transfer_function=None,
                  cost_options: Optional[Union[CostFunctions, list]] = None,
                  intensity_cost_function:Optional[Callable] = None,
@@ -857,7 +857,7 @@
                          owner=owner,
                          reference_value=reference_value,
                          default_allocation=default_allocation,
-                         size=size,
+                         input_shapes=input_shapes,
                          transfer_function=transfer_function,
                          modulation=modulation,
                          modulates=control,
diff --git a/psyneulink/core/components/ports/modulatorysignals/gatingsignal.py b/psyneulink/core/components/ports/modulatorysignals/gatingsignal.py
index 74494a578ad..2e76d8381e8 100644
--- a/psyneulink/core/components/ports/modulatorysignals/gatingsignal.py
+++ b/psyneulink/core/components/ports/modulatorysignals/gatingsignal.py
@@ -182,9 +182,9 @@
 *MULTIPLICATIVE_PARAM* of an InputPort's `function `. In the example, this is changed so that
 it *adds* the `value ` of the `GatingSignal` to the `value ` of each InputPort::

-    >>> my_input_layer = pnl.TransferMechanism(size=3)
-    >>> my_hidden_layer = pnl.TransferMechanism(size=5)
-    >>> my_output_layer = pnl.TransferMechanism(size=2)
+    >>> my_input_layer = pnl.TransferMechanism(input_shapes=3)
+    >>> my_hidden_layer = pnl.TransferMechanism(input_shapes=5)
+    >>> my_output_layer = pnl.TransferMechanism(input_shapes=2)
     >>> my_gating_mechanism = pnl.GatingMechanism(gating_signals=[{pnl.NAME: 'GATE_ALL',
     ...                                                            pnl.PROJECTIONS: [my_input_layer,
     ...
my_hidden_layer, @@ -422,7 +422,7 @@ def __init__(self, owner=None, reference_value=None, default_allocation=defaultGatingAllocation, - size=None, + input_shapes=None, transfer_function=None, modulation:Optional[str]=None, gate=None, @@ -466,7 +466,7 @@ def __init__(self, super().__init__(owner=owner, reference_value=reference_value, default_allocation=default_allocation, - size=size, + input_shapes=input_shapes, modulation=modulation, control=gate, params=params, diff --git a/psyneulink/core/components/ports/modulatorysignals/learningsignal.py b/psyneulink/core/components/ports/modulatorysignals/learningsignal.py index b4d74b151eb..6429c510f8b 100644 --- a/psyneulink/core/components/ports/modulatorysignals/learningsignal.py +++ b/psyneulink/core/components/ports/modulatorysignals/learningsignal.py @@ -357,7 +357,7 @@ def __init__(self, owner=None, reference_value=None, variable=None, - size=None, + input_shapes=None, index=PRIMARY, assign=None, function=None, @@ -378,7 +378,7 @@ def __init__(self, super().__init__(owner=owner, reference_value=reference_value, variable=variable, - size=size, + input_shapes=input_shapes, modulation=modulation, index=index, assign=None, diff --git a/psyneulink/core/components/ports/modulatorysignals/modulatorysignal.py b/psyneulink/core/components/ports/modulatorysignals/modulatorysignal.py index bdfafa0b7bc..de87ee2595e 100644 --- a/psyneulink/core/components/ports/modulatorysignals/modulatorysignal.py +++ b/psyneulink/core/components/ports/modulatorysignals/modulatorysignal.py @@ -563,7 +563,7 @@ class Parameters(OutputPort.Parameters): @check_user_specified def __init__(self, owner=None, - size=None, + input_shapes=None, reference_value=None, default_allocation=defaultModulatoryAllocation, function=None, @@ -601,7 +601,7 @@ def __init__(self, super().__init__(owner=owner, reference_value=reference_value, variable=default_allocation, - size=size, + input_shapes=input_shapes, projections=modulates, index=index, assign=assign, diff --git a/psyneulink/core/components/ports/outputport.py b/psyneulink/core/components/ports/outputport.py index fa46d5a54e6..968d3273c33 100644 --- a/psyneulink/core/components/ports/outputport.py +++ b/psyneulink/core/components/ports/outputport.py @@ -916,7 +916,7 @@ def __init__(self, owner=None, reference_value=None, variable=None, - size=None, + input_shapes=None, function=None, projections=None, params=None, @@ -971,7 +971,7 @@ def __init__(self, super().__init__( owner, variable=variable, - size=size, + input_shapes=input_shapes, projections=projections, params=params, name=name, diff --git a/psyneulink/core/components/ports/parameterport.py b/psyneulink/core/components/ports/parameterport.py index b46b11191b9..10994e8acc6 100644 --- a/psyneulink/core/components/ports/parameterport.py +++ b/psyneulink/core/components/ports/parameterport.py @@ -187,7 +187,7 @@ >>> import psyneulink as pnl >>> my_mechanism = pnl.RecurrentTransferMechanism( - ... size=5, + ... input_shapes=5, ... noise=pnl.ControlSignal(), ... function=pnl.Logistic( ... gain=(0.5, pnl.ControlSignal), @@ -198,7 +198,7 @@ default noise value, why are we using a ControlSignal here?? COMMENT -The first argument of the constructor for the Mechanism specifies its `size ` parameter by +The first argument of the constructor for the Mechanism specifies its `input_shapes ` parameter by directly assigning a value to it. The second specifies the `noise ` parameter by assigning a default `ControlSignal`; this will use the default value of the `noise ` attribute. 
The **function** argument is specified using the constructor for @@ -700,7 +700,7 @@ def __init__(self, owner, reference_value=None, variable=None, - size=None, + input_shapes=None, function=None, projections=None, params=None, @@ -726,7 +726,7 @@ def __init__(self, # Note: pass name of Mechanism (to override assignment of componentName in super.__init__) super(ParameterPort, self).__init__(owner, variable=variable, - size=size, + input_shapes=input_shapes, projections=projections, function=function, params=params, diff --git a/psyneulink/core/components/ports/port.py b/psyneulink/core/components/ports/port.py index a3d23e6da86..8e92734b6ba 100644 --- a/psyneulink/core/components/ports/port.py +++ b/psyneulink/core/components/ports/port.py @@ -1019,7 +1019,7 @@ class Parameters(Port.Parameters): def __init__(self, owner: Union[Mechanism, Projection], variable=None, - size=None, + input_shapes=None, projections=None, function=None, params=None, @@ -1039,9 +1039,9 @@ def __init__(self, - variable (value): value of the Port: must be list or tuple of numbers, or a number (in which case it will be converted to a single-item list) must match input and output of Port's _update method, and any sending or receiving projections - - size (int or array/list of ints): + - input_shapes (int or array/list of ints): Sets variable to be array(s) of zeros, if **variable** is not specified as an argument; - if **variable** is specified, it takes precedence over the specification of **size**. + if **variable** is specified, it takes precedence over the specification of **input_shapes**. - params (dict): + if absent, implements default Port determined by PROJECTION_TYPE param + if dict, can have the following entries: @@ -1100,7 +1100,7 @@ def __init__(self, # VALIDATE VARIABLE, PARAM_SPECS, AND INSTANTIATE self.function super(Port_Base, self).__init__( default_variable=variable, - size=size, + input_shapes=input_shapes, function=function, projections=projections, param_defaults=params, diff --git a/psyneulink/core/components/shellclasses.py b/psyneulink/core/components/shellclasses.py index f3865cc1dbf..b39e2de826b 100644 --- a/psyneulink/core/components/shellclasses.py +++ b/psyneulink/core/components/shellclasses.py @@ -73,14 +73,14 @@ class Mechanism(ShellClass): @check_user_specified def __init__(self, default_variable=None, - size=None, + input_shapes=None, function=None, param_defaults=None, name=None, prefs=None, **kwargs): super().__init__(default_variable=default_variable, - size=size, + input_shapes=input_shapes, function=function, param_defaults=param_defaults, name=name, diff --git a/psyneulink/core/compositions/composition.py b/psyneulink/core/compositions/composition.py index c2216c625a4..6e6b78cb68a 100644 --- a/psyneulink/core/compositions/composition.py +++ b/psyneulink/core/compositions/composition.py @@ -1383,11 +1383,11 @@ >>> B = ProcessingMechanism(name='B', default_variable=[0,0,0]) >>> inner_nested_comp = Composition(nodes=[A, B]) - >>> C = ComparatorMechanism(name='C', size=3) + >>> C = ComparatorMechanism(name='C', input_shapes=3) >>> nested_comp_1 = Composition(nodes=[C, inner_nested_comp]) - >>> D = ComparatorMechanism(name='D', size=3) - >>> E = ComparatorMechanism(name='E', size=3) + >>> D = ComparatorMechanism(name='D', input_shapes=3) + >>> E = ComparatorMechanism(name='E', input_shapes=3) >>> nested_comp_2 = Composition([D, E]) >>> F = ComparatorMechanism(name='F') diff --git a/psyneulink/core/compositions/showgraph.py b/psyneulink/core/compositions/showgraph.py index 
29177bd5398..2bb18641d9e 100644 --- a/psyneulink/core/compositions/showgraph.py +++ b/psyneulink/core/compositions/showgraph.py @@ -134,16 +134,16 @@ | >>> from psyneulink import * | .. figure:: _static/Composition_show_graph_basic_fig.svg | | >>> a = ProcessingMechanism( | | | name='A', | | -| ... size=3, | | +| ... input_shapes=3, | | | ... output_ports=[RESULT, MEAN] | | | ... ) | | | >>> b = ProcessingMechanism( | | | ... name='B', | | -| ... size=5 | | +| ... input_shapes=5 | | | ... ) | | | >>> c = ProcessingMechanism( | | | ... name='C', | | -| ... size=2, | | +| ... input_shapes=2, | | | ... function=Logistic(gain=pnl.CONTROL) | | | ... ) | | | >>> comp = Composition( | | diff --git a/psyneulink/core/globals/keywords.py b/psyneulink/core/globals/keywords.py index 4ed8c8335a9..58752b7ae8f 100644 --- a/psyneulink/core/globals/keywords.py +++ b/psyneulink/core/globals/keywords.py @@ -116,7 +116,7 @@ 'RESET_STATEFUL_FUNCTION_WHEN', 'RELU_FUNCTION', 'REST', 'RESULT', 'RESULT', 'ROLES', 'RL_FUNCTION', 'RUN', 'SAMPLE', 'SAVE_ALL_VALUES_AND_POLICIES', 'SCALAR', 'SCALE', 'SCHEDULER', 'SELF', 'SENDER', 'SEPARATE', 'SEPARATOR_BAR', 'SHADOW_INPUT_NAME', 'SHADOW_INPUTS', 'SIMPLE', 'SIMPLE_INTEGRATOR_FUNCTION', 'SIMULATIONS', - 'SINGLE', 'SINGLETON', 'SIZE', 'SLOPE', 'SOFT_CLAMP', 'SOFTMAX_FUNCTION', 'SOURCE', 'STABILITY_FUNCTION', + 'SINGLE', 'SINGLETON', 'INPUT_SHAPES', 'SLOPE', 'SOFT_CLAMP', 'SOFTMAX_FUNCTION', 'SOURCE', 'STABILITY_FUNCTION', 'STANDARD_ARGS', 'STANDARD_DEVIATION', 'STANDARD_OUTPUT_PORTS', 'STORE', 'SUBTRACTION', 'SUM', 'TARGET', 'TARGET_MECHANISM', 'TARGET_LABELS_DICT', 'TERMINAL', 'TARGETS', 'TERMINATION_MEASURE', 'TERMINATION_THRESHOLD', 'TERMINATION_COMPARISION_OP', 'TERSE', 'TEXT', 'THRESHOLD', @@ -916,7 +916,7 @@ class Loss(Enum): MEAN = 'MEAN' MEDIAN = 'MEDIAN' MECHANISM_VALUE = 'MECHANISM_VALUE' -SIZE = 'size' +INPUT_SHAPES = 'input_shapes' K_VALUE = 'k_value' RATIO = 'ratio' diff --git a/psyneulink/core/globals/log.py b/psyneulink/core/globals/log.py index 6d2cce9dde6..5585c4aff39 100644 --- a/psyneulink/core/globals/log.py +++ b/psyneulink/core/globals/log.py @@ -167,8 +167,8 @@ `MappingProjection` from the first to the second:: # Create a Process with two TransferMechanisms, and get a reference for the Projection created between them: - >>> my_mech_A = pnl.TransferMechanism(name='mech_A', size=2) - >>> my_mech_B = pnl.TransferMechanism(name='mech_B', size=3) + >>> my_mech_A = pnl.TransferMechanism(name='mech_A', input_shapes=2) + >>> my_mech_B = pnl.TransferMechanism(name='mech_B', input_shapes=3) >>> my_composition = pnl.Composition(pathways=[my_mech_A, my_mech_B]) >>> proj_A_to_B = my_mech_B.path_afferents[0] diff --git a/psyneulink/core/globals/parameters.py b/psyneulink/core/globals/parameters.py index e2204c6ecdd..1d2bfeae32d 100644 --- a/psyneulink/core/globals/parameters.py +++ b/psyneulink/core/globals/parameters.py @@ -148,7 +148,7 @@ def _modulatory_mechanism_costs_getter(owning_component=None, context=None): def _recurrent_transfer_mechanism_matrix_setter(value, owning_component=None, context=None): try: - value = get_matrix(value, owning_component.size[0], owning_component.size[0]) + value = get_matrix(value, owning_component.input_shapes[0], owning_component.input_shapes[0]) except AttributeError: pass diff --git a/psyneulink/library/components/mechanisms/modulatory/learning/autoassociativelearningmechanism.py b/psyneulink/library/components/mechanisms/modulatory/learning/autoassociativelearningmechanism.py index c7b079750f1..45f60f1c837 100644 --- 
a/psyneulink/library/components/mechanisms/modulatory/learning/autoassociativelearningmechanism.py +++ b/psyneulink/library/components/mechanisms/modulatory/learning/autoassociativelearningmechanism.py @@ -320,7 +320,7 @@ class Parameters(LearningMechanism.Parameters): @beartype def __init__(self, default_variable: Union[list, np.ndarray], - size=None, + input_shapes=None, function: Optional[Callable] = None, learning_signals: Optional[list] = None, modulation: Optional[str] = None, @@ -344,7 +344,7 @@ def __init__(self, # self._learning_rate = learning_rate super().__init__(default_variable=default_variable, - size=size, + input_shapes=input_shapes, function=function, modulation=modulation, learning_rate=learning_rate, diff --git a/psyneulink/library/components/mechanisms/modulatory/learning/kohonenlearningmechanism.py b/psyneulink/library/components/mechanisms/modulatory/learning/kohonenlearningmechanism.py index 8100535b78b..924ad0c5736 100644 --- a/psyneulink/library/components/mechanisms/modulatory/learning/kohonenlearningmechanism.py +++ b/psyneulink/library/components/mechanisms/modulatory/learning/kohonenlearningmechanism.py @@ -321,7 +321,7 @@ class Parameters(LearningMechanism.Parameters): @beartype def __init__(self, default_variable: Union[list, np.ndarray], - size=None, + input_shapes=None, matrix: Optional[ParameterPort] = None, function: Optional[Callable] = None, learning_signals: Optional[list] = None, @@ -345,7 +345,7 @@ def __init__(self, super().__init__( default_variable=default_variable, - size=size, + input_shapes=input_shapes, function=function, modulation=modulation, learning_rate=learning_rate, diff --git a/psyneulink/library/components/mechanisms/processing/integrator/ddm.py b/psyneulink/library/components/mechanisms/processing/integrator/ddm.py index de0f9e40642..55328030c17 100644 --- a/psyneulink/library/components/mechanisms/processing/integrator/ddm.py +++ b/psyneulink/library/components/mechanisms/processing/integrator/ddm.py @@ -767,7 +767,7 @@ class Parameters(ProcessingMechanism.Parameters): @beartype def __init__(self, default_variable=None, - size=None, + input_shapes=None, input_format: Optional[Literal['SCALAR', 'ARRAY', 'VECTOR']] = None, function=None, input_ports=None, @@ -799,7 +799,7 @@ def __init__(self, # These are created here rather than as StandardOutputPorts # since they require input_format==ARRAY to be meaningful if input_format in {ARRAY, VECTOR}: - size=1 # size of variable for DDM Mechanism + input_shapes=1 # size of variable for DDM Mechanism input_ports = [ {NAME:'ARRAY', VARIABLE: np.array([[0.0, 0.0]]), @@ -848,7 +848,7 @@ def __init__(self, # IMPLEMENTATION NOTE: this manner of setting default_variable works but is idiosyncratic # compared to other mechanisms: see TransferMechanism.py __init__ function for a more normal example. - if default_variable is None and size is None: + if default_variable is None and input_shapes is None: try: default_variable = params[FUNCTION_PARAMS][STARTING_VALUE] if not is_numeric(default_variable): @@ -859,7 +859,7 @@ def __init__(self, pass # # Conflict with above - # self.size = size + # self.input_shapes = input_shapes # New (1/19/2021) default behaviour of DDM mechanism is to execute until finished. That # is, it should execute until it reaches its threshold. 
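        # For example (an illustrative sketch; the name and threshold value are arbitrary):
        #     my_ddm = DDM(function=DriftDiffusionIntegrator(threshold=10.0))
        #     my_ddm.execute([0.5])  # carries out time steps until the integral reaches +/-10.0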
@@ -882,7 +882,7 @@ def __init__(self, params=params, name=name, prefs=prefs, - size=size, + input_shapes=input_shapes, **kwargs), self._instantiate_plotting_functions() @@ -964,7 +964,7 @@ def _validate_variable(self, variable, context=None): raise DDMError("Length of input to DDM ({}) is greater than 1, implying there are multiple " "input ports, which is currently not supported in DDM, but may be supported" " in the future under a multi-process DDM. Please use a single numeric " - "item as the default_variable, or use size = 1.".format(variable)) + "item as the default_variable, or use input_shapes = 1.".format(variable)) # # MODIFIED 6/28/17 (CW): changed len(variable) > 1 to len(variable[0]) > 1 # # if not isinstance(variable, numbers.Number) and len(variable[0]) > 1: # if not is_numeric(variable) and len(variable[0]) > 1: diff --git a/psyneulink/library/components/mechanisms/processing/integrator/episodicmemorymechanism.py b/psyneulink/library/components/mechanisms/processing/integrator/episodicmemorymechanism.py index ab53a4aff54..83673854297 100644 --- a/psyneulink/library/components/mechanisms/processing/integrator/episodicmemorymechanism.py +++ b/psyneulink/library/components/mechanisms/processing/integrator/episodicmemorymechanism.py @@ -77,9 +77,9 @@ .. _EpisodicMemoryMechanism_Creation_Default_Variable_and_Size: - * **default_variable** or **size** -- these are specified in the standard way that the `variable + * **default_variable** or **input_shapes** -- these are specified in the standard way that the `variable ` is specified for any `Component` (see `default_variable `, - `size `, respectively); the specified value is passed to the constructor for the + `input_shapes `, respectively); the specified value is passed to the constructor for the EpisodicMemoryMechanism's `function `), which determines the shape of an entry in `memory `; the `memory ` itself remains empty until the Mechanism is executed and an item is stored. @@ -108,7 +108,7 @@ of the entry stored in `memory `, and used to retrieve one similar to it. By default, `input_port ` are named *FIELD_n_INPUT*, where "n" is replaced by the index of each field; however, they can be named explicitly by specifying a list of strings in the **input_ports** argument of -the constructor; the number of these must equal the number of fields specified in **default_variable** or **size**. +the constructor; the number of these must equal the number of fields specified in **default_variable** or **input_shapes**. .. _EpisodicMemoryMechanism_Creation_Function_Parameters: @@ -155,7 +155,7 @@ .. technical_note:: The shape of an entry in `memory ` is determined by the shape of the Mechanism's - `variable `. specified in the **default_variable** or **size** arguments of its constructor + `variable `. specified in the **default_variable** or **input_shapes** arguments of its constructor (see `EpisodicMemoryMechanism_Creation`). Each item of `variable ` corresponds to a field. Both `memory ` and all entries are stored in the EpisodicMemoryMechanism's `function ` as np.ndarrays, the dimensionality of which is determined by the shape of an @@ -176,7 +176,7 @@ ` of that function. By default InputPorts are named *FIELD_n_INPUT* (see `EpisodicMemoryMechanism_Creation`). 
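
For example (a minimal sketch; the names shown follow the default naming scheme described above)::

    >>> my_em = EpisodicMemoryMechanism(input_shapes=[2,3])
    >>> my_em.input_ports.names
    ['FIELD_0_INPUT', 'FIELD_1_INPUT']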
If the Mechanism is assigned `DictionaryMemory` as its `function `, then it is assigned at
 least one InputPort (named *KEY_INPUT* by default),
-and optionally a second (named *VALUE_INPUT*) if **default_variable** or **size** specifies two items; any additional
+and optionally a second (named *VALUE_INPUT*) if **default_variable** or **input_shapes** specifies two items; any additional
 fields are ignored.

 .. _EpisodicMemoryMechanism_Function:

@@ -290,16 +290,16 @@

 .. _EpisodicMemoryMechanism_Examples_Size:

-*Format entries using* **size**
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+*Format entries using* **input_shapes**
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

-The **size** argument can also be used to format entries::
+The **input_shapes** argument can also be used to format entries::

-    >>> my_em = EpisodicMemoryMechanism(size=[2,3])
+    >>> my_em = EpisodicMemoryMechanism(input_shapes=[2,3])
     >>> my_em.execute([[1,2],[3,4,5]])
     array([array([0, 0]), array([0, 0, 0])], dtype=object)

-Note that each element of **size** specifies the length of a field
+Note that each element of **input_shapes** specifies the length of a field
 (see `EpisodicMemoryMechanism_Creation_Default_Variable_and_Size` for additional details).

 .. _EpisodicMemoryMechanism_Examples_Memory_Init:

@@ -317,8 +317,8 @@
     >>> my_em.execute([[1,2],[3,4,6]])
     array([array([1., 2.]), array([3., 4., 6.])], dtype=object)

-Note that there was no need to use **default_variable** or **size** to format entries here, since that is determined
-by the entries in the **memory** argument. If **default_variable** or **size** is specified, its shape must be the
+Note that there was no need to use **default_variable** or **input_shapes** to format entries here, since that is determined
+by the entries in the **memory** argument. If **default_variable** or **input_shapes** is specified, its shape must be the
 same as the entries specified in **memory**. In this example, since `memory ` was initialized,
 the first execution returns the closest value to the input, which is used as the retrieval cue. In the second
 execution, the input from the first execution is returned, since it was stored after the first retrieval. The
@@ -377,7 +377,7 @@
 The names of `input_ports ` can be customized by specifying a list of names in the
 **input_ports** argument of the Mechanism's constructor::

-    >>> my_em = EpisodicMemoryMechanism(size=[2,2,2],
+    >>> my_em = EpisodicMemoryMechanism(input_shapes=[2,2,2],
     ...
input_ports=['KEY', 'VALUE', 'LABEL']) >>> my_em.input_ports.names ['KEY', 'VALUE', 'LABEL'] @@ -525,7 +525,7 @@ def _parse_memory(self, memory): @check_user_specified def __init__(self, default_variable:Union[int, list, np.ndarray]=None, - size:Optional[Union[int, list, np.ndarray]]=None, + input_shapes:Optional[Union[int, list, np.ndarray]]=None, memory:Optional[Union[list, np.ndarray]]=None, function:Optional[Function]=None, params=None, @@ -539,21 +539,21 @@ def __init__(self, and function.__name__ is DictionaryMemory.__name__)) if self._dictionary_memory: # Identify and warn about any deprecated args, and return their values for reassignment - deprecated_arg_values = deprecation_warning(self, kwargs, {'content_size':'size'}) + deprecated_arg_values = deprecation_warning(self, kwargs, {'content_size':'input_shapes'}) # Assign value of deprecated args to current ones - if 'size' in deprecated_arg_values: - size = deprecated_arg_values['size'] + if 'input_shapes' in deprecated_arg_values: + input_shapes = deprecated_arg_values['input_shapes'] # Need to handle assoc_size specially, since it needs to be added to what was content_size if 'assoc_size' in kwargs: - if isinstance(size, int): - size = [size,kwargs['assoc_size']] + if isinstance(input_shapes, int): + input_shapes = [input_shapes, kwargs['assoc_size']] else: - size += kwargs['assoc_size'] + input_shapes += kwargs['assoc_size'] kwargs.pop('assoc_size') super().__init__( default_variable=default_variable, - size=size, + input_shapes=input_shapes, function=function, params=params, name=name, @@ -562,7 +562,7 @@ def __init__(self, **kwargs ) - def _handle_default_variable(self, default_variable=None, size=None, input_ports=None, function=None, params=None): + def _handle_default_variable(self, default_variable=None, input_shapes=None, input_ports=None, function=None, params=None): """Override to initialize or validate default_variable based on _memory_init or function.memory - if memory argument for Mechanism is specified and default_variable is not, use former to specify latter; - if both are specified, validate that they are the same shape; @@ -601,7 +601,7 @@ def _handle_default_variable(self, default_variable=None, size=None, input_ports f"does not match the shape of entries ({entry_shape}) in " f"the memory of its function ({self.function.name}).") - return super()._handle_default_variable(default_variable, size, input_ports, function, params) + return super()._handle_default_variable(default_variable, input_shapes, input_ports, function, params) def _instantiate_input_ports(self, context=None): """Override to assign default names to input_ports""" diff --git a/psyneulink/library/components/mechanisms/processing/leabramechanism.py b/psyneulink/library/components/mechanisms/processing/leabramechanism.py index 84c8576d96a..142cc995377 100644 --- a/psyneulink/library/components/mechanisms/processing/leabramechanism.py +++ b/psyneulink/library/components/mechanisms/processing/leabramechanism.py @@ -73,8 +73,8 @@ LeabraMechanism. Here is an example of how to do this. 
In the example, T2 passes the training_data to the *LEARNING_TARGET* InputPort of L (L.input_ports[1])::

     L = LeabraMechanism(input_size=input_size, output_size=output_size)
-    T1 = TransferMechanism(name='T1', size=input_size, function=Linear)
-    T2 = TransferMechanism(name='T2', size=output_size, function=Linear)
+    T1 = TransferMechanism(name='T1', input_shapes=input_size, function=Linear)
+    T2 = TransferMechanism(name='T2', input_shapes=output_size, function=Linear)
     p1 = Process(pathway=[T1, L])
     proj = MappingProjection(sender=T2, receiver=L.input_ports[1])
     p2 = Process(pathway=[T2, proj, L])
@@ -512,7 +512,7 @@ def __init__(self,
         ]

         super().__init__(
-            size=size,
+            input_shapes=size,
             network=network,
             input_size=input_size,
             output_size=output_size,
diff --git a/psyneulink/library/components/mechanisms/processing/objective/comparatormechanism.py b/psyneulink/library/components/mechanisms/processing/objective/comparatormechanism.py
index aa2a8516e42..bc212dfa277 100644
--- a/psyneulink/library/components/mechanisms/processing/objective/comparatormechanism.py
+++ b/psyneulink/library/components/mechanisms/processing/objective/comparatormechanism.py
@@ -113,7 +113,7 @@
 TARGET InputPorts in the **default_variable** argument of the ComparatorMechanism's constructor, as follows::

     >>> import psyneulink as pnl
-    >>> my_action_selection_mech = pnl.TransferMechanism(size=5,
+    >>> my_action_selection_mech = pnl.TransferMechanism(input_shapes=5,
     ...                                                  function=pnl.SoftMax(output=pnl.PROB))

     >>> my_reward_mech = pnl.TransferMechanism()
diff --git a/psyneulink/library/components/mechanisms/processing/objective/predictionerrormechanism.py b/psyneulink/library/components/mechanisms/processing/objective/predictionerrormechanism.py
index 3f67c9777c7..c8b85868739 100644
--- a/psyneulink/library/components/mechanisms/processing/objective/predictionerrormechanism.py
+++ b/psyneulink/library/components/mechanisms/processing/objective/predictionerrormechanism.py
@@ -140,9 +140,9 @@
 value of which is a vector of the same length as the output of sample.

     >>> import psyneulink as pnl
-    >>> sample_mech = pnl.TransferMechanism(size=5,
+    >>> sample_mech = pnl.TransferMechanism(input_shapes=5,
     ...                                     function=pnl.Linear())
-    >>> reward_mech = pnl.TransferMechanism(size=5)
+    >>> reward_mech = pnl.TransferMechanism(input_shapes=5)
     >>> prediction_error_mech = pnl.PredictionErrorMechanism(sample=sample_mech,
     ...                                                      target=reward_mech)
diff --git a/psyneulink/library/components/mechanisms/processing/transfer/contrastivehebbianmechanism.py b/psyneulink/library/components/mechanisms/processing/transfer/contrastivehebbianmechanism.py
index 460063a0d15..0da5204996d 100644
--- a/psyneulink/library/components/mechanisms/processing/transfer/contrastivehebbianmechanism.py
+++ b/psyneulink/library/components/mechanisms/processing/transfer/contrastivehebbianmechanism.py
@@ -64,7 +64,7 @@
 ~~~~~~

 The **input_size** argument of the constructor must always be specified (this is comparable to specifying the
-**size** or *default_variable** arguments of other types of `Mechanism`). If it is specified on its own,
+**input_shapes** or **default_variable** arguments of other types of `Mechanism`). If it is specified on its own,
 it determines the total number of processing units. If either the **hidden_size** and/or **target_size** arguments
 are specified, then those units are treated as distinct from the input units (see `ContrastiveHebbian_Execution`
 for details).
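
For example (an illustrative sketch; the sizes are arbitrary), specifying an **input_size** of 2 and a
**hidden_size** of 3 creates a Mechanism with five processing units in total::

    >>> my_chm = ContrastiveHebbianMechanism(input_size=2, hidden_size=3)  #doctest: +SKIP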
@@ -345,7 +345,7 @@ from psyneulink.core.globals.context import ContextFlags, handle_external_context from psyneulink.core.globals.keywords import \ CONTRASTIVE_HEBBIAN_MECHANISM, COUNT, FUNCTION, HARD_CLAMP, HOLLOW_MATRIX, MAX_ABS_DIFF, NAME, \ - SIZE, SOFT_CLAMP, TARGET, VARIABLE + INPUT_SHAPES, SOFT_CLAMP, TARGET, VARIABLE from psyneulink.core.globals.parameters import Parameter, SharedParameter, check_user_specified from psyneulink.core.globals.preferences.basepreferenceset import ValidPrefSet from psyneulink.core.globals.utilities import ValidParamSpecType, NumericCollections @@ -1112,7 +1112,7 @@ def _instantiate_input_ports(self, input_ports=None, reference_value=None, conte # Assign InputPort specification dictionaries for required InputPorts sizes = dict(INPUT=self.input_size, RECURRENT=self.recurrent_size, TARGET=self.target_size) for i, input_port in enumerate((s for s in self.input_ports if s in {INPUT, TARGET, RECURRENT})): - self.input_ports[i] = {NAME:input_port, SIZE: sizes[input_port]} + self.input_ports[i] = {NAME:input_port, INPUT_SHAPES: sizes[input_port]} super()._instantiate_input_ports(input_ports, reference_value, context) diff --git a/psyneulink/library/components/mechanisms/processing/transfer/kohonenmechanism.py b/psyneulink/library/components/mechanisms/processing/transfer/kohonenmechanism.py index b8e5d80dad6..270b00a5561 100644 --- a/psyneulink/library/components/mechanisms/processing/transfer/kohonenmechanism.py +++ b/psyneulink/library/components/mechanisms/processing/transfer/kohonenmechanism.py @@ -275,7 +275,7 @@ class Parameters(TransferMechanism.Parameters): @beartype def __init__(self, default_variable=None, - size=None, + input_shapes=None, function=None, # selection_function=OneHot(mode=MAX_INDICATOR), # RE-INSTATE WHEN IMPLEMENT NHot function integrator_function=None, @@ -311,7 +311,7 @@ def __init__(self, super().__init__( default_variable=default_variable, - size=size, + input_shapes=input_shapes, function=function, integrator_function=integrator_function, integrator_mode=integrator_mode, diff --git a/psyneulink/library/components/mechanisms/processing/transfer/kwtamechanism.py b/psyneulink/library/components/mechanisms/processing/transfer/kwtamechanism.py index 11a7e7a383c..e26961ffa3f 100644 --- a/psyneulink/library/components/mechanisms/processing/transfer/kwtamechanism.py +++ b/psyneulink/library/components/mechanisms/processing/transfer/kwtamechanism.py @@ -346,7 +346,7 @@ class Parameters(RecurrentTransferMechanism.Parameters): @beartype def __init__(self, default_variable=None, - size=None, + input_shapes=None, function=None, matrix=None, auto: Optional[NumericCollections] = None, @@ -378,7 +378,7 @@ def __init__(self, super().__init__( default_variable=default_variable, - size=size, + input_shapes=input_shapes, input_ports=input_ports, function=function, matrix=matrix, @@ -439,7 +439,7 @@ def _kwta_scale(self, current_input, context=None): int_k_value = int(k_value) # ^ this is hacky but necessary for now, since something is # incorrectly turning k_value into an array of floats - n = self.size[0] + n = self.input_shapes[0] if (k_value[0] > 0) and (k_value[0] < 1): k = int(round(k_value[0] * n)) elif (int_k_value < 0): @@ -513,7 +513,7 @@ def _validate_params(self, request_set, target_set=None, context=None): format(k_param, self)) except AttributeError: raise KWTAError("k-value parameter ({}) for {} was an unexpected type.".format(k_param, self)) - if abs(k_num) > self.size[0]: + if abs(k_num) > self.input_shapes[0]: raise 
KWTAError("k-value parameter ({}) for {} was larger than the total number of elements.". format(k_param, self)) diff --git a/psyneulink/library/components/mechanisms/processing/transfer/lcamechanism.py b/psyneulink/library/components/mechanisms/processing/transfer/lcamechanism.py index 48e5292a9d7..d34a39cb425 100644 --- a/psyneulink/library/components/mechanisms/processing/transfer/lcamechanism.py +++ b/psyneulink/library/components/mechanisms/processing/transfer/lcamechanism.py @@ -442,7 +442,7 @@ def _validate_integration_rate(self, integration_rate): @beartype def __init__(self, default_variable=None, - size: Optional[Union[int, list, np.ndarray]] = None, + input_shapes: Optional[Union[int, list, np.ndarray]] = None, input_ports: Optional[Union[list, dict]] = None, function=None, initial_value=None, @@ -514,7 +514,7 @@ def __init__(self, super().__init__( default_variable=default_variable, - size=size, + input_shapes=input_shapes, input_ports=input_ports, # matrix=matrix, auto=self_excitation, diff --git a/psyneulink/library/components/mechanisms/processing/transfer/recurrenttransfermechanism.py b/psyneulink/library/components/mechanisms/processing/transfer/recurrenttransfermechanism.py index e65e5896b9b..6e173e83aec 100644 --- a/psyneulink/library/components/mechanisms/processing/transfer/recurrenttransfermechanism.py +++ b/psyneulink/library/components/mechanisms/processing/transfer/recurrenttransfermechanism.py @@ -646,7 +646,7 @@ class Parameters(TransferMechanism.Parameters): @beartype def __init__(self, default_variable=None, - size=None, + input_shapes=None, input_ports: Optional[Union[list, dict]] = None, has_recurrent_input_port=None, combination_function: Optional[Callable] = None, @@ -688,7 +688,7 @@ def __init__(self, super().__init__( default_variable=default_variable, - size=size, + input_shapes=input_shapes, input_ports=input_ports, function=function, integrator_function=integrator_function, @@ -766,7 +766,7 @@ def _validate_params(self, request_set, target_set=None, context=None): if isinstance(matrix_param, AutoAssociativeProjection): err_msg = ("Number of rows in {} param for {} ({}) must be same as the size of variable for " "{} {} (whose size is {} and whose variable is {})". - format(MATRIX, self.name, rows, self.__class__.__name__, self.name, self.size, + format(MATRIX, self.name, rows, self.__class__.__name__, self.name, self.input_shapes, self.defaults.variable)) else: err_msg = ("Size of {} param for {} ({}) must be the same as its variable ({})". 
@@ -779,9 +779,9 @@ def _validate_params(self, request_set, target_set=None, context=None): if (auto_param is not None) and not isinstance(auto_param, (np.ndarray, list, numbers.Number)): raise RecurrentTransferError("auto parameter ({}) of {} is of incompatible type: it should be a " "number, None, or a 1D numeric array".format(auto_param, self)) - if isinstance(auto_param, (np.ndarray, list)) and safe_len(auto_param) != 1 and safe_len(auto_param) != self.size[0]: + if isinstance(auto_param, (np.ndarray, list)) and safe_len(auto_param) != 1 and safe_len(auto_param) != self.input_shapes[0]: raise RecurrentTransferError("auto parameter ({0}) for {1} is of incompatible length with the size " - "({2}) of its owner, {1}.".format(auto_param, self, self.size[0])) + "({2}) of its owner, {1}.".format(auto_param, self, self.input_shapes[0])) if HETERO in target_set: hetero_param = target_set[HETERO] @@ -790,9 +790,9 @@ def _validate_params(self, request_set, target_set=None, context=None): "number, None, or a 2D numeric array".format(hetero_param, self)) hetero_shape = np.array(hetero_param).shape if hetero_shape != (1,) and hetero_shape != (1, 1): - if isinstance(hetero_param, (np.ndarray, list, np.matrix)) and (hetero_param.ndim > 0 and hetero_shape[0] != self.size[0]): + if isinstance(hetero_param, (np.ndarray, list, np.matrix)) and (hetero_param.ndim > 0 and hetero_shape[0] != self.input_shapes[0]): raise RecurrentTransferError("hetero parameter ({0}) for {1} is of incompatible size with the size " - "({2}) of its owner, {1}.".format(hetero_param, self, self.size[0])) + "({2}) of its owner, {1}.".format(hetero_param, self, self.input_shapes[0])) if isinstance(hetero_param, (np.ndarray, list, np.matrix)) and (hetero_param.ndim > 0 and hetero_shape[0] != hetero_shape[1]): raise RecurrentTransferError("hetero parameter ({}) for {} must be square.".format(hetero_param, self)) diff --git a/psyneulink/library/compositions/autodiffcomposition.py b/psyneulink/library/compositions/autodiffcomposition.py index c851c32afd2..f93efa43736 100644 --- a/psyneulink/library/compositions/autodiffcomposition.py +++ b/psyneulink/library/compositions/autodiffcomposition.py @@ -279,8 +279,8 @@ >>> import psyneulink as pnl >>> # Set up PsyNeuLink Components - >>> my_mech_1 = pnl.TransferMechanism(function=pnl.Linear, size = 3) - >>> my_mech_2 = pnl.TransferMechanism(function=pnl.Linear, size = 2) + >>> my_mech_1 = pnl.TransferMechanism(function=pnl.Linear, input_shapes = 3) + >>> my_mech_2 = pnl.TransferMechanism(function=pnl.Linear, input_shapes = 2) >>> my_projection = pnl.MappingProjection(matrix=np.random.randn(3,2), ... sender=my_mech_1, ... 
receiver=my_mech_2) diff --git a/psyneulink/library/compositions/emcomposition.py b/psyneulink/library/compositions/emcomposition.py index a6da921c761..809a007b186 100644 --- a/psyneulink/library/compositions/emcomposition.py +++ b/psyneulink/library/compositions/emcomposition.py @@ -962,7 +962,7 @@ from psyneulink.core.globals.parameters import Parameter, check_user_specified from psyneulink.core.globals.keywords import \ (ADAPTIVE, AUTO, CONTROL, DEFAULT_INPUT, DEFAULT_VARIABLE, EM_COMPOSITION, FULL_CONNECTIVITY_MATRIX, - GAIN, IDENTITY_MATRIX, MULTIPLICATIVE_PARAM, NAME, PARAMS, PRODUCT, PROJECTIONS, RANDOM, SIZE, VARIABLE) + GAIN, IDENTITY_MATRIX, MULTIPLICATIVE_PARAM, NAME, PARAMS, PRODUCT, PROJECTIONS, RANDOM, INPUT_SHAPES, VARIABLE) from psyneulink.core.globals.utilities import convert_all_elements_to_np_array, is_numeric_scalar from psyneulink.core.globals.context import ContextFlags from psyneulink.core.llvm import ExecutionMode @@ -2040,10 +2040,11 @@ def _construct_query_input_nodes(self, field_weights)->list: f"PROGRAM ERROR: number of keys ({self.num_keys}) does not match number of " \ f"non-zero values in field_weights ({len(self.key_indices)})." - # query_input_nodes = [TransferMechanism(size=len(self.entry_template[self.key_indices[i]]), + # query_input_nodes = [TransferMechanism(input_shapes=len(self.entry_template[self.key_indices[i]]), # name=f'{self.key_names[self.key_indices[i]]} [QUERY]') # for i in range(self.num_keys)] - query_input_nodes = [TransferMechanism(size=len(self.entry_template[self.key_indices[i]]), + query_input_nodes = [TransferMechanism( + input_shapes=len(self.entry_template[self.key_indices[i]]), name=f'{self.key_names[i]} [QUERY]') for i in range(self.num_keys)] @@ -2062,7 +2063,8 @@ def _construct_value_input_nodes(self, field_weights)->list: f"PROGRAM ERROR: number of values ({self.num_values}) does not match number of " \ f"non-zero values in field_weights ({len(value_indices)})." 
- value_input_nodes = [TransferMechanism(size=len(self.entry_template[value_indices[i]]), + value_input_nodes = [TransferMechanism( + input_shapes=len(self.entry_template[value_indices[i]]), name= f'{self.value_names[i]} [VALUE]') for i in range(self.num_values)] @@ -2078,7 +2080,7 @@ def _construct_concatenate_keys_node(self, concatenate_keys)->ProcessingMechanis else: return ProcessingMechanism(function=Concatenate, input_ports=[{NAME: 'CONCATENATE_KEYS', - SIZE: len(self.query_input_nodes[i].output_port.value), + INPUT_SHAPES: len(self.query_input_nodes[i].output_port.value), PROJECTIONS: MappingProjection( name=f'{self.key_names[i]} to CONCATENATE', sender=self.query_input_nodes[i].output_port, @@ -2106,7 +2108,7 @@ def _construct_match_nodes(self, memory_template, memory_capacity, concatenate_k match_nodes = [ TransferMechanism( input_ports={NAME: 'CONCATENATED_INPUTS', - SIZE: memory_capacity, + INPUT_SHAPES: memory_capacity, PROJECTIONS: MappingProjection(sender=self.concatenate_keys_node, matrix=matrix, function=LinearMatrix( @@ -2119,7 +2121,7 @@ def _construct_match_nodes(self, memory_template, memory_capacity, concatenate_k match_nodes = [ TransferMechanism( input_ports= { - SIZE:memory_capacity, + INPUT_SHAPES:memory_capacity, PROJECTIONS: MappingProjection(sender=self.query_input_nodes[i].output_port, matrix = np.array( memory_template[:,i].tolist()).transpose().astype(float), @@ -2144,7 +2146,7 @@ def _construct_softmax_nodes(self, memory_capacity, field_weights, softmax_gain, f"PROGRAM ERROR: number of keys ({self.num_keys}) does not match number of " \ f"non-zero values in field_weights ({len(key_indices)})." - softmax_nodes = [TransferMechanism(input_ports={SIZE:memory_capacity, + softmax_nodes = [TransferMechanism(input_ports={INPUT_SHAPES:memory_capacity, PROJECTIONS: MappingProjection( sender=match_node.output_port, matrix=IDENTITY_MATRIX, @@ -2233,7 +2235,7 @@ def _construct_combined_softmax_node(self, input_source = self.weighted_softmax_nodes combined_softmax_node = ( - ProcessingMechanism(input_ports=[{SIZE:memory_capacity, + ProcessingMechanism(input_ports=[{INPUT_SHAPES:memory_capacity, # PROJECTIONS:[s for s in input_source]}], PROJECTIONS:[MappingProjection(sender=s, matrix=IDENTITY_MATRIX, @@ -2252,7 +2254,7 @@ def _construct_retrieved_nodes(self, memory_template)->list: """Create nodes that report the value field(s) for the item(s) matched in memory. 
""" self.retrieved_key_nodes = \ - [TransferMechanism(input_ports={SIZE: len(self.query_input_nodes[i].variable[0]), + [TransferMechanism(input_ports={INPUT_SHAPES: len(self.query_input_nodes[i].variable[0]), PROJECTIONS: MappingProjection( sender=self.combined_softmax_node, @@ -2263,7 +2265,7 @@ def _construct_retrieved_nodes(self, memory_template)->list: for i in range(self.num_keys)] self.retrieved_value_nodes = \ - [TransferMechanism(input_ports={SIZE: len(self.value_input_nodes[i].variable[0]), + [TransferMechanism(input_ports={INPUT_SHAPES: len(self.value_input_nodes[i].variable[0]), PROJECTIONS: MappingProjection( sender=self.combined_softmax_node, diff --git a/psyneulink/library/models/Cohen_Huston1994.py b/psyneulink/library/models/Cohen_Huston1994.py index ff455921d40..f12e08fe9e9 100644 --- a/psyneulink/library/models/Cohen_Huston1994.py +++ b/psyneulink/library/models/Cohen_Huston1994.py @@ -24,26 +24,26 @@ # Create mechanisms --------------------------------------------------------------------------------------------------- # Linear input units, colors: ('red', 'green'), words: ('RED','GREEN') colors_input_layer = pnl.TransferMechanism( - size=3, + input_shapes=3, function=pnl.Linear, name='COLORS_INPUT' ) words_input_layer = pnl.TransferMechanism( - size=3, + input_shapes=3, function=pnl.Linear, name='WORDS_INPUT' ) task_input_layer = pnl.TransferMechanism( - size=2, + input_shapes=2, function=pnl.Linear, name='TASK_INPUT' ) # Task layer, tasks: ('name the color', 'read the word') task_layer = pnl.RecurrentTransferMechanism( - size=2, + input_shapes=2, function=pnl.Logistic(), hetero=inhibition, integrator_mode=True, @@ -53,7 +53,7 @@ # Hidden layer units, colors: ('red','green') words: ('RED','GREEN') colors_hidden_layer = pnl.RecurrentTransferMechanism( - size=3, + input_shapes=3, function=pnl.Logistic(x_0=bias), integrator_mode=True, hetero=inhibition, @@ -63,7 +63,7 @@ ) words_hidden_layer = pnl.RecurrentTransferMechanism( - size=3, + input_shapes=3, function=pnl.Logistic(x_0=bias), hetero=inhibition, integrator_mode=True, @@ -73,7 +73,7 @@ ) # Response layer, responses: ('red', 'green'): RecurrentTransferMechanism for self inhibition matrix response_layer = pnl.RecurrentTransferMechanism( - size=2, + input_shapes=2, function=pnl.Logistic(), hetero=inhibition, integrator_mode=True, diff --git a/psyneulink/library/models/Cohen_Huston1994_horse_race.py b/psyneulink/library/models/Cohen_Huston1994_horse_race.py index 39365f0c08d..88d5bf528e6 100644 --- a/psyneulink/library/models/Cohen_Huston1994_horse_race.py +++ b/psyneulink/library/models/Cohen_Huston1994_horse_race.py @@ -35,20 +35,24 @@ # Create mechanisms --------------------------------------------------------------------------------------------------- # Linear input units, colors: ('red', 'green'), words: ('RED','GREEN') -colors_input_layer = pnl.TransferMechanism(size=3, +colors_input_layer = pnl.TransferMechanism( + input_shapes=3, function=pnl.Linear, name='COLORS_INPUT') -words_input_layer = pnl.TransferMechanism(size=3, +words_input_layer = pnl.TransferMechanism( + input_shapes=3, function=pnl.Linear, name='WORDS_INPUT') -task_input_layer = pnl.TransferMechanism(size=2, +task_input_layer = pnl.TransferMechanism( + input_shapes=2, function=pnl.Linear, name='TASK_INPUT') # Task layer, tasks: ('name the color', 'read the word') -task_layer = pnl.RecurrentTransferMechanism(size=2, +task_layer = pnl.RecurrentTransferMechanism( + input_shapes=2, function=pnl.Logistic(), hetero=-2, integrator_mode=True, @@ -56,7 +60,8 
@@ name='TASK') # Hidden layer units, colors: ('red','green') words: ('RED','GREEN') -colors_hidden_layer = pnl.RecurrentTransferMechanism(size=3, +colors_hidden_layer = pnl.RecurrentTransferMechanism( + input_shapes=3, function=pnl .Logistic(x_0=4.0), integrator_mode=True, @@ -65,7 +70,8 @@ integration_rate=0.1, # cohen-huston text says 0.01 name='COLORS HIDDEN') -words_hidden_layer = pnl.RecurrentTransferMechanism(size=3, +words_hidden_layer = pnl.RecurrentTransferMechanism( + input_shapes=3, function=pnl.Logistic(x_0=4.0), hetero=-2, integrator_mode=True, @@ -73,7 +79,8 @@ integration_rate=0.1, name='WORDS HIDDEN') # Response layer, responses: ('red', 'green'): RecurrentTransferMechanism for self inhibition matrix -response_layer = pnl.RecurrentTransferMechanism(size=2, +response_layer = pnl.RecurrentTransferMechanism( + input_shapes=2, function=pnl.Logistic(), hetero=-2.0, integrator_mode=True, diff --git a/psyneulink/library/models/GilzenratModel.py b/psyneulink/library/models/GilzenratModel.py index cb8c1eda1a3..79a31a7e1b9 100644 --- a/psyneulink/library/models/GilzenratModel.py +++ b/psyneulink/library/models/GilzenratModel.py @@ -60,7 +60,7 @@ # Input Layer --- [ Target, Distractor ] input_layer = pnl.TransferMechanism( - size=2, + input_shapes=2, initial_value=np.array([[0.0, 0.0]]), name='INPUT LAYER' ) @@ -68,7 +68,7 @@ # Create Decision Layer --- [ Target, Distractor ] decision_layer = pnl.LCAMechanism( - size=2, + input_shapes=2, time_step_size=dt, leak=1.0, self_excitation=w_XiXi, @@ -84,7 +84,7 @@ # Create Response Layer --- [ Target ] response_layer = pnl.LCAMechanism( - size=1, + input_shapes=1, time_step_size=dt, leak=1.0, self_excitation=w_X3X3, diff --git a/psyneulink/library/models/Kalanthroff_PCTC_2018.py b/psyneulink/library/models/Kalanthroff_PCTC_2018.py index 3dce04f48eb..939a4f65c7e 100644 --- a/psyneulink/library/models/Kalanthroff_PCTC_2018.py +++ b/psyneulink/library/models/Kalanthroff_PCTC_2018.py @@ -28,25 +28,25 @@ # Create mechanisms --------------------------------------------------------------------------------------------------- # 4 Input layers for color, word, task & bias colors_input_layer = pnl.TransferMechanism( - size=2, + input_shapes=2, function=pnl.Linear, name='COLORS_INPUT' ) words_input_layer = pnl.TransferMechanism( - size=2, + input_shapes=2, function=pnl.Linear, name='WORDS_INPUT' ) task_input_layer = pnl.TransferMechanism( - size=2, + input_shapes=2, function=pnl.Linear, name='PROACTIVE_CONTROL' ) bias_input = pnl.TransferMechanism( - size=2, + input_shapes=2, function=pnl.Linear, name='BIAS' ) @@ -72,7 +72,7 @@ def my_conflict_function(variable): # Create color feature layer, word feature layer, task demand layer and response layer color_feature_layer = pnl.RecurrentTransferMechanism( - size=2, # Define unit size + input_shapes=2, # Define unit size function=pnl.Logistic(gain=4, x_0=1), # to 4 & bias to 1 integrator_mode=True, # Set IntegratorFunction mode to True integration_rate=Lambda, # smoothing factor == integration rate @@ -86,7 +86,7 @@ def my_conflict_function(variable): # The word_feature_layer is set up as the color_feature_layer word_feature_layer = pnl.RecurrentTransferMechanism( - size=2, # Define unit size + input_shapes=2, # Define unit size function=pnl.Logistic(gain=4, x_0=1), # to 4 & bias to 1 integrator_mode=True, # Set IntegratorFunction mode to True integration_rate=Lambda, # smoothing factor == integration rate @@ -101,7 +101,7 @@ def my_conflict_function(variable): # The response_layer is set up as the 
color_feature_layer & the word_feature_layer response_layer = pnl.RecurrentTransferMechanism( - size=2, # Define unit size + input_shapes=2, # Define unit size function=pnl.Logistic(gain=4, x_0=1), # to 4 & bias to 1 integrator_mode=True, # Set IntegratorFunction mode to True integration_rate=Lambda, # smoothing factor == integration rate @@ -117,7 +117,7 @@ def my_conflict_function(variable): # The task_demand_layer is set up as the color_feature_layer but with a different python function on it's OutputPort # and a differnet inhibition weight on the hetero task_demand_layer = pnl.RecurrentTransferMechanism( - size=2, # Define unit size + input_shapes=2, # Define unit size function=pnl.Logistic(gain=4, x_0=1), # to 4 & bias to 1 integrator_mode=True, # Set IntegratorFunction mode to True integration_rate=Lambda, # smoothing factor == integration rate diff --git a/psyneulink/library/models/Nieuwenhuis2005Model.py b/psyneulink/library/models/Nieuwenhuis2005Model.py index 446a02d5579..66856886a84 100644 --- a/psyneulink/library/models/Nieuwenhuis2005Model.py +++ b/psyneulink/library/models/Nieuwenhuis2005Model.py @@ -52,14 +52,14 @@ # First, we create the 3 layers of the behavioral network, i.e. INPUT LAYER, DECISION LAYER, and RESPONSE LAYER. input_layer = pnl.TransferMechanism( - size=3, # Number of units in input layer + input_shapes=3, # Number of units in input layer initial_value=[[0.0, 0.0, 0.0]], # Initial input values name='INPUT LAYER' # Define the name of the layer; this is optional, ) # but will help you to overview your model later on # Create Decision Layer --- [ Target 1, Target 2, Distractor ] decision_layer = pnl.LCAMechanism( - size=3, # Number of units in input layer + input_shapes=3, # Number of units in input layer initial_value=[[0.0, 0.0, 0.0]], # Initial input values time_step_size=dt, # Integration step size leak=1.0, # Sets off diagonals to negative values @@ -79,7 +79,7 @@ # Create Response Layer --- [ Target1, Target2 ] response_layer = pnl.LCAMechanism( - size=2, # Number of units in input layer + input_shapes=2, # Number of units in input layer initial_value=[[0.0, 0.0]], # Initial input values time_step_size=dt, # Integration step size leak=1.0, # Sets off diagonals to negative values diff --git a/tests/components/test_component.py b/tests/components/test_component.py index ffb328c6705..08237bec3e8 100644 --- a/tests/components/test_component.py +++ b/tests/components/test_component.py @@ -142,7 +142,7 @@ def __init__(self, default_variable=None, **kwargs): 'cls_', [pnl.ProcessingMechanism, pnl.TransferMechanism, pnl.IntegratorMechanism] ) @pytest.mark.parametrize( - 'size, expected_variable', + 'input_shapes, expected_variable', [ (1, [[0]]), (2, [[0, 0]]), @@ -153,8 +153,8 @@ def __init__(self, default_variable=None, **kwargs): ] ) @pytest.mark.parametrize('params_dict_entry', [NotImplemented, 'params']) - def test_size(self, cls_, params_dict_entry, size, expected_variable): - c = cls_(**nest_dictionary({'size': size}, params_dict_entry)) + def test_input_shapes(self, cls_, params_dict_entry, input_shapes, expected_variable): + c = cls_(**nest_dictionary({'input_shapes': input_shapes}, params_dict_entry)) np.testing.assert_array_equal(c.defaults.variable, expected_variable) @pytest.mark.parametrize( diff --git a/tests/composition/test_autodiffcomposition.py b/tests/composition/test_autodiffcomposition.py index 858390f6581..74033644193 100644 --- a/tests/composition/test_autodiffcomposition.py +++ b/tests/composition/test_autodiffcomposition.py @@ -619,11 
+619,13 @@ def test_pytorch_equivalence_with_autodiff_composition(self, autodiff_mode): min_delt = 0.00001 learning_rate = 100 - il = TransferMechanism(size=D_i, name='input') - cl = TransferMechanism(size=D_c, name='task') - hl = TransferMechanism(size=D_h, name='hidden', + il = TransferMechanism(input_shapes=D_i, name='input') + cl = TransferMechanism(input_shapes=D_c, name='task') + hl = TransferMechanism( + input_shapes=D_h, name='hidden', function=Logistic(bias=-2)) - ol = TransferMechanism(size=D_o, name='output', + ol = TransferMechanism( + input_shapes=D_o, name='output', function=Logistic(bias=-2)) input_set = { @@ -833,11 +835,13 @@ def test_pytorch_equivalence_with_autodiff_forward_disabled_on_proj(self): min_delt = 0.00001 learning_rate = 100 - il = TransferMechanism(size=D_i, name='input') - cl = TransferMechanism(size=D_c, name='task') - hl = TransferMechanism(size=D_h, name='hidden', + il = TransferMechanism(input_shapes=D_i, name='input') + cl = TransferMechanism(input_shapes=D_c, name='task') + hl = TransferMechanism( + input_shapes=D_h, name='hidden', function=Logistic(bias=-2)) - ol = TransferMechanism(size=D_o, name='output', + ol = TransferMechanism( + input_shapes=D_o, name='output', function=Logistic(bias=-2)) input_set = { @@ -1457,7 +1461,7 @@ def test_xor_nested_no_train_then_train(self, num_epochs, learning_rate, # # input_dict = {'inputs': {xor_in: xor_inputs}, 'targets': {xor_out: xor_targets}, 'epochs': num_epochs} # xor_autodiff.run(inputs = input_dict) - # myTransfer = pnl.TransferMechanism(size = 2) + # myTransfer = pnl.TransferMechanism(input_shapes = 2) # myMappingProj = pnl.MappingProjection(sender = myTransfer, receiver = xor_autodiff) # # no_training_input_dict = {xor_in: xor_inputs} @@ -1739,15 +1743,15 @@ class TestNestedLearning: @pytest.fixture def nodes_for_testing_nested_comps(self): - input_nodes = [pnl.ProcessingMechanism(name='input_1', size=2), - pnl.ProcessingMechanism(name='input_2', size=3), - pnl.ProcessingMechanism(name='input_3', size=3)] - hidden_nodes = [pnl.ProcessingMechanism(name='hidden_1', size=3), - pnl.ProcessingMechanism(name='hidden_2', size=4), - pnl.ProcessingMechanism(name='hidden_3', size=5), - pnl.ProcessingMechanism(name='hidden_4', size=6)] - output_nodes = [pnl.ProcessingMechanism(name='output_1', size=3), - pnl.ProcessingMechanism(name='output_2', size=5)] + input_nodes = [pnl.ProcessingMechanism(name='input_1', input_shapes=2), + pnl.ProcessingMechanism(name='input_2', input_shapes=3), + pnl.ProcessingMechanism(name='input_3', input_shapes=3)] + hidden_nodes = [pnl.ProcessingMechanism(name='hidden_1', input_shapes=3), + pnl.ProcessingMechanism(name='hidden_2', input_shapes=4), + pnl.ProcessingMechanism(name='hidden_3', input_shapes=5), + pnl.ProcessingMechanism(name='hidden_4', input_shapes=6)] + output_nodes = [pnl.ProcessingMechanism(name='output_1', input_shapes=3), + pnl.ProcessingMechanism(name='output_2', input_shapes=5)] def _get_nodes(num_input_nodes, num_hidden_nodes, num_output_nodes): return (input_nodes[0:num_input_nodes], hidden_nodes[0:num_hidden_nodes], @@ -1850,7 +1854,7 @@ def test_1_input_to_1_nested_hidden_with_2_output_ports(self, nodes_for_testing_ nodes = nodes_for_testing_nested_comps(1, 1, 2) input_nodes, hidden_nodes, output_nodes = nodes inputs = {input_nodes[0]:np.array([[0, 0], [0, 1], [1, 0], [1, 1]])} - hidden_with_two_output_ports = pnl.ProcessingMechanism(size=3, output_ports=['FIRST','SECOND']) + hidden_with_two_output_ports = pnl.ProcessingMechanism(input_shapes=3, 
output_ports=['FIRST', 'SECOND']) nested = AutodiffComposition([hidden_nodes[0], hidden_with_two_output_ports], name='nested') pathway_a = [input_nodes[0], @@ -1954,7 +1958,7 @@ def test_2_inputs_to_2_input_ports_of_single_nested_hidden(self, nodes_for_testi nodes = nodes_for_testing_nested_comps(2, 0, 1) input_nodes, hidden_nodes, output_nodes = nodes - hidden_with_2_inputs = pnl.ProcessingMechanism(name='hidden_x', size=(3,3), function=pnl.LinearCombination) + hidden_with_2_inputs = pnl.ProcessingMechanism(name='hidden_x', input_shapes=(3, 3), function=pnl.LinearCombination) inputs = {input_nodes[0]:np.array([[0, 0], [0, 1], [1, 0], [1, 1]])} @@ -2191,7 +2195,7 @@ def test_inputs_to_multiple_input_ports_and_INPUT_nodes(self, nodes_for_testing_ # input_nodes, hidden_nodes, output_nodes = nodes # inputs = {input_nodes[0]:np.array([[0, 0], [0, 1], [1, 0], [1, 1]])} # - # hidden_2d = pnl.ProcessingMechanism(name='hidden 2d', size=(2,2)) + # hidden_2d = pnl.ProcessingMechanism(name='hidden 2d', input_shapes=(2,2)) # nested = AutodiffComposition(nodes = [hidden_nodes[0], hidden_2d], name='nested') # pathway_a = [input_nodes[0], # MappingProjection(input_nodes[0], hidden_2d), @@ -2328,14 +2332,14 @@ def get_targets_comp(idx): # input_nodes, hidden_nodes, output_nodes = nodes # inputs = {input_nodes[0]:np.array([[0, 0], [0, 1], [1, 0], [1, 1]])} # - # hidden_1 = pnl.ProcessingMechanism(name='hidden_1', size=3) + # hidden_1 = pnl.ProcessingMechanism(name='hidden_1', input_shapes=3) # nested_01 = AutodiffComposition(name='nested_01', nodes=[hidden_1], learning_rate=.01) # autodiff_01_results = execute_learning(comp_type='autodiff', # execution_mode=pnl.ExecutionMode.PyTorch, # pathways=[input_nodes[0], nested_01, output_nodes[0]], # inputs=inputs) # - # hidden_2 = pnl.ProcessingMechanism(name='hidden_2', size=3) + # hidden_2 = pnl.ProcessingMechanism(name='hidden_2', input_shapes=3) # nested_1 = AutodiffComposition(name='nested_2', nodes=[hidden_2], learning_rate=.1) # autodiff_1_results = execute_learning(comp_type='autodiff', # execution_mode=pnl.ExecutionMode.PyTorch, @@ -2346,9 +2350,9 @@ def get_targets_comp(idx): # np.testing.assert_allclose(autodiff_01_results, autodiff_1_results) def test_error_for_running_nested_learning_in_Python_mode(self): - input_mech = pnl.ProcessingMechanism(name='input_mech', size=2) - hidden_mech = pnl.ProcessingMechanism(name='hidden_mech', size=2) - output_mech = pnl.ProcessingMechanism(name='output_mech', size=2) + input_mech = pnl.ProcessingMechanism(name='input_mech', input_shapes=2) + hidden_mech = pnl.ProcessingMechanism(name='hidden_mech', input_shapes=2) + output_mech = pnl.ProcessingMechanism(name='output_mech', input_shapes=2) # Test for error on learning if nested is Composition nested = pnl.Composition(name='nested', nodes=[hidden_mech]) @@ -2381,14 +2385,14 @@ def test_error_for_running_nested_learning_in_Python_mode(self): OUTPUT_A = 'output_A' OUTPUT_B = 'output_B' def nodes_for_testing_nested_comps(sizes): - return {INPUT_A: pnl.ProcessingMechanism(name=INPUT_A, size=sizes.pop(INPUT_A, 2)), - INPUT_B: pnl.ProcessingMechanism(name=INPUT_B, size=sizes.pop(INPUT_B, 2)), - INPUT_C: pnl.ProcessingMechanism(name=INPUT_C, size=sizes.pop(INPUT_C, 2)), - HIDDEN_A: pnl.ProcessingMechanism(name=HIDDEN_A, size=sizes.pop(HIDDEN_A, 2)), - HIDDEN_B: pnl.ProcessingMechanism(name=HIDDEN_B, size=sizes.pop(HIDDEN_B, 2)), - HIDDEN_C: pnl.ProcessingMechanism(name=HIDDEN_C, size=sizes.pop(HIDDEN_C, 2)), - OUTPUT_A: pnl.ProcessingMechanism(name=OUTPUT_A, 
size=sizes.pop(OUTPUT_A, 2)), - OUTPUT_B: pnl.ProcessingMechanism(name=OUTPUT_B, size=sizes.pop(OUTPUT_B, 2))} + return {INPUT_A: pnl.ProcessingMechanism(name=INPUT_A, input_shapes=sizes.pop(INPUT_A, 2)), + INPUT_B: pnl.ProcessingMechanism(name=INPUT_B, input_shapes=sizes.pop(INPUT_B, 2)), + INPUT_C: pnl.ProcessingMechanism(name=INPUT_C, input_shapes=sizes.pop(INPUT_C, 2)), + HIDDEN_A: pnl.ProcessingMechanism(name=HIDDEN_A, input_shapes=sizes.pop(HIDDEN_A, 2)), + HIDDEN_B: pnl.ProcessingMechanism(name=HIDDEN_B, input_shapes=sizes.pop(HIDDEN_B, 2)), + HIDDEN_C: pnl.ProcessingMechanism(name=HIDDEN_C, input_shapes=sizes.pop(HIDDEN_C, 2)), + OUTPUT_A: pnl.ProcessingMechanism(name=OUTPUT_A, input_shapes=sizes.pop(OUTPUT_A, 2)), + OUTPUT_B: pnl.ProcessingMechanism(name=OUTPUT_B, input_shapes=sizes.pop(OUTPUT_B, 2))} @pytest.mark.pytorch @@ -3603,17 +3607,17 @@ def test_autodiff_logging(self): np.testing.assert_equal(in_np_dict_vals[0:4], xor_inputs) np.testing.assert_equal(in_np_vals, in_np_dict_vals) - assert in_np_dict_vals.shape == (expected_length, 1, xor_in.size) + assert in_np_dict_vals.shape == (expected_length, 1, xor_in.input_shapes) - assert hid_map_np_dict_mats.shape == (expected_length, xor_in.size, xor_hid.size) + assert hid_map_np_dict_mats.shape == (expected_length, xor_in.input_shapes, xor_hid.input_shapes) np.testing.assert_equal(hid_map_np_mats, hid_map_np_dict_mats) - assert hid_np_dict_vals.shape == (expected_length, 1, xor_hid.size) + assert hid_np_dict_vals.shape == (expected_length, 1, xor_hid.input_shapes) - assert out_map_np_dict_mats.shape == (expected_length, xor_hid.size, xor_out.size) + assert out_map_np_dict_mats.shape == (expected_length, xor_hid.input_shapes, xor_out.input_shapes) np.testing.assert_equal(out_map_np_mats, out_map_np_dict_mats) - assert out_np_dict_vals.shape == (expected_length, 1, xor_out.size) + assert out_np_dict_vals.shape == (expected_length, 1, xor_out.input_shapes) xor_out.log.print_entries() diff --git a/tests/composition/test_composition.py b/tests/composition/test_composition.py index 81c13b223a2..ba9b9157f75 100644 --- a/tests/composition/test_composition.py +++ b/tests/composition/test_composition.py @@ -472,10 +472,10 @@ def test_add_linear_processing_pathway_with_noderole_specified_in_tuple(self): def test_add_linear_processing_pathway_containing_nodes_with_existing_projections(self): """ Test that add_linear_processing_pathway uses MappingProjections already specified for Hidden_layer_2 and Output_Layer in the pathway it creates within the Composition""" - Input_Layer = TransferMechanism(name='Input Layer', size=2) - Hidden_Layer_1 = TransferMechanism(name='Hidden Layer_1', size=5) - Hidden_Layer_2 = TransferMechanism(name='Hidden Layer_2', size=4) - Output_Layer = TransferMechanism(name='Output Layer', size=3) + Input_Layer = TransferMechanism(name='Input Layer', input_shapes=2) + Hidden_Layer_1 = TransferMechanism(name='Hidden Layer_1', input_shapes=5) + Hidden_Layer_2 = TransferMechanism(name='Hidden Layer_2', input_shapes=4) + Output_Layer = TransferMechanism(name='Output Layer', input_shapes=3) Input_Weights_matrix = (np.arange(2 * 5).reshape((2, 5)) + 1) / (2 * 5) Middle_Weights_matrix = (np.arange(5 * 4).reshape((5, 4)) + 1) / (5 * 4) Output_Weights_matrix = (np.arange(4 * 3).reshape((4, 3)) + 1) / (4 * 3) @@ -493,10 +493,10 @@ def test_add_linear_processing_pathway_containing_nodes_with_existing_projection def test_add_backpropagation_learning_pathway_containing_nodes_with_existing_projections(self): """ Test that 
add_backpropagation_learning_pathway uses MappingProjections already specified for Hidden_layer_2 and Output_Layer in the pathway it creates within the Composition""" - Input_Layer = TransferMechanism(name='Input Layer', size=2) - Hidden_Layer_1 = TransferMechanism(name='Hidden Layer_1', size=5) - Hidden_Layer_2 = TransferMechanism(name='Hidden Layer_2', size=4) - Output_Layer = TransferMechanism(name='Output Layer', size=3) + Input_Layer = TransferMechanism(name='Input Layer', input_shapes=2) + Hidden_Layer_1 = TransferMechanism(name='Hidden Layer_1', input_shapes=5) + Hidden_Layer_2 = TransferMechanism(name='Hidden Layer_2', input_shapes=4) + Output_Layer = TransferMechanism(name='Output Layer', input_shapes=3) Input_Weights_matrix = (np.arange(2 * 5).reshape((2, 5)) + 1) / (2 * 5) Middle_Weights_matrix = (np.arange(5 * 4).reshape((5, 4)) + 1) / (5 * 4) Output_Weights_matrix = (np.arange(4 * 3).reshape((4, 3)) + 1) / (4 * 3) @@ -3211,7 +3211,7 @@ def test_inputs_key_errors(self, input_args): def test_input_shape_errors(self): # Mechanism with single InputPort - mech = pnl.TransferMechanism(name='input', size=2) + mech = pnl.TransferMechanism(name='input', input_shapes=2) comp = pnl.Composition(mech, name='comp') with pytest.raises(CompositionError) as error_text: @@ -3231,7 +3231,7 @@ def test_input_shape_errors(self): assert "is incorrect for Mechanism with a single InputPort" in str(error_text.value) # Mechanism with two InputPorts - mech2 = pnl.TransferMechanism(name='input', size=(2,2)) + mech2 = pnl.TransferMechanism(name='input', input_shapes=(2, 2)) comp = pnl.Composition(mech2, name='comp') with pytest.raises(CompositionError) as error_text: @@ -3847,9 +3847,9 @@ def test_LPP_wrong_component(self): pytest.param(pnl.ExecutionMode.PTXExec, marks=[pytest.mark.llvm, pytest.mark.cuda]), ]) def test_execute_no_inputs(self, mode): - m_inner = ProcessingMechanism(size=2) + m_inner = ProcessingMechanism(input_shapes=2) inner_comp = Composition(pathways=[m_inner]) - m_outer = ProcessingMechanism(size=2) + m_outer = ProcessingMechanism(input_shapes=2) outer_comp = Composition(pathways=[m_outer, inner_comp]) with pytest.warns(UserWarning, match="No inputs provided in call"): @@ -3859,9 +3859,9 @@ def test_execute_no_inputs(self, mode): @pytest.mark.composition def test_run_no_inputs(self, comp_mode): - m_inner = ProcessingMechanism(size=2) + m_inner = ProcessingMechanism(input_shapes=2) inner_comp = Composition(pathways=[m_inner]) - m_outer = ProcessingMechanism(size=2) + m_outer = ProcessingMechanism(input_shapes=2) outer_comp = Composition(pathways=[m_outer, inner_comp]) with pytest.warns(UserWarning, match="No inputs provided in call"): @@ -4083,7 +4083,7 @@ def test_3_mechanisms_2_origins_1_terminal_mimo_all_sum(self, benchmark, comp_mo @pytest.mark.benchmark(group="Recurrent") def test_run_recurrent_transfer_mechanism(self, benchmark, comp_mode): comp = Composition() - A = RecurrentTransferMechanism(size=3, function=Linear(slope=5.0), name="A") + A = RecurrentTransferMechanism(input_shapes=3, function=Linear(slope=5.0), name="A") comp.add_node(A) sched = Scheduler(composition=comp) output1 = comp.run(inputs={A: [[1.0, 2.0, 3.0]]}, scheduler=sched, execution_mode=comp_mode) @@ -4098,7 +4098,8 @@ def test_run_recurrent_transfer_mechanism(self, benchmark, comp_mode): @pytest.mark.benchmark(group="Recurrent") def test_run_recurrent_transfer_mechanism_hetero(self, benchmark, comp_mode): comp = Composition() - R = RecurrentTransferMechanism(size=1, + R = RecurrentTransferMechanism( + 
input_shapes=1, function=Logistic(), hetero=-2.0, output_ports = [RESULT]) @@ -4117,7 +4118,8 @@ def test_run_recurrent_transfer_mechanism_hetero(self, benchmark, comp_mode): @pytest.mark.benchmark(group="Recurrent") def test_run_recurrent_transfer_mechanism_integrator(self, benchmark, comp_mode): comp = Composition() - R = RecurrentTransferMechanism(size=1, + R = RecurrentTransferMechanism( + input_shapes=1, function=Logistic(), hetero=-2.0, integrator_mode=True, @@ -4138,7 +4140,7 @@ def test_run_recurrent_transfer_mechanism_integrator(self, benchmark, comp_mode) @pytest.mark.benchmark(group="Recurrent") def test_run_recurrent_transfer_mechanism_vector_2(self, benchmark, comp_mode): comp = Composition() - R = RecurrentTransferMechanism(size=2, function=Logistic()) + R = RecurrentTransferMechanism(input_shapes=2, function=Logistic()) comp.add_node(R) comp._analyze_graph() val = comp.run(inputs={R: [[1.0, 2.0]]}, num_trials=1, execution_mode=comp_mode) @@ -4155,7 +4157,8 @@ def test_run_recurrent_transfer_mechanism_vector_2(self, benchmark, comp_mode): @pytest.mark.benchmark(group="Recurrent") def test_run_recurrent_transfer_mechanism_hetero_2(self, benchmark, comp_mode): comp = Composition() - R = RecurrentTransferMechanism(size=2, + R = RecurrentTransferMechanism( + input_shapes=2, function=Logistic(), hetero=-2.0, output_ports = [RESULT]) @@ -4174,7 +4177,8 @@ def test_run_recurrent_transfer_mechanism_hetero_2(self, benchmark, comp_mode): @pytest.mark.benchmark(group="Recurrent") def test_run_recurrent_transfer_mechanism_integrator_2(self, benchmark, comp_mode): comp = Composition() - R = RecurrentTransferMechanism(size=2, + R = RecurrentTransferMechanism( + input_shapes=2, function=Logistic(), hetero=-2.0, integrator_mode=True, @@ -4287,7 +4291,7 @@ def _check_comp_ex(self, comp, comparison, comp_mode, struct_name, context=None, def test_multiple_runs_with_parameter_change(self, comp_mode): struct_name = '_param' - A = TransferMechanism(size=2) + A = TransferMechanism(input_shapes=2) comp = Composition([A]) inputs_dict = {A: [1, 1]} @@ -4332,7 +4336,7 @@ def test_multiple_runs_with_parameter_change(self, comp_mode): def test_multiple_runs_with_parameter_change_arr(self, comp_mode): struct_name = '_state' - A = TransferMechanism(size=2, integrator_mode=True) + A = TransferMechanism(input_shapes=2, integrator_mode=True) comp = Composition([A]) inputs_dict = {A: [1, 1]} @@ -4378,7 +4382,7 @@ def test_multiple_runs_with_parameter_change_from_data_struct(self, comp_mode): # non-existence of compiled structures after set struct_name = '_data' - A = TransferMechanism(size=2, integrator_mode=True) + A = TransferMechanism(input_shapes=2, integrator_mode=True) comp = Composition([A]) inputs_dict = {A: [1, 1]} @@ -6553,25 +6557,28 @@ def inputs_generator_function(): def test_get_input_format(self, form, use_labels, show_nested, num_trials, expected_format_string): """Also tests input_labels_dict""" - A = pnl.ProcessingMechanism(size=1, name='A', + A = pnl.ProcessingMechanism( + input_shapes=1, name='A', input_labels={0:{'red':0, 'green':1}, 1:{'blue':2, 'yellow':3}}) - B = pnl.ProcessingMechanism(size=2, name='B') - C = pnl.ProcessingMechanism(size=[3,3], + B = pnl.ProcessingMechanism(input_shapes=2, name='B') + C = pnl.ProcessingMechanism( + input_shapes=[3, 3], input_ports=['C INPUT 1', 'C INPUT 2'], input_labels={'C INPUT 1':{'red':[0,0,0], 'green':[1,1,1], 'orange':[2,2,2]}, 'C INPUT 2':{'blue':[3,3,3], 'yellow':[4,4,4], 'purple':[5,5,5]}}, name='C') assert C.variable.shape == (2,3) - X = 
ProcessingMechanism(size=[3,3], + X = ProcessingMechanism( + input_shapes=[3, 3], input_ports=['X INPUT 1', 'X INPUT 2'], name='X', # input_labels={0:{'red':[0,0,0], 'green':[1,1,1]}} # Specify dict for only one port ) # Use TransferMechanism so that 2nd OutputPort uses 2nd item of Mechanism's value # (i.e. ,without having to specify that explicitly, as would be the case for ProcessingMechanism) - Y = pnl.TransferMechanism(input_ports=[{NAME:'Y INPUT 1', pnl.SIZE: 3, pnl.FUNCTION: pnl.Reduce}, - {NAME:'Y INPUT 2', pnl.SIZE: 3}], + Y = pnl.TransferMechanism(input_ports=[{NAME:'Y INPUT 1', pnl.INPUT_SHAPES: 3, pnl.FUNCTION: pnl.Reduce}, + {NAME:'Y INPUT 2', pnl.INPUT_SHAPES: 3}], # Test specification of labels for all InputPorts of Mechanism: input_labels={'red':[0,0,0], 'green':[1,1,1]}, name='Y') @@ -7792,7 +7799,7 @@ def test_force_two_control_mechanisms_as_OUTPUT(self): assert {ctl_mech_B} == set(comp.get_nodes_by_role(NodeRole.TERMINAL)) def test_LEARNING_hebbian(self): - A = RecurrentTransferMechanism(name='A', size=2, enable_learning=True) + A = RecurrentTransferMechanism(name='A', input_shapes=2, enable_learning=True) comp = Composition(pathways=A) pathway = comp.pathways[0] assert pathway.target is None diff --git a/tests/composition/test_control.py b/tests/composition/test_control.py index 04a512b15ee..a9cf60e1fed 100644 --- a/tests/composition/test_control.py +++ b/tests/composition/test_control.py @@ -1205,7 +1205,7 @@ def test_ocm_state_feature_specs_and_warnings_and_errors(self, state_feature_arg ib = pnl.ProcessingMechanism(name='IB') ic = pnl.ProcessingMechanism(name='IC') oa = pnl.ProcessingMechanism(name='OA') - ob = pnl.ProcessingMechanism(name='OB', size=3) + ob = pnl.ProcessingMechanism(name='OB', input_shapes=3) oc = pnl.ProcessingMechanism(name='OC') ext = pnl.ProcessingMechanism(name='EXT') icomp = pnl.Composition(pathways=[ia,ib,ic], name='INNER COMP') @@ -1448,7 +1448,7 @@ def test_state_features_in_nested_composition_as_agent_rep(self, nested_agent_re icomp = pnl.Composition(nodes=[I1,I2], name='INNER COMP') A = pnl.ComparatorMechanism(name='A') B = pnl.ProcessingMechanism(name='B') - C = pnl.ProcessingMechanism(name='C', size=3) + C = pnl.ProcessingMechanism(name='C', input_shapes=3) D = pnl.ProcessingMechanism(name='D') mcomp = pnl.Composition(pathways=[[A,B,C], icomp], name='MIDDLE COMP') ocomp = pnl.Composition(nodes=[mcomp], name='OUTER COMP') @@ -1701,7 +1701,7 @@ def test_ocm_state_and_state_dict(self): ib = pnl.ProcessingMechanism(name='IB') ic = pnl.ProcessingMechanism(name='IC') oa = pnl.ProcessingMechanism(name='OA') - ob = pnl.ProcessingMechanism(name='OB', size=3) + ob = pnl.ProcessingMechanism(name='OB', input_shapes=3) oc = pnl.ProcessingMechanism(name='OC') icomp = pnl.Composition(pathways=[ia,ib,ic], name='INNER COMP') ocomp = pnl.Composition(pathways=[icomp], name='OUTER COMP') @@ -2093,20 +2093,20 @@ def test_two_tier_ocm(self): # Task Layer: [Color, Motion] {0, 1} Mutually Exclusive taskLayer = pnl.TransferMechanism(default_variable=[[0.0, 0.0]], - # size=2, + # input_shapes=2, function=pnl.Linear(slope=1, intercept=0), output_ports=[pnl.RESULT], name='Task Input [I1, I2]') # Stimulus Layer: [Color Stimulus, Motion Stimulus] stimulusInfo = pnl.TransferMechanism(default_variable=[[0.0, 0.0]], - # size=2, + # input_shapes=2, function=pnl.Linear(slope=1, intercept=0), output_ports=[pnl.RESULT], name="Stimulus Input [S1, S2]") congruenceWeighting = pnl.TransferMechanism(default_variable=[[0.0, 0.0]], - size=2, + input_shapes=2, 
function=pnl.Linear(slope=congruentWeight, intercept=0), name='Congruence * Automatic Component') @@ -2126,14 +2126,15 @@ def test_two_tier_ocm(self): # Hadamard product of Activation and Stimulus Information nonAutomaticComponent = pnl.TransferMechanism(default_variable=[[0.0, 0.0]], - size=2, + input_shapes=2, function=pnl.Linear(slope=1, intercept=0), input_ports=pnl.InputPort(combine=pnl.PRODUCT), output_ports=[pnl.RESULT], name='Non-Automatic Component') # Summation of nonAutomatic and Automatic Components - ddmCombination = pnl.TransferMechanism(size=1, + ddmCombination = pnl.TransferMechanism( + input_shapes=1, function=pnl.Linear(slope=1, intercept=0), input_ports=pnl.InputPort(combine=pnl.SUM), output_ports=[pnl.RESULT], @@ -2150,13 +2151,14 @@ def test_two_tier_ocm(self): name='DDM') weightingFunction = pnl.TransferMechanism(default_variable=[[0.0, 0.0]], - size=2, + input_shapes=2, function=pnl.Linear(slope=1, intercept=0), input_ports=pnl.InputPort(combine=pnl.PRODUCT), output_ports=[pnl.RESULT], name='Bias') - topCorrect = pnl.TransferMechanism(size=1, + topCorrect = pnl.TransferMechanism( + input_shapes=1, function=pnl.Linear(slope=1, intercept=0), input_ports=pnl.InputPort(combine=pnl.PRODUCT), output_ports=[pnl.RESULT], @@ -2310,7 +2312,7 @@ def test_multilevel_control(self, comp_mode, benchmark): @pytest.mark.composition def test_recurrent_control(self, comp_mode): monitor = pnl.TransferMechanism(default_variable=[[0.0]], - size=1, + input_shapes=1, function=pnl.Linear(slope=1, intercept=0), output_ports=[pnl.RESULT], name='monitor') @@ -3479,7 +3481,7 @@ def computeAccuracy(trialInformation): # Task Layer: [Color, Motion] {0, 1} Mutually Exclusive # Origin Node taskLayer = pnl.TransferMechanism(default_variable=[[0.0, 0.0]], - size=2, + input_shapes=2, function=pnl.Linear(slope=1, intercept=0), output_ports=[pnl.RESULT], name='Task Input [I1, I2]') @@ -3487,7 +3489,7 @@ def computeAccuracy(trialInformation): # Stimulus Layer: [Color Stimulus, Motion Stimulus] # Origin Node stimulusInfo = pnl.TransferMechanism(default_variable=[[0.0, 0.0]], - size=2, + input_shapes=2, function=pnl.Linear(slope=1, intercept=0), output_ports=[pnl.RESULT], name="Stimulus Input [S1, S2]") @@ -3507,14 +3509,15 @@ def computeAccuracy(trialInformation): # Hadamard product of Activation and Stimulus Information nonAutomaticComponent = pnl.TransferMechanism(default_variable=[[0.0, 0.0]], - size=2, + input_shapes=2, function=pnl.Linear(slope=1, intercept=0), input_ports=pnl.InputPort(combine=pnl.PRODUCT), output_ports=[pnl.RESULT], name='Non-Automatic Component [S1*Activity1, S2*Activity2]') # Summation of nonAutomatic and Automatic Components - ddmCombination = pnl.TransferMechanism(size=1, + ddmCombination = pnl.TransferMechanism( + input_shapes=1, function=pnl.Linear(slope=1, intercept=0), input_ports=pnl.InputPort(combine=pnl.SUM), output_ports=[pnl.RESULT], diff --git a/tests/composition/test_gating.py b/tests/composition/test_gating.py index e27cda9f4b7..04750f7414e 100644 --- a/tests/composition/test_gating.py +++ b/tests/composition/test_gating.py @@ -25,7 +25,7 @@ def test_gating(benchmark, comp_mode): ) Gating_Mechanism = pnl.GatingMechanism( - size=[1], + input_shapes=[1], gating_signals=[Output_Layer.output_port] ) @@ -73,7 +73,7 @@ def test_gating(benchmark, comp_mode): # ) # # Gating_Mechanism = pnl.ControlMechanism( -# size=[1], +# input_shapes=[1], # control_signals=[Output_Layer.output_port] # ) # diff --git a/tests/composition/test_interfaces.py 
b/tests/composition/test_interfaces.py index b43bf535656..8cec47de867 100644 --- a/tests/composition/test_interfaces.py +++ b/tests/composition/test_interfaces.py @@ -446,7 +446,7 @@ def test_input_specification_multiple_nested_compositions(self): level_2 = Composition(name="level_2") A2 = TransferMechanism(name="A2", - size=2, + input_shapes=2, function=Linear(slope=1.)) B2 = TransferMechanism(name="B2", function=Linear(slope=2.)) diff --git a/tests/composition/test_learning.py b/tests/composition/test_learning.py index 77e94d70d4a..4c15aa2a699 100644 --- a/tests/composition/test_learning.py +++ b/tests/composition/test_learning.py @@ -245,7 +245,7 @@ def test_target_dict_spec_multi_trial_lists_bp(self): def test_dict_target_spec_converging_pathways(self): A = TransferMechanism(name="converging-learning-pathways-mech-A") B = TransferMechanism(name="converging-learning-pathways-mech-B") - C = TransferMechanism(name="converging-learning-pathways-mech-C", size=2) + C = TransferMechanism(name="converging-learning-pathways-mech-C", input_shapes=2) D = TransferMechanism(name="converging-learning-pathways-mech-D") E = TransferMechanism(name="converging-learning-pathways-mech-E") comp = Composition() @@ -265,7 +265,7 @@ def test_dict_target_spec_converging_pathways(self): def test_function_target_spec_converging_pathways(self): A = TransferMechanism(name="converging-learning-pathways-mech-A") B = TransferMechanism(name="converging-learning-pathways-mech-B") - C = TransferMechanism(name="converging-learning-pathways-mech-C", size=2) + C = TransferMechanism(name="converging-learning-pathways-mech-C", input_shapes=2) D = TransferMechanism(name="converging-learning-pathways-mech-D") E = TransferMechanism(name="converging-learning-pathways-mech-E") comp = Composition() @@ -590,7 +590,7 @@ def test_simple_hebbian(self): size = 9 Hebb2 = pnl.RecurrentTransferMechanism( - size=size, + input_shapes=size, function=pnl.Linear, enable_learning=True, hetero=0., @@ -614,7 +614,8 @@ def test_simple_hebbian(self): class TestReinforcement: def test_rl(self): - input_layer = pnl.TransferMechanism(size=2, + input_layer = pnl.TransferMechanism( + input_shapes=2, name='Input Layer') input_layer.log.set_log_conditions(items=pnl.VALUE) action_selection = pnl.DDM(input_format=pnl.ARRAY, @@ -654,7 +655,8 @@ def test_rl(self): ) def test_reinforcement_fixed_targets(self): - input_layer = pnl.TransferMechanism(size=2, + input_layer = pnl.TransferMechanism( + input_shapes=2, name='Input Layer', ) @@ -1454,7 +1456,8 @@ def test_prediction_error_delta_first_run(self): err_msg="mismatch on timestep {}".format(i)) def test_rl_enable_learning_false(self): - input_layer = pnl.TransferMechanism(size=2, + input_layer = pnl.TransferMechanism( + input_shapes=2, name='Input Layer') input_layer.log.set_log_conditions(items=pnl.VALUE) action_selection = pnl.DDM(input_format=pnl.ARRAY, @@ -1627,29 +1630,29 @@ def Concatenate(variable): return np.append(variable[0], variable[1]) stim_in = pnl.ProcessingMechanism(name='Stimulus', - size=stim_size) + input_shapes=stim_size) context_in = pnl.ProcessingMechanism(name='Context', - size=context_size) + input_shapes=context_size) reward_in = pnl.ProcessingMechanism(name='Reward', - size=1) + input_shapes=1) perceptual_state = pnl.ProcessingMechanism(name='Current Port', function=Concatenate, input_ports=[{pnl.NAME: 'STIM', - pnl.SIZE: stim_size, + pnl.INPUT_SHAPES: stim_size, pnl.PROJECTIONS: stim_in}, {pnl.NAME: 'CONTEXT', - pnl.SIZE: context_size, + pnl.INPUT_SHAPES: context_size, 
pnl.PROJECTIONS: context_in}]) action = pnl.ProcessingMechanism(name='Action', - size=num_actions) + input_shapes=num_actions) # Nested Composition rl_agent_state = pnl.ProcessingMechanism(name='RL Agent Port', - size=5) + input_shapes=5) rl_agent_action = pnl.ProcessingMechanism(name='RL Agent Action', - size=5) + input_shapes=5) rl_agent = pnl.Composition(name='RL Agent') rl_learning_components = rl_agent.add_reinforcement_learning_pathway([rl_agent_state, rl_agent_action]) @@ -1784,11 +1787,13 @@ def test_nested_learn_then_run(self): wco = np.random.rand(D_c, D_o) * 0.02 - 0.01 who = np.random.rand(D_h, D_o) * 0.02 - 0.01 - il = pnl.TransferMechanism(size=D_i, name='input') - cl = pnl.TransferMechanism(size=D_c, name='control') - hl = pnl.TransferMechanism(size=D_h, name='hidden', + il = pnl.TransferMechanism(input_shapes=D_i, name='input') + cl = pnl.TransferMechanism(input_shapes=D_c, name='control') + hl = pnl.TransferMechanism( + input_shapes=D_h, name='hidden', function=pnl.Logistic(bias=-2)) - ol = pnl.TransferMechanism(size=D_o, name='output', + ol = pnl.TransferMechanism( + input_shapes=D_o, name='output', function=pnl.Logistic(bias=-2)) pih = pnl.MappingProjection(matrix=wih) pch = pnl.MappingProjection(matrix=wch) @@ -1852,9 +1857,9 @@ def test_stranded_nested_target_mech_error(self): ) def test_no_learning_of_spanning_nested_compositions(self): - input_mech = pnl.ProcessingMechanism(name='input_mech', size=2) - hidden_mech = pnl.ProcessingMechanism(name='hidden_mech', size=2) - output_mech = pnl.ProcessingMechanism(name='output_mech', size=2) + input_mech = pnl.ProcessingMechanism(name='input_mech', input_shapes=2) + hidden_mech = pnl.ProcessingMechanism(name='hidden_mech', input_shapes=2) + output_mech = pnl.ProcessingMechanism(name='output_mech', input_shapes=2) nested = pnl.Composition(name='nested', nodes=[hidden_mech]) error_msg = ('Learning in Python mode does not currently support nested Compositions; ' 'try using an AutodiffComposition with ExecutionMode.PyTorch.') @@ -1866,10 +1871,12 @@ class TestBackPropLearning: def test_matrix_spec_and_learning_rate(self): - T1 = pnl.TransferMechanism(size = 2, + T1 = pnl.TransferMechanism( + input_shapes=2, initial_value= [[0.0,0.0]], name = 'INPUT LAYER') - T2 = pnl.TransferMechanism(size= 1, + T2 = pnl.TransferMechanism( + input_shapes=1, function =pnl.Logistic, name = 'OUTPUT LAYER') W = np.array([[0.1],[0.2]]) @@ -1918,15 +1925,15 @@ def test_different_learning_rate_specs_for_comp(self, spec_types): def test_basic_python_back_prop(self): input_layer = pnl.TransferMechanism(name="input", - size=2, + input_shapes=2, function=pnl.Logistic()) hidden_layer = pnl.TransferMechanism(name="hidden", - size=2, + input_shapes=2, function=pnl.Logistic()) output_layer = pnl.TransferMechanism(name="output", - size=2, + input_shapes=2, function=pnl.Logistic()) comp = pnl.Composition(name="backprop-composition") @@ -1974,17 +1981,17 @@ def test_backprop_fct_with_2_inputs_to_linear_combination_product(self, test_var comp_type = test_vars[1] exec_mode = test_vars[2] input_layer1 = pnl.TransferMechanism(name="input1", - size=2, + input_shapes=2, function=pnl.Linear()) input_layer2 = pnl.TransferMechanism(name="input2", - size=2, + input_shapes=2, function=pnl.Linear()) hidden_layer = pnl.ProcessingMechanism(name="hidden", input_ports=['input1','input2'], - size=(4,4), + input_shapes=(4, 4), function=pnl.LinearCombination(operation=pnl.PRODUCT)) output_layer = 
pnl.TransferMechanism(name="output", - size=2, + input_shapes=2, function=pnl.Linear()) i1_h_wts = pnl.MappingProjection(name='input_to_hidden1', sender=input_layer1, @@ -2036,20 +2043,20 @@ def test_backprop_fct_with_3_inputs_to_linear_combination_product(self, test_var comp_type = test_vars[1] exec_mode = test_vars[2] input_layer1 = pnl.TransferMechanism(name="input1", - size=2, + input_shapes=2, function=pnl.Linear()) input_layer2 = pnl.TransferMechanism(name="input2", - size=2, + input_shapes=2, function=pnl.Linear()) input_layer3 = pnl.TransferMechanism(name="input3", - size=2, + input_shapes=2, function=pnl.Linear()) hidden_layer = pnl.ProcessingMechanism(name="hidden", input_ports=['input1','input2','input3'], - size=(5,5,5), + input_shapes=(5, 5, 5), function=pnl.LinearCombination(operation=pnl.PRODUCT)) output_layer = pnl.TransferMechanism(name="output", - size=2, + input_shapes=2, function=pnl.Linear()) i1_h_wts = pnl.MappingProjection(name='input_to_hidden1', sender=input_layer1, @@ -2100,9 +2107,9 @@ def test_backprop_fct_with_3_inputs_to_linear_combination_product(self, test_var def test_two_output_ports_on_OUTPUT_Node(self): - input_A = pnl.ProcessingMechanism(name='INPUT_A', size=2) - input_B = pnl.ProcessingMechanism(name='INPUT_B', size=2) - output = pnl.ProcessingMechanism(name='OUTPUT', size=(2,3)) + input_A = pnl.ProcessingMechanism(name='INPUT_A', input_shapes=2) + input_B = pnl.ProcessingMechanism(name='INPUT_B', input_shapes=2) + output = pnl.ProcessingMechanism(name='OUTPUT', input_shapes=(2, 3)) comp = Composition(name='comp') with pytest.raises(CompositionError) as error_text: @@ -2242,19 +2249,19 @@ def test_multilayer_truth(self, expected_quantities): input_layer = pnl.TransferMechanism(name='input_layer', function=pnl.Logistic, - size=2) + input_shapes=2) hidden_layer_1 = pnl.TransferMechanism(name='hidden_layer_1', function=pnl.Logistic, - size=5) + input_shapes=5) hidden_layer_2 = pnl.TransferMechanism(name='hidden_layer_2', function=pnl.Logistic, - size=4) + input_shapes=4) output_layer = pnl.TransferMechanism(name='output_layer', function=pnl.Logistic, - size=3) + input_shapes=3) input_weights_matrix = (np.arange(2 * 5).reshape((2, 5)) + 1) / (2 * 5) middle_weights_matrix = (np.arange(5 * 4).reshape((5, 4)) + 1) / (5 * 4) @@ -2734,10 +2741,10 @@ def test_stroop_model_learning(self, order): word_to_hidden_wts = np.arange(4).reshape((2, 2)) hidden_to_response_wts = np.arange(4).reshape((2, 2)) - color_comp = pnl.TransferMechanism(size=2, name='Color') - word_comp = pnl.TransferMechanism(size=2, name='Word') - hidden_comp = pnl.TransferMechanism(size=2, function=pnl.Logistic(), name='Hidden') - response_comp = pnl.TransferMechanism(size=2, function=pnl.Logistic(), name='Response') + color_comp = pnl.TransferMechanism(input_shapes=2, name='Color') + word_comp = pnl.TransferMechanism(input_shapes=2, name='Word') + hidden_comp = pnl.TransferMechanism(input_shapes=2, function=pnl.Logistic(), name='Hidden') + response_comp = pnl.TransferMechanism(input_shapes=2, function=pnl.Logistic(), name='Response') if order == 'color_full': color_pathway = [color_comp, @@ -2955,11 +2962,13 @@ def test_pytorch_equivalence_with_learning_enabled_composition(self): wco = np.random.rand(D_c, D_o) * 0.02 - 0.01 who = np.random.rand(D_h, D_o) * 0.02 - 0.01 - il = pnl.TransferMechanism(size=D_i, name='input') - cl = pnl.TransferMechanism(size=D_c, name='control') - hl = pnl.TransferMechanism(size=D_h, name='hidden', + il = pnl.TransferMechanism(input_shapes=D_i, name='input') + cl = 
pnl.TransferMechanism(size=D_c, name='control') - hl = pnl.TransferMechanism(size=D_h, name='hidden', + il = pnl.TransferMechanism(input_shapes=D_i, name='input') + cl = pnl.TransferMechanism(input_shapes=D_c, name='control') + hl = pnl.TransferMechanism( + input_shapes=D_h, name='hidden', function=pnl.Logistic(bias=-2)) - ol = pnl.TransferMechanism(size=D_o, name='output', + ol = pnl.TransferMechanism( + input_shapes=D_o, name='output', function=pnl.Logistic(bias=-2)) pih = pnl.MappingProjection(matrix=wih) pch = pnl.MappingProjection(matrix=wch) @@ -3092,14 +3101,14 @@ class TestRumelhartSemanticNetwork: def test_rumelhart_semantic_network_sequential(self): - rep_in = pnl.TransferMechanism(size=10, name='REP_IN') - rel_in = pnl.TransferMechanism(size=11, name='REL_IN') - rep_hidden = pnl.TransferMechanism(size=4, function=pnl.Logistic, name='REP_HIDDEN') - rel_hidden = pnl.TransferMechanism(size=5, function=pnl.Logistic, name='REL_HIDDEN') - rep_out = pnl.TransferMechanism(size=10, function=pnl.Logistic, name='REP_OUT') - prop_out = pnl.TransferMechanism(size=12, function=pnl.Logistic, name='PROP_OUT') - qual_out = pnl.TransferMechanism(size=13, function=pnl.Logistic, name='QUAL_OUT') - act_out = pnl.TransferMechanism(size=14, function=pnl.Logistic, name='ACT_OUT') + rep_in = pnl.TransferMechanism(input_shapes=10, name='REP_IN') + rel_in = pnl.TransferMechanism(input_shapes=11, name='REL_IN') + rep_hidden = pnl.TransferMechanism(input_shapes=4, function=pnl.Logistic, name='REP_HIDDEN') + rel_hidden = pnl.TransferMechanism(input_shapes=5, function=pnl.Logistic, name='REL_HIDDEN') + rep_out = pnl.TransferMechanism(input_shapes=10, function=pnl.Logistic, name='REP_OUT') + prop_out = pnl.TransferMechanism(input_shapes=12, function=pnl.Logistic, name='PROP_OUT') + qual_out = pnl.TransferMechanism(input_shapes=13, function=pnl.Logistic, name='QUAL_OUT') + act_out = pnl.TransferMechanism(input_shapes=14, function=pnl.Logistic, name='ACT_OUT') comp = pnl.Composition() diff --git a/tests/composition/test_models.py b/tests/composition/test_models.py index 10169b1f2c7..5bbff13c2a1 100644 --- a/tests/composition/test_models.py +++ b/tests/composition/test_models.py @@ -64,16 +64,19 @@ def test_bustamante_Stroop_model(self): # INPUT UNITS # colors: ('red', 'green'), words: ('RED','GREEN') - colors_input_layer = pnl.TransferMechanism(size=2, + colors_input_layer = pnl.TransferMechanism( + input_shapes=2, function=psyneulink.core.components.functions.nonstateful.transferfunctions.Linear, name='COLORS_INPUT') - words_input_layer = pnl.TransferMechanism(size=2, + words_input_layer = pnl.TransferMechanism( + input_shapes=2, function=psyneulink.core.components.functions.nonstateful.transferfunctions.Linear, name='WORDS_INPUT') # Task layer, tasks: ('name the color', 'read the word') - task_layer = pnl.TransferMechanism(size=2, + task_layer = pnl.TransferMechanism( + input_shapes=2, function=psyneulink.core.components.functions.nonstateful.transferfunctions.Linear, name='TASK') @@ -84,7 +87,8 @@ def test_bustamante_Stroop_model(self): # randomly distributed noise to the net input # time averaging = integration_rate = 0.1 unit_noise = 0.005 - colors_hidden_layer = pnl.TransferMechanism(size=2, + colors_hidden_layer = pnl.TransferMechanism( + input_shapes=2, function=psyneulink.core.components.functions.nonstateful.transferfunctions .Logistic(gain=1.0, x_0=4.0), # should be able to get same result with offset = -4.0 noise=pnl.NormalDist(mean=0, standard_deviation=unit_noise).function, integrator_mode=True, integration_rate=0.1, name='COLORS HIDDEN') # words_hidden: ('RED','GREEN') - words_hidden_layer = pnl.TransferMechanism(size=2, + words_hidden_layer = pnl.TransferMechanism( + input_shapes=2, 
function=pnl.Logistic(gain=1.0, x_0=4.0), integrator_mode=True, noise=pnl.NormalDist(mean=0, @@ -107,7 +112,8 @@ def test_bustamante_Stroop_model(self): # Response layer, provide input to accumulator, responses: ('red', 'green') # time averaging = tau = 0.1 # randomly distributed noise to the net input - response_layer = pnl.TransferMechanism(size=2, + response_layer = pnl.TransferMechanism( + input_shapes=2, function=psyneulink.core.components.functions.nonstateful.transferfunctions.Logistic, name='RESPONSE', integrator_mode=True, @@ -305,26 +311,26 @@ def switch_trial_type(): # def test_botvinick_model(self): # - # colors_input_layer = pnl.TransferMechanism(size=3, + # colors_input_layer = pnl.TransferMechanism(input_shapes=3, # function=pnl.Linear, # name='COLORS_INPUT') # - # words_input_layer = pnl.TransferMechanism(size=3, + # words_input_layer = pnl.TransferMechanism(input_shapes=3, # function=pnl.Linear, # name='WORDS_INPUT') # - # task_input_layer = pnl.TransferMechanism(size=2, + # task_input_layer = pnl.TransferMechanism(input_shapes=2, # function=pnl.Linear, # name='TASK_INPUT') # - # task_layer = pnl.RecurrentTransferMechanism(size=2, + # task_layer = pnl.RecurrentTransferMechanism(input_shapes=2, # function=pnl.Logistic(), # hetero=-2, # integrator_mode=True, # integration_rate=0.01, # name='TASK_LAYER') # - # colors_hidden_layer = pnl.RecurrentTransferMechanism(size=3, + # colors_hidden_layer = pnl.RecurrentTransferMechanism(input_shapes=3, # function=pnl.Logistic(bias=4.0), # # bias 4.0 is -4.0 in the paper see Docs for description # integrator_mode=True, @@ -332,7 +338,7 @@ def switch_trial_type(): # integration_rate=0.01, # cohen-huston text says 0.01 # name='COLORS_HIDDEN') # - # words_hidden_layer = pnl.RecurrentTransferMechanism(size=3, + # words_hidden_layer = pnl.RecurrentTransferMechanism(input_shapes=3, # function=pnl.Logistic(bias=4.0), # integrator_mode=True, # hetero=-2, @@ -340,7 +346,7 @@ def switch_trial_type(): # name='WORDS_HIDDEN') # # # Response layer, responses: ('red', 'green') - # response_layer = pnl.RecurrentTransferMechanism(size=2, + # response_layer = pnl.RecurrentTransferMechanism(input_shapes=2, # function=pnl.Logistic(), # hetero=-2.0, # integrator_mode=True, diff --git a/tests/composition/test_parameterestimationcomposition.py b/tests/composition/test_parameterestimationcomposition.py index 12c39a64a3a..ce2c28d9e6d 100644 --- a/tests/composition/test_parameterestimationcomposition.py +++ b/tests/composition/test_parameterestimationcomposition.py @@ -13,10 +13,10 @@ ) -input_node_1 = pnl.ProcessingMechanism(size=1) -input_node_2 = pnl.ProcessingMechanism(size=2) -input_node_3 = pnl.ProcessingMechanism(size=3) -output_node = pnl.ProcessingMechanism(size=2) +input_node_1 = pnl.ProcessingMechanism(input_shapes=1) +input_node_2 = pnl.ProcessingMechanism(input_shapes=2) +input_node_3 = pnl.ProcessingMechanism(input_shapes=3) +output_node = pnl.ProcessingMechanism(input_shapes=2) model = pnl.Composition( [{input_node_1, input_node_2, input_node_3}, output_node], name="model" ) diff --git a/tests/composition/test_show_graph.py b/tests/composition/test_show_graph.py index 296f5964eed..7f9ecf2b208 100644 --- a/tests/composition/test_show_graph.py +++ b/tests/composition/test_show_graph.py @@ -74,13 +74,13 @@ class TestNested: def test_multiple_projections_to_node_of_nested_composition(self): '''This is based on the nback script''' - stim = TransferMechanism(name='STIM', size=5) - context = TransferMechanism(name='CONTEXT', size=5) + stim = 
TransferMechanism(name='STIM', input_shapes=5) + context = TransferMechanism(name='CONTEXT', input_shapes=5) # Nested comp (ffn) - stim_input_layer = TransferMechanism(name='STIM INPUT LAYER', size=5) - context_input_layer = TransferMechanism(name='CONTEXT INPUT LAYER', size=5) - match_output_layer = TransferMechanism(name='MATCH LAYER', size=1) + stim_input_layer = TransferMechanism(name='STIM INPUT LAYER', input_shapes=5) + context_input_layer = TransferMechanism(name='CONTEXT INPUT LAYER', input_shapes=5) + match_output_layer = TransferMechanism(name='MATCH LAYER', input_shapes=1) ffn = Composition(name='FFN', pathways=[[stim_input_layer, match_output_layer], [context_input_layer, match_output_layer]]) diff --git a/tests/control/test_gilzenrat.py b/tests/control/test_gilzenrat.py index 1f2c546a21e..d8d79f0af7a 100644 --- a/tests/control/test_gilzenrat.py +++ b/tests/control/test_gilzenrat.py @@ -185,7 +185,7 @@ def test_fitzHughNagumo_gilzenrat_figure_2(self): # input_weights = np.array([[1, .33], [.33, 1]]) # # # Implement self-excitatory (auto) and mutually inhibitory (hetero) connections within the decision layer -# decision_layer = GilzenratTransferMechanism(size=2, +# decision_layer = GilzenratTransferMechanism(input_shapes=2, # initial_value=np.array([[1, 0]]), # matrix=np.array([[1, 0], [0, -1]]), # # auto=1.0, @@ -201,7 +201,7 @@ def test_fitzHughNagumo_gilzenrat_figure_2(self): # # # Implement response layer with a single, self-excitatory connection # # To do Markus: specify recurrent self-connrection weight for response unit to 2.00 -# response = GilzenratTransferMechanism(size=1, +# response = GilzenratTransferMechanism(input_shapes=1, # initial_value=np.array([[2.0]]), # matrix=np.array([[0.5]]), # function=Logistic(bias=2), diff --git a/tests/functions/test_accumulator_integrator.py b/tests/functions/test_accumulator_integrator.py index f4fca045d25..372c5a4be81 100644 --- a/tests/functions/test_accumulator_integrator.py +++ b/tests/functions/test_accumulator_integrator.py @@ -179,8 +179,8 @@ def test_accumulator_as_function_of_matrix_param_of_mapping_projection(self): # Test that accumulator is function of parameter_port of mapping project, # and that its increment param works properly (used as modulatory param by LearningProjetion) - T1 = TransferMechanism(size=3) - T2 = TransferMechanism(size=3) + T1 = TransferMechanism(input_shapes=3) + T2 = TransferMechanism(input_shapes=3) M = MappingProjection(sender=T1, receiver=T2) C = Composition() C.add_linear_processing_pathway([T1, M, T2]) diff --git a/tests/functions/test_combination.py b/tests/functions/test_combination.py index cf83c64580e..f2b52826ee0 100644 --- a/tests/functions/test_combination.py +++ b/tests/functions/test_combination.py @@ -281,7 +281,7 @@ def test_linear_combination_function(variable, operation, exponents, weights, sc @pytest.mark.parametrize("offset", [None, 1.5, [1,2.5,0,0]], ids=["O_NONE", "O_SCALAR", "O_VECTOR"]) def test_linear_combination_function_in_mechanism(operation, input, input_ports, scale, offset, benchmark, mech_mode): f = pnl.LinearCombination(default_variable=input, operation=operation, scale=scale, offset=offset) - p = pnl.ProcessingMechanism(size=[len(input[0])] * len(input), function=f, input_ports=input_ports) + p = pnl.ProcessingMechanism(input_shapes=[len(input[0])] * len(input), function=f, input_ports=input_ports) EX = pytest.helpers.get_mech_execution(p, mech_mode) diff --git a/tests/functions/test_memory.py b/tests/functions/test_memory.py index 8654833f94b..bcf979b33d6 100644 
--- a/tests/functions/test_memory.py +++ b/tests/functions/test_memory.py @@ -300,10 +300,10 @@ def test_DictionaryMemory_with_initializer_and_key_size_diff_from_val_size(self) # def test_DictionaryMemory_without_initializer_in_composition(): # - # content = TransferMechanism(size=5) - # assoc = TransferMechanism(size=3) - # content_out = TransferMechanism(size=5) - # assoc_out = TransferMechanism(size=3) + # content = TransferMechanism(input_shapes=5) + # assoc = TransferMechanism(input_shapes=3) + # content_out = TransferMechanism(input_shapes=5) + # assoc_out = TransferMechanism(input_shapes=3) # # # Episodic Memory, Decision and Control # em = EpisodicMemoryMechanism(name='EM', @@ -1490,7 +1490,7 @@ def test_ContentAddressableMemory_unique_functions(self, param_name): # with pytest.raises(FunctionError) as error_text: # f = ContentAddressableMemory(initializer=[[[[1,0],[1,0],[1,0]], [[1,0],[1,0],[1,0]], [[1,0],[1,0],[1,0]]], # [[[0,1],[0,1],[0,1]], [[0,1],[0,0],[1,0]], [[0,1],[0,1],[0,1]]]]) - # em = EpisodicMemoryMechanism(size = [1,1,1], function=f) + # em = EpisodicMemoryMechanism(input_shapes = [1,1,1], function=f) # em.execute([[[0,1],[0,1],[0,1]], [[0,1],[0,0],[1,0]], [[0,1],[0,1],[0,1]]]) # assert 'Attempt to store and/or retrieve an entry in ContentAddressableMemory that has more than 2 dimensions (' \ # '3); try flattening innermost ones.' in str(error_text.value) @@ -1498,7 +1498,7 @@ def test_ContentAddressableMemory_unique_functions(self, param_name): # # Initializer with >2d ragged array # with pytest.raises(FunctionError) as error_text: # f = ContentAddressableMemory(initializer=[ [[1,2,3], [4]], [[1,2,3], [[1],[4]]] ]) - # em = EpisodicMemoryMechanism(size = [1,1,1], function=f) + # em = EpisodicMemoryMechanism(input_shapes = [1,1,1], function=f) # em.execute([[[0,1],[0,1],[0,1]], [[0,1],[0,0],[1,0]], [[0,1],[0,1],[0,1]]]) # assert 'Attempt to store and/or retrieve an entry in ContentAddressableMemory that has more than 2 dimensions (' \ # '3); try flattening innermost ones.' 
in str(error_text.value) diff --git a/tests/functions/test_user_defined_func.py b/tests/functions/test_user_defined_func.py index ac1b4d1fa74..a57fe70afa5 100644 --- a/tests/functions/test_user_defined_func.py +++ b/tests/functions/test_user_defined_func.py @@ -541,7 +541,7 @@ def test_udf_in_mechanism(mech_mode, benchmark): def myFunction(variable, param1, param2): return sum(variable[0]) + 2 - myMech = ProcessingMechanism(function=myFunction, size=4, name='myMech') + myMech = ProcessingMechanism(function=myFunction, input_shapes=4, name='myMech') # assert 'param1' in myMech.parameter_ports.names # <- FIX reinstate when problem with function params is fixed # assert 'param2' in myMech.parameter_ports.names # <- FIX reinstate when problem with function params is fixed e = pytest.helpers.get_mech_execution(myMech, mech_mode) @@ -610,8 +610,8 @@ def test_udf_composition_origin(comp_mode, benchmark): def myFunction(variable, context): return [variable[0][1], variable[0][0]] - myMech = ProcessingMechanism(function=myFunction, size=3, name='myMech') - T = TransferMechanism(size=2, function=Linear) + myMech = ProcessingMechanism(function=myFunction, input_shapes=3, name='myMech') + T = TransferMechanism(input_shapes=2, function=Linear) c = Composition(pathways=[myMech, T]) benchmark(c.run, inputs={myMech: [[1, 3, 5]]}, execution_mode=comp_mode) np.testing.assert_allclose(c.results[0][0], [3, 1]) @@ -623,8 +623,8 @@ def test_udf_composition_terminal(comp_mode, benchmark): def myFunction(variable, context): return [variable[0][2], variable[0][0]] - myMech = ProcessingMechanism(function=myFunction, size=3, name='myMech') - T2 = TransferMechanism(size=3, function=Linear) + myMech = ProcessingMechanism(function=myFunction, input_shapes=3, name='myMech') + T2 = TransferMechanism(input_shapes=3, function=Linear) c2 = Composition(pathways=[[T2, myMech]]) benchmark(c2.run, inputs={T2: [[1, 2, 3]]}, execution_mode=comp_mode) np.testing.assert_allclose(c2.results[0][0], [3, 1]) @@ -637,7 +637,7 @@ def myFunction(variable, context): return L(variable) + 2 U = UserDefinedFunction(custom_function=myFunction, default_variable=[[0, 0, 0]]) - myMech = ProcessingMechanism(function=myFunction, size=3, name='myMech') + myMech = ProcessingMechanism(function=myFunction, input_shapes=3, name='myMech') val1 = myMech.execute(input=[1, 2, 3]) val2 = U.execute(variable=[[1, 2, 3]]) np.testing.assert_allclose(val1, val2) diff --git a/tests/log/test_log.py b/tests/log/test_log.py index f691a60ccbc..30375fe9dfe 100644 --- a/tests/log/test_log.py +++ b/tests/log/test_log.py @@ -11,8 +11,8 @@ class TestLog: def test_log(self): - T_1 = pnl.TransferMechanism(name='log_test_T_1', size=2) - T_2 = pnl.TransferMechanism(name='log_test_T_2', size=2) + T_1 = pnl.TransferMechanism(name='log_test_T_1', input_shapes=2) + T_2 = pnl.TransferMechanism(name='log_test_T_2', input_shapes=2) PS = pnl.Composition(name='log_test_PS', pathways=[T_1, T_2]) PJ = T_2.path_afferents[0] @@ -262,9 +262,9 @@ def test_log_initialization(self): def test_log_dictionary_without_time(self): T1 = pnl.TransferMechanism(name='log_test_T1', - size=2) + input_shapes=2) T2 = pnl.TransferMechanism(name='log_test_T2', - size=2) + input_shapes=2) PS = pnl.Composition(name='log_test_PS', pathways=[T1, T2]) PJ = T2.path_afferents[0] @@ -495,9 +495,9 @@ def test_log_dictionary_without_time(self): def test_run_resets(self): import psyneulink as pnl T1 = pnl.TransferMechanism(name='log_test_T1', - size=2) + input_shapes=2) T2 = pnl.TransferMechanism(name='log_test_T2', - 
size=2) + input_shapes=2) COMP = pnl.Composition(name='COMP', pathways=[T1, T2]) T1.set_log_conditions('mod_slope') T2.set_log_conditions('value') @@ -519,10 +519,10 @@ def test_run_resets(self): def test_log_dictionary_with_time(self): T1 = pnl.TransferMechanism(name='log_test_T1', - size=2) + input_shapes=2) T2 = pnl.TransferMechanism(name='log_test_T2', function=psyneulink.core.components.functions.nonstateful.transferfunctions.Linear(slope=2.0), - size=2) + input_shapes=2) COMP = pnl.Composition(name='log_test_COMP', pathways=[T1, T2]) assert T1.loggable_items == { @@ -958,14 +958,14 @@ def test_log_csv_multiple_contexts(self): ) def test_log_multi_calls_single_timestep(self, scheduler_conditions, multi_run): lca = pnl.LCAMechanism( - size=2, + input_shapes=2, leak=0.5, threshold=0.515, reset_stateful_function_when=pnl.AtTrialStart() ) lca.set_log_conditions(pnl.VALUE) m0 = pnl.ProcessingMechanism( - size=2 + input_shapes=2 ) comp = pnl.Composition() comp.add_linear_processing_pathway([m0, lca]) @@ -1000,8 +1000,8 @@ class TestClearLog: def test_clear_log(self): # Create Composition - T_1 = pnl.TransferMechanism(name='log_test_T_1', size=2) - T_2 = pnl.TransferMechanism(name='log_test_T_2', size=2) + T_1 = pnl.TransferMechanism(name='log_test_T_1', input_shapes=2) + T_2 = pnl.TransferMechanism(name='log_test_T_2', input_shapes=2) COMP = pnl.Composition(name="log_test_COMP", pathways=[T_1, T_2]) PJ = T_2.path_afferents[0] @@ -1210,19 +1210,19 @@ def test_multilayer(self): input_layer = pnl.TransferMechanism(name='input_layer', function=pnl.Logistic, - size=2) + input_shapes=2) hidden_layer_1 = pnl.TransferMechanism(name='hidden_layer_1', function=pnl.Logistic, - size=5) + input_shapes=5) hidden_layer_2 = pnl.TransferMechanism(name='hidden_layer_2', function=pnl.Logistic, - size=4) + input_shapes=4) output_layer = pnl.TransferMechanism(name='output_layer', function=pnl.Logistic, - size=3) + input_shapes=3) input_weights_matrix = (np.arange(2 * 5).reshape((2, 5)) + 1) / (2 * 5) middle_weights_matrix = (np.arange(5 * 4).reshape((5, 4)) + 1) / (5 * 4) diff --git a/tests/log/test_rpc.py b/tests/log/test_rpc.py index b90b27f8d2c..51c7b03c5ea 100644 --- a/tests/log/test_rpc.py +++ b/tests/log/test_rpc.py @@ -9,8 +9,8 @@ class TestRPC: def test_transfer_mech(self): - T_1 = pnl.TransferMechanism(name='log_test_T_1', size=2) - T_2 = pnl.TransferMechanism(name='log_test_T_2', size=2) + T_1 = pnl.TransferMechanism(name='log_test_T_1', input_shapes=2) + T_2 = pnl.TransferMechanism(name='log_test_T_2', input_shapes=2) PS = pnl.Composition(name='log_test_PS', pathways=[T_1, T_2]) con_with_rpc_pipeline = pnl.Context(rpc_pipeline=Queue(), execution_id=PS) @@ -77,9 +77,9 @@ def test_delivery_initialization(self): def test_run_resets(self): T1 = pnl.TransferMechanism(name='log_test_T1', - size=2) + input_shapes=2) T2 = pnl.TransferMechanism(name='log_test_T2', - size=2) + input_shapes=2) COMP = pnl.Composition(name='COMP', pathways=[T1, T2]) con_with_rpc_pipeline = pnl.Context(rpc_pipeline=Queue(), execution_id=COMP) pipeline = con_with_rpc_pipeline.rpc_pipeline @@ -111,10 +111,10 @@ def test_run_resets(self): def test_log_dictionary_with_time(self): T1 = pnl.TransferMechanism(name='log_test_T1', - size=2) + input_shapes=2) T2 = pnl.TransferMechanism(name='log_test_T2', function=pnl.Linear(slope=2.0), - size=2) + input_shapes=2) COMP = pnl.Composition(name='log_test_COMP', pathways=[T1, T2]) con_with_rpc_pipeline = pnl.Context(rpc_pipeline=Queue(), execution_id=COMP) pipeline = 
con_with_rpc_pipeline.rpc_pipeline @@ -390,14 +390,14 @@ def test_log_csv_multiple_contexts(self): ) def test_log_multi_calls_single_timestep(self, scheduler_conditions, multi_run): lca = pnl.LCAMechanism( - size=2, + input_shapes=2, leak=0.5, threshold=0.515, reset_stateful_function_when=pnl.AtTrialStart() ) lca.set_delivery_conditions(pnl.VALUE) m0 = pnl.ProcessingMechanism( - size=2 + input_shapes=2 ) comp = pnl.Composition() comp.add_linear_processing_pathway([m0, lca]) @@ -438,19 +438,19 @@ class TestFullModels: def test_multilayer(self): input_layer = pnl.TransferMechanism(name='input_layer', function=pnl.Logistic, - size=2) + input_shapes=2) hidden_layer_1 = pnl.TransferMechanism(name='hidden_layer_1', function=pnl.Logistic, - size=5) + input_shapes=5) hidden_layer_2 = pnl.TransferMechanism(name='hidden_layer_2', function=pnl.Logistic, - size=4) + input_shapes=4) output_layer = pnl.TransferMechanism(name='output_layer', function=pnl.Logistic, - size=3) + input_shapes=3) input_weights_matrix = (np.arange(2 * 5).reshape((2, 5)) + 1) / (2 * 5) middle_weights_matrix = (np.arange(5 * 4).reshape((5, 4)) + 1) / (5 * 4) diff --git a/tests/mdf/model_varied_matrix_sizes.py b/tests/mdf/model_varied_matrix_sizes.py index 900f0b570f1..cea792773b9 100644 --- a/tests/mdf/model_varied_matrix_sizes.py +++ b/tests/mdf/model_varied_matrix_sizes.py @@ -1,10 +1,10 @@ import psyneulink as pnl comp = pnl.Composition(name='comp') -A = pnl.TransferMechanism(name='A', size=2) -B = pnl.TransferMechanism(name='B', size=3) -C = pnl.TransferMechanism(name='C', size=4) -D = pnl.TransferMechanism(name='D', size=5) +A = pnl.TransferMechanism(name='A', input_shapes=2) +B = pnl.TransferMechanism(name='B', input_shapes=3) +C = pnl.TransferMechanism(name='C', input_shapes=4) +D = pnl.TransferMechanism(name='D', input_shapes=5) for n in [A, B, C, D]: comp.add_node(n) diff --git a/tests/mdf/stroop_conflict_monitoring.py b/tests/mdf/stroop_conflict_monitoring.py index d46b18a70b8..6bc9b19bfe2 100644 --- a/tests/mdf/stroop_conflict_monitoring.py +++ b/tests/mdf/stroop_conflict_monitoring.py @@ -5,14 +5,14 @@ # Construct the color naming pathway: color_input = pnl.ProcessingMechanism( - name="color_input", size=2 + name="color_input", input_shapes=2 ) # Note: default function is Linear color_input_to_hidden_wts = np.array([[2, -2], [-2, 2]]) color_hidden = pnl.ProcessingMechanism( - name="color_hidden", size=2, function=pnl.Logistic(bias=-4) + name="color_hidden", input_shapes=2, function=pnl.Logistic(bias=-4) ) color_hidden_to_output_wts = np.array([[2, -2], [-2, 2]]) -output = pnl.ProcessingMechanism(name="OUTPUT", size=2, function=pnl.Logistic) +output = pnl.ProcessingMechanism(name="OUTPUT", input_shapes=2, function=pnl.Logistic) color_pathway = [ color_input, color_input_to_hidden_wts, @@ -22,10 +22,10 @@ ] # Construct the word reading pathway (using the same output_layer) -word_input = pnl.ProcessingMechanism(name="word_input", size=2) +word_input = pnl.ProcessingMechanism(name="word_input", input_shapes=2) word_input_to_hidden_wts = np.array([[3, -3], [-3, 3]]) word_hidden = pnl.ProcessingMechanism( - name="word_hidden", size=2, function=pnl.Logistic(bias=-4) + name="word_hidden", input_shapes=2, function=pnl.Logistic(bias=-4) ) word_hidden_to_output_wts = np.array([[3, -3], [-3, 3]]) word_pathway = [ @@ -37,8 +37,8 @@ ] # Construct the task specification pathways -task_input = pnl.ProcessingMechanism(name="task_input", size=2) -task = pnl.LCAMechanism(name="TASK", size=2, initial_value=[0.5, 0.5]) +task_input = 
pnl.ProcessingMechanism(name="task_input", size=2) -task = pnl.LCAMechanism(name="TASK", size=2, initial_value=[0.5, 0.5]) +task_input = pnl.ProcessingMechanism(name="task_input", input_shapes=2) +task = pnl.LCAMechanism(name="TASK", input_shapes=2, initial_value=[0.5, 0.5]) task_color_wts = np.array([[4, 4], [0, 0]]) task_word_wts = np.array([[0, 0], [4, 4]]) task_color_pathway = [task_input, task, task_color_wts, color_hidden] @@ -53,7 +53,7 @@ name="CONTROL", objective_mechanism=pnl.ObjectiveMechanism( name="Conflict Monitor", - function=pnl.Energy(size=2, matrix=[[0, -2.5], [-2.5, 0]]), + function=pnl.Energy(input_shapes=2, matrix=[[0, -2.5], [-2.5, 0]]), monitor=output, ), default_allocation=[0.5], diff --git a/tests/mechanisms/test_control_mechanism.py b/tests/mechanisms/test_control_mechanism.py index ef622863db3..a4b7cb7720b 100644 --- a/tests/mechanisms/test_control_mechanism.py +++ b/tests/mechanisms/test_control_mechanism.py @@ -136,14 +136,15 @@ def test_control_modulation(self): def test_identicalness_of_control_and_gating(self): """Tests same configuration as gating in tests/mechansims/test_gating_mechanism""" - Input_Layer = pnl.TransferMechanism(name='Input Layer', function=pnl.Logistic, size=2) - Hidden_Layer_1 = pnl.TransferMechanism(name='Hidden Layer_1', function=pnl.Logistic, size=5) - Hidden_Layer_2 = pnl.TransferMechanism(name='Hidden Layer_2', function=pnl.Logistic, size=4) - Output_Layer = pnl.TransferMechanism(name='Output Layer', function=pnl.Logistic, size=3) - - Control_Mechanism = pnl.ControlMechanism(size=[1], control=[Hidden_Layer_1.input_port, - Hidden_Layer_2.input_port, - Output_Layer.input_port]) + Input_Layer = pnl.TransferMechanism(name='Input Layer', function=pnl.Logistic, input_shapes=2) + Hidden_Layer_1 = pnl.TransferMechanism(name='Hidden Layer_1', function=pnl.Logistic, input_shapes=5) + Hidden_Layer_2 = pnl.TransferMechanism(name='Hidden Layer_2', function=pnl.Logistic, input_shapes=4) + Output_Layer = pnl.TransferMechanism(name='Output Layer', function=pnl.Logistic, input_shapes=3) + + Control_Mechanism = pnl.ControlMechanism( + input_shapes=[1], control=[Hidden_Layer_1.input_port, + Hidden_Layer_2.input_port, + Output_Layer.input_port]) Input_Weights_matrix = (np.arange(2 * 5).reshape((2, 5)) + 1) / (2 * 5) Middle_Weights_matrix = (np.arange(5 * 4).reshape((5, 4)) + 1) / (5 * 4) diff --git a/tests/mechanisms/test_ddm_mechanism.py b/tests/mechanisms/test_ddm_mechanism.py index 92955c64a38..2e5c8920397 100644 --- a/tests/mechanisms/test_ddm_mechanism.py +++ b/tests/mechanisms/test_ddm_mechanism.py @@ -174,7 +174,7 @@ def test_is_finished_stops_composition(self): class TestInputPorts: def test_regular_input_mode(self): - input_mech = ProcessingMechanism(size=2) + input_mech = ProcessingMechanism(input_shapes=2) ddm = DDM( function=DriftDiffusionAnalytical(), output_ports=[SELECTED_INPUT_ARRAY, DECISION_VARIABLE_ARRAY], @@ -192,7 +192,7 @@ def test_regular_input_mode(self): np.testing.assert_allclose(result, [[1.0], [1.0]]) def test_array_mode(self): - input_mech = ProcessingMechanism(size=2) + input_mech = ProcessingMechanism(input_shapes=2) ddm = DDM( input_format=ARRAY, function=DriftDiffusionAnalytical(), @@ -487,13 +487,13 @@ def test_DDM_rate_fn(): # ------------------------------------------------------------------------------------------------ # TEST 1 -# size = int, check if variable is an array of zeros +# input_shapes = int, check if variable is an array of zeros def test_DDM_size_int_check_var(): T = DDM( name='DDM', - size=1, + input_shapes=1, function=DriftDiffusionIntegrator( noise=0.0, rate=-5.0, @@ -505,13 +505,13 @@ def test_DDM_size_int_check_var(): # 
------------------------------------------------------------------------------------------------ # TEST 2 -# size = float, variable = [.4], check output after execution +# input_shapes = float, variable = [.4], check output after execution def test_DDM_size_int_inputs(): T = DDM( name='DDM', - size=1, + input_shapes=1, function=DriftDiffusionIntegrator( noise=0.0, rate=-5.0, @@ -531,14 +531,14 @@ def test_DDM_size_int_inputs(): # ------------------------------------------------------------------------------------------------ # TEST 2 -# size = -1.0, check less-than-one error +# input_shapes = -1.0, check less-than-one error def test_DDM_mech_size_negative_one(): with pytest.raises(ComponentError) as error_text: T = DDM( name='DDM', - size=-1, + input_shapes=-1, function=DriftDiffusionIntegrator( noise=0.0, rate=-5.0, @@ -550,14 +550,14 @@ def test_DDM_mech_size_negative_one(): # ------------------------------------------------------------------------------------------------ # TEST 3 -# size = 3.0, check size-too-large error +# input_shapes = 3.0, check input_shapes-too-large error def test_DDM_size_too_large(): with pytest.raises(DDMError) as error_text: T = DDM( name='DDM', - size=3, + input_shapes=3, function=DriftDiffusionIntegrator( noise=0.0, rate=-5.0, @@ -569,14 +569,14 @@ def test_DDM_size_too_large(): # ------------------------------------------------------------------------------------------------ # TEST 4 -# size = [1,1], check too-many-input-ports error +# input_shapes = [1,1], check too-many-input-ports error def test_DDM_size_too_long(): with pytest.raises(DDMError) as error_text: T = DDM( name='DDM', - size=[1, 1], + input_shapes=[1, 1], function=DriftDiffusionIntegrator( noise=0.0, rate=-5.0, diff --git a/tests/mechanisms/test_episodic_memory.py b/tests/mechanisms/test_episodic_memory.py index 68066e6e15b..69958f56997 100644 --- a/tests/mechanisms/test_episodic_memory.py +++ b/tests/mechanisms/test_episodic_memory.py @@ -48,7 +48,7 @@ @pytest.mark.parametrize('variable, func, params, expected', test_data, ids=names) def test_with_dictionary_memory(variable, func, params, expected, benchmark, mech_mode): f = func(seed=0, **params) - m = EpisodicMemoryMechanism(size=len(variable[0]), assoc_size=len(variable[1]), function=f) + m = EpisodicMemoryMechanism(input_shapes=len(variable[0]), assoc_size=len(variable[1]), function=f) EX = pytest.helpers.get_mech_execution(m, mech_mode) EX(variable) @@ -86,7 +86,7 @@ def test_with_dictionary_memory(variable, func, params, expected, benchmark, mec # func_params {'default_variable': [[0,0],[0,0],[0,0,0]]}, # mech_params - {'size':[2,2,3]}, + {'input_shapes':[2,2,3]}, # test_var [[10.,10.],[20., 30.],[40., 50., 60.]], # expected input_port names @@ -112,7 +112,7 @@ def test_with_dictionary_memory(variable, func, params, expected, benchmark, mec {'initializer':np.array([[np.array([1]), np.array([2, 3]), np.array([4, 5, 6])], [list([10]), list([20, 30]), list([40, 50, 60])], [np.array([11]), np.array([22, 33]), np.array([44, 55, 66])]], dtype=object)}, - {'size':[1,2,3]}, + {'input_shapes':[1,2,3]}, [[10.],[20., 30.],[40., 50., 60.]], ['FIELD_0_INPUT', 'FIELD_1_INPUT', 'FIELD_2_INPUT'], ['RETRIEVED_FIELD_0', 'RETRIEVED_FIELD_1', 'RETRIEVED_FIELD_2'], @@ -137,7 +137,7 @@ def test_with_dictionary_memory(variable, func, params, expected, benchmark, mec {'initializer':np.array([[np.array([1,2]), np.array([3,4]), np.array([5, 6])], [[10,20], [30,40], [50,60]], [np.array([11,12]), np.array([22, 23]), np.array([34, 35])]])}, - {'size':[2,2,2]}, + 
{'input_shapes':[2,2,2]}, [[11,13], [22,23], [34, 35]], ['FIELD_0_INPUT', 'FIELD_1_INPUT', 'FIELD_2_INPUT'], ['RETRIEVED_FIELD_0', 'RETRIEVED_FIELD_1', 'RETRIEVED_FIELD_2'], @@ -253,7 +253,7 @@ def test_contentaddressable_memory_warnings_and_errors(): with pytest.raises(FunctionError) as error_text: f = ContentAddressableMemory(initializer=[[[[1],[0],[1]], [[1],[0],[0]], [[0],[1],[1]]], [[[0],[1],[0]], [[0],[1],[1]], [[1],[1],[0]]]]) - em = EpisodicMemoryMechanism(size = [1,1,1], function=f) + em = EpisodicMemoryMechanism(input_shapes=[1, 1, 1], function=f) em.execute([[[0],[1],[0]], [[0],[1],[1]], [[1],[1],[0]]]) assert 'Attempt to store and/or retrieve an entry in ContentAddressableMemory ' \ '([[[1]\n [0]\n [1]]\n\n [[1]\n [0]\n [0]]\n\n [[0]\n [1]\n [1]]]) ' \ diff --git a/tests/mechanisms/test_gating_mechanism.py b/tests/mechanisms/test_gating_mechanism.py index 05f1a325009..717d5ec24ae 100644 --- a/tests/mechanisms/test_gating_mechanism.py +++ b/tests/mechanisms/test_gating_mechanism.py @@ -15,12 +15,12 @@ def test_gating_with_composition(): """Tests same configuration as control of InputPort in tests/mechanisms/test_identicalness_of_control_and_gating """ - Input_Layer = TransferMechanism(name='Input Layer', function=Logistic, size=2) - Hidden_Layer_1 = TransferMechanism(name='Hidden Layer_1', function=Logistic, size=5) - Hidden_Layer_2 = TransferMechanism(name='Hidden Layer_2', function=Logistic, size=4) - Output_Layer = TransferMechanism(name='Output Layer', function=Logistic, size=3) + Input_Layer = TransferMechanism(name='Input Layer', function=Logistic, input_shapes=2) + Hidden_Layer_1 = TransferMechanism(name='Hidden Layer_1', function=Logistic, input_shapes=5) + Hidden_Layer_2 = TransferMechanism(name='Hidden Layer_2', function=Logistic, input_shapes=4) + Output_Layer = TransferMechanism(name='Output Layer', function=Logistic, input_shapes=3) - Gating_Mechanism = GatingMechanism(size=[1], gate=[Hidden_Layer_1, Hidden_Layer_2, Output_Layer]) + Gating_Mechanism = GatingMechanism(input_shapes=[1], gate=[Hidden_Layer_1, Hidden_Layer_2, Output_Layer]) Input_Weights_matrix = (np.arange(2 * 5).reshape((2, 5)) + 1) / (2 * 5) Middle_Weights_matrix = (np.arange(5 * 4).reshape((5, 4)) + 1) / (5 * 4) @@ -137,7 +137,7 @@ def my_sinusoidal_fct( ) Gating_Mechanism = pnl.GatingMechanism( - size=[1], + input_shapes=[1], gating_signals=[ # Output_Layer Output_Layer.output_port, diff --git a/tests/mechanisms/test_input_output_labels.py b/tests/mechanisms/test_input_output_labels.py index 714bf1d43d8..d92e368d385 100644 --- a/tests/mechanisms/test_input_output_labels.py +++ b/tests/mechanisms/test_input_output_labels.py @@ -187,9 +187,9 @@ # "green": [0, 0]} # output_labels_dict_M2 = {"red": [0, 0], # "green": [1, 1]} -# M1 = ProcessingMechanism(size=2, +# M1 = ProcessingMechanism(input_shapes=2, # params={INPUT_LABELS_DICT: input_labels_dict_M1}) -# M2 = ProcessingMechanism(size=2, +# M2 = ProcessingMechanism(input_shapes=2, # params={OUTPUT_LABELS_DICT: output_labels_dict_M2}) # C = Composition() # learning_pathway = C.add_backpropagation_learning_pathway(pathway=[M1, M2], learning_rate=0.25) @@ -216,9 +216,9 @@ # output_labels_dict_M2 = {0: {"red": [0, 0], # "green": [1, 1]} # } -# M1 = ProcessingMechanism(size=2, +# M1 = ProcessingMechanism(input_shapes=2, # params={INPUT_LABELS_DICT: input_labels_dict_M1}) -# M2 = ProcessingMechanism(size=2, +# M2 = ProcessingMechanism(input_shapes=2, # params={OUTPUT_LABELS_DICT: output_labels_dict_M2}) # C = Composition() # @@ -273,7 +273,7 @@ # "green":
[0.0, 1.0]} # output_labels_dict = {"red": [1.0, 0.0], # "green": [0.0, 1.0]} -# M = ProcessingMechanism(size=2, +# M = ProcessingMechanism(input_shapes=2, # params={INPUT_LABELS_DICT: input_labels_dict, # OUTPUT_LABELS_DICT: output_labels_dict}) # C = Composition(pathways=[M]) @@ -301,7 +301,7 @@ # "blue": [2.0, 2.0]} # output_labels_dict = {"red": [1.0, 0.0], # "green": [0.0, 1.0]} -# M = ProcessingMechanism(size=2, +# M = ProcessingMechanism(input_shapes=2, # params={INPUT_LABELS_DICT: input_labels_dict, # OUTPUT_LABELS_DICT: output_labels_dict}) # C = Composition(pathways=[M]) diff --git a/tests/mechanisms/test_input_port_spec.py b/tests/mechanisms/test_input_port_spec.py index 8c36b6f7f3a..482deefa83e 100644 --- a/tests/mechanisms/test_input_port_spec.py +++ b/tests/mechanisms/test_input_port_spec.py @@ -16,7 +16,7 @@ mismatches_specified_default_variable_error_text = 'not compatible with its specified default variable' mismatches_default_variable_format_error_text = 'is not compatible with its expected format' -mismatches_size_error_text = 'not compatible with the default variable determined from size parameter' +mismatches_input_shapes_error_text = 'not compatible with the default variable determined from input_shapes parameter' mismatches_more_input_ports_than_default_variable_error_text = 'There are more InputPorts specified' mismatches_fewer_input_ports_than_default_variable_error_text = 'There are fewer InputPorts specified' mismatches_specified_matrix_pattern = r'The number of rows \(\d\) of the matrix provided for .+ does not equal the length \(\d\) of the sender vector' @@ -283,7 +283,7 @@ def test_specification_dict(self): def test_default_variable_override_mech_list(self): - R2 = TransferMechanism(size=3) + R2 = TransferMechanism(input_shapes=3) # default_variable override of OutputPort.value T = TransferMechanism( @@ -301,8 +301,8 @@ def test_default_variable_override_mech_list(self): # 2-item tuple specification with default_variable override of OutputPort.value def test_2_item_tuple_spec(self): - R2 = TransferMechanism(size=3) - T = TransferMechanism(size=2, input_ports=[(R2, np.zeros((3, 2)))]) + R2 = TransferMechanism(input_shapes=3) + T = TransferMechanism(input_shapes=2, input_ports=[(R2, np.zeros((3, 2)))]) np.testing.assert_array_equal(T.defaults.variable, np.array([[0, 0]])) assert len(T.input_ports) == 1 assert len(T.input_port.path_afferents[0].sender.defaults.variable) == 3 @@ -311,10 +311,10 @@ def test_2_item_tuple_spec(self): # ------------------------------------------------------------------------------------------------ # TEST 12.1 - # 2-item tuple specification with value as first item (and no size specification for T) + # 2-item tuple specification with value as first item (and no input_shapes specification for T) def test_2_item_tuple_value_for_first_item(self): - R2 = TransferMechanism(size=3) + R2 = TransferMechanism(input_shapes=3) T = TransferMechanism(input_ports=[([0,0], R2)]) np.testing.assert_array_equal(T.defaults.variable, np.array([[0, 0]])) assert len(T.input_ports) == 1 @@ -327,8 +327,8 @@ def test_2_item_tuple_value_for_first_item(self): # 4-item tuple Specification def test_projection_tuple_with_matrix_spec(self): - R2 = TransferMechanism(size=3) - T = TransferMechanism(size=2, input_ports=[(R2, None, None, np.zeros((3, 2)))]) + R2 = TransferMechanism(input_shapes=3) + T = TransferMechanism(input_shapes=2, input_ports=[(R2, None, None, np.zeros((3, 2)))]) np.testing.assert_array_equal(T.defaults.variable, np.array([[0, 0]])) assert 
len(T.input_ports) == 1 assert T.input_port.path_afferents[0].sender.defaults.variable.shape[-1] == 3 @@ -340,10 +340,10 @@ def test_projection_tuple_with_matrix_spec(self): # Standalone Projection specification with Mechanism as sender def test_projection_list_mech_as_send(self): - R2 = TransferMechanism(size=3) + R2 = TransferMechanism(input_shapes=3) P = MappingProjection(sender=R2) T = TransferMechanism( - size=2, + input_shapes=2, input_ports=[P] ) np.testing.assert_array_equal(T.defaults.variable, np.array([[0, 0]])) @@ -357,10 +357,10 @@ def test_projection_list_mech_as_send(self): # Standalone Projection specification with Port as sender def test_projection_list_port_as_sender(self): - R2 = TransferMechanism(size=3) + R2 = TransferMechanism(input_shapes=3) P = MappingProjection(sender=R2.output_port) T = TransferMechanism( - size=2, + input_shapes=2, input_ports=[P] ) np.testing.assert_array_equal(T.defaults.variable, np.array([[0, 0]])) @@ -374,10 +374,10 @@ def test_projection_list_port_as_sender(self): # Projection specification in Tuple def test_projection_in_tuple(self): - R2 = TransferMechanism(size=3) + R2 = TransferMechanism(input_shapes=3) P = MappingProjection(sender=R2) T = TransferMechanism( - size=2, + input_shapes=2, input_ports=[(R2, None, None, P)] ) np.testing.assert_array_equal(T.defaults.variable, np.array([[0, 0]])) @@ -501,9 +501,9 @@ def test_dict_with_variable_mismatches_default_multiple_input_ports(self): # ------------------------------------------------------------------------------------------------ # TEST 24 - def test_dict_with_variable_matches_size(self): + def test_dict_with_variable_matches_input_shapes(self): T = TransferMechanism( - size=2, + input_shapes=2, input_ports=[{NAME: 'FIRST', VARIABLE: [0, 0]}] ) np.testing.assert_array_equal(T.defaults.variable, np.array([[0, 0]])) @@ -512,13 +512,13 @@ def test_dict_with_variable_matches_size(self): # ------------------------------------------------------------------------------------------------ # TEST 25 - def test_dict_with_variable_mismatches_size(self): + def test_dict_with_variable_mismatches_input_shapes(self): with pytest.raises(MechanismError) as error_text: TransferMechanism( - size=1, + input_shapes=1, input_ports=[{NAME: 'FIRST', VARIABLE: [0, 0]}] ) - assert mismatches_size_error_text in str(error_text.value) + assert mismatches_input_shapes_error_text in str(error_text.value) # ------------------------------------------------------------------------------------------------ # TEST 26 @@ -562,7 +562,7 @@ def test_InputPort_mismatches_default(self): # TEST 31 def test_projection_with_matrix_and_sender(self): - m = TransferMechanism(size=2) + m = TransferMechanism(input_shapes=2) p = MappingProjection(sender=m, matrix=[[0, 0, 0], [0, 0, 0]]) T = TransferMechanism(input_ports=[p]) @@ -574,13 +574,13 @@ def test_projection_with_matrix_and_sender(self): def tests_for_projection_with_matrix_and_sender_mismatches_default(self): with pytest.raises(MechanismError) as error_text: - m = TransferMechanism(size=2) + m = TransferMechanism(input_shapes=2) p = MappingProjection(sender=m, matrix=[[0, 0, 0], [0, 0, 0]]) TransferMechanism(default_variable=[0, 0], input_ports=[p]) assert mismatches_specified_default_variable_error_text in str(error_text.value) with pytest.raises(FunctionError) as error_text: - m = TransferMechanism(size=3, output_ports=[pnl.MEAN]) + m = TransferMechanism(input_shapes=3, output_ports=[pnl.MEAN]) p = MappingProjection(sender=m, matrix=[[0,0,0], [0,0,0]]) T = 
TransferMechanism(input_ports=[p]) assert re.match( @@ -589,7 +589,7 @@ def tests_for_projection_with_matrix_and_sender_mismatches_default(self): ) with pytest.raises(FunctionError) as error_text: - m2 = TransferMechanism(size=2, output_ports=[pnl.MEAN]) + m2 = TransferMechanism(input_shapes=2, output_ports=[pnl.MEAN]) p2 = MappingProjection(sender=m2, matrix=[[1,1,1],[1,1,1]]) T2 = TransferMechanism(input_ports=[p2]) assert re.match( @@ -601,7 +601,7 @@ def tests_for_projection_with_matrix_and_sender_mismatches_default(self): # TEST 33 def test_projection_with_sender_and_default(self): - t = TransferMechanism(size=3) + t = TransferMechanism(input_shapes=3) p = MappingProjection(sender=t) T = TransferMechanism(default_variable=[[0, 0]], input_ports=[p]) @@ -801,25 +801,25 @@ def test_list_of_mechanisms_with_gating_mechanism(self): assert T2.output_ports[0].mod_afferents[0].sender.name=='b' # ------------------------------------------------------------------------------------------------ - # THOROUGH TESTING OF mech, 2-item, 3-item and 4-item tuple specifications with and without default_variable/size + # THOROUGH TESTING OF mech, 2-item, 3-item and 4-item tuple specifications with and without default_variable/input_shapes # (some of these may be duplicative of tests above) # pytest does not support fixtures in parametrize, but a class member is enough for this test - transfer_mech = TransferMechanism(size=3) + transfer_mech = TransferMechanism(input_shapes=3) - @pytest.mark.parametrize('default_variable, size, input_ports, variable_len_state, variable_len_mech', [ + @pytest.mark.parametrize('default_variable, input_shapes, input_ports, variable_len_state, variable_len_mech', [ # default_variable tests ([0, 0], None, [transfer_mech], 2, 2), ([0, 0], None, [(transfer_mech, None)], 2, 2), ([0, 0], None, [(transfer_mech, 1, 1)], 2, 2), ([0, 0], None, [((RESULT, transfer_mech), 1, 1)], 2, 2), ([0, 0], None, [(transfer_mech, 1, 1, None)], 2, 2), - # size tests + # input_shapes tests (None, 2, [transfer_mech], 2, 2), (None, 2, [(transfer_mech, None)], 2, 2), (None, 2, [(transfer_mech, 1, 1)], 2, 2), (None, 2, [(transfer_mech, 1, 1, None)], 2, 2), - # no default_variable or size tests + # no default_variable or input_shapes tests (None, None, [transfer_mech], 3, 3), (None, None, [(transfer_mech, None)], 3, 3), (None, None, [(transfer_mech, 1, 1)], 3, 3), @@ -836,10 +836,10 @@ def test_list_of_mechanisms_with_gating_mechanism(self): # ([[0]], None, [{VARIABLE: [[0], [0]], FUNCTION: LinearCombination}], 2, 1), # (None, 1, [{VARIABLE: [0, 0], FUNCTION: Reduce(weights=[1, -1])}], 2, 1), ]) - def test_mech_and_tuple_specifications_with_and_without_default_variable_or_size( + def test_mech_and_tuple_specifications_with_and_without_default_variable_or_input_shapes( self, default_variable, - size, + input_shapes, input_ports, variable_len_state, variable_len_mech, @@ -849,7 +849,7 @@ def test_mech_and_tuple_specifications_with_and_without_default_variable_or_size T = TransferMechanism( default_variable=default_variable, - size=size, + input_shapes=input_shapes, input_ports=input_ports ) assert T.input_ports[0].socket_width == variable_len_state diff --git a/tests/mechanisms/test_kwta.py b/tests/mechanisms/test_kwta.py index b9449ea4cbf..a481a27ae90 100644 --- a/tests/mechanisms/test_kwta.py +++ b/tests/mechanisms/test_kwta.py @@ -17,17 +17,17 @@ def test_kwta_empty_spec(self): K = KWTAMechanism() np.testing.assert_allclose(K.value, K.defaults.value) assert K.defaults.variable == [[0]] - assert K.size == 
[1] + assert K.input_shapes == [1] assert K.matrix.base == [[5]] def test_kwta_check_attrs(self): K = KWTAMechanism( name='K', - size=3 + input_shapes=3 ) np.testing.assert_allclose(K.value, K.defaults.value) np.testing.assert_allclose(K.defaults.variable, [[0., 0., 0.]]) - assert K.size == [3] + assert K.input_shapes == [3] np.testing.assert_allclose(K.matrix.base, [[5, 0, 0], [0, 5, 0], [0, 0, 5]]) assert K.recurrent_projection.sender is K.output_port assert K.recurrent_projection.receiver is K.input_port @@ -54,7 +54,7 @@ def test_kwta_inputs_list_of_strings(self): with pytest.raises(MechanismError) as error_text: K = KWTAMechanism( name='K', - size = 4, + input_shapes=4, ) K.execute(["one", "two", "three", "four"]) assert ('Input to \'K\' ([\'one\' \'two\' \'three\' \'four\']) is incompatible with its corresponding ' @@ -73,7 +73,7 @@ def test_recurrent_mech_inputs_mismatched_with_default_longer(self): with pytest.raises(MechanismError) as error_text: K = KWTAMechanism( name='K', - size=4 + input_shapes=4 ) K.execute([1, 2, 3, 4, 5]) assert ("Shape ((5,)) of input ([1 2 3 4 5]) does not match required shape ((4,)) " @@ -83,7 +83,7 @@ def test_recurrent_mech_inputs_mismatched_with_default_shorter(self): with pytest.raises(MechanismError) as error_text: K = KWTAMechanism( name='K', - size=6 + input_shapes=6 ) K.execute([1, 2, 3, 4, 5]) assert ("Shape ((5,)) of input ([1 2 3 4 5]) does not match required shape ((6,)) " @@ -97,7 +97,7 @@ def test_kwta_function_various_spec(self): for s in specs: K = KWTAMechanism( name='K', - size=5, + input_shapes=5, function=s, k_value=4 ) @@ -106,7 +106,7 @@ def test_kwta_log_gain(self): K = KWTAMechanism( name='K', - size=3, + input_shapes=3, function=Logistic(gain=2), k_value=2 ) @@ -116,7 +116,7 @@ def test_kwta_log_offset(self): K = KWTAMechanism( name='K', - size=3, + input_shapes=3, function=Logistic(offset=-.2), k_value=2 ) @@ -127,7 +127,7 @@ def test_kwta_log_gain_offset(self): K = KWTAMechanism( name='K', - size=2, + input_shapes=2, function=Logistic(gain=-.2, offset=4), k_value=1 ) @@ -138,7 +138,7 @@ def test_kwta_linear(self): # inhibition would be positive: so instead it is set K = KWTAMechanism( name='K', threshold=3, - size=3, + input_shapes=3, k_value=2, function=Linear ) @@ -149,7 +149,7 @@ def test_kwta_linear_slope(self): K = KWTAMechanism( name='K', threshold=.5, - size=5, + input_shapes=5, k_value=2, function=Linear(slope=2) ) @@ -159,7 +159,7 @@ def test_kwta_linear_system(self): K=KWTAMechanism( name='K', - size=4, + input_shapes=4, k_value=3, function=Linear ) @@ -172,7 +172,7 @@ def test_kwta_matrix_keyword_spec(self): if m != RANDOM_CONNECTIVITY_MATRIX: K = KWTAMechanism( name='K', - size=4, + input_shapes=4, matrix=m ) val = K.execute([10, 10, 10, 10]) @@ -181,7 +181,7 @@ def test_kwta_matrix_auto_hetero_spec(self): K = KWTAMechanism( name='K', - size=4, + input_shapes=4, auto=3, hetero=2 ) @@ -190,7 +190,7 @@ def test_kwta_matrix_hetero_spec(self): K = KWTAMechanism( name='K', - size=3, + input_shapes=3, hetero=-.5, ) np.testing.assert_allclose(K.recurrent_projection.matrix.base, [[5, -.5, -.5], [-.5, 5, -.5], [-.5, -.5, 5]]) @@ -198,7 +198,7 @@ def test_kwta_matrix_auto_spec(self): K = KWTAMechanism( name='K', - size=3, + input_shapes=3, auto=-.5, )
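# (A reading aid for the auto/hetero matrix specs in these tests, inferred from the surrounding
#  assertions rather than from any API reference: `auto` sets only the diagonal -- self-connection --
#  weights of the recurrent matrix, and `hetero` only the off-diagonal weights; whichever is omitted
#  keeps its default of 5 on the diagonal and 0 off it, so hetero=-.5 alone yields
#  [[5, -.5, -.5], ...] above, while auto=-.5 alone leaves the off-diagonal entries at 0 in the
#  assertion just below.)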
np.testing.assert_allclose(K.recurrent_projection.matrix.base, [[-.5, 0, 0], [0, -.5, 0], [0, 0, -.5]]) @@ -210,7 +210,7 @@ class TestKWTARatio: def test_kwta_ratio_empty(self): K = KWTAMechanism( name='K', - size=4 + input_shapes=4 ) c = Composition(pathways=[K], prefs=TestKWTARatio.simple_prefs) @@ -226,7 +226,7 @@ def test_kwta_ratio_empty(self): def test_kwta_ratio_1(self): K = KWTAMechanism( name='K', - size=4, + input_shapes=4, ratio=1 ) c = Composition(pathways=[K], @@ -243,7 +243,7 @@ def test_kwta_ratio_1(self): def test_kwta_ratio_0(self): K = KWTAMechanism( name='K', - size=4, + input_shapes=4, ratio=0 ) c = Composition(pathways=[K], @@ -261,7 +261,7 @@ def test_kwta_ratio_0(self): def test_kwta_ratio_0_3(self): K = KWTAMechanism( name='K', - size=4, + input_shapes=4, ratio=0.3 ) c = Composition(pathways=[K], @@ -279,7 +279,7 @@ def test_kwta_ratio_2(self): with pytest.raises(KWTAError) as error_text: K = KWTAMechanism( name='K', - size=4, + input_shapes=4, ratio=2 ) assert "must be between 0 and 1" in str(error_text.value) @@ -288,7 +288,7 @@ def test_kwta_ratio_neg_1(self): with pytest.raises(KWTAError) as error_text: K = KWTAMechanism( name='K', - size=4, + input_shapes=4, ratio=-1 ) assert "must be between 0 and 1" in str(error_text.value) @@ -298,7 +298,7 @@ class TestKWTAKValue: def test_kwta_k_value_empty_size_4(self): K = KWTAMechanism( name='K', - size=4 + input_shapes=4 ) assert K.k_value.base == 0.5 c = Composition(pathways=[K], @@ -312,7 +312,7 @@ def test_kwta_k_value_empty_size_4(self): def test_kwta_k_value_empty_size_6(self): K = KWTAMechanism( name='K', - size=6 + input_shapes=6 ) assert K.k_value.base == 0.5 c = Composition(pathways=[K], @@ -327,7 +327,7 @@ def test_kwta_k_value_empty_size_6(self): def test_kwta_k_value_int_size_5(self): K = KWTAMechanism( name='K', - size=5, + input_shapes=5, k_value=3 ) assert K.k_value.base == 3 @@ -339,7 +339,7 @@ def test_kwta_k_value_int_size_5(self): # for size_val, expected_int_k in size_and_int_k_pairs: # K = KWTA( # name='K', - # size=size_val, + # input_shapes=size_val, # k_value=0.4 # ) # assert K.k_value.base == 0.4 @@ -353,7 +353,7 @@ def test_kwta_k_value_bad_float(self): with pytest.raises(KWTAError) as error_text: K = KWTAMechanism( name='K', - size=4, + input_shapes=4, k_value=2.5 ) assert "must be an integer, or between 0 and 1." 
in str(error_text.value) @@ -362,7 +362,7 @@ def test_kwta_k_value_list(self): with pytest.raises(KWTAError) as error_text: K = KWTAMechanism( name='K', - size=4, + input_shapes=4, k_value=[1, 2] ) assert "must be a single number" in str(error_text.value) @@ -371,7 +371,7 @@ def test_kwta_k_value_too_large(self): with pytest.raises(KWTAError) as error_text: K = KWTAMechanism( name='K', - size=4, + input_shapes=4, k_value=5 ) assert "was larger than the total number of elements" in str(error_text.value) @@ -380,7 +380,7 @@ def test_kwta_k_value_too_low(self): with pytest.raises(KWTAError) as error_text: K = KWTAMechanism( name='K', - size=4, + input_shapes=4, k_value=-5 ) assert "was larger than the total number of elements" in str(error_text.value) @@ -392,14 +392,14 @@ class TestKWTAThreshold: def test_kwta_threshold_empty(self): K = KWTAMechanism( name='K', - size=4 + input_shapes=4 ) assert K.threshold.base == 0 def test_kwta_threshold_int(self): K = KWTAMechanism( name='K', - size=4, + input_shapes=4, threshold=-1 ) c = Composition(pathways=[K], @@ -412,7 +412,7 @@ def test_kwta_threshold_float(self): K = KWTAMechanism( name='K', - size=4, + input_shapes=4, threshold=0.5 ) c = Composition(pathways=[K], @@ -434,7 +434,7 @@ class TestKWTALongTerm: def test_kwta_size_10_k_3_threshold_1(self): K = KWTAMechanism( name='K', - size=10, + input_shapes=10, k_value=3, threshold=1, ) @@ -471,7 +471,7 @@ class TestKWTAAverageBased: def test_kwta_average_k_2(self): K = KWTAMechanism( name='K', - size=4, + input_shapes=4, k_value=2, threshold=0, function=Linear, ) @@ -486,7 +486,7 @@ def test_kwta_average_k_1(self): K = KWTAMechanism( name='K', - size=4, + input_shapes=4, k_value=1, threshold=0, function=Linear, ) @@ -501,7 +501,7 @@ def test_kwta_average_k_1_ratio_0_2(self): K = KWTAMechanism( name='K', - size=4, + input_shapes=4, k_value=1, threshold=0, ratio=0.2, ) @@ -517,7 +517,7 @@ def test_kwta_average_k_1_ratio_0_8(self): K = KWTAMechanism( name='K', - size=4, + input_shapes=4, k_value=1, threshold=0, ratio=0.8, diff --git a/tests/mechanisms/test_lca.py b/tests/mechanisms/test_lca.py index 9996dca42d6..6482dd68b0e 100644 --- a/tests/mechanisms/test_lca.py +++ b/tests/mechanisms/test_lca.py @@ -65,9 +65,9 @@ def test_LCAMechanism_length_2(self, benchmark, comp_mode): # Note: since the LCAMechanism's threshold is not specified in this test, each execution only updates # the Mechanism once. - T = TransferMechanism(function=Linear(slope=1.0), size=2) + T = TransferMechanism(function=Linear(slope=1.0), input_shapes=2) L = LCAMechanism(function=Linear(slope=2.0), - size=2, + input_shapes=2, self_excitation=3.0, leak=0.5, competition=1.0, @@ -126,16 +126,16 @@ def test_equivalance_of_threshold_and_when_finished_condition(self): # that causes the LCAMechanism to execute until it reaches threshold (2nd test).
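# (A minimal sketch of the two idioms this test compares, drawn entirely from the test body below --
#  the names and calls are the ones used there, not new API:
#    lca = LCAMechanism(input_shapes=2, leak=0.5, threshold=0.7)    # loops inside the Mechanism until threshold
#  versus
#    lca = LCAMechanism(input_shapes=2, leak=0.5, threshold=0.7, execute_until_finished=False)
#    comp.scheduler.add_condition(response, WhenFinished(lca))      # scheduler loops the single-step Mechanism
#  The test then checks that both routes produce the same result for the same input.)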
# loop Mechanism's call to execute - lca_until_thresh = LCAMechanism(size=2, leak=0.5, threshold=0.7) # Note: , execute_to_threshold=True by default - response = ProcessingMechanism(size=2) + lca_until_thresh = LCAMechanism(input_shapes=2, leak=0.5, threshold=0.7) # Note: , execute_to_threshold=True by default + response = ProcessingMechanism(input_shapes=2) comp = Composition() comp.add_linear_processing_pathway([lca_until_thresh, response]) result1 = comp.run(inputs={lca_until_thresh:[1,0]}) # loop Composition's call to Mechanism - lca_single_step = LCAMechanism(size=2, leak=0.5, threshold=0.7, execute_until_finished=False) + lca_single_step = LCAMechanism(input_shapes=2, leak=0.5, threshold=0.7, execute_until_finished=False) comp2 = Composition() - response2 = ProcessingMechanism(size=2) + response2 = ProcessingMechanism(input_shapes=2) comp2.add_linear_processing_pathway([lca_single_step,response2]) comp2.scheduler.add_condition(response2, WhenFinished(lca_single_step)) result2 = comp2.run(inputs={lca_single_step:[1,0]}) @@ -143,9 +143,9 @@ def test_equivalance_of_threshold_and_when_finished_condition(self): def test_LCAMechanism_matrix(self): matrix = [[0,-2],[-2,0]] - lca1 = LCAMechanism(size=2, leak=0.5, competition=2) + lca1 = LCAMechanism(input_shapes=2, leak=0.5, competition=2) np.testing.assert_allclose(lca1.matrix.base, matrix) - lca2 = LCAMechanism(size=2, leak=0.5, matrix=matrix) + lca2 = LCAMechanism(input_shapes=2, leak=0.5, matrix=matrix) np.testing.assert_allclose(lca1.matrix.base, lca2.matrix.base) # Note: In the following tests, since the LCAMechanism's threshold is specified @@ -154,7 +154,7 @@ def test_LCAMechanism_matrix(self): @pytest.mark.lca_mechanism @pytest.mark.benchmark(group="LCAMechanism") def test_LCAMechanism_threshold(self, benchmark, comp_mode): - lca = LCAMechanism(size=2, leak=0.5, threshold=0.7) + lca = LCAMechanism(input_shapes=2, leak=0.5, threshold=0.7) comp = Composition() comp.add_node(lca) @@ -163,7 +163,7 @@ def test_LCAMechanism_threshold(self, benchmark, comp_mode): @pytest.mark.composition def test_LCAMechanism_threshold_with_max_vs_next(self): - lca = LCAMechanism(size=3, leak=0.5, threshold=0.1, threshold_criterion=MAX_VS_NEXT) + lca = LCAMechanism(input_shapes=3, leak=0.5, threshold=0.1, threshold_criterion=MAX_VS_NEXT) comp = Composition() comp.add_node(lca) result = comp.run(inputs={lca:[1,0.5,0]}) @@ -171,7 +171,7 @@ def test_LCAMechanism_threshold_with_max_vs_next(self): @pytest.mark.composition def test_LCAMechanism_threshold_with_max_vs_avg(self): - lca = LCAMechanism(size=3, leak=0.5, threshold=0.1, threshold_criterion=MAX_VS_AVG) + lca = LCAMechanism(input_shapes=3, leak=0.5, threshold=0.1, threshold_criterion=MAX_VS_AVG) comp = Composition() comp.add_node(lca) result = comp.run(inputs={lca:[1,0.5,0]}) @@ -181,7 +181,7 @@ def test_LCAMechanism_threshold_with_max_vs_avg(self): @pytest.mark.lca_mechanism @pytest.mark.benchmark(group="LCAMechanism") def test_LCAMechanism_threshold_with_convergence(self, benchmark, comp_mode): - lca = LCAMechanism(size=3, leak=0.5, threshold=0.01, threshold_criterion=CONVERGENCE) + lca = LCAMechanism(input_shapes=3, leak=0.5, threshold=0.01, threshold_criterion=CONVERGENCE) comp = Composition() comp.add_node(lca) @@ -200,19 +200,20 @@ def test_equivalance_of_threshold_and_termination_specifications_just_threshold( # Note: This tests the equivalence of using LCAMechanism-specific threshold arguments and # generic TransferMechanism termination_<*> arguments - lca_thresh = LCAMechanism(size=2, leak=0.5, 
threshold=0.7) # Note: , execute_to_threshold=True by default - response = ProcessingMechanism(size=2) + lca_thresh = LCAMechanism(input_shapes=2, leak=0.5, threshold=0.7) # Note: , execute_to_threshold=True by default + response = ProcessingMechanism(input_shapes=2) comp = Composition() comp.add_linear_processing_pathway([lca_thresh, response]) result1 = comp.run(inputs={lca_thresh:[1,0]}, execution_mode=comp_mode) - lca_termination = LCAMechanism(size=2, + lca_termination = LCAMechanism( + input_shapes=2, leak=0.5, termination_threshold=0.7, termination_measure=max, termination_comparison_op='>=') comp2 = Composition() - response2 = ProcessingMechanism(size=2) + response2 = ProcessingMechanism(input_shapes=2) comp2.add_linear_processing_pathway([lca_termination,response2]) result2 = comp2.run(inputs={lca_termination:[1,0]}, execution_mode=comp_mode) np.testing.assert_allclose(result1, result2) @@ -222,27 +223,28 @@ def test_equivalance_of_threshold_and_termination_specifications_max_vs_next(sel # Note: This tests the equivalence of using LCAMechanism-specific threshold arguments and # generic TransferMechanism termination_<*> arguments - lca_thresh = LCAMechanism(size=3, leak=0.5, threshold=0.1, threshold_criterion=MAX_VS_NEXT) - response = ProcessingMechanism(size=3) + lca_thresh = LCAMechanism(input_shapes=3, leak=0.5, threshold=0.1, threshold_criterion=MAX_VS_NEXT) + response = ProcessingMechanism(input_shapes=3) comp = Composition() comp.add_linear_processing_pathway([lca_thresh, response]) result1 = comp.run(inputs={lca_thresh:[1,0.5,0]}) - lca_termination = LCAMechanism(size=3, + lca_termination = LCAMechanism( + input_shapes=3, leak=0.5, termination_threshold=0.1, termination_measure=max_vs_next, termination_comparison_op='>=') comp2 = Composition() - response2 = ProcessingMechanism(size=3) + response2 = ProcessingMechanism(input_shapes=3) comp2.add_linear_processing_pathway([lca_termination,response2]) result2 = comp2.run(inputs={lca_termination:[1,0.5,0]}) np.testing.assert_allclose(result1, result2) # def test_LCAMechanism_threshold_with_str(self): - # lca = LCAMechanism(size=2, threshold=0.7, threshold_criterion='MY_OUTPUT_PORT', + # lca = LCAMechanism(input_shapes=2, threshold=0.7, threshold_criterion='MY_OUTPUT_PORT', # output_ports=[RESULT, 'MY_OUTPUT_PORT']) - # response = ProcessingMechanism(size=2) + # response = ProcessingMechanism(input_shapes=2) # comp = Composition() # comp.add_linear_processing_pathway([lca,response]) # comp.scheduler.add_condition(response, WhenFinished(lca)) @@ -250,8 +252,8 @@ def test_equivalance_of_threshold_and_termination_specifications_max_vs_next(sel # np.testing.assert_allclose(result, [[0.71463572, 0.28536428]]) # # def test_LCAMechanism_threshold_with_int(self): - # lca = LCAMechanism(size=2, threshold=0.7, threshold_criterion=1, output_ports=[RESULT, 'MY_OUTPUT_PORT']) - # response = ProcessingMechanism(size=2) + # lca = LCAMechanism(input_shapes=2, threshold=0.7, threshold_criterion=1, output_ports=[RESULT, 'MY_OUTPUT_PORT']) + # response = ProcessingMechanism(input_shapes=2) # comp = Composition() # comp.add_linear_processing_pathway([lca,response]) # comp.scheduler.add_condition(response, WhenFinished(lca)) @@ -261,7 +263,8 @@ def test_equivalance_of_threshold_and_termination_specifications_max_vs_next(sel @pytest.mark.composition @pytest.mark.lca_mechanism def test_LCAMechanism_DDM_equivalent(self, comp_mode): - lca = LCAMechanism(size=2, leak=0., threshold=1, auto=0, hetero=0, + lca = LCAMechanism( + input_shapes=2, leak=0., 
threshold=1, auto=0, hetero=0, initial_value=[0, 0], execute_until_finished=False) comp1 = Composition() comp1.add_node(lca) diff --git a/tests/mechanisms/test_leabra_mechanism.py b/tests/mechanisms/test_leabra_mechanism.py index fe11e19ad99..90f61dc271d 100644 --- a/tests/mechanisms/test_leabra_mechanism.py +++ b/tests/mechanisms/test_leabra_mechanism.py @@ -105,10 +105,10 @@ def test_leabra_prec_no_train(self): L_net = LeabraMechanism(leabra_net2) # leabra_net should be identical to the network inside L_net - T1_spec = TransferMechanism(name='T1_spec', size=in_size, function=Linear) - T2_spec = TransferMechanism(name='T2_spec', size=out_size, function=Linear) - T1_net = TransferMechanism(name='T1_net', size=in_size, function=Linear) - T2_net = TransferMechanism(name='T2_net', size=out_size, function=Linear) + T1_spec = TransferMechanism(name='T1_spec', input_shapes=in_size, function=Linear) + T2_spec = TransferMechanism(name='T2_spec', input_shapes=out_size, function=Linear) + T1_net = TransferMechanism(name='T1_net', input_shapes=in_size, function=Linear) + T2_net = TransferMechanism(name='T2_net', input_shapes=out_size, function=Linear) proj_spec = MappingProjection(sender=T2_spec, receiver=L_spec.input_ports[1]) c_spec = Composition(pathways=[[T1_spec, L_spec],[T2_spec, proj_spec, L_spec]]) @@ -154,10 +154,10 @@ def test_leabra_prec_with_train(self): L_net = LeabraMechanism(leabra_net2) # leabra_net should be identical to the network inside L_net - T1_spec = TransferMechanism(name='T1_spec', size=in_size, function=Linear) - T2_spec = TransferMechanism(name='T2_spec', size=out_size, function=Linear) - T1_net = TransferMechanism(name='T1_net', size=in_size, function=Linear) - T2_net = TransferMechanism(name='T2_net', size=out_size, function=Linear) + T1_spec = TransferMechanism(name='T1_spec', input_shapes=in_size, function=Linear) + T2_spec = TransferMechanism(name='T2_spec', input_shapes=out_size, function=Linear) + T1_net = TransferMechanism(name='T1_net', input_shapes=in_size, function=Linear) + T2_net = TransferMechanism(name='T2_net', input_shapes=out_size, function=Linear) proj_spec = MappingProjection(sender=T2_spec, receiver=L_spec.input_ports[1]) c_spec = Composition(pathways=[[T1_spec, L_spec],[T2_spec, proj_spec, L_spec]]) @@ -205,10 +205,10 @@ def test_leabra_prec_half_train(self): L_net = LeabraMechanism(leabra_net2) # leabra_net should be identical to the network inside L_net - T1_spec = TransferMechanism(name='T1', size=in_size, function=Linear) - T2_spec = TransferMechanism(name='T2', size=out_size, function=Linear) - T1_net = TransferMechanism(name='T1', size=in_size, function=Linear) - T2_net = TransferMechanism(name='T2', size=out_size, function=Linear) + T1_spec = TransferMechanism(name='T1', input_shapes=in_size, function=Linear) + T2_spec = TransferMechanism(name='T2', input_shapes=out_size, function=Linear) + T1_net = TransferMechanism(name='T1', input_shapes=in_size, function=Linear) + T2_net = TransferMechanism(name='T2', input_shapes=out_size, function=Linear) proj_spec = MappingProjection(sender=T2_spec, receiver=L_spec.input_ports[1]) c_spec = Composition(pathways=[[T1_spec, L_spec], [T2_spec, proj_spec, L_spec]]) @@ -249,11 +249,11 @@ def test_leabra_prec_half_train(self): # class TestLeabraMechInSystem: # # def test_leabra_mech_learning(self): -# T1 = TransferMechanism(size=5, function=Linear) -# T2 = TransferMechanism(size=3, function=Linear) +# T1 = TransferMechanism(input_shapes=5, function=Linear) +# T2 = TransferMechanism(input_shapes=3, 
function=Linear) # L = LeabraMechanism(input_size=5, output_size=3, hidden_layers=2, hidden_sizes=[4, 4]) # train_data_proj = MappingProjection(sender=T2, receiver=L.input_ports[1]) -# out = TransferMechanism(size=3, function=Logistic(bias=2)) +# out = TransferMechanism(input_shapes=3, function=Logistic(bias=2)) # p1 = Process(pathway=[T1, L, out], learning=LEARNING, learning_rate=1.0, target=[0, .1, .8]) # p2 = Process(pathway=[T2, train_data_proj, L, out]) # s = System(processes=[p1, p2]) diff --git a/tests/mechanisms/test_mechanisms.py b/tests/mechanisms/test_mechanisms.py index 2835140936f..f07b2810aa2 100644 --- a/tests/mechanisms/test_mechanisms.py +++ b/tests/mechanisms/test_mechanisms.py @@ -46,8 +46,8 @@ def test_value_shapes(self, mechanism_type, default_variable, mechanism_value, f [pnl.GaussianDistort, pnl.NormalDist] ) def test_noise_assignment_equivalence(self, noise): - t1 = pnl.TransferMechanism(name='t1', size=2, noise=noise()) - t2 = pnl.TransferMechanism(name='t2', size=2) + t1 = pnl.TransferMechanism(name='t1', input_shapes=2, noise=noise()) + t2 = pnl.TransferMechanism(name='t2', input_shapes=2) t2.integrator_function.parameters.noise.set(noise()) t1.integrator_function.noise.seed.base = 0 @@ -79,7 +79,7 @@ def test_numeric_noise_specifications( except TypeError: size = 1 - t = pnl.TransferMechanism(size=size, noise=noise) + t = pnl.TransferMechanism(input_shapes=size, noise=noise) assert all(p in t.parameter_ports for p in included_parameter_ports) assert all(p not in t.parameter_ports for p in excluded_parameter_ports) @@ -100,7 +100,7 @@ def test_noise_change_warning_to_numeric(self, noise): except TypeError: size = 1 - t = pnl.TransferMechanism(size=size, noise=noise) + t = pnl.TransferMechanism(input_shapes=size, noise=noise) with pytest.warns( UserWarning, @@ -122,7 +122,7 @@ def test_noise_change_warning_to_function(self, noise): except TypeError: size = 1 - t = pnl.TransferMechanism(size=size, noise=noise) + t = pnl.TransferMechanism(input_shapes=size, noise=noise) with pytest.warns( UserWarning, diff --git a/tests/mechanisms/test_processing_mechanism.py b/tests/mechanisms/test_processing_mechanism.py index 54c9a123cc7..1dbada01f21 100644 --- a/tests/mechanisms/test_processing_mechanism.py +++ b/tests/mechanisms/test_processing_mechanism.py @@ -134,8 +134,8 @@ def test_processing_mechanism_TDLearning_function(self): def test_processing_mechanism_multiple_input_ports(self): - PM1 = ProcessingMechanism(size=[4, 4], function=LinearCombination, input_ports=['input_1', 'input_2']) - PM2 = ProcessingMechanism(size=[2, 2, 2], function=LinearCombination, input_ports=['1', '2', '3']) + PM1 = ProcessingMechanism(input_shapes=[4, 4], function=LinearCombination, input_ports=['input_1', 'input_2']) + PM2 = ProcessingMechanism(input_shapes=[2, 2, 2], function=LinearCombination, input_ports=['1', '2', '3']) PM1.execute([[1, 2, 3, 4], [5, 4, 2, 2]]) PM2.execute([[2, 0], [1, 3], [1, 0]]) np.testing.assert_allclose(PM1.value, [[6, 6, 5, 6]]) diff --git a/tests/mechanisms/test_recurrent_transfer_mechanism.py b/tests/mechanisms/test_recurrent_transfer_mechanism.py index 469fed705a6..9affb1375d2 100644 --- a/tests/mechanisms/test_recurrent_transfer_mechanism.py +++ b/tests/mechanisms/test_recurrent_transfer_mechanism.py @@ -78,7 +78,7 @@ def test_recurrent_mech_empty_spec(self): def test_recurrent_mech_check_attrs(self): R = RecurrentTransferMechanism( name='R', - size=3, + input_shapes=3, auto=1.0 ) print("matrix = ", R.matrix.base) @@ -91,7 +91,7 @@ def 
test_recurrent_mech_check_attrs(self): def test_recurrent_mech_check_proj_attrs(self): R = RecurrentTransferMechanism( name='R', - size=3 + input_shapes=3 ) np.testing.assert_allclose(R.recurrent_projection.matrix.base, R.matrix.base) assert R.recurrent_projection.sender is R.output_port @@ -102,8 +102,8 @@ def test_recurrent_mech_check_proj_attrs(self): @pytest.mark.benchmark(group="RecurrentTransferMechanism") @pytest.mark.parametrize("variable, params", [ - pytest.param(([10, 12, 0, -1], [1, 2, 3, 0]), {'size': 4}, id="list_of_ints"), - pytest.param(([1.0, 1.2, 0., -1.3], [1., 5., 3., 0.]), {'size': 4}, id="list_of_floats"), + pytest.param(([10, 12, 0, -1], [1, 2, 3, 0]), {'input_shapes': 4}, id="list_of_ints"), + pytest.param(([1.0, 1.2, 0., -1.3], [1., 5., 3., 0.]), {'input_shapes': 4}, id="list_of_floats"), pytest.param(([10], [10]), {}, id="no_init_params"), ]) def test_recurrent_mech_inputs(self, benchmark, params, variable, mech_mode): @@ -123,7 +123,8 @@ def test_recurrent_mech_inputs(self, benchmark, params, variable, mech_mode): @pytest.mark.recurrent_transfer_mechanism @pytest.mark.benchmark(group="RecurrentTransferMechanism") def test_recurrent_mech_integrator(self, benchmark, mech_mode): - R = RecurrentTransferMechanism(size=2, + R = RecurrentTransferMechanism( + input_shapes=2, function=Logistic(), hetero=-2.0, integrator_mode=True, @@ -149,7 +150,8 @@ def test_recurrent_mech_integrator(self, benchmark, mech_mode): @pytest.mark.benchmark(group="RecurrentTransferMechanism") def test_recurrent_mech_lci(self, benchmark, mech_mode): LCI = pnl.LeakyCompetingIntegrator(rate=0.4) - R = RecurrentTransferMechanism(size=2, + R = RecurrentTransferMechanism( + input_shapes=2, hetero=-2.0, integrator_mode=True, integrator_function=LCI, @@ -171,7 +173,7 @@ def test_recurrent_mech_lci(self, benchmark, mech_mode): # def test_recurrent_mech_inputs_list_of_fns(self): # R = RecurrentTransferMechanism( # name='R', - # size=4, + # input_shapes=4, # integrator_mode=True # ) # val = R.execute([Linear().execute(), NormalDist().execute(), Exponential().execute(), ExponentialDist().execute()]) @@ -205,7 +207,7 @@ def test_recurrent_mech_inputs_mismatched_with_default_longer(self): with pytest.raises(MechanismError) as error_text: R = RecurrentTransferMechanism( name='R', - size=4 + input_shapes=4 ) R.execute([1, 2, 3, 4, 5]) assert ("Shape ((5,)) of input ([1 2 3 4 5]) does not match required shape ((4,)) " @@ -215,7 +217,7 @@ def test_recurrent_mech_inputs_mismatched_with_default_shorter(self): with pytest.raises(MechanismError) as error_text: R = RecurrentTransferMechanism( name='R', - size=6 + input_shapes=6 ) R.execute([1, 2, 3, 4, 5]) assert ("Shape ((5,)) of input ([1 2 3 4 5]) does not match required shape ((6,)) " @@ -231,19 +233,19 @@ def test_recurrent_mech_matrix_keyword_spec(self, matrix): pytest.skip("Random test") R = RecurrentTransferMechanism( name='R', - size=4, + input_shapes=4, matrix=matrix ) val = R.execute([10, 10, 10, 10]) np.testing.assert_allclose(val, [[10., 10., 10., 10.]]) - np.testing.assert_allclose(R.recurrent_projection.matrix.base, get_matrix(matrix, R.size[0], R.size[0])) + np.testing.assert_allclose(R.recurrent_projection.matrix.base, get_matrix(matrix, R.input_shapes[0], R.input_shapes[0])) @pytest.mark.parametrize("matrix", [pnl.array_from_matrix_string('1 2; 3 4'), np.array([[1, 2], [3, 4]]), [[1, 2], [3, 4]], '1 2; 3 4']) def test_recurrent_mech_matrix_other_spec(self, matrix): R = RecurrentTransferMechanism( name='R', - size=2, + input_shapes=2, matrix=matrix 
) val = R.execute([10, 10]) @@ -257,7 +259,7 @@ def test_recurrent_mech_matrix_other_spec(self, matrix): def test_recurrent_mech_matrix_auto_spec(self): R = RecurrentTransferMechanism( name='R', - size=3, + input_shapes=3, auto=2 ) assert isinstance(R.matrix.base, np.ndarray) @@ -267,7 +269,7 @@ def test_recurrent_mech_matrix_auto_spec(self): def test_recurrent_mech_matrix_hetero_spec(self): R = RecurrentTransferMechanism( name='R', - size=3, + input_shapes=3, hetero=-1 ) # (7/28/17 CW) these numbers assume that execute() leaves its value in the outputPort of the mechanism: if @@ -287,7 +289,7 @@ def test_recurrent_mech_matrix_hetero_spec(self): def test_recurrent_mech_matrix_auto_hetero_spec_size_1(self): R = RecurrentTransferMechanism( name='R', - size=1, + input_shapes=1, auto=-2, hetero=4.4 ) @@ -299,7 +301,7 @@ def test_recurrent_mech_matrix_auto_hetero_spec_size_1(self): def test_recurrent_mech_matrix_auto_hetero_spec_size_4(self): R = RecurrentTransferMechanism( name='R', - size=4, + input_shapes=4, auto=2.2, hetero=-3 ) @@ -312,7 +314,7 @@ def test_recurrent_mech_matrix_auto_hetero_matrix_spec(self): # when auto, hetero, and matrix are all specified, auto and hetero should take precedence R = RecurrentTransferMechanism( name='R', - size=4, + input_shapes=4, auto=2.2, hetero=-3, matrix=[[1, 2, 3, 4]] * 4 @@ -326,7 +328,7 @@ def test_recurrent_mech_auto_matrix_spec(self): # auto should override the diagonal only R = RecurrentTransferMechanism( name='R', - size=4, + input_shapes=4, auto=2.2, matrix=[[1, 2, 3, 4]] * 4 ) @@ -337,7 +339,7 @@ def test_recurrent_mech_auto_matrix_spec(self): def test_recurrent_mech_auto_array_matrix_spec(self): R = RecurrentTransferMechanism( name='R', - size=4, + input_shapes=4, auto=[1.1, 2.2, 3.3, 4.4], matrix=[[1, 2, 3, 4]] * 4 ) @@ -349,7 +351,7 @@ def test_recurrent_mech_hetero_float_matrix_spec(self): # hetero should override off-diagonal only R = RecurrentTransferMechanism( name='R', - size=4, + input_shapes=4, hetero=-2.2, matrix=[[1, 2, 3, 4]] * 4 ) @@ -363,7 +365,7 @@ def test_recurrent_mech_hetero_float_matrix_spec(self): def test_recurrent_mech_hetero_matrix_matrix_spec(self): R = RecurrentTransferMechanism( name='R', - size=4, + input_shapes=4, hetero=np.array([[-4, -3, -2, -1]] * 4), matrix=[[1, 2, 3, 4]] * 4 ) @@ -378,7 +380,7 @@ def test_recurrent_mech_auto_hetero_matrix_spec_v1(self): # auto and hetero should override matrix R = RecurrentTransferMechanism( name='R', - size=4, + input_shapes=4, auto=[1, 3, 5, 7], hetero=np.array([[-4, -3, -2, -1]] * 4), matrix=[[1, 2, 3, 4]] * 4 @@ -393,7 +395,7 @@ def test_recurrent_mech_auto_hetero_matrix_spec_v1(self): def test_recurrent_mech_auto_hetero_matrix_spec_v2(self): R = RecurrentTransferMechanism( name='R', - size=4, + input_shapes=4, auto=[3], hetero=np.array([[-4, -3, -2, -1]] * 4), matrix=[[1, 2, 3, 4]] * 4 @@ -408,7 +410,7 @@ def test_recurrent_mech_auto_hetero_matrix_spec_v2(self): def test_recurrent_mech_auto_hetero_matrix_spec_v3(self): R = RecurrentTransferMechanism( name='R', - size=4, + input_shapes=4, auto=[3], hetero=2, matrix=[[1, 2, 3, 4]] * 4 @@ -424,7 +426,7 @@ def test_recurrent_mech_matrix_too_large(self): with pytest.raises(RecurrentTransferError) as error_text: R = RecurrentTransferMechanism( name='R', - size=3, + input_shapes=3, matrix=[[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]] ) @@ -434,7 +436,7 @@ def test_recurrent_mech_matrix_too_small(self): with pytest.raises(RecurrentTransferError) as error_text: R = RecurrentTransferMechanism( name='R', - size=5, + 
input_shapes=5, matrix=[[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]] ) assert "must be the same as its variable" in str(error_text.value) @@ -443,7 +445,7 @@ def test_recurrent_mech_matrix_strings(self): with pytest.raises(RecurrentTransferError) as error_text: R = RecurrentTransferMechanism( name='R', - size=4, + input_shapes=4, matrix=[['a', 'b', 'c', 'd'], ['a', 'b', 'c', 'd'], ['a', 'b', 'c', 'd'], ['a', 'b', 'c', 'd']] ) assert "has non-numeric entries" in str(error_text.value) @@ -452,7 +454,7 @@ def test_recurrent_mech_matrix_nonsquare(self): with pytest.raises(RecurrentTransferError) as error_text: R = RecurrentTransferMechanism( name='R', - size=4, + input_shapes=4, matrix=[[1, 3]] ) assert "must be square" in str(error_text.value) @@ -461,7 +463,7 @@ def test_recurrent_mech_matrix_3d(self): with pytest.raises(FunctionError) as error_text: R = RecurrentTransferMechanism( name='R', - size=2, + input_shapes=2, matrix=[[[1, 3], [2, 4]], [[5, 7], [6, 8]]] ) assert "more than 2d" in str(error_text.value) @@ -473,7 +475,7 @@ def test_recurrent_mech_function_logistic(self): R = RecurrentTransferMechanism( name='R', - size=10, + input_shapes=10, function=Logistic(gain=2, offset=1) ) val = R.execute(np.ones(10)) @@ -485,7 +487,7 @@ def test_recurrent_mech_function_psyneulink(self): R = RecurrentTransferMechanism( name='R', - size=7, + input_shapes=7, function=a ) val = R.execute(np.zeros(7)) @@ -623,12 +625,12 @@ def test_recurrent_mech_transfer_mech_process_three_runs(self): # this test ASSUMES that the ParameterPort for auto and hetero is updated one run-cycle AFTER they are set by # lines like `R.auto = 0`. If this (potentially buggy) behavior is changed, then change these values R = RecurrentTransferMechanism( - size=4, + input_shapes=4, auto=0, hetero=-1 ) T = TransferMechanism( - size=3, + input_shapes=3, function=Linear ) c = Composition(pathways=[R, T], prefs=TestRecurrentTransferMechanismInComposition.simple_prefs) @@ -645,11 +647,11 @@ def test_transfer_mech_process_matrix_change(self): from psyneulink.core.components.projections.pathway.mappingprojection import MappingProjection T1 = TransferMechanism( - size=4, + input_shapes=4, function=Linear) proj = MappingProjection(matrix=[[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]) T2 = TransferMechanism( - size=4, + input_shapes=4, function=Linear) c = Composition(pathways=[[T1, proj, T2]]) c.run(inputs={T1: [[1, 2, 3, 4]]}) @@ -664,11 +666,11 @@ def test_recurrent_mech_process_matrix_change(self): R = RecurrentTransferMechanism( - size=4, + input_shapes=4, auto=1, hetero=-1) T = TransferMechanism( - size=4, + input_shapes=4, function=Linear) c = Composition(pathways=[T, R], prefs=TestRecurrentTransferMechanismInComposition.simple_prefs) R.matrix = [[2, 0, 1, 3]] * 4 @@ -684,11 +686,11 @@ # this test must wait until we create a property such that R.recurrent_projection.matrix sets R.auto and R.hetero def test_recurrent_mech_process_proj_matrix_change(self): R = RecurrentTransferMechanism( - size=4, + input_shapes=4, auto=1, hetero=-1) T = TransferMechanism( - size=4, + input_shapes=4, function=Linear) c = Composition(pathways=[T, R], prefs=TestRecurrentTransferMechanismInComposition.simple_prefs) R.recurrent_projection.matrix = [[2, 0, 1, 3]] * 4 @@ -709,11 +711,11 @@ def test_recurrent_mech_transfer_mech_composition_three_runs(self): # this test
ASSUMES that the ParameterPort for auto and hetero is updated one run-cycle AFTER they are set by # lines like `R.auto = 0`. If this (potentially buggy) behavior is changed, then change these values R = RecurrentTransferMechanism( - size=4, + input_shapes=4, auto=0, hetero=-1) T = TransferMechanism( - size=3, + input_shapes=3, function=Linear) c = Composition(pathways=[R,T]) @@ -730,11 +732,11 @@ @pytest.mark.xfail(reason='Unsure if this is correct behavior - see note for _recurrent_transfer_mechanism_matrix_setter') def test_recurrent_mech_composition_auto_change(self): R = RecurrentTransferMechanism( - size=4, + input_shapes=4, auto=[1, 2, 3, 4], hetero=-1) T = TransferMechanism( - size=3, + input_shapes=3, function=Linear) c = Composition(pathways=[R, T], prefs=TestRecurrentTransferMechanismInComposition.simple_prefs) c.run(inputs={R: [[1, 2, 3, 4]]}) @@ -752,11 +754,11 @@ @pytest.mark.xfail(reason='Unsure if this is correct behavior - see note for _recurrent_transfer_mechanism_matrix_setter') def test_recurrent_mech_composition_hetero_change(self): R = RecurrentTransferMechanism( - size=4, + input_shapes=4, auto=[1, 2, 3, 4], hetero=[[-1, -2, -3, -4]] * 4) T = TransferMechanism( - size=5, + input_shapes=5, function=Linear) c = Composition(pathways=[R, T], prefs=TestRecurrentTransferMechanismInComposition.simple_prefs) c.run(inputs={R: [[1, 2, 3, -0.5]]}) @@ -774,11 +776,11 @@ @pytest.mark.xfail(reason='Unsure if this is correct behavior - see note for _recurrent_transfer_mechanism_matrix_setter') def test_recurrent_mech_composition_auto_and_hetero_change(self): R = RecurrentTransferMechanism( - size=4, + input_shapes=4, auto=[1, 2, 3, 4], hetero=[[-1, -2, -3, -4]] * 4) T = TransferMechanism( - size=5, + input_shapes=5, function=Linear) c = Composition(pathways=[R,T], prefs=TestRecurrentTransferMechanismInComposition.simple_prefs) c.run(inputs={R: [[1, 2, 3, -0.5]]}) @@ -796,11 +798,11 @@ @pytest.mark.xfail(reason='Unsure if this is correct behavior - see note for _recurrent_transfer_mechanism_matrix_setter') def test_recurrent_mech_composition_matrix_change(self): R = RecurrentTransferMechanism( - size=4, + input_shapes=4, auto=1, hetero=-1) T = TransferMechanism( - size=4, + input_shapes=4, function=Linear) c = Composition(pathways=[T, R], prefs=TestRecurrentTransferMechanismInComposition.simple_prefs) R.parameters.matrix.set([[2, 0, 1, 3]] * 4, c) @@ -813,7 +815,8 @@ np.testing.assert_allclose(R.parameters.value.get(c), [[21, 3, 12, 35]]) def test_recurrent_mech_with_learning(self): - R = RecurrentTransferMechanism(size=4, + R = RecurrentTransferMechanism( + input_shapes=4, function=Linear, matrix=np.full((4, 4), 0.1), enable_learning=True @@ -865,7 +868,8 @@ ) def test_recurrent_mech_change_learning_rate(self): - R = RecurrentTransferMechanism(size=4, + R = RecurrentTransferMechanism( + input_shapes=4, function=Linear, enable_learning=True, learning_rate=0.1 @@ -898,7 +902,7 @@ def test_recurrent_mech_change_learning_rate(self): def test_learning_of_orthognal_inputs(self): size=4 R = RecurrentTransferMechanism( - size=size, + input_shapes=size, function=Linear, enable_learning=True, auto=0, @@ -1027,7 +1031,7 @@ class
TestCustomCombinationFunction: def test_rt_without_custom_comb_fct(self): R1 = RecurrentTransferMechanism( has_recurrent_input_port=True, - size=2, + input_shapes=2, ) result = R1.execute([1,2]) np.testing.assert_allclose(result, [[1,2]]) @@ -1037,7 +1041,7 @@ def my_fct(x): return x[0] * x[1] if len(x) == 2 else x[0] R2 = RecurrentTransferMechanism( has_recurrent_input_port=True, - size=2, + input_shapes=2, combination_function=my_fct ) result = R2.execute([1,2]) @@ -1171,7 +1175,7 @@ class TestDebugProperties: def test_defaults(self): R = RecurrentTransferMechanism(name='R', - size=3) + input_shapes=3) print("\n\nTEST DEFAULTS") print("\n\nAuto Values -----------------------------------") print("R.auto = ", R.auto) @@ -1208,7 +1212,7 @@ def test_defaults(self): def test_auto(self): auto_val = 10.0 R = RecurrentTransferMechanism(name='R', - size=3, + input_shapes=3, auto=auto_val) print("\n\nTEST AUTO [auto = ", auto_val, "]") @@ -1247,7 +1251,7 @@ def test_auto(self): def test_hetero(self): hetero_val = 10.0 R = RecurrentTransferMechanism(name='R', - size=3, + input_shapes=3, hetero=hetero_val) print("\n\nTEST HETERO [hetero = ", hetero_val, "]") print("\n\nAuto Values -----------------------------------") @@ -1290,7 +1294,7 @@ def test_auto_and_hetero(self): hetero_val = 5.0 R = RecurrentTransferMechanism(name='R', - size=3, + input_shapes=3, auto=auto_val, hetero=hetero_val) print("\n\nTEST AUTO AND HETERO\n [auto = ", auto_val, " | hetero = ", hetero_val, "] ") @@ -1332,7 +1336,7 @@ def test_matrix(self): [10.0, 10.0, 5.0]] R = RecurrentTransferMechanism(name='R', - size=3, + input_shapes=3, matrix=matrix_val) print("\n\nTEST MATRIX\n", matrix_val) print("\n\nAuto Values -----------------------------------") diff --git a/tests/mechanisms/test_transfer_mechanism.py b/tests/mechanisms/test_transfer_mechanism.py index 0dad5034f89..b54f18d68f3 100644 --- a/tests/mechanisms/test_transfer_mechanism.py +++ b/tests/mechanisms/test_transfer_mechanism.py @@ -43,7 +43,7 @@ def test_transfer_mech_inputs_list_of_ints(self, benchmark): T.reset_stateful_function_when = Never() val = benchmark(T.execute, [10 for i in range(VECTOR_SIZE)]) np.testing.assert_allclose(val, [[10.0 for i in range(VECTOR_SIZE)]]) - assert len(T.size) == 1 and T.size[0] == VECTOR_SIZE and isinstance(T.size[0], np.integer) + assert len(T.input_shapes) == 1 and T.input_shapes[0] == VECTOR_SIZE and isinstance(T.input_shapes[0], np.integer) # this test assumes input_shapes is returned as a 1D array: if it's not, then several tests in this file must be changed @pytest.mark.mechanism @@ -953,10 +953,10 @@ class TestTransferMechanismSize: def test_transfer_mech_size_int_check_var(self): T = TransferMechanism( name='T', - size=4 + input_shapes=4 ) np.testing.assert_array_equal(T.defaults.variable, [[0, 0, 0, 0]]) - assert len(T.size) == 1 and T.size[0] == 4 and isinstance(T.size[0], np.integer) + assert len(T.input_shapes) == 1 and T.input_shapes[0] == 4 and isinstance(T.input_shapes[0], np.integer) @pytest.mark.mechanism @@ -964,35 +964,35 @@ def test_transfer_mech_size_int_check_var(self): def test_transfer_mech_size_int_inputs_ints(self): T = TransferMechanism( name='T', - size=4 + input_shapes=4 ) val = T.execute([10, 10, 10, 10]) np.testing.assert_array_equal(val, [[10.0, 10.0, 10.0, 10.0]]) # ------------------------------------------------------------------------------------------------ # TEST 3 - # size = int, variable = list of floats + # input_shapes = int, variable = list of floats @pytest.mark.mechanism
@pytest.mark.transfer_mechanism def test_transfer_mech_size_int_inputs_floats(self): T = TransferMechanism( name='T', - size=VECTOR_SIZE + input_shapes=VECTOR_SIZE ) val = T.execute([10.0 for i in range(VECTOR_SIZE)]) np.testing.assert_array_equal(val, [[10.0 for i in range(VECTOR_SIZE)]]) # ------------------------------------------------------------------------------------------------ # TEST 4 - # size = int, variable = list of functions + # input_shapes = int, variable = list of functions #@pytest.mark.mechanism #@pytest.mark.transfer_mechanism # def test_transfer_mech_size_int_inputs_fns(self): # T = TransferMechanism( # name='T', - # size=4, + # input_shapes=4, # integrator_mode=True # ) # val = T.execute([Linear().execute(), NormalDist().execute(), Exponential().execute(), ExponentialDist().execute()]) @@ -1000,14 +1000,14 @@ def test_transfer_mech_size_int_inputs_floats(self): # ------------------------------------------------------------------------------------------------ # TEST 8 - # size = float, variable = list of functions + # input_shapes = float, variable = list of functions #@pytest.mark.mechanism #@pytest.mark.transfer_mechanism # def test_transfer_mech_size_float_inputs_fns(self): # T = TransferMechanism( # name='T', - # size=4.0, + # input_shapes=4.0, # integrator_mode=True # ) # val = T.execute([Linear().execute(), NormalDist().execute(), Exponential().execute(), ExponentialDist().execute()]) @@ -1015,14 +1015,14 @@ def test_transfer_mech_size_int_inputs_floats(self): # ------------------------------------------------------------------------------------------------ # TEST 9 - # size = list of ints, check that variable is correct + # input_shapes = list of ints, check that variable is correct @pytest.mark.mechanism @pytest.mark.transfer_mechanism def test_transfer_mech_size_list_of_ints(self): T = TransferMechanism( name='T', - size=[2, 3, 4] + input_shapes=[2, 3, 4] ) assert len(T.defaults.variable) == 3 and len(T.defaults.variable[0]) == 2 and len(T.defaults.variable[1]) == 3 and len(T.defaults.variable[2]) == 4 @@ -1035,7 +1035,7 @@ def test_transfer_mech_size_list_of_ints(self): def test_transfer_mech_size_var_both_lists(self): T = TransferMechanism( name='T', - size=[2, 3], + input_shapes=[2, 3], default_variable=[[1, 2], [3, 4, 5]] ) assert len(T.defaults.variable) == 2 @@ -1044,17 +1044,17 @@ def test_transfer_mech_size_var_both_lists(self): # ------------------------------------------------------------------------------------------------ # TEST 12 - # size = int, variable = a compatible 2D array: check that variable is correct + # input_shapes = int, variable = a compatible 2D array: check that variable is correct @pytest.mark.mechanism @pytest.mark.transfer_mechanism def test_transfer_mech_size_scalar_var_2d(self): with pytest.raises( - ComponentError, match=r'size and default_variable arguments.*conflict.*' + ComponentError, match=r'input_shapes and default_variable arguments.*conflict.*' ): TransferMechanism( name='T', - size=2, + input_shapes=2, default_variable=[[1, 2], [3, 4]] ) @@ -1073,65 +1073,65 @@ def test_transfer_mech_var_2d_array(self): # ------------------------------------------------------------------------------------------------ # TEST 14 - # variable = a 1D array, size does not match: check that variable and output are correct + # variable = a 1D array, input_shapes does not match: check that variable and output are correct @pytest.mark.mechanism @pytest.mark.transfer_mechanism def test_transfer_mech_var_1D_size_wrong(self): with 
pytest.raises( - ComponentError, match=r'size and default_variable arguments.*conflict.*' + ComponentError, match=r'input_shapes and default_variable arguments.*conflict.*' ): TransferMechanism( name='T', default_variable=[1, 2, 3, 4], - size=2 + input_shapes=2 ) # ------------------------------------------------------------------------------------------------ # TEST 15 - # variable = a 1D array, size does not match again: check that variable and output are correct + # variable = a 1D array, input_shapes does not match again: check that variable and output are correct @pytest.mark.mechanism @pytest.mark.transfer_mechanism def test_transfer_mech_var_1D_size_wrong_2(self): with pytest.raises( - ComponentError, match=r'size and default_variable arguments.*conflict.*' + ComponentError, match=r'input_shapes and default_variable arguments.*conflict.*' ): TransferMechanism( name='T', default_variable=[1, 2, 3, 4], - size=[2, 3, 4] + input_shapes=[2, 3, 4] ) # ------------------------------------------------------------------------------------------------ # TEST 16 - # size = int, variable = incompatible array, check variable + # input_shapes = int, variable = incompatible array, check variable @pytest.mark.mechanism @pytest.mark.transfer_mechanism def test_transfer_mech_size_var_incompatible1(self): with pytest.raises( - ComponentError, match=r'size and default_variable arguments.*conflict.*' + ComponentError, match=r'input_shapes and default_variable arguments.*conflict.*' ): TransferMechanism( name='T', - size=2, + input_shapes=2, default_variable=[[1, 2], [3, 4, 5]] ) # ------------------------------------------------------------------------------------------------ # TEST 17 - # size = array, variable = incompatible array, check variable + # input_shapes = array, variable = incompatible array, check variable @pytest.mark.mechanism @pytest.mark.transfer_mechanism def test_transfer_mech_size_var_incompatible2(self): with pytest.raises( - ComponentError, match=r'size and default_variable arguments.*conflict.*' + ComponentError, match=r'input_shapes and default_variable arguments.*conflict.*' ): TransferMechanism( name='T', - size=[2, 2], + input_shapes=[2, 2], default_variable=[[1, 2], [3, 4, 5]] ) @@ -1141,7 +1141,7 @@ def test_transfer_mech_size_var_incompatible2(self): # ------------------------------------------------------------------------------------------------ # TEST 2 - # size = -1.0, check less-than-one error + # input_shapes = -1.0, check less-than-one error @pytest.mark.mechanism @pytest.mark.transfer_mechanism @@ -1149,7 +1149,7 @@ def test_transfer_mech_size_negative_one(self): with pytest.raises(ComponentError) as error_text: T = TransferMechanism( name='T', - size=-1, + input_shapes=-1, ) assert "negative dimensions" in str(error_text.value) @@ -1163,25 +1163,25 @@ def test_transfer_mech_size_negative_one(self): # with pytest.raises(UserWarning) as error_text: # T = TransferMechanism( # name='T', - # size=3.5, + # input_shapes=3.5, # ) # assert "cast to integer, its value changed" in str(error_text.value) # ------------------------------------------------------------------------------------------------ # TEST 4 - # size = 2D array, check too-many-dimensions warning + # input_shapes = 2D array, check too-many-dimensions warning # def test_transfer_mech_size_2d(self): # with pytest.raises(UserWarning) as error_text: # T = TransferMechanism( # name='T', - # size=[[2]], + # input_shapes=[[2]], # ) # assert "had more than one dimension" in str(error_text.value) # 
------------------------------------------------------------------------------------------------ # TEST 5 - # size = 2D array, check variable is correctly instantiated + # input_shapes = 2D array, check variable is correctly instantiated # for now, since the test above doesn't work, we use this tesT.6/30/17 (CW) @pytest.mark.mechanism @@ -1189,10 +1189,10 @@ def test_transfer_mech_size_negative_one(self): def test_transfer_mech_size_2d(self): T = TransferMechanism( name='T', - size=[[2]], + input_shapes=[[2]], ) assert len(T.defaults.variable) == 1 and len(T.defaults.variable[0]) == 2 - assert len(T.size) == 1 and T.size[0] == 2 + assert len(T.input_shapes) == 1 and T.input_shapes[0] == 2 class TestTransferMechanismMultipleInputPorts: @@ -1267,7 +1267,7 @@ def test_multiple_output_ports_for_multiple_input_ports(self, benchmark, mech_mo class TestIntegratorMode: def test_integrator_mode_simple_on_and_off(self): - T = TransferMechanism(size=2) + T = TransferMechanism(input_shapes=2) np.testing.assert_allclose(T.execute([0.5, 1]), [[0.5, 1]]) T.integrator_mode=True np.testing.assert_allclose(T.execute([0.5, 1]), [[0.25, 0.5 ]]) @@ -1654,21 +1654,21 @@ def test_reset_spec(self): # python values during execution is not implemented. @pytest.mark.usefixtures("comp_mode_no_llvm") def test_termination_measures(self, comp_mode): - stim_input = ProcessingMechanism(size=2, name='Stim Input') - stim_percept = TransferMechanism(name='Stimulus', size=2, function=Logistic) - instruction_input = ProcessingMechanism(size=2, function=Linear(slope=10)) - attention = LCAMechanism(name='Attention', size=2, function=Logistic, + stim_input = ProcessingMechanism(input_shapes=2, name='Stim Input') + stim_percept = TransferMechanism(name='Stimulus', input_shapes=2, function=Logistic) + instruction_input = ProcessingMechanism(input_shapes=2, function=Linear(slope=10)) + attention = LCAMechanism(name='Attention', input_shapes=2, function=Logistic, leak=8, competition=8, self_excitation=0, noise=0, time_step_size=.1, termination_threshold=3, termination_measure=TimeScale.TRIAL) - decision = TransferMechanism(name='Decision', size=2, + decision = TransferMechanism(name='Decision', input_shapes=2, integrator_mode=True, execute_until_finished=False, termination_threshold=0.65, termination_measure=max, termination_comparison_op=GREATER_THAN) - response = ProcessingMechanism(size=2, name='Response') + response = ProcessingMechanism(input_shapes=2, name='Response') comp = Composition() comp.add_linear_processing_pathway([stim_input, [[1,-1],[-1,1]], stim_percept, decision, response]) diff --git a/tests/models/test_botvinick.py b/tests/models/test_botvinick.py index e75704529bf..daa0fb4eca6 100644 --- a/tests/models/test_botvinick.py +++ b/tests/models/test_botvinick.py @@ -27,20 +27,24 @@ def test_botvinick_model(benchmark, comp_mode, reps): # SET UP MECHANISMS ---------------------------------------------------------------------------------------------------- # Linear input layer # colors: ('red', 'green'), words: ('RED','GREEN') - colors_input_layer = pnl.TransferMechanism(size=3, + colors_input_layer = pnl.TransferMechanism( + input_shapes=3, function=pnl.Linear, name='COLORS_INPUT') - words_input_layer = pnl.TransferMechanism(size=3, + words_input_layer = pnl.TransferMechanism( + input_shapes=3, function=pnl.Linear, name='WORDS_INPUT') - task_input_layer = pnl.TransferMechanism(size=2, + task_input_layer = pnl.TransferMechanism( + input_shapes=2, function=pnl.Linear, name='TASK_INPUT') # Task layer, tasks: ('name the 
color', 'read the word') - task_layer = pnl.RecurrentTransferMechanism(size=2, + task_layer = pnl.RecurrentTransferMechanism( + input_shapes=2, function=pnl.Logistic, hetero=-2, integrator_mode=True, @@ -49,14 +53,16 @@ def test_botvinick_model(benchmark, comp_mode, reps): # Hidden layer # colors: ('red','green', 'neutral') words: ('RED','GREEN', 'NEUTRAL') - colors_hidden_layer = pnl.RecurrentTransferMechanism(size=3, + colors_hidden_layer = pnl.RecurrentTransferMechanism( + input_shapes=3, function=pnl.Logistic(x_0=4.0), # bias 4.0 is -4.0 in the paper see Docs for description integrator_mode=True, hetero=-2, integration_rate=0.01, # cohen-huston text says 0.01 name='COLORS_HIDDEN') - words_hidden_layer = pnl.RecurrentTransferMechanism(size=3, + words_hidden_layer = pnl.RecurrentTransferMechanism( + input_shapes=3, function=pnl.Logistic(x_0=4.0), integrator_mode=True, hetero=-2, @@ -64,7 +70,8 @@ def test_botvinick_model(benchmark, comp_mode, reps): name='WORDS_HIDDEN') # Response layer, responses: ('red', 'green') - response_layer = pnl.RecurrentTransferMechanism(size=2, + response_layer = pnl.RecurrentTransferMechanism( + input_shapes=2, function=pnl.Logistic, hetero=-2.0, integrator_mode=True, diff --git a/tests/models/test_greedy_agent.py b/tests/models/test_greedy_agent.py index f283bc21a43..a2f0211a1a5 100644 --- a/tests/models/test_greedy_agent.py +++ b/tests/models/test_greedy_agent.py @@ -33,10 +33,10 @@ def test_simplified_greedy_agent(benchmark, comp_mode): player_len = prey_len = predator_len = obs_len # The original needs GaussianDistort -# player = ProcessingMechanism(size=prey_len, function=GaussianDistort, name="PLAYER OBS") -# prey = ProcessingMechanism(size=prey_len, function=GaussianDistort, name="PREY OBS") - player = TransferMechanism(size=prey_len, name="PLAYER OBS") - prey = TransferMechanism(size=prey_len, name="PREY OBS") +# player = ProcessingMechanism(input_shapes=prey_len, function=GaussianDistort, name="PLAYER OBS") +# prey = ProcessingMechanism(input_shapes=prey_len, function=GaussianDistort, name="PREY OBS") + player = TransferMechanism(input_shapes=prey_len, name="PLAYER OBS") + prey = TransferMechanism(input_shapes=prey_len, name="PREY OBS") # Use ComparatorMechanism to compute direction of action as difference of coordinates between player and prey: # note: unitization is done in main loop, to allow compilation of LinearCombination function) (TBI) @@ -70,8 +70,8 @@ def test_simplified_greedy_agent_random(benchmark, comp_mode): player_len = prey_len = predator_len = obs_len - player = ProcessingMechanism(size=prey_len, function=GaussianDistort, name="PLAYER OBS") - prey = ProcessingMechanism(size=prey_len, function=GaussianDistort, name="PREY OBS") + player = ProcessingMechanism(input_shapes=prey_len, function=GaussianDistort, name="PLAYER OBS") + prey = ProcessingMechanism(input_shapes=prey_len, function=GaussianDistort, name="PREY OBS") # Use ComparatorMechanism to compute direction of action as difference of coordinates between player and prey: # note: unitization is done in main loop, to allow compilation of LinearCombination function) (TBI) @@ -117,14 +117,14 @@ def test_predator_prey(benchmark, mode, ocm_mode, prng, samples, fp_type): player_len = prey_len = predator_len = obs_coords # Input Mechanisms - player_pos = ProcessingMechanism(size=player_len, name="PLAYER POS") - prey_pos = ProcessingMechanism(size=prey_len, name="PREY POS") - predator_pos = ProcessingMechanism(size=predator_len, name="PREDATOR POS") + player_pos = 
ProcessingMechanism(input_shapes=player_len, name="PLAYER POS") + prey_pos = ProcessingMechanism(input_shapes=prey_len, name="PREY POS") + predator_pos = ProcessingMechanism(input_shapes=predator_len, name="PREDATOR POS") # Perceptual Mechanisms - player_obs = ProcessingMechanism(size=prey_len, function=GaussianDistort, name="PLAYER OBS") - prey_obs = ProcessingMechanism(size=prey_len, function=GaussianDistort, name="PREY OBS") - predator_obs = TransferMechanism(size=predator_len, function=GaussianDistort, name="PREDATOR OBS") + player_obs = ProcessingMechanism(input_shapes=prey_len, function=GaussianDistort, name="PLAYER OBS") + prey_obs = ProcessingMechanism(input_shapes=prey_len, function=GaussianDistort, name="PREY OBS") + predator_obs = TransferMechanism(input_shapes=predator_len, function=GaussianDistort, name="PREDATOR OBS") def action_fn(variable): diff --git a/tests/ports/test_input_ports.py b/tests/ports/test_input_ports.py index 897053c2f60..fdf5773b40e 100644 --- a/tests/ports/test_input_ports.py +++ b/tests/ports/test_input_ports.py @@ -9,10 +9,10 @@ class TestInputPorts: def test_combine_param_alone(self): - t1 = pnl.TransferMechanism(size=2) - t2 = pnl.TransferMechanism(size=2) + t1 = pnl.TransferMechanism(input_shapes=2) + t2 = pnl.TransferMechanism(input_shapes=2) t3 = pnl.TransferMechanism( - size=2, + input_shapes=2, input_ports=pnl.InputPort( combine=pnl.PRODUCT)) c = pnl.Composition(pathways=[[t1, t3], [t2, t3]]) @@ -21,10 +21,10 @@ def test_combine_param_alone(self): np.testing.assert_allclose(val, [[3, 8]]) def test_combine_param_redundant_fct_class_spec(self): - t1 = pnl.TransferMechanism(size=2) - t2 = pnl.TransferMechanism(size=2) + t1 = pnl.TransferMechanism(input_shapes=2) + t2 = pnl.TransferMechanism(input_shapes=2) t3 = pnl.TransferMechanism( - size=2, + input_shapes=2, input_ports=pnl.InputPort(function=psyneulink.core.components.functions.nonstateful.combinationfunctions .LinearCombination, combine=pnl.PRODUCT)) @@ -34,10 +34,10 @@ def test_combine_param_redundant_fct_class_spec(self): np.testing.assert_allclose(val, [[3, 8]]) def test_combine_param_redundant_fct_constructor_spec(self): - t1 = pnl.TransferMechanism(size=2) - t2 = pnl.TransferMechanism(size=2) + t1 = pnl.TransferMechanism(input_shapes=2) + t2 = pnl.TransferMechanism(input_shapes=2) t3 = pnl.TransferMechanism( - size=2, + input_shapes=2, input_ports=pnl.InputPort(function=psyneulink.core.components.functions.nonstateful.combinationfunctions.LinearCombination(operation=pnl.PRODUCT), combine=pnl.PRODUCT)) c = pnl.Composition(pathways=[[t1, t3],[t2, t3]]) diff --git a/tests/projections/test_projection_specifications.py b/tests/projections/test_projection_specifications.py index adee0838155..d850aff939a 100644 --- a/tests/projections/test_projection_specifications.py +++ b/tests/projections/test_projection_specifications.py @@ -18,10 +18,10 @@ def test_projection_specification_formats(self): (currently it should be ignored; in the future, if/when Projections between the same sender and receiver in different Compositions are allowed, then it should be used) """ - M1 = pnl.ProcessingMechanism(size=2) - M2 = pnl.ProcessingMechanism(size=5) - M3 = pnl.ProcessingMechanism(size=4) - M4 = pnl.ProcessingMechanism(size=3) + M1 = pnl.ProcessingMechanism(input_shapes=2) + M2 = pnl.ProcessingMechanism(input_shapes=5) + M3 = pnl.ProcessingMechanism(input_shapes=4) + M4 = pnl.ProcessingMechanism(input_shapes=3) M1_M2_matrix = (np.arange(2 * 5).reshape((2, 5)) + 1) / (2 * 5) M2_M3_matrix = (np.arange(5 * 
4).reshape((5, 4)) + 1) / (5 * 4) @@ -483,8 +483,8 @@ def test_no_warning_when_matrix_specified(self): # KDM: this is a good candidate for pytest.parametrize def test_masked_mapping_projection(self): - t1 = pnl.TransferMechanism(size=2) - t2 = pnl.TransferMechanism(size=2) + t1 = pnl.TransferMechanism(input_shapes=2) + t2 = pnl.TransferMechanism(input_shapes=2) proj = pnl.MaskedMappingProjection(sender=t1, receiver=t2, matrix=[[1,2],[3,4]], @@ -495,8 +495,8 @@ def test_masked_mapping_projection(self): val = c.execute(inputs={t1:[1,2]}) np.testing.assert_allclose(val, [[8, 12]]) - t1 = pnl.TransferMechanism(size=2) - t2 = pnl.TransferMechanism(size=2) + t1 = pnl.TransferMechanism(input_shapes=2) + t2 = pnl.TransferMechanism(input_shapes=2) proj = pnl.MaskedMappingProjection(sender=t1, receiver=t2, matrix=[[1,2],[3,4]], @@ -507,8 +507,8 @@ def test_masked_mapping_projection(self): val = c.execute(inputs={t1:[1,2]}) np.testing.assert_allclose(val, [[1, 8]]) - t1 = pnl.TransferMechanism(size=2) - t2 = pnl.TransferMechanism(size=2) + t1 = pnl.TransferMechanism(input_shapes=2) + t2 = pnl.TransferMechanism(input_shapes=2) proj = pnl.MaskedMappingProjection(sender=t1, receiver=t2, mask=[[1,2],[3,4]], @@ -522,8 +522,8 @@ def test_masked_mapping_projection_mask_conficts_with_matrix(self): with pytest.raises(pnl.MaskedMappingProjectionError) as error_text: - t1 = pnl.TransferMechanism(size=2) - t2 = pnl.TransferMechanism(size=2) + t1 = pnl.TransferMechanism(input_shapes=2) + t2 = pnl.TransferMechanism(input_shapes=2) pnl.MaskedMappingProjection(sender=t1, receiver=t2, mask=[[1,2,3],[4,5,6]], diff --git a/tests/scheduling/test_scheduler.py b/tests/scheduling/test_scheduler.py index 07feba1fcad..eda13f5d0b9 100644 --- a/tests/scheduling/test_scheduler.py +++ b/tests/scheduling/test_scheduler.py @@ -1493,21 +1493,21 @@ def test_objective_and_control(self): def test_inline_control_mechanism_example(self): cueInterval = pnl.TransferMechanism( default_variable=[[0.0]], - size=1, + input_shapes=1, function=pnl.Linear(slope=1, intercept=0), output_ports=[pnl.RESULT], name='Cue-Stimulus Interval' ) taskLayer = pnl.TransferMechanism( default_variable=[[0.0, 0.0]], - size=2, + input_shapes=2, function=pnl.Linear(slope=1, intercept=0), output_ports=[pnl.RESULT], name='Task Input [I1, I2]' ) activation = pnl.LCAMechanism( default_variable=[[0.0, 0.0]], - size=2, + input_shapes=2, function=pnl.Logistic(gain=1), leak=.5, competition=2, @@ -1609,7 +1609,7 @@ def test_scheduler_conditions(self, comp_mode, condition, scale, expected_result output_ports=[pnl.DECISION_VARIABLE], name='DDM') - response = pnl.ProcessingMechanism(size=2, name="GATE") + response = pnl.ProcessingMechanism(input_shapes=2, name="GATE") comp = pnl.Composition() comp.add_linear_processing_pathway([decisionMaker, response])
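
For reference, a minimal sketch of the renamed argument in use, assembled only from calls that already appear in the hunks above (the `pnl` alias follows the tests' own convention); this is an illustration for reviewers, not part of the patch:

    import numpy as np
    import psyneulink as pnl

    # `input_shapes` replaces the former `size` constructor argument:
    # an int N declares a single input of length N, while a list of
    # ints declares one input per entry (see TEST 9 above).
    T = pnl.TransferMechanism(name='T', input_shapes=4)
    np.testing.assert_array_equal(T.defaults.variable, [[0, 0, 0, 0]])

    # The `size` attribute is renamed to `input_shapes` as well.
    assert len(T.input_shapes) == 1 and T.input_shapes[0] == 4

As the TEST 12 through TEST 17 hunks show, supplying conflicting `input_shapes` and `default_variable` specifications still raises a ComponentError; only the argument's name changes, not its validation behavior.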