From 8ce3372f74d4a734dfbb5a5e0b08280de02fd5ea Mon Sep 17 00:00:00 2001 From: Emil Jansson Date: Thu, 12 Jan 2023 14:02:53 +0100 Subject: [PATCH 1/8] Added makefile for IF_curr_delta neurons to be used in convolutions --- .../IF_curr_delta_conv/Makefile | 26 +++++++++++++++++++ .../makefiles/local_only_combined/Makefile | 3 ++- 2 files changed, 28 insertions(+), 1 deletion(-) create mode 100644 neural_modelling/makefiles/local_only_combined/IF_curr_delta_conv/Makefile diff --git a/neural_modelling/makefiles/local_only_combined/IF_curr_delta_conv/Makefile b/neural_modelling/makefiles/local_only_combined/IF_curr_delta_conv/Makefile new file mode 100644 index 0000000000..8fdc82724e --- /dev/null +++ b/neural_modelling/makefiles/local_only_combined/IF_curr_delta_conv/Makefile @@ -0,0 +1,26 @@ +# Copyright (c) 2021-2022 The University of Manchester +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +APP = $(notdir $(CURDIR)) + +NEURON_MODEL = $(NEURON_DIR)/neuron/models/neuron_model_lif_impl.c +NEURON_MODEL_H = $(NEURON_DIR)/neuron/models/neuron_model_lif_impl.h +INPUT_TYPE_H = $(NEURON_DIR)/neuron/input_types/input_type_current.h +NEURON_IMPL_H = $(NEURON_DIR)/neuron/implementations/neuron_impl_standard.h +THRESHOLD_TYPE_H = $(NEURON_DIR)/neuron/threshold_types/threshold_type_static.h +SYNAPSE_TYPE_H = $(NEURON_DIR)/neuron/synapse_types/synapse_types_delta_impl.h +LOCAL_ONLY_IMPL = $(NEURON_DIR)/neuron/local_only/local_only_conv_impl.c + +include ../local_only.mk diff --git a/neural_modelling/makefiles/local_only_combined/Makefile b/neural_modelling/makefiles/local_only_combined/Makefile index 4b67130b99..b3b9193129 100644 --- a/neural_modelling/makefiles/local_only_combined/Makefile +++ b/neural_modelling/makefiles/local_only_combined/Makefile @@ -13,7 +13,8 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . 
-MODELS = IF_curr_exp_conv\ +MODELS = IF_curr_delta_conv\ + IF_curr_exp_conv\ IF_curr_exp_pool_dense all: From d954bf343254d486994bda9000d3467d14d64779 Mon Sep 17 00:00:00 2001 From: Andrew Rowley Date: Mon, 23 Jan 2023 15:13:49 +0000 Subject: [PATCH 2/8] Add delays if desired --- .../neuron/local_only/local_only_convolution.py | 16 +++++++++++----- .../neuron/local_only/local_only_pool_dense.py | 13 ++++++++++++- 2 files changed, 23 insertions(+), 6 deletions(-) diff --git a/spynnaker/pyNN/models/neuron/local_only/local_only_convolution.py b/spynnaker/pyNN/models/neuron/local_only/local_only_convolution.py index 7e27e8e278..60c4eb1144 100644 --- a/spynnaker/pyNN/models/neuron/local_only/local_only_convolution.py +++ b/spynnaker/pyNN/models/neuron/local_only/local_only_convolution.py @@ -38,14 +38,20 @@ class LocalOnlyConvolution(AbstractLocalOnly, AbstractSupportsSignedWeights): """ __slots__ = [ - "__cached_2d_overlaps" + "__cached_2d_overlaps", + "__delay" ] - def __init__(self): + def __init__(self, delay=None): + """ + :param float delay: + The delay used in the connection; by default 1 time step + """ # Store the overlaps between 2d vertices to avoid recalculation self.__cached_2d_overlaps = dict() - - # Store the merged keys for sources to avoid recalculation + self.__delay = delay + if delay is None: + self.__delay = SpynnakerDataView.get_simulation_time_step_ms() @overrides(AbstractLocalOnly.merge) def merge(self, synapse_dynamics): @@ -193,7 +199,7 @@ def __get_rinfo_for_sources(self, key_cache, srcs, incoming): @property @overrides(AbstractLocalOnly.delay) def delay(self): - return SpynnakerDataView.get_simulation_time_step_ms() + return self.__delay @property @overrides(AbstractLocalOnly.weight) diff --git a/spynnaker/pyNN/models/neuron/local_only/local_only_pool_dense.py b/spynnaker/pyNN/models/neuron/local_only/local_only_pool_dense.py index da63852a5d..4f8c7b2e58 100644 --- a/spynnaker/pyNN/models/neuron/local_only/local_only_pool_dense.py +++ b/spynnaker/pyNN/models/neuron/local_only/local_only_pool_dense.py @@ -31,6 +31,17 @@ class LocalOnlyPoolDense(AbstractLocalOnly, AbstractSupportsSignedWeights): """ A convolution synapse dynamics that can process spikes with only DTCM """ + __slots__ = ["__delay"] + + def __init__(self, delay=None): + """ + :param float delay: + The delay used in the connection; by default 1 time step + """ + self.__delay = delay + if delay is None: + self.__delay = SpynnakerDataView.get_simulation_time_step_ms() + @overrides(AbstractLocalOnly.merge) def merge(self, synapse_dynamics): if not isinstance(synapse_dynamics, LocalOnlyPoolDense): @@ -135,7 +146,7 @@ def __merge_key_and_mask(self, key_a, mask_a, key_b, mask_b): @property @overrides(AbstractLocalOnly.delay) def delay(self): - return SpynnakerDataView.get_simulation_time_step_ms() + return self.__delay @property @overrides(AbstractLocalOnly.weight) From 3f620815763ec883abde0339d7d888fb531d5604 Mon Sep 17 00:00:00 2001 From: Andrew Rowley Date: Mon, 23 Jan 2023 15:28:04 +0000 Subject: [PATCH 3/8] Pass delays --- .../src/neuron/local_only/local_only_conv_impl.c | 5 +++-- .../src/neuron/local_only/local_only_pool_dense_impl.c | 5 +++-- .../neural_projections/connectors/convolution_connector.py | 5 ++++- .../neural_projections/connectors/pool_dense_connector.py | 5 ++++- 4 files changed, 14 insertions(+), 6 deletions(-) diff --git a/neural_modelling/src/neuron/local_only/local_only_conv_impl.c b/neural_modelling/src/neuron/local_only/local_only_conv_impl.c index 5b82bdbd95..dd6e6bb8aa 100644 --- 
a/neural_modelling/src/neuron/local_only/local_only_conv_impl.c +++ b/neural_modelling/src/neuron/local_only/local_only_conv_impl.c @@ -63,6 +63,7 @@ typedef struct { lc_coord_t recip_pool_strides; uint16_t positive_synapse_type; uint16_t negative_synapse_type; + uint32_t delay; lc_weight_t weights[]; // n_weights = next_even(kernel.width * kernel.height) } connector; @@ -198,12 +199,12 @@ static inline void do_convolution_operation( } uint32_t rb_index = 0; if (weight > 0) { - rb_index = synapse_row_get_ring_buffer_index(time + 1, + rb_index = synapse_row_get_ring_buffer_index(time + connector->delay, connector->positive_synapse_type, post_index, synapse_type_index_bits, synapse_index_bits, synapse_delay_mask); } else { - rb_index = synapse_row_get_ring_buffer_index(time + 1, + rb_index = synapse_row_get_ring_buffer_index(time + connector->delay, connector->negative_synapse_type, post_index, synapse_type_index_bits, synapse_index_bits, synapse_delay_mask); diff --git a/neural_modelling/src/neuron/local_only/local_only_pool_dense_impl.c b/neural_modelling/src/neuron/local_only/local_only_pool_dense_impl.c index 9dfbf3840c..515c5bed3f 100644 --- a/neural_modelling/src/neuron/local_only/local_only_pool_dense_impl.c +++ b/neural_modelling/src/neuron/local_only/local_only_pool_dense_impl.c @@ -68,6 +68,7 @@ typedef struct { uint32_t n_weights; uint16_t positive_synapse_type; uint16_t negative_synapse_type; + uint32_t delay; dimension dimensions[]; // Also follows: // lc_weight_t weights[]; @@ -213,12 +214,12 @@ void local_only_impl_process_spike( } uint32_t rb_index = 0; if (weight > 0) { - rb_index = synapse_row_get_ring_buffer_index(time + 1, + rb_index = synapse_row_get_ring_buffer_index(time + connector->delay, connector->positive_synapse_type, post_index, synapse_type_index_bits, synapse_index_bits, synapse_delay_mask); } else { - rb_index = synapse_row_get_ring_buffer_index(time + 1, + rb_index = synapse_row_get_ring_buffer_index(time + connector->delay, connector->negative_synapse_type, post_index, synapse_type_index_bits, synapse_index_bits, synapse_delay_mask); diff --git a/spynnaker/pyNN/models/neural_projections/connectors/convolution_connector.py b/spynnaker/pyNN/models/neural_projections/connectors/convolution_connector.py index 55d699f554..6ce118326f 100644 --- a/spynnaker/pyNN/models/neural_projections/connectors/convolution_connector.py +++ b/spynnaker/pyNN/models/neural_projections/connectors/convolution_connector.py @@ -33,7 +33,7 @@ #: The number of 16-bit shorts in the connector struct, #: ignoring the source_key_info struct and the weights (which are dynamic) -CONNECTOR_CONFIG_SHORTS = 12 +CONNECTOR_CONFIG_SHORTS = 14 class ConvolutionConnector(AbstractConnector): @@ -395,6 +395,9 @@ def write_local_only_data( spec.write_value(pos_synapse_type, data_type=DataType.UINT16) spec.write_value(neg_synapse_type, data_type=DataType.UINT16) + # Write delay + spec.write_value(app_edge.post_vertex.synapse_dynamics.delay) + # Encode weights with weight scaling encoded_kernel_weights = self.__kernel_weights.flatten() if len(encoded_kernel_weights) % 2 != 0: diff --git a/spynnaker/pyNN/models/neural_projections/connectors/pool_dense_connector.py b/spynnaker/pyNN/models/neural_projections/connectors/pool_dense_connector.py index bc37b15829..ea4d53f4c3 100644 --- a/spynnaker/pyNN/models/neural_projections/connectors/pool_dense_connector.py +++ b/spynnaker/pyNN/models/neural_projections/connectors/pool_dense_connector.py @@ -29,7 +29,7 @@ _DIMENSION_SIZE = (2 * BYTES_PER_WORD) + (6 * 
BYTES_PER_SHORT) _KEY_INFO_SIZE = 3 * BYTES_PER_WORD -_CONN_SIZE = _KEY_INFO_SIZE + (2 * BYTES_PER_WORD) + (2 * BYTES_PER_SHORT) +_CONN_SIZE = _KEY_INFO_SIZE + (3 * BYTES_PER_WORD) + (2 * BYTES_PER_SHORT) _DIM_DTYPE = [("mask", "uint32"), ("shift", "uint32"), ("pre_start", "uint16"), ("pre_in_post_start", "uint16"), ("pre_in_post_end", "uint16"), ("pre_in_post_shape", "uint16"), ("recip_pool_stride", "uint16"), @@ -283,6 +283,9 @@ def write_local_only_data( spec.write_value(pos_synapse_type, data_type=DataType.UINT16) spec.write_value(neg_synapse_type, data_type=DataType.UINT16) + # Write delay + spec.write_value(app_edge.post_vertex.synapse_dynamics.delay) + # Generate the dimension information dim_info = numpy.zeros(n_dims, dtype=_DIM_DTYPE) if self.__pool_stride is not None: From 56f3eb82c0852f8652e4c6dca5f1c1e439265f70 Mon Sep 17 00:00:00 2001 From: Andrew Rowley Date: Mon, 23 Jan 2023 16:10:16 +0000 Subject: [PATCH 4/8] Do delay correctly --- .../connectors/convolution_connector.py | 4 +++- .../connectors/pool_dense_connector.py | 4 +++- .../population_machine_local_only_combined_vertex.py | 10 +++++++--- 3 files changed, 13 insertions(+), 5 deletions(-) diff --git a/spynnaker/pyNN/models/neural_projections/connectors/convolution_connector.py b/spynnaker/pyNN/models/neural_projections/connectors/convolution_connector.py index 6ce118326f..a5f6176fa9 100644 --- a/spynnaker/pyNN/models/neural_projections/connectors/convolution_connector.py +++ b/spynnaker/pyNN/models/neural_projections/connectors/convolution_connector.py @@ -27,6 +27,7 @@ from spynnaker.pyNN.utilities.utility_calls import get_n_bits from spynnaker.pyNN.models.abstract_models import HasShapeKeyFields from spynnaker.pyNN.utilities.constants import SPIKE_PARTITION_ID +from spynnaker.pyNN.data.spynnaker_data_view import SpynnakerDataView #: The number of 32-bit words in the source_key_info struct SOURCE_KEY_INFO_WORDS = 7 @@ -396,7 +397,8 @@ def write_local_only_data( spec.write_value(neg_synapse_type, data_type=DataType.UINT16) # Write delay - spec.write_value(app_edge.post_vertex.synapse_dynamics.delay) + spec.write_value(app_edge.post_vertex.synapse_dynamics.delay * + SpynnakerDataView.get_simulation_time_step_per_ms()) # Encode weights with weight scaling encoded_kernel_weights = self.__kernel_weights.flatten() diff --git a/spynnaker/pyNN/models/neural_projections/connectors/pool_dense_connector.py b/spynnaker/pyNN/models/neural_projections/connectors/pool_dense_connector.py index ea4d53f4c3..04d3ce5c14 100644 --- a/spynnaker/pyNN/models/neural_projections/connectors/pool_dense_connector.py +++ b/spynnaker/pyNN/models/neural_projections/connectors/pool_dense_connector.py @@ -25,6 +25,7 @@ from collections.abc import Iterable from spinn_front_end_common.utilities.exceptions import ConfigurationException from spynnaker.pyNN.models.abstract_models import HasShapeKeyFields +from spynnaker.pyNN.data.spynnaker_data_view import SpynnakerDataView _DIMENSION_SIZE = (2 * BYTES_PER_WORD) + (6 * BYTES_PER_SHORT) @@ -284,7 +285,8 @@ def write_local_only_data( spec.write_value(neg_synapse_type, data_type=DataType.UINT16) # Write delay - spec.write_value(app_edge.post_vertex.synapse_dynamics.delay) + spec.write_value(app_edge.post_vertex.synapse_dynamics.delay * + SpynnakerDataView.get_simulation_time_step_per_ms()) # Generate the dimension information dim_info = numpy.zeros(n_dims, dtype=_DIM_DTYPE) diff --git a/spynnaker/pyNN/models/neuron/population_machine_local_only_combined_vertex.py 
b/spynnaker/pyNN/models/neuron/population_machine_local_only_combined_vertex.py index d4f9cbe0ff..1ce4ac5946 100644 --- a/spynnaker/pyNN/models/neuron/population_machine_local_only_combined_vertex.py +++ b/spynnaker/pyNN/models/neuron/population_machine_local_only_combined_vertex.py @@ -25,6 +25,7 @@ from .population_machine_common import CommonRegions, PopulationMachineCommon from .population_machine_neurons import ( NeuronRegions, PopulationMachineNeurons, NeuronProvenance) +from spynnaker.pyNN.data.spynnaker_data_view import SpynnakerDataView class LocalOnlyProvenance(ctypes.LittleEndianStructure): @@ -270,12 +271,15 @@ def __write_local_only_data(self, spec): log_n_max_atoms = get_n_bits(self._max_atoms_per_core) log_n_synapse_types = get_n_bits( self._app_vertex.neuron_impl.get_n_synapse_types()) - # Delay is always 1 - log_max_delay = 1 + # Find the maximum delay + # pylint: disable=protected-access + max_delay = SpynnakerDataView.get_simulation_time_step_per_ms() * max( + proj._synapse_information.delays + for proj in self._app_vertex.incoming_projections) spec.write_value(log_n_max_atoms) spec.write_value(log_n_synapse_types) - spec.write_value(log_max_delay) + spec.write_value(get_n_bits(max_delay)) spec.write_value(self._app_vertex.incoming_spike_buffer_size) spec.write_value(int(self._app_vertex.drop_late_spikes)) From 7accc3f51f16d1c79cfa47bb6b08f9067446fbe6 Mon Sep 17 00:00:00 2001 From: Emil Jansson Date: Thu, 12 Jan 2023 14:02:53 +0100 Subject: [PATCH 5/8] Added makefile for IF_curr_delta neurons to be used in convolutions --- .../IF_curr_delta_conv/Makefile | 26 +++++++++++++++++++ .../makefiles/local_only_combined/Makefile | 3 ++- 2 files changed, 28 insertions(+), 1 deletion(-) create mode 100644 neural_modelling/makefiles/local_only_combined/IF_curr_delta_conv/Makefile diff --git a/neural_modelling/makefiles/local_only_combined/IF_curr_delta_conv/Makefile b/neural_modelling/makefiles/local_only_combined/IF_curr_delta_conv/Makefile new file mode 100644 index 0000000000..8fdc82724e --- /dev/null +++ b/neural_modelling/makefiles/local_only_combined/IF_curr_delta_conv/Makefile @@ -0,0 +1,26 @@ +# Copyright (c) 2021-2022 The University of Manchester +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
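+# +# The variables below are picked up by the shared rules in ../local_only.mk +# (included at the end of this file): NEURON_MODEL/NEURON_MODEL_H select the +# LIF neuron implementation, SYNAPSE_TYPE_H the delta synapse shaping, and +# LOCAL_ONLY_IMPL the convolution spike-processing loop.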
+ + APP = $(notdir $(CURDIR)) + + NEURON_MODEL = $(NEURON_DIR)/neuron/models/neuron_model_lif_impl.c + NEURON_MODEL_H = $(NEURON_DIR)/neuron/models/neuron_model_lif_impl.h + INPUT_TYPE_H = $(NEURON_DIR)/neuron/input_types/input_type_current.h + NEURON_IMPL_H = $(NEURON_DIR)/neuron/implementations/neuron_impl_standard.h + THRESHOLD_TYPE_H = $(NEURON_DIR)/neuron/threshold_types/threshold_type_static.h + SYNAPSE_TYPE_H = $(NEURON_DIR)/neuron/synapse_types/synapse_types_delta_impl.h + LOCAL_ONLY_IMPL = $(NEURON_DIR)/neuron/local_only/local_only_conv_impl.c + +include ../local_only.mk diff --git a/neural_modelling/makefiles/local_only_combined/Makefile b/neural_modelling/makefiles/local_only_combined/Makefile index 4b67130b99..b3b9193129 100644 --- a/neural_modelling/makefiles/local_only_combined/Makefile +++ b/neural_modelling/makefiles/local_only_combined/Makefile @@ -13,7 +13,8 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. -MODELS = IF_curr_exp_conv\ +MODELS = IF_curr_delta_conv\ + IF_curr_exp_conv\ IF_curr_exp_pool_dense all: From 6f70b3012a280c359f060f36fdd4edf950a7c5a5 Mon Sep 17 00:00:00 2001 From: emijan-kth <56410542+emijan-kth@users.noreply.github.com> Date: Thu, 2 Feb 2023 16:17:32 +0100 Subject: [PATCH 6/8] Multiple fixes to convolutions. (#2) This covers: arbitrary sizes of convolution kernel (including non-square and non-odd-sized ones); strides, in particular non-square ones; and a fix to the [row, column] order of convolution parameters (e.g. padding and strides), bringing it in line with the documentation comments. --- .../IF_curr_delta_conv/Makefile | 2 +- .../neuron/local_only/local_only_conv_impl.c | 56 ++++++++++------ .../connectors/convolution_connector.py | 66 ++++++++++++------- 3 files changed, 77 insertions(+), 47 deletions(-) diff --git a/neural_modelling/makefiles/local_only_combined/IF_curr_delta_conv/Makefile b/neural_modelling/makefiles/local_only_combined/IF_curr_delta_conv/Makefile index 8fdc82724e..5026a0cca8 100644 --- a/neural_modelling/makefiles/local_only_combined/IF_curr_delta_conv/Makefile +++ b/neural_modelling/makefiles/local_only_combined/IF_curr_delta_conv/Makefile @@ -17,7 +17,7 @@ APP = $(notdir $(CURDIR)) NEURON_MODEL = $(NEURON_DIR)/neuron/models/neuron_model_lif_impl.c NEURON_MODEL_H = $(NEURON_DIR)/neuron/models/neuron_model_lif_impl.h -INPUT_TYPE_H = $(NEURON_DIR)/neuron/input_types/input_type_current.h +INPUT_TYPE_H = $(NEURON_DIR)/neuron/input_types/input_type_delta.h NEURON_IMPL_H = $(NEURON_DIR)/neuron/implementations/neuron_impl_standard.h THRESHOLD_TYPE_H = $(NEURON_DIR)/neuron/threshold_types/threshold_type_static.h SYNAPSE_TYPE_H = $(NEURON_DIR)/neuron/synapse_types/synapse_types_delta_impl.h diff --git a/neural_modelling/src/neuron/local_only/local_only_conv_impl.c b/neural_modelling/src/neuron/local_only/local_only_conv_impl.c index 5b82bdbd95..709c7213e6 100644 --- a/neural_modelling/src/neuron/local_only/local_only_conv_impl.c +++ b/neural_modelling/src/neuron/local_only/local_only_conv_impl.c @@ -60,6 +60,7 @@ typedef struct { lc_shape_t kernel; lc_shape_t padding; lc_coord_t recip_strides; + lc_coord_t strides; lc_coord_t recip_pool_strides; uint16_t positive_synapse_type; uint16_t negative_synapse_type; @@ -138,7 +139,15 @@ bool local_only_impl_initialise(void *address){ return true; } -//! \brief Multiply an integer by a 16-bit reciprocal and return the floored +//! 
\brief Calculate the remainder from a division +static inline int16_t calc_remainder(int16_t dividend, int16_t divisor, int16_t quotient) { + int16_t remainder = dividend - quotient * divisor; + log_debug("remainder: %d = %d * %d + %d", + dividend, quotient, divisor, remainder); + return remainder; +} + +//! \brief Multiply an integer by a 16-bit reciprocal and return the floored //! integer result static inline int16_t recip_multiply(int16_t integer, int16_t recip) { int32_t i = integer; @@ -146,19 +155,17 @@ static inline int16_t recip_multiply(int16_t integer, int16_t recip) { return (int16_t) ((i * r) >> RECIP_FRACT_BITS); } -//! \brief Do a mapping from pre to post 2D spaces, we use the standard -//! padding, kernel, strides from Convolutional Neural Networks -//! because of the way we're looping through the kernel, we divide the kernel -//! shape by 2. -static inline lc_coord_t map_pre_to_post(connector *connector, lc_coord_t pre, - int16_t half_kh, int16_t half_kw) { - lc_coord_t post = pre; - post.col = recip_multiply(post.col, connector->recip_pool_strides.col); - post.row = recip_multiply(post.row, connector->recip_pool_strides.row); - post.col = post.col - half_kw + connector->padding.width; - post.row = post.row - half_kh + connector->padding.height; - post.col = recip_multiply(post.col, connector->recip_strides.col); - post.row = recip_multiply(post.row, connector->recip_strides.row); +//! \brief Do a mapping from pre to post 2D spaces, setting *start_i to the +//! first kernel row and column aligned with this pre neuron +static inline lc_coord_t map_pre_to_post(connector *connector, lc_coord_t pre, lc_coord_t *start_i) { + pre.col = recip_multiply(pre.col, connector->recip_pool_strides.col); + pre.row = recip_multiply(pre.row, connector->recip_pool_strides.row); + pre.col += connector->padding.width; + pre.row += connector->padding.height; + lc_coord_t post; + post.col = recip_multiply(pre.col, connector->recip_strides.col); + post.row = recip_multiply(pre.row, connector->recip_strides.row); + start_i->col = calc_remainder(pre.col, connector->strides.col, post.col); + start_i->row = calc_remainder(pre.row, connector->strides.row, post.row); return post; } @@ -169,21 +176,26 @@ static inline lc_coord_t map_pre_to_post(connector *connector, lc_coord_t pre, static inline void do_convolution_operation( uint32_t time, lc_coord_t pre_coord, connector *connector, uint16_t *ring_buffers) { - int32_t half_kh = connector->kernel.height / 2; - int32_t half_kw = connector->kernel.width / 2; - lc_coord_t post_coord = map_pre_to_post(connector, pre_coord, half_kh, half_kw); + lc_coord_t start_i; + log_debug("kernel height: %d, kernel width: %d, padding height: %d, padding width: %d, strides row: %d, strides col: %d", connector->kernel.height, connector->kernel.width, connector->padding.height, connector->padding.width, connector->strides.row, connector->strides.col); + lc_coord_t post_coord = map_pre_to_post(connector, pre_coord, &start_i); log_debug("pre row %d, col %d AS post row %d, col %d", pre_coord.row, pre_coord.col, post_coord.row, post_coord.col); int32_t kw = connector->kernel.width; - for (int32_t r = -half_kh, kr = 0; r <= half_kh; r++, kr++) { - int32_t tmp_row = post_coord.row + r; + for (int32_t i_row = start_i.row, tmp_row = post_coord.row; i_row < connector->kernel.height; i_row += connector->strides.row, --tmp_row) { + int32_t kr = connector->kernel.height - 1 - i_row; + log_debug("i_row = %u, kr = %u, tmp_row = %u", i_row, kr, tmp_row); + if ((tmp_row < config.post_start.row) || (tmp_row > config.post_end.row)) { + log_debug("tmp_row 
outside"); continue; } - for (int32_t c = -half_kw, kc = 0; c <= half_kw; c++, kc++) { - int32_t tmp_col = post_coord.col + c; + for (int32_t i_col = start_i.col, tmp_col = post_coord.col; i_col < connector->kernel.width; i_col += connector->strides.col, --tmp_col) { + int32_t kc = connector->kernel.width - 1 - i_col; + log_debug("i_col = %u, kc = %u, tmp_col = %u", i_col, kc, tmp_col); if ((tmp_col < config.post_start.col) || (tmp_col > config.post_end.col)) { + log_debug("tmp_col outside"); continue; } @@ -192,8 +204,10 @@ static inline void do_convolution_operation( ((tmp_row - config.post_start.row) * config.post_shape.width) + (tmp_col - config.post_start.col); uint32_t k = (kr * kw) + kc; + log_debug("weight index = %u", k); lc_weight_t weight = connector->weights[k]; if (weight == 0) { + log_debug("zero weight"); continue; } uint32_t rb_index = 0; diff --git a/spynnaker/pyNN/models/neural_projections/connectors/convolution_connector.py b/spynnaker/pyNN/models/neural_projections/connectors/convolution_connector.py index 55d699f554..9bda1e36c8 100644 --- a/spynnaker/pyNN/models/neural_projections/connectors/convolution_connector.py +++ b/spynnaker/pyNN/models/neural_projections/connectors/convolution_connector.py @@ -33,7 +33,7 @@ #: The number of 16-bit shorts in the connector struct, #: ignoring the source_key_info struct and the weights (which are dynamic) -CONNECTOR_CONFIG_SHORTS = 12 +CONNECTOR_CONFIG_SHORTS = 14 class ConvolutionConnector(AbstractConnector): @@ -215,11 +215,10 @@ def get_post_shape(self, shape): shape = (post_pool_shape // self.__pool_stride) + 1 kernel_shape = numpy.array(self.__kernel_weights.shape) - post_shape = (shape - (kernel_shape - 1) + - (2 * self.__padding_shape)) + post_shape = shape - kernel_shape + (2 * self.__padding_shape) return numpy.clip( - post_shape // self.__strides, 1, numpy.inf).astype('int') + post_shape // self.__strides + 1, 1, numpy.inf).astype('int') @overrides(AbstractConnector.validate_connection) def validate_connection(self, application_edge, synapse_info): @@ -230,7 +229,9 @@ def validate_connection(self, application_edge, synapse_info): "The ConvolutionConnector only works where the Populations" " of a Projection are both 2D. 
Please ensure that both the" " Populations use a Grid2D structure.") - expected_post_shape = tuple(self.get_post_shape(pre.atoms_shape)) + pre_shape = pre.atoms_shape + expected_post_shape = tuple(self.get_post_shape((pre_shape[1], pre_shape[0]))) + expected_post_shape = expected_post_shape[1], expected_post_shape[0] if expected_post_shape != post.atoms_shape: raise ConfigurationException( f"With a source population with shape {pre.atoms_shape}, " @@ -281,10 +282,20 @@ def get_connected_vertices(self, s_info, source_vertex, target_vertex): pre_slices = [m_vertex.vertex_slice for m_vertex in pre_vertices] pre_slices_x = [vtx_slice.get_slice(0) for vtx_slice in pre_slices] pre_slices_y = [vtx_slice.get_slice(1) for vtx_slice in pre_slices] - pre_ranges = [[[px.start, py.start], [px.stop - 1, py.stop - 1]] + pre_ranges = [[[py.start, px.start], [py.stop - 1, px.stop - 1]] for px, py in zip(pre_slices_x, pre_slices_y)] - pres_as_posts = self.__pre_as_post(pre_ranges) - hlf_k_w, hlf_k_h = numpy.array(self.__kernel_weights.shape) // 2 + pre_vertex_in_post_layer, start_i = self.__pre_as_post(pre_ranges) + + pre_vertex_in_post_layer_upper_left = pre_vertex_in_post_layer[:,0] + pre_vertex_in_post_layer_lower_right = pre_vertex_in_post_layer[:,1] + + kernel_shape = numpy.array(self.__kernel_weights.shape) + + j = (kernel_shape - 1 - start_i) // self.__strides + j_upper_left = j[:,0] + + pre_vertex_max_reach_in_post_layer_upper_left = pre_vertex_in_post_layer_upper_left - j_upper_left + pre_vertex_max_reach_in_post_layer_lower_right = pre_vertex_in_post_layer_lower_right connected = list() for post in target_vertex.splitter.get_in_coming_vertices( @@ -293,18 +304,18 @@ def get_connected_vertices(self, s_info, source_vertex, target_vertex): post_slice_x = post_slice.get_slice(0) post_slice_y = post_slice.get_slice(1) - # Get ranges allowed in post - min_x = post_slice_x.start - hlf_k_w - max_x = (post_slice_x.stop + hlf_k_w) - 1 - min_y = post_slice_y.start - hlf_k_h - max_y = (post_slice_y.stop + hlf_k_h) - 1 + # Get ranges allowed in post vertex + min_x = post_slice_x.start + max_x = post_slice_x.stop - 1 + min_y = post_slice_y.start + max_y = post_slice_y.stop - 1 # Test that the start coords are in range i.e. less than max start_in_range = numpy.logical_not( - numpy.any(pres_as_posts[:, 0] > [max_x, max_y], axis=1)) + numpy.any(pre_vertex_max_reach_in_post_layer_upper_left > [max_y, max_x], axis=1)) # Test that the end coords are in range i.e. more than min end_in_range = numpy.logical_not( - numpy.any(pres_as_posts[:, 1] < [min_x, min_y], axis=1)) + numpy.any(pre_vertex_max_reach_in_post_layer_lower_right < [min_y, min_x], axis=1)) # When both things are true, we have a vertex in range pre_in_range = pre_vertices[ numpy.logical_and(start_in_range, end_in_range)] @@ -315,17 +326,18 @@ def get_connected_vertices(self, s_info, source_vertex, target_vertex): def __pre_as_post(self, pre_coords): """ Write pre coords as post coords. 
- :param Iterable pre_coords: An iterable of (x, y) coordinates + :param Iterable pre_coords: An iterable of (y, x) coordinates :rtype: numpy.ndarray """ coords = numpy.array(pre_coords) if self.__pool_stride is not None: coords //= self.__pool_stride - kernel_shape = numpy.array(self.__kernel_weights.shape) - coords = coords - kernel_shape // 2 + self.__padding_shape - coords //= self.__strides - return coords + coords += self.__padding_shape + coord_by_strides = coords // self.__strides + start_i = coords % self.__strides + + return coord_by_strides, start_i @property def local_only_n_bytes(self): @@ -343,9 +355,9 @@ def write_local_only_data( weight_scales): # Get info about things kernel_shape = self.__kernel_weights.shape - ps_x, ps_y = 1, 1 + ps_y, ps_x = 1, 1 if self.__pool_stride is not None: - ps_x, ps_y = self.__pool_stride + ps_y, ps_x = self.__pool_stride # Write source key info spec.write_value(key, data_type=DataType.UINT32) @@ -376,13 +388,17 @@ def write_local_only_data( # Write remaining connector details spec.write_value(start[1], data_type=DataType.INT16) spec.write_value(start[0], data_type=DataType.INT16) - spec.write_value(kernel_shape[1], data_type=DataType.INT16) spec.write_value(kernel_shape[0], data_type=DataType.INT16) - spec.write_value(self.__padding_shape[1], data_type=DataType.INT16) + spec.write_value(kernel_shape[1], data_type=DataType.INT16) spec.write_value(self.__padding_shape[0], data_type=DataType.INT16) + spec.write_value(self.__padding_shape[1], data_type=DataType.INT16) + spec.write_value(self.__recip(self.__strides[0]), + data_type=DataType.INT16) spec.write_value(self.__recip(self.__strides[1]), data_type=DataType.INT16) - spec.write_value(self.__recip(self.__strides[0]), + spec.write_value(self.__strides[0], + data_type=DataType.INT16) + spec.write_value(self.__strides[1], data_type=DataType.INT16) spec.write_value(self.__recip(ps_y), data_type=DataType.INT16) spec.write_value(self.__recip(ps_x), data_type=DataType.INT16) From bb90642846ec993975146c9eeee6e9880609c0e5 Mon Sep 17 00:00:00 2001 From: Emil Jansson Date: Fri, 10 Feb 2023 20:30:20 +0100 Subject: [PATCH 7/8] Fixed merge error: local_only_delays should have increased size of connector struct. --- .../neural_projections/connectors/convolution_connector.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/spynnaker/pyNN/models/neural_projections/connectors/convolution_connector.py b/spynnaker/pyNN/models/neural_projections/connectors/convolution_connector.py index 2a6ac1ffc0..eb0b09a85f 100644 --- a/spynnaker/pyNN/models/neural_projections/connectors/convolution_connector.py +++ b/spynnaker/pyNN/models/neural_projections/connectors/convolution_connector.py @@ -34,7 +34,7 @@ #: The number of 16-bit shorts in the connector struct, #: ignoring the source_key_info struct and the weights (which are dynamic) -CONNECTOR_CONFIG_SHORTS = 14 +CONNECTOR_CONFIG_SHORTS = 16 class ConvolutionConnector(AbstractConnector): From 91eeed643a03be914b79467fadc0e941a1e3d818 Mon Sep 17 00:00:00 2001 From: Emil Jansson Date: Fri, 10 Feb 2023 20:39:36 +0100 Subject: [PATCH 8/8] Added option to ConvolutionConnector for delays varying horizontally over kernel. 
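With a non-zero horizontal_delay_step, the connector delay is no longer flat over the kernel: a spike that first aligns with the kernel at column offset start_i.col is delivered with delay (delay - start_i.col * horizontal_delay_step) timesteps, so entries further along a kernel row arrive earlier. A minimal sketch of the arithmetic (illustrative values; base_delay and step stand in for connector->delay and connector->strides_delay_step):

    base_delay, step = 4, 1      # both in timesteps
    for start_col in range(3):   # start_i.col as computed by map_pre_to_post
        print(start_col, base_delay - start_col * step)
    # start_col 0 -> delay 4, 1 -> 3, 2 -> 2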
--- .../src/neuron/local_only/local_only_conv_impl.c | 13 +++++++++++-- .../connectors/convolution_connector.py | 13 ++++++++++--- 2 files changed, 21 insertions(+), 5 deletions(-) diff --git a/neural_modelling/src/neuron/local_only/local_only_conv_impl.c b/neural_modelling/src/neuron/local_only/local_only_conv_impl.c index 58d2c98c6d..0d50f3e684 100644 --- a/neural_modelling/src/neuron/local_only/local_only_conv_impl.c +++ b/neural_modelling/src/neuron/local_only/local_only_conv_impl.c @@ -65,6 +65,7 @@ typedef struct { uint16_t positive_synapse_type; uint16_t negative_synapse_type; uint32_t delay; + uint32_t strides_delay_step; lc_weight_t weights[]; // n_weights = next_even(kernel.width * kernel.height) } connector; @@ -192,6 +193,14 @@ static inline void do_convolution_operation( log_debug("tmp_row outside"); continue; } + + uint32_t delay = connector->delay; + if (connector->strides_delay_step != 0) + { + delay -= start_i.col * connector->strides_delay_step; + log_debug("start_i.col = %u, delay = %u", start_i.col, delay); + } + for (int32_t i_col = start_i.col, tmp_col = post_coord.col; i_col < connector->kernel.width; i_col += connector->strides.col, --tmp_col) { int32_t kc = connector->kernel.width - 1 - i_col; log_debug("i_col = %u, kc = %u, tmp_col = %u", i_col, kc, tmp_col); @@ -213,12 +222,12 @@ static inline void do_convolution_operation( } uint32_t rb_index = 0; if (weight > 0) { - rb_index = synapse_row_get_ring_buffer_index(time + connector->delay, + rb_index = synapse_row_get_ring_buffer_index(time + delay, connector->positive_synapse_type, post_index, synapse_type_index_bits, synapse_index_bits, synapse_delay_mask); } else { - rb_index = synapse_row_get_ring_buffer_index(time + connector->delay, + rb_index = synapse_row_get_ring_buffer_index(time + delay, connector->negative_synapse_type, post_index, synapse_type_index_bits, synapse_index_bits, synapse_delay_mask); diff --git a/spynnaker/pyNN/models/neural_projections/connectors/convolution_connector.py b/spynnaker/pyNN/models/neural_projections/connectors/convolution_connector.py index eb0b09a85f..5dcf453bb9 100644 --- a/spynnaker/pyNN/models/neural_projections/connectors/convolution_connector.py +++ b/spynnaker/pyNN/models/neural_projections/connectors/convolution_connector.py @@ -34,7 +34,7 @@ #: The number of 16-bit shorts in the connector struct, #: ignoring the source_key_info struct and the weights (which are dynamic) -CONNECTOR_CONFIG_SHORTS = 16 +CONNECTOR_CONFIG_SHORTS = 18 class ConvolutionConnector(AbstractConnector): @@ -51,13 +51,16 @@ class ConvolutionConnector(AbstractConnector): "__pool_shape", "__pool_stride", "__positive_receptor_type", - "__negative_receptor_type" + "__negative_receptor_type", + "__horizontal_delay_step" ] def __init__(self, kernel_weights, kernel_shape=None, strides=None, padding=None, pool_shape=None, pool_stride=None, positive_receptor_type="excitatory", - negative_receptor_type="inhibitory", safe=True, + negative_receptor_type="inhibitory", + horizontal_delay_step=0, + safe=True, verbose=False, callback=None): """ :param kernel_weights: @@ -135,6 +138,8 @@ def __init__(self, kernel_weights, kernel_shape=None, strides=None, self.__positive_receptor_type = positive_receptor_type self.__negative_receptor_type = negative_receptor_type + self.__horizontal_delay_step = horizontal_delay_step + @property def positive_receptor_type(self): return self.__positive_receptor_type @@ -416,6 +421,8 @@ def write_local_only_data( spec.write_value(app_edge.post_vertex.synapse_dynamics.delay * 
SpynnakerDataView.get_simulation_time_step_per_ms()) + spec.write_value(self.__horizontal_delay_step, data_type=DataType.UINT32) + # Encode weights with weight scaling encoded_kernel_weights = self.__kernel_weights.flatten() if len(encoded_kernel_weights) % 2 != 0:
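Taken together, the series can be driven from PyNN roughly as follows. This is a sketch rather than code from the patches: the import paths follow the module locations touched above, top-level aliases (including the availability of IF_curr_delta, which pairs with the IF_curr_delta_conv binary added here) may differ, and the shapes, rates, weights and delays are illustrative only.

    # Usage sketch under the assumptions stated above.
    import numpy
    import pyNN.spiNNaker as sim
    from pyNN.space import Grid2D
    from spynnaker.pyNN.models.neural_projections.connectors.convolution_connector import ConvolutionConnector
    from spynnaker.pyNN.models.neuron.local_only.local_only_convolution import LocalOnlyConvolution

    sim.setup(timestep=1.0)

    # Both populations must be 2D (Grid2D), as validate_connection() enforces.
    pre = sim.Population(64 * 64, sim.SpikeSourcePoisson(rate=10.0),
                         structure=Grid2D(1.0), label="input")
    # 3x3 kernel, stride 1, no padding: post shape is (64 - 3) // 1 + 1 = 62.
    post = sim.Population(62 * 62, sim.IF_curr_delta(),
                          structure=Grid2D(1.0), label="conv")

    connector = ConvolutionConnector(
        kernel_weights=numpy.full((3, 3), 0.5),  # any kernel shape after patch 6
        strides=(1, 1),                          # [row, column] order
        horizontal_delay_step=0)                 # patch 8; 0 keeps a flat delay
    sim.Projection(pre, post, connector,
                   synapse_type=LocalOnlyConvolution(delay=2.0))  # ms (patch 2)

    sim.run(1000)
    sim.end()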