diff --git a/neural_modelling/src/neuron/c_main_synapse_common.h b/neural_modelling/src/neuron/c_main_synapse_common.h index ad75118b00..50e3388dc5 100644 --- a/neural_modelling/src/neuron/c_main_synapse_common.h +++ b/neural_modelling/src/neuron/c_main_synapse_common.h @@ -105,13 +105,12 @@ static inline bool initialise_synapse_regions( bool *clear_input_buffer_of_late_packets, uint32_t *n_recording_regions_used) { // Set up the synapses - uint32_t *ring_buffer_to_input_buffer_left_shifts; + REAL *min_weights; uint32_t n_neurons; uint32_t n_synapse_types; if (!synapses_initialise( data_specification_get_region(regions.synapse_params, ds_regions), - &n_neurons, &n_synapse_types, ring_buffers, - &ring_buffer_to_input_buffer_left_shifts, + &n_neurons, &n_synapse_types, ring_buffers, &min_weights, clear_input_buffer_of_late_packets, incoming_spike_buffer_size)) { return false; @@ -124,10 +123,11 @@ static inline bool initialise_synapse_regions( row_max_n_words)) { return false; } + // Set up the synapse dynamics if (!synapse_dynamics_initialise( data_specification_get_region(regions.synapse_dynamics, ds_regions), - n_neurons, n_synapse_types, ring_buffer_to_input_buffer_left_shifts)) { + n_neurons, n_synapse_types, min_weights)) { return false; } @@ -137,5 +137,6 @@ static inline bool initialise_synapse_regions( return false; } + return true; } diff --git a/neural_modelling/src/neuron/current_sources/current_source_ac.h b/neural_modelling/src/neuron/current_sources/current_source_ac.h index 0b9bf469b8..fcce4ca17e 100644 --- a/neural_modelling/src/neuron/current_sources/current_source_ac.h +++ b/neural_modelling/src/neuron/current_sources/current_source_ac.h @@ -40,7 +40,7 @@ static bool current_source_ac_init(uint32_t n_ac_sources, uint32_t *next) { for (uint32_t n_ac=0; n_ac < n_ac_sources; n_ac++) { ac_source[n_ac] = spin1_malloc(sizeof(ac_source_t)); if (ac_source[n_ac] == NULL) { - log_error("Unable to allocate DC source parameters - out of DTCM"); + 
log_error("Unable to allocate AC source parameters - out of DTCM"); return false; } *next += sizeof(ac_source_t) / 4; diff --git a/neural_modelling/src/neuron/current_sources/current_source_noisy.h b/neural_modelling/src/neuron/current_sources/current_source_noisy.h index 6e82425513..18e5bc1d67 100644 --- a/neural_modelling/src/neuron/current_sources/current_source_noisy.h +++ b/neural_modelling/src/neuron/current_sources/current_source_noisy.h @@ -41,7 +41,7 @@ static bool current_source_noisy_init(uint32_t n_noisy_sources, uint32_t *next) for (uint32_t n_noisy=0; n_noisy < n_noisy_sources; n_noisy++) { noisy_source[n_noisy] = spin1_malloc(sizeof(noisy_current_source_t)); if (noisy_source[n_noisy] == NULL) { - log_error("Unable to allocate DC source parameters - out of DTCM"); + log_error("Unable to allocate noisy source parameters - out of DTCM"); return false; } *next += sizeof(noisy_current_source_t) / 4; diff --git a/neural_modelling/src/neuron/current_sources/current_source_step.h b/neural_modelling/src/neuron/current_sources/current_source_step.h index 22de4b7f63..30474edb97 100644 --- a/neural_modelling/src/neuron/current_sources/current_source_step.h +++ b/neural_modelling/src/neuron/current_sources/current_source_step.h @@ -48,29 +48,20 @@ static bool current_source_step_init( step_cs_amps = spin1_malloc(n_step_current_sources * sizeof(uint32_t*)); step_cs_amp_last = spin1_malloc(n_step_current_sources * sizeof(REAL)); step_cs_index = spin1_malloc(n_step_current_sources * sizeof(uint32_t)); - if (step_cs_amp_last == NULL) { - log_error("Unable to allocate step current source amp last - out of DTCM"); - return false; - } - if (step_cs_index == NULL) { - log_error("Unable to allocate step current source index - out of DTCM"); - return false; - } } + for (uint32_t n_step=0; n_step < n_step_current_sources; n_step++) { uint32_t arr_len = (uint32_t) cs_address[*next]; uint32_t struct_size = (arr_len + 1) * sizeof(uint32_t); step_cs_times[n_step] = 
spin1_malloc(struct_size); if (step_cs_times[n_step] == NULL) { - log_error("Unable to allocate step current source times - out of DTCM", - "struct_size is %u next %u n_step %u)", struct_size, *next, n_step); + log_error("Unable to allocate step current source times - out of DTCM"); return false; } step_cs_amps[n_step] = spin1_malloc(struct_size); if (step_cs_amps[n_step] == NULL) { - log_error("Unable to allocate step current source amplitudes - out of DTCM", - "(struct_size is %u next %u n_step %u)", struct_size, *next, n_step); + log_error("Unable to allocate step current source amplitudes - out of DTCM"); return false; } diff --git a/neural_modelling/src/neuron/models/neuron_model_lif_impl.h b/neural_modelling/src/neuron/models/neuron_model_lif_impl.h index 80a249cd8f..68cfd379d1 100644 --- a/neural_modelling/src/neuron/models/neuron_model_lif_impl.h +++ b/neural_modelling/src/neuron/models/neuron_model_lif_impl.h @@ -189,20 +189,20 @@ static inline state_t neuron_model_get_membrane_voltage(const neuron_t *neuron) } static inline void neuron_model_print_state_variables(const neuron_t *neuron) { - log_info("V membrane = %11.4k mv", neuron->V_membrane); - log_info("Refract timer = %u timesteps", neuron->refract_timer); + log_debug("V membrane = %11.4k mv", neuron->V_membrane); + log_debug("Refract timer = %u timesteps", neuron->refract_timer); } static inline void neuron_model_print_parameters(const neuron_t *neuron) { - log_info("V reset = %11.4k mv", neuron->V_reset); - log_info("V rest = %11.4k mv", neuron->V_rest); + log_debug("V reset = %11.4k mv", neuron->V_reset); + log_debug("V rest = %11.4k mv", neuron->V_rest); - log_info("I offset = %11.4k nA", neuron->I_offset); - log_info("R membrane = %11.4k Mohm", neuron->R_membrane); + log_debug("I offset = %11.4k nA", neuron->I_offset); + log_debug("R membrane = %11.4k Mohm", neuron->R_membrane); - log_info("exp(-ms/(RC)) = %11.4k [.]", neuron->exp_TC); + log_debug("exp(-ms/(RC)) = %11.4k [.]", neuron->exp_TC); - 
log_info("T refract = %u timesteps", neuron->T_refract); + log_debug("T refract = %u timesteps", neuron->T_refract); } diff --git a/neural_modelling/src/neuron/neuron.c b/neural_modelling/src/neuron/neuron.c index 03728a76a3..63d49d1d50 100644 --- a/neural_modelling/src/neuron/neuron.c +++ b/neural_modelling/src/neuron/neuron.c @@ -50,12 +50,12 @@ static uint32_t n_neurons_peak; //! The number of synapse types static uint32_t n_synapse_types; +//! Minimum weight value +static REAL *min_weights; + //! The mask of the colour static uint32_t colour_mask; -//! Amount to left shift the ring buffer by to make it an input -static uint32_t *ring_buffer_to_input_left_shifts; - //! The address where the actual neuron parameters start static void *saved_neuron_params_address; @@ -72,7 +72,7 @@ struct neuron_core_parameters { uint32_t n_neurons_peak; uint32_t n_colour_bits; uint32_t n_synapse_types; - uint32_t ring_buffer_shifts[]; + REAL min_weights[]; // Following this struct in memory (as it can't be expressed in C) is: // uint32_t neuron_keys[n_neurons_to_simulate]; }; @@ -120,22 +120,20 @@ bool neuron_initialise( // Get colour details colour_mask = (1 << params->n_colour_bits) - 1; - // Set up ring buffer left shifts - uint32_t ring_buffer_bytes = n_synapse_types * sizeof(uint32_t); - ring_buffer_to_input_left_shifts = spin1_malloc(ring_buffer_bytes); - if (ring_buffer_to_input_left_shifts == NULL) { - log_error("Not enough memory to allocate ring buffer"); + // Set up min weights + uint32_t min_weights_bytes = n_synapse_types * sizeof(REAL); + min_weights = spin1_malloc(min_weights_bytes); + if (min_weights == NULL) { + log_error("Not enough memory to allocate min_weights"); return false; } - // read in ring buffer to input left shifts - spin1_memcpy( - ring_buffer_to_input_left_shifts, params->ring_buffer_shifts, - ring_buffer_bytes); + // read in min_weights + spin1_memcpy(min_weights, params->min_weights, min_weights_bytes); - // The key list comes after the ring 
buffer shifts + // The key list comes after the min weights uint32_t *neuron_keys_sdram = - (uint32_t *) ¶ms->ring_buffer_shifts[n_synapse_types]; + (uint32_t *) ¶ms->min_weights[n_synapse_types]; uint32_t neuron_keys_size = n_neurons * sizeof(uint32_t); neuron_keys = spin1_malloc(neuron_keys_size); if (neuron_keys == NULL) { @@ -205,7 +203,7 @@ void neuron_transfer(weight_t *syns) { // EXPORTED uint32_t synapse_index = 0; uint32_t ring_buffer_index = 0; for (uint32_t s_i = n_synapse_types; s_i > 0; s_i--) { - uint32_t rb_shift = ring_buffer_to_input_left_shifts[synapse_index]; + REAL min_weight = min_weights[synapse_index]; uint32_t neuron_index = 0; for (uint32_t n_i = n_neurons_peak; n_i > 0; n_i--) { weight_t value = syns[ring_buffer_index]; @@ -215,7 +213,7 @@ void neuron_transfer(weight_t *syns) { // EXPORTED rt_error(RTE_SWERR); } input_t val_to_add = synapse_row_convert_weight_to_input( - value, rb_shift); + value, min_weight); neuron_impl_add_inputs(synapse_index, neuron_index, val_to_add); } syns[ring_buffer_index] = 0; diff --git a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_stdp_common.h b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_stdp_common.h index 6d73a0105a..29595a6d5b 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_stdp_common.h +++ b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_stdp_common.h @@ -112,7 +112,7 @@ static uint32_t plastic_saturation_count = 0; static inline bool synapse_dynamics_stdp_init( address_t *address, stdp_params *params, uint32_t n_synapse_types, - uint32_t *ring_buffer_to_input_buffer_left_shifts) { + REAL *min_weights) { // Load parameters stdp_params *sdram_params = (stdp_params *) *address; @@ -127,8 +127,7 @@ static inline bool synapse_dynamics_stdp_init( // Load weight dependence data address_t weight_result = weight_initialise( - weight_region_address, n_synapse_types, - ring_buffer_to_input_buffer_left_shifts); + weight_region_address, 
n_synapse_types, min_weights); if (weight_result == NULL) { return false; } diff --git a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_stdp_izhikevich_neuromodulation.c b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_stdp_izhikevich_neuromodulation.c index 92e083e476..ec22760cdc 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_stdp_izhikevich_neuromodulation.c +++ b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_stdp_izhikevich_neuromodulation.c @@ -33,7 +33,7 @@ typedef struct neuromodulated_synapse_t { typedef struct nm_update_state_t { accum weight; - uint32_t weight_shift; + REAL min_weight; update_state_t eligibility_state; } nm_update_state_t; @@ -74,7 +74,7 @@ static int16_lut *tau_c_lookup; static int16_lut *tau_d_lookup; -static uint32_t *nm_weight_shift; +static REAL *nm_min_weight; extern uint32_t skipped_synapses; @@ -85,10 +85,13 @@ extern uint32_t skipped_synapses; static inline nm_update_state_t get_nm_update_state( neuromodulated_synapse_t synapse, index_t synapse_type) { - accum s1615_weight = kbits(synapse.weight << nm_weight_shift[synapse_type]); + uint64_t mw = (uint64_t) bitsk(nm_min_weight[synapse_type]); + uint64_t w = (uint64_t) (synapse.weight); + + accum s1615_weight = kbits((int_k_t) mw * w); nm_update_state_t update_state = { .weight=s1615_weight, - .weight_shift=nm_weight_shift[synapse_type], + .min_weight=nm_min_weight[synapse_type], .eligibility_state=synapse_structure_get_update_state( synapse.eligibility_synapse, synapse_type) }; @@ -102,7 +105,7 @@ static inline nm_final_state_t get_nm_final_state( update_state.weight = kbits(MIN(bitsk(update_state.weight), bitsk(nm_params.max_weight))); nm_final_state_t final_state = { - .weight=(weight_t) (bitsk(update_state.weight) >> update_state.weight_shift), + .weight=(weight_t) (bitsk(update_state.weight) / bitsk(update_state.min_weight)), .final_state=synapse_structure_get_final_state( update_state.eligibility_state) }; 
@@ -275,10 +278,10 @@ static inline nm_final_state_t izhikevich_neuromodulation_plasticity_update_syna bool synapse_dynamics_initialise( address_t address, uint32_t n_neurons, uint32_t n_synapse_types, - uint32_t *ring_buffer_to_input_buffer_left_shifts) { + REAL *min_weights) { - if (!synapse_dynamics_stdp_init(&address, ¶ms, n_synapse_types, - ring_buffer_to_input_buffer_left_shifts)) { + if (!synapse_dynamics_stdp_init( + &address, ¶ms, n_synapse_types, min_weights)) { return false; } @@ -296,14 +299,14 @@ bool synapse_dynamics_initialise( tau_c_lookup = maths_copy_int16_lut(&lut_address); tau_d_lookup = maths_copy_int16_lut(&lut_address); - // Store weight shifts - nm_weight_shift = spin1_malloc(sizeof(uint32_t) * n_synapse_types); - if (nm_weight_shift == NULL) { - log_error("Could not initialise weight region data"); + // Store min weights + nm_min_weight = spin1_malloc(sizeof(REAL) * n_synapse_types); + if (nm_min_weight == NULL) { + log_error("Could not initialise min weight region data"); return NULL; } for (uint32_t s = 0; s < n_synapse_types; s++) { - nm_weight_shift[s] = ring_buffer_to_input_buffer_left_shifts[s]; + nm_min_weight[s] = min_weights[s]; } return true; @@ -315,10 +318,10 @@ bool synapse_dynamics_initialise( void synapse_dynamics_print_plastic_synapses( synapse_row_plastic_data_t *plastic_region_data, synapse_row_fixed_part_t *fixed_region, - uint32_t *ring_buffer_to_input_buffer_left_shifts) { + accum *min_weights) { __use(plastic_region_data); __use(fixed_region); - __use(ring_buffer_to_input_buffer_left_shifts); + __use(min_weights); #if LOG_LEVEL >= LOG_DEBUG // Extract separate arrays of weights (from plastic region), @@ -344,8 +347,7 @@ void synapse_dynamics_print_plastic_synapses( weight_t weight = synapse_structure_get_final_weight(final_state); log_debug("%08x [%3d: (w: %5u (=", control_word, i, weight); - synapses_print_weight( - weight, ring_buffer_to_input_buffer_left_shifts[synapse_type]); + synapses_print_weight(weight, 
min_weights[synapse_type]); log_debug("nA) d: %2u, n = %3u)] - {%08x %08x}\n", synapse_row_sparse_delay(control_word, synapse_type_index_bits, synapse_delay_mask), synapse_row_sparse_index(control_word, synapse_index_mask), diff --git a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_stdp_mad_impl.c b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_stdp_mad_impl.c index 845c7274b3..d59c7d4d75 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_stdp_mad_impl.c +++ b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_stdp_mad_impl.c @@ -104,10 +104,10 @@ static inline final_state_t plasticity_update_synapse( bool synapse_dynamics_initialise( address_t address, uint32_t n_neurons, uint32_t n_synapse_types, - uint32_t *ring_buffer_to_input_buffer_left_shifts) { + REAL *min_weights) { - if (!synapse_dynamics_stdp_init(&address, ¶ms, n_synapse_types, - ring_buffer_to_input_buffer_left_shifts)) { + if (!synapse_dynamics_stdp_init( + &address, ¶ms, n_synapse_types, min_weights)) { return false; } @@ -125,10 +125,10 @@ bool synapse_dynamics_initialise( void synapse_dynamics_print_plastic_synapses( synapse_row_plastic_data_t *plastic_region_data, synapse_row_fixed_part_t *fixed_region, - uint32_t *ring_buffer_to_input_buffer_left_shifts) { + accum *min_weights) { __use(plastic_region_data); __use(fixed_region); - __use(ring_buffer_to_input_buffer_left_shifts); + __use(min_weights); #if LOG_LEVEL >= LOG_DEBUG // Extract separate arrays of weights (from plastic region), @@ -154,8 +154,7 @@ void synapse_dynamics_print_plastic_synapses( weight_t weight = synapse_structure_get_final_weight(final_state); log_debug("%08x [%3d: (w: %5u (=", control_word, i, weight); - synapses_print_weight( - weight, ring_buffer_to_input_buffer_left_shifts[synapse_type]); + synapses_print_weight(weight, min_weights[synapse_type]); log_debug("nA) d: %2u, n = %3u)] - {%08x %08x}\n", synapse_row_sparse_delay(control_word, 
synapse_type_index_bits, synapse_delay_mask), synapse_row_sparse_index(control_word, synapse_index_mask), @@ -178,7 +177,6 @@ static inline index_t sparse_axonal_delay(uint32_t x) { #endif } -//--------------------------------------- void synapse_dynamics_process_post_synaptic_event( uint32_t time, index_t neuron_index) { log_debug("Adding post-synaptic event to trace at time:%u", time); diff --git a/neural_modelling/src/neuron/plasticity/stdp/timing_dependence/timing_pfister_triplet_impl.h b/neural_modelling/src/neuron/plasticity/stdp/timing_dependence/timing_pfister_triplet_impl.h index 393e90a1a7..3a6cee14cf 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/timing_dependence/timing_pfister_triplet_impl.h +++ b/neural_modelling/src/neuron/plasticity/stdp/timing_dependence/timing_pfister_triplet_impl.h @@ -76,14 +76,14 @@ static inline post_trace_t timing_decay_post( uint32_t delta_time = time - last_time; // Decay previous o1 trace - int32_t decayed_o1 = STDP_FIXED_MUL_16X16(last_trace.o1, - maths_lut_exponential_decay(delta_time, tau_minus_lookup)); + int32_t decay_minus = maths_lut_exponential_decay(delta_time, tau_minus_lookup); + int32_t decayed_o1 = STDP_FIXED_MUL_16X16(last_trace.o1, decay_minus); // If we have already added on the last spike effect, just decay // (as it's sampled BEFORE the spike), // otherwise, add on energy caused by last spike and decay that int32_t new_o2 = 0; - int32_t next_spike_time = last_trace.last_spike_time; + uint32_t next_spike_time = last_trace.last_spike_time; if (last_trace.last_spike_time == 0) { int32_t decay = maths_lut_exponential_decay(delta_time, tau_y_lookup); new_o2 = STDP_FIXED_MUL_16X16(last_trace.o2, decay); @@ -127,17 +127,17 @@ static inline pre_trace_t timing_add_pre_spike( uint32_t delta_time = time - last_time; // Decay previous r1 trace and add energy caused by new spike - int32_t decayed_r1 = STDP_FIXED_MUL_16X16(last_trace.r1, - maths_lut_exponential_decay(delta_time, tau_plus_lookup)); + int32_t 
decay_tau = maths_lut_exponential_decay(delta_time, tau_plus_lookup); + int32_t decayed_r1 = STDP_FIXED_MUL_16X16(last_trace.r1, decay_tau); int32_t new_r1 = decayed_r1 + STDP_FIXED_POINT_ONE; // If this is the 1st pre-synaptic event, r2 trace is zero // (as it's sampled BEFORE the spike), // otherwise, add on energy caused by last spike and decay that + int32_t decay_x = maths_lut_exponential_decay(delta_time, tau_x_lookup); int32_t new_r2 = (last_time == 0) ? 0 : STDP_FIXED_MUL_16X16( - last_trace.r2 + STDP_FIXED_POINT_ONE, - maths_lut_exponential_decay(delta_time, tau_x_lookup)); + last_trace.r2 + STDP_FIXED_POINT_ONE, decay_x); log_debug("\tdelta_time=%u, r1=%d, r2=%d\n", delta_time, new_r1, new_r2); @@ -162,8 +162,8 @@ static inline update_state_t timing_apply_pre_spike( post_trace_t last_post_trace, update_state_t previous_state) { // Get time of event relative to last post-synaptic event uint32_t time_since_last_post = time - last_post_time; - int32_t decayed_o1 = STDP_FIXED_MUL_16X16(last_post_trace.o1, - maths_lut_exponential_decay(time_since_last_post, tau_minus_lookup)); + int32_t decay_minus = maths_lut_exponential_decay(time_since_last_post, tau_minus_lookup); + int32_t decayed_o1 = STDP_FIXED_MUL_16X16(last_post_trace.o1, decay_minus); // Calculate triplet term int32_t decayed_o1_r2 = STDP_FIXED_MUL_16X16(decayed_o1, trace.r2); @@ -194,8 +194,8 @@ static inline update_state_t timing_apply_post_spike( // Get time of event relative to last pre-synaptic event uint32_t time_since_last_pre = time - last_pre_time; if (time_since_last_pre > 0) { - int32_t decayed_r1 = STDP_FIXED_MUL_16X16(last_pre_trace.r1, - maths_lut_exponential_decay(time_since_last_pre, tau_plus_lookup)); + int32_t decay_plus = maths_lut_exponential_decay(time_since_last_pre, tau_plus_lookup); + int32_t decayed_r1 = STDP_FIXED_MUL_16X16(last_pre_trace.r1, decay_plus); // Calculate triplet term int32_t decayed_r1_o2 = STDP_FIXED_MUL_16X16(decayed_r1, trace.o2); diff --git 
a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight.h b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight.h index e999f3337a..1f76636a1a 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight.h +++ b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight.h @@ -41,14 +41,11 @@ * \param[in] address: the absolute address in SRAM where the weight parameters * are stored. * \param[in] n_synapse_types: The number of synapse types - * \param[in] ring_buffer_to_input_buffer_left_shifts: how much a value needs - * to be shifted in the left direction to support comprises with fixed - * point arithmetic + * \param[in] min_weights: The value of the weight of the LSB of the weight * \return the end of the weight region as an absolute SDRAM memory address. */ address_t weight_initialise( - address_t address, uint32_t n_synapse_types, - uint32_t *ring_buffer_to_input_buffer_left_shifts); + address_t address, uint32_t n_synapse_types, REAL *min_weights); /*! * \brief Gets the initial weight state. diff --git a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_additive_one_term_impl.c b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_additive_one_term_impl.c index ba3d0d4c83..7a572dd0f0 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_additive_one_term_impl.c +++ b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_additive_one_term_impl.c @@ -24,8 +24,9 @@ //! Global plasticity parameter data plasticity_weight_region_data_t *plasticity_weight_region_data; -//! Plasticity multiply shift array, in DTCM -uint32_t *weight_shift; +//! Plasticity min_weight array, in DTCM +REAL *min_weight; +REAL *min_weight_recip; //! \brief How the configuration data for additive_one_term is laid out in //! SDRAM. The layout is an array of these. 
@@ -40,8 +41,9 @@ typedef struct { // Functions //--------------------------------------- address_t weight_initialise( - address_t address, uint32_t n_synapse_types, - UNUSED uint32_t *ring_buffer_to_input_buffer_left_shifts) { + address_t address, uint32_t n_synapse_types, REAL *min_weights) { + log_debug("weight_initialise: starting"); + log_debug("\tSTDP additive one-term weight dependence"); // Copy plasticity region data from address // **NOTE** this seems somewhat safer than relying on sizeof additive_one_term_config_t *config = (additive_one_term_config_t *) address; @@ -53,8 +55,13 @@ address_t weight_initialise( return NULL; } - weight_shift = spin1_malloc(sizeof(uint32_t) * n_synapse_types); - if (weight_shift == NULL) { + min_weight = spin1_malloc(sizeof(REAL) * n_synapse_types); + if (min_weight == NULL) { + log_error("Could not initialise weight region data"); + return NULL; + } + min_weight_recip = spin1_malloc(sizeof(REAL) * n_synapse_types); + if (min_weight_recip == NULL) { log_error("Could not initialise weight region data"); return NULL; } @@ -65,12 +72,12 @@ address_t weight_initialise( dtcm_copy[s].a2_plus = config->a2_plus; dtcm_copy[s].a2_minus = config->a2_minus; - // Copy weight shift - weight_shift[s] = ring_buffer_to_input_buffer_left_shifts[s]; + min_weight[s] = min_weights[s]; + min_weight_recip[s] = min_weights[s+n_synapse_types]; - log_debug("\tSynapse type %u: Min weight:%k, Max weight:%k, A2+:%k, A2-:%k", - s, dtcm_copy[s].min_weight, dtcm_copy[s].max_weight, - dtcm_copy[s].a2_plus, dtcm_copy[s].a2_minus); + log_debug("\tSynapse type %u: Min w:%k, Max w:%k, A2+:%k, A2-:%k min_weight %k recip %k", + s, dtcm_copy[s].min_weight, dtcm_copy[s].max_weight, dtcm_copy[s].a2_plus, + dtcm_copy[s].a2_minus, min_weight[s], min_weight_recip[s]); } // Return end address of region diff --git a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_additive_one_term_impl.h 
b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_additive_one_term_impl.h index fd58a78627..2eca3ab990 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_additive_one_term_impl.h +++ b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_additive_one_term_impl.h @@ -42,7 +42,8 @@ typedef struct { typedef struct { accum weight; //!< The starting weight - uint32_t weight_shift; //!< Weight shift to S1615 version + REAL min_weight; //!< Min weight + REAL min_weight_recip; //!< Min weight //! Reference to the configuration data const plasticity_weight_region_data_t *weight_region; @@ -59,15 +60,20 @@ typedef struct { * \param[in] synapse_type: The type of synapse involved * \return The initial weight state. */ -static inline weight_state_t weight_get_initial( - weight_t weight, index_t synapse_type) { +static inline weight_state_t weight_get_initial(weight_t weight, index_t synapse_type) { extern plasticity_weight_region_data_t *plasticity_weight_region_data; - extern uint32_t *weight_shift; + extern REAL *min_weight; + extern REAL *min_weight_recip; + + uint64_t mw = (uint64_t) bitsk(min_weight[synapse_type]); + uint64_t w = (uint64_t) (weight); + + accum s1615_weight = kbits((int_k_t) mw * w); - accum s1615_weight = kbits(weight << weight_shift[synapse_type]); return (weight_state_t) { .weight = s1615_weight, - .weight_shift = weight_shift[synapse_type], + .min_weight = min_weight[synapse_type], + .min_weight_recip = min_weight_recip[synapse_type], .weight_region = &plasticity_weight_region_data[synapse_type] }; } @@ -103,7 +109,7 @@ static inline weight_state_t weight_one_term_apply_potentiation( * \return The new weight. 
*/ static inline weight_t weight_get_final(weight_state_t state) { - return (weight_t) (bitsk(state.weight) >> state.weight_shift); + return (weight_t) (state.weight * state.min_weight_recip); } static inline void weight_decay(weight_state_t *state, int32_t decay) { diff --git a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_additive_two_term_impl.c b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_additive_two_term_impl.c index 118d47be01..191b504318 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_additive_two_term_impl.c +++ b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_additive_two_term_impl.c @@ -24,9 +24,6 @@ //! Global plasticity parameter data plasticity_weight_region_data_t *plasticity_weight_region_data; -//! Plasticity multiply shift array, in DTCM -uint32_t *weight_shift; - //! \brief How the configuration data for additive_two_term is laid out in //! SDRAM. The layout is an array of these. typedef struct { @@ -38,12 +35,17 @@ typedef struct { accum a3_minus; } additive_two_term_config_t; +//! 
Plasticity min_weight array, in DTCM +REAL *min_weight; +REAL *min_weight_recip; + //--------------------------------------- // Functions //--------------------------------------- address_t weight_initialise( - address_t address, uint32_t n_synapse_types, - UNUSED uint32_t *ring_buffer_to_input_buffer_left_shifts) { + address_t address, uint32_t n_synapse_types, REAL *min_weights) { + log_debug("weight_initialise: starting"); + log_debug("\tSTDP additive two-term weight dependance"); // Copy plasticity region data from address // **NOTE** this seems somewhat safer than relying on sizeof additive_two_term_config_t *config = (additive_two_term_config_t *) address; @@ -57,8 +59,13 @@ address_t weight_initialise( return NULL; } - weight_shift = spin1_malloc(sizeof(uint32_t) * n_synapse_types); - if (weight_shift == NULL) { + min_weight = spin1_malloc(sizeof(REAL) * n_synapse_types); + if (min_weight == NULL) { + log_error("Could not initialise weight region data"); + return NULL; + } + min_weight_recip = spin1_malloc(sizeof(REAL) * n_synapse_types); + if (min_weight_recip == NULL) { log_error("Could not initialise weight region data"); return NULL; } @@ -71,14 +78,14 @@ address_t weight_initialise( dtcm_copy[s].a3_plus = config->a3_plus; dtcm_copy[s].a3_minus = config->a3_minus; - // Copy weight shift - weight_shift[s] = ring_buffer_to_input_buffer_left_shifts[s]; + min_weight[s] = min_weights[s]; + min_weight_recip[s] = min_weights[s+n_synapse_types]; - log_debug("\tSynapse type %u: Min weight:%d, Max weight:%d, A2+:%d, A2-:%d," + log_debug("\tSynapse type %u: Min weight:%d, Max weight:%d, A2+:%d, A2-:%d, min_weight %k" " A3+:%d, A3-:%d", s, dtcm_copy[s].min_weight, dtcm_copy[s].max_weight, dtcm_copy[s].a2_plus, dtcm_copy[s].a2_minus, - dtcm_copy[s].a3_plus, dtcm_copy[s].a3_minus); + dtcm_copy[s].a3_plus, dtcm_copy[s].a3_minus, min_weight[s]); } // Return end address of region diff --git 
a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_additive_two_term_impl.h b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_additive_two_term_impl.h index 6e865b96d0..db5f6fb15b 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_additive_two_term_impl.h +++ b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_additive_two_term_impl.h @@ -43,7 +43,9 @@ typedef struct plasticity_weight_region_data_two_term_t { //! The current state data for the rule typedef struct weight_state_t { accum weight; //!< The weight - uint32_t weight_shift; //!< Shift of weight to and from S1615 format + + REAL min_weight; //!< The min_weight + REAL min_weight_recip; //!< The min_weight //! Reference to the configuration data const plasticity_weight_region_data_t *weight_region; @@ -60,16 +62,20 @@ typedef struct weight_state_t { * \param[in] synapse_type: The type of synapse involved * \return The initial weight state. */ -static inline weight_state_t weight_get_initial( - weight_t weight, index_t synapse_type) { +static inline weight_state_t weight_get_initial(weight_t weight, index_t synapse_type) { extern plasticity_weight_region_data_t *plasticity_weight_region_data; - extern uint32_t *weight_shift; + extern REAL *min_weight; + extern REAL *min_weight_recip; + + uint64_t mw = (uint64_t) bitsk(min_weight[synapse_type]); + uint64_t w = (uint64_t) (weight); - accum s1615_weight = kbits(weight << weight_shift[synapse_type]); + accum s1615_weight = kbits((int_k_t) mw * w); return (weight_state_t) { .weight = s1615_weight, - .weight_shift = weight_shift[synapse_type], + .min_weight = min_weight[synapse_type], + .min_weight_recip = min_weight_recip[synapse_type], .weight_region = &plasticity_weight_region_data[synapse_type] }; } @@ -109,7 +115,7 @@ static inline weight_state_t weight_two_term_apply_potentiation( * \return The new weight. 
*/ static inline weight_t weight_get_final(weight_state_t state) { - return (weight_t) (bitsk(state.weight) >> state.weight_shift); + return (weight_t) (state.weight * state.min_weight_recip); } static inline void weight_decay(weight_state_t *state, int32_t decay) { diff --git a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_multiplicative_impl.c b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_multiplicative_impl.c index edc9a3eb8d..5f47bdd09c 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_multiplicative_impl.c +++ b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_multiplicative_impl.c @@ -24,8 +24,9 @@ //! Global plasticity parameter data array, in DTCM plasticity_weight_region_data_t *plasticity_weight_region_data; -//! Plasticity multiply shift array, in DTCM -uint32_t *weight_shift; +//! Plasticity min_weight array, in DTCM +REAL *min_weight; +REAL *min_weight_recip; //! \brief How the configuration data for multiplicative is laid out in SDRAM. //! The layout is an array of these. 
@@ -39,9 +40,11 @@ typedef struct { //--------------------------------------- // Functions //--------------------------------------- + address_t weight_initialise( - address_t address, uint32_t n_synapse_types, - uint32_t *ring_buffer_to_input_buffer_left_shifts) { + address_t address, uint32_t n_synapse_types, REAL *min_weights) { + log_debug("weight_initialise: starting"); + log_debug("\tSTDP multiplicative weight dependence"); // Copy plasticity region data from address // **NOTE** this seems somewhat safer than relying on sizeof plasticity_weight_region_data_t *dtcm_copy = plasticity_weight_region_data = @@ -50,8 +53,14 @@ address_t weight_initialise( log_error("Could not initialise weight region data"); return NULL; } - weight_shift = spin1_malloc(sizeof(uint32_t) * n_synapse_types); - if (weight_shift == NULL) { + + min_weight = spin1_malloc(sizeof(REAL) * n_synapse_types); + if (min_weight == NULL) { + log_error("Could not initialise weight region data"); + return NULL; + } + min_weight_recip = spin1_malloc(sizeof(REAL) * n_synapse_types); + if (min_weight_recip == NULL) { log_error("Could not initialise weight region data"); return NULL; } @@ -64,13 +73,12 @@ address_t weight_initialise( dtcm_copy[s].a2_plus = config->a2_plus; dtcm_copy[s].a2_minus = config->a2_minus; - // Copy weight shift - weight_shift[s] = ring_buffer_to_input_buffer_left_shifts[s]; + min_weight[s] = min_weights[s]; + min_weight_recip[s] = min_weights[s+n_synapse_types]; - log_debug("\tSynapse type %u: Min weight:%d, Max weight:%d, A2+:%d, A2-:%d," - " Weight multiply right shift:%u", + log_debug("\tSynapse type %u: Min weight:%d, Max weight:%d, A2+:%d, A2-:%d, min_weight %k", s, dtcm_copy[s].min_weight, dtcm_copy[s].max_weight, - dtcm_copy[s].a2_plus, dtcm_copy[s].a2_minus, weight_shift[s]); + dtcm_copy[s].a2_plus, dtcm_copy[s].a2_minus, min_weight[s]); } // Return end address of region diff --git 
a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_multiplicative_impl.h b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_multiplicative_impl.h index 6d147aa37b..5ad025d2ca 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_multiplicative_impl.h +++ b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_multiplicative_impl.h @@ -42,8 +42,9 @@ typedef struct { typedef struct { accum weight; //!< The current weight - //! The shift to use when multiplying - uint32_t weight_shift; + REAL min_weight; //!< The min weight + REAL min_weight_recip; //!< Reciprocal of the min weight + //! Reference to the configuration data const plasticity_weight_region_data_t *weight_region; } weight_state_t; @@ -51,6 +52,12 @@ typedef struct { #include "weight_one_term.h" //--------------------------------------- +// Externals +//--------------------------------------- +extern plasticity_weight_region_data_t *plasticity_weight_region_data; + +//--------------------------------------- + // Weight dependance functions //--------------------------------------- /*!
@@ -62,12 +69,18 @@ typedef struct { static inline weight_state_t weight_get_initial( weight_t weight, index_t synapse_type) { extern plasticity_weight_region_data_t *plasticity_weight_region_data; - extern uint32_t *weight_shift; + extern REAL *min_weight; + extern REAL *min_weight_recip; + + uint64_t mw = (uint64_t) bitsk(min_weight[synapse_type]); + uint64_t w = (uint64_t) (weight); + + accum s1615_weight = kbits((int_k_t) mw * w); - accum s1615_weight = kbits(weight << weight_shift[synapse_type]); return (weight_state_t) { .weight = s1615_weight, - .weight_shift = weight_shift[synapse_type], + .min_weight = min_weight[synapse_type], + .min_weight_recip = min_weight_recip[synapse_type], .weight_region = &plasticity_weight_region_data[synapse_type] }; } @@ -84,7 +97,9 @@ static inline weight_state_t weight_one_term_apply_depression( state.weight_region->a2_minus; // Multiply scale by depression and subtract + // **NOTE** using standard STDP fixed-point format handles format conversion state.weight -= mul_accum_fixed(scale, depression); + return state; } //--------------------------------------- @@ -101,6 +116,7 @@ static inline weight_state_t weight_one_term_apply_potentiation( // Multiply scale by potentiation and add // **NOTE** using standard STDP fixed-point format handles format conversion state.weight += mul_accum_fixed(scale, potentiation); + return state; } //--------------------------------------- @@ -110,7 +126,7 @@ static inline weight_state_t weight_one_term_apply_potentiation( * \return The new weight. 
*/ static inline weight_t weight_get_final(weight_state_t state) { - return (weight_t) (bitsk(state.weight) >> state.weight_shift); + return (weight_t) (state.weight * state.min_weight_recip); } static inline void weight_decay(weight_state_t *state, int32_t decay) { diff --git a/neural_modelling/src/neuron/plasticity/synapse_dynamics.h b/neural_modelling/src/neuron/plasticity/synapse_dynamics.h index cf1815127c..bc10664a33 100644 --- a/neural_modelling/src/neuron/plasticity/synapse_dynamics.h +++ b/neural_modelling/src/neuron/plasticity/synapse_dynamics.h @@ -33,7 +33,7 @@ //! \return Whether the initialisation succeeded. bool synapse_dynamics_initialise( address_t address, uint32_t n_neurons, uint32_t n_synapse_types, - uint32_t *ring_buffer_to_input_buffer_left_shifts); + REAL *min_weights); //! \brief Process the dynamics of the synapses //! \param[in,out] plastic_region_data: Where the plastic data is @@ -62,7 +62,7 @@ void synapse_dynamics_process_post_synaptic_event( void synapse_dynamics_print_plastic_synapses( synapse_row_plastic_data_t *plastic_region_data, synapse_row_fixed_part_t *fixed_region, - uint32_t *ring_buffer_to_input_buffer_left_shifts); + REAL *min_weights); //! \brief Get the counters for plastic pre synaptic events based on (if //! 
the model was compiled with SYNAPSE_BENCHMARK parameter) or returns 0 diff --git a/neural_modelling/src/neuron/plasticity/synapse_dynamics_static_impl.c b/neural_modelling/src/neuron/plasticity/synapse_dynamics_static_impl.c index 8ff221dc1f..44aef1ae6c 100644 --- a/neural_modelling/src/neuron/plasticity/synapse_dynamics_static_impl.c +++ b/neural_modelling/src/neuron/plasticity/synapse_dynamics_static_impl.c @@ -28,8 +28,7 @@ bool synapse_dynamics_initialise( UNUSED address_t address, UNUSED uint32_t n_neurons, - UNUSED uint32_t n_synapse_types, - UNUSED uint32_t *ring_buffer_to_input_buffer_left_shifts) { + UNUSED uint32_t n_synapse_types, UNUSED REAL *min_weights) { return true; } @@ -52,7 +51,7 @@ bool synapse_dynamics_process_plastic_synapses( void synapse_dynamics_print_plastic_synapses( UNUSED synapse_row_plastic_data_t *plastic_region_data, UNUSED synapse_row_fixed_part_t *fixed_region, - UNUSED uint32_t *ring_buffer_to_input_left_shifts) { + UNUSED REAL *min_weights) { } uint32_t synapse_dynamics_get_plastic_pre_synaptic_events(void) { diff --git a/neural_modelling/src/neuron/synapse_row.h b/neural_modelling/src/neuron/synapse_row.h index a8ad535744..c85edc964b 100644 --- a/neural_modelling/src/neuron/synapse_row.h +++ b/neural_modelling/src/neuron/synapse_row.h @@ -233,18 +233,15 @@ static inline weight_t synapse_row_sparse_weight(uint32_t x) { //! \brief Converts a weight stored in a synapse row to an input //! \param[in] weight: the weight to convert in synapse-row form -//! \param[in] left_shift: the shift to use when decoding +//! \param[in] min_weight: the minimum weight to use in the conversion //! 
\return the actual input weight for the model static inline input_t synapse_row_convert_weight_to_input( - weight_t weight, uint32_t left_shift) { - union { - int_k_t input_type; - s1615 output_type; - } converter; + weight_t weight, REAL min_weight) { + // Simply doing weight * min_weight adds unnecessary compiler instructions + uint64_t mw = (uint64_t) bitsk(min_weight); + uint64_t w = (uint64_t) (weight); - converter.input_type = (int_k_t) (weight) << left_shift; - - return converter.output_type; + return kbits((int_k_t) (mw * w)); } //! \brief Get the index of the ring buffer for a given timestep, synapse type diff --git a/neural_modelling/src/neuron/synapse_types/synapse_types_alpha_impl.h b/neural_modelling/src/neuron/synapse_types/synapse_types_alpha_impl.h index 708b52586e..d8d5e09709 100644 --- a/neural_modelling/src/neuron/synapse_types/synapse_types_alpha_impl.h +++ b/neural_modelling/src/neuron/synapse_types/synapse_types_alpha_impl.h @@ -37,9 +37,9 @@ //! Number of inhibitory receptors #define NUM_INHIBITORY_RECEPTORS 1 -#include #include #include "synapse_types.h" +#include //--------------------------------------- // Synapse parameters @@ -160,16 +160,14 @@ static inline void add_input_alpha(alpha_state_t *a_params, input_t input) { static inline void synapse_types_add_neuron_input( index_t synapse_type_index, synapse_types_t *parameters, input_t input) { - if (input > ZERO) { - switch (synapse_type_index) { - case EXCITATORY: - add_input_alpha(¶meters->exc, input); - break; - case INHIBITORY: - add_input_alpha(¶meters->inh, input); - break; - } - } + switch (synapse_type_index) { + case EXCITATORY: + add_input_alpha(¶meters->exc, input); + break; + case INHIBITORY: + add_input_alpha(¶meters->inh, input); + break; + } } //! \brief extracts the excitatory input buffers from the buffers available @@ -221,7 +219,7 @@ static inline const char *synapse_types_get_type_char( //! 
\param[in] parameters: the pointer to the parameters to print static inline void synapse_types_print_input( synapse_types_t *parameters) { - io_printf(IO_BUF, "%12.6k - %12.6k", + log_debug("%12.6k - %12.6k", parameters->exc.lin_buff * parameters->exc.exp_buff, parameters->inh.lin_buff * parameters->inh.exp_buff); } diff --git a/neural_modelling/src/neuron/synapses.c b/neural_modelling/src/neuron/synapses.c index 1b7e141505..e334fb55fb 100644 --- a/neural_modelling/src/neuron/synapses.c +++ b/neural_modelling/src/neuron/synapses.c @@ -48,8 +48,8 @@ static uint32_t ring_buffer_size; //! Ring buffer mask static uint32_t ring_buffer_mask; -//! Amount to left shift the ring buffer by to make it an input -static uint32_t *ring_buffer_to_input_left_shifts; +// The minimum weight value, and the reciprocal of these minimum values +static REAL *min_weights; //! \brief Number of bits needed for the synapse type and index //! \details @@ -137,8 +137,9 @@ static inline void print_synaptic_row(synaptic_row_t synaptic_row) { io_printf(IO_BUF, "%08x [%3d: (w: %5u (=", synapse, i, synapse_row_sparse_weight(synapse)); synapses_print_weight(synapse_row_sparse_weight(synapse), - ring_buffer_to_input_left_shifts[synapse_type]); - io_printf(IO_BUF, "nA) d: %2u, %d, n = %3u)] - {%08x %08x}\n", + min_weights[synapse_type]); + log_debug( + "nA) d: %2u, %d, n = %3u)] - {%08x %08x}\n", synapse_row_sparse_delay(synapse, synapse_type_index_bits, synapse_delay_mask), synapse_type, @@ -152,14 +153,14 @@ static inline void print_synaptic_row(synaptic_row_t synaptic_row) { synapse_row_plastic_data_t *plastic_data = synapse_row_plastic_region(synaptic_row); synapse_dynamics_print_plastic_synapses( - plastic_data, fixed_region, ring_buffer_to_input_left_shifts); + plastic_data, fixed_region, min_weights); } io_printf(IO_BUF, "----------------------------------------\n"); #endif // LOG_LEVEL >= LOG_DEBUG } -//! \brief Print the contents of the ring buffers. +//!
\brief Print the contents of the ring buffers. // equivalent min_weight function? //! \details Only does anything when debugging. //! \param[in] time: The current timestamp static inline void print_ring_buffers(uint32_t time) { @@ -187,7 +188,7 @@ static inline void print_ring_buffers(uint32_t time) { d + time, t, n, synapse_type_index_bits, synapse_index_bits, synapse_delay_mask); synapses_print_weight(ring_buffers[ring_buffer_index], - ring_buffer_to_input_left_shifts[t]); + min_weights[t]); } io_printf(IO_BUF, "\n"); } @@ -262,15 +263,14 @@ struct synapse_params { uint32_t log_max_delay; uint32_t drop_late_packets; uint32_t incoming_spike_buffer_size; - uint32_t ring_buffer_shifts[]; + REAL min_weights_recip[]; // this is min_weight followed by the reciprocals }; /* INTERFACE FUNCTIONS */ bool synapses_initialise( address_t synapse_params_address, uint32_t *n_neurons_out, uint32_t *n_synapse_types_out, - weight_t **ring_buffers_out, - uint32_t **ring_buffer_to_input_buffer_left_shifts, + weight_t **ring_buffers_out, REAL **min_weights_out, bool* clear_input_buffers_of_late_packets_init, uint32_t *incoming_spike_buffer_size) { struct synapse_params *params = (struct synapse_params *) synapse_params_address; @@ -285,20 +285,17 @@ bool synapses_initialise( uint32_t log_n_synapse_types = params->log_n_synapse_types; uint32_t log_max_delay = params->log_max_delay; - // Set up ring buffer left shifts - ring_buffer_to_input_left_shifts = - spin1_malloc(n_synapse_types * sizeof(uint32_t)); - if (ring_buffer_to_input_left_shifts == NULL) { - log_error("Not enough memory to allocate ring buffer"); + // Set up min_weights + uint32_t min_weights_bytes = 2 * n_synapse_types * sizeof(REAL); + min_weights = spin1_malloc(min_weights_bytes); + if (min_weights == NULL) { + log_error("Not enough memory to allocate min weights"); return false; } - // read in ring buffer to input left shifts - spin1_memcpy( - ring_buffer_to_input_left_shifts, params->ring_buffer_shifts, - 
n_synapse_types * sizeof(uint32_t)); - *ring_buffer_to_input_buffer_left_shifts = - ring_buffer_to_input_left_shifts; + // read in min_weights and reciprocals + spin1_memcpy(min_weights, params->min_weights_recip, min_weights_bytes); + *min_weights_out = min_weights; synapse_type_index_bits = log_n_neurons + log_n_synapse_types; synapse_type_index_mask = (1 << synapse_type_index_bits) - 1; @@ -328,7 +325,7 @@ bool synapses_initialise( } *ring_buffers_out = ring_buffers; - log_info("Ready to process synapses for %u neurons with %u synapse types", + log_debug("Ready to process synapses for %u neurons with %u synapse types", n_neurons, n_synapse_types); return true; diff --git a/neural_modelling/src/neuron/synapses.h b/neural_modelling/src/neuron/synapses.h index f98fdda653..e9a4258f66 100644 --- a/neural_modelling/src/neuron/synapses.h +++ b/neural_modelling/src/neuron/synapses.h @@ -61,15 +61,14 @@ extern uint32_t late_spikes; //! The maximum lateness of a spike extern uint32_t max_late_spike; - //! \brief Print the weight of a synapse //! \param[in] weight: the weight to print in synapse-row form -//! \param[in] left_shift: the shift to use when decoding +//! 
\param[in] min_weight: the minimum weight to use in the conversion static inline void synapses_print_weight( - weight_t weight, uint32_t left_shift) { + weight_t weight, REAL min_weight) { if (weight != 0) { io_printf(IO_BUF, "%12.6k", - synapse_row_convert_weight_to_input(weight, left_shift)); + synapse_row_convert_weight_to_input(weight, min_weight)); } else { io_printf(IO_BUF, " "); } @@ -91,8 +90,7 @@ static inline void synapses_print_weight( bool synapses_initialise( address_t synapse_params_address, uint32_t *n_neurons, uint32_t *n_synapse_types, - weight_t **ring_buffers, - uint32_t **ring_buffer_to_input_buffer_left_shifts, + weight_t **ring_buffers, REAL **min_weights, bool* clear_input_buffers_of_late_packets_init, uint32_t *incoming_spike_buffer_size); diff --git a/spynnaker/pyNN/external_devices_models/external_device_lif_control.py b/spynnaker/pyNN/external_devices_models/external_device_lif_control.py index c338e7881a..a5ceb1b805 100644 --- a/spynnaker/pyNN/external_devices_models/external_device_lif_control.py +++ b/spynnaker/pyNN/external_devices_models/external_device_lif_control.py @@ -99,7 +99,8 @@ def create_vertex( self, n_neurons, label, spikes_per_second, ring_buffer_sigma, incoming_spike_buffer_size, n_steps_per_timestep, drop_late_spikes, splitter, seed, - n_colour_bits): + n_colour_bits, min_weights, weight_random_sigma, + max_stdp_spike_delta): if n_neurons != len(self._devices): raise ConfigurationException( "Number of neurons does not match number of " @@ -110,4 +111,5 @@ def create_vertex( self._devices, self._create_edges, max_atoms, self._model, self, self._translator, spikes_per_second, label, ring_buffer_sigma, incoming_spike_buffer_size, drop_late_spikes, splitter, seed, - n_colour_bits) + n_colour_bits, min_weights, weight_random_sigma, + max_stdp_spike_delta) diff --git a/spynnaker/pyNN/external_devices_models/external_device_lif_control_vertex.py b/spynnaker/pyNN/external_devices_models/external_device_lif_control_vertex.py 
index aa5220f194..9ce71c6d0c 100644 --- a/spynnaker/pyNN/external_devices_models/external_device_lif_control_vertex.py +++ b/spynnaker/pyNN/external_devices_models/external_device_lif_control_vertex.py @@ -44,7 +44,8 @@ def __init__( pynn_model, translator=None, spikes_per_second=None, label=None, ring_buffer_sigma=None, incoming_spike_buffer_size=None, drop_late_spikes=None, splitter=None, seed=None, - n_colour_bits=None): + n_colour_bits=None, min_weights=None, weight_random_sigma=None, + max_stdp_spike_delta=None): """ :param list(AbstractMulticastControllableDevice) devices: The AbstractMulticastControllableDevice instances to be controlled @@ -66,14 +67,19 @@ def __init__( :param splitter: splitter from application vertices to machine vertices :type splitter: ~pacman.model.partitioner_splitters.AbstractSplitterCommon or None + :param int seed: The seed to use :param int n_colour_bits: The number of colour bits to use + :param list min_weights: The min_weights + :param float weight_random_sigma: The random sigma to use + :param float max_stdp_spike_delta: The max delta to use """ # pylint: disable=too-many-arguments super().__init__( len(devices), label, max_atoms_per_core, spikes_per_second, ring_buffer_sigma, incoming_spike_buffer_size, neuron_impl, pynn_model, drop_late_spikes, splitter, seed, - n_colour_bits) + n_colour_bits, min_weights, weight_random_sigma, + max_stdp_spike_delta) if not devices: raise ConfigurationException("No devices specified") diff --git a/spynnaker/pyNN/extra_algorithms/splitter_components/splitter_abstract_pop_vertex_fixed.py b/spynnaker/pyNN/extra_algorithms/splitter_components/splitter_abstract_pop_vertex_fixed.py index f4488e7071..b28e366be3 100644 --- a/spynnaker/pyNN/extra_algorithms/splitter_components/splitter_abstract_pop_vertex_fixed.py +++ b/spynnaker/pyNN/extra_algorithms/splitter_components/splitter_abstract_pop_vertex_fixed.py @@ -82,8 +82,8 @@ def create_machine_vertices(self, chip_counter): max_atoms_per_core = min( 
app_vertex.get_max_atoms_per_core(), app_vertex.n_atoms) - ring_buffer_shifts = app_vertex.get_ring_buffer_shifts() - weight_scales = app_vertex.get_weight_scales(ring_buffer_shifts) + min_weights = app_vertex.get_min_weights() + weight_scales = app_vertex.get_weight_scales(min_weights) all_syn_block_sz = app_vertex.get_synapses_size( max_atoms_per_core) structural_sz = app_vertex.get_structural_dynamics_size( @@ -103,7 +103,7 @@ def create_machine_vertices(self, chip_counter): label = f"{app_vertex.label}{vertex_slice}" machine_vertex = self.create_machine_vertex( vertex_slice, sdram, label, - structural_sz, ring_buffer_shifts, weight_scales, + structural_sz, min_weights, weight_scales, index, max_atoms_per_core, synaptic_matrices, neuron_data) self.governed_app_vertex.remember_machine_vertex(machine_vertex) @@ -152,7 +152,7 @@ def machine_vertices_for_recording(self, variable_to_record): def create_machine_vertex( self, vertex_slice, sdram, label, - structural_sz, ring_buffer_shifts, weight_scales, index, + structural_sz, min_weights, weight_scales, index, max_atoms_per_core, synaptic_matrices, neuron_data): # If using local-only create a local-only vertex s_dynamics = self.governed_app_vertex.synapse_dynamics @@ -160,13 +160,12 @@ def create_machine_vertex( return PopulationMachineLocalOnlyCombinedVertex( sdram, label, self.governed_app_vertex, vertex_slice, index, - ring_buffer_shifts, weight_scales, neuron_data, - max_atoms_per_core) + min_weights, weight_scales, neuron_data, max_atoms_per_core) # Otherwise create a normal vertex return PopulationMachineVertex( sdram, label, self.governed_app_vertex, - vertex_slice, index, ring_buffer_shifts, weight_scales, + vertex_slice, index, min_weights, weight_scales, structural_sz, max_atoms_per_core, synaptic_matrices, neuron_data) def get_sdram_used_by_atoms( diff --git a/spynnaker/pyNN/extra_algorithms/splitter_components/splitter_abstract_pop_vertex_neurons_synapses.py 
b/spynnaker/pyNN/extra_algorithms/splitter_components/splitter_abstract_pop_vertex_neurons_synapses.py index f0b54bbcba..ecfe1bc940 100644 --- a/spynnaker/pyNN/extra_algorithms/splitter_components/splitter_abstract_pop_vertex_neurons_synapses.py +++ b/spynnaker/pyNN/extra_algorithms/splitter_components/splitter_abstract_pop_vertex_neurons_synapses.py @@ -208,9 +208,9 @@ def create_machine_vertices(self, chip_counter): atoms_per_core = min( app_vertex.get_max_atoms_per_core(), app_vertex.n_atoms) - # Work out the ring buffer shifts based on all incoming things - rb_shifts = app_vertex.get_ring_buffer_shifts() - weight_scales = app_vertex.get_weight_scales(rb_shifts) + # Work out the minimum weights based on all incoming things + min_weights = app_vertex.get_min_weights() + weight_scales = app_vertex.get_weight_scales(min_weights) # We add the SDRAM edge SDRAM to the neuron resources so it is # accounted for within the placement @@ -249,7 +249,7 @@ def create_machine_vertices(self, chip_counter): # Create the neuron vertex for the slice neuron_vertex = self.__add_neuron_core( - vertex_slice, neuron_sdram, label, index, rb_shifts, + vertex_slice, neuron_sdram, label, index, min_weights, weight_scales, neuron_data, atoms_per_core) chip_counter.add_core(neuron_sdram) @@ -262,7 +262,7 @@ def create_machine_vertices(self, chip_counter): synapse_references, syn_label, feedback_partition = \ self.__add_lead_synapse_core( vertex_slice, structural_sz, lead_synapse_core_sdram, - label, rb_shifts, weight_scales, synapse_vertices, + label, min_weights, weight_scales, synapse_vertices, neuron_vertex, atoms_per_core, synaptic_matrices) chip_counter.add_core(lead_synapse_core_sdram) @@ -313,7 +313,7 @@ def create_machine_vertices(self, chip_counter): self.__neuromodulators.add(proj._projection_edge.pre_vertex) def __add_neuron_core( - self, vertex_slice, sdram, label, index, rb_shifts, + self, vertex_slice, sdram, label, index, min_weights, weight_scales, neuron_data, 
atoms_per_core): """ Add a neuron core for for a slice of neurons. @@ -323,9 +323,8 @@ def __add_neuron_core( :param ~pacman.model.resources.MultiRegionSDRAM sdram: :param str label: The name to give the core :param int index: The index of the slice in the ordered list of slices - :param list(int) rb_shifts: - The computed ring-buffer shift values to use to get the weights - back to S1615 values + :param list(float) min_weights: + The computed minimum weights to be used in the simulation :param list(int) weight_scales: The scale to apply to weights to encode them in the 16-bit synapses :return: The neuron vertex created and the resources used @@ -335,7 +334,7 @@ def __add_neuron_core( neuron_vertex = PopulationNeuronsMachineVertex( sdram, f"{label}_Neurons:{vertex_slice.lo_atom}-{vertex_slice.hi_atom}", - app_vertex, vertex_slice, index, rb_shifts, weight_scales, + app_vertex, vertex_slice, index, min_weights, weight_scales, neuron_data, atoms_per_core) app_vertex.remember_machine_vertex(neuron_vertex) self.__neuron_vertices.append(neuron_vertex) @@ -344,7 +343,7 @@ def __add_neuron_core( def __add_lead_synapse_core( self, vertex_slice, structural_sz, lead_synapse_core_sdram, label, - rb_shifts, weight_scales, synapse_vertices, neuron_vertex, + min_weights, weight_scales, synapse_vertices, neuron_vertex, atoms_per_core, synaptic_matrices): """ Add the first synapse core for a neuron core. 
This core will @@ -358,9 +357,8 @@ def __add_lead_synapse_core( The SDRAM that will be used by the synapse core to handle a given set of projections :param str label: The name to give the core - :param list(int) rb_shifts: - The computed ring-buffer shift values to use to get the weights - back to S1615 values + :param list(float) min_weights: + The computed minimum weights to be used in the simulation :param list(int) weight_scales: The scale to apply to weights to encode them in the 16-bit synapses :param synapse_vertices: A list to add the core to @@ -381,7 +379,7 @@ def __add_lead_synapse_core( # Do the lead synapse core lead_synapse_vertex = PopulationSynapsesMachineVertexLead( lead_synapse_core_sdram, f"{syn_label}(0)", - self.governed_app_vertex, vertex_slice, rb_shifts, weight_scales, + self.governed_app_vertex, vertex_slice, min_weights, weight_scales, structural_sz, synapse_references, atoms_per_core, synaptic_matrices) self.governed_app_vertex.remember_machine_vertex(lead_synapse_vertex) diff --git a/spynnaker/pyNN/models/neural_projections/connectors/abstract_connector.py b/spynnaker/pyNN/models/neural_projections/connectors/abstract_connector.py index e83a150334..48697110af 100644 --- a/spynnaker/pyNN/models/neural_projections/connectors/abstract_connector.py +++ b/spynnaker/pyNN/models/neural_projections/connectors/abstract_connector.py @@ -272,6 +272,52 @@ def get_weight_mean(self, weights, synapse_info): return abs(weights) raise SpynnakerException("Unrecognised weight format") + def get_weight_minimum(self, weights, weight_random_sigma, synapse_info): + """ Get the minimum of the weights. 
+ + :type weights: ~numpy.ndarray or ~pyNN.random.NumpyRNG or int or float + or list(int) or list(float) + :param int weight_random_sigma: The number of standard deviations from + the mean to allow for when using a random distribution + :rtype: float + """ + if isinstance(weights, RandomDistribution): + mean_weight = utility_calls.get_mean(weights) + weight_sd = math.sqrt(utility_calls.get_variance(weights)) + if mean_weight < 0: + min_weight = mean_weight + (weight_sd * weight_random_sigma) + if min_weight > 0: + min_weight = -min_weight + high = utility_calls.high(weights) + if high is None: + return abs(min_weight) + return abs(max(min_weight, high)) + else: + min_weight = mean_weight - (weight_sd * weight_random_sigma) + low = utility_calls.low(weights) + if low is None: + return abs(min_weight) + if min_weight < 0: + min_weight = abs(min_weight) + return abs(min(min_weight, low)) + + elif isinstance(weights, str): + d = self._get_distances(weights, synapse_info) + return numpy.min(_expr_context.eval(weights, d=d)) + elif numpy.isscalar(weights): + return abs(weights) + elif hasattr(weights, "__getitem__"): + # Have to assume here that the list of weights that has been + # provided has different (non-zero) values in it. In order to + # represent these correctly, it's the greatest common divisor + # across the array of weights that we need + non_zero_weights = numpy.abs(weights)[ + numpy.nonzero(numpy.abs(weights))] + if len(non_zero_weights) == 0: + return 0.0 + return utility_calls.float_gcd_of_array(non_zero_weights) + raise SpynnakerException("Unrecognised weight format") + def _get_weight_maximum(self, weights, n_connections, synapse_info): """ Get the maximum of the weights. 
@@ -302,6 +348,14 @@ def _get_weight_maximum(self, weights, n_connections, synapse_info): return numpy.max(_expr_context.eval(weights, d=d)) elif numpy.isscalar(weights): return abs(weights) + elif hasattr(weights, "__getitem__"): + # Have to assume here that the list of weights that has been + # provided has different (non-zero) values in it. + non_zero_weights = numpy.abs(weights)[ + numpy.nonzero(numpy.abs(weights))] + if len(non_zero_weights) == 0: + return 0.0 + return numpy.max(non_zero_weights) raise SpynnakerException("Unrecognised weight format") @abstractmethod diff --git a/spynnaker/pyNN/models/neural_projections/connectors/convolution_connector.py b/spynnaker/pyNN/models/neural_projections/connectors/convolution_connector.py index da936e78f5..3e21572d3a 100644 --- a/spynnaker/pyNN/models/neural_projections/connectors/convolution_connector.py +++ b/spynnaker/pyNN/models/neural_projections/connectors/convolution_connector.py @@ -281,6 +281,16 @@ def get_n_connections_to_post_vertex_maximum(self, synapse_info): def get_weight_maximum(self, synapse_info): return numpy.amax(self.__kernel_weights) + @overrides(AbstractConnector.get_weight_minimum) + def get_weight_minimum(self, weights, weight_random_sigma, synapse_info): + # Use the kernel weights if user has supplied them + if self.__kernel_weights is not None: + return super(ConvolutionConnector, self).get_weight_minimum( + self.__kernel_weights, weight_random_sigma, synapse_info) + + return super(ConvolutionConnector, self).get_weight_minimum( + weights, weight_random_sigma, synapse_info) + @overrides(AbstractConnector.get_connected_vertices) def get_connected_vertices(self, s_info, source_vertex, target_vertex): pre_vertices = numpy.array( diff --git a/spynnaker/pyNN/models/neural_projections/connectors/from_list_connector.py b/spynnaker/pyNN/models/neural_projections/connectors/from_list_connector.py index ac117ac7cc..a1f3b056d8 100644 --- 
a/spynnaker/pyNN/models/neural_projections/connectors/from_list_connector.py +++ b/spynnaker/pyNN/models/neural_projections/connectors/from_list_connector.py @@ -84,6 +84,15 @@ def __init__(self, conn_list, safe=True, verbose=False, column_names=None, # Call the conn_list setter, as this sets the internal values self.conn_list = conn_list + @overrides(AbstractConnector.set_projection_information) + def set_projection_information(self, synapse_info): + AbstractConnector.set_projection_information(self, synapse_info) + # now we want to tell the synapse_info about weights and delays + if self.__weights is not None: + synapse_info.weights = self.__weights.flatten() + if self.__delays is not None: + synapse_info.delays = self.__delays + @overrides(AbstractConnector.get_delay_maximum) def get_delay_maximum(self, synapse_info): if self.__delays is None: @@ -258,6 +267,14 @@ def get_weight_variance(self, weights, synapse_info): else: return numpy.var(numpy.abs(self.__weights)) + @overrides(AbstractConnector.get_weight_minimum) + def get_weight_minimum(self, weights, weight_random_sigma, synapse_info): + if self.__weights is None: + return super(FromListConnector, self).get_weight_minimum( + weights, weight_random_sigma, synapse_info) + return super(FromListConnector, self).get_weight_minimum( + self.__weights, weight_random_sigma, synapse_info) + @overrides(AbstractGenerateConnectorOnHost.create_synaptic_block) def create_synaptic_block( self, post_slices, post_vertex_slice, synapse_type, synapse_info): diff --git a/spynnaker/pyNN/models/neural_projections/connectors/kernel_connector.py b/spynnaker/pyNN/models/neural_projections/connectors/kernel_connector.py index 7cbf89e4f2..ec552655bb 100644 --- a/spynnaker/pyNN/models/neural_projections/connectors/kernel_connector.py +++ b/spynnaker/pyNN/models/neural_projections/connectors/kernel_connector.py @@ -319,6 +319,15 @@ def __compute_statistics( numpy.array(all_pre_ids, dtype='uint32'), numpy.array(all_delays), 
numpy.array(all_weights)) + @overrides(AbstractConnector.set_projection_information) + def set_projection_information(self, synapse_info): + AbstractConnector.set_projection_information(self, synapse_info) + # now we want to tell the synapse_info about weights and delays + if self._krn_weights is not None: + synapse_info.weights = self._krn_weights.flatten() + if self._krn_delays is not None: + synapse_info.delays = self._krn_delays + @overrides(AbstractConnector.get_delay_maximum) def get_delay_maximum(self, synapse_info): # Use the kernel delays if user has supplied them @@ -379,6 +388,16 @@ def get_weight_maximum(self, synapse_info): return self._get_weight_maximum( synapse_info.weights, n_conns, synapse_info) + @overrides(AbstractConnector.get_weight_minimum) + def get_weight_minimum(self, weights, weight_random_sigma, synapse_info): + # Use the kernel weights if user has supplied them + if self._krn_weights is not None: + return super(KernelConnector, self).get_weight_minimum( + self._krn_weights, weight_random_sigma, synapse_info) + + return super(KernelConnector, self).get_weight_minimum( + weights, weight_random_sigma, synapse_info) + @overrides(AbstractConnector.get_weight_mean) def get_weight_mean(self, weights, synapse_info): # Use the kernel weights if user has supplied them diff --git a/spynnaker/pyNN/models/neural_projections/connectors/pool_dense_connector.py b/spynnaker/pyNN/models/neural_projections/connectors/pool_dense_connector.py index 2884855511..4d1948d85f 100644 --- a/spynnaker/pyNN/models/neural_projections/connectors/pool_dense_connector.py +++ b/spynnaker/pyNN/models/neural_projections/connectors/pool_dense_connector.py @@ -257,6 +257,16 @@ def get_weight_maximum(self, synapse_info): return super(PoolDenseConnector, self)._get_weight_maximum( self.__weights, n_conns, synapse_info) + @overrides(AbstractConnector.get_weight_minimum) + def get_weight_minimum(self, weights, weight_random_sigma, synapse_info): + # Use the kernel weights if 
user has supplied them + if self.__weights is not None: + return super(PoolDenseConnector, self).get_weight_minimum( + self.__weights, weight_random_sigma, synapse_info) + + return super(PoolDenseConnector, self).get_weight_minimum( + weights, weight_random_sigma, synapse_info) + def __pre_as_post(self, pre_coords): """ Write pre coordinates as post coordinates. diff --git a/spynnaker/pyNN/models/neural_projections/synapse_information.py b/spynnaker/pyNN/models/neural_projections/synapse_information.py index 3f0b9f0304..d7c36ec627 100644 --- a/spynnaker/pyNN/models/neural_projections/synapse_information.py +++ b/spynnaker/pyNN/models/neural_projections/synapse_information.py @@ -180,6 +180,10 @@ def weights(self): """ return self.__weights + @weights.setter + def weights(self, weights): + self.__weights = weights + @property def delays(self): """ @@ -189,6 +193,10 @@ def delays(self): """ return self.__delays + @delays.setter + def delays(self, delays): + self.__delays = delays + def may_generate_on_machine(self): """ Do we describe a collection of synapses whose synaptic matrix may diff --git a/spynnaker/pyNN/models/neuron/abstract_population_vertex.py b/spynnaker/pyNN/models/neuron/abstract_population_vertex.py index 9ada2fa356..61d75747b3 100644 --- a/spynnaker/pyNN/models/neuron/abstract_population_vertex.py +++ b/spynnaker/pyNN/models/neuron/abstract_population_vertex.py @@ -13,9 +13,10 @@ # limitations under the License. 
import logging +import sys import math import numpy -from scipy import special # @UnresolvedImport +# from scipy import special # @UnresolvedImport import operator from functools import reduce from collections import defaultdict @@ -30,10 +31,13 @@ from spinn_utilities.helpful_functions import is_singleton from spinn_utilities.config_holder import ( get_config_int, get_config_float, get_config_bool) + from pacman.model.resources import MultiRegionSDRAM from pacman.utilities.utility_calls import get_n_bits_for_fields, get_n_bits from spinn_front_end_common.abstract_models import ( AbstractCanReset) +from spinn_front_end_common.interface.provenance import ( + AbstractProvidesLocalProvenanceData) from spinn_front_end_common.utilities.constants import ( BYTES_PER_WORD, SYSTEM_BYTES_REQUIREMENT) from spinn_front_end_common.interface.profiling.profile_utils import ( @@ -43,18 +47,15 @@ .recording_utilities import ( get_recording_header_size, get_recording_data_constant_size) from spinn_front_end_common.interface.provenance import ( - ProvidesProvenanceDataFromMachineImpl) + ProvidesProvenanceDataFromMachineImpl, ProvenanceWriter) from spynnaker.pyNN.data import SpynnakerDataView from spynnaker.pyNN.models.common import NeuronRecorder from spynnaker.pyNN.models.abstract_models import ( - AbstractAcceptsIncomingSynapses, AbstractMaxSpikes, HasSynapses, - SupportsStructure) -from spynnaker.pyNN.utilities.constants import ( - POSSION_SIGMA_SUMMATION_LIMIT) -from spynnaker.pyNN.utilities.running_stats import RunningStats + AbstractAcceptsIncomingSynapses, HasSynapses, SupportsStructure) +from spynnaker.pyNN.exceptions import SynapticConfigurationException +from spynnaker.pyNN.utilities.utility_calls import float_gcd from spynnaker.pyNN.models.neuron.synapse_dynamics import ( - AbstractSDRAMSynapseDynamics, AbstractSynapseDynamicsStructural, - AbstractSupportsSignedWeights) + AbstractSDRAMSynapseDynamics, AbstractSynapseDynamicsStructural) from 
spynnaker.pyNN.models.neuron.local_only import AbstractLocalOnly from spynnaker.pyNN.models.neuron.synapse_dynamics import SynapseDynamicsStatic from spynnaker.pyNN.utilities.utility_calls import ( @@ -144,10 +145,10 @@ def _check_random_dists(rd): class AbstractPopulationVertex( PopulationApplicationVertex, AbstractAcceptsIncomingSynapses, - AbstractCanReset, SupportsStructure): - """ - Underlying vertex model for Neural Populations. - Not actually abstract. + AbstractCanReset, SupportsStructure, + AbstractProvidesLocalProvenanceData): + """ Underlying vertex model for Neural Populations.\ + Not actually abstract. """ __slots__ = [ @@ -173,6 +174,12 @@ class AbstractPopulationVertex( "__current_sources", "__current_source_id_list", "__structure", + "__weight_scales", + "__min_weights", + "__min_weights_auto", + "__weight_random_sigma", + "__max_stdp_spike_delta", + "__weight_provenance", "__rng", "__pop_seed", "__core_seeds", @@ -198,14 +205,15 @@ class AbstractPopulationVertex( _SYNAPSE_BASE_N_CPU_CYCLES = 10 # Elements before the start of global parameters - # 1. has key, 2. key, 3. n atoms, 4. n_atoms_peak 5. n_colour_bits + # 1 has key 2 n atoms 3 n_atoms_peak 4 n_colour_bits 5 n_synapse_types CORE_PARAMS_BASE_SIZE = 5 * BYTES_PER_WORD def __init__( self, n_neurons, label, max_atoms_per_core, spikes_per_second, ring_buffer_sigma, incoming_spike_buffer_size, neuron_impl, pynn_model, drop_late_spikes, splitter, seed, - n_colour_bits): + n_colour_bits, min_weights, weight_random_sigma, + max_stdp_spike_delta): """ :param int n_neurons: The number of neurons in the population :param str label: The label on the population @@ -232,6 +240,12 @@ def __init__( The Population seed, used to ensure the same random generation on each run. 
:param int n_colour_bits: The number of colour bits to use + :param min_weights: minimum weight list + :type min_weights: float array or None + :param weight_random_sigma: sigma value when using random weights + :type weight_random_sigma: float or None + :param max_stdp_spike_delta: the maximum expected spike time difference + :type max_stdp_spike_delta: float or None """ # pylint: disable=too-many-arguments super().__init__(label, max_atoms_per_core, splitter) @@ -306,6 +320,32 @@ def __init__( self.__structure = None + # Store (local) weight scales + self.__weight_scales = None + + # Read the minimum weight if not set; this might *still* be None, + # meaning "auto calculate"; the number of weights needs to match + # the number of synapse types + self.__min_weights = min_weights + self.__min_weights_auto = True + if self.__min_weights is not None: + self.__min_weights_auto = False + n_synapse_types = self.__neuron_impl.get_n_synapse_types() + if len(self.__min_weights) != n_synapse_types: + raise SynapticConfigurationException( + "The number of minimum weights provided ({} - {}) does not" + " match the number of synapse types ({})".format( + len(self.__min_weights), self.__min_weights, + n_synapse_types)) + + # Get the other minimum weight configuration parameters + self.__weight_random_sigma = weight_random_sigma + self.__max_stdp_spike_delta = max_stdp_spike_delta + + # Store weight provenance information mapping from + # (real weight, represented weight) -> projections + self.__weight_provenance = defaultdict(list) + # An RNG for use in synaptic generation self.__rng = numpy.random.RandomState(seed) self.__pop_seed = create_mars_kiss_seeds(self.__rng) @@ -889,6 +929,14 @@ def clear_connection_cache(self): Flush the cache of connection information; needed for a second run. 
""" self.__connection_cache.clear() + if SpynnakerDataView.get_requires_mapping(): + self.__reset_min_weights() + + def __reset_min_weights(self): + """ Reset min_weights if set to auto-calculate + """ + if self.__min_weights_auto: + self.__min_weights = None def describe(self): """ @@ -971,92 +1019,35 @@ def reset_to_first_timestep(self): # generation self.__tell_neuron_vertices_to_regenerate() - @staticmethod - def _ring_buffer_expected_upper_bound( - weight_mean, weight_std_dev, spikes_per_second, - n_synapses_in, sigma): - """ - Provides expected upper bound on accumulated values in a ring - buffer element. - - Requires an assessment of maximum Poisson input rate. - - Assumes knowledge of mean and SD of weight distribution, fan-in - and timestep. - - All arguments should be assumed real values except n_synapses_in - which will be an integer. - - :param float weight_mean: Mean of weight distribution (in either nA or - microSiemens as required) - :param float weight_std_dev: SD of weight distribution - :param float spikes_per_second: Maximum expected Poisson rate in Hz - :param int machine_timestep: in us - :param int n_synapses_in: No of connected synapses - :param float sigma: How many SD above the mean to go for upper bound; - a good starting choice is 5.0. Given length of simulation we can - set this for approximate number of saturation events. 
- :rtype: float + # TODO: The upcoming functions replace the ring_buffer bound calculations + # and use minimum weights instead; it may be that a mixture of the two + # methods is necessary in the long run + def __get_closest_weight(self, value): + """ Get the best representation of the weight so that both weight and + 1 / w work + + :param float value: value to get the closest weight of """ - # E[ number of spikes ] in a timestep - average_spikes_per_timestep = ( - float(n_synapses_in * spikes_per_second) / - SpynnakerDataView.get_simulation_time_step_per_s()) - - # Exact variance contribution from inherent Poisson variation - poisson_variance = average_spikes_per_timestep * (weight_mean ** 2) - - # Upper end of range for Poisson summation required below - # upper_bound needs to be an integer - upper_bound = int(round(average_spikes_per_timestep + - POSSION_SIGMA_SUMMATION_LIMIT * - math.sqrt(average_spikes_per_timestep))) - - # Closed-form exact solution for summation that gives the variance - # contributed by weight distribution variation when modulated by - # Poisson PDF. Requires scipy.special for gamma and incomplete gamma - # functions. Beware: incomplete gamma doesn't work the same as - # Mathematica because (1) it's regularised and needs a further - # multiplication and (2) it's actually the complement that is needed - # i.e. 
'gammaincc'] - - weight_variance = 0.0 - - if weight_std_dev > 0: - # pylint: disable=no-member - lngamma = special.gammaln(1 + upper_bound) - gammai = special.gammaincc( - 1 + upper_bound, average_spikes_per_timestep) - - big_ratio = (math.log(average_spikes_per_timestep) * upper_bound - - lngamma) - - if -701.0 < big_ratio < 701.0 and big_ratio != 0.0: - log_weight_variance = ( - -average_spikes_per_timestep + - math.log(average_spikes_per_timestep) + - 2.0 * math.log(weight_std_dev) + - math.log(math.exp(average_spikes_per_timestep) * gammai - - math.exp(big_ratio))) - weight_variance = math.exp(log_weight_variance) - - # upper bound calculation -> mean + n * SD - return ((average_spikes_per_timestep * weight_mean) + - (sigma * math.sqrt(poisson_variance + weight_variance))) - - def get_ring_buffer_shifts(self): - """ - Get the shift of the ring buffers for transfer of values into the - input buffers for this model. + if abs(value) < 1.0: + return DataType.S1615.closest_representable_value(value) + return 1 / ( + DataType.S1615.closest_representable_value_above(1 / value)) - :param incoming_projections: - The projections to consider in the calculations - :type incoming_projections: - list(~spynnaker.pyNN.models.projection.Projection) - :rtype: list(int) + def __calculate_min_weights(self): + """ Calculate the minimum weights required to best represent all the + possible weights coming into this vertex + + :param list(~.Projection) incoming_projections: incoming proj to vertex + + :return: list of minimum weights + :rtype: list(float) """ - stats = _Stats(self.__neuron_impl, self.__spikes_per_second, - self.__ring_buffer_sigma) + # Initialise to a maximum value + min_weights = [sys.maxsize for _ in range( + self.__neuron_impl.get_n_synapse_types())] + + # Get the (global) weight_scale from the input_type in the neuron_impl + weight_scale = self.__neuron_impl.get_global_weight_scale() for proj in self.incoming_projections: # pylint: disable=protected-access @@ 
-1064,52 +1055,146 @@ def get_ring_buffer_shifts(self): # Skip if this is a synapse dynamics synapse type if synapse_info.synapse_type_from_dynamics: continue - stats.add_projection(proj) - n_synapse_types = self.__neuron_impl.get_n_synapse_types() - max_weights = numpy.zeros(n_synapse_types) - for synapse_type in range(n_synapse_types): - max_weights[synapse_type] = stats.get_max_weight(synapse_type) + synapse_dynamics = synapse_info.synapse_dynamics + connector = synapse_info.connector + conn_weight_min = synapse_dynamics.get_weight_minimum( + connector, self.__weight_random_sigma, synapse_info) + if conn_weight_min == 0: + conn_weight_min = DataType.S1615.decode_from_int(1) + conn_weight_min *= weight_scale + + # If local-only then deal with both positive and negative index + if isinstance(synapse_dynamics, AbstractLocalOnly): + s_type_pos = synapse_dynamics.get_positive_synapse_index(proj) + s_type_neg = synapse_dynamics.get_negative_synapse_index(proj) + if not numpy.isnan(conn_weight_min): + for s_type in [s_type_pos, s_type_neg]: + if min_weights[s_type] != sys.maxsize: + conn_weight_min = float_gcd( + min_weights[s_type], conn_weight_min) + min_weights[s_type] = min( + min_weights[s_type], conn_weight_min) + + # Do any remaining calculations in the synapse dynamics + min_weights = synapse_dynamics.calculate_min_weight( + min_weights, self.__max_stdp_spike_delta, + weight_scale, conn_weight_min, s_type) + else: + synapse_type = synapse_info.synapse_type + if not numpy.isnan(conn_weight_min): + if min_weights[synapse_type] != sys.maxsize: + conn_weight_min = float_gcd( + min_weights[synapse_type], conn_weight_min) + min_weights[synapse_type] = min( + min_weights[synapse_type], conn_weight_min) + + # Do any remaining calculations in the synapse dynamics + min_weights = synapse_dynamics.calculate_min_weight( + min_weights, self.__max_stdp_spike_delta, + weight_scale, conn_weight_min, synapse_type) + + # Convert values to their closest representable value to 
ensure + # that division works for the minimum value + min_weights = [self.__get_closest_weight(m) + if m != sys.maxsize else 0 for m in min_weights] + + # The minimum weight shouldn't be 0 unless set above (and then it + # doesn't matter that we use the min as there are no weights); so + # set the weight to the smallest representable value if 0 + min_weights = [m if m > 0 else DataType.S1615.decode_from_int(1) + for m in min_weights] + + # Now check that the maximum weight isn't too big + for proj in self.incoming_projections: + # pylint: disable-next=protected-access + synapse_info = proj._synapse_information + synapse_type = synapse_info.synapse_type + connector = synapse_info.connector + synapse_dynamics = synapse_info.synapse_dynamics - # Convert these to powers; we could use int.bit_length() for this if - # they were integers, but they aren't... - max_weight_powers = ( - 0 if w <= 0 else int(math.ceil(max(0, math.log2(w)))) - for w in max_weights) + weight_max = synapse_dynamics.get_weight_maximum( + connector, synapse_info) + weight_max *= weight_scale - # If 2^max_weight_power equals the max weight, we have to add another - # power, as range is 0 - (just under 2^max_weight_power)! - max_weight_powers = ( - w + 1 if (2 ** w) <= a else w - for w, a in zip(max_weight_powers, max_weights)) + weight_scale_limit = float(DataType.S1615.scale) + if weight_scale_limit * min_weights[synapse_type] < weight_max: + max_weight = self.__get_closest_weight(weight_max) + min_weights[synapse_type] = max_weight / weight_scale_limit - return list(max_weight_powers) + self.__check_weights(min_weights, weight_scale) - @staticmethod - def __get_weight_scale(ring_buffer_to_input_left_shift): - """ - Return the amount to scale the weights by to convert them from - floating point values to 16-bit fixed point numbers which can be - shifted left by ring_buffer_to_input_left_shift to produce an - s1615 fixed point number. 
+ return min_weights - :param int ring_buffer_to_input_left_shift: - :rtype: float - """ - return float(math.pow(2, 16 - (ring_buffer_to_input_left_shift + 1))) + def __check_weights( + self, min_weights, weight_scale): + """ Warn the user about weights that can't be represented properly + where possible - def get_weight_scales(self, ring_buffer_shifts): + :param ~numpy.ndarray min_weights: Minimum weights per synapse type + :param float weight_scale: The weight_scale from the synapse input_type """ - Get the weight scaling to apply to weights in synapses. + for proj in self.incoming_projections: + # pylint: disable-next=protected-access + synapse_info = proj._synapse_information + weights = synapse_info.weights + synapse_type = synapse_info.synapse_type + min_weight = min_weights[synapse_type] + if not isinstance(weights, str): + if numpy.isscalar(weights): + self.__check_weight( + min_weight, weights, weight_scale, proj, synapse_info) + elif hasattr(weights, "__getitem__"): + for w in weights: + self.__check_weight( + min_weight, w, weight_scale, proj, synapse_info) + + def __check_weight( + self, min_weight, weight, weight_scale, projection, + synapse_info): + """ Warn the user about a weight that can't be represented properly + where possible + + :param float min_weight: Minimum weight value + :param float weight: weight value being checked + :param float weight_scale: The weight_scale from the synapse input_type + :param ~.Projection projection: The projection + :param ~.SynapseInformation synapse_info: The synapse information + """ + r_weight = weight * weight_scale / min_weight + r_weight = (DataType.UINT16.closest_representable_value( + r_weight) * min_weight) / weight_scale + if weight != r_weight: + self.__weight_provenance[weight, r_weight].append( + (projection, synapse_info)) + + def get_min_weights(self): + """ Calculate the minimum weights required to best represent all the + possible weights coming into this vertex + + :return: list of minimum 
weights + :rtype: list(float) + """ + if self.__min_weights is None: + self.__min_weights = self.__calculate_min_weights() + else: + weight_scale = self.__neuron_impl.get_global_weight_scale() + self.__check_weights( + self.__min_weights, weight_scale) + + return self.__min_weights + + def get_weight_scales(self, min_weights): + """ Get the weight scaling to apply to weights in synapses - :param list(int) ring_buffer_shifts: - The shifts to convert to weight scales + :param list(int) min_weights: + The min weights to convert to weight scales :rtype: list(int) """ weight_scale = self.__neuron_impl.get_global_weight_scale() - return numpy.array([ - self.__get_weight_scale(r) * weight_scale - for r in ring_buffer_shifts]) + self.__weight_scales = numpy.array( + [(1 / w) * weight_scale if w != 0 else 0 for w in min_weights]) + return self.__weight_scales @overrides(AbstractAcceptsIncomingSynapses.get_connections_from_machine) def get_connections_from_machine( @@ -1143,8 +1228,8 @@ def get_synapse_params_size(self): """ # This will only hold ring buffer scaling for the neuron synapse # types - return (_SYNAPSES_BASE_SDRAM_USAGE_IN_BYTES + - (BYTES_PER_WORD * self.__neuron_impl.get_n_synapse_types())) + return (_SYNAPSES_BASE_SDRAM_USAGE_IN_BYTES + ( + BYTES_PER_WORD * 2 * self.__neuron_impl.get_n_synapse_types())) def get_synapse_dynamics_size(self, n_atoms): """ @@ -1578,125 +1663,154 @@ def can_generate_on_machine(self): _check_random_dists(self.__state_variables) return True - -class _Stats(object): - """ - Object to keep hold of and process statistics for ring buffer scaling. 
- """ - __slots__ = [ - "w_scale", - "w_scale_sq", - "n_synapse_types", - "running_totals", - "delay_running_totals", - "total_weights", - "biggest_weight", - "rate_stats", - "steps_per_second", - "default_spikes_per_second", - "ring_buffer_sigma" - ] - - def __init__( - self, neuron_impl, default_spikes_per_second, ring_buffer_sigma): - self.w_scale = neuron_impl.get_global_weight_scale() - self.w_scale_sq = self.w_scale ** 2 - n_synapse_types = neuron_impl.get_n_synapse_types() - - self.running_totals = [ - RunningStats() for _ in range(n_synapse_types)] - self.delay_running_totals = [ - RunningStats() for _ in range(n_synapse_types)] - self.total_weights = numpy.zeros(n_synapse_types) - self.biggest_weight = numpy.zeros(n_synapse_types) - self.rate_stats = [RunningStats() for _ in range(n_synapse_types)] - - self.steps_per_second = ( - SpynnakerDataView.get_simulation_time_step_per_s()) - self.default_spikes_per_second = default_spikes_per_second - self.ring_buffer_sigma = ring_buffer_sigma - - def add_projection(self, proj): - # pylint: disable=protected-access - s_dynamics = proj._synapse_information.synapse_dynamics - if isinstance(s_dynamics, AbstractSupportsSignedWeights): - self.__add_signed_projection(proj) - else: - self.__add_unsigned_projection(proj) - - def __add_signed_projection(self, proj): - # pylint: disable=protected-access - s_info = proj._synapse_information - connector = s_info.connector - s_dynamics = s_info.synapse_dynamics - - n_conns = connector.get_n_connections_to_post_vertex_maximum(s_info) - d_var = s_dynamics.get_delay_variance(connector, s_info.delays, s_info) - - s_type_pos = s_dynamics.get_positive_synapse_index(proj) - w_mean_pos = s_dynamics.get_mean_positive_weight(proj) - w_var_pos = s_dynamics.get_variance_positive_weight(proj) - w_max_pos = s_dynamics.get_maximum_positive_weight(proj) - self.__add_details( - proj, s_type_pos, n_conns, w_mean_pos, w_var_pos, w_max_pos, d_var) - - s_type_neg = 
s_dynamics.get_negative_synapse_index(proj) - w_mean_neg = -s_dynamics.get_mean_negative_weight(proj) - w_var_neg = -s_dynamics.get_variance_negative_weight(proj) - w_max_neg = -s_dynamics.get_minimum_negative_weight(proj) - self.__add_details( - proj, s_type_neg, n_conns, w_mean_neg, w_var_neg, w_max_neg, d_var) - - def __add_unsigned_projection(self, proj): - # pylint: disable=protected-access - s_info = proj._synapse_information - s_type = s_info.synapse_type - s_dynamics = s_info.synapse_dynamics - connector = s_info.connector - - n_conns = connector.get_n_connections_to_post_vertex_maximum(s_info) - w_mean = s_dynamics.get_weight_mean(connector, s_info) - w_var = s_dynamics.get_weight_variance( - connector, s_info.weights, s_info) - w_max = s_dynamics.get_weight_maximum(connector, s_info) - d_var = s_dynamics.get_delay_variance(connector, s_info.delays, s_info) - self.__add_details(proj, s_type, n_conns, w_mean, w_var, w_max, d_var) - - def __add_details( - self, proj, s_type, n_conns, w_mean, w_var, w_max, d_var): - self.running_totals[s_type].add_items( - w_mean * self.w_scale, w_var * self.w_scale_sq, n_conns) - self.biggest_weight[s_type] = max( - self.biggest_weight[s_type], w_max * self.w_scale) - self.delay_running_totals[s_type].add_items(0.0, d_var, n_conns) - - spikes_per_tick, spikes_per_second = self.__pre_spike_stats(proj) - self.rate_stats[s_type].add_items(spikes_per_second, 0, n_conns) - self.total_weights[s_type] += spikes_per_tick * (w_max * n_conns) - - def __pre_spike_stats(self, proj): - spikes_per_tick = max( - 1.0, self.default_spikes_per_second / self.steps_per_second) - spikes_per_second = self.default_spikes_per_second - # pylint: disable=protected-access - pre_vertex = proj._projection_edge.pre_vertex - if isinstance(pre_vertex, AbstractMaxSpikes): - rate = pre_vertex.max_spikes_per_second() - if rate != 0: - spikes_per_second = rate - spikes_per_tick = pre_vertex.max_spikes_per_ts() - return spikes_per_tick, spikes_per_second - - 
def get_max_weight(self, s_type): - if self.delay_running_totals[s_type].variance == 0.0: - return max(self.total_weights[s_type], self.biggest_weight[s_type]) - - stats = self.running_totals[s_type] - rates = self.rate_stats[s_type] - # pylint: disable=protected-access - w_max = AbstractPopulationVertex._ring_buffer_expected_upper_bound( - stats.mean, stats.standard_deviation, rates.mean, - stats.n_items, self.ring_buffer_sigma) - w_max = min(w_max, self.total_weights[s_type]) - w_max = max(w_max, self.biggest_weight[s_type]) - return w_max + @overrides(AbstractProvidesLocalProvenanceData.get_local_provenance_data) + def get_local_provenance_data(self): + synapse_names = list(self.__neuron_impl.get_synapse_targets()) + with ProvenanceWriter() as db: + for i, weight in enumerate(self.__min_weights): + db.insert_app_vertex( + self.label, synapse_names[i], "min_weight", weight) + for (weight, r_weight) in self.__weight_provenance: + proj_info = self.__weight_provenance[weight, r_weight] + for i, (_proj, s_info) in enumerate(proj_info): + db.insert_connector( + s_info.pre_population.label, + s_info.post_population.label, + s_info.connector.__class__.__name__, + "weight_representation", + weight == r_weight + ) + + if (weight != r_weight): + db.insert_report( + "Weight of {} could not be represented precisely;" + " a weight of {} was used instead".format( + weight, r_weight)) + +# not sure anything after this point is used any more? 
+# class _Stats(object): +# """ Object to keep hold of and process statistics for ring buffer scaling +# """ +# __slots__ = [ +# "w_scale", +# "w_scale_sq", +# "n_synapse_types", +# "running_totals", +# "delay_running_totals", +# "total_weights", +# "biggest_weight", +# "rate_stats", +# "steps_per_second", +# "default_spikes_per_second", +# "ring_buffer_sigma" +# ] +# +# def __init__( +# self, neuron_impl, default_spikes_per_second, ring_buffer_sigma): +# self.w_scale = neuron_impl.get_global_weight_scale() +# self.w_scale_sq = self.w_scale ** 2 +# n_synapse_types = neuron_impl.get_n_synapse_types() +# +# self.running_totals = [ +# RunningStats() for _ in range(n_synapse_types)] +# self.delay_running_totals = [ +# RunningStats() for _ in range(n_synapse_types)] +# self.total_weights = numpy.zeros(n_synapse_types) +# self.biggest_weight = numpy.zeros(n_synapse_types) +# self.rate_stats = [RunningStats() for _ in range(n_synapse_types)] +# +# self.steps_per_second = ( +# SpynnakerDataView.get_simulation_time_step_per_s()) +# self.default_spikes_per_second = default_spikes_per_second +# self.ring_buffer_sigma = ring_buffer_sigma +# +# def add_projection(self, proj): +# # pylint: disable=protected-access +# s_dynamics = proj._synapse_information.synapse_dynamics +# if isinstance(s_dynamics, AbstractSupportsSignedWeights): +# self.__add_signed_projection(proj) +# else: +# self.__add_unsigned_projection(proj) +# +# def __add_signed_projection(self, proj): +# # pylint: disable=protected-access +# s_info = proj._synapse_information +# connector = s_info.connector +# s_dynamics = s_info.synapse_dynamics +# +# n_conns = connector.get_n_connections_to_post_vertex_maximum(s_info) +# d_var = s_dynamics.get_delay_variance( +# connector, s_info.delays, s_info) +# +# s_type_pos = s_dynamics.get_positive_synapse_index(proj) +# w_mean_pos = s_dynamics.get_mean_positive_weight(proj) +# w_var_pos = s_dynamics.get_variance_positive_weight(proj) +# w_max_pos = 
s_dynamics.get_maximum_positive_weight(proj) +# self.__add_details( +# proj, s_type_pos, n_conns, w_mean_pos, w_var_pos, w_max_pos, +# d_var) +# +# s_type_neg = s_dynamics.get_negative_synapse_index(proj) +# w_mean_neg = -s_dynamics.get_mean_negative_weight(proj) +# w_var_neg = -s_dynamics.get_variance_negative_weight(proj) +# w_max_neg = -s_dynamics.get_minimum_negative_weight(proj) +# self.__add_details( +# proj, s_type_neg, n_conns, w_mean_neg, w_var_neg, w_max_neg, +# d_var) +# +# def __add_unsigned_projection(self, proj): +# # pylint: disable=protected-access +# s_info = proj._synapse_information +# s_type = s_info.synapse_type +# s_dynamics = s_info.synapse_dynamics +# connector = s_info.connector +# +# n_conns = connector.get_n_connections_to_post_vertex_maximum(s_info) +# w_mean = s_dynamics.get_weight_mean(connector, s_info) +# w_var = s_dynamics.get_weight_variance( +# connector, s_info.weights, s_info) +# w_max = s_dynamics.get_weight_maximum(connector, s_info) +# d_var = s_dynamics.get_delay_variance( +# connector, s_info.delays, s_info) +# self.__add_details( +# proj, s_type, n_conns, w_mean, w_var, w_max, d_var) +# +# def __add_details( +# self, proj, s_type, n_conns, w_mean, w_var, w_max, d_var): +# self.running_totals[s_type].add_items( +# w_mean * self.w_scale, w_var * self.w_scale_sq, n_conns) +# self.biggest_weight[s_type] = max( +# self.biggest_weight[s_type], w_max * self.w_scale) +# self.delay_running_totals[s_type].add_items(0.0, d_var, n_conns) +# +# spikes_per_tick, spikes_per_second = self.__pre_spike_stats(proj) +# self.rate_stats[s_type].add_items(spikes_per_second, 0, n_conns) +# self.total_weights[s_type] += spikes_per_tick * (w_max * n_conns) +# +# def __pre_spike_stats(self, proj): +# spikes_per_tick = max( +# 1.0, self.default_spikes_per_second / self.steps_per_second) +# spikes_per_second = self.default_spikes_per_second +# # pylint: disable=protected-access +# pre_vertex = proj._projection_edge.pre_vertex +# if 
isinstance(pre_vertex, AbstractMaxSpikes): +# rate = pre_vertex.max_spikes_per_second() +# if rate != 0: +# spikes_per_second = rate +# spikes_per_tick = pre_vertex.max_spikes_per_ts() +# return spikes_per_tick, spikes_per_second +# +# def get_max_weight(self, s_type): +# if self.delay_running_totals[s_type].variance == 0.0: +# return max( +# self.total_weights[s_type], self.biggest_weight[s_type]) +# +# stats = self.running_totals[s_type] +# rates = self.rate_stats[s_type] +# # pylint: disable=protected-access +# w_max = AbstractPopulationVertex._ring_buffer_expected_upper_bound( +# stats.mean, stats.standard_deviation, rates.mean, +# stats.n_items, self.ring_buffer_sigma) +# w_max = min(w_max, self.total_weights[s_type]) +# w_max = max(w_max, self.biggest_weight[s_type]) +# return w_max diff --git a/spynnaker/pyNN/models/neuron/abstract_pynn_neuron_model.py b/spynnaker/pyNN/models/neuron/abstract_pynn_neuron_model.py index 79d7b41741..94e05358bd 100644 --- a/spynnaker/pyNN/models/neuron/abstract_pynn_neuron_model.py +++ b/spynnaker/pyNN/models/neuron/abstract_pynn_neuron_model.py @@ -24,7 +24,9 @@ _population_parameters = { "spikes_per_second": None, "ring_buffer_sigma": None, "incoming_spike_buffer_size": None, "drop_late_spikes": None, - "splitter": None, "seed": None, "n_colour_bits": None + "splitter": None, "seed": None, "n_colour_bits": None, + "min_weights": None, "weight_random_sigma": 2, + "max_stdp_spike_delta": 50, } @@ -48,7 +50,8 @@ def _model(self): def create_vertex( self, n_neurons, label, spikes_per_second, ring_buffer_sigma, incoming_spike_buffer_size, drop_late_spikes, splitter, seed, - n_colour_bits): + n_colour_bits, min_weights, weight_random_sigma, + max_stdp_spike_delta): """ :param float spikes_per_second: :param float ring_buffer_sigma: @@ -65,7 +68,8 @@ def create_vertex( return AbstractPopulationVertex( n_neurons, label, max_atoms, spikes_per_second, ring_buffer_sigma, incoming_spike_buffer_size, self.__model, self, drop_late_spikes, - 
splitter, seed, n_colour_bits) + splitter, seed, n_colour_bits, min_weights, weight_random_sigma, + max_stdp_spike_delta) @property @overrides(AbstractPyNNModel.name) diff --git a/spynnaker/pyNN/models/neuron/abstract_pynn_neuron_model_standard.py b/spynnaker/pyNN/models/neuron/abstract_pynn_neuron_model_standard.py index bb64a9691d..7affa2da21 100644 --- a/spynnaker/pyNN/models/neuron/abstract_pynn_neuron_model_standard.py +++ b/spynnaker/pyNN/models/neuron/abstract_pynn_neuron_model_standard.py @@ -58,13 +58,15 @@ def create_vertex( self, n_neurons, label, spikes_per_second, ring_buffer_sigma, incoming_spike_buffer_size, n_steps_per_timestep, drop_late_spikes, splitter, seed, - n_colour_bits): + n_colour_bits, min_weights, weight_random_sigma, + max_stdp_spike_delta): """ :param int n_steps_per_timestep: """ # pylint: disable=arguments-differ self._model.n_steps_per_timestep = n_steps_per_timestep return super().create_vertex( - n_neurons, label, spikes_per_second, - ring_buffer_sigma, incoming_spike_buffer_size, drop_late_spikes, - splitter, seed, n_colour_bits) + n_neurons, label, spikes_per_second, ring_buffer_sigma, + incoming_spike_buffer_size, drop_late_spikes, splitter, + seed, n_colour_bits, min_weights, weight_random_sigma, + max_stdp_spike_delta) diff --git a/spynnaker/pyNN/models/neuron/plasticity/stdp/common.py b/spynnaker/pyNN/models/neuron/plasticity/stdp/common.py index 7adb0567bc..d3537f0c33 100644 --- a/spynnaker/pyNN/models/neuron/plasticity/stdp/common.py +++ b/spynnaker/pyNN/models/neuron/plasticity/stdp/common.py @@ -50,3 +50,39 @@ def get_exp_lut_array(time_step, time_constant, shift=0): # Concatenate with the header header = numpy.array([len(a), shift], dtype="uint16") return numpy.concatenate((header, a.astype("uint16"))).view("uint32") + + +def _get_last_non_zero_value(values): + """ Get the last non-zero value (rescaled) from a LUT array + """ + # Either the ultimate or penultimate value must be non-zero as generated + # from the above 
function + if values[-1] != 0: + return values[-1] / STDP_FIXED_POINT_ONE + return values[-2] / STDP_FIXED_POINT_ONE + + +def get_min_lut_value( + exp_lut_array, time_step=None, max_stdp_spike_delta=None): + """ Get the smallest non-zero value of an exponential lookup array,\ + or None if no such value + + :param numpy.ndarray exp_lut_array: The lookup array + :param float time_step: The time step in milliseconds + :param float max_stdp_spike_delta: The maximum expected difference between + spike times in milliseconds + :rtype: float + """ + values = exp_lut_array.view("uint16") + + # If there isn't a time step and a limit + if time_step is None or max_stdp_spike_delta is None: + return _get_last_non_zero_value(values) + + # If there is a time step and limit, use it to work out which value + pos = int(math.ceil(max_stdp_spike_delta / time_step)) + 1 + if pos >= len(values): + return _get_last_non_zero_value(values) + + # Make sure we haven't just picked the last value which happens to be 0 + return _get_last_non_zero_value(values[:pos]) diff --git a/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/abstract_timing_dependence.py b/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/abstract_timing_dependence.py index bf0a3af9f7..d3e5e87c58 100644 --- a/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/abstract_timing_dependence.py +++ b/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/abstract_timing_dependence.py @@ -91,3 +91,17 @@ def get_parameter_names(self): :rtype: iterable(str) """ + + @abstractmethod + def minimum_delta(self, max_stdp_spike_delta): + """ The smallest non-zero changes that will be passed to the weight\ + rule + + :param float max_stdp_spike_delta: The maximum expected time difference + between two spikes in milliseconds + + :return: An array of minimum change values, one for potentiation,\ + one for depression. 
If this requires a 2-parameter weight rule, + each of the values of the arrays must then be an array of arrays + :rtype: list of (float or list of float) + """ diff --git a/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_pfister_spike_triplet.py b/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_pfister_spike_triplet.py index f73fcd725b..59eb3601f9 100644 --- a/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_pfister_spike_triplet.py +++ b/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_pfister_spike_triplet.py @@ -17,11 +17,11 @@ BYTES_PER_SHORT, BYTES_PER_WORD) from spynnaker.pyNN.data import SpynnakerDataView from spynnaker.pyNN.models.neuron.plasticity.stdp.common import ( - get_exp_lut_array) -from spynnaker.pyNN.models.neuron.plasticity.stdp.timing_dependence import ( - AbstractTimingDependence) -from spynnaker.pyNN.models.neuron.plasticity.stdp.synapse_structure import ( - SynapseStructureWeightOnly) + get_exp_lut_array, get_min_lut_value) +from spynnaker.pyNN.models.neuron.plasticity.stdp.timing_dependence\ + import AbstractTimingDependence +from spynnaker.pyNN.models.neuron.plasticity.stdp.synapse_structure\ + import SynapseStructureWeightOnly class TimingDependencePfisterSpikeTriplet(AbstractTimingDependence): @@ -202,3 +202,20 @@ def synaptic_structure(self): @overrides(AbstractTimingDependence.get_parameter_names) def get_parameter_names(self): return self.__PARAM_NAMES + + @overrides(AbstractTimingDependence.minimum_delta) + def minimum_delta(self, max_stdp_spike_delta): + ts = SpynnakerDataView.get_simulation_time_step_ms() + + # The minimums for potentiation + min_decayed_r1 = get_min_lut_value(self.__tau_plus_data) + min_decayed_r1_o2 = min_decayed_r1 * get_min_lut_value( + self.__tau_y_data, ts, max_stdp_spike_delta) + + # The minimums for depression + min_decayed_o1 = get_min_lut_value(self.__tau_minus_data) + 
min_decayed_o1_r2 = min_decayed_o1 * get_min_lut_value( + self.__tau_x_data, ts, max_stdp_spike_delta) + + return [[min_decayed_r1, min_decayed_r1_o2], + [min_decayed_o1, min_decayed_o1_r2]] diff --git a/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_recurrent.py b/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_recurrent.py index a592b507a1..e2c99c938e 100644 --- a/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_recurrent.py +++ b/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_recurrent.py @@ -198,3 +198,8 @@ def synaptic_structure(self): @overrides(AbstractTimingDependence.get_parameter_names) def get_parameter_names(self): return self.__PARAM_NAMES + + @overrides(AbstractTimingDependence.minimum_delta) + def minimum_delta(self, max_stdp_spike_delta): + # This rule always has a delta of 1 + return [1.0, 1.0] diff --git a/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_spike_nearest_pair.py b/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_spike_nearest_pair.py index c254415cad..dff6a61649 100644 --- a/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_spike_nearest_pair.py +++ b/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_spike_nearest_pair.py @@ -16,7 +16,7 @@ from spinn_front_end_common.utilities.constants import BYTES_PER_WORD from spynnaker.pyNN.data import SpynnakerDataView from spynnaker.pyNN.models.neuron.plasticity.stdp.common import ( - get_exp_lut_array) + get_exp_lut_array, get_min_lut_value) from spynnaker.pyNN.models.neuron.plasticity.stdp.synapse_structure import ( SynapseStructureWeightOnly) from .abstract_timing_dependence import AbstractTimingDependence @@ -161,3 +161,10 @@ def synaptic_structure(self): @overrides(AbstractTimingDependence.get_parameter_names) def 
get_parameter_names(self): return self.__PARAM_NAMES + + @overrides(AbstractTimingDependence.minimum_delta) + def minimum_delta(self, max_stdp_spike_delta): + ts = SpynnakerDataView.get_simulation_time_step_ms() + return [ + get_min_lut_value(self.__tau_plus_data, ts, max_stdp_spike_delta), + get_min_lut_value(self.__tau_minus_data, ts, max_stdp_spike_delta)] diff --git a/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_spike_pair.py b/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_spike_pair.py index 91b7e8d963..9398b96d86 100644 --- a/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_spike_pair.py +++ b/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_spike_pair.py @@ -17,7 +17,7 @@ BYTES_PER_SHORT, BYTES_PER_WORD) from spynnaker.pyNN.data import SpynnakerDataView from spynnaker.pyNN.models.neuron.plasticity.stdp.common import ( - get_exp_lut_array) + get_exp_lut_array, get_min_lut_value) from .abstract_timing_dependence import AbstractTimingDependence from spynnaker.pyNN.models.neuron.plasticity.stdp.synapse_structure import ( SynapseStructureWeightOnly) @@ -162,3 +162,10 @@ def synaptic_structure(self): @overrides(AbstractTimingDependence.get_parameter_names) def get_parameter_names(self): return self.__PARAM_NAMES + + @overrides(AbstractTimingDependence.minimum_delta) + def minimum_delta(self, max_stdp_spike_delta): + ts = SpynnakerDataView.get_simulation_time_step_ms() + return [ + get_min_lut_value(self.__tau_plus_data, ts, max_stdp_spike_delta), + get_min_lut_value(self.__tau_minus_data, ts, max_stdp_spike_delta)] diff --git a/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_vogels_2011.py b/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_vogels_2011.py index cefbf28d5c..316e755ed6 100644 --- 
a/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_vogels_2011.py +++ b/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_vogels_2011.py @@ -22,7 +22,7 @@ from spynnaker.pyNN.models.neuron.plasticity.stdp.synapse_structure import ( SynapseStructureWeightOnly) from spynnaker.pyNN.models.neuron.plasticity.stdp.common import ( - float_to_fixed, get_exp_lut_array) + float_to_fixed, get_exp_lut_array, get_min_lut_value) class TimingDependenceVogels2011(AbstractTimingDependence): @@ -163,3 +163,9 @@ def synaptic_structure(self): @overrides(AbstractTimingDependence.get_parameter_names) def get_parameter_names(self): return self.__PARAM_NAMES + + @overrides(AbstractTimingDependence.minimum_delta) + def minimum_delta(self, max_stdp_spike_delta): + ts = SpynnakerDataView.get_simulation_time_step_ms() + min_tau = get_min_lut_value(self.__tau_data, ts, max_stdp_spike_delta) + return [min_tau - self.__alpha, min_tau] diff --git a/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/abstract_weight_dependence.py b/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/abstract_weight_dependence.py index 9cc2f4e398..b6f65610f9 100644 --- a/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/abstract_weight_dependence.py +++ b/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/abstract_weight_dependence.py @@ -79,3 +79,19 @@ def weight_maximum(self): :rtype: float """ + + @abstractproperty + def weight_minimum(self): + """ The minimum weight that will ever be set in a synapse as a result\ + of this rule + + :rtype: float + """ + + @abstractmethod + def weight_change_minimum(self, min_delta): + """ The minimum non-zero change in weight that will occur + + :param list min_delta: The minimum delta values from the timing rules + :rtype: float + """ diff --git a/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_additive.py 
b/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_additive.py index 647861c5c5..d4d745e564 100644 --- a/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_additive.py +++ b/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_additive.py @@ -122,6 +122,20 @@ def weight_maximum(self): """ return self.__w_max + @property + def weight_minimum(self): + """ The minimum weight that will ever be set in a synapse as a result\ + of this rule + + :rtype: float + """ + return self.__w_min + + @overrides(AbstractWeightDependence.weight_change_minimum) + def weight_change_minimum(self, min_delta): + pot, dep = min_delta + return min(pot * self.A_plus, dep * self.A_minus) + @overrides(AbstractWeightDependence.get_parameter_names) def get_parameter_names(self): return self.__PARAM_NAMES diff --git a/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_additive_triplet.py b/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_additive_triplet.py index 6fddf10de3..cd6256fae3 100644 --- a/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_additive_triplet.py +++ b/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_additive_triplet.py @@ -17,6 +17,7 @@ from spinn_front_end_common.utilities.constants import BYTES_PER_WORD from .abstract_has_a_plus_a_minus import AbstractHasAPlusAMinus from .abstract_weight_dependence import AbstractWeightDependence + # Six words per synapse type _SPACE_PER_SYNAPSE_TYPE = 6 * BYTES_PER_WORD @@ -150,6 +151,7 @@ def write_parameters( data_type=DataType.S1615) @property + @overrides(AbstractWeightDependence.weight_maximum) def weight_maximum(self): """ The maximum weight that will ever be set in a synapse as a result @@ -159,6 +161,18 @@ def weight_maximum(self): """ return self.__w_max + @property + @overrides(AbstractWeightDependence.weight_minimum) 
+ def weight_minimum(self): + return self.__w_min + + @overrides(AbstractWeightDependence.weight_change_minimum) + def weight_change_minimum(self, min_delta): + (a2_plus, a3_plus), (a2_minus, a3_minus) = min_delta + min_pot = a2_plus * self.A_plus + a3_plus * self.__a3_plus + min_dep = a2_minus * self.A_minus + a3_minus * self.__a3_minus + return min(min_pot, min_dep) + @overrides(AbstractWeightDependence.get_parameter_names) def get_parameter_names(self): return self.__PARAM_NAMES diff --git a/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_multiplicative.py b/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_multiplicative.py index 8d44883c90..85a6f8abd5 100644 --- a/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_multiplicative.py +++ b/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_multiplicative.py @@ -114,6 +114,20 @@ def weight_maximum(self): """ return self.__w_max + @property + def weight_minimum(self): + """ The minimum weight that will ever be set in a synapse as a result\ + of this rule + + :rtype: float + """ + return self.__w_min + + @overrides(AbstractWeightDependence.weight_change_minimum) + def weight_change_minimum(self, min_delta): + pot, dep = min_delta + return min(pot * self.A_plus, dep * self.A_minus) + @overrides(AbstractWeightDependence.get_parameter_names) def get_parameter_names(self): return self.__PARAM_NAMES diff --git a/spynnaker/pyNN/models/neuron/population_machine_local_only_combined_vertex.py b/spynnaker/pyNN/models/neuron/population_machine_local_only_combined_vertex.py index b04b6a6f17..a630046a68 100644 --- a/spynnaker/pyNN/models/neuron/population_machine_local_only_combined_vertex.py +++ b/spynnaker/pyNN/models/neuron/population_machine_local_only_combined_vertex.py @@ -67,7 +67,7 @@ class PopulationMachineLocalOnlyCombinedVertex( __slots__ = [ "__key", - "__ring_buffer_shifts", + "__min_weights", 
"__weight_scales", "__slice_index", "__neuron_data", @@ -126,7 +126,7 @@ class REGIONS(Enum): def __init__( self, sdram, label, app_vertex, vertex_slice, slice_index, - ring_buffer_shifts, weight_scales, neuron_data, + min_weights, weight_scales, neuron_data, max_atoms_per_core): """ :param ~pacman.model.resources.AbstractSDRAM sdram: @@ -138,8 +138,8 @@ def __init__( The slice of the population that this implements :param int slice_index: The index of the slice in the ordered list of slices - :param list(int) ring_buffer_shifts: - The shifts to apply to convert ring buffer values to S1615 values + :param list(int) min_weights: + The min_weights used in the calculations :param list(int) weight_scales: The scaling to apply to weights to store them in the synapses :param int all_syn_block_sz: The maximum size of the synapses in bytes @@ -157,7 +157,7 @@ def __init__( self._PROFILE_TAG_LABELS, self.__get_binary_file_name(app_vertex)) self.__key = None self.__slice_index = slice_index - self.__ring_buffer_shifts = ring_buffer_shifts + self.__min_weights = min_weights self.__weight_scales = weight_scales self.__neuron_data = neuron_data self.__max_atoms_per_core = max_atoms_per_core @@ -255,7 +255,7 @@ def generate_data_specification(self, spec, placement): self.vertex_slice)) self._write_common_data_spec(spec, rec_regions) - self._write_neuron_data_spec(spec, self.__ring_buffer_shifts) + self._write_neuron_data_spec(spec, self.__min_weights) self.__write_local_only_data(spec) diff --git a/spynnaker/pyNN/models/neuron/population_machine_neurons.py b/spynnaker/pyNN/models/neuron/population_machine_neurons.py index b11f1cbd1e..28f6781e2f 100644 --- a/spynnaker/pyNN/models/neuron/population_machine_neurons.py +++ b/spynnaker/pyNN/models/neuron/population_machine_neurons.py @@ -19,7 +19,9 @@ from pacman.utilities.utility_calls import get_field_based_keys +from spinn_front_end_common.interface.ds import DataType from spinn_front_end_common.interface.provenance import 
ProvenanceWriter + from spynnaker.pyNN.data import SpynnakerDataView from spynnaker.pyNN.utilities.constants import SPIKE_PARTITION_ID from spynnaker.pyNN.utilities.utility_calls import get_n_bits @@ -167,14 +169,14 @@ def _parse_neuron_provenance(self, x, y, p, provenance_data): db.insert_core( x, y, p, "Latest_Send_time", neuron_prov.latest_send) - def _write_neuron_data_spec(self, spec, ring_buffer_shifts): + def _write_neuron_data_spec(self, spec, min_weights): """ Write the data specification of the neuron data. :param ~data_specification.DataSpecificationGenerator spec: The data specification to write to - :param list(int) ring_buffer_shifts: - The shifts to apply to convert ring buffer values to S1615 values + :param list(float) min_weights: + The computed minimum weights to be used in the simulation """ # Get and store the key routing_info = SpynnakerDataView.get_routing_infos() @@ -182,7 +184,7 @@ def _write_neuron_data_spec(self, spec, ring_buffer_shifts): self, SPIKE_PARTITION_ID)) # Write the neuron core parameters - self._write_neuron_core_parameters(spec, ring_buffer_shifts) + self._write_neuron_core_parameters(spec, min_weights) # Write the current source parameters self._write_current_source_parameters(spec) @@ -197,8 +199,6 @@ def _rewrite_neuron_data_spec(self, spec): :param ~data_specification.DataSpecificationGenerator spec: The data specification to write to - :param list(int) ring_buffer_shifts: - The shifts to apply to convert ring buffer values to S1615 values """ # Write the current source parameters self._write_current_source_parameters(spec) @@ -207,14 +207,14 @@ def _rewrite_neuron_data_spec(self, spec): self._neuron_data.write_data( spec, self._vertex_slice, self._neuron_regions, False) - def _write_neuron_core_parameters(self, spec, ring_buffer_shifts): + def _write_neuron_core_parameters(self, spec, min_weights): """ Write the neuron parameters region. 
:param ~data_specification.DataSpecificationGenerator spec: The data specification to write to - :param list(int) ring_buffer_shifts: - The shifts to apply to convert ring buffer values to S1615 values + :param list(float) min_weights: + The computed minimum weights to be used in the simulation """ # pylint: disable=too-many-arguments n_atoms = self._vertex_slice.n_atoms @@ -257,12 +257,14 @@ def _write_neuron_core_parameters(self, spec, ring_buffer_shifts): # Write the number of colour bits spec.write_value(self._app_vertex.n_colour_bits) - # Write the ring buffer data + # Write the min weights data # This is only the synapse types that need a ring buffer i.e. not # those stored in synapse dynamics n_synapse_types = self._app_vertex.neuron_impl.get_n_synapse_types() spec.write_value(n_synapse_types) - spec.write_array(ring_buffer_shifts) + # Write the minimum weights + for min_w in min_weights: + spec.write_value(min_w, data_type=DataType.S1615) # Write the keys spec.write_array(keys) diff --git a/spynnaker/pyNN/models/neuron/population_machine_synapses.py b/spynnaker/pyNN/models/neuron/population_machine_synapses.py index f589ab23eb..d883b74482 100644 --- a/spynnaker/pyNN/models/neuron/population_machine_synapses.py +++ b/spynnaker/pyNN/models/neuron/population_machine_synapses.py @@ -14,6 +14,7 @@ from spinn_utilities.overrides import overrides from spinn_utilities.abstract_base import abstractproperty, abstractmethod +from spinn_front_end_common.interface.ds import DataType from spinn_front_end_common.utilities.helpful_functions import ( locate_memory_region_for_placement) from spinn_front_end_common.abstract_models import ( @@ -120,15 +121,14 @@ def regeneratable_sdram_blocks_and_sizes(self, placement): self._synaptic_matrices.on_chip_generated_matrix_size)] def _write_synapse_data_spec( - self, spec, ring_buffer_shifts, weight_scales, - structural_sz): + self, spec, min_weights, weight_scales, structural_sz): """ Write the data specification for the synapse 
data. :param ~data_specification.DataSpecificationGenerator spec: The data specification to write to - :param list(int) ring_buffer_shifts: - The shifts to apply to convert ring buffer values to S1615 values + :param list(float) min_weights: + The computed minimum weights to be used in the simulation :param list(int) weight_scales: The scaling to apply to weights to store them in the synapses :param int all_syn_block_sz: The maximum size of the synapses in bytes @@ -136,7 +136,7 @@ def _write_synapse_data_spec( :param int n_neuron_bits: The number of bits to use for neuron ids """ # Write the synapse parameters - self._write_synapse_parameters(spec, ring_buffer_shifts) + self._write_synapse_parameters(spec, min_weights) # Write the synaptic matrices self._synaptic_matrices.generate_data() @@ -177,14 +177,14 @@ def _write_synapse_data_spec( size=4, label='synapseDynamicsStructuralParams', reference=self._synapse_references.structural_dynamics) - def _write_synapse_parameters(self, spec, ring_buffer_shifts): + def _write_synapse_parameters(self, spec, min_weights): """ Write the synapse parameters data region. 
:param ~data_specification.DataSpecificationGenerator spec: The data specification to write to - :param list(int) ring_buffer_shifts: - The shifts to apply to convert ring buffer values to S1615 values + :param list(float) min_weights: + The computed minimum weights to be used in the simulation """ # Reserve space spec.reserve_memory_region( @@ -209,7 +209,11 @@ def _write_synapse_parameters(self, spec, ring_buffer_shifts): spec.write_value(get_n_bits(max_delay)) spec.write_value(int(self._app_vertex.drop_late_spikes)) spec.write_value(self._app_vertex.incoming_spike_buffer_size) - spec.write_array(ring_buffer_shifts) + # Write the minimum weights and the reciprocals (no machine division) + for min_w in min_weights: + spec.write_value(min_w, data_type=DataType.S1615) + for min_w in min_weights: + spec.write_value(1 / min_w, data_type=DataType.S1615) @overrides(AbstractSynapseExpandable.gen_on_machine) def gen_on_machine(self): diff --git a/spynnaker/pyNN/models/neuron/population_machine_vertex.py b/spynnaker/pyNN/models/neuron/population_machine_vertex.py index 5033f72e79..c2ec750d0a 100644 --- a/spynnaker/pyNN/models/neuron/population_machine_vertex.py +++ b/spynnaker/pyNN/models/neuron/population_machine_vertex.py @@ -75,7 +75,7 @@ class PopulationMachineVertex( "__synaptic_matrices", "__neuron_data", "__key", - "__ring_buffer_shifts", + "__min_weights", "__weight_scales", "__structural_sz", "__slice_index", @@ -151,8 +151,8 @@ class REGIONS(Enum): def __init__( self, sdram, label, app_vertex, vertex_slice, slice_index, - ring_buffer_shifts, weight_scales, - structural_sz, max_atoms_per_core, synaptic_matrices, neuron_data): + min_weights, weight_scales, structural_sz, max_atoms_per_core, + synaptic_matrices, neuron_data): """ :param ~pacman.model.resources.AbstractSDRAM sdram: The SDRAM used by the vertex @@ -163,8 +163,8 @@ def __init__( The slice of the population that this implements :param int slice_index: The index of the slice in the ordered list of slices 
- :param list(int) ring_buffer_shifts: - The shifts to apply to convert ring buffer values to S1615 values + :param list(float) min_weights: + The computed minimum weights to be used in the simulation :param list(int) weight_scales: The scaling to apply to weights to store them in the synapses :param int structural_sz: The size of the structural data @@ -180,7 +180,7 @@ def __init__( self._PROFILE_TAG_LABELS, self.__get_binary_file_name(app_vertex)) self.__key = None self.__slice_index = slice_index - self.__ring_buffer_shifts = ring_buffer_shifts + self.__min_weights = min_weights self.__weight_scales = weight_scales self.__structural_sz = structural_sz self.__max_atoms_per_core = max_atoms_per_core @@ -296,11 +296,11 @@ def generate_data_specification(self, spec, placement): self.vertex_slice)) self._write_common_data_spec(spec, rec_regions) - self._write_neuron_data_spec(spec, self.__ring_buffer_shifts) + self._write_neuron_data_spec(spec, self.__min_weights) self._write_synapse_data_spec( - spec, self.__ring_buffer_shifts, - self.__weight_scales, self.__structural_sz) + spec, self.__min_weights, self.__weight_scales, + self.__structural_sz) # End the writing of this specification: spec.end_specification() @@ -314,7 +314,7 @@ def regenerate_data_specification(self, spec, placement): if self.__regenerate_synapse_data: self._write_synapse_data_spec( - spec, self.__ring_buffer_shifts, + spec, self.__min_weights, self.__weight_scales, self.__structural_sz) self.__regenerate_synapse_data = False diff --git a/spynnaker/pyNN/models/neuron/population_neurons_machine_vertex.py b/spynnaker/pyNN/models/neuron/population_neurons_machine_vertex.py index 16582f4603..5d3c8b6d68 100644 --- a/spynnaker/pyNN/models/neuron/population_neurons_machine_vertex.py +++ b/spynnaker/pyNN/models/neuron/population_neurons_machine_vertex.py @@ -60,7 +60,7 @@ class PopulationNeuronsMachineVertex( __slots__ = [ "__key", "__sdram_partition", - "__ring_buffer_shifts", + "__min_weights", 
"__weight_scales", "__slice_index", "__neuron_data", @@ -105,8 +105,7 @@ class REGIONS(Enum): def __init__( self, sdram, label, app_vertex, vertex_slice, slice_index, - ring_buffer_shifts, weight_scales, - neuron_data, max_atoms_per_core): + min_weights, weight_scales, neuron_data, max_atoms_per_core): """ :param ~pacman.model.resources.AbstractSDRAM sdram: The SDRAM used by the vertex @@ -117,8 +116,8 @@ def __init__( The slice of the population that this implements :param int slice_index: The index of the slice in the ordered list of slices - :param list(int) ring_buffer_shifts: - The shifts to apply to convert ring buffer values to S1615 values + :param list(float) min_weights: + The computed minimum weights to be used in the simulation :param list(int) weight_scales: The scaling to apply to weights to store them in the synapses :param NeuronData neuron_data: @@ -133,7 +132,7 @@ def __init__( self.__key = None self.__sdram_partition = None self.__slice_index = slice_index - self.__ring_buffer_shifts = ring_buffer_shifts + self.__min_weights = min_weights self.__weight_scales = weight_scales self.__neuron_data = neuron_data self.__max_atoms_per_core = max_atoms_per_core @@ -229,7 +228,7 @@ def generate_data_specification(self, spec, placement): self.vertex_slice) self._write_common_data_spec(spec, rec_regions) - self._write_neuron_data_spec(spec, self.__ring_buffer_shifts) + self._write_neuron_data_spec(spec, self.__min_weights) # Write information about SDRAM spec.reserve_memory_region( diff --git a/spynnaker/pyNN/models/neuron/population_synapses_machine_vertex_lead.py b/spynnaker/pyNN/models/neuron/population_synapses_machine_vertex_lead.py index 0fb86b4510..5b2bcfe2d6 100644 --- a/spynnaker/pyNN/models/neuron/population_synapses_machine_vertex_lead.py +++ b/spynnaker/pyNN/models/neuron/population_synapses_machine_vertex_lead.py @@ -32,7 +32,7 @@ class PopulationSynapsesMachineVertexLead( __slots__ = [ "__synaptic_matrices", - "__ring_buffer_shifts", + 
"__min_weights", "__weight_scales", "__structural_sz", "__synapse_references", @@ -41,9 +41,8 @@ class PopulationSynapsesMachineVertexLead( def __init__( self, sdram, label, app_vertex, - vertex_slice, ring_buffer_shifts, weight_scales, - structural_sz, synapse_references, max_atoms_per_core, - synaptic_matrices): + vertex_slice, min_weights, weight_scales, structural_sz, + synapse_references, max_atoms_per_core, synaptic_matrices): """ :param ~pacman.model.resources.AbstractSDRAM sdram: The SDRAM used by the vertex @@ -52,10 +51,12 @@ def __init__( The associated application vertex :param ~pacman.model.graphs.common.Slice vertex_slice: The slice of the population that this implements + :param list(float) min_weights: + The computed minimum weights to be used in the simulation """ super(PopulationSynapsesMachineVertexLead, self).__init__( sdram, label, app_vertex, vertex_slice) - self.__ring_buffer_shifts = ring_buffer_shifts + self.__min_weights = min_weights self.__weight_scales = weight_scales self.__structural_sz = structural_sz self.__synapse_references = synapse_references @@ -98,7 +99,7 @@ def generate_data_specification(self, spec, placement): self._write_common_data_spec(spec, rec_regions) self._write_synapse_data_spec( - spec, self.__ring_buffer_shifts, + spec, self.__min_weights, self.__weight_scales, self.__structural_sz) # Write information about SDRAM diff --git a/spynnaker/pyNN/models/neuron/synapse_dynamics/abstract_synapse_dynamics.py b/spynnaker/pyNN/models/neuron/synapse_dynamics/abstract_synapse_dynamics.py index 9603703521..5bd4449624 100644 --- a/spynnaker/pyNN/models/neuron/synapse_dynamics/abstract_synapse_dynamics.py +++ b/spynnaker/pyNN/models/neuron/synapse_dynamics/abstract_synapse_dynamics.py @@ -138,7 +138,7 @@ def get_delay_maximum(self, connector, synapse_info): Get the maximum delay for the synapses. 
:param AbstractConnector connector: - :param ~numpy.ndarray delays: + :param SynapseInformation synapse_info: """ return connector.get_delay_maximum(synapse_info) @@ -159,6 +159,7 @@ def get_delay_variance(self, connector, delays, synapse_info): Get the variance in delay for the synapses. :param AbstractConnector connector: + :param SynapseInformation synapse_info: :param ~numpy.ndarray delays: """ return connector.get_delay_variance(delays, synapse_info) @@ -168,7 +169,7 @@ def get_weight_mean(self, connector, synapse_info): Get the mean weight for the synapses. :param AbstractConnector connector: - :param ~numpy.ndarray weights: + :param SynapseInformation synapse_info: """ return connector.get_weight_mean(synapse_info.weights, synapse_info) @@ -177,10 +178,21 @@ def get_weight_maximum(self, connector, synapse_info): Get the maximum weight for the synapses. :param AbstractConnector connector: - :param ~numpy.ndarray weights: + :param SynapseInformation synapse_info: """ return connector.get_weight_maximum(synapse_info) + def get_weight_minimum(self, connector, weight_random_sigma, synapse_info): + """ Get the minimum weight for the synapses + + :param AbstractConnector connector: + :param float weight_random_sigma: + :param SynapseInformation synapse_info: + """ + # pylint: disable=too-many-arguments + return connector.get_weight_minimum( + synapse_info.weights, weight_random_sigma, synapse_info) + def get_weight_variance(self, connector, weights, synapse_info): """ Get the variance in weight for the synapses. @@ -214,6 +226,22 @@ def get_synapse_id_by_target(self, target): # pylint: disable=unused-argument return None + def calculate_min_weight(self, min_weights, max_stdp_spike_delta, + weight_scale, conn_weight_min, synapse_type): + """ Do any further calculations required to work out the minimum + weight value used on the machine. 
+ + :param list min_weights: the current minimum weights + :param int max_stdp_spike_delta: the max time between spikes + :param float weight_scale: the amount to scale the weights, from input + :param float conn_weight_min: the weight minimum from the connector + :param int synapse_type: the synapse ID for which to calculate the min + :rtype: list + """ + # pylint: disable=unused-argument + # By default no further calculation is required + return min_weights + def get_connected_vertices(self, s_info, source_vertex, target_vertex): """ Get the machine vertices that are connected to each other with diff --git a/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_stdp.py b/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_stdp.py index f837436132..fbd94ccdf8 100644 --- a/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_stdp.py +++ b/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_stdp.py @@ -21,7 +21,7 @@ from spynnaker.pyNN.data import SpynnakerDataView from spynnaker.pyNN.exceptions import ( SynapticConfigurationException, InvalidParameterType) -from spynnaker.pyNN.utilities.utility_calls import get_n_bits +from spynnaker.pyNN.utilities.utility_calls import get_n_bits, float_gcd from .abstract_plastic_synapse_dynamics import AbstractPlasticSynapseDynamics from .abstract_synapse_dynamics_structural import ( AbstractSynapseDynamicsStructural) @@ -496,6 +496,35 @@ def get_weight_maximum(self, connector, synapse_info): # the weight dependence return max(w_max, self.__weight_dependence.weight_maximum) + @overrides(AbstractPlasticSynapseDynamics.get_weight_minimum) + def get_weight_minimum(self, connector, weight_random_sigma, synapse_info): + w_min = super().get_weight_minimum( + connector, weight_random_sigma, synapse_info) + # The minimum weight is the smallest that it could be set to from + # the weight dependence + return min(w_min, self.__weight_dependence.weight_minimum) + + def __get_weight_min_delta(self, 
max_stdp_spike_delta): + """ Get the minimum non-zero weight change + + :param float max_stdp_spike_delta: The maximum expected time between + spikes in milliseconds + """ + return self.__weight_dependence.weight_change_minimum( + self.__timing_dependence.minimum_delta(max_stdp_spike_delta)) + + @overrides(AbstractPlasticSynapseDynamics.calculate_min_weight) + def calculate_min_weight(self, min_weights, max_stdp_spike_delta, + weight_scale, conn_weight_min, synapse_type): + min_delta = self.__get_weight_min_delta(max_stdp_spike_delta) + min_delta *= weight_scale + if min_delta is not None and min_delta != 0: + # This also depends on the earlier calculated minimum + min_delta = float_gcd(min_delta, conn_weight_min) + min_weights[synapse_type] = min( + min_weights[synapse_type], min_delta) + return min_weights + @overrides(AbstractPlasticSynapseDynamics.get_parameter_names) def get_parameter_names(self): names = ['weight', 'delay'] diff --git a/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_structural_static.py b/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_structural_static.py index f5f720bd07..5b768fefe8 100644 --- a/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_structural_static.py +++ b/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_structural_static.py @@ -16,7 +16,8 @@ from pyNN.standardmodels.synapses import StaticSynapse from spinn_utilities.overrides import overrides from spynnaker.pyNN.exceptions import SynapticConfigurationException -from spynnaker.pyNN.utilities.utility_calls import create_mars_kiss_seeds +from spynnaker.pyNN.utilities.utility_calls import ( + create_mars_kiss_seeds, float_gcd) from .abstract_synapse_dynamics_structural import ( AbstractSynapseDynamicsStructural) from .synapse_dynamics_structural_common import ( @@ -247,6 +248,12 @@ def get_weight_maximum(self, connector, synapse_info): w_m = super().get_weight_maximum(connector, synapse_info) return max(w_m, 
self.__initial_weight) + @overrides(SynapseDynamicsStatic.get_weight_minimum) + def get_weight_minimum(self, connector, weight_random_sigma, synapse_info): + w_min = super().get_weight_minimum( + connector, weight_random_sigma, synapse_info) + return min(w_min, self.__initial_weight) + @overrides(SynapseDynamicsStatic.get_delay_maximum) def get_delay_maximum(self, connector, synapse_info): d_m = super().get_delay_maximum(connector, synapse_info) @@ -261,6 +268,18 @@ def get_delay_minimum(self, connector, synapse_info): def get_delay_variance(self, connector, delays, synapse_info): return 0.0 + @overrides(SynapseDynamicsStatic.calculate_min_weight) + def calculate_min_weight(self, min_weights, max_stdp_spike_delta, + weight_scale, conn_weight_min, synapse_type): + weight_min = self.__initial_weight + weight_min *= weight_scale + if weight_min != 0: + weight_min = float_gcd(min_weights[synapse_type], + weight_min) + min_weights[synapse_type] = min( + min_weights[synapse_type], weight_min) + return min_weights + @overrides(_Common.get_seeds) def get_seeds(self, app_vertex=None): if app_vertex: diff --git a/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_structural_stdp.py b/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_structural_stdp.py index 783e841f48..5f3b9b97a0 100644 --- a/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_structural_stdp.py +++ b/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_structural_stdp.py @@ -255,6 +255,12 @@ def get_weight_maximum(self, connector, synapse_info): w_max = super().get_weight_maximum(connector, synapse_info) return max(w_max, self.__initial_weight) + @overrides(SynapseDynamicsSTDP.get_weight_minimum) + def get_weight_minimum(self, connector, weight_random_sigma, synapse_info): + w_min = super().get_weight_minimum( + connector, weight_random_sigma, synapse_info) + return min(w_min, self.__initial_weight) + @overrides(SynapseDynamicsSTDP.get_delay_maximum) def 
get_delay_maximum(self, connector, synapse_info): d_m = super().get_delay_maximum(connector, synapse_info) diff --git a/spynnaker/pyNN/utilities/utility_calls.py b/spynnaker/pyNN/utilities/utility_calls.py index 2229c26342..72dcd3b532 100644 --- a/spynnaker/pyNN/utilities/utility_calls.py +++ b/spynnaker/pyNN/utilities/utility_calls.py @@ -27,6 +27,7 @@ from spinn_utilities.safe_eval import SafeEval from spinn_utilities.config_holder import get_config_bool from spinn_utilities.logger_utils import warn_once +from spinn_front_end_common.interface.ds import DataType from spinn_front_end_common.utilities.exceptions import ConfigurationException from spynnaker.pyNN.utilities.random_stats import ( RandomStatsExponentialImpl, RandomStatsGammaImpl, RandomStatsLogNormalImpl, @@ -48,6 +49,8 @@ ARBITRARY_Y = 13031301 MARS_C_MAX = 698769068 +FLOAT_GCD_TOLERANCE = DataType.S1615.decode_from_int(1) + STATS_BY_NAME = { 'binomial': RandomStatsBinomialImpl(), 'gamma': RandomStatsGammaImpl(), @@ -389,6 +392,45 @@ def get_n_bits(n_values): return int(math.ceil(math.log2(n_values))) +def float_gcd(a, b): + """ + Floating point gcd of two values + + :param float a: first input + :param float b: second input + :return: the gcd of the two values (to a specified tolerance) + :rtype: float + """ + if (a < b): + # pylint: disable-next=arguments-out-of-order + return float_gcd(b, a) + + # base case + if (abs(b) < FLOAT_GCD_TOLERANCE): + return a + else: + return (float_gcd(b, a - math.floor(a / b) * b)) + + +def float_gcd_of_array(input_array): + """ + Work out the floating point gcd of an array of numbers + + :param numpy.ndarray input_array: the input array of floats + :return: the floating point gcd of the array + :rtype: float + """ + if len(input_array) == 1: + return input_array[0] + + gcd = float_gcd(input_array[0], input_array[1]) + + for i in range(2, len(input_array)): + gcd = float_gcd(gcd, input_array[i]) + + return gcd + + def get_time_to_write_us(n_bytes, n_cores): """ Determine how long 
a write of a given number of bytes will take in us. diff --git a/spynnaker_integration_tests/test_connectors/test_kernel_connector.py b/spynnaker_integration_tests/test_connectors/test_kernel_connector.py index 1659a06d2a..04c24853a9 100644 --- a/spynnaker_integration_tests/test_connectors/test_kernel_connector.py +++ b/spynnaker_integration_tests/test_connectors/test_kernel_connector.py @@ -75,12 +75,10 @@ def test_oddsquarek_run(self): self.assertEqual(25, len(weightsdelays)) list10 = (1, 0, 5.0, 20.0) list11 = (1, 1, 7.0, 10.0) - [self.assertEqual(list10[i], weightsdelays[1][i]) for i in range(4)] - [self.assertEqual(list11[i], weightsdelays[5][i]) for i in range(4)] - # NOTE: you can probably replace the above in later versions of python3 - # with the following, but in 3.5 it generates a FutureWarning -# self.assertSequenceEqual(list10, weightsdelays[1]) -# self.assertSequenceEqual(list11, weightsdelays[5]) + [self.assertAlmostEqual( + list10[i], weightsdelays[1][i], places=3) for i in range(4)] + [self.assertAlmostEqual( + list11[i], weightsdelays[5][i], places=3) for i in range(4)] def test_evensquarek_run(self): (psh, psw, ksh, ksw) = (4, 4, 2, 2) @@ -89,8 +87,10 @@ def test_evensquarek_run(self): self.assertEqual(9, len(weightsdelays)) list01 = (0, 1, 5.0, 20.0) list03 = (0, 3, 7.0, 10.0) - [self.assertEqual(list01[i], weightsdelays[1][i]) for i in range(4)] - [self.assertEqual(list03[i], weightsdelays[5][i]) for i in range(4)] + [self.assertAlmostEqual( + list01[i], weightsdelays[1][i], places=3) for i in range(4)] + [self.assertAlmostEqual( + list03[i], weightsdelays[5][i], places=3) for i in range(4)] def test_nonsquarek_run(self): (psh, psw, ksh, ksw) = (4, 4, 1, 3) @@ -99,8 +99,10 @@ def test_nonsquarek_run(self): self.assertEqual(10, len(weightsdelays)) list10 = (1, 0, 7.0, 10.0) list42 = (4, 2, 5.0, 20.0) - [self.assertEqual(list10[i], weightsdelays[1][i]) for i in range(4)] - [self.assertEqual(list42[i], weightsdelays[5][i]) for i in range(4)] + 
[self.assertAlmostEqual( + list10[i], weightsdelays[1][i], places=3) for i in range(4)] + [self.assertAlmostEqual( + list42[i], weightsdelays[5][i], places=3) for i in range(4)] def test_bigger_nonsquarep_run(self): (psh, psw, ksh, ksw) = (32, 16, 3, 3) @@ -109,5 +111,7 @@ def test_bigger_nonsquarep_run(self): self.assertEqual(1081, len(weightsdelays)) list10 = (1, 0, 5.0, 20.0) list11 = (1, 1, 7.0, 10.0) - [self.assertEqual(list10[i], weightsdelays[1][i]) for i in range(4)] - [self.assertEqual(list11[i], weightsdelays[5][i]) for i in range(4)] + [self.assertAlmostEqual( + list10[i], weightsdelays[1][i], places=3) for i in range(4)] + [self.assertAlmostEqual( + list11[i], weightsdelays[5][i], places=3) for i in range(4)] diff --git a/spynnaker_integration_tests/test_connectors/test_one_to_one_connector.py b/spynnaker_integration_tests/test_connectors/test_one_to_one_connector.py index c412ddaff3..5f54128dc6 100644 --- a/spynnaker_integration_tests/test_connectors/test_one_to_one_connector.py +++ b/spynnaker_integration_tests/test_connectors/test_one_to_one_connector.py @@ -35,7 +35,7 @@ def do_one_to_one_test( sim.end() for pre, post, w, d in conns: assert pre == post - assert w == weight + assert numpy.allclose(w, weight, rtol=0.0001) assert d == delay def do_one_to_one_conductance_test( diff --git a/spynnaker_integration_tests/test_grid_based_connectors/test_distance_dependent_weights_and_delays.py b/spynnaker_integration_tests/test_grid_based_connectors/test_distance_dependent_weights_and_delays.py index 96f22277d2..3e36ac0012 100644 --- a/spynnaker_integration_tests/test_grid_based_connectors/test_distance_dependent_weights_and_delays.py +++ b/spynnaker_integration_tests/test_grid_based_connectors/test_distance_dependent_weights_and_delays.py @@ -119,7 +119,7 @@ def check_exc_weights(self, exc_weights_delays): def check_inh_weights(self, inh_weights_delays): for conn in inh_weights_delays: # weights are constant - self.assertEqual(1.5, conn[2]) + 
self.assertAlmostEqual(1.5, conn[2], places=3) source_pos = self.POSITIONS[conn[0]] target_pos = self.POSITIONS[conn[1]] dist = math.sqrt((source_pos[0]-target_pos[0])**2 + diff --git a/spynnaker_integration_tests/test_stdp/test_STDP_nearest_pair_multiplicative.py b/spynnaker_integration_tests/test_stdp/test_STDP_nearest_pair_multiplicative.py index aa6af556da..39fe7cf5e2 100644 --- a/spynnaker_integration_tests/test_stdp/test_STDP_nearest_pair_multiplicative.py +++ b/spynnaker_integration_tests/test_stdp/test_STDP_nearest_pair_multiplicative.py @@ -18,7 +18,7 @@ import unittest -class TestSTDPNearestPairAdditive(BaseTestCase): +class TestSTDPNearestPairMultiplicative(BaseTestCase): def potentiation_and_depression(self): p.setup(1) @@ -126,8 +126,6 @@ def potentiation_and_depression(self): # print("Pre neuron spikes at: {}".format(pre_spikes)) # print("Post-neuron spikes at: {}".format(post_spikes)) - target_spikes = [1014, 1032, 1053] - self.assertListEqual(list(post_spikes), target_spikes) # print("Potentiation time differences: {}".format(potentiation_times)) # print("Depression time differences: {}".format(depression_times)) # print("Potentiation: {}".format(potentiations)) @@ -135,6 +133,11 @@ def potentiation_and_depression(self): # print("New weight exact: {}".format(new_weight_exact)) # print("New weight SpiNNaker: {}".format(weights)) + target_spikes = [1014, 1032, 1053] + self.assertListEqual(list(post_spikes), target_spikes) + + print("weights, new_weight_exact: ", weights[0], new_weight_exact) + self.assertTrue(numpy.allclose( weights[0], new_weight_exact, atol=0.001)) diff --git a/spynnaker_integration_tests/test_stdp/test_STDP_pair_multiplicative.py b/spynnaker_integration_tests/test_stdp/test_STDP_pair_multiplicative.py index eeae8ae77b..69d8904700 100644 --- a/spynnaker_integration_tests/test_stdp/test_STDP_pair_multiplicative.py +++ b/spynnaker_integration_tests/test_stdp/test_STDP_pair_multiplicative.py @@ -69,9 +69,9 @@ def 
post_spike_same_time(): pre_spikes, post_spikes, initial_weight, plastic_delay, min_weight, max_weight, a_plus, a_minus, tau_plus, tau_minus) - print(weights_1) - print(weights_2) - print(new_weight_exact) +# print(weights_1) +# print(weights_2) +# print(new_weight_exact) assert len(weights_1) == 1 assert len(weights_2) == 1 @@ -170,7 +170,7 @@ def potentiation_and_depression(): assert numpy.allclose(weights, new_weight_exact, rtol=0.001) -class TestSTDPPairAdditive(BaseTestCase): +class TestSTDPPairMultiplicative(BaseTestCase): def test_potentiation_and_depression(self): self.runsafe(potentiation_and_depression) diff --git a/spynnaker_integration_tests/test_struct_pl/test_structural_shared.py b/spynnaker_integration_tests/test_struct_pl/test_structural_shared.py index c9ecde57a9..95eb0a4f60 100644 --- a/spynnaker_integration_tests/test_struct_pl/test_structural_shared.py +++ b/spynnaker_integration_tests/test_struct_pl/test_structural_shared.py @@ -11,9 +11,12 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+ from spinnaker_testbase import BaseTestCase import pyNN.spiNNaker as p import numpy +from spynnaker.pyNN.models.neuron.synapse_dynamics import ( + calculate_spike_pair_additive_stdp_weight) def structural_shared(): @@ -26,37 +29,56 @@ def structural_shared(): w_min = 0.0 w_max = 5.0 w_init = 5.0 + w_init_stdp = 2.0 delay_init = 2.0 + delay_init_stdp = 5.0 stim = p.Population(1, p.SpikeSourceArray(pre_spikes), label="stim") pop = p.Population(1, p.IF_curr_exp(), label="pop") pop_2 = p.Population(1, p.IF_curr_exp(), label="pop_2") pop_3 = p.Population(1, p.IF_curr_exp(), label="pop_3") pop_4 = p.Population(1, p.IF_curr_exp(), label="pop_4") pop.record("spikes") - pop_2.record("spikes") - struct_pl_static = p.StructuralMechanismStatic( + pop_3.record("spikes") + struct_pl_static_form = p.StructuralMechanismStatic( partner_selection=p.LastNeuronSelection(), formation=p.DistanceDependentFormation([1, 1], 1.0), elimination=p.RandomByWeightElimination(2.0, 0, 0), f_rew=1000, initial_weight=w_init, initial_delay=delay_init, s_max=1, seed=0, weight=0.0, delay=1.0) - struct_pl_stdp = p.StructuralMechanismSTDP( + struct_pl_static_elim = p.StructuralMechanismStatic( + partner_selection=p.LastNeuronSelection(), + formation=p.DistanceDependentFormation([1, 1], 0.0), + elimination=p.RandomByWeightElimination(4.0, 1.0, 1.0), + f_rew=1000, initial_weight=w_init, initial_delay=delay_init, + s_max=1, seed=0, weight=0.0, delay=1.0) + struct_pl_stdp_form = p.StructuralMechanismSTDP( + partner_selection=p.LastNeuronSelection(), + formation=p.DistanceDependentFormation([1, 1], 1.0), + elimination=p.RandomByWeightElimination(2.0, 0, 0), + timing_dependence=p.SpikePairRule( + tau_plus, tau_minus, A_plus, A_minus), + weight_dependence=p.AdditiveWeightDependence(w_min, w_max), + f_rew=1000, initial_weight=w_init_stdp, + initial_delay=delay_init_stdp, + s_max=1, seed=0, weight=0.0, delay=1.0) + struct_pl_stdp_elim = p.StructuralMechanismSTDP( partner_selection=p.LastNeuronSelection(), 
formation=p.DistanceDependentFormation([1, 1], 0.0), elimination=p.RandomByWeightElimination(4.0, 1.0, 1.0), timing_dependence=p.SpikePairRule( tau_plus, tau_minus, A_plus, A_minus), weight_dependence=p.AdditiveWeightDependence(w_min, w_max), - f_rew=1000, initial_weight=2.0, initial_delay=5.0, + f_rew=1000, initial_weight=w_init_stdp, + initial_delay=delay_init_stdp, s_max=1, seed=0, weight=0.0, delay=1.0) proj = p.Projection( - stim, pop, p.FromListConnector([]), struct_pl_static) + stim, pop, p.FromListConnector([]), struct_pl_static_form) proj_2 = p.Projection( - stim, pop_2, p.FromListConnector([]), struct_pl_static) + stim, pop_2, p.FromListConnector([(0, 0)]), struct_pl_static_elim) proj_3 = p.Projection( - stim, pop_3, p.FromListConnector([(0, 0)]), struct_pl_stdp) + stim, pop_3, p.FromListConnector([]), struct_pl_stdp_form) proj_4 = p.Projection( - stim, pop_4, p.FromListConnector([(0, 0)]), struct_pl_stdp) + stim, pop_4, p.FromListConnector([(0, 0)]), struct_pl_stdp_elim) p.Projection(pop_3, pop_4, p.AllToAllConnector(), p.StaticSynapse(weight=1, delay=3)) p.run(10) @@ -66,6 +88,8 @@ def structural_shared(): conns_3 = list(proj_3.get(["weight", "delay"], "list")) conns_4 = list(proj_4.get(["weight", "delay"], "list")) + spikes_3 = [s.magnitude + for s in pop_3.get_data("spikes").segments[0].spiketrains] p.end() print(conns) @@ -73,11 +97,17 @@ def structural_shared(): print(conns_3) print(conns_4) + w_final_1 = calculate_spike_pair_additive_stdp_weight( + pre_spikes, spikes_3[0], w_init_stdp, delay_init_stdp, + A_plus, A_minus, tau_plus, tau_minus) + assert len(conns) == 1 - assert tuple(conns[0]) == (0, 0, w_init, delay_init) - assert len(conns_2) == 1 - assert tuple(conns_2[0]) == (0, 0, w_init, delay_init) - assert len(conns_3) == 0 + assert (conns[0][2] >= w_init - 0.01 and + conns[0][2] <= w_init + 0.01) + assert len(conns_2) == 0 + assert len(conns_3) == 1 + assert (conns_3[0][2] >= w_final_1 - 0.01 and + conns_3[0][2] <= w_final_1 + 0.01) assert 
len(conns_4) == 0 diff --git a/spynnaker_integration_tests/test_various/test_alltoone_with_large_weight.py b/spynnaker_integration_tests/test_various/test_alltoone_with_large_weight.py index 867e346af0..88dc9cd710 100644 --- a/spynnaker_integration_tests/test_various/test_alltoone_with_large_weight.py +++ b/spynnaker_integration_tests/test_various/test_alltoone_with_large_weight.py @@ -39,7 +39,9 @@ def do_run(self): sim.end() weight_sum = sum(weight[2] for weight in weight_list) - self.assertEqual(weight_sum, sources * weights) + # 50.0 is not exactly representable so specify a relevant tolerance + self.assertAlmostEqual(weight_sum, sources * weights, + delta=sources*0.05) def test_run(self): self.runsafe(self.do_run) diff --git a/spynnaker_integration_tests/test_various/test_synaptic_expander.py b/spynnaker_integration_tests/test_various/test_synaptic_expander.py index 1e2ac2f976..331ed51bcc 100644 --- a/spynnaker_integration_tests/test_various/test_synaptic_expander.py +++ b/spynnaker_integration_tests/test_various/test_synaptic_expander.py @@ -99,7 +99,7 @@ def run_script(): def check_params(param, result): if not isinstance(param, p.RandomDistribution): - assert all(param == value for value in result) + assert numpy.allclose(result, param, atol=0.01) else: # Check the values are "random" (yes I know they might be the same, # but the chances are quite small!) 
diff --git a/unittests/model_tests/neuron/test_synaptic_manager.py b/unittests/model_tests/neuron/test_synaptic_manager.py index 441abb62b0..1ad5743be0 100644 --- a/unittests/model_tests/neuron/test_synaptic_manager.py +++ b/unittests/model_tests/neuron/test_synaptic_manager.py @@ -182,7 +182,8 @@ def test_write_data_spec(): # Check that all the connections have the right weight and delay assert len(connections_1) == post_vertex_slice.n_atoms - assert all([conn["weight"] == 1.5 for conn in connections_1]) + assert all([numpy.isclose(conn["weight"], 1.5, atol=0.001) + for conn in connections_1]) assert all([conn["delay"] == 1.0 for conn in connections_1]) connections_2 = numpy.concatenate( @@ -193,7 +194,8 @@ def test_write_data_spec(): # Check that all the connections have the right weight and delay assert len(connections_2) == post_vertex_slice.n_atoms - assert all([conn["weight"] == 2.5 for conn in connections_2]) + assert all([numpy.isclose(conn["weight"], 2.5, atol=0.001) + for conn in connections_2]) assert all([conn["delay"] == 2.0 for conn in connections_2]) connections_3 = numpy.concatenate( @@ -204,7 +206,8 @@ def test_write_data_spec(): # Check that all the connections have the right weight and delay assert len(connections_3) == 90 - assert all([conn["weight"] == 4.5 for conn in connections_3]) + assert all([numpy.isclose(conn["weight"], 4.5, atol=0.001) + for conn in connections_3]) assert all([conn["delay"] == 4.0 for conn in connections_3]) connections_4 = numpy.concatenate( @@ -217,7 +220,8 @@ def test_write_data_spec(): assert len(connections_4) == len(from_list_list) list_weights = [values[2] for values in from_list_list] list_delays = [values[3] for values in from_list_list] - assert all(list_weights == connections_4["weight"]) + assert numpy.allclose( + list_weights, connections_4["weight"], atol=0.001) assert all(list_delays == connections_4["delay"]) finally: shutil.rmtree(report_folder, ignore_errors=True) @@ -229,7 +233,8 @@ def 
test_set_synapse_dynamics(): post_app_model = IFCurrExpBase() post_app_vertex = post_app_model.create_vertex( n_neurons=10, label="post", spikes_per_second=None, - ring_buffer_sigma=None, incoming_spike_buffer_size=None, + ring_buffer_sigma=None, min_weights=None, weight_random_sigma=None, + max_stdp_spike_delta=None, incoming_spike_buffer_size=None, n_steps_per_timestep=1, drop_late_spikes=True, splitter=None, seed=None, n_colour_bits=None) @@ -340,7 +345,8 @@ def test_set_synapse_dynamics(): # Try starting again to get a couple more combinations post_app_vertex = post_app_model.create_vertex( n_neurons=10, label="post", spikes_per_second=None, - ring_buffer_sigma=None, incoming_spike_buffer_size=None, + ring_buffer_sigma=None, min_weights=None, weight_random_sigma=None, + max_stdp_spike_delta=None, incoming_spike_buffer_size=None, n_steps_per_timestep=1, drop_late_spikes=True, splitter=None, seed=None, n_colour_bits=None) @@ -363,7 +369,8 @@ def test_set_synapse_dynamics(): # One more time! 
post_app_vertex = post_app_model.create_vertex( n_neurons=10, label="post", spikes_per_second=None, - ring_buffer_sigma=None, incoming_spike_buffer_size=None, + ring_buffer_sigma=None, min_weights=None, weight_random_sigma=None, + max_stdp_spike_delta=None, incoming_spike_buffer_size=None, n_steps_per_timestep=1, drop_late_spikes=True, splitter=None, seed=None, n_colour_bits=None) @@ -401,7 +408,8 @@ def test_set_synapse_dynamics(): # OK, just one more, honest post_app_vertex = post_app_model.create_vertex( n_neurons=10, label="post", spikes_per_second=None, - ring_buffer_sigma=None, incoming_spike_buffer_size=None, + ring_buffer_sigma=None, min_weights=None, weight_random_sigma=None, + max_stdp_spike_delta=None, incoming_spike_buffer_size=None, n_steps_per_timestep=1, drop_late_spikes=True, splitter=None, seed=None, n_colour_bits=None) post_app_vertex.synapse_dynamics = static_struct diff --git a/unittests/test_populations/test_vertex.py b/unittests/test_populations/test_vertex.py index fde6e56eec..c22b640b9f 100644 --- a/unittests/test_populations/test_vertex.py +++ b/unittests/test_populations/test_vertex.py @@ -103,7 +103,8 @@ def __init__(self): super().__init__( n_neurons=5, label="Mock", max_atoms_per_core=None, spikes_per_second=None, - ring_buffer_sigma=None, incoming_spike_buffer_size=None, + ring_buffer_sigma=None, min_weights=None, weight_random_sigma=None, + max_stdp_spike_delta=None, incoming_spike_buffer_size=None, neuron_impl=foo_bar.model, pynn_model=foo_bar, drop_late_spikes=True, splitter=None, seed=None, n_colour_bits=None) diff --git a/unittests/test_using_virtual_board/test_from_file_connector.py b/unittests/test_using_virtual_board/test_from_file_connector.py index 6551cd5aa3..186cfea2a5 100644 --- a/unittests/test_using_virtual_board/test_from_file_connector.py +++ b/unittests/test_using_virtual_board/test_from_file_connector.py @@ -42,13 +42,13 @@ def check_weights( self.assertEqual(from_as[0], source) self.assertEqual(from_as[1], dest) if 
w_index: - self.assertAlmostEqual(from_as[w_index], weight, 4) + self.assertAlmostEqual(from_as[w_index], weight, 3) else: - self.assertEqual(WEIGHT, weight) + self.assertAlmostEqual(WEIGHT, weight, 3) if d_index: - self.assertAlmostEqual(from_as[d_index], delay, 4) + self.assertAlmostEqual(from_as[d_index], delay, 3) else: - self.assertEqual(DELAY, delay) + self.assertAlmostEqual(DELAY, delay, 3) as_index += 1 while as_index < len(aslist): from_as = aslist[as_index] diff --git a/unittests/test_using_virtual_board/test_from_list_connector.py b/unittests/test_using_virtual_board/test_from_list_connector.py index fff6bef3a6..1d8c092cde 100644 --- a/unittests/test_using_virtual_board/test_from_list_connector.py +++ b/unittests/test_using_virtual_board/test_from_list_connector.py @@ -39,13 +39,13 @@ def check_weights( self.assertEqual(from_as[0], source) self.assertEqual(from_as[1], dest) if w_index: - self.assertAlmostEqual(from_as[w_index], weight, 4) + self.assertAlmostEqual(from_as[w_index], weight, 3) else: - self.assertEqual(WEIGHT, weight) + self.assertAlmostEqual(WEIGHT, weight, 3) if d_index: - self.assertAlmostEqual(from_as[d_index], delay, 4) + self.assertAlmostEqual(from_as[d_index], delay, 3) else: - self.assertEqual(DELAY, delay) + self.assertAlmostEqual(DELAY, delay, 3) as_index += 1 while as_index < len(aslist): from_as = aslist[as_index]