Excitatory/Inhibitory LIF neuron model #888

Open · wants to merge 4 commits into main
41 changes: 41 additions & 0 deletions src/lava/proc/io/utils.py
@@ -280,3 +280,44 @@ def validate_channel_config(channel_config: ChannelConfig) -> None:
"to be of type ReceiveNotEmpty. Got "
"<channel_config>.receive_not_empty = "
f"{channel_config.receive_not_empty}.")


def convert_to_numpy_array(val, shape, name="value", verbose=False):
    """Convert a given value to a numpy array if it is not one already.

    Parameters
    ----------
    val : scalar | list | np.ndarray
        The value to convert.
    shape : tuple
        The shape of the array to create when val is a scalar.
    name : str
        Name of the value, used in debug and error messages.
    verbose : bool
        Whether to print debug messages.

    Returns
    -------
    val : np.ndarray
        The value as a numpy array.

    Raises
    ------
    ValueError
        If the value cannot be converted to a numpy array.
    """
    if np.isscalar(val):
        if verbose:
            print(f"{name} is scalar, converting to numpy array")
        # If val is a scalar, create an array of the given shape
        # filled with that value
        val = np.full(shape, val)
    elif not isinstance(val, np.ndarray):
        # If val is not a scalar and not a numpy array, try to
        # convert it to a numpy array
        try:
            val = np.array(val)
        except Exception as e:
            raise ValueError(
                f"Failed to convert {name} to a numpy array. Please "
                "ensure it is either a scalar, list, or numpy array."
            ) from e

    return val
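
For reference, a minimal usage sketch of the new helper (illustrative, not part of the diff; it assumes the PR is installed so the import resolves):

```python
import numpy as np
from lava.proc.io.utils import convert_to_numpy_array  # added in this PR

# Scalars are broadcast to the requested shape:
du_exc = convert_to_numpy_array(0.1, shape=(4,), name="du_exc")
assert np.array_equal(du_exc, np.full((4,), 0.1))

# Lists are converted as-is; note that `shape` is only used in the
# scalar case, so the caller is responsible for matching shapes:
dv = convert_to_numpy_array([0.2, 0.3, 0.4, 0.5], shape=(4,), name="dv")
assert dv.shape == (4,)
```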
173 changes: 172 additions & 1 deletion src/lava/proc/lif/models.py
@@ -14,7 +14,7 @@
from lava.magma.core.decorator import implements, requires, tag
from lava.magma.core.model.py.model import PyLoihiProcessModel
from lava.proc.lif.process import (LIF, LIFReset, TernaryLIF, LearningLIF,
                                   LIFRefractory, EILIFRefractory, EILIF)


class AbstractPyLifModelFloat(PyLoihiProcessModel):
@@ -559,3 +559,174 @@ def run_spk(self) -> None:
Dense process for learning.
"""
super().run_spk()


class AbstractPyEILifModelFloat(PyLoihiProcessModel):
"""Abstract implementation of floating point precision
excitatory/inhibitory leaky-integrate-and-fire neuron model.

Specific implementations inherit from here.
"""

    # a_in is the input port that receives the synaptic input.
    # Positive values of a_in accumulate in u_exc; negative values
    # accumulate in u_inh, making it more negative.
a_in: PyInPort = LavaPyType(PyInPort.VEC_DENSE, float)
s_out = None # This will be an OutPort of different LavaPyTypes
u_exc: np.ndarray = LavaPyType(np.ndarray, float)
u_inh: np.ndarray = LavaPyType(np.ndarray, float)
# Net current (u_exc + u_inh)
u: np.ndarray = LavaPyType(np.ndarray, float)
v: np.ndarray = LavaPyType(np.ndarray, float)
bias_mant: np.ndarray = LavaPyType(np.ndarray, float)
bias_exp: np.ndarray = LavaPyType(np.ndarray, float)
du_exc: np.ndarray = LavaPyType(np.ndarray, float)
du_inh: np.ndarray = LavaPyType(np.ndarray, float)
dv: np.ndarray = LavaPyType(np.ndarray, float)

def spiking_activation(self):
"""Abstract method to define the activation function that determines
how spikes are generated.
"""
        raise NotImplementedError(
            "spiking_activation() cannot be called from "
            "an abstract ProcessModel"
        )

    def subthr_dynamics(self, activation_in: np.ndarray):
        """Common sub-threshold dynamics of current and voltage variables
        for all excitatory/inhibitory LIF models. This is where the
        'leaky integration' happens.
        """
# Get the excitatory input from a_in -- Positive values increase u_exc
exc_a_in = np.clip(activation_in, a_min=0, a_max=None)
        # Get the inhibitory input from a_in -- Negative values make
        # u_inh more negative
        inh_a_in = np.clip(activation_in, a_min=None, a_max=0)

# Update the excitatory and inhibitory currents
self.u_exc[:] = self.u_exc * (1 - self.du_exc)
self.u_exc[:] += exc_a_in

self.u_inh[:] = self.u_inh * (1 - self.du_inh)
self.u_inh[:] += inh_a_in

# Update the voltage
# Calculate the net current by adding the
# excitatory and inhibitory currents
self.u = self.u_exc + self.u_inh # u_inh is negative
self.v[:] = self.v * (1 - self.dv) + self.u + self.bias_mant

def reset_voltage(self, spike_vector: np.ndarray):
"""Voltage reset behaviour. This can differ for different neuron
models."""
self.v[spike_vector] = 0

def run_spk(self):
"""The run function that performs the actual computation during
execution orchestrated by a PyLoihiProcessModel using the
LoihiProtocol.
"""
super().run_spk()
a_in_data = self.a_in.recv()

self.subthr_dynamics(activation_in=a_in_data)
self.s_out_buff = self.spiking_activation()
self.reset_voltage(spike_vector=self.s_out_buff)
self.s_out.send(self.s_out_buff)
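
As a sanity check on the class above, here is a standalone numpy sketch of one `subthr_dynamics` step (illustrative values, not part of the diff; names mirror the model's variables):

```python
import numpy as np

# One integration step for 3 neurons with mixed synaptic input.
a_in = np.array([1.0, -0.5, 0.0])
u_exc, u_inh, v = np.zeros(3), np.zeros(3), np.zeros(3)
du_exc, du_inh, dv, bias_mant = 0.1, 0.2, 0.05, 0.0

u_exc = u_exc * (1 - du_exc) + np.clip(a_in, 0, None)  # decay + E input
u_inh = u_inh * (1 - du_inh) + np.clip(a_in, None, 0)  # decay + I input
u = u_exc + u_inh                                      # net current
v = v * (1 - dv) + u + bias_mant                       # leaky voltage
# After this first step: v == [1.0, -0.5, 0.0]
```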


@implements(proc=EILIF, protocol=LoihiProtocol)
@requires(CPU)
@tag("floating_pt")
class PyEILifFloat(AbstractPyEILifModelFloat):
"""Implementation of Excitatory/Inhibitory Leaky-Integrate-and-Fire
neural process in floating point precision. This short and simple
ProcessModel can be used for quick algorithmic prototyping, without
engaging with the nuances of a fixed point implementation.
"""
s_out: PyOutPort = LavaPyType(PyOutPort.VEC_DENSE, float)
vth: float = LavaPyType(float, float)

def spiking_activation(self):
"""Spiking activation function for LIF."""
return self.v > self.vth


@implements(proc=EILIFRefractory, protocol=LoihiProtocol)
@requires(CPU)
@tag("floating_pt")
class PyEILifRefractoryFloat(AbstractPyEILifModelFloat):
"""Implementation of Excitatory/Inhibitory Refractory
Leaky-Integrate-and-Fire neural process in floating point precision.
This short and simple ProcessModel can be used for quick algorithmic
prototyping, without engaging with the nuances of a fixed
point implementation.
"""
s_out: PyOutPort = LavaPyType(PyOutPort.VEC_DENSE, float)
vth: float = LavaPyType(float, float)
refractory_period_end: np.ndarray = LavaPyType(np.ndarray, int)

    def __init__(self, proc_params):
        super().__init__(proc_params)
        self.refractory_period = proc_params["refractory_period"]

def spiking_activation(self):
"""Spiking activation function for LIF."""
return self.v > self.vth

    def subthr_dynamics(self, activation_in: np.ndarray):
        """Sub-threshold dynamics of current and voltage variables for
        the refractory EI-LIF model. This is where the 'leaky integration'
        happens, with voltage updates gated by the refractory state.
        """
        # Get the excitatory input from a_in -- Positive values increase u_exc
        exc_a_in = np.clip(activation_in, a_min=0, a_max=None)
        # Get the inhibitory input from a_in -- Negative values make
        # u_inh more negative
        inh_a_in = np.clip(activation_in, a_min=None, a_max=0)

# Update the excitatory and inhibitory currents
self.u_exc[:] = self.u_exc * (1 - self.du_exc)
self.u_exc[:] += exc_a_in

self.u_inh[:] = self.u_inh * (1 - self.du_inh)
self.u_inh[:] += inh_a_in

# Check which neurons are not in refractory period
non_refractory = self.refractory_period_end < self.time_step

# Update the voltage of the non-refractory neurons
# Calculate the net current by adding the excitatory
# and inhibitory currents
self.u = self.u_exc + self.u_inh # u_inh is negative

self.v[non_refractory] = (
self.v[non_refractory] * (1 - self.dv[non_refractory]) + (
self.u[non_refractory] + self.bias_mant[non_refractory]
)
)

def process_spikes(self, spike_vector: np.ndarray):
"""
Set the refractory_period_end for the neurons that spiked and
Reset the voltage of the neurons that spiked to 0
"""
self.refractory_period_end[spike_vector] = (self.time_step
+ self.refractory_period)
super().reset_voltage(spike_vector)

def run_spk(self):
"""The run function that performs the actual computation during
execution orchestrated by a PyLoihiProcessModel using the
LoihiProtocol.
"""
a_in_data = self.a_in.recv()

self.subthr_dynamics(activation_in=a_in_data)
spike_vector = self.spiking_activation()

# Reset voltage of spiked neurons to 0 and update refractory period
self.process_spikes(spike_vector=spike_vector)
self.s_out.send(spike_vector)
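
To make the refractory gating concrete, a small standalone sketch (illustrative, not part of the diff): a neuron's voltage is frozen until `refractory_period_end` falls below the current time step, and spiking both resets the voltage and extends the window:

```python
import numpy as np

refractory_period = 2
refractory_period_end = np.array([0, 5, 0])  # neuron 1 refractory until t=5
time_step = 3

# Only neurons whose refractory window has passed integrate:
non_refractory = refractory_period_end < time_step  # [True, False, True]
v = np.array([0.5, 0.9, 0.2])
u = np.array([0.1, 0.1, 0.1])
dv, bias_mant = 0.05, 0.0
v[non_refractory] = (v[non_refractory] * (1 - dv)
                     + u[non_refractory] + bias_mant)

# A spike resets the voltage and schedules the refractory end:
spike_vector = np.array([True, False, False])
refractory_period_end[spike_vector] = time_step + refractory_period
v[spike_vector] = 0
```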