diff --git a/HISTORY.rst b/HISTORY.rst
index aa3f261..f2098cb 100644
--- a/HISTORY.rst
+++ b/HISTORY.rst
@@ -2,13 +2,22 @@
 History
 =======
 
+0.1.4 (2024-03-16)
+------------------
+
+* Bug fixes.
+* Src and dst can be `None` for synapses.
+* Hierarchical structure.
+
 0.1.3 (2023-08-16)
+------------------
 
 * BREAKING CHANGE: `Network` no longer accept settings. Individual setting are now argument for Network.
-* Bugg fixes.
+* Bug fixes.
 
 0.1.2 (2023-06-14)
+------------------
 
 * `tensor` method for NetworkObject
diff --git a/README.rst b/README.rst
index a13ed35..6bacfd8 100644
--- a/README.rst
+++ b/README.rst
@@ -93,7 +93,7 @@ Similarly, you can write your own ``Behavior`` Modules with the same logic as ``
     9: Recorder(['voltage']),
     10: EventRecorder(['spike'])
     })
-    SynapseGroup(ng, ng, net, tag='GLUTAMATE')
+    SynapseGroup(src=ng, dst=ng, net=net, tag='GLUTAMATE')
 
     net.initialize()
     net.simulate_iterations(1000)
diff --git a/benchmark/Benchmarks/Izhikevich/brian_izh.py b/benchmark/Benchmarks/Izhikevich/brian_izh.py
deleted file mode 100644
index e308f6a..0000000
--- a/benchmark/Benchmarks/Izhikevich/brian_izh.py
+++ /dev/null
@@ -1,76 +0,0 @@
-import time
-import numpy as np
-from brian2 import (
-    StateMonitor,
-    SpikeMonitor,
-    defaultclock,
-    prefs,
-    NeuronGroup,
-    run,
-    Synapses,
-    plot,
-    ms,
-    mV,
-    float32,
-)
-import matplotlib.pyplot as plt
-from globparams import *
-
-defaultclock.dt = 1.0 * ms
-prefs.core.default_float_dtype = float32
-
-a, b, c, d = A, B, C, D
-offset = OFFSET
-
-ng = NeuronGroup(
-    SIZE,
-    """dv/dt = (0.04*v**2.0 + 5.0*v + 140.0 - u + I + randn() * NOISE_STD + NOISE_MEAN) / ms : 1
-    du/dt = (a * (b * v - u)) / ms : 1
-    dI/dt = (-I + offset) / ms : 1""",
-    threshold="v>=THRESHOLD",
-    reset="v=c; u+=d",
-    method="euler",
-)
-
-sg = Synapses(
-    ng,
-    ng,
-    """w : 1
-    dApre/dt = -Apre / TRACE_TAU / ms : 1 (event-driven)
-    dApost/dt = -Apost / TRACE_TAU / ms : 1 (event-driven)""",
-    on_pre="""
-    Apre += 1
-    I_post += w * DIRAC_STRENGTH
-    w = w - Apost * (w - W_MIN) """,
-    on_post="""
-    Apost += 1
-    w = w + Apre * (W_MAX - w) """,
-)
-
-# sg.connect(condition='i!=j')
-sg.connect()
-
-sg.w = "rand() * W_MAX"
-ng.v = "V_STD * randn() + V_MEAN"
-ng.u = "U_STD * randn() + U_MEAN"
-
-#print(np.min(ng.u), np.max(ng.u), np.mean(ng.u), np.var(ng.u))
-#print(np.sum(ng.u))
-#import matplotlib.pyplot as plt
-#plt.hist(ng.u, bins=100)
-#plt.show()
-
-if PLOT:
-    spikemon = SpikeMonitor(ng)
-
-start = time.time()
-run(DURATION * ms)
-print("simulation time: ", time.time() - start)
-
-if PLOT:
-    print(f"Total spikes: {len(spikemon.i)}")
-    plt.plot(spikemon.t/ms, spikemon.i, '.k')
-    plt.xlabel('Time (ms)')
-    plt.ylabel('Neuron index')
-    plt.show()
-
diff --git a/benchmark/Benchmarks/Izhikevich/brian_izh_cpp.py b/benchmark/Benchmarks/Izhikevich/brian_izh_cpp.py
deleted file mode 100644
index 68447f9..0000000
--- a/benchmark/Benchmarks/Izhikevich/brian_izh_cpp.py
+++ /dev/null
@@ -1,74 +0,0 @@
-import time
-import numpy as np
-from brian2 import (
-    StateMonitor,
-    SpikeMonitor,
-    defaultclock,
-    prefs,
-    NeuronGroup,
-    run,
-    Synapses,
-    plot,
-    ms,
-    mV,
-    float32,
-    set_device,
-    device,
-)
-import matplotlib.pyplot as plt
-from globparams import *
-set_device('cpp_standalone', clean=True)
-prefs.codegen.target = 'cython'
-
-defaultclock.dt = 1 * ms
-prefs.core.default_float_dtype = float32
-
-
-a, b, c, d = A, B, C, D
-offset = OFFSET
-
-ng = NeuronGroup(
-    SIZE,
-    """dv/dt = (0.04*v**2 + 5*v + 140 - u + I + randn() * NOISE_STD + NOISE_MEAN) / ms : 1
-    du/dt = (a*(b*v - u)) / ms : 
1 - dI/dt = (-I + OFFSET) / ms : 1""", - threshold="v>=THRESHOLD", - reset="v=c; u+=d", - method="euler", -) - -sg = Synapses( - ng, - ng, - """w : 1 - dApre/dt = -Apre / TRACE_TAU / ms : 1 (event-driven) - dApost/dt = -Apost / TRACE_TAU / ms : 1 (event-driven)""", - on_pre=""" - I_post += w * DIRAC_STRENGTH - Apre += 1 - w = w - Apost * (w - W_MIN) """, - on_post=""" - Apost += 1 - w = w + Apre * (W_MAX - w) """, -) - -# sg.connect(condition='i!=j') -sg.connect() - -sg.w = "rand() * W_MAX" -ng.v = "V_STD * randn() + V_MEAN" -ng.u = "U_STD * randn() + U_MEAN" - -if PLOT: - spikemon = SpikeMonitor(ng) - - -run(DURATION * ms, report=REPORT_FUNC) - -if PLOT: - print(f"Total spikes: {len(spikemon.i)}") - plt.plot(spikemon.t/ms, spikemon.i, '.k') - plt.xlabel('Time (ms)') - plt.ylabel('Neuron index'); - plt.show() - diff --git a/benchmark/Benchmarks/Izhikevich/brian_izh_cuda.py b/benchmark/Benchmarks/Izhikevich/brian_izh_cuda.py deleted file mode 100644 index 2ff23fc..0000000 --- a/benchmark/Benchmarks/Izhikevich/brian_izh_cuda.py +++ /dev/null @@ -1,65 +0,0 @@ -import platform -import time -import numpy as np -import matplotlib.pyplot as plt -from globparams import * -from brian2 import * -import brian2cuda -set_device("cuda_standalone", clean=True) - -defaultclock.dt = 1 * ms -prefs.core.default_float_dtype = float32 - -if platform.node() == 'saeed-Swift-SF315-51G': - prefs.devices.cuda_standalone.cuda_backend.detect_gpus = False - prefs.devices.cuda_standalone.cuda_backend.gpu_id = 0 - prefs.devices.cuda_standalone.cuda_backend.compute_capability = 6.1 - prefs.devices.cuda_standalone.default_functions_integral_convertion = np.float32 - -a, b, c, d = A, B, C, D -offset = OFFSET - -ng = NeuronGroup( - SIZE, - """dv/dt = (0.04*v**2 + 5*v + 140 - u + I + randn() * NOISE_STD + NOISE_MEAN) / ms : 1 - du/dt = (a*(b*v - u)) / ms : 1 - dI/dt = (-I + OFFSET) / ms : 1""", - threshold="v>=THRESHOLD", - reset="v=c; u+=d", - method="euler", -) - -sg = Synapses( - ng, - ng, - """w : 1 - dApre/dt = -Apre / TRACE_TAU / ms : 1 (event-driven) - dApost/dt = -Apost / TRACE_TAU / ms : 1 (event-driven)""", - on_pre=""" - I_post += w * DIRAC_STRENGTH - Apre += 1 - w = w - Apost * (w - W_MIN) """, - on_post=""" - Apost += 1 - w = w + Apre * (W_MAX - w) """, -) - -# sg.connect(condition='i!=j') -sg.connect() - -sg.w = "rand() * W_MAX" -ng.v = "V_STD * randn() + V_MEAN" -ng.u = "U_STD * randn() + U_MEAN" - -if PLOT: - spikemon = SpikeMonitor(ng) - -run(DURATION * ms, report=REPORT_FUNC) - -if PLOT: - print(f"Total spikes: {len(spikemon.i)}") - plt.plot(spikemon.t/ms, spikemon.i, '.k') - plt.xlabel('Time (ms)') - plt.ylabel('Neuron index') - plt.show() - diff --git a/benchmark/Benchmarks/Izhikevich/globparams.py b/benchmark/Benchmarks/Izhikevich/globparams.py deleted file mode 100644 index 05938eb..0000000 --- a/benchmark/Benchmarks/Izhikevich/globparams.py +++ /dev/null @@ -1,25 +0,0 @@ -import sys -PLOT = not 'no_plot' in sys.argv - -DURATION = 300 -SIZE = 2500 - -A, B, C, D = 0.02, 0.04, -65.0, 2.0 -THRESHOLD = 30.0 -V_MEAN, U_MEAN = -65.0, 12.0 -V_STD, U_STD= 7.0, 7.0 - -TRACE_TAU = 20.0 -TRACE_TAIL = 0.01 - -DIRAC_STRENGTH = 1.0 / SIZE -A_PLUS, A_MINUS = 0.01, 0.012 -W_MIN, W_MAX = 0.0, 1.0 - -OFFSET = 15.0 -NOISE_MEAN = 0.0 -NOISE_STD = 1.0 - -REPORT_FUNC = ''' - if (completed == 1.0) std::cout << "simulation time: " << elapsed << std::endl << std::flush; -''' diff --git a/benchmark/Benchmarks/Izhikevich/pymonnto_izh.py b/benchmark/Benchmarks/Izhikevich/pymonnto_izh.py deleted file mode 100644 index 
dc0913a..0000000 --- a/benchmark/Benchmarks/Izhikevich/pymonnto_izh.py +++ /dev/null @@ -1,134 +0,0 @@ -from PymoNNto import ( - Network, - SynapseGroup, - NeuronGroup, - Behavior, - EventRecorder, - SxD, - float32, -) -import numpy as np -import time -import matplotlib.pyplot as plt -from globparams import * - -settings = {"dtype": float32, "synapse_mode": SxD} - - -class TimeResolution(Behavior): - def initialize(self, n): - n.dt = self.parameter("dt", 1.0) - - -class Izhikevich(Behavior): - def initialize(self, n): - self.a = self.parameter("a") - self.b = self.parameter("b") - self.c = self.parameter("c") - self.d = self.parameter("d") - self.threshold = self.parameter("threshold") - - n.v = V_STD * n.vector("normal") + V_MEAN - n.u = U_STD * n.vector("normal") + U_MEAN - n.spikes = n.vector("bool") - - def iteration(self, n): - n.spikes = (n.v >= self.threshold) - - n.v[n.spikes] = self.c - n.u[n.spikes] += self.d - - dv = (0.04 * n.v**2.0 + 5.0 * n.v + 140.0 - n.u + n.I) - du = (self.a * (self.b * n.v - n.u)) - - n.v += dv * n.network.dt - n.u += du * n.network.dt - - - - -class Dendrite(Behavior): - def initialize(self, n): - self.offset = self.parameter("offset", None) - n.I = n.vector(self.offset) - - def iteration(self, n): - n.I.fill(self.offset) - n.I += n.vector("normal") * NOISE_STD + NOISE_MEAN - - for s in n.afferent_synapses["GLU"]: - n.I += s.I - - -class STDP(Behavior): - def initialize(self, s): - self.pre_tau = self.parameter("pre_tau") - self.post_tau = self.parameter("post_tau") - self.a_plus = self.parameter("a_plus") - self.a_minus = self.parameter("a_minus") - - s.src_trace = s.src.vector() - s.dst_trace = s.dst.vector() - - def iteration(self, s): - src_spikes = s.src.spikes - dst_spikes = s.dst.spikes - s.src_trace += src_spikes - s.src_trace / self.pre_tau * s.network.dt - s.dst_trace += dst_spikes - s.dst_trace / self.post_tau * s.network.dt - s.W[src_spikes] -= ( - s.dst_trace[None, ...] 
* self.a_minus * (s.W[src_spikes] - W_MIN) - ) - s.W[:, dst_spikes] += ( - s.src_trace[..., None] * self.a_plus * (W_MAX - s.W[:, dst_spikes]) - ) - # s.W = np.clip(s.W, W_MIN, W_MAX) - - -class DiracInput(Behavior): - def initialize(self, s): - self.strength = self.parameter("strength") - s.I = s.dst.vector() - s.W = s.matrix("random") * W_MAX + W_MIN - # np.fill_diagonal(s.W, 0) - - def iteration(self, s): - s.I = np.sum(s.W[s.src.spikes], axis=0) * self.strength - - -net = Network(behavior={1: TimeResolution()}, settings=settings) - -NeuronGroup( - net, - tag="NG", - size=SIZE, - behavior={ - 1: Dendrite(offset=OFFSET), - 2: Izhikevich(a=A, b=B, c=C, d=D, threshold=THRESHOLD), - }, -) - -if PLOT: - net.NG.add_behavior(9, EventRecorder("spikes"), False) - -SynapseGroup( - net, - src="NG", - dst="NG", - tag="GLU", - behavior={ - 4: DiracInput(strength=DIRAC_STRENGTH), - 5: STDP(a_plus=A_PLUS, a_minus=A_MINUS, pre_tau=TRACE_TAU, post_tau=TRACE_TAU), - }, -) - - -net.initialize() - -start = time.time() -net.simulate_iterations(DURATION, batch_size=DURATION, measure_block_time=True) -print("simulation time: ", time.time() - start) - -if PLOT: - print(f"Total spikes: {len(net['spikes.i', 0])}") - plt.plot(net["spikes.t", 0], net["spikes.i", 0], ".k") - plt.show() diff --git a/benchmark/Benchmarks/Izhikevich/pymonntorch_izh_cpu.py b/benchmark/Benchmarks/Izhikevich/pymonntorch_izh_cpu.py deleted file mode 100644 index 500b178..0000000 --- a/benchmark/Benchmarks/Izhikevich/pymonntorch_izh_cpu.py +++ /dev/null @@ -1,125 +0,0 @@ -from pymonntorch import Network, SynapseGroup, NeuronGroup, Behavior, EventRecorder -import torch -import time -from globparams import * -import matplotlib.pyplot as plt - -settings = {"dtype": torch.float32, "synapse_mode": "SxD", "device": "cpu"} - - -class TimeResolution(Behavior): - def initialize(self, n): - n.dt = self.parameter("dt", 1) - - -class Izhikevich(Behavior): - def initialize(self, n): - self.a = self.parameter("a", None) - self.b = self.parameter("b", None) - self.c = self.parameter("c", None) - self.d = self.parameter("d", None) - self.threshold = self.parameter("threshold", None) - - n.u = n.vector(f"normal({U_MEAN}, {U_STD})") - n.v = n.vector(f"normal({V_MEAN}, {V_STD})") - n.spikes = n.vector(dtype=torch.bool) - - def forward(self, n): - n.spikes = n.v >= self.threshold - - n.v[n.spikes] = self.c - n.u[n.spikes] += self.d - - dv = (0.04 * n.v**2 + 5 * n.v + 140 - n.u + n.I) - du = self.a * (self.b * n.v - n.u) - - n.v += dv * n.network.dt - n.u += du * n.network.dt - - -class Dendrite(Behavior): - def initialize(self, n): - self.offset = self.parameter("offset", None) - n.I = n.vector(self.offset) - - def forward(self, n): - n.I.fill_(self.offset) - for s in n.afferent_synapses["GLU"]: - n.I += s.I - n.I += n.vector(f"normal({NOISE_MEAN}, {NOISE_STD})") - - -class STDP(Behavior): - def initialize(self, s): - self.pre_tau = self.parameter("pre_tau", None) - self.post_tau = self.parameter("post_tau", None) - self.a_plus = self.parameter("a_plus", None) - self.a_minus = self.parameter("a_minus", None) - - s.src_trace = s.src.vector() - s.dst_trace = s.dst.vector() - - def forward(self, s): - src_spikes = s.src.spikes - dst_spikes = s.dst.spikes - s.src_trace += src_spikes * 1.0 - s.src_trace / self.pre_tau * s.network.dt - s.dst_trace += dst_spikes * 1.0 - s.dst_trace / self.post_tau * s.network.dt - s.W[src_spikes] -= ( - s.dst_trace[None, ...] 
* self.a_minus * (s.W[src_spikes] - W_MIN) - ) - s.W[:, dst_spikes] += ( - s.src_trace[..., None] * self.a_plus * (W_MAX - s.W[:, dst_spikes]) - ) - # s.W = torch.clip(s.W, W_MIN, W_MAX) - - -class DiracInput(Behavior): - def initialize(self, s): - self.strength = self.parameter("strength", None) - s.I = s.dst.vector() - s.W = s.matrix("random") * W_MAX + W_MIN - # s.W.fill_diagonal_(0) - - def forward(self, s): - s.I = torch.sum(s.W[s.src.spikes], axis=0) * self.strength - - -net = Network(behavior={1: TimeResolution()}, - **settings) - -NeuronGroup( - net=net, - tag="NG", - size=SIZE, - behavior={ - 1: Dendrite(offset=OFFSET), - 2: Izhikevich(a=A, b=B, c=C, d=D, threshold=THRESHOLD), - }, -) - -if PLOT: - net.NG.add_behavior(9, EventRecorder("spikes"), False) - -SynapseGroup( - net=net, - src="NG", - dst="NG", - tag="GLU", - behavior={ - 4: DiracInput(strength=DIRAC_STRENGTH), - 5: STDP(a_plus=A_PLUS, a_minus=A_MINUS, pre_tau=TRACE_TAU, post_tau=TRACE_TAU), - }, -) - - -net.initialize() - -start = time.time() -net.simulate_iterations(DURATION) -print("simulation time: ", time.time() - start) - - -if PLOT: - print(f"Total spikes: {len(net['spikes.i', 0])}") - plt.plot(net["spikes.t", 0].to("cpu"), net["spikes.i", 0].to("cpu"), ".k") - plt.show() diff --git a/benchmark/Benchmarks/Izhikevich/pymonntorch_izh_cuda.py b/benchmark/Benchmarks/Izhikevich/pymonntorch_izh_cuda.py deleted file mode 100644 index 9882723..0000000 --- a/benchmark/Benchmarks/Izhikevich/pymonntorch_izh_cuda.py +++ /dev/null @@ -1,124 +0,0 @@ -from pymonntorch import Network, SynapseGroup, NeuronGroup, Behavior, EventRecorder -import torch -import time -from globparams import * -import matplotlib.pyplot as plt - -settings = {"dtype": torch.float32, "synapse_mode": "SxD", "device": "cuda"} - - -class TimeResolution(Behavior): - def initialize(self, n): - n.dt = self.parameter("dt", 1) - - -class Izhikevich(Behavior): - def initialize(self, n): - self.a = self.parameter("a", None) - self.b = self.parameter("b", None) - self.c = self.parameter("c", None) - self.d = self.parameter("d", None) - self.threshold = self.parameter("threshold", None) - - n.u = n.vector(f"normal({U_MEAN}, {U_STD})") - n.v = n.vector(f"normal({V_MEAN}, {V_STD})") - n.spikes = n.vector(dtype=torch.bool) - - def forward(self, n): - n.spikes = n.v >= self.threshold - - n.v[n.spikes] = self.c - n.u[n.spikes] += self.d - - dv = (0.04 * n.v**2 + 5 * n.v + 140 - n.u + n.I) - du = self.a * (self.b * n.v - n.u) - - n.v += dv * n.network.dt - n.u += du * n.network.dt - - -class Dendrite(Behavior): - def initialize(self, n): - self.offset = self.parameter("offset", None) - n.I = n.vector(self.offset) - - def forward(self, n): - n.I.fill_(self.offset) - for s in n.afferent_synapses["GLU"]: - n.I += s.I - n.I += n.vector(f"normal({NOISE_MEAN}, {NOISE_STD})") - - -class STDP(Behavior): - def initialize(self, s): - self.pre_tau = self.parameter("pre_tau", None) - self.post_tau = self.parameter("post_tau", None) - self.a_plus = self.parameter("a_plus", None) - self.a_minus = self.parameter("a_minus", None) - - s.src_trace = s.src.vector() - s.dst_trace = s.dst.vector() - - def forward(self, s): - src_spikes = s.src.spikes - dst_spikes = s.dst.spikes - s.src_trace += src_spikes * 1.0 - s.src_trace / self.pre_tau * s.network.dt - s.dst_trace += dst_spikes * 1.0 - s.dst_trace / self.post_tau * s.network.dt - s.W[src_spikes] -= ( - s.dst_trace[None, ...] 
* self.a_minus * (s.W[src_spikes] - W_MIN) - ) - s.W[:, dst_spikes] += ( - s.src_trace[..., None] * self.a_plus * (W_MAX - s.W[:, dst_spikes]) - ) - # s.W = torch.clip(s.W, W_MIN, W_MAX) - - -class DiracInput(Behavior): - def initialize(self, s): - self.strength = self.parameter("strength", None) - s.I = s.dst.vector() - s.W = s.matrix("random") * W_MAX + W_MIN - # s.W.fill_diagonal_(0) - - def forward(self, s): - s.I = torch.sum(s.W[s.src.spikes], axis=0) * self.strength - - -net = Network(behavior={1: TimeResolution()}, **settings) - -NeuronGroup( - net=net, - tag="NG", - size=SIZE, - behavior={ - 1: Dendrite(offset=OFFSET), - 2: Izhikevich(a=A, b=B, c=C, d=D, threshold=THRESHOLD), - }, -) - -if PLOT: - net.NG.add_behavior(9, EventRecorder("spikes"), False) - -SynapseGroup( - net=net, - src="NG", - dst="NG", - tag="GLU", - behavior={ - 4: DiracInput(strength=DIRAC_STRENGTH), - 5: STDP(a_plus=A_PLUS, a_minus=A_MINUS, pre_tau=TRACE_TAU, post_tau=TRACE_TAU), - }, -) - - -net.initialize() - -start = time.time() -net.simulate_iterations(DURATION) -print("simulation time: ", time.time() - start) - - -if PLOT: - print(f"Total spikes: {len(net['spikes.i', 0])}") - plt.plot(net["spikes.t", 0].to("cpu"), net["spikes.i", 0].to("cpu"), ".k") - plt.show() diff --git a/benchmark/Benchmarks/Izhikevich/pynn_nest_izh.py b/benchmark/Benchmarks/Izhikevich/pynn_nest_izh.py deleted file mode 100644 index 173f943..0000000 --- a/benchmark/Benchmarks/Izhikevich/pynn_nest_izh.py +++ /dev/null @@ -1,66 +0,0 @@ -import pyNN.nest as sim -from pyNN.utility.plotting import Figure, Panel -from pyNN.random import RandomDistribution, NumpyRNG -import time -from globparams import * - -sim.setup(timestep=1, min_delay=1, max_delay=1) -rng = NumpyRNG() - -cell_type = sim.Izhikevich(a=A, b=B, c=C, d=D, i_offset=OFFSET * 10**(-3)) - -pop1 = sim.Population(size=SIZE, cellclass=cell_type, label="pop1") - -w_min = W_MIN * DIRAC_STRENGTH * 10**(-3) -w_max = W_MAX * DIRAC_STRENGTH * 10**(-3) - -stdp_model = sim.STDPMechanism( - timing_dependence=sim.SpikePairRule( - tau_plus=TRACE_TAU, - tau_minus=TRACE_TAU, - A_plus=A_PLUS, - A_minus=A_MINUS, - ), - weight_dependence=sim.MultiplicativeWeightDependence(w_min=w_min, w_max=w_max), - voltage_dependence=None, - dendritic_delay_fraction=1.0, - weight=RandomDistribution("uniform", (w_min, w_max)), - delay=None, -) - - -syn = sim.Projection( - pop1, pop1, sim.AllToAllConnector(allow_self_connections=True), stdp_model -) - -pop1.initialize( - v=RandomDistribution('normal', mu=V_MEAN, sigma=V_STD), - u=RandomDistribution('normal', mu=U_MEAN, sigma=U_STD) -) - - -noise = sim.standardmodels.electrodes.NoisyCurrentSource( - mean=NOISE_MEAN * 10**(-3), - stdev=NOISE_STD * 10**(-3), - dt=1, -) - -pop1.inject(noise) - -if PLOT: - pop1.record(["spikes"]) - -start = time.time() -sim.run(DURATION) -print("simulation time: ", time.time() - start) - - -if PLOT: - data = pop1.get_data().segments[0] - print(data.spiketrains) - Figure( - Panel(data.spiketrains, xlabel="Time (ms)", xticks=True) - ).show() - - -sim.end() \ No newline at end of file diff --git a/benchmark/Benchmarks/Operations/numpy_operations.py b/benchmark/Benchmarks/Operations/numpy_operations.py deleted file mode 100644 index c6d37a0..0000000 --- a/benchmark/Benchmarks/Operations/numpy_operations.py +++ /dev/null @@ -1,312 +0,0 @@ -import numpy as np -import time - -#heating up the CPU -#for i in range(1000): -# temp = np.random.rand(10000, 5000)*np.random.rand(10000, 5000)+np.random.rand(10000, 5000) - -t = np.float64 - -#for i in 
range(60): -for i in range(1): - measurements = [] - - steps = 1000 - - ########################################### - print('Initialization...') - ########################################### - - src = np.random.rand(5000) < 0.01 # 1% spikes - dst = np.random.rand(10000) < 0.01 # 1% spikes - W1 = np.random.rand(10000, 5000).astype(t) # dense DxS synapses - W2 = np.random.rand(5000, 10000).astype(t) # dense SxD synapses - - ########################################### - print('\nSynapse Operation...') - ########################################### - - start = time.time() - for i in range(steps): - W1.dot(src) - t1 = (time.time()-start)/steps*1000 - print(' W1.dot(s):', t1, 'ms') - measurements.append(t1) - - - start = time.time() - for i in range(steps): - np.sum(W1[:, src], axis=1) - t2 = (time.time()-start)/steps*1000 - print(' np.sum(W1[:, s], axis=1):', t2, 'ms', t1/t2, 'x ratio') - measurements.append(t2) - - - # same but with W2 (SxD) instead of W1 (DxS): - - - start = time.time() - for i in range(steps): - W2.T.dot(src) - t3 = (time.time()-start)/steps*1000 - print(' W2.T.dot(s):', t3, 'ms', t1/t3, 'x ratio') - measurements.append(t3) - - - start = time.time() - for i in range(steps): - np.sum(W2[src], axis=0) - t4 = (time.time()-start)/steps*1000 - print(' np.sum(W2[s], axis=0):', t4, 'ms', t1/t4, 'x ratio') - measurements.append(t4) - - - ########################################### - print('\nSTDP...') - ########################################### - - start = time.time() - for i in range(steps): - W1 += dst[:, None] * src[None, :] - t1 = (time.time()-start)/steps*1000 - print(' W1 += d[:, None] * s[None, :]:', t1, 'ms') - measurements.append(t1) - - - #W1[d, s] += 1 # ERROR! - - - start = time.time() - for i in range(steps): - W1[dst[:, None] * src[None, :]] += 1 - t2 = (time.time()-start)/steps*1000 - print(' W1[d[:, None] * s[None, :]] += 1:', t2, 'ms', t1/t2, 'x ratio') - measurements.append(t2) - - - start = time.time() - for i in range(steps): - W1[np.ix_(dst, src)] += 1 - t3 = (time.time()-start)/steps*1000 - print(' W1[np.ix_(d, s)] += 1:', t3, 'ms', t1/t3, 'x ratio') - measurements.append(t3) - - #same but with W2 (SxD) instead of W1 (DxS): - - start = time.time() - for i in range(steps): - W2 += src[:, None] * dst[None, :] - t1 = (time.time()-start)/steps*1000 - print(' W2 += s[:, None] * d[None, :]:', t1, 'ms') - measurements.append(t1) - - - #W1[d, s] += 1 # ERROR! - - - start = time.time() - for i in range(steps): - W2[src[:, None] * dst[None, :]] += 1 - t2 = (time.time()-start)/steps*1000 - print(' W2[s[:, None] * d[None, :]] += 1:', t2, 'ms', t1/t2, 'x ratio') - measurements.append(t2) - - - start = time.time() - for i in range(steps): - W2[np.ix_(src, dst)] += 1 - t3 = (time.time()-start)/steps*1000 - print(' W2[np.ix_(s, d)] += 1:', t3, 'ms', t1/t3, 'x ratio') - measurements.append(t3) - - - - - - - - - - - - - - -############################################################################################################################ Old - - - - """ - ########################################### - # print('\nAdvanced STDP...') - ########################################### - - ''' - To create more complex STDP functions (Figure 2b), we can follow the same approach and include two buffers (lists of vectors) - that hold the spike history of the source and destination groups (Bs and Bd). 
- Here, the 0 index denotes the most recent spikes: - ''' - - #Version B (multiple blocks) - Bd = [np.random.rand(10000) < 0.01 for _ in range(2)] - Bs = [np.random.rand(5000) < 0.01 for _ in range(3)] - - W1[np.ix_(Bd[1], Bs[0])] -= 0.4 - W1[np.ix_(Bd[0], Bs[0])] += 0.6 - W1[np.ix_(Bd[0], Bs[1])] += 1.0 - W1[np.ix_(Bd[0], Bs[2])] += 0.2 - - ''' - The method depicted in Figure 2c is also possible, but it requires additional decaying trace variables - for both the source and destination groups. - These trace variables indirectly store the history of the most recent spiking activity of these groups. - ''' - - #Version C (traces) - sTrace = np.zeros(5000) - dTrace = np.zeros(10000) - - sTrace = (sTrace + src) * 0.9 - dTrace = (dTrace + dst) * 0.9 - stMask = sTrace>0.01 - dtMask = dTrace>0.01 - W1[np.ix_(dst, stMask)] += sTrace[None, stMask] - W1[np.ix_(dtMask, src)] -= dTrace[dtMask, None] - - ''' - Note that the trace variables have to be converted to binary masks for indexing. - To improve the performance the masks have to be as sparse as possible, - hence it is necessary to cut of the trace at some point if it gets too small (here 0.01). - ''' - - - ########################################### - #print('\nClipping...') - ########################################### - - mask = np.ix_(dst, src) - W1[mask] += 1 - W1[mask] = np.clip(W1[mask], 0.1, 10.0) - - - ########################################### - #print('\nNormalization...') - ########################################### - - iteration = 100 - - if iteration % 100 == 0: - W1 /= np.sum(W1, axis=1)[:, None] # afferent - W1 /= np.sum(W1, axis=0) # efferent - - - ''' - One way to accelerate the nomrmalization is to create a variable that tracks the sum of the rows or columns - so that the summation operation need not be computed every time. - Another approach is to apply indexing to normalize only the required rows and columns: - ''' - - # initialization - eff_sum = np.sum(W1, axis=0) - - # STDP sparse update sum - eff_sum[src] += np.sum(W1[:, src], axis=0) - - # Norm - mask = eff_sum > 1 - W1[:, mask] /= eff_sum[mask] # efferent norm - eff_sum[mask].fill(1) - - ''' - We can apply the same technique to afferent synapses. The major limitation here is that this method only works - for either afferent or efferent normalization but not both simultaneously. - A minor issue is that the variable could potentially drift over time depending on its accuracy. However, - this can be mitigated by calling the ``initialization'' function periodically. - We are using the DxS synapse matrix here. For the SxD version, we need to swap the afferent and efferent operations. - ''' - - """ - ########################################### - print('\nReset operation...') - ########################################### - - ''' - When optimizing a network, this synaptic mechanisms are obviously the most promising target. - However, there are additional steps to optimize a network simulation. - If we want to zero some neuron or synapse properties repeatedly, one common way is something like this: - ''' - - steps = 100000 - voltage = np.random.rand(5000) - - start = time.time() - for i in range(steps): - voltage = voltage * 0.0 - t1 = (time.time()-start)/steps*1000 - print(' voltage = voltage * 0.0:', t1, 'ms') - measurements.append(t1) - - ''' - However, this approach involves multiplication, which can be expensive and unnecessary for the given task. 
- A better alternative is to create a new variable: - ''' - - start = time.time() - for i in range(steps): - voltage = np.zeros(5000) - t2 = (time.time()-start)/steps*1000 - print(' voltage = np.zeros(5000, dtype=dtype):', t2, 'ms', t1/t2, 'x ratio') - measurements.append(t2) - - ''' - This approach is still sub optimal because we have to allocate new memory during each iteration. - The best method here is to use the existing memory section and overwrite it with a new value: - ''' - - start = time.time() - for i in range(steps): - voltage.fill(0) - t3 = (time.time()-start)/steps*1000 - print(' voltage.fill(0):', t3, 'ms', t1/t3, 'x ratio') - measurements.append(t3) - - ''' - In the previous normalization code example, we also observed how the .fill() - function can be combined with a masked operation, - which can further accelerate the process if the filling is sparse enough or if we already have a pre-computed mask. - ''' - - ########################################### - print('\nDatatypes...') - ########################################### - steps = 1000 - - W2 = W2.astype(np.float64) - start = time.time() - for i in range(steps): - #W1[np.ix_(d, s)] += 1 - np.sum(W2[src], axis=0) - t1 = (time.time()-start)/steps*1000 - print(' float64:', t1, 'ms') - measurements.append(t1) - - - W2 = W2.astype(np.float32) - start = time.time() - for i in range(steps): - #W1[np.ix_(d, s)] += 1 - np.sum(W2[src], axis=0) - t2 = (time.time()-start)/steps*1000 - print(' float32:', t2, 'ms', t1/t2, 'x ratio') - measurements.append(t2) - - - #can create a overflow sometimes because the synapses are relatively big. - W2 = W2.astype(np.float16) - start = time.time() - for i in range(steps): - #W1[np.ix_(d, s)] += 1 - np.sum(W2[src], axis=0) - t3 = (time.time()-start)/steps*1000 - print(' float16:', t3, 'ms', t1/t3, 'x ratio') - measurements.append(t3) \ No newline at end of file diff --git a/benchmark/Benchmarks/Operations/torch_operations_cpu.py b/benchmark/Benchmarks/Operations/torch_operations_cpu.py deleted file mode 100644 index 680d4d5..0000000 --- a/benchmark/Benchmarks/Operations/torch_operations_cpu.py +++ /dev/null @@ -1,188 +0,0 @@ -import torch -import time - -#heating up the CPU -#for i in range(1000): -# temp = np.random.rand(10000, 5000)*np.random.rand(10000, 5000)+np.random.rand(10000, 5000) - -d = 'cpu' -t = torch.float64 - -#for i in range(60): -for i in range(1): - measurements = [] - - steps = 1000 - - ########################################### - print('Initialization...') - ########################################### - - src = torch.rand(5000, device=d) < 0.01 # d = 'cpu' or 'gpu' - dst = torch.rand(10000, device=d) < 0.01 - W1 = torch.rand(10000, 5000, device=d, dtype=t) - W2 = torch.rand(5000, 10000, device=d, dtype=t) - - ########################################### - print('\nSynapse Operation...') - ########################################### - - start = time.time() - for i in range(steps): - torch.tensordot(W1, src.to(t), dims=([1],[0])) - t1 = (time.time()-start)/steps*1000 - print(' W1.dot(s):', t1, 'ms') - measurements.append(t1) - - - start = time.time() - for i in range(steps): - torch.sum(W1[:, src], dim=1) - t2 = (time.time()-start)/steps*1000 - print(' np.sum(W1[:, s], axis=1):', t2, 'ms', t1/t2, 'x ratio') - measurements.append(t2) - - - # same but with W2 (SxD) instead of W1 (DxS): - - - start = time.time() - for i in range(steps): - torch.tensordot(W2.T, src.to(t), dims=([1], [0])) - #W2.T.dot(src) - t3 = (time.time()-start)/steps*1000 - print(' W2.T.dot(s):', t3, 'ms', 
t1/t3, 'x ratio') - measurements.append(t3) - - - start = time.time() - for i in range(steps): - torch.sum(W2[src], axis=0) - t4 = (time.time()-start)/steps*1000 - print(' np.sum(W2[s], axis=0):', t4, 'ms', t1/t4, 'x ratio') - measurements.append(t4) - - - ########################################### - print('\nSTDP...') - ########################################### - - start = time.time() - for i in range(steps): - W1 += dst[:, None] * src[None, :] - t1 = (time.time()-start)/steps*1000 - print(' W1 += d[:, None] * s[None, :]:', t1, 'ms') - measurements.append(t1) - - - #W1[d, s] += 1 # ERROR! - - - start = time.time() - for i in range(steps): - W1[dst[:, None] * src[None, :]] += 1 - t2 = (time.time()-start)/steps*1000 - print(' W1[d[:, None] * s[None, :]] += 1:', t2, 'ms', t1/t2, 'x ratio') - measurements.append(t2) - - - start = time.time() - for i in range(steps): - W1[(torch.where(dst)[0].view(-1, 1), torch.where(src)[0].view(1, -1))] += 1 - #W1[np.ix_(dst, src)] += 1 - t3 = (time.time()-start)/steps*1000 - print(' W1[np.ix_(d, s)] += 1:', t3, 'ms', t1/t3, 'x ratio') - measurements.append(t3) - - #same but with W2 (SxD) instead of W1 (DxS): - - start = time.time() - for i in range(steps): - W2 += src[:, None] * dst[None, :] - t1 = (time.time()-start)/steps*1000 - print(' W2 += s[:, None] * d[None, :]:', t1, 'ms') - measurements.append(t1) - - - #W1[d, s] += 1 # ERROR! - - - start = time.time() - for i in range(steps): - W2[src[:, None] * dst[None, :]] += 1 - t2 = (time.time()-start)/steps*1000 - print(' W2[s[:, None] * d[None, :]] += 1:', t2, 'ms', t1/t2, 'x ratio') - measurements.append(t2) - - - start = time.time() - for i in range(steps): - W2[(torch.where(src)[0].view(-1, 1), torch.where(dst)[0].view(1, -1))] += 1 - #W2[np.ix_(src, dst)] += 1 - t3 = (time.time()-start)/steps*1000 - print(' W2[np.ix_(s, d)] += 1:', t3, 'ms', t1/t3, 'x ratio') - measurements.append(t3) - - - - ########################################### - print('\nReset operation...') - ########################################### - - steps = 100000 - voltage = torch.rand(5000, device=d, dtype=t) - - start = time.time() - for i in range(steps): - voltage = voltage * 0.0 - t1 = (time.time()-start)/steps*1000 - print(' voltage = voltage * 0.0:', t1, 'ms') - measurements.append(t1) - - - start = time.time() - for i in range(steps): - voltage = torch.zeros(5000, device=d, dtype=t) - t2 = (time.time()-start)/steps*1000 - print(' voltage = torch.zeros(5000, dtype=dtype):', t2, 'ms', t1/t2, 'x ratio') - measurements.append(t2) - - - start = time.time() - for i in range(steps): - voltage.fill_(0) - t3 = (time.time()-start)/steps*1000 - print(' voltage.fill(0):', t3, 'ms', t1/t3, 'x ratio') - measurements.append(t3) - - - ########################################### - print('\nDatatypes...') - ########################################### - steps = 1000 - - W2 = W2.to(torch.float64) - start = time.time() - for i in range(steps): - torch.sum(W2[src], axis=0) - t1 = (time.time()-start)/steps*1000 - print(' float64:', t1, 'ms') - measurements.append(t1) - - - W2 = W2.to(torch.float32) - start = time.time() - for i in range(steps): - torch.sum(W2[src], axis=0) - t2 = (time.time()-start)/steps*1000 - print(' float32:', t2, 'ms', t1/t2, 'x ratio') - measurements.append(t2) - - - W2 = W2.to(torch.float16) - start = time.time() - for i in range(steps): - torch.sum(W2[src], axis=0) - t3 = (time.time()-start)/steps*1000 - print(' float16:', t3, 'ms', t1/t3, 'x ratio') - measurements.append(t3) \ No newline at end of file diff --git 
a/benchmark/Benchmarks/Operations/torch_operations_cuda.py b/benchmark/Benchmarks/Operations/torch_operations_cuda.py deleted file mode 100644 index 2b275f3..0000000 --- a/benchmark/Benchmarks/Operations/torch_operations_cuda.py +++ /dev/null @@ -1,204 +0,0 @@ -import torch -import time - -#heating up the CPU -#for i in range(1000): -# temp = np.random.rand(10000, 5000)*np.random.rand(10000, 5000)+np.random.rand(10000, 5000) - -d = 'cuda' -t = torch.float64 - -#for i in range(60): -for i in range(1): - measurements = [] - - steps = 1000 - - ########################################### - print('Initialization...') - ########################################### - - src = torch.rand(5000, device=d) < 0.01 # d = 'cpu' or 'gpu' - dst = torch.rand(10000, device=d) < 0.01 - W1 = torch.rand(10000, 5000, device=d, dtype=t) - W2 = torch.rand(5000, 10000, device=d, dtype=t) - - ########################################### - print('\nSynapse Operation...') - ########################################### - - start = time.time() - for i in range(steps): - torch.tensordot(W1, src.to(t), dims=([1],[0])) - torch.cuda.synchronize() - t1 = (time.time()-start)/steps*1000 - print(' W1.dot(s):', t1, 'ms') - measurements.append(t1) - - - start = time.time() - for i in range(steps): - torch.sum(W1[:, src], dim=1) - torch.cuda.synchronize() - t2 = (time.time()-start)/steps*1000 - print(' np.sum(W1[:, s], axis=1):', t2, 'ms', t1/t2, 'x ratio') - measurements.append(t2) - - - # same but with W2 (SxD) instead of W1 (DxS): - - - start = time.time() - for i in range(steps): - torch.tensordot(W2.T, src.to(t), dims=([1], [0])) - torch.cuda.synchronize() - #W2.T.dot(src) - t3 = (time.time()-start)/steps*1000 - print(' W2.T.dot(s):', t3, 'ms', t1/t3, 'x ratio') - measurements.append(t3) - - - start = time.time() - for i in range(steps): - torch.sum(W2[src], axis=0) - torch.cuda.synchronize() - t4 = (time.time()-start)/steps*1000 - print(' np.sum(W2[s], axis=0):', t4, 'ms', t1/t4, 'x ratio') - measurements.append(t4) - - - ########################################### - print('\nSTDP...') - ########################################### - - start = time.time() - for i in range(steps): - W1 += dst[:, None] * src[None, :] - torch.cuda.synchronize() - t1 = (time.time()-start)/steps*1000 - print(' W1 += d[:, None] * s[None, :]:', t1, 'ms') - measurements.append(t1) - - - #W1[d, s] += 1 # ERROR! - - - start = time.time() - for i in range(steps): - W1[dst[:, None] * src[None, :]] += 1 - torch.cuda.synchronize() - t2 = (time.time()-start)/steps*1000 - print(' W1[d[:, None] * s[None, :]] += 1:', t2, 'ms', t1/t2, 'x ratio') - measurements.append(t2) - - - start = time.time() - for i in range(steps): - W1[(torch.where(dst)[0].view(-1, 1), torch.where(src)[0].view(1, -1))] += 1 - #W1[np.ix_(dst, src)] += 1 - torch.cuda.synchronize() - t3 = (time.time()-start)/steps*1000 - print(' W1[np.ix_(d, s)] += 1:', t3, 'ms', t1/t3, 'x ratio') - measurements.append(t3) - - #same but with W2 (SxD) instead of W1 (DxS): - - start = time.time() - for i in range(steps): - W2 += src[:, None] * dst[None, :] - torch.cuda.synchronize() - t1 = (time.time()-start)/steps*1000 - print(' W2 += s[:, None] * d[None, :]:', t1, 'ms') - measurements.append(t1) - - - #W1[d, s] += 1 # ERROR! 
- - - start = time.time() - for i in range(steps): - W2[src[:, None] * dst[None, :]] += 1 - torch.cuda.synchronize() - t2 = (time.time()-start)/steps*1000 - print(' W2[s[:, None] * d[None, :]] += 1:', t2, 'ms', t1/t2, 'x ratio') - measurements.append(t2) - - - start = time.time() - for i in range(steps): - W2[(torch.where(src)[0].view(-1, 1), torch.where(dst)[0].view(1, -1))] += 1 - #W2[np.ix_(src, dst)] += 1 - torch.cuda.synchronize() - t3 = (time.time()-start)/steps*1000 - print(' W2[np.ix_(s, d)] += 1:', t3, 'ms', t1/t3, 'x ratio') - measurements.append(t3) - - - - ########################################### - print('\nReset operation...') - ########################################### - - steps = 100000 - voltage = torch.rand(5000, device=d, dtype=t) - - start = time.time() - for i in range(steps): - voltage = voltage * 0.0 - torch.cuda.synchronize() - t1 = (time.time()-start)/steps*1000 - print(' voltage = voltage * 0.0:', t1, 'ms') - measurements.append(t1) - - - start = time.time() - for i in range(steps): - voltage = torch.zeros(5000, device=d, dtype=t) - torch.cuda.synchronize() - t2 = (time.time()-start)/steps*1000 - print(' voltage = torch.zeros(5000, dtype=dtype):', t2, 'ms', t1/t2, 'x ratio') - measurements.append(t2) - - - start = time.time() - for i in range(steps): - voltage.fill_(0) - torch.cuda.synchronize() - t3 = (time.time()-start)/steps*1000 - print(' voltage.fill(0):', t3, 'ms', t1/t3, 'x ratio') - measurements.append(t3) - - - ########################################### - print('\nDatatypes...') - ########################################### - steps = 1000 - - W2 = W2.to(torch.float64) - start = time.time() - for i in range(steps): - torch.sum(W2[src], axis=0) - torch.cuda.synchronize() - t1 = (time.time()-start)/steps*1000 - print(' float64:', t1, 'ms') - measurements.append(t1) - - - W2 = W2.to(torch.float32) - start = time.time() - for i in range(steps): - torch.sum(W2[src], axis=0) - torch.cuda.synchronize() - t2 = (time.time()-start)/steps*1000 - print(' float32:', t2, 'ms', t1/t2, 'x ratio') - measurements.append(t2) - - - W2 = W2.to(torch.float16) - start = time.time() - for i in range(steps): - torch.sum(W2[src], axis=0) - torch.cuda.synchronize() - t3 = (time.time()-start)/steps*1000 - print(' float16:', t3, 'ms', t1/t3, 'x ratio') - measurements.append(t3) diff --git a/benchmark/Benchmarks/Simple/brian_LIF.py b/benchmark/Benchmarks/Simple/brian_LIF.py deleted file mode 100644 index c230a77..0000000 --- a/benchmark/Benchmarks/Simple/brian_LIF.py +++ /dev/null @@ -1,53 +0,0 @@ -from brian2 import * -import time -from globparams import * - -defaultclock.dt = 1*ms -prefs.core.default_float_dtype = float32 - -eqs_neurons = ''' -dv/dt = (ge + rand() - v * OM_DECAY) / (1*ms) : 1 -dge/dt = -ge / (1*ms) : 1 -dspiked/dt = -spiked / (1*ms) : 1 -''' - -N = NeuronGroup(SIZE, eqs_neurons, threshold='v>VT', reset='v = VR', method='euler') - -synaptic_model = ''' -w : 1 -''' - -pre = ''' -ge_post += w -spiked_pre = 1 -''' - -post = ''' -w = clip(w + spiked_pre * STDP_SPEED, 0.0, 1.0) -''' - -S = Synapses(N, N, synaptic_model, on_pre=pre, on_post=post) - -S.connect() -S.w = 'rand()/SIZE' #initialize -#S.w /= sum(S.w, axis=0) #normalize - -if PLOT: - M = SpikeMonitor(N) - - -start = time.time() -run(DURATION*ms, report='text') -print("simulation time: ", time.time()-start) - -if PLOT: - plot(M.t/ms, M.i, '.') - show() - - - -#@network_operation(when='start', dt=10*ms)#, dt=10*ms -#def syn_norm(): -# print('test') -# S2.w /= sum(S2.w, axis=0) - diff --git 
a/benchmark/Benchmarks/Simple/brian_LIF_cpp.py b/benchmark/Benchmarks/Simple/brian_LIF_cpp.py deleted file mode 100644 index 765c5f0..0000000 --- a/benchmark/Benchmarks/Simple/brian_LIF_cpp.py +++ /dev/null @@ -1,47 +0,0 @@ -from brian2 import * -import time -from globparams import * - -set_device('cpp_standalone') - -defaultclock.dt = 1*ms -prefs.core.default_float_dtype = float32 -prefs.codegen.target = 'cython' - -eqs_neurons = ''' -dv/dt = (ge + rand() - v*OM_DECAY) / (1*ms) : 1 -dge/dt = -ge / (1*ms) : 1 -dspiked/dt = -spiked / (1*ms) : 1 -''' - -N = NeuronGroup(SIZE, eqs_neurons, threshold='v>VT', reset='v = VR', method='euler') - -synaptic_model = ''' -w : 1 -''' - -pre = ''' -ge_post += w -spiked_pre = 1 -''' - -post = ''' -w = clip(w + spiked_pre * STDP_SPEED, 0.0, 1.0) -''' - -S = Synapses(N, N, synaptic_model, on_pre=pre, on_post=post) - -S.connect() -S.w = 'rand()/SIZE' #initialize -#S.w /= sum(S.w, axis=0) #normalize - -if PLOT: - M = SpikeMonitor(N) - - -run(DURATION*ms, report=REPORT_FUNC) - - -if PLOT: - plot(M.t/ms, M.i, '.') - show() diff --git a/benchmark/Benchmarks/Simple/brian_LIF_gpu.py b/benchmark/Benchmarks/Simple/brian_LIF_gpu.py deleted file mode 100644 index f833fcc..0000000 --- a/benchmark/Benchmarks/Simple/brian_LIF_gpu.py +++ /dev/null @@ -1,62 +0,0 @@ -from brian2 import * -import time -import platform -from globparams import * -import brian2cuda -#import brian2genn -#set_device('genn', use_GPU=True, debug=True) -set_device("cuda_standalone", clean=True) - -defaultclock.dt = 1*ms -prefs.core.default_float_dtype = float32 - -if platform.node() == 'saeed-Swift-SF315-51G': - prefs.devices.cuda_standalone.cuda_backend.detect_gpus = False - prefs.devices.cuda_standalone.cuda_backend.gpu_id = 0 - prefs.devices.cuda_standalone.cuda_backend.compute_capability = 6.1 - prefs.devices.cuda_standalone.default_functions_integral_convertion = np.float32 - -vt = 6.1 -vr = 0.0 -input_strength = 1.0 -stdp_speed = 0.001 -decay = 1 - 0.9 - - -eqs_neurons = ''' -dv/dt = (ge + rand() - v*OM_DECAY) / (1*ms) : 1 -dge/dt = -ge / (1*ms) : 1 -dspiked/dt = -spiked / (1*ms) : 1 -''' - -N = NeuronGroup(SIZE, eqs_neurons, threshold='v>VT', reset='v = VR', method='euler') - -synaptic_model = ''' -w : 1 -''' - -pre = ''' -ge_post += w -spiked_pre = 1 -''' - -post = ''' -w = clip(w + spiked_pre * STDP_SPEED, 0.0, 1.0) -''' - -S = Synapses(N, N, synaptic_model, on_pre=pre, on_post=post) - -S.connect() -S.w = 'rand()/SIZE' #initialize -#S.w /= sum(S.w, axis=0) #normalize - -if PLOT: - M = SpikeMonitor(N) - - -run(DURATION*ms, report=REPORT_FUNC) - -if PLOT: - plot(M.t/ms, M.i, '.') - show() - diff --git a/benchmark/Benchmarks/Simple/globparams.py b/benchmark/Benchmarks/Simple/globparams.py deleted file mode 100644 index e268f98..0000000 --- a/benchmark/Benchmarks/Simple/globparams.py +++ /dev/null @@ -1,15 +0,0 @@ -import sys -PLOT = not 'no_plot' in sys.argv - -DURATION = 100 -SIZE = 7500 - -VT = 6.1 -VR = 0.0 -STDP_SPEED = 0.001 -DECAY = 0.9 -OM_DECAY = 1 - DECAY - -REPORT_FUNC = ''' - if (completed == 1.0) std::cout << "simulation time: " << elapsed << std::endl << std::flush; -''' diff --git a/benchmark/Benchmarks/Simple/nest_native_LIF.py b/benchmark/Benchmarks/Simple/nest_native_LIF.py deleted file mode 100644 index 4dadbb2..0000000 --- a/benchmark/Benchmarks/Simple/nest_native_LIF.py +++ /dev/null @@ -1,150 +0,0 @@ -import nest -import matplotlib.pyplot as plt -import numpy as np -import os -import time -from globparams import * - -from pynestml.codegeneration.nest_code_generator_utils 
import NESTCodeGeneratorUtils - -############################################################# -#model generation -############################################################# - -simple_neuron_str = """ -neuron simple_neuron: - - state: - v mV = 0 mV - - equations: - v' = (-v * decay) / ms - - parameters: - vt real = 6.1 - vr real = 0.0 - input_strength real = 1.0 - decay real = 0.1 - - - input: - spikes mV <- spike - - output: - spike - - update: - # threshold crossing - if v >= vt * mV: - v = vr * mV - emit_spike() - - integrate_odes() - - v += random_uniform(0,1) * mV + spikes * input_strength -""" - -simple_stdp_synapse = """ -synapse stdp_nn_symm: - state: - w real = 1. - pre_trace real = 0. - - parameters: - d ms = 1 ms @nest::delay - tau_tr_pre ms = 1 ms - stdp_speed real = 0.01 - - equations: - pre_trace' = -pre_trace / tau_tr_pre - - input: - pre_spikes real <- spike - post_spikes real <- spike - - output: - spike - - onReceive(post_spikes): - if pre_trace>0: - w += stdp_speed * pre_trace - - onReceive(pre_spikes): - pre_trace = 1 - w = w - deliver_spike(w, d) -""" - -module_name, neuron_model_name, synapse_model_name = NESTCodeGeneratorUtils.generate_code_for( - simple_neuron_str, - simple_stdp_synapse, - post_ports=["post_spikes"]) - -nest.Install(module_name) - - -############################################################# -#simulation -############################################################# - - -# Set up the NEST simulation -nest.ResetKernel() -nest.SetKernelStatus({'resolution': 1.0, 'print_time': False}) - -# Define parameters -num_neurons = SIZE -simulation_time = DURATION # ms -dt = 1.0 # ms - -# Create neurons -neuron_params = {'vt': VT, - 'vr': VR, - 'decay': OM_DECAY} -neurons = nest.Create(neuron_model_name, num_neurons, params=neuron_params) - -# Create synapses -synapse_params = {'synapse_model': synapse_model_name, - 'w': nest.random.uniform(min=0.0, max=1.0/num_neurons), - 'stdp_speed': STDP_SPEED} -nest.Connect(neurons, neurons, 'all_to_all', synapse_params) - - -#add voltage fluctuations to neurons - -# for i in range(num_neurons): -# times = list(np.arange(1.0, 101.0, 1.0)) -# values = list(np.random.rand(int(simulation_time))) -# ng = nest.Create('step_current_generator') -# ng.set({"amplitude_times": times, "amplitude_values": values}) -# nest.Connect(ng, neurons[i]) - -#random noise -#times = list(np.arange(1.0, 100.0, 1.0)) -#values = list(np.random.rand(99)) -#ng = nest.Create('step_current_generator', num_neurons) -#ng.set({"amplitude_times": times, "amplitude_values": nest.random.uniform(min=0.0, max=1.0)}) -#nest.Connect(ng, neurons, "one_to_one") - -if PLOT: - sr = nest.Create("spike_recorder") - nest.Connect(neurons, sr) - - -#print(f"Start time: {time.time()}") -#start = time.time() -nest.Simulate(1/dt) -#print(time.time()-start) -#print(f"End time: {time.time()}") - -start = time.time() -nest.Simulate(simulation_time - 1/dt) -print("simulation time: ", time.time() - start) - -if PLOT: - spike_rec=nest.GetStatus(sr, keys='events')[0] - print(f"Total spikes: {len(spike_rec['times'])}") - plt.plot(spike_rec['times'], spike_rec['senders'], '.k') - plt.ylabel("neurons") - plt.xlabel("t") - plt.show() diff --git a/benchmark/Benchmarks/Simple/pymonnto_fast_LIF.py b/benchmark/Benchmarks/Simple/pymonnto_fast_LIF.py deleted file mode 100644 index bdef417..0000000 --- a/benchmark/Benchmarks/Simple/pymonnto_fast_LIF.py +++ /dev/null @@ -1,78 +0,0 @@ -from PymoNNto import * -import time -from globparams import * - -settings = {'dtype': float32, 
'synapse_mode': SxD} - - -class SpikeGeneration(Behavior): - def initialize(self, neurons): - neurons.spikes = neurons.vector('bool') - neurons.spikesOld = neurons.vector('bool') - neurons.voltage = neurons.vector() - self.threshold = self.parameter('threshold') - self.decay = self.parameter('decay') - - def iteration(self, neurons): - neurons.spikesOld = neurons.spikes.copy() - neurons.spikes = neurons.voltage > self.threshold - #print(np.sum(neurons.spikes))# number of active neurons around 1.5% - #neurons.voltage.fill(0.0) - neurons.voltage *= np.invert(neurons.spikes) #reset VR - neurons.voltage *= self.decay #voltage decay - - - -class Input(Behavior): - def initialize(self, neurons): - for s in neurons.synapses(afferent, 'GLU'): - s.W = s.matrix('random') - s.W = s.W / SIZE - # s.W /= np.sum(s.W, axis=0) #normalize during initialization - - def iteration(self, neurons): - neurons.voltage += neurons.vector('random') - for s in neurons.synapses(afferent, 'GLU'): - input = np.sum(s.W[s.src.spikes], axis=0) - s.dst.voltage += input - - -class STDP(Behavior): - def initialize(self, neurons): - self.speed = self.parameter('speed') - - def iteration(self, neurons): - for s in neurons.synapses(afferent, 'GLU'): - mask = np.ix_(s.src.spikesOld, s.dst.spikes) - s.W[mask] += self.speed - s.W[mask] = np.clip(s.W[mask], 0.0, 1.0) - - -#class Norm(Behavior): -# def iteration(self, neurons): -# if neurons.iteration % 10 == 9: -# for s in neurons.synapses(afferent, 'GLU'): -# s.W /= np.sum(s.W, axis=0) - - -net = Network(settings=settings) -NeuronGroup(net, tag='NG', size=SIZE, behavior={ - 1: SpikeGeneration(threshold=VT, decay=DECAY), - 2: Input(), - 3: STDP(speed=STDP_SPEED), - #4: Norm(), -}) - -if PLOT: - net.NG.add_behavior(9, EventRecorder('spikes'), False) - -SynapseGroup(net, src='NG', dst='NG', tag='GLU') -net.initialize() - -start = time.time() -net.simulate_iterations(DURATION) -print("simulation time: ", time.time()-start) - -if PLOT: - plt.plot(net['spikes.t', 0], net['spikes.i', 0], '.k') - plt.show() diff --git a/benchmark/Benchmarks/Simple/pymonnto_slow_LIF.py b/benchmark/Benchmarks/Simple/pymonnto_slow_LIF.py deleted file mode 100644 index 80cde0e..0000000 --- a/benchmark/Benchmarks/Simple/pymonnto_slow_LIF.py +++ /dev/null @@ -1,75 +0,0 @@ -from PymoNNto import * -import time -from globparams import * - -settings = {'dtype': float64, 'synapse_mode': DxS} - - -class SpikeGeneration(Behavior): - def initialize(self, neurons): - neurons.spikes = neurons.vector('bool') - neurons.spikesOld = neurons.vector('bool') - neurons.voltage = neurons.vector() - self.threshold = self.parameter('threshold') - self.decay = self.parameter('decay') - - def iteration(self, neurons): - neurons.spikesOld = neurons.spikes.copy() - neurons.spikes = neurons.voltage > self.threshold - #print(np.sum(neurons.spikes)) number of active neurons around 1.5% - # neurons.voltage.fill(0.0) - neurons.voltage *= np.invert(neurons.spikes) #reset - neurons.voltage *= self.decay #voltage decay - - -class Input(Behavior): - def initialize(self, neurons): - for s in neurons.synapses(afferent, 'GLU'): - s.W = s.matrix('random') - s.W = s.W / SIZE - # s.W /= np.sum(s.W, axis=0)################################## - - def iteration(self, neurons): - neurons.voltage += neurons.vector('random') - for s in neurons.synapses(afferent, 'GLU'): - input = s.W.dot(s.src.spikes) - s.dst.voltage += input - - -class STDP(Behavior): - def initialize(self, neurons): - self.speed = self.parameter('speed') - - def iteration(self, neurons): - 
for s in neurons.synapses(afferent, 'GLU'): - s.W += s.dst.spikes[:, None] * s.src.spikesOld[None, :] * self.speed - s.W = np.clip(s.W, 0.0, 1.0) - - -#class Norm(Behavior): -# def iteration(self, neurons): -# for s in neurons.synapses(afferent, 'GLU'): -# s.W /= np.sum(s.W, axis=0) - - -net = Network(settings=settings) -NeuronGroup(net, tag='NG', size=SIZE, behavior={ - 1: SpikeGeneration(threshold=VT, decay=DECAY), - 2: Input(), - 3: STDP(speed=STDP_SPEED), - #4: Norm() -}) - -if PLOT: - net.NG.add_behavior(9, EventRecorder('spikes'), False) - -SynapseGroup(net, src='NG', dst='NG', tag='GLU') -net.initialize() - -start = time.time() -net.simulate_iterations(DURATION) -print("simulation time: ", time.time()-start) - -if PLOT: - plt.plot(net['spikes.t', 0], net['spikes.i', 0], '.k') - plt.show() diff --git a/benchmark/Benchmarks/Simple/pymonntorch_fast_LIF_cpu.py b/benchmark/Benchmarks/Simple/pymonntorch_fast_LIF_cpu.py deleted file mode 100644 index fba5600..0000000 --- a/benchmark/Benchmarks/Simple/pymonntorch_fast_LIF_cpu.py +++ /dev/null @@ -1,83 +0,0 @@ -from pymonntorch import * -import time -import torch -from matplotlib import pyplot as plt -import numpy as np -from globparams import * - -settings = {'dtype': torch.float32, 'synapse_mode': "SxD", 'device': 'cpu'} - - -class SpikeGeneration(Behavior): - def initialize(self, neurons): - neurons.spikes = neurons.vector(dtype=torch.bool) - neurons.spikesOld = neurons.vector(dtype=torch.bool) - neurons.voltage = neurons.vector() - self.threshold = self.parameter('threshold', None) - self.decay = self.parameter('decay', None) - - def forward(self, neurons): - neurons.spikesOld = neurons.spikes.clone() - neurons.spikes = neurons.voltage > self.threshold - #print(np.sum(neurons.spikes))# number of active neurons around 1.5% - #neurons.voltage.fill(0.0) - neurons.voltage *= ~neurons.spikes #reset - neurons.voltage *= self.decay #voltage decay - - - -class Input(Behavior): - def initialize(self, neurons): - for s in neurons.synapses('afferent', 'GLU'): - s.W = s.matrix('random') - s.W = s.W / SIZE - # s.W /= torch.sum(s.W, axis=0) #normalize during initialization - - def forward(self, neurons): - neurons.voltage += neurons.vector('random') - for s in neurons.synapses('afferent', 'GLU'): - input = torch.sum(s.W[s.src.spikes], axis=0) - s.dst.voltage += input - - -class STDP(Behavior): - def initialize(self, neurons): - self.speed = self.parameter('speed', None) - - def forward(self, neurons): - for s in neurons.synapses('afferent', 'GLU'): - # mask = np.ix_(s.src.spikesOld, s.dst.spikes) - mask = (torch.where(s.src.spikesOld)[0].view(-1, 1), torch.where(s.dst.spikes)[0].view(1, -1)) - s.W[mask] += self.speed - s.W[mask] = torch.clip(s.W[mask], 0.0, 1.0) - - -#class Norm(Behavior): -# def iteration(self, neurons): -# if neurons.iteration % 10 == 9: -# for s in neurons.synapses(afferent, 'GLU'): -# s.W /= np.sum(s.W, axis=0) - - -net = Network(**settings) -NeuronGroup(net=net, tag='NG', size=SIZE, behavior={ - 1: SpikeGeneration(threshold=VT, decay=DECAY), - 2: Input(), - 3: STDP(speed=STDP_SPEED), - #4: Norm(), - #5: EventRecorder(['spikes']) -}) - -if PLOT: - net.NG.add_behavior(9, EventRecorder('spikes'), False) - -SynapseGroup(net=net, src='NG', dst='NG', tag='GLU') -net.initialize() - -start = time.time() -net.simulate_iterations(DURATION) -print("simulation time: ", time.time()-start) - -if PLOT: - plt.plot(net['spikes.t', 0], net['spikes.i', 0], '.k') - plt.show() diff --git a/benchmark/Benchmarks/Simple/pymonntorch_fast_LIF_cuda.py 
b/benchmark/Benchmarks/Simple/pymonntorch_fast_LIF_cuda.py deleted file mode 100644 index c4fb247..0000000 --- a/benchmark/Benchmarks/Simple/pymonntorch_fast_LIF_cuda.py +++ /dev/null @@ -1,82 +0,0 @@ -from pymonntorch import * -import time -import torch -from matplotlib import pyplot as plt -import numpy as np -from globparams import * - -settings = {'dtype': torch.float32, 'synapse_mode': "SxD", 'device': 'cuda'} - - -class SpikeGeneration(Behavior): - def initialize(self, neurons): - neurons.spikes = neurons.vector(dtype=torch.bool) - neurons.spikesOld = neurons.vector(dtype=torch.bool) - neurons.voltage = neurons.vector() - self.threshold = self.parameter('threshold', None) - self.decay = self.parameter('decay', None) - - def forward(self, neurons): - neurons.spikesOld = neurons.spikes.clone() - neurons.spikes = neurons.voltage > self.threshold - #print(np.sum(neurons.spikes))# number of active neurons around 1.5% - #neurons.voltage.fill(0.0) - neurons.voltage *= ~neurons.spikes #reset - neurons.voltage *= self.decay #voltage decay - - - -class Input(Behavior): - def initialize(self, neurons): - for s in neurons.synapses('afferent', 'GLU'): - s.W = s.matrix('random') - s.W = s.W / SIZE - # s.W /= torch.sum(s.W, axis=0) #normalize during initialization - - def forward(self, neurons): - neurons.voltage += neurons.vector('random') - for s in neurons.synapses('afferent', 'GLU'): - input = torch.sum(s.W[s.src.spikes], axis=0) - s.dst.voltage += input - - -class STDP(Behavior): - def initialize(self, neurons): - self.speed = self.parameter('speed', None) - - def forward(self, neurons): - for s in neurons.synapses('afferent', 'GLU'): - mask = (torch.where(s.src.spikesOld)[0].view(-1, 1), torch.where(s.dst.spikes)[0].view(1, -1)) - s.W[mask] += self.speed - s.W[mask] = torch.clip(s.W[mask], 0.0, 1.0) - - -#class Norm(Behavior): -# def iteration(self, neurons): -# if neurons.iteration % 10 == 9: -# for s in neurons.synapses(afferent, 'GLU'): -# s.W /= np.sum(s.W, axis=0) - - -net = Network(**settings) -NeuronGroup(net=net, tag='NG', size=SIZE, behavior={ - 1: SpikeGeneration(threshold=VT, decay=DECAY), - 2: Input(), - 3: STDP(speed=STDP_SPEED), - #4: Norm(), - #5: EventRecorder(['spikes']) -}) - -if PLOT: - net.NG.add_behavior(9, EventRecorder('spikes'), False) - -SynapseGroup(net=net, src='NG', dst='NG', tag='GLU') -net.initialize() - -start = time.time() -net.simulate_iterations(DURATION) -print("simulation time: ", time.time()-start) - -if PLOT: - plt.plot(net['spikes.t', 0].cpu(), net['spikes.i', 0].cpu(), '.k') - plt.show() diff --git a/benchmark/Benchmarks/Simple/pymonntorch_slow_LIF_cpu.py b/benchmark/Benchmarks/Simple/pymonntorch_slow_LIF_cpu.py deleted file mode 100644 index 71caea5..0000000 --- a/benchmark/Benchmarks/Simple/pymonntorch_slow_LIF_cpu.py +++ /dev/null @@ -1,78 +0,0 @@ -from pymonntorch import * -import torch -import time -from matplotlib import pyplot as plt -from globparams import * - -settings = {'dtype': torch.float64, 'synapse_mode': "DxS", 'device': 'cpu'} - - -class SpikeGeneration(Behavior): - def initialize(self, neurons): - neurons.spikes = neurons.vector(dtype=torch.bool) - neurons.spikesOld = neurons.vector(dtype=torch.bool) - neurons.voltage = neurons.vector() - self.threshold = self.parameter('threshold', None) - self.decay = self.parameter('decay', None) - - def forward(self, neurons): - neurons.spikesOld = neurons.spikes.clone() - neurons.spikes = neurons.voltage > self.threshold - #print(np.sum(neurons.spikes)) number of active neurons around 1.5% - # 
neurons.voltage.fill(0.0) - neurons.voltage *= ~neurons.spikes #reset - neurons.voltage *= self.decay #voltage decay - - -class Input(Behavior): - def initialize(self, neurons): - for s in neurons.synapses('afferent', 'GLU'): - s.W = s.matrix('random') - s.W = s.W / SIZE - # s.W /= torch.sum(s.W, axis=0)################################## - - def forward(self, neurons): - neurons.voltage += neurons.vector('random') - for s in neurons.synapses('afferent', 'GLU'): - input = torch.tensordot(s.W, s.src.spikes.to(neurons.def_dtype), dims=[[1], [0]]) - s.dst.voltage += input - - -class STDP(Behavior): - def initialize(self, neurons): - self.speed = self.parameter('speed', None) - - def forward(self, neurons): - for s in neurons.synapses('afferent', 'GLU'): - s.W += s.dst.spikes[:, None] * s.src.spikesOld[None, :] * self.speed - s.W = torch.clip(s.W, 0.0, 1.0) - - -#class Norm(Behavior): -# def iteration(self, neurons): -# for s in neurons.synapses(afferent, 'GLU'): -# s.W /= np.sum(s.W, axis=0) - - -net = Network(**settings) -NeuronGroup(net=net, tag='NG', size=SIZE, behavior={ - 1: SpikeGeneration(threshold=VT, decay=DECAY), - 2: Input(), - 3: STDP(speed=STDP_SPEED), - #4: Norm() - #5: EventRecorder(variables=['spikes']) -}) - -if PLOT: - net.NG.add_behavior(9, EventRecorder('spikes'), False) - -SynapseGroup(net=net, src='NG', dst='NG', tag='GLU') -net.initialize() - -start = time.time() -net.simulate_iterations(DURATION) -print("simulation time: ", time.time()-start) - -if PLOT: - plt.plot(net['spikes.t', 0], net['spikes.i', 0], '.k') - plt.show() diff --git a/benchmark/Benchmarks/Simple/pymonntorch_slow_LIF_cuda.py b/benchmark/Benchmarks/Simple/pymonntorch_slow_LIF_cuda.py deleted file mode 100644 index 655cf92..0000000 --- a/benchmark/Benchmarks/Simple/pymonntorch_slow_LIF_cuda.py +++ /dev/null @@ -1,78 +0,0 @@ -from pymonntorch import * -import torch -import time -from matplotlib import pyplot as plt -from globparams import * - -settings = {'dtype': torch.float64, 'synapse_mode': "DxS", 'device': 'cuda'} - - -class SpikeGeneration(Behavior): - def initialize(self, neurons): - neurons.spikes = neurons.vector(dtype=torch.bool) - neurons.spikesOld = neurons.vector(dtype=torch.bool) - neurons.voltage = neurons.vector() - self.threshold = self.parameter('threshold', None) - self.decay = self.parameter('decay', None) - - def forward(self, neurons): - neurons.spikesOld = neurons.spikes.clone() - neurons.spikes = neurons.voltage > self.threshold - #print(np.sum(neurons.spikes)) number of active neurons around 1.5% - # neurons.voltage.fill(0.0) - neurons.voltage *= ~neurons.spikes #reset - neurons.voltage *= self.decay #voltage decay - - -class Input(Behavior): - def initialize(self, neurons): - for s in neurons.synapses('afferent', 'GLU'): - s.W = s.matrix('random') - s.W = s.W / SIZE - # s.W /= torch.sum(s.W, axis=0)################################## - - def forward(self, neurons): - neurons.voltage += neurons.vector('random') - for s in neurons.synapses('afferent', 'GLU'): - input = torch.tensordot(s.W, s.src.spikes.to(neurons.def_dtype), dims=[[1], [0]]) - s.dst.voltage += input - - -class STDP(Behavior): - def initialize(self, neurons): - self.speed = self.parameter('speed', None) - - def forward(self, neurons): - for s in neurons.synapses('afferent', 'GLU'): - s.W += s.dst.spikes[:, None] * s.src.spikesOld[None, :] * self.speed - s.W = torch.clip(s.W, 0.0, 1.0) - - -#class Norm(Behavior): -# def iteration(self, neurons): -# for s in neurons.synapses(afferent, 'GLU'): -# s.W /= np.sum(s.W, 
axis=0) - - -net = Network(**settings) -NeuronGroup(net=net, tag='NG', size=SIZE, behavior={ - 1: SpikeGeneration(threshold=VT, decay=DECAY), - 2: Input(), - 3: STDP(speed=STDP_SPEED), - #4: Norm() - #5: EventRecorder(variables=['spikes']) -}) - -if PLOT: - net.NG.add_behavior(9, EventRecorder('spikes'), False) - -SynapseGroup(net=net, src='NG', dst='NG', tag='GLU') -net.initialize() - -start = time.time() -net.simulate_iterations(DURATION) -print("simulation time: ", time.time()-start) - -if PLOT: - plt.plot(net['spikes.t', 0].cpu(), net['spikes.i', 0].cpu(), '.k') - plt.show() diff --git a/benchmark/Plot/Results/Swift-SF315-51G/IZH.csv b/benchmark/Plot/Results/Swift-SF315-51G/IZH.csv deleted file mode 100644 index d856a1d..0000000 --- a/benchmark/Plot/Results/Swift-SF315-51G/IZH.csv +++ /dev/null @@ -1,771 +0,0 @@ -,script_name,size,time -0,pymonnto_izh.py,10,0.032991409301758 -1,pymonntorch_izh_cpu.py,10,0.133836984634399 -2,pymonntorch_izh_cuda.py,10,0.25417685508728 -3,pymonnto_izh.py,10,0.020552158355713 -4,pymonntorch_izh_cpu.py,10,0.116746187210083 -5,pymonntorch_izh_cuda.py,10,0.249530553817749 -6,pymonnto_izh.py,10,0.018023490905762 -7,pymonntorch_izh_cpu.py,10,0.117838621139526 -8,pymonntorch_izh_cuda.py,10,0.248977661132812 -9,pymonnto_izh.py,10,0.01841402053833 -10,pymonntorch_izh_cpu.py,10,0.11761212348938 -11,pymonntorch_izh_cuda.py,10,0.257966756820679 -12,pymonnto_izh.py,10,0.018589019775391 -13,pymonntorch_izh_cpu.py,10,0.118752002716064 -14,pymonntorch_izh_cuda.py,10,0.265898466110229 -15,pymonnto_izh.py,20,0.025196313858032 -16,pymonntorch_izh_cpu.py,20,0.119515180587768 -17,pymonntorch_izh_cuda.py,20,0.262241125106811 -18,pymonnto_izh.py,20,0.018878698348999 -19,pymonntorch_izh_cpu.py,20,0.118935823440552 -20,pymonntorch_izh_cuda.py,20,0.263084650039673 -21,pymonnto_izh.py,20,0.019480466842651 -22,pymonntorch_izh_cpu.py,20,0.116752862930298 -23,pymonntorch_izh_cuda.py,20,0.26537275314331 -24,pymonnto_izh.py,20,0.01916766166687 -25,pymonntorch_izh_cpu.py,20,0.11841082572937 -26,pymonntorch_izh_cuda.py,20,0.251236200332642 -27,pymonnto_izh.py,20,0.019250154495239 -28,pymonntorch_izh_cpu.py,20,0.11689305305481 -29,pymonntorch_izh_cuda.py,20,0.251023054122925 -30,pymonnto_izh.py,50,0.020197629928589 -31,pymonntorch_izh_cpu.py,50,0.120246410369873 -32,pymonntorch_izh_cuda.py,50,0.258030652999878 -33,pymonnto_izh.py,50,0.019414901733398 -34,pymonntorch_izh_cpu.py,50,0.120467662811279 -35,pymonntorch_izh_cuda.py,50,0.266721248626709 -36,pymonnto_izh.py,50,0.019653558731079 -37,pymonntorch_izh_cpu.py,50,0.119279623031616 -38,pymonntorch_izh_cuda.py,50,0.271978139877319 -39,pymonnto_izh.py,50,0.020308256149292 -40,pymonntorch_izh_cpu.py,50,0.129639387130737 -41,pymonntorch_izh_cuda.py,50,0.275496006011963 -42,pymonnto_izh.py,50,0.021753311157227 -43,pymonntorch_izh_cpu.py,50,0.120654582977295 -44,pymonntorch_izh_cuda.py,50,0.261199951171875 -45,pymonnto_izh.py,100,0.020742654800415 -46,pymonntorch_izh_cpu.py,100,0.124077320098877 -47,pymonntorch_izh_cuda.py,100,0.302738666534424 -48,pymonnto_izh.py,100,0.020668029785156 -49,pymonntorch_izh_cpu.py,100,0.122058391571045 -50,pymonntorch_izh_cuda.py,100,0.264579057693481 -51,pymonnto_izh.py,100,0.021377801895142 -52,pymonntorch_izh_cpu.py,100,0.124364376068115 -53,pymonntorch_izh_cuda.py,100,0.295809745788574 -54,pymonnto_izh.py,100,0.020902633666992 -55,pymonntorch_izh_cpu.py,100,0.128972053527832 -56,pymonntorch_izh_cuda.py,100,0.26634931564331 -57,pymonnto_izh.py,100,0.020752906799316 
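A note for readers skimming the deleted LIF benchmarks above: the ``fast`` variants store weights source-by-destination (``synapse_mode='SxD'``), so afferent input is a boolean row selection followed by a sum, while the ``slow`` variants keep a destination-by-source matrix (``'DxS'``) and use a dense ``tensordot``; the STDP step likewise appears once as an index-mask update and once as a dense outer product. The sketch below is a self-contained PyTorch illustration of those two styles with assumed sizes and spike rates — it is not the deleted scripts themselves::

    # Illustration only: assumed sizes and spike rates, plain PyTorch (no pymonntorch).
    import torch

    n_src, n_dst = 1000, 1000
    spikes_src = torch.rand(n_src) < 0.015      # roughly 1.5% active, as noted in the deleted comments

    # 'SxD' layout (fast variants): first axis = source -> mask spiking rows, then reduce.
    W_sxd = torch.rand(n_src, n_dst) / n_src
    input_fast = W_sxd[spikes_src].sum(dim=0)

    # 'DxS' layout (slow variants): first axis = destination -> dense matrix-vector product.
    W_dxs = W_sxd.t().contiguous()
    input_slow = torch.tensordot(W_dxs, spikes_src.to(torch.float32), dims=([1], [0]))
    assert torch.allclose(input_fast, input_slow, atol=1e-5)

    # STDP, dense outer-product form (slow variants, DxS orientation):
    speed = 1e-3
    spikes_old_src = spikes_src.clone()
    spikes_dst = torch.rand(n_dst) < 0.015
    W_dxs = torch.clip(W_dxs + spikes_dst[:, None] * spikes_old_src[None, :] * speed, 0.0, 1.0)

    # STDP, index-mask form (fast variants, SxD orientation): touch only co-active entries.
    rows = torch.where(spikes_old_src)[0].view(-1, 1)
    cols = torch.where(spikes_dst)[0].view(1, -1)
    W_sxd[rows, cols] = torch.clip(W_sxd[rows, cols] + speed, 0.0, 1.0)

Both input computations produce the same drive per destination neuron; the benchmark names refer only to which memory layout and update style each script exercises.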
-58,pymonntorch_izh_cpu.py,100,0.121841907501221 -59,pymonntorch_izh_cuda.py,100,0.28750205039978 -60,pymonnto_izh.py,250,0.024996042251587 -61,pymonntorch_izh_cpu.py,250,0.131926774978638 -62,pymonntorch_izh_cuda.py,250,0.271305561065674 -63,pymonnto_izh.py,250,0.025187253952026 -64,pymonntorch_izh_cpu.py,250,0.138012170791626 -65,pymonntorch_izh_cuda.py,250,0.294023990631104 -66,pymonnto_izh.py,250,0.025542020797729 -67,pymonntorch_izh_cpu.py,250,0.132508993148804 -68,pymonntorch_izh_cuda.py,250,0.271704435348511 -69,pymonnto_izh.py,250,0.025309801101685 -70,pymonntorch_izh_cpu.py,250,0.134806871414184 -71,pymonntorch_izh_cuda.py,250,0.297303199768066 -72,pymonnto_izh.py,250,0.025542736053467 -73,pymonntorch_izh_cpu.py,250,0.164273977279663 -74,pymonntorch_izh_cuda.py,250,0.278284788131714 -75,pymonnto_izh.py,500,0.033448457717896 -76,pymonntorch_izh_cpu.py,500,0.172162532806396 -77,pymonntorch_izh_cuda.py,500,0.363487482070923 -78,pymonnto_izh.py,500,0.03844690322876 -79,pymonntorch_izh_cpu.py,500,0.19976806640625 -80,pymonntorch_izh_cuda.py,500,0.332287311553955 -81,pymonnto_izh.py,500,0.040884494781494 -82,pymonntorch_izh_cpu.py,500,0.193455934524536 -83,pymonntorch_izh_cuda.py,500,0.305108785629272 -84,pymonnto_izh.py,500,0.033061981201172 -85,pymonntorch_izh_cpu.py,500,0.146108627319336 -86,pymonntorch_izh_cuda.py,500,0.271456718444824 -87,pymonnto_izh.py,500,0.03400993347168 -88,pymonntorch_izh_cpu.py,500,0.171453237533569 -89,pymonntorch_izh_cuda.py,500,0.268697023391724 -90,pymonnto_izh.py,750,0.045611381530762 -91,pymonntorch_izh_cpu.py,750,0.193727731704712 -92,pymonntorch_izh_cuda.py,750,0.305218935012817 -93,pymonnto_izh.py,750,0.045769214630127 -94,pymonntorch_izh_cpu.py,750,0.219374418258667 -95,pymonntorch_izh_cuda.py,750,0.280409336090088 -96,pymonnto_izh.py,750,0.044842720031738 -97,pymonntorch_izh_cpu.py,750,0.191648244857788 -98,pymonntorch_izh_cuda.py,750,0.309268474578857 -99,pymonnto_izh.py,750,0.053059816360474 -100,pymonntorch_izh_cpu.py,750,0.219376802444458 -101,pymonntorch_izh_cuda.py,750,0.349288702011108 -102,pymonnto_izh.py,750,0.054110288619995 -103,pymonntorch_izh_cpu.py,750,0.214464664459228 -104,pymonntorch_izh_cuda.py,750,0.314250469207764 -105,pymonnto_izh.py,1000,0.069771528244019 -106,pymonntorch_izh_cpu.py,1000,0.24582839012146 -107,pymonntorch_izh_cuda.py,1000,0.312383890151977 -108,pymonnto_izh.py,1000,0.072553396224976 -109,pymonntorch_izh_cpu.py,1000,0.283058404922485 -110,pymonntorch_izh_cuda.py,1000,0.315168619155884 -111,pymonnto_izh.py,1000,0.069753885269165 -112,pymonntorch_izh_cpu.py,1000,0.275806665420532 -113,pymonntorch_izh_cuda.py,1000,0.344824075698852 -114,pymonnto_izh.py,1000,0.06918740272522 -115,pymonntorch_izh_cpu.py,1000,0.246065855026245 -116,pymonntorch_izh_cuda.py,1000,0.360431909561157 -117,pymonnto_izh.py,1000,0.063349485397339 -118,pymonntorch_izh_cpu.py,1000,0.214341163635254 -119,pymonntorch_izh_cuda.py,1000,0.269978523254394 -120,pymonnto_izh.py,1500,0.142395973205566 -121,pymonntorch_izh_cpu.py,1500,0.318470239639282 -122,pymonntorch_izh_cuda.py,1500,0.288399696350098 -123,pymonnto_izh.py,1500,0.157048463821411 -124,pymonntorch_izh_cpu.py,1500,0.330550909042358 -125,pymonntorch_izh_cuda.py,1500,0.28787088394165 -126,pymonnto_izh.py,1500,0.141680955886841 -127,pymonntorch_izh_cpu.py,1500,0.333054542541504 -128,pymonntorch_izh_cuda.py,1500,0.306177139282226 -129,pymonnto_izh.py,1500,0.142448425292969 -130,pymonntorch_izh_cpu.py,1500,0.373534202575684 -131,pymonntorch_izh_cuda.py,1500,0.354241847991943 
-132,pymonnto_izh.py,1500,0.145970344543457 -133,pymonntorch_izh_cpu.py,1500,0.345590353012085 -134,pymonntorch_izh_cuda.py,1500,0.311316251754761 -135,pymonnto_izh.py,2000,0.314323902130127 -136,pymonntorch_izh_cpu.py,2000,0.504384517669678 -137,pymonntorch_izh_cuda.py,2000,0.395695924758911 -138,pymonnto_izh.py,2000,0.316334247589111 -139,pymonntorch_izh_cpu.py,2000,0.507842302322388 -140,pymonntorch_izh_cuda.py,2000,0.369848728179932 -141,pymonnto_izh.py,2000,0.303716421127319 -142,pymonntorch_izh_cpu.py,2000,0.468461990356445 -143,pymonntorch_izh_cuda.py,2000,0.335073232650757 -144,pymonnto_izh.py,2000,0.322027921676636 -145,pymonntorch_izh_cpu.py,2000,0.463479995727539 -146,pymonntorch_izh_cuda.py,2000,0.330122232437134 -147,pymonnto_izh.py,2000,0.320664405822754 -148,pymonntorch_izh_cpu.py,2000,0.507390737533569 -149,pymonntorch_izh_cuda.py,2000,0.368898630142212 -150,pymonnto_izh.py,3000,1.08062434196472 -151,pymonntorch_izh_cpu.py,3000,0.916598796844482 -152,pymonntorch_izh_cuda.py,3000,0.486913919448852 -153,pymonnto_izh.py,3000,1.04640340805054 -154,pymonntorch_izh_cpu.py,3000,0.873945474624634 -155,pymonntorch_izh_cuda.py,3000,0.42999005317688 -156,pymonnto_izh.py,3000,1.07156324386597 -157,pymonntorch_izh_cpu.py,3000,0.857541561126709 -158,pymonntorch_izh_cuda.py,3000,0.435133218765259 -159,pymonnto_izh.py,3000,1.07976698875427 -160,pymonntorch_izh_cpu.py,3000,0.860671758651733 -161,pymonntorch_izh_cuda.py,3000,0.429748058319092 -162,pymonnto_izh.py,3000,1.03417587280273 -163,pymonntorch_izh_cpu.py,3000,0.876765727996826 -164,pymonntorch_izh_cuda.py,3000,0.475497245788574 -165,pymonnto_izh.py,4000,2.0692994594574 -166,pymonntorch_izh_cpu.py,4000,1.42336058616638 -167,pymonntorch_izh_cuda.py,4000,0.559803485870361 -168,pymonnto_izh.py,4000,1.99543070793152 -169,pymonntorch_izh_cpu.py,4000,1.38440942764282 -170,pymonntorch_izh_cuda.py,4000,0.569406032562256 -171,pymonnto_izh.py,4000,2.09228515625 -172,pymonntorch_izh_cpu.py,4000,1.49167799949646 -173,pymonntorch_izh_cuda.py,4000,0.580037593841553 -174,pymonnto_izh.py,4000,2.17213153839111 -175,pymonntorch_izh_cpu.py,4000,1.47693300247192 -176,pymonntorch_izh_cuda.py,4000,0.594357490539551 -177,pymonnto_izh.py,4000,2.02120733261108 -178,pymonntorch_izh_cpu.py,4000,1.48136973381042 -179,pymonntorch_izh_cuda.py,4000,0.604264736175537 -180,pymonnto_izh.py,5000,3.47158217430115 -181,pymonntorch_izh_cpu.py,5000,2.18594217300415 -182,pymonntorch_izh_cuda.py,5000,0.772057056427002 -183,pymonnto_izh.py,5000,3.39407253265381 -184,pymonntorch_izh_cpu.py,5000,2.29014372825623 -185,pymonntorch_izh_cuda.py,5000,0.767153739929199 -186,pymonnto_izh.py,5000,3.3113694190979 -187,pymonntorch_izh_cpu.py,5000,2.21214604377747 -188,pymonntorch_izh_cuda.py,5000,0.787933111190796 -189,pymonnto_izh.py,5000,3.45780849456787 -190,pymonntorch_izh_cpu.py,5000,2.18901371955872 -191,pymonntorch_izh_cuda.py,5000,0.775554418563843 -192,pymonnto_izh.py,5000,3.36616444587708 -193,pymonntorch_izh_cpu.py,5000,2.22133803367615 -194,pymonntorch_izh_cuda.py,5000,0.765582084655762 -195,pymonnto_izh.py,6000,5.05101203918457 -196,pymonntorch_izh_cpu.py,6000,3.28674340248108 -197,pymonntorch_izh_cuda.py,6000,1.01610660552979 -198,pymonnto_izh.py,6000,5.05244565010071 -199,pymonntorch_izh_cpu.py,6000,3.21947240829468 -200,pymonntorch_izh_cuda.py,6000,1.02059674263 -201,pymonnto_izh.py,6000,5.18157982826233 -202,pymonntorch_izh_cpu.py,6000,3.38219833374023 -203,pymonntorch_izh_cuda.py,6000,1.03292798995972 -204,pymonnto_izh.py,6000,5.06173539161682 
-205,pymonntorch_izh_cpu.py,6000,3.11603927612305 -206,pymonntorch_izh_cuda.py,6000,1.02605938911438 -207,pymonnto_izh.py,6000,4.89320302009583 -208,pymonntorch_izh_cpu.py,6000,3.11143589019775 -209,pymonntorch_izh_cuda.py,6000,1.0278754234314 -210,pymonnto_izh.py,7000,6.79460406303406 -211,pymonntorch_izh_cpu.py,7000,4.26932191848755 -212,pymonntorch_izh_cuda.py,7000,1.30721092224121 -213,pymonnto_izh.py,7000,6.81576156616211 -214,pymonntorch_izh_cpu.py,7000,4.30981063842773 -215,pymonntorch_izh_cuda.py,7000,1.29918837547302 -216,pymonnto_izh.py,7000,6.93153691291809 -217,pymonntorch_izh_cpu.py,7000,4.26843547821045 -218,pymonntorch_izh_cuda.py,7000,1.29788041114807 -219,pymonnto_izh.py,7000,6.94604730606079 -220,pymonntorch_izh_cpu.py,7000,4.32830905914307 -221,pymonntorch_izh_cuda.py,7000,1.30429410934448 -222,pymonnto_izh.py,7000,6.93461036682129 -223,pymonntorch_izh_cpu.py,7000,4.43547797203064 -224,pymonntorch_izh_cuda.py,7000,1.31354808807373 -225,pymonnto_izh.py,8000,9.04382157325745 -226,pymonntorch_izh_cpu.py,8000,5.70801687240601 -227,pymonntorch_izh_cuda.py,8000,1.63206338882446 -228,pymonnto_izh.py,8000,8.90807485580444 -229,pymonntorch_izh_cpu.py,8000,5.89202904701233 -230,pymonntorch_izh_cuda.py,8000,1.63252425193787 -231,pymonnto_izh.py,8000,8.86541032791138 -232,pymonntorch_izh_cpu.py,8000,5.6545193195343 -233,pymonntorch_izh_cuda.py,8000,1.64969348907471 -234,pymonnto_izh.py,8000,8.96783924102783 -235,pymonntorch_izh_cpu.py,8000,5.66378712654114 -236,pymonntorch_izh_cuda.py,8000,1.64490461349487 -237,pymonnto_izh.py,8000,9.18251347541809 -238,pymonntorch_izh_cpu.py,8000,5.77416515350342 -239,pymonntorch_izh_cuda.py,8000,1.63132667541504 -240,pymonnto_izh.py,9000,10.9576964378357 -241,pymonntorch_izh_cpu.py,9000,7.14944171905518 -242,pymonntorch_izh_cuda.py,9000,2.02945971488953 -243,pymonnto_izh.py,9000,10.8653423786163 -244,pymonntorch_izh_cpu.py,9000,7.2914092540741 -245,pymonntorch_izh_cuda.py,9000,2.02877712249756 -246,pymonnto_izh.py,9000,10.7915170192719 -247,pymonntorch_izh_cpu.py,9000,7.20225095748901 -248,pymonntorch_izh_cuda.py,9000,2.02255439758301 -249,pymonnto_izh.py,9000,10.9189050197601 -250,pymonntorch_izh_cpu.py,9000,7.18108081817627 -251,pymonntorch_izh_cuda.py,9000,2.0180675983429 -252,pymonnto_izh.py,9000,10.9531371593475 -253,pymonntorch_izh_cpu.py,9000,7.21270561218262 -254,pymonntorch_izh_cuda.py,9000,2.02455306053162 -255,pymonnto_izh.py,10000,13.2891788482666 -256,pymonntorch_izh_cpu.py,10000,8.90361213684082 -257,pymonntorch_izh_cuda.py,10000,2.43409872055054 -258,pymonnto_izh.py,10000,13.3618071079254 -259,pymonntorch_izh_cpu.py,10000,8.90511155128479 -260,pymonntorch_izh_cuda.py,10000,2.45296859741211 -261,pymonnto_izh.py,10000,13.2411198616028 -262,pymonntorch_izh_cpu.py,10000,8.903728723526 -263,pymonntorch_izh_cuda.py,10000,2.46364784240723 -264,pymonnto_izh.py,10000,13.2297852039337 -265,pymonntorch_izh_cpu.py,10000,9.03444719314575 -266,pymonntorch_izh_cuda.py,10000,2.45684552192688 -267,pymonnto_izh.py,10000,13.2900285720825 -268,pymonntorch_izh_cpu.py,10000,8.86710739135742 -269,pymonntorch_izh_cuda.py,10000,2.44551396369934 -270,pymonnto_izh.py,11000,16.0265226364136 -271,pymonntorch_izh_cpu.py,11000,10.7661943435669 -272,pymonntorch_izh_cuda.py,11000,2.91199517250061 -273,pymonnto_izh.py,11000,16.1246643066406 -274,pymonntorch_izh_cpu.py,11000,10.893839597702 -275,pymonntorch_izh_cuda.py,11000,2.92111539840698 -276,pymonnto_izh.py,11000,15.8282086849213 -277,pymonntorch_izh_cpu.py,11000,10.8978810310364 
-278,pymonntorch_izh_cuda.py,11000,2.92128729820251 -279,pymonnto_izh.py,11000,16.0694341659546 -280,pymonntorch_izh_cpu.py,11000,10.8137557506561 -281,pymonntorch_izh_cuda.py,11000,2.91232585906982 -282,pymonnto_izh.py,11000,16.2355403900146 -283,pymonntorch_izh_cpu.py,11000,10.8401687145233 -284,pymonntorch_izh_cuda.py,11000,2.89150404930115 -285,pymonnto_izh.py,12000,19.5447924137115 -286,pymonntorch_izh_cpu.py,12000,12.9905407428741 -287,pymonntorch_izh_cuda.py,12000,3.40585947036743 -288,pymonnto_izh.py,12000,19.5533308982849 -289,pymonntorch_izh_cpu.py,12000,13.000958442688 -290,pymonntorch_izh_cuda.py,12000,3.4168963432312 -291,pymonnto_izh.py,12000,20.2095503807068 -292,pymonntorch_izh_cpu.py,12000,13.1709880828857 -293,pymonntorch_izh_cuda.py,12000,3.40541052818298 -294,pymonnto_izh.py,12000,20.1956658363342 -295,pymonntorch_izh_cpu.py,12000,13.0317413806915 -296,pymonntorch_izh_cuda.py,12000,3.39516091346741 -297,pymonnto_izh.py,12000,19.64133477211 -298,pymonntorch_izh_cpu.py,12000,12.8235857486725 -299,pymonntorch_izh_cuda.py,12000,3.41586136817932 -300,pymonnto_izh.py,13000,22.7227761745453 -301,pymonntorch_izh_cpu.py,13000,15.2302794456482 -302,pymonntorch_izh_cuda.py,13000,3.9511821269989 -303,pymonnto_izh.py,13000,23.3322749137878 -304,pymonntorch_izh_cpu.py,13000,15.6124384403229 -305,pymonntorch_izh_cuda.py,13000,3.99277567863464 -306,pymonnto_izh.py,13000,23.1976590156555 -307,pymonntorch_izh_cpu.py,13000,15.3181023597717 -308,pymonntorch_izh_cuda.py,13000,4.00069570541382 -309,pymonnto_izh.py,13000,23.4044945240021 -310,pymonntorch_izh_cpu.py,13000,15.377916097641 -311,pymonntorch_izh_cuda.py,13000,3.96334218978882 -312,pymonnto_izh.py,13000,23.1431465148926 -313,pymonntorch_izh_cpu.py,13000,15.6402070522308 -314,pymonntorch_izh_cuda.py,13000,3.97895550727844 -315,pymonnto_izh.py,14000,26.9950575828552 -316,pymonntorch_izh_cpu.py,14000,17.8539657592773 -317,pymonntorch_izh_cuda.py,14000,4.57687997817993 -318,pymonnto_izh.py,14000,26.147646188736 -319,pymonntorch_izh_cpu.py,14000,18.1744658946991 -320,pymonntorch_izh_cuda.py,14000,4.56769824028015 -321,pymonnto_izh.py,14000,26.4688539505005 -322,pymonntorch_izh_cpu.py,14000,18.0798938274384 -323,pymonntorch_izh_cuda.py,14000,4.57321190834045 -324,pymonnto_izh.py,14000,26.7790372371674 -325,pymonntorch_izh_cpu.py,14000,18.0632662773132 -326,pymonntorch_izh_cuda.py,14000,4.57813405990601 -327,pymonnto_izh.py,14000,27.067676782608 -328,pymonntorch_izh_cpu.py,14000,18.1004478931427 -329,pymonntorch_izh_cuda.py,14000,4.56180143356323 -330,pymonnto_izh.py,15000,31.6347239017487 -331,pymonntorch_izh_cpu.py,15000,20.9053699970245 -332,pymonnto_izh.py,15000,31.8710944652557 -333,pymonntorch_izh_cpu.py,15000,20.8803806304932 -334,pymonnto_izh.py,15000,31.4761688709259 -335,pymonntorch_izh_cpu.py,15000,20.7469246387482 -336,pymonnto_izh.py,15000,31.3973388671875 -337,pymonntorch_izh_cpu.py,15000,20.9054527282715 -338,pymonnto_izh.py,15000,31.3394210338593 -339,pymonntorch_izh_cpu.py,15000,20.6870503425598 -340,brian_izh_cpp.py,10,0.002876 -341,brian_izh_cuda.py,10,0.013 -342,brian_izh.py,10,0.466598987579346 -343,brian_izh_cpp.py,10,0.002831 -344,brian_izh_cuda.py,10,0.009 -345,brian_izh.py,10,0.424249649047852 -346,brian_izh_cpp.py,10,0.002876 -347,brian_izh_cuda.py,10,0.01 -348,brian_izh.py,10,0.45332145690918 -349,brian_izh_cpp.py,10,0.002874 -350,brian_izh_cuda.py,10,0.009 -351,brian_izh.py,10,0.4267418384552 -352,brian_izh_cpp.py,10,0.002832 -353,brian_izh_cuda.py,10,0.01 -354,brian_izh.py,10,0.429680824279785 
-355,brian_izh_cpp.py,20,0.003117 -356,brian_izh_cuda.py,20,0.011 -357,brian_izh.py,20,0.444624185562134 -358,brian_izh_cpp.py,20,0.003212 -359,brian_izh_cuda.py,20,0.009 -360,brian_izh.py,20,0.42950439453125 -361,brian_izh_cpp.py,20,0.003251 -362,brian_izh_cuda.py,20,0.01 -363,brian_izh.py,20,0.419471740722656 -364,brian_izh_cpp.py,20,0.003128 -365,brian_izh_cuda.py,20,0.009 -366,brian_izh.py,20,0.427942514419556 -367,brian_izh_cpp.py,20,0.003249 -368,brian_izh_cuda.py,20,0.011 -369,brian_izh.py,20,0.420017242431641 -370,brian_izh_cpp.py,50,0.00402 -371,brian_izh_cuda.py,50,0.01 -372,brian_izh.py,50,0.424075603485107 -373,brian_izh_cpp.py,50,0.004169 -374,brian_izh_cuda.py,50,0.01 -375,brian_izh.py,50,0.427181005477905 -376,brian_izh_cpp.py,50,0.004166 -377,brian_izh_cuda.py,50,0.01 -378,brian_izh.py,50,0.447376012802124 -379,brian_izh_cpp.py,50,0.004158 -380,brian_izh_cuda.py,50,0.009 -381,brian_izh.py,50,0.46279764175415 -382,brian_izh_cpp.py,50,0.004089 -383,brian_izh_cuda.py,50,0.017 -384,brian_izh.py,50,0.439213752746582 -385,brian_izh_cpp.py,100,0.006132 -386,brian_izh_cuda.py,100,0.011 -387,brian_izh.py,100,0.436349630355835 -388,brian_izh_cpp.py,100,0.006211 -389,brian_izh_cuda.py,100,0.011 -390,brian_izh.py,100,0.460581064224243 -391,brian_izh_cpp.py,100,0.006119 -392,brian_izh_cuda.py,100,0.011 -393,brian_izh.py,100,0.423349618911743 -394,brian_izh_cpp.py,100,0.00629 -395,brian_izh_cuda.py,100,0.011 -396,brian_izh.py,100,0.448997735977173 -397,brian_izh_cpp.py,100,0.005973 -398,brian_izh_cuda.py,100,0.011 -399,brian_izh.py,100,0.451291561126709 -400,brian_izh_cpp.py,250,0.01684 -401,brian_izh_cuda.py,250,0.015 -402,brian_izh.py,250,0.448139429092407 -403,brian_izh_cpp.py,250,0.016874 -404,brian_izh_cuda.py,250,0.015 -405,brian_izh.py,250,0.452611446380615 -406,brian_izh_cpp.py,250,0.016417 -407,brian_izh_cuda.py,250,0.015 -408,brian_izh.py,250,0.44346284866333 -409,brian_izh_cpp.py,250,0.016829 -410,brian_izh_cuda.py,250,0.015 -411,brian_izh.py,250,0.436567783355713 -412,brian_izh_cpp.py,250,0.016603 -413,brian_izh_cuda.py,250,0.015 -414,brian_izh.py,250,0.432596206665039 -415,brian_izh_cpp.py,500,0.051793 -416,brian_izh_cuda.py,500,0.03 -417,brian_izh.py,500,0.472482204437256 -418,brian_izh_cpp.py,500,0.073811 -419,brian_izh_cuda.py,500,0.03 -420,brian_izh.py,500,0.489950180053711 -421,brian_izh_cpp.py,500,0.049628 -422,brian_izh_cuda.py,500,0.03 -423,brian_izh.py,500,0.511229991912842 -424,brian_izh_cpp.py,500,0.050166 -425,brian_izh_cuda.py,500,0.03 -426,brian_izh.py,500,0.494636297225952 -427,brian_izh_cpp.py,500,0.049237 -428,brian_izh_cuda.py,500,0.03 -429,brian_izh.py,500,0.482289552688599 -430,brian_izh_cpp.py,750,0.160712 -431,brian_izh_cuda.py,750,0.06 -432,brian_izh.py,750,0.62227988243103 -433,brian_izh_cpp.py,750,0.163403 -434,brian_izh_cuda.py,750,0.06 -435,brian_izh.py,750,0.654393434524536 -436,brian_izh_cpp.py,750,0.163296 -437,brian_izh_cuda.py,750,0.06 -438,brian_izh.py,750,0.63255786895752 -439,brian_izh_cpp.py,750,0.1667 -440,brian_izh_cuda.py,750,0.062 -441,brian_izh.py,750,0.636169672012329 -442,brian_izh_cpp.py,750,0.160179 -443,brian_izh_cuda.py,750,0.061 -444,brian_izh.py,750,0.626873970031738 -445,brian_izh_cpp.py,1000,0.379794 -446,brian_izh_cuda.py,1000,0.089 -447,brian_izh.py,1000,0.850050687789917 -448,brian_izh_cpp.py,1000,0.37491 -449,brian_izh_cuda.py,1000,0.09 -450,brian_izh.py,1000,0.858251333236694 -451,brian_izh_cpp.py,1000,0.358678 -452,brian_izh_cuda.py,1000,0.091 -453,brian_izh.py,1000,0.865492343902588 -454,brian_izh_cpp.py,1000,0.37419 
-455,brian_izh_cuda.py,1000,0.091 -456,brian_izh.py,1000,0.888575792312622 -457,brian_izh_cpp.py,1000,0.367214 -458,brian_izh_cuda.py,1000,0.09 -459,brian_izh.py,1000,0.841342926025391 -460,brian_izh_cpp.py,1500,0.960912 -461,brian_izh_cuda.py,1500,0.211 -462,brian_izh.py,1500,1.47872090339661 -463,brian_izh_cpp.py,1500,0.939579 -464,brian_izh_cuda.py,1500,0.212 -465,brian_izh.py,1500,1.4835889339447 -466,brian_izh_cpp.py,1500,0.928853 -467,brian_izh_cuda.py,1500,0.213 -468,brian_izh.py,1500,1.44867563247681 -469,brian_izh_cpp.py,1500,0.944166 -470,brian_izh_cuda.py,1500,0.216 -471,brian_izh.py,1500,1.42937874794006 -472,brian_izh_cpp.py,1500,0.932168 -473,brian_izh_cuda.py,1500,0.213 -474,brian_izh.py,1500,1.46332573890686 -475,brian_izh_cpp.py,2000,1.71923 -476,brian_izh_cuda.py,2000,0.343 -477,brian_izh.py,2000,2.26499629020691 -478,brian_izh_cpp.py,2000,1.70134 -479,brian_izh_cuda.py,2000,0.34 -480,brian_izh.py,2000,2.30839014053345 -481,brian_izh_cpp.py,2000,1.62817 -482,brian_izh_cuda.py,2000,0.341 -483,brian_izh.py,2000,2.34715223312378 -484,brian_izh_cpp.py,2000,1.69901 -485,brian_izh_cuda.py,2000,0.34 -486,brian_izh.py,2000,2.34443020820618 -487,brian_izh_cpp.py,2000,1.71477 -488,brian_izh_cuda.py,2000,0.339 -489,brian_izh.py,2000,2.30450057983398 -490,brian_izh_cpp.py,3000,3.9678 -491,brian_izh_cuda.py,3000,0.836 -492,brian_izh.py,3000,4.69653296470642 -493,brian_izh_cpp.py,3000,3.98226 -494,brian_izh_cuda.py,3000,0.833 -495,brian_izh.py,3000,4.70671796798706 -496,brian_izh_cpp.py,3000,4.00432 -497,brian_izh_cuda.py,3000,0.834 -498,brian_izh.py,3000,4.79064583778381 -499,brian_izh_cpp.py,3000,4.00569 -500,brian_izh_cuda.py,3000,0.834 -501,brian_izh.py,3000,4.67546200752258 -502,brian_izh_cpp.py,3000,3.94538 -503,brian_izh_cuda.py,3000,0.834 -504,brian_izh.py,3000,4.81315159797668 -505,brian_izh_cpp.py,4000,7.16097 -506,brian_izh_cuda.py,4000,1.391 -507,brian_izh.py,4000,8.10702848434448 -508,brian_izh_cpp.py,4000,7.29134 -509,brian_izh_cuda.py,4000,1.397 -510,brian_izh.py,4000,8.23333358764648 -511,brian_izh_cpp.py,4000,7.48549 -512,brian_izh_cuda.py,4000,1.393 -513,brian_izh.py,4000,8.27254557609558 -514,brian_izh_cpp.py,4000,7.32196 -515,brian_izh_cuda.py,4000,1.394 -516,brian_izh.py,4000,8.18488502502441 -517,brian_izh_cpp.py,4000,7.34546 -518,brian_izh_cuda.py,4000,1.395 -519,brian_izh.py,4000,8.21425485610962 -520,brian_izh_cpp.py,5000,11.4072 -521,brian_izh_cuda.py,5000,2.312 -522,brian_izh.py,5000,12.6283340454102 -523,brian_izh_cpp.py,5000,11.2982 -524,brian_izh_cuda.py,5000,2.298 -525,brian_izh.py,5000,12.9338479042053 -526,brian_izh_cpp.py,5000,11.3538 -527,brian_izh_cuda.py,5000,2.316 -528,brian_izh.py,5000,12.8643786907196 -529,brian_izh_cpp.py,5000,11.7169 -530,brian_izh_cuda.py,5000,2.315 -531,brian_izh.py,5000,12.9135956764221 -532,brian_izh_cpp.py,5000,11.3438 -533,brian_izh_cuda.py,5000,2.319 -534,brian_izh.py,5000,12.6860120296478 -535,brian_izh_cpp.py,6000,16.8457 -536,brian_izh_cuda.py,6000,3.359 -537,brian_izh.py,6000,18.303423166275 -538,brian_izh_cpp.py,6000,16.7857 -539,brian_izh_cuda.py,6000,3.364 -540,brian_izh.py,6000,18.4067342281342 -541,brian_izh_cpp.py,6000,17.0635 -542,brian_izh_cuda.py,6000,3.366 -543,brian_izh.py,6000,18.3998210430145 -544,brian_izh_cpp.py,6000,16.7487 -545,brian_izh_cuda.py,6000,3.369 -546,brian_izh.py,6000,18.4654953479767 -547,brian_izh_cpp.py,6000,16.9168 -548,brian_izh_cuda.py,6000,3.371 -549,brian_izh.py,6000,19.0039446353912 -550,brian_izh_cpp.py,7000,23.4751 -551,brian_izh.py,7000,26.0536541938782 
-552,brian_izh_cpp.py,7000,23.3884 -553,brian_izh.py,7000,26.1675896644592 -554,brian_izh_cpp.py,7000,24.4164 -555,brian_izh.py,7000,25.6168527603149 -556,brian_izh_cpp.py,7000,23.7133 -557,brian_izh.py,7000,25.454847574234 -558,brian_izh_cpp.py,7000,23.7897 -559,brian_izh.py,7000,25.3582994937897 -560,brian_izh_cpp.py,8000,31.9601 -561,brian_izh.py,8000,34.1658861637115 -562,brian_izh_cpp.py,8000,31.9841 -563,brian_izh.py,8000,33.767023563385 -564,brian_izh_cpp.py,8000,31.8404 -565,brian_izh.py,8000,33.5812859535217 -566,brian_izh_cpp.py,8000,31.7117 -567,brian_izh.py,8000,33.4776639938354 -568,brian_izh_cpp.py,8000,31.872 -569,brian_izh.py,8000,33.4805614948273 -570,brian_izh_cpp.py,9000,41.5102 -571,brian_izh.py,9000,44.7606265544891 -572,brian_izh_cpp.py,9000,42.6524 -573,brian_izh.py,9000,45.9764566421509 -574,brian_izh_cpp.py,9000,41.8947 -575,brian_izh.py,9000,45.0407848358154 -576,brian_izh_cpp.py,9000,41.5538 -577,brian_izh.py,9000,44.6227600574493 -578,brian_izh_cpp.py,9000,41.4446 -579,brian_izh.py,9000,46.263730764389 -580,brian_izh_cpp.py,10000,54.3807 -581,brian_izh.py,10000,57.9954173564911 -582,brian_izh_cpp.py,10000,53.1051 -583,brian_izh.py,10000,57.3292937278748 -584,brian_izh_cpp.py,10000,55.862 -585,brian_izh.py,10000,58.6095671653748 -586,brian_izh_cpp.py,10000,53.5958 -587,brian_izh.py,10000,59.2895181179047 -588,brian_izh_cpp.py,10000,54.1027 -589,brian_izh.py,10000,59.0859644412994 -590,brian_izh_cpp.py,11000,72.4748 -591,brian_izh.py,11000,75.8621115684509 -592,brian_izh_cpp.py,11000,71.2807 -593,brian_izh.py,11000,72.2982611656189 -594,brian_izh_cpp.py,11000,68.1498 -595,brian_izh.py,11000,73.9615547657013 -596,brian_izh_cpp.py,11000,69.5294 -597,brian_izh.py,11000,74.9125142097473 -598,brian_izh_cpp.py,11000,71.776 -599,brian_izh.py,11000,78.5046782493591 -600,brian_izh_cpp.py,12000,88.4113 -601,brian_izh.py,12000,80.7162580490112 -602,brian_izh_cpp.py,12000,89.2474 -603,brian_izh.py,12000,90.9494035243988 -604,brian_izh_cpp.py,12000,89.35 -605,brian_izh.py,12000,74.9083354473114 -606,brian_izh_cpp.py,12000,80.6944 -607,brian_izh.py,12000,90.7035307884216 -608,brian_izh_cpp.py,12000,86.9704 -609,brian_izh.py,12000,89.5266361236572 -610,brian_izh_cpp.py,13000,107.642 -611,brian_izh.py,13000,115.318281650543 -612,brian_izh_cpp.py,13000,108.723 -613,brian_izh.py,13000,117.146318674088 -614,brian_izh_cpp.py,13000,103.896 -615,brian_izh.py,13000,113.907171487808 -616,brian_izh_cpp.py,13000,107.245 -617,brian_izh.py,13000,112.162793159485 -618,brian_izh_cpp.py,13000,106.353 -619,brian_izh.py,13000,113.575128316879 -620,brian_izh_cpp.py,14000,131.176 -621,brian_izh.py,14000,137.167668104172 -622,brian_izh_cpp.py,14000,130.297 -623,brian_izh.py,14000,137.779871702194 -624,brian_izh_cpp.py,14000,130.06 -625,brian_izh.py,14000,136.080766916275 -626,brian_izh_cpp.py,14000,129.061 -627,brian_izh.py,14000,136.077105998993 -628,brian_izh_cpp.py,14000,130.298 -629,brian_izh.py,14000,135.801756620407 -630,brian_izh_cpp.py,15000,154.67 -631,brian_izh.py,15000,162.027059555054 -632,brian_izh_cpp.py,15000,152.58 -633,brian_izh.py,15000,163.867361068726 -634,brian_izh_cpp.py,15000,153.493 -635,brian_izh.py,15000,158.880830526352 -636,brian_izh_cpp.py,15000,151.951 -637,brian_izh.py,15000,163.550531625748 -638,brian_izh_cpp.py,15000,157.593 -639,brian_izh.py,15000,159.618934631348 -640,pynn_nest_izh.py,10,0.007119417190552 -641,pynn_nest_izh.py,20,0.005602121353149 -642,pynn_nest_izh.py,50,0.006001949310303 -643,pynn_nest_izh.py,100,0.009738445281982 
-644,pynn_nest_izh.py,250,0.035452842712402 -645,pynn_nest_izh.py,500,0.122118949890137 -646,pynn_nest_izh.py,750,0.24433159828186 -647,pynn_nest_izh.py,1000,0.443263292312622 -648,pynn_nest_izh.py,1500,0.953883647918701 -649,pynn_nest_izh.py,2000,1.67197418212891 -650,pynn_nest_izh.py,3000,3.76414728164673 -651,pynn_nest_izh.py,4000,6.76924538612366 -652,pynn_nest_izh.py,5000,12.0133469104767 -653,pynn_nest_izh.py,6000,18.9744284152985 -654,pynn_nest_izh.py,7000,28.1055245399475 -655,pynn_nest_izh.py,8000,39.8059604167938 -656,pynn_nest_izh.py,9000,50.9399399757385 -657,pynn_nest_izh.py,10000,66.0336031913757 -658,pynn_nest_izh.py,11000,82.727064371109 -659,pynn_nest_izh.py,10,0.006565570831299 -660,pynn_nest_izh.py,20,0.005766868591309 -661,pynn_nest_izh.py,50,0.006170272827148 -662,pynn_nest_izh.py,100,0.009546756744385 -663,pynn_nest_izh.py,250,0.034337997436523 -664,pynn_nest_izh.py,500,0.117258787155151 -665,pynn_nest_izh.py,750,0.247231483459473 -666,pynn_nest_izh.py,1000,0.427734613418579 -667,pynn_nest_izh.py,1500,0.948466777801514 -668,pynn_nest_izh.py,2000,1.67237854003906 -669,pynn_nest_izh.py,3000,3.80270886421204 -670,pynn_nest_izh.py,4000,6.88535904884338 -671,pynn_nest_izh.py,5000,12.5203161239624 -672,pynn_nest_izh.py,6000,18.1416347026825 -673,pynn_nest_izh.py,7000,28.4685804843903 -674,pynn_nest_izh.py,8000,40.0146548748016 -675,pynn_nest_izh.py,9000,50.8023982048035 -676,pynn_nest_izh.py,10000,66.5363240242004 -677,pynn_nest_izh.py,11000,82.7933866977692 -678,pynn_nest_izh.py,10,0.006266355514526 -679,pynn_nest_izh.py,20,0.00554370880127 -680,pynn_nest_izh.py,50,0.005761384963989 -681,pynn_nest_izh.py,100,0.009755611419678 -682,pynn_nest_izh.py,250,0.042130470275879 -683,pynn_nest_izh.py,500,0.121298313140869 -684,pynn_nest_izh.py,750,0.253603458404541 -685,pynn_nest_izh.py,1000,0.436272859573364 -686,pynn_nest_izh.py,1500,0.964898824691772 -687,pynn_nest_izh.py,2000,1.69490313529968 -688,pynn_nest_izh.py,3000,3.86808490753174 -689,pynn_nest_izh.py,4000,6.85308194160461 -690,pynn_nest_izh.py,5000,13.3183224201202 -691,pynn_nest_izh.py,6000,20.6445116996765 -692,pynn_nest_izh.py,7000,29.6795034408569 -693,pynn_nest_izh.py,8000,41.2699823379517 -694,pynn_nest_izh.py,9000,54.4383058547974 -695,pynn_nest_izh.py,10000,70.90758061409 -696,pynn_nest_izh.py,11000,91.2416250705719 -697,pynn_nest_izh.py,10,0.005751609802246 -698,pynn_nest_izh.py,20,0.005758047103882 -699,pynn_nest_izh.py,50,0.006054639816284 -700,pynn_nest_izh.py,100,0.009867429733276 -701,pynn_nest_izh.py,250,0.0344078540802 -702,pynn_nest_izh.py,500,0.117695569992065 -703,pynn_nest_izh.py,750,0.249545574188232 -704,pynn_nest_izh.py,1000,0.445414781570435 -705,pynn_nest_izh.py,1500,0.964312076568604 -706,pynn_nest_izh.py,2000,1.69434809684753 -707,pynn_nest_izh.py,3000,3.83296489715576 -708,pynn_nest_izh.py,4000,6.92224359512329 -709,pynn_nest_izh.py,5000,12.3567323684692 -710,pynn_nest_izh.py,6000,20.0469624996185 -711,pynn_nest_izh.py,7000,29.402633190155 -712,pynn_nest_izh.py,8000,41.7473745346069 -713,pynn_nest_izh.py,9000,55.5102479457855 -714,pynn_nest_izh.py,10000,71.0006785392761 -715,pynn_nest_izh.py,11000,90.8065145015717 -716,pynn_nest_izh.py,10,0.007463693618774 -717,pynn_nest_izh.py,20,0.005828380584717 -718,pynn_nest_izh.py,50,0.005948066711426 -719,pynn_nest_izh.py,100,0.016487121582031 -720,pynn_nest_izh.py,250,0.036030292510986 -721,pynn_nest_izh.py,500,0.118506669998169 -722,pynn_nest_izh.py,750,0.271506786346436 -723,pynn_nest_izh.py,1000,0.435741901397705 
-724,pynn_nest_izh.py,1500,0.961171388626099 -725,pynn_nest_izh.py,2000,1.70816898345947 -726,pynn_nest_izh.py,3000,3.84042859077454 -727,pynn_nest_izh.py,4000,7.02516770362854 -728,pynn_nest_izh.py,5000,12.8102900981903 -729,pynn_nest_izh.py,6000,20.8697283267975 -730,pynn_nest_izh.py,7000,29.6732840538025 -731,pynn_nest_izh.py,8000,41.7032980918884 -732,pynn_nest_izh.py,9000,54.961462020874 -733,pynn_nest_izh.py,10000,71.7743618488312 -734,pynn_nest_izh.py,11000,90.8146879673004 -735,pymonnto_izh.py,2500,0.703134298324585 -736,pymonntorch_izh_cpu.py,2500,0.635671854019165 -737,pymonntorch_izh_cuda.py,2500,0.391748666763306 -738,pynn_nest_izh.py,2500,2.82775712013245 -739,pymonnto_izh.py,2500,0.728072881698608 -740,pymonntorch_izh_cpu.py,2500,0.664825916290283 -741,pymonntorch_izh_cuda.py,2500,0.37099552154541 -742,pynn_nest_izh.py,2500,2.93594360351562 -743,pymonnto_izh.py,2500,0.693753957748413 -744,pymonntorch_izh_cpu.py,2500,0.652793884277344 -745,pymonntorch_izh_cuda.py,2500,0.37587571144104 -746,pynn_nest_izh.py,2500,2.86498522758484 -747,pymonnto_izh.py,2500,0.727940320968628 -748,pymonntorch_izh_cpu.py,2500,0.659935474395752 -749,pymonntorch_izh_cuda.py,2500,0.380735397338867 -750,pynn_nest_izh.py,2500,2.83924603462219 -751,pymonnto_izh.py,2500,0.711217403411865 -752,pymonntorch_izh_cpu.py,2500,0.641978263854981 -753,pymonntorch_izh_cuda.py,2500,0.3928062915802 -754,pynn_nest_izh.py,2500,2.84574580192566 -755,brian_izh_cpp.py,2500,2.70848 -756,brian_izh_cuda.py,2500,0.58 -757,brian_izh.py,2500,3.4784836769104 -758,brian_izh_cpp.py,2500,2.71564 -759,brian_izh_cuda.py,2500,0.585 -760,brian_izh.py,2500,3.39900970458984 -761,brian_izh_cpp.py,2500,2.72046 -762,brian_izh_cuda.py,2500,0.579 -763,brian_izh.py,2500,3.40062832832336 -764,brian_izh_cpp.py,2500,2.68474 -765,brian_izh_cuda.py,2500,0.583 -766,brian_izh.py,2500,3.46744465827942 -767,brian_izh_cpp.py,2500,2.69193 -768,brian_izh_cuda.py,2500,0.577 -769,brian_izh.py,2500,3.39489388465881 diff --git a/benchmark/Plot/Results/Swift-SF315-51G/Izhikevich.csv b/benchmark/Plot/Results/Swift-SF315-51G/Izhikevich.csv deleted file mode 100644 index 9cf696f..0000000 --- a/benchmark/Plot/Results/Swift-SF315-51G/Izhikevich.csv +++ /dev/null @@ -1,11 +0,0 @@ -,brian_izh_cpp.py,brian_izh_cuda.py,brian_izh.py,pymonnto_izh.py,pymonntorch_izh_cpu.py,pymonntorch_izh_cuda.py,pynn_nest_izh.py -0,2.6841,0.578,3.47268319129944,0.719762802124023,0.651740312576294,0.389385938644409,2.82302737236023 -1,2.71105,0.583,3.43610143661499,0.740023851394653,0.653839588165283,0.36807370185852,2.85830187797546 -2,2.69182,0.583,3.41020655632019,0.729773998260498,0.633002519607544,0.394907474517822,2.88890671730042 -3,2.71508,0.578,3.42736411094666,0.723941326141357,0.657864809036255,0.380845546722412,2.90896105766296 -4,2.71922,0.582,3.40595364570618,0.734187841415405,0.643698215484619,0.376919031143188,2.84079718589783 -5,2.65805,0.581,3.34814476966858,0.704172134399414,0.659475326538086,0.39015007019043,2.85012125968933 -6,2.67772,0.581,3.36823678016663,0.721926689147949,0.662043333053589,0.365718841552734,2.87331008911133 -7,2.7001,0.58,3.37607884407043,0.732372999191284,0.630667448043823,0.369609832763672,2.85388255119324 -8,2.66467,0.583,3.43324422836304,0.713395595550537,0.634222984313965,0.387043476104736,2.90254998207092 -9,2.67575,0.578,3.34806656837463,0.721349239349365,0.638222694396973,0.388360261917114,2.89329314231873 diff --git a/benchmark/Plot/Results/Swift-SF315-51G/LIF.csv b/benchmark/Plot/Results/Swift-SF315-51G/LIF.csv deleted file mode 100644 
index 3322415..0000000 --- a/benchmark/Plot/Results/Swift-SF315-51G/LIF.csv +++ /dev/null @@ -1,776 +0,0 @@ -,script_name,size,time -0,nest_native_LIF.py,10,0.001524448394775 -1,pymonnto_fast_LIF.py,10,0.005386352539063 -2,pymonntorch_fast_LIF_cpu.py,10,0.034502506256104 -3,pymonntorch_fast_LIF_cuda.py,10,0.055164098739624 -4,nest_native_LIF.py,10,0.001684904098511 -5,pymonnto_fast_LIF.py,10,0.004922389984131 -6,pymonntorch_fast_LIF_cpu.py,10,0.0227370262146 -7,pymonntorch_fast_LIF_cuda.py,10,0.044079065322876 -8,nest_native_LIF.py,10,0.00147533416748 -9,pymonnto_fast_LIF.py,10,0.004964828491211 -10,pymonntorch_fast_LIF_cpu.py,10,0.023900032043457 -11,pymonntorch_fast_LIF_cuda.py,10,0.051133394241333 -12,nest_native_LIF.py,10,0.001452922821045 -13,pymonnto_fast_LIF.py,10,0.005087614059448 -14,pymonntorch_fast_LIF_cpu.py,10,0.029099702835083 -15,pymonntorch_fast_LIF_cuda.py,10,0.055068969726563 -16,nest_native_LIF.py,10,0.001457929611206 -17,pymonnto_fast_LIF.py,10,0.004977464675903 -18,pymonntorch_fast_LIF_cpu.py,10,0.023283004760742 -19,pymonntorch_fast_LIF_cuda.py,10,0.050300121307373 -20,nest_native_LIF.py,20,0.001723527908325 -21,pymonnto_fast_LIF.py,20,0.005069732666016 -22,pymonntorch_fast_LIF_cpu.py,20,0.024091482162476 -23,pymonntorch_fast_LIF_cuda.py,20,0.047647476196289 -24,nest_native_LIF.py,20,0.001506328582764 -25,pymonnto_fast_LIF.py,20,0.005184412002563 -26,pymonntorch_fast_LIF_cpu.py,20,0.022828102111816 -27,pymonntorch_fast_LIF_cuda.py,20,0.051375150680542 -28,nest_native_LIF.py,20,0.001717805862427 -29,pymonnto_fast_LIF.py,20,0.004831314086914 -30,pymonntorch_fast_LIF_cpu.py,20,0.022794961929321 -31,pymonntorch_fast_LIF_cuda.py,20,0.047909259796143 -32,nest_native_LIF.py,20,0.001639604568481 -33,pymonnto_fast_LIF.py,20,0.005386590957642 -34,pymonntorch_fast_LIF_cpu.py,20,0.024707317352295 -35,pymonntorch_fast_LIF_cuda.py,20,0.043691873550415 -36,nest_native_LIF.py,20,0.001563549041748 -37,pymonnto_fast_LIF.py,20,0.004980564117432 -38,pymonntorch_fast_LIF_cpu.py,20,0.023945331573486 -39,pymonntorch_fast_LIF_cuda.py,20,0.047730445861816 -40,nest_native_LIF.py,50,0.002141714096069 -41,pymonnto_fast_LIF.py,50,0.005130767822266 -42,pymonntorch_fast_LIF_cpu.py,50,0.023430824279785 -43,pymonntorch_fast_LIF_cuda.py,50,0.048935413360596 -44,nest_native_LIF.py,50,0.002191781997681 -45,pymonnto_fast_LIF.py,50,0.005886077880859 -46,pymonntorch_fast_LIF_cpu.py,50,0.021432876586914 -47,pymonntorch_fast_LIF_cuda.py,50,0.059073686599731 -48,nest_native_LIF.py,50,0.002090454101563 -49,pymonnto_fast_LIF.py,50,0.005071878433228 -50,pymonntorch_fast_LIF_cpu.py,50,0.024738311767578 -51,pymonntorch_fast_LIF_cuda.py,50,0.043869972229004 -52,nest_native_LIF.py,50,0.002288818359375 -53,pymonnto_fast_LIF.py,50,0.005506038665771 -54,pymonntorch_fast_LIF_cpu.py,50,0.022710800170898 -55,pymonntorch_fast_LIF_cuda.py,50,0.043739080429077 -56,nest_native_LIF.py,50,0.002059698104858 -57,pymonnto_fast_LIF.py,50,0.004725217819214 -58,pymonntorch_fast_LIF_cpu.py,50,0.020483732223511 -59,pymonntorch_fast_LIF_cuda.py,50,0.043893098831177 -60,nest_native_LIF.py,100,0.00417423248291 -61,pymonnto_fast_LIF.py,100,0.005785465240479 -62,pymonntorch_fast_LIF_cpu.py,100,0.023471593856812 -63,pymonntorch_fast_LIF_cuda.py,100,0.045702219009399 -64,nest_native_LIF.py,100,0.003896236419678 -65,pymonnto_fast_LIF.py,100,0.005562543869019 -66,pymonntorch_fast_LIF_cpu.py,100,0.023291826248169 -67,pymonntorch_fast_LIF_cuda.py,100,0.051311254501343 -68,nest_native_LIF.py,100,0.003752708435059 
-69,pymonnto_fast_LIF.py,100,0.005925178527832 -70,pymonntorch_fast_LIF_cpu.py,100,0.023505687713623 -71,pymonntorch_fast_LIF_cuda.py,100,0.05451226234436 -72,nest_native_LIF.py,100,0.003802061080933 -73,pymonnto_fast_LIF.py,100,0.00531792640686 -74,pymonntorch_fast_LIF_cpu.py,100,0.02370548248291 -75,pymonntorch_fast_LIF_cuda.py,100,0.054864645004272 -76,nest_native_LIF.py,100,0.003887891769409 -77,pymonnto_fast_LIF.py,100,0.005355358123779 -78,pymonntorch_fast_LIF_cpu.py,100,0.023903608322144 -79,pymonntorch_fast_LIF_cuda.py,100,0.047017335891724 -80,nest_native_LIF.py,250,0.012444019317627 -81,pymonnto_fast_LIF.py,250,0.00588846206665 -82,pymonntorch_fast_LIF_cpu.py,250,0.023908853530884 -83,pymonntorch_fast_LIF_cuda.py,250,0.052321672439575 -84,nest_native_LIF.py,250,0.012434720993042 -85,pymonnto_fast_LIF.py,250,0.00576639175415 -86,pymonntorch_fast_LIF_cpu.py,250,0.027485370635986 -87,pymonntorch_fast_LIF_cuda.py,250,0.056386709213257 -88,nest_native_LIF.py,250,0.011616230010986 -89,pymonnto_fast_LIF.py,250,0.006087303161621 -90,pymonntorch_fast_LIF_cpu.py,250,0.024095058441162 -91,pymonntorch_fast_LIF_cuda.py,250,0.069028854370117 -92,nest_native_LIF.py,250,0.012560129165649 -93,pymonnto_fast_LIF.py,250,0.005810022354126 -94,pymonntorch_fast_LIF_cpu.py,250,0.024135828018188 -95,pymonntorch_fast_LIF_cuda.py,250,0.057261228561401 -96,nest_native_LIF.py,250,0.012349605560303 -97,pymonnto_fast_LIF.py,250,0.005878686904907 -98,pymonntorch_fast_LIF_cpu.py,250,0.024557590484619 -99,pymonntorch_fast_LIF_cuda.py,250,0.058730602264404 -100,nest_native_LIF.py,500,0.035265922546387 -101,pymonnto_fast_LIF.py,500,0.006334781646729 -102,pymonntorch_fast_LIF_cpu.py,500,0.026100873947144 -103,pymonntorch_fast_LIF_cuda.py,500,0.062910318374634 -104,nest_native_LIF.py,500,0.036166906356812 -105,pymonnto_fast_LIF.py,500,0.009318590164185 -106,pymonntorch_fast_LIF_cpu.py,500,0.02650260925293 -107,pymonntorch_fast_LIF_cuda.py,500,0.062495470046997 -108,nest_native_LIF.py,500,0.035293579101563 -109,pymonnto_fast_LIF.py,500,0.006694793701172 -110,pymonntorch_fast_LIF_cpu.py,500,0.025193452835083 -111,pymonntorch_fast_LIF_cuda.py,500,0.054792642593384 -112,nest_native_LIF.py,500,0.037978410720825 -113,pymonnto_fast_LIF.py,500,0.006460666656494 -114,pymonntorch_fast_LIF_cpu.py,500,0.024914503097534 -115,pymonntorch_fast_LIF_cuda.py,500,0.062903642654419 -116,nest_native_LIF.py,500,0.037924289703369 -117,pymonnto_fast_LIF.py,500,0.006444454193115 -118,pymonntorch_fast_LIF_cpu.py,500,0.026264667510986 -119,pymonntorch_fast_LIF_cuda.py,500,0.056375026702881 -120,nest_native_LIF.py,750,0.084751605987549 -121,pymonnto_fast_LIF.py,750,0.007598400115967 -122,pymonntorch_fast_LIF_cpu.py,750,0.025261640548706 -123,pymonntorch_fast_LIF_cuda.py,750,0.085970878601074 -124,nest_native_LIF.py,750,0.072199821472168 -125,pymonnto_fast_LIF.py,750,0.007187366485596 -126,pymonntorch_fast_LIF_cpu.py,750,0.032128572463989 -127,pymonntorch_fast_LIF_cuda.py,750,0.063899993896484 -128,nest_native_LIF.py,750,0.073548793792725 -129,pymonnto_fast_LIF.py,750,0.0072340965271 -130,pymonntorch_fast_LIF_cpu.py,750,0.029910087585449 -131,pymonntorch_fast_LIF_cuda.py,750,0.053325653076172 -132,nest_native_LIF.py,750,0.072981357574463 -133,pymonnto_fast_LIF.py,750,0.007878541946411 -134,pymonntorch_fast_LIF_cpu.py,750,0.029700994491577 -135,pymonntorch_fast_LIF_cuda.py,750,0.061626434326172 -136,nest_native_LIF.py,750,0.069378137588501 -137,pymonnto_fast_LIF.py,750,0.00751256942749 -138,pymonntorch_fast_LIF_cpu.py,750,0.024507522583008 
-139,pymonntorch_fast_LIF_cuda.py,750,0.06043267250061 -140,nest_native_LIF.py,1000,0.113282442092895 -141,pymonnto_fast_LIF.py,1000,0.008148431777954 -142,pymonntorch_fast_LIF_cpu.py,1000,0.026764154434204 -143,pymonntorch_fast_LIF_cuda.py,1000,0.064014911651611 -144,nest_native_LIF.py,1000,0.117449760437012 -145,pymonnto_fast_LIF.py,1000,0.008025407791138 -146,pymonntorch_fast_LIF_cpu.py,1000,0.030419588088989 -147,pymonntorch_fast_LIF_cuda.py,1000,0.066635370254517 -148,nest_native_LIF.py,1000,0.113091468811035 -149,pymonnto_fast_LIF.py,1000,0.009140729904175 -150,pymonntorch_fast_LIF_cpu.py,1000,0.030916452407837 -151,pymonntorch_fast_LIF_cuda.py,1000,0.050729274749756 -152,nest_native_LIF.py,1000,0.1162269115448 -153,pymonnto_fast_LIF.py,1000,0.008344650268555 -154,pymonntorch_fast_LIF_cpu.py,1000,0.028815746307373 -155,pymonntorch_fast_LIF_cuda.py,1000,0.056997060775757 -156,nest_native_LIF.py,1000,0.110388040542603 -157,pymonnto_fast_LIF.py,1000,0.007949829101563 -158,pymonntorch_fast_LIF_cpu.py,1000,0.035729169845581 -159,pymonntorch_fast_LIF_cuda.py,1000,0.074911832809448 -160,nest_native_LIF.py,1500,0.264184713363647 -161,pymonnto_fast_LIF.py,1500,0.009308099746704 -162,pymonntorch_fast_LIF_cpu.py,1500,0.033133745193481 -163,pymonntorch_fast_LIF_cuda.py,1500,0.054878711700439 -164,nest_native_LIF.py,1500,0.288661003112793 -165,pymonnto_fast_LIF.py,1500,0.010066509246826 -166,pymonntorch_fast_LIF_cpu.py,1500,0.028742790222168 -167,pymonntorch_fast_LIF_cuda.py,1500,0.055286407470703 -168,nest_native_LIF.py,1500,0.269500494003296 -169,pymonnto_fast_LIF.py,1500,0.009757280349731 -170,pymonntorch_fast_LIF_cpu.py,1500,0.027848243713379 -171,pymonntorch_fast_LIF_cuda.py,1500,0.063924312591553 -172,nest_native_LIF.py,1500,0.315436601638794 -173,pymonnto_fast_LIF.py,1500,0.010309934616089 -174,pymonntorch_fast_LIF_cpu.py,1500,0.032901525497437 -175,pymonntorch_fast_LIF_cuda.py,1500,0.055024147033691 -176,nest_native_LIF.py,1500,0.30599308013916 -177,pymonnto_fast_LIF.py,1500,0.009583234786987 -178,pymonntorch_fast_LIF_cpu.py,1500,0.03114128112793 -179,pymonntorch_fast_LIF_cuda.py,1500,0.054648160934448 -180,nest_native_LIF.py,2000,0.488514423370361 -181,pymonnto_fast_LIF.py,2000,0.011345148086548 -182,pymonntorch_fast_LIF_cpu.py,2000,0.032130241394043 -183,pymonntorch_fast_LIF_cuda.py,2000,0.053071975708008 -184,nest_native_LIF.py,2000,0.459297657012939 -185,pymonnto_fast_LIF.py,2000,0.010900974273682 -186,pymonntorch_fast_LIF_cpu.py,2000,0.030582189559937 -187,pymonntorch_fast_LIF_cuda.py,2000,0.052516460418701 -188,nest_native_LIF.py,2000,0.506140470504761 -189,pymonnto_fast_LIF.py,2000,0.011380195617676 -190,pymonntorch_fast_LIF_cpu.py,2000,0.031455755233765 -191,pymonntorch_fast_LIF_cuda.py,2000,0.062368154525757 -192,nest_native_LIF.py,2000,0.476053714752197 -193,pymonnto_fast_LIF.py,2000,0.011074304580688 -194,pymonntorch_fast_LIF_cpu.py,2000,0.035351276397705 -195,pymonntorch_fast_LIF_cuda.py,2000,0.08483362197876 -196,nest_native_LIF.py,2000,0.48887825012207 -197,pymonnto_fast_LIF.py,2000,0.011583805084229 -198,pymonntorch_fast_LIF_cpu.py,2000,0.049885511398315 -199,pymonntorch_fast_LIF_cuda.py,2000,0.059856414794922 -200,nest_native_LIF.py,3000,1.09365940093994 -201,pymonnto_fast_LIF.py,3000,0.026428461074829 -202,pymonntorch_fast_LIF_cpu.py,3000,0.041145086288452 -203,pymonntorch_fast_LIF_cuda.py,3000,0.054208755493164 -204,nest_native_LIF.py,3000,1.10320472717285 -205,pymonnto_fast_LIF.py,3000,0.016770839691162 -206,pymonntorch_fast_LIF_cpu.py,3000,0.038397550582886 
-207,pymonntorch_fast_LIF_cuda.py,3000,0.063611030578613 -208,nest_native_LIF.py,3000,1.09729409217834 -209,pymonnto_fast_LIF.py,3000,0.016826391220093 -210,pymonntorch_fast_LIF_cpu.py,3000,0.037886619567871 -211,pymonntorch_fast_LIF_cuda.py,3000,0.059650659561157 -212,nest_native_LIF.py,3000,1.13203907012939 -213,pymonnto_fast_LIF.py,3000,0.01774263381958 -214,pymonntorch_fast_LIF_cpu.py,3000,0.040239810943604 -215,pymonntorch_fast_LIF_cuda.py,3000,0.063223600387573 -216,nest_native_LIF.py,3000,1.15662407875061 -217,pymonnto_fast_LIF.py,3000,0.016928195953369 -218,pymonntorch_fast_LIF_cpu.py,3000,0.038628816604614 -219,pymonntorch_fast_LIF_cuda.py,3000,0.059532642364502 -220,nest_native_LIF.py,4000,1.89711451530457 -221,pymonnto_fast_LIF.py,4000,0.024436712265015 -222,pymonntorch_fast_LIF_cpu.py,4000,0.04723334312439 -223,pymonntorch_fast_LIF_cuda.py,4000,0.066207647323608 -224,nest_native_LIF.py,4000,2.05894756317139 -225,pymonnto_fast_LIF.py,4000,0.024017333984375 -226,pymonntorch_fast_LIF_cpu.py,4000,0.045944213867188 -227,pymonntorch_fast_LIF_cuda.py,4000,0.068514347076416 -228,nest_native_LIF.py,4000,1.86301231384277 -229,pymonnto_fast_LIF.py,4000,0.023324728012085 -230,pymonntorch_fast_LIF_cpu.py,4000,0.045899868011475 -231,pymonntorch_fast_LIF_cuda.py,4000,0.060991764068604 -232,nest_native_LIF.py,4000,1.89280319213867 -233,pymonnto_fast_LIF.py,4000,0.023765325546265 -234,pymonntorch_fast_LIF_cpu.py,4000,0.046280384063721 -235,pymonntorch_fast_LIF_cuda.py,4000,0.061787843704224 -236,nest_native_LIF.py,4000,1.83891820907593 -237,pymonnto_fast_LIF.py,4000,0.024575233459473 -238,pymonntorch_fast_LIF_cpu.py,4000,0.050399780273438 -239,pymonntorch_fast_LIF_cuda.py,4000,0.062714338302612 -240,nest_native_LIF.py,5000,2.94453978538513 -241,pymonnto_fast_LIF.py,5000,0.033069372177124 -242,pymonntorch_fast_LIF_cpu.py,5000,0.052842855453491 -243,pymonntorch_fast_LIF_cuda.py,5000,0.070848226547241 -244,nest_native_LIF.py,5000,2.86024618148804 -245,pymonnto_fast_LIF.py,5000,0.035665273666382 -246,pymonntorch_fast_LIF_cpu.py,5000,0.057090520858765 -247,pymonntorch_fast_LIF_cuda.py,5000,0.066527843475342 -248,nest_native_LIF.py,5000,2.86605978012085 -249,pymonnto_fast_LIF.py,5000,0.034255743026733 -250,pymonntorch_fast_LIF_cpu.py,5000,0.055938243865967 -251,pymonntorch_fast_LIF_cuda.py,5000,0.069139719009399 -252,nest_native_LIF.py,5000,3.01605200767517 -253,pymonnto_fast_LIF.py,5000,0.033422231674194 -254,pymonntorch_fast_LIF_cpu.py,5000,0.054458379745483 -255,pymonntorch_fast_LIF_cuda.py,5000,0.072757959365845 -256,nest_native_LIF.py,5000,2.83705258369446 -257,pymonnto_fast_LIF.py,5000,0.034260034561157 -258,pymonntorch_fast_LIF_cpu.py,5000,0.070479154586792 -259,pymonntorch_fast_LIF_cuda.py,5000,0.071895599365234 -260,nest_native_LIF.py,6000,4.26159715652466 -261,pymonnto_fast_LIF.py,6000,0.045517444610596 -262,pymonntorch_fast_LIF_cpu.py,6000,0.059213876724243 -263,pymonntorch_fast_LIF_cuda.py,6000,0.075293064117432 -264,nest_native_LIF.py,6000,4.29579424858093 -265,pymonnto_fast_LIF.py,6000,0.046085834503174 -266,pymonntorch_fast_LIF_cpu.py,6000,0.059893131256104 -267,pymonntorch_fast_LIF_cuda.py,6000,0.075949192047119 -268,nest_native_LIF.py,6000,4.2268545627594 -269,pymonnto_fast_LIF.py,6000,0.047609567642212 -270,pymonntorch_fast_LIF_cpu.py,6000,0.086046457290649 -271,pymonntorch_fast_LIF_cuda.py,6000,0.074642658233643 -272,nest_native_LIF.py,6000,4.45915246009827 -273,pymonnto_fast_LIF.py,6000,0.049729108810425 -274,pymonntorch_fast_LIF_cpu.py,6000,0.057090044021606 
-275,pymonntorch_fast_LIF_cuda.py,6000,0.075714111328125 -276,nest_native_LIF.py,6000,4.25487017631531 -277,pymonnto_fast_LIF.py,6000,0.045973539352417 -278,pymonntorch_fast_LIF_cpu.py,6000,0.059790372848511 -279,pymonntorch_fast_LIF_cuda.py,6000,0.080984830856323 -280,nest_native_LIF.py,7000,5.97297978401184 -281,pymonnto_fast_LIF.py,7000,0.061326026916504 -282,pymonntorch_fast_LIF_cpu.py,7000,0.065951108932495 -283,pymonntorch_fast_LIF_cuda.py,7000,0.084087610244751 -284,nest_native_LIF.py,7000,6.02749371528626 -285,pymonnto_fast_LIF.py,7000,0.059701681137085 -286,pymonntorch_fast_LIF_cpu.py,7000,0.064623594284058 -287,pymonntorch_fast_LIF_cuda.py,7000,0.090868234634399 -288,nest_native_LIF.py,7000,6.02861762046814 -289,pymonnto_fast_LIF.py,7000,0.058447360992432 -290,pymonntorch_fast_LIF_cpu.py,7000,0.067902088165283 -291,pymonntorch_fast_LIF_cuda.py,7000,0.083520889282227 -292,nest_native_LIF.py,7000,6.05444526672363 -293,pymonnto_fast_LIF.py,7000,0.060391187667847 -294,pymonntorch_fast_LIF_cpu.py,7000,0.067834854125977 -295,pymonntorch_fast_LIF_cuda.py,7000,0.085067987442017 -296,nest_native_LIF.py,7000,5.7717068195343 -297,pymonnto_fast_LIF.py,7000,0.059735536575317 -298,pymonntorch_fast_LIF_cpu.py,7000,0.074220657348633 -299,pymonntorch_fast_LIF_cuda.py,7000,0.084863424301147 -300,nest_native_LIF.py,8000,8.091721534729 -301,pymonnto_fast_LIF.py,8000,0.084036588668823 -302,pymonntorch_fast_LIF_cpu.py,8000,0.076589107513428 -303,pymonntorch_fast_LIF_cuda.py,8000,0.093154430389404 -304,nest_native_LIF.py,8000,7.91637706756592 -305,pymonnto_fast_LIF.py,8000,0.094157934188843 -306,pymonntorch_fast_LIF_cpu.py,8000,0.074823379516602 -307,pymonntorch_fast_LIF_cuda.py,8000,0.092209815979004 -308,nest_native_LIF.py,8000,8.07622265815735 -309,pymonnto_fast_LIF.py,8000,0.077788114547729 -310,pymonntorch_fast_LIF_cpu.py,8000,0.074698925018311 -311,pymonntorch_fast_LIF_cuda.py,8000,0.099495649337769 -312,nest_native_LIF.py,8000,7.82283926010132 -313,pymonnto_fast_LIF.py,8000,0.083887338638306 -314,pymonntorch_fast_LIF_cpu.py,8000,0.074449300765991 -315,pymonntorch_fast_LIF_cuda.py,8000,0.09286093711853 -316,nest_native_LIF.py,8000,8.1030330657959 -317,pymonnto_fast_LIF.py,8000,0.072176218032837 -318,pymonntorch_fast_LIF_cpu.py,8000,0.073380947113037 -319,pymonntorch_fast_LIF_cuda.py,8000,0.093743085861206 -320,nest_native_LIF.py,9000,10.4836430549622 -321,pymonnto_fast_LIF.py,9000,0.095473766326904 -322,pymonntorch_fast_LIF_cpu.py,9000,0.086922168731689 -323,pymonntorch_fast_LIF_cuda.py,9000,0.113145112991333 -324,nest_native_LIF.py,9000,10.3508825302124 -325,pymonnto_fast_LIF.py,9000,0.100857496261597 -326,pymonntorch_fast_LIF_cpu.py,9000,0.087909460067749 -327,pymonntorch_fast_LIF_cuda.py,9000,0.105234146118164 -328,nest_native_LIF.py,9000,10.1623322963715 -329,pymonnto_fast_LIF.py,9000,0.109147310256958 -330,pymonntorch_fast_LIF_cpu.py,9000,0.088621377944946 -331,pymonntorch_fast_LIF_cuda.py,9000,0.105245351791382 -332,nest_native_LIF.py,9000,10.4761099815369 -333,pymonnto_fast_LIF.py,9000,0.099167585372925 -334,pymonntorch_fast_LIF_cpu.py,9000,0.099905490875244 -335,pymonntorch_fast_LIF_cuda.py,9000,0.10552167892456 -336,nest_native_LIF.py,9000,10.6645112037659 -337,pymonnto_fast_LIF.py,9000,0.100338697433472 -338,pymonntorch_fast_LIF_cpu.py,9000,0.087633371353149 -339,pymonntorch_fast_LIF_cuda.py,9000,0.107631921768188 -340,nest_native_LIF.py,10000,12.8722681999207 -341,pymonnto_fast_LIF.py,10000,0.122548341751099 -342,pymonntorch_fast_LIF_cpu.py,10000,0.135946273803711 
-343,pymonntorch_fast_LIF_cuda.py,10000,0.139399290084839 -344,nest_native_LIF.py,10000,13.1135301589966 -345,pymonnto_fast_LIF.py,10000,0.116753816604614 -346,pymonntorch_fast_LIF_cpu.py,10000,0.102744579315185 -347,pymonntorch_fast_LIF_cuda.py,10000,0.121877670288086 -348,nest_native_LIF.py,10000,13.1232960224152 -349,pymonnto_fast_LIF.py,10000,0.121785402297974 -350,pymonntorch_fast_LIF_cpu.py,10000,0.101681709289551 -351,pymonntorch_fast_LIF_cuda.py,10000,0.120166540145874 -352,nest_native_LIF.py,10000,13.2628364562988 -353,pymonnto_fast_LIF.py,10000,0.123827457427979 -354,pymonntorch_fast_LIF_cpu.py,10000,0.103217124938965 -355,pymonntorch_fast_LIF_cuda.py,10000,0.118340492248535 -356,nest_native_LIF.py,10000,13.0744025707245 -357,pymonnto_fast_LIF.py,10000,0.119674682617188 -358,pymonntorch_fast_LIF_cpu.py,10000,0.09944486618042 -359,pymonntorch_fast_LIF_cuda.py,10000,0.120822429656982 -360,nest_native_LIF.py,11000,16.7193293571472 -361,pymonnto_fast_LIF.py,11000,0.150612831115723 -362,pymonntorch_fast_LIF_cpu.py,11000,0.121677160263061 -363,pymonntorch_fast_LIF_cuda.py,11000,0.134602069854736 -364,nest_native_LIF.py,11000,16.097599029541 -365,pymonnto_fast_LIF.py,11000,0.139222383499146 -366,pymonntorch_fast_LIF_cpu.py,11000,0.12121057510376 -367,pymonntorch_fast_LIF_cuda.py,11000,0.134164094924927 -368,nest_native_LIF.py,11000,16.1147344112396 -369,pymonnto_fast_LIF.py,11000,0.147589206695557 -370,pymonntorch_fast_LIF_cpu.py,11000,0.149416208267212 -371,pymonntorch_fast_LIF_cuda.py,11000,0.134333610534668 -372,nest_native_LIF.py,11000,16.7137060165405 -373,pymonnto_fast_LIF.py,11000,0.148797988891602 -374,pymonntorch_fast_LIF_cpu.py,11000,0.149162530899048 -375,pymonntorch_fast_LIF_cuda.py,11000,0.134407520294189 -376,nest_native_LIF.py,11000,16.9206433296204 -377,pymonnto_fast_LIF.py,11000,0.144206523895264 -378,pymonntorch_fast_LIF_cpu.py,11000,0.119715690612793 -379,pymonntorch_fast_LIF_cuda.py,11000,0.134873151779175 -380,pymonnto_fast_LIF.py,12000,0.164752006530762 -381,pymonntorch_fast_LIF_cpu.py,12000,0.151120185852051 -382,pymonntorch_fast_LIF_cuda.py,12000,0.148332834243774 -383,pymonnto_fast_LIF.py,12000,0.166510105133057 -384,pymonntorch_fast_LIF_cpu.py,12000,0.157703876495361 -385,pymonntorch_fast_LIF_cuda.py,12000,0.149085283279419 -386,pymonnto_fast_LIF.py,12000,0.173627853393555 -387,pymonntorch_fast_LIF_cpu.py,12000,0.147019863128662 -388,pymonntorch_fast_LIF_cuda.py,12000,0.148355007171631 -389,pymonnto_fast_LIF.py,12000,0.180977582931518 -390,pymonntorch_fast_LIF_cpu.py,12000,0.149316549301147 -391,pymonntorch_fast_LIF_cuda.py,12000,0.144497871398926 -392,pymonnto_fast_LIF.py,12000,0.167462825775146 -393,pymonntorch_fast_LIF_cpu.py,12000,0.145257949829102 -394,pymonntorch_fast_LIF_cuda.py,12000,0.150506496429443 -395,pymonnto_fast_LIF.py,13000,0.204609155654907 -396,pymonntorch_fast_LIF_cpu.py,13000,0.174745798110962 -397,pymonntorch_fast_LIF_cuda.py,13000,0.171835660934448 -398,pymonnto_fast_LIF.py,13000,0.192398548126221 -399,pymonntorch_fast_LIF_cpu.py,13000,0.177845478057861 -400,pymonntorch_fast_LIF_cuda.py,13000,0.165466785430908 -401,pymonnto_fast_LIF.py,13000,0.202807188034058 -402,pymonntorch_fast_LIF_cpu.py,13000,0.198911428451538 -403,pymonntorch_fast_LIF_cuda.py,13000,0.179162979125977 -404,pymonnto_fast_LIF.py,13000,0.22229528427124 -405,pymonntorch_fast_LIF_cpu.py,13000,0.180191278457642 -406,pymonntorch_fast_LIF_cuda.py,13000,0.169281959533691 -407,pymonnto_fast_LIF.py,13000,0.198553323745727 -408,pymonntorch_fast_LIF_cpu.py,13000,0.17979097366333 
-409,pymonntorch_fast_LIF_cuda.py,13000,0.168280124664307 -410,pymonnto_fast_LIF.py,14000,0.266068458557129 -411,pymonntorch_fast_LIF_cpu.py,14000,0.236438751220703 -412,pymonntorch_fast_LIF_cuda.py,14000,0.193077087402344 -413,pymonnto_fast_LIF.py,14000,0.246877670288086 -414,pymonntorch_fast_LIF_cpu.py,14000,0.22211217880249 -415,pymonntorch_fast_LIF_cuda.py,14000,0.189296007156372 -416,pymonnto_fast_LIF.py,14000,0.235018253326416 -417,pymonntorch_fast_LIF_cpu.py,14000,0.206639766693115 -418,pymonntorch_fast_LIF_cuda.py,14000,0.191163301467896 -419,pymonnto_fast_LIF.py,14000,0.22843074798584 -420,pymonntorch_fast_LIF_cpu.py,14000,0.204524755477905 -421,pymonntorch_fast_LIF_cuda.py,14000,0.189229726791382 -422,pymonnto_fast_LIF.py,14000,0.251139402389526 -423,pymonntorch_fast_LIF_cpu.py,14000,0.207221508026123 -424,pymonntorch_fast_LIF_cuda.py,14000,0.192740440368652 -425,pymonnto_fast_LIF.py,15000,0.261828899383545 -426,pymonntorch_fast_LIF_cpu.py,15000,0.242669105529785 -427,pymonnto_fast_LIF.py,15000,0.26447319984436 -428,pymonntorch_fast_LIF_cpu.py,15000,0.243803739547729 -429,pymonnto_fast_LIF.py,15000,0.254960298538208 -430,pymonntorch_fast_LIF_cpu.py,15000,0.23711371421814 -431,pymonnto_fast_LIF.py,15000,0.270259618759155 -432,pymonntorch_fast_LIF_cpu.py,15000,0.260066270828247 -433,pymonnto_fast_LIF.py,15000,0.263427734375 -434,pymonntorch_fast_LIF_cpu.py,15000,0.262037515640259 -435,brian_LIF_cpp.py,10,0.000858 -436,brian_LIF_gpu.py,10,0.003 -437,brian_LIF.py,10,0.140318870544434 -438,brian_LIF_cpp.py,10,0.000891 -439,brian_LIF_gpu.py,10,0.003 -440,brian_LIF.py,10,0.128252029418945 -441,brian_LIF_cpp.py,10,0.000882 -442,brian_LIF_gpu.py,10,0.003 -443,brian_LIF.py,10,0.130455017089844 -444,brian_LIF_cpp.py,10,0.00088 -445,brian_LIF_gpu.py,10,0.003 -446,brian_LIF.py,10,0.132129192352295 -447,brian_LIF_cpp.py,10,0.000881 -448,brian_LIF_gpu.py,10,0.003 -449,brian_LIF.py,10,0.127707004547119 -450,brian_LIF_cpp.py,20,0.000941 -451,brian_LIF_gpu.py,20,0.003 -452,brian_LIF.py,20,0.12734866142273 -453,brian_LIF_cpp.py,20,0.00096 -454,brian_LIF_gpu.py,20,0.004 -455,brian_LIF.py,20,0.129726409912109 -456,brian_LIF_cpp.py,20,0.000925 -457,brian_LIF_gpu.py,20,0.003 -458,brian_LIF.py,20,0.128740072250366 -459,brian_LIF_cpp.py,20,0.000956 -460,brian_LIF_gpu.py,20,0.003 -461,brian_LIF.py,20,0.129517793655396 -462,brian_LIF_cpp.py,20,0.000956 -463,brian_LIF_gpu.py,20,0.004 -464,brian_LIF.py,20,0.132304191589356 -465,brian_LIF_cpp.py,50,0.001046 -466,brian_LIF_gpu.py,50,0.004 -467,brian_LIF.py,50,0.129223346710205 -468,brian_LIF_cpp.py,50,0.001088 -469,brian_LIF_gpu.py,50,0.003 -470,brian_LIF.py,50,0.129581451416016 -471,brian_LIF_cpp.py,50,0.001053 -472,brian_LIF_gpu.py,50,0.003 -473,brian_LIF.py,50,0.128509283065796 -474,brian_LIF_cpp.py,50,0.001051 -475,brian_LIF_gpu.py,50,0.003 -476,brian_LIF.py,50,0.127313613891602 -477,brian_LIF_cpp.py,50,0.001057 -478,brian_LIF_gpu.py,50,0.003 -479,brian_LIF.py,50,0.129062414169312 -480,brian_LIF_cpp.py,100,0.001329 -481,brian_LIF_gpu.py,100,0.003 -482,brian_LIF.py,100,0.127542018890381 -483,brian_LIF_cpp.py,100,0.001266 -484,brian_LIF_gpu.py,100,0.003 -485,brian_LIF.py,100,0.128968238830566 -486,brian_LIF_cpp.py,100,0.001279 -487,brian_LIF_gpu.py,100,0.003 -488,brian_LIF.py,100,0.154637813568115 -489,brian_LIF_cpp.py,100,0.001324 -490,brian_LIF_gpu.py,100,0.003 -491,brian_LIF.py,100,0.150275230407715 -492,brian_LIF_cpp.py,100,0.001326 -493,brian_LIF_gpu.py,100,0.004 -494,brian_LIF.py,100,0.164090156555176 -495,brian_LIF_cpp.py,250,0.00218 
-496,brian_LIF_gpu.py,250,0.003 -497,brian_LIF.py,250,0.132863521575928 -498,brian_LIF_cpp.py,250,0.00211 -499,brian_LIF_gpu.py,250,0.003 -500,brian_LIF.py,250,0.154364347457886 -501,brian_LIF_cpp.py,250,0.002046 -502,brian_LIF_gpu.py,250,0.004 -503,brian_LIF.py,250,0.132218599319458 -504,brian_LIF_cpp.py,250,0.002096 -505,brian_LIF_gpu.py,250,0.003 -506,brian_LIF.py,250,0.131069421768188 -507,brian_LIF_cpp.py,250,0.002083 -508,brian_LIF_gpu.py,250,0.003 -509,brian_LIF.py,250,0.129155874252319 -510,brian_LIF_cpp.py,500,0.004196 -511,brian_LIF_gpu.py,500,0.005 -512,brian_LIF.py,500,0.141484975814819 -513,brian_LIF_cpp.py,500,0.004272 -514,brian_LIF_gpu.py,500,0.005 -515,brian_LIF.py,500,0.135302305221558 -516,brian_LIF_cpp.py,500,0.00415 -517,brian_LIF_gpu.py,500,0.005 -518,brian_LIF.py,500,0.139315128326416 -519,brian_LIF_cpp.py,500,0.004308 -520,brian_LIF_gpu.py,500,0.005 -521,brian_LIF.py,500,0.136044979095459 -522,brian_LIF_cpp.py,500,0.004562 -523,brian_LIF_gpu.py,500,0.005 -524,brian_LIF.py,500,0.136786460876465 -525,brian_LIF_cpp.py,750,0.007716 -526,brian_LIF_gpu.py,750,0.007 -527,brian_LIF.py,750,0.147737264633179 -528,brian_LIF_cpp.py,750,0.00719 -529,brian_LIF_gpu.py,750,0.007 -530,brian_LIF.py,750,0.154213190078735 -531,brian_LIF_cpp.py,750,0.007813 -532,brian_LIF_gpu.py,750,0.007 -533,brian_LIF.py,750,0.149576187133789 -534,brian_LIF_cpp.py,750,0.007613 -535,brian_LIF_gpu.py,750,0.007 -536,brian_LIF.py,750,0.149434566497803 -537,brian_LIF_cpp.py,750,0.007647 -538,brian_LIF_gpu.py,750,0.007 -539,brian_LIF.py,750,0.147205829620361 -540,brian_LIF_cpp.py,1000,0.015917 -541,brian_LIF_gpu.py,1000,0.009 -542,brian_LIF.py,1000,0.16684365272522 -543,brian_LIF_cpp.py,1000,0.016532 -544,brian_LIF_gpu.py,1000,0.009 -545,brian_LIF.py,1000,0.161670923233032 -546,brian_LIF_cpp.py,1000,0.017506 -547,brian_LIF_gpu.py,1000,0.009 -548,brian_LIF.py,1000,0.162681818008423 -549,brian_LIF_cpp.py,1000,0.016655 -550,brian_LIF_gpu.py,1000,0.009 -551,brian_LIF.py,1000,0.164472818374634 -552,brian_LIF_cpp.py,1000,0.017194 -553,brian_LIF_gpu.py,1000,0.009 -554,brian_LIF.py,1000,0.194838285446167 -555,brian_LIF_cpp.py,1500,0.044977 -556,brian_LIF_gpu.py,1500,0.019 -557,brian_LIF.py,1500,0.224204063415527 -558,brian_LIF_cpp.py,1500,0.04354 -559,brian_LIF_gpu.py,1500,0.019 -560,brian_LIF.py,1500,0.220430850982666 -561,brian_LIF_cpp.py,1500,0.044341 -562,brian_LIF_gpu.py,1500,0.019 -563,brian_LIF.py,1500,0.22893500328064 -564,brian_LIF_cpp.py,1500,0.04691 -565,brian_LIF_gpu.py,1500,0.02 -566,brian_LIF.py,1500,0.224814891815186 -567,brian_LIF_cpp.py,1500,0.048882 -568,brian_LIF_gpu.py,1500,0.019 -569,brian_LIF.py,1500,0.222842454910278 -570,brian_LIF_cpp.py,2000,0.086037 -571,brian_LIF_gpu.py,2000,0.028 -572,brian_LIF.py,2000,0.307031631469727 -573,brian_LIF_cpp.py,2000,0.093278 -574,brian_LIF_gpu.py,2000,0.027 -575,brian_LIF.py,2000,0.298977375030518 -576,brian_LIF_cpp.py,2000,0.093622 -577,brian_LIF_gpu.py,2000,0.027 -578,brian_LIF.py,2000,0.30195951461792 -579,brian_LIF_cpp.py,2000,0.091449 -580,brian_LIF_gpu.py,2000,0.029 -581,brian_LIF.py,2000,0.304603099822998 -582,brian_LIF_cpp.py,2000,0.093894 -583,brian_LIF_gpu.py,2000,0.029 -584,brian_LIF.py,2000,0.305223941802979 -585,brian_LIF_cpp.py,3000,0.246803 -586,brian_LIF_gpu.py,3000,0.068 -587,brian_LIF.py,3000,0.627051830291748 -588,brian_LIF_cpp.py,3000,0.254219 -589,brian_LIF_gpu.py,3000,0.069 -590,brian_LIF.py,3000,0.602783203125 -591,brian_LIF_cpp.py,3000,0.282571 -592,brian_LIF_gpu.py,3000,0.068 -593,brian_LIF.py,3000,0.635653495788574 
-594,brian_LIF_cpp.py,3000,0.274999 -595,brian_LIF_gpu.py,3000,0.068 -596,brian_LIF.py,3000,0.615356206893921 -597,brian_LIF_cpp.py,3000,0.281416 -598,brian_LIF_gpu.py,3000,0.069 -599,brian_LIF.py,3000,0.630361318588257 -600,brian_LIF_cpp.py,4000,0.544936 -601,brian_LIF_gpu.py,4000,0.105 -602,brian_LIF.py,4000,1.1204936504364 -603,brian_LIF_cpp.py,4000,0.641515 -604,brian_LIF_gpu.py,4000,0.107 -605,brian_LIF.py,4000,1.02776885032654 -606,brian_LIF_cpp.py,4000,0.506917 -607,brian_LIF_gpu.py,4000,0.106 -608,brian_LIF.py,4000,1.00563526153564 -609,brian_LIF_cpp.py,4000,0.517655 -610,brian_LIF_gpu.py,4000,0.108 -611,brian_LIF.py,4000,0.996399402618408 -612,brian_LIF_cpp.py,4000,0.598892 -613,brian_LIF_gpu.py,4000,0.107 -614,brian_LIF.py,4000,1.01198959350586 -615,brian_LIF_cpp.py,5000,0.940683 -616,brian_LIF_gpu.py,5000,0.189 -617,brian_LIF.py,5000,1.72931170463562 -618,brian_LIF_cpp.py,5000,0.921519 -619,brian_LIF_gpu.py,5000,0.187 -620,brian_LIF.py,5000,1.72908687591553 -621,brian_LIF_cpp.py,5000,0.961467 -622,brian_LIF_gpu.py,5000,0.189 -623,brian_LIF.py,5000,1.6776762008667 -624,brian_LIF_cpp.py,5000,0.90254 -625,brian_LIF_gpu.py,5000,0.191 -626,brian_LIF.py,5000,1.67272758483887 -627,brian_LIF_cpp.py,5000,0.948928 -628,brian_LIF_gpu.py,5000,0.189 -629,brian_LIF.py,5000,1.71612596511841 -630,brian_LIF_cpp.py,6000,1.36229 -631,brian_LIF_gpu.py,6000,0.275 -632,brian_LIF.py,6000,2.31785225868225 -633,brian_LIF_cpp.py,6000,1.3837 -634,brian_LIF_gpu.py,6000,0.277 -635,brian_LIF.py,6000,2.37460875511169 -636,brian_LIF_cpp.py,6000,1.43554 -637,brian_LIF_gpu.py,6000,0.274 -638,brian_LIF.py,6000,2.41932916641235 -639,brian_LIF_cpp.py,6000,1.42454 -640,brian_LIF_gpu.py,6000,0.277 -641,brian_LIF.py,6000,2.37286496162415 -642,brian_LIF_cpp.py,6000,1.40571 -643,brian_LIF_gpu.py,6000,0.272 -644,brian_LIF.py,6000,2.31523394584656 -645,brian_LIF_cpp.py,7000,1.93965 -646,brian_LIF_gpu.py,7000,0.365 -647,brian_LIF.py,7000,3.11352896690369 -648,brian_LIF_cpp.py,7000,1.93781 -649,brian_LIF_gpu.py,7000,0.371 -650,brian_LIF.py,7000,3.18710541725159 -651,brian_LIF_cpp.py,7000,1.89808 -652,brian_LIF_gpu.py,7000,0.371 -653,brian_LIF.py,7000,3.13745713233948 -654,brian_LIF_cpp.py,7000,1.93827 -655,brian_LIF_gpu.py,7000,0.368 -656,brian_LIF.py,7000,3.16357517242432 -657,brian_LIF_cpp.py,7000,1.92576 -658,brian_LIF_gpu.py,7000,0.372 -659,brian_LIF.py,7000,3.10922694206238 -660,brian_LIF_cpp.py,8000,2.75401 -661,brian_LIF.py,8000,4.30301189422607 -662,brian_LIF_cpp.py,8000,2.75657 -663,brian_LIF.py,8000,4.38448739051819 -664,brian_LIF_cpp.py,8000,2.77501 -665,brian_LIF.py,8000,4.26257419586182 -666,brian_LIF_cpp.py,8000,2.78062 -667,brian_LIF.py,8000,4.1912248134613 -668,brian_LIF_cpp.py,8000,2.72318 -669,brian_LIF.py,8000,4.20221662521362 -670,brian_LIF_cpp.py,9000,3.37875 -671,brian_LIF.py,9000,5.47676038742065 -672,brian_LIF_cpp.py,9000,3.39071 -673,brian_LIF.py,9000,5.43741798400879 -674,brian_LIF_cpp.py,9000,3.56027 -675,brian_LIF.py,9000,5.33178782463074 -676,brian_LIF_cpp.py,9000,3.2647 -677,brian_LIF.py,9000,5.22928857803345 -678,brian_LIF_cpp.py,9000,3.51132 -679,brian_LIF.py,9000,5.41265344619751 -680,brian_LIF_cpp.py,10000,4.2807 -681,brian_LIF.py,10000,6.70665621757507 -682,brian_LIF_cpp.py,10000,4.28373 -683,brian_LIF.py,10000,6.71734356880188 -684,brian_LIF_cpp.py,10000,4.19049 -685,brian_LIF.py,10000,6.67602443695068 -686,brian_LIF_cpp.py,10000,4.2521 -687,brian_LIF.py,10000,6.661052942276 -688,brian_LIF_cpp.py,10000,4.31806 -689,brian_LIF.py,10000,6.59888029098511 -690,brian_LIF_cpp.py,11000,5.27208 
-691,brian_LIF.py,11000,8.0736837387085 -692,brian_LIF_cpp.py,11000,5.35968 -693,brian_LIF.py,11000,8.13364601135254 -694,brian_LIF_cpp.py,11000,5.40573 -695,brian_LIF.py,11000,7.98653721809387 -696,brian_LIF_cpp.py,11000,5.22792 -697,brian_LIF.py,11000,8.24580121040344 -698,brian_LIF_cpp.py,11000,5.20389 -699,brian_LIF.py,11000,8.02317357063293 -700,brian_LIF_cpp.py,12000,6.49998 -701,brian_LIF.py,12000,9.8948814868927 -702,brian_LIF_cpp.py,12000,6.76943 -703,brian_LIF.py,12000,10.1041555404663 -704,brian_LIF_cpp.py,12000,6.67478 -705,brian_LIF.py,12000,9.97086405754089 -706,brian_LIF_cpp.py,12000,6.71516 -707,brian_LIF.py,12000,10.0239236354828 -708,brian_LIF_cpp.py,12000,6.9736 -709,brian_LIF.py,12000,9.97677874565125 -710,brian_LIF_cpp.py,13000,7.6773 -711,brian_LIF.py,13000,11.6899321079254 -712,brian_LIF_cpp.py,13000,7.94528 -713,brian_LIF.py,13000,11.8777542114258 -714,brian_LIF_cpp.py,13000,7.99816 -715,brian_LIF.py,13000,11.7182266712189 -716,brian_LIF_cpp.py,13000,7.9 -717,brian_LIF.py,13000,11.8046402931213 -718,brian_LIF_cpp.py,13000,7.86629 -719,brian_LIF.py,13000,11.6273083686829 -720,brian_LIF_cpp.py,14000,9.97329 -721,brian_LIF.py,14000,14.1358249187469 -722,brian_LIF_cpp.py,14000,9.78769 -723,brian_LIF.py,14000,13.8032264709473 -724,brian_LIF_cpp.py,14000,9.70551 -725,brian_LIF.py,14000,13.8473932743073 -726,brian_LIF_cpp.py,14000,9.88618 -727,brian_LIF.py,14000,14.0600399971008 -728,brian_LIF_cpp.py,14000,9.43271 -729,brian_LIF.py,14000,13.8581650257111 -730,brian_LIF_cpp.py,15000,12.0627 -731,brian_LIF.py,15000,16.8331196308136 -732,brian_LIF_cpp.py,15000,11.893 -733,brian_LIF.py,15000,16.7040882110596 -734,brian_LIF_cpp.py,15000,11.8641 -735,brian_LIF.py,15000,16.8080172538757 -736,brian_LIF_cpp.py,15000,11.6892 -737,brian_LIF.py,15000,17.2773096561432 -738,brian_LIF_cpp.py,15000,11.8576 -739,brian_LIF.py,15000,17.1513369083405 -740,nest_native_LIF.py,7500,7.36070251464844 -741,pymonnto_fast_LIF.py,7500,0.071255445480347 -742,pymonntorch_fast_LIF_cpu.py,7500,0.098394870758057 -743,pymonntorch_fast_LIF_cuda.py,7500,0.089777946472168 -744,nest_native_LIF.py,7500,6.95034885406494 -745,pymonnto_fast_LIF.py,7500,0.066580533981323 -746,pymonntorch_fast_LIF_cpu.py,7500,0.070746898651123 -747,pymonntorch_fast_LIF_cuda.py,7500,0.089270830154419 -748,nest_native_LIF.py,7500,6.73014068603516 -749,pymonnto_fast_LIF.py,7500,0.066211700439453 -750,pymonntorch_fast_LIF_cpu.py,7500,0.069818258285523 -751,pymonntorch_fast_LIF_cuda.py,7500,0.088913440704346 -752,nest_native_LIF.py,7500,6.75975394248962 -753,pymonnto_fast_LIF.py,7500,0.073413848876953 -754,pymonntorch_fast_LIF_cpu.py,7500,0.074777603149414 -755,pymonntorch_fast_LIF_cuda.py,7500,0.088781833648682 -756,nest_native_LIF.py,7500,6.886878490448 -757,pymonnto_fast_LIF.py,7500,0.070654392242432 -758,pymonntorch_fast_LIF_cpu.py,7500,0.071615934371948 -759,pymonntorch_fast_LIF_cuda.py,7500,0.09160590171814 -760,brian_LIF_cpp.py,7500,2.08676 -761,brian_LIF_gpu.py,7500,0.426 -762,brian_LIF.py,7500,3.43933844566345 -763,brian_LIF_cpp.py,7500,2.11104 -764,brian_LIF_gpu.py,7500,0.421 -765,brian_LIF.py,7500,3.53736519813538 -766,brian_LIF_cpp.py,7500,2.06397 -767,brian_LIF_gpu.py,7500,0.434 -768,brian_LIF.py,7500,3.56699585914612 -769,brian_LIF_cpp.py,7500,2.09058 -770,brian_LIF_gpu.py,7500,0.429 -771,brian_LIF.py,7500,3.45991587638855 -772,brian_LIF_cpp.py,7500,2.02707 -773,brian_LIF_gpu.py,7500,0.427 -774,brian_LIF.py,7500,3.6414635181427 diff --git a/benchmark/Plot/Results/Swift-SF315-51G/Simple.csv 
b/benchmark/Plot/Results/Swift-SF315-51G/Simple.csv deleted file mode 100644 index 7e22fd1..0000000 --- a/benchmark/Plot/Results/Swift-SF315-51G/Simple.csv +++ /dev/null @@ -1,11 +0,0 @@ -,brian_LIF_cpp.py,brian_LIF_gpu.py,brian_LIF.py,nest_native_LIF.py,pymonnto_fast_LIF.py,pymonntorch_fast_LIF_cpu.py,pymonntorch_fast_LIF_cuda.py -0,1.69082,0.418,2.9928178787231445,6.9833290576934814,0.07109808921813965,0.08388805389404297,0.0873117446899414 -1,1.65526,0.429,3.165710687637329,6.942539691925049,0.070404052734375,0.07147860527038574,0.08885478973388672 -2,1.69382,0.419,3.187665700912476,7.0798423290252686,0.06569647789001465,0.07119369506835938,0.08845043182373047 -3,1.72043,0.423,3.1918904781341557,6.920550346374512,0.06680846214294434,0.07062816619873047,0.08751368522644043 -4,1.71997,0.431,3.1005539894104004,7.03631854057312,0.06830263137817383,0.06896257400512695,0.08762502670288086 -5,1.56293,0.427,3.2021844387054443,6.769859075546265,0.08044266700744629,0.07278227806091309,0.08746576309204102 -6,1.78403,0.426,3.1367297172546387,7.719399929046631,0.06453108787536621,0.07947111129760742,0.0869908332824707 -7,1.80724,0.425,3.391397714614868,7.6029558181762695,0.06969523429870605,0.08382129669189453,0.0874178409576416 -8,2.22378,0.422,3.609426259994507,7.0238258838653564,0.06973862648010254,0.06985259056091309,0.08691000938415527 -9,2.10281,0.422,3.587639093399048,7.153616666793823,0.06676721572875977,0.07092046737670898,0.10186362266540527 diff --git a/benchmark/Plot/Results/Swift-SF315-51G/Simple2.csv b/benchmark/Plot/Results/Swift-SF315-51G/Simple2.csv deleted file mode 100644 index 23fa394..0000000 --- a/benchmark/Plot/Results/Swift-SF315-51G/Simple2.csv +++ /dev/null @@ -1,11 +0,0 @@ -,pymonnto_fast_LIF.py,pymonntorch_fast_LIF_cpu.py,pymonntorch_fast_LIF_cuda.py,pymonntorch_slow_LIF_cpu.py,pymonntorch_slow_LIF_cuda.py,pymonnto_slow_LIF.py -0,0.07109808921813965,0.08388805389404297,0.0873117446899414,44.05892062187195,2.441441535949707,38.6971971988678 -1,0.070404052734375,0.07147860527038574,0.08885478973388672,44.09564733505249,2.426706314086914,39.781362771987915 -2,0.06569647789001465,0.07119369506835938,0.08845043182373047,45.61512279510498,2.42649245262146,39.13650107383728 -3,0.06680846214294434,0.07062816619873047,0.08751368522644043,44.02295756340027,2.423428773880005,39.70451784133911 -4,0.06830263137817383,0.06896257400512695,0.08762502670288086,43.90470743179321,2.4264540672302246,40.12606692314148 -5,0.08044266700744629,0.07278227806091309,0.08746576309204102,44.04615831375122,2.4441137313842773,40.17040300369263 -6,0.06453108787536621,0.07947111129760742,0.0869908332824707,43.977859020233154,2.4348905086517334,39.988298654556274 -7,0.06969523429870605,0.08382129669189453,0.0874178409576416,45.63514733314514,2.4489986896514893,39.07223033905029 -8,0.06973862648010254,0.06985259056091309,0.08691000938415527,45.46747422218323,2.427772045135498,39.76540946960449 -9,0.06676721572875977,0.07092046737670898,0.10186362266540527,45.70976424217224,2.4411652088165283,40.357197284698486 diff --git a/benchmark/Plot/Results/Swift-SF315-51G/numpy_operations.txt b/benchmark/Plot/Results/Swift-SF315-51G/numpy_operations.txt deleted file mode 100644 index 6726258..0000000 --- a/benchmark/Plot/Results/Swift-SF315-51G/numpy_operations.txt +++ /dev/null @@ -1,28 +0,0 @@ -Initialization... - -Synapse Operation... 
- W1.dot(s): 29.056288480758667 ms - np.sum(W1[:, s], axis=1): 6.352580785751343 ms 4.57393450956674 x ratio - W2.T.dot(s): 29.328654527664185 ms 0.990713312584844 x ratio - np.sum(W2[s], axis=0): 0.5672938823699951 ms 51.21911126446424 x ratio - -STDP... - W1 += d[:, None] * s[None, :]: 65.17059278488159 ms - W1[d[:, None] * s[None, :]] += 1: 41.54950761795044 ms 1.568504574930901 x ratio - W1[np.ix_(d, s)] += 1: 0.0778799057006836 ms 836.808830192376 x ratio - W2 += s[:, None] * d[None, :]: 113.40348815917969 ms - W2[s[:, None] * d[None, :]] += 1: 83.35632538795471 ms 1.3604664988695254 x ratio - W2[np.ix_(s, d)] += 1: 0.07498407363891602 ms 1512.36766230215 x ratio - -Reset operation... - voltage = voltage * 0.0: 0.002754652500152588 ms - voltage = np.zeros(5000, dtype=dtype): 0.0012801361083984374 ms 2.151843450146016 x ratio - voltage.fill(0): 0.001611979007720947 ms 1.708863755023199 x ratio - -Datatypes... - float64: 0.6118819713592529 ms - float32: 0.18270540237426758 ms 3.3490086412761215 x ratio -/home/saeed/anaconda3/envs/wnestml/lib/python3.11/site-packages/numpy/core/fromnumeric.py:88: RuntimeWarning: overflow encountered in reduce - return ufunc.reduce(obj, axis, dtype, out, **passkwargs) - float16: 3.877153158187866 ms 0.15781733307776757 x ratio - diff --git a/benchmark/Plot/Results/Swift-SF315-51G/torch_operations_cpu.txt b/benchmark/Plot/Results/Swift-SF315-51G/torch_operations_cpu.txt deleted file mode 100644 index f93baba..0000000 --- a/benchmark/Plot/Results/Swift-SF315-51G/torch_operations_cpu.txt +++ /dev/null @@ -1,26 +0,0 @@ -Initialization... - -Synapse Operation... - W1.dot(s): 28.728623628616333 ms - np.sum(W1[:, s], axis=1): 7.292013883590698 ms 3.9397379225051505 x ratio - W2.T.dot(s): 29.517022371292114 ms 0.9732900313331548 x ratio - np.sum(W2[s], axis=0): 0.24581956863403318 ms 116.86874152556348 x ratio - -STDP... - W1 += d[:, None] * s[None, :]: 190.00608563423157 ms - W1[d[:, None] * s[None, :]] += 1: 107.68864870071411 ms 1.7644021716930653 x ratio - W1[np.ix_(d, s)] += 1: 0.17751264572143555 ms 1070.3805628157982 x ratio - W2 += s[:, None] * d[None, :]: 195.76051211357117 ms - W2[s[:, None] * d[None, :]] += 1: 104.63068795204163 ms 1.8709665007965892 x ratio - W2[np.ix_(s, d)] += 1: 0.15137124061584473 ms 1293.247722071454 x ratio - -Reset operation... - voltage = voltage * 0.0: 0.004859132766723633 ms - voltage = torch.zeros(5000, dtype=dtype): 0.003850061893463135 ms 1.2620921172653765 x ratio - voltage.fill(0): 0.002020423412322998 ms 2.405007156958652 x ratio - -Datatypes... - float64: 0.22604584693908691 ms - float32: 0.17580652236938477 ms 1.2857648504311174 x ratio - float16: 0.6492710113525391 ms 0.3481533026835681 x ratio - diff --git a/benchmark/Plot/Results/Swift-SF315-51G/torch_operations_cuda.txt b/benchmark/Plot/Results/Swift-SF315-51G/torch_operations_cuda.txt deleted file mode 100644 index c6894e9..0000000 --- a/benchmark/Plot/Results/Swift-SF315-51G/torch_operations_cuda.txt +++ /dev/null @@ -1,26 +0,0 @@ -Initialization... - -Synapse Operation... - W1.dot(s): 10.010446071624756 ms - np.sum(W1[:, s], axis=1): 1.1984899044036865 ms 8.352549349679748 x ratio - W2.T.dot(s): 9.450144529342651 ms 1.0592902617036566 x ratio - np.sum(W2[s], axis=0): 0.3907957077026367 ms 25.615547648854626 x ratio - -STDP... 
- W1 += d[:, None] * s[None, :]: 26.415058135986328 ms - W1[d[:, None] * s[None, :]] += 1: 18.3481023311615 ms 1.4396615878430281 x ratio - W1[np.ix_(d, s)] += 1: 0.1987447738647461 ms 132.9094477420717 x ratio - W2 += s[:, None] * d[None, :]: 26.496868133544922 ms - W2[s[:, None] * d[None, :]] += 1: 18.433886766433716 ms 1.4373999617808815 x ratio - W2[np.ix_(s, d)] += 1: 0.2005159854888916 ms 132.14342023126542 x ratio - -Reset operation... - voltage = voltage * 0.0: 0.02358676195144653 ms - voltage = torch.zeros(5000, dtype=dtype): 0.0241587495803833 ms 0.9763237899778878 x ratio - voltage.fill(0): 0.014910402297973633 ms 1.5818997690392325 x ratio - -Datatypes... - float64: 0.4024181365966797 ms - float32: 0.2694706916809082 ms 1.4933651377315655 x ratio - float16: 0.3095419406890869 ms 1.3000439801496249 x ratio - diff --git a/benchmark/Plot/comparison.py b/benchmark/Plot/comparison.py deleted file mode 100644 index b4ad4e7..0000000 --- a/benchmark/Plot/comparison.py +++ /dev/null @@ -1,162 +0,0 @@ -import matplotlib.pyplot as plt -import numpy as np -import csv - -ekw = dict(ecolor=(0, 0, 0, 1.0), lw=1, capsize=3, capthick=1) - -color1 = (0, 176/255, 80/255, 1) -color2 = (1/255, 127/255, 157/255, 1) -color3 = (120/255,110/255,120/255,1)#(117/255,117/255,117/255,1)#(253/255, 97/255, 0, 1) - -#color11 = (0, 176/255, 80/255, 0.7) -color11 = (0, 176/255*0.5, 80/255*0.5, 0.7) -#color11 = (0, 0, 0, 1.0) - -lookup = {'brian_LIF.py': ['Brian2', color2], - 'brian_LIF_cpp.py': ['Brian2 C++', color2], - 'brian_LIF_gpu.py': ['Brian2 GPU', color2], - 'nest_native_LIF.py': ['Nest', color3], - 'pymonnto_fast_LIF.py': ['PymoNNto', color11], - 'pymonnto_slow_LIF.py': ['PymoNNto', color11],# naive - 'pymonntorch_fast_LIF_cpu.py': ['Pymonntorch CPU', color1], - 'pymonntorch_fast_LIF_cuda.py': ['Pymonntorch GPU', color1], - 'pymonntorch_slow_LIF_cpu.py': ['Pymonntorch CPU', color1],# naive - 'pymonntorch_slow_LIF_cuda.py': ['Pymonntorch GPU', color1],# naive - - 'brian_izh.py': ['Brian2', color2], - 'brian_izh_cpp.py': ['Brian2 C++', color2], - 'brian_izh_cuda.py': ['Brian2 GPU', color2], - 'pymonnto_izh.py': ['PymoNNto', color11], - 'pymonntorch_izh_cpu.py': ['PymoNNtorch CPU', color1], - 'pymonntorch_izh_cuda.py': ['PymoNNtorch GPU', color1], - 'pynn_nest_izh.py': ['Nest (PyNN)', color3] - } - -def load(filename): - sim_col = [] - measurements = [] - with open(filename, newline='') as csvfile: - reader = csv.reader(csvfile, delimiter=',') - for i, row in enumerate(reader): - if i==0: - sim_col = [lookup[s] for s in row[1:]] - else: - measurements.append([float(s) for s in row]) - - return sim_col, np.array(measurements)[:,1:] #remove enumeration in first column - - -fig, ax = plt.subplots(1, 1) -fig.set_figwidth(4)#12 -fig.set_figheight(4) - -axis = ax#ax[2] -sim_col, data = load('Results/Swift-SF315-51G/Simple2.csv') -measurements = np.mean(data, axis=0) -err = np.std(data, axis=0) -simulators = [s for s, c in sim_col] -colors = [c for s, c in sim_col] -index = list(range(len(sim_col))) -index = [1,2,3,5,6,4]#5,6,7, 2,3,1 - -text_gap = np.max(measurements)*0.01 -for i, s, m, e, c in zip(index, simulators, measurements, err, colors): - axis.bar(i, m, width=0.8, color=c, yerr=e, error_kw=ekw) - axis.text(i, m+e+text_gap, '{0:.2f}'.format(m)+'s', ha='center', va='bottom', color=c) # , color='gray' - -axis.tick_params(axis='both', which='both', length=0) -axis.set_yticks([], []) -axis.set_ylim([0, np.max(measurements)*1.5])#1.3 -axis.set_xticks(np.array(index), simulators, rotation=30, ha="right") 
-axis.spines[['left', 'right', 'top']].set_visible(False) -axis.set_title('Simple LIF with\nOne Step STDP', fontweight='bold') #Simple LIF with\nOne Step STDP\n optimized vs naive - -for xtick, color in zip(axis.get_xticklabels(), colors): - xtick.set_color(color) - -axis.plot([3.5, 3.5], [0.0, 60], '--', c='black', linewidth=1) - -axis.text(2, 55, 'optimized', size=12, ha='center', va='bottom', color=(0,0,0,1), fontweight='bold') -axis.text(5, 55, 'naive', size=12, ha='center', va='bottom', color=(0,0,0,1), fontweight='bold') - -fig.tight_layout() -#plt.savefig('filename.png', dpi=600) -plt.show() - - - - - -fig, ax = plt.subplots(1, 2) -fig.set_figwidth(8)#12 -fig.set_figheight(4) - - -axis = ax[0] -sim_col, data = load('Results/Swift-SF315-51G/Simple.csv') -measurements = np.mean(data, axis=0) -err = np.std(data, axis=0) -simulators = [s for s, c in sim_col] -colors = [c for s, c in sim_col] -#index = list(range(len(sim_col))) -index = [3,4,2,1,5,6,7] - -text_gap = np.max(measurements)*0.01 -for i, s, m, e, c in zip(index, simulators, measurements, err, colors): - axis.bar(i, m, width=0.8, color=c, yerr=e, error_kw=ekw) - axis.text(i, m + e+text_gap, '{0:.2f}'.format(m)+'s', ha='center', va='bottom', color=c) - - -axis.tick_params(axis='both', which='both', length=0) -axis.set_yticks([], []) -axis.set_ylim([0, np.max(measurements)*1.3]) -axis.set_xticks(index, simulators, rotation=30, ha="right") -axis.spines[['left', 'right', 'top']].set_visible(False) -axis.set_title('Simple LIF with\nOne Step STDP', fontweight='bold') - -for xtick, color in zip(axis.get_xticklabels(), colors): - xtick.set_color(color) - - - - - - - -axis = ax[1] -sim_col, data = load('Results/Swift-SF315-51G/Izhikevich.csv') -measurements = np.mean(data, axis=0) -err = np.std(data, axis=0) -simulators = [s for s, c in sim_col] -colors = [c for s, c in sim_col] -#index = list(range(len(sim_col))) -index = [4,3,2,5,6,7,1] - -text_gap = np.max(measurements)*0.01 -for i, s, m, e, c in zip(index, simulators, measurements, err, colors): - axis.bar(i, m, width=0.8, color=c, yerr=e, error_kw=ekw) - axis.text(i, m + e+text_gap, '{0:.2f}'.format(m)+'s', ha='center', va='bottom', color=c) - - -axis.tick_params(axis='both', which='both', length=0) -axis.set_yticks([], []) -axis.set_ylim([0, np.max(measurements)*1.3]) -axis.set_xticks(index, simulators, rotation=30, ha="right") -axis.spines[['left', 'right', 'top']].set_visible(False) -axis.set_title('Izhikevich with\nTrace STDP', fontweight='bold') - -for xtick, color in zip(axis.get_xticklabels(), colors): - xtick.set_color(color) - - - -ax[0].text(x=-0.2, y=10, s='A', size=20, weight='bold') -ax[1].text(x=-0.2, y=4.75, s='B', size=20, weight='bold') -#ax[0].text(x=100, y=0, s=' ', size=20, weight='bold') - -#ax[0].set_ylabel('compute time') - -fig.tight_layout() -#plt.savefig('filename.png', dpi=600) -plt.show() - diff --git a/benchmark/Plot/comparison2.py b/benchmark/Plot/comparison2.py deleted file mode 100644 index 8605d6c..0000000 --- a/benchmark/Plot/comparison2.py +++ /dev/null @@ -1,298 +0,0 @@ -import matplotlib.pyplot as plt -import numpy as np -import csv - -ekw = dict(ecolor=(0, 0, 0, 1.0), lw=1, capsize=3, capthick=1) - -color1 = (0, 176/255, 80/255, 1) -color2 = (1/255, 127/255, 157/255, 1) -color3 = (120/255,110/255,120/255,1)#(117/255,117/255,117/255,1)#(253/255, 97/255, 0, 1) - -#color11 = (0, 176/255, 80/255, 0.7) -color11 = (0, 176/255*0.5, 80/255*0.5, 0.7) -#color11 = (0, 0, 0, 1.0) - -lookup = {'brian_LIF.py': ['Brian2', color2], - 'brian_LIF_cpp.py': 
['Brian2 C++', color2], - 'brian_LIF_gpu.py': ['Brian2 GPU', color2], - 'nest_native_LIF.py': ['Nest', color3], - 'pymonnto_fast_LIF.py': ['PymoNNto', color11], - 'pymonnto_slow_LIF.py': ['PymoNNto', color11],# naive - 'pymonntorch_fast_LIF_cpu.py': ['PymoNNtorch CPU ', color1], - 'pymonntorch_fast_LIF_cuda.py': ['PymoNNtorch GPU', color1], - 'pymonntorch_slow_LIF_cpu.py': ['PymoNNtorch CPU', color1],# naive - 'pymonntorch_slow_LIF_cuda.py': ['PymoNNtorch GPU', color1],# naive - - 'brian_izh.py': ['Brian2', color2], - 'brian_izh_cpp.py': ['Brian2 C++', color2], - 'brian_izh_cuda.py': ['Brian2 GPU', color2], - 'pymonnto_izh.py': ['PymoNNto', color11], - 'pymonntorch_izh_cpu.py': ['PymoNNtorch CPU', color1], - 'pymonntorch_izh_cuda.py': ['PymoNNtorch GPU', color1], - 'pynn_nest_izh.py': ['Nest (PyNN)', color3] - } - -markers_lookup = {"⋅⋅⋅ Pymonntorch GPU": ':', -"⋅⋅⋅ PymoNNtorch GPU": ':', - "-- Pymonntorch CPU": '--', - "-- Pymonntorch CPU ": '--', - "-- PymoNNtorch CPU": '--', - "-- PymoNNtorch CPU ": '--', - "— PymoNNto": '-', -"— Nest": '-', -"⋅⋅⋅ Brian2 GPU": ":", -"-- Brian2 C++": "--", -"— Brian2": "-", -'— Nest (PyNN)': "-", -} - -def load(filename): - sim_col = [] - measurements = [] - with open(filename, newline='') as csvfile: - reader = csv.reader(csvfile, delimiter=',') - for i, row in enumerate(reader): - if i==0: - sim_col = [lookup[s] for s in row[1:]] - else: - measurements.append([float(s) for s in row]) - - return sim_col, np.array(measurements)[:,1:] #remove enumeration in first column - - -fig, ax = plt.subplots(1, 1) -fig.set_figwidth(4)#12 -fig.set_figheight(4) - -axis = ax -sim_col, data = load('Results/Swift-SF315-51G/Simple2.csv') - -avg_measurements = np.mean(data, axis=0) -avg_speed_ups = np.max(avg_measurements)/avg_measurements -speed_up_err = np.std(np.max(avg_measurements)/data, axis=0) - -text_gap = np.max(avg_speed_ups)*0.01 - -simulators = [s for s, c in sim_col] -colors = [c for s, c in sim_col] -index = list(range(len(sim_col))) -index = [1,2,3,5,6,4]#5,6,7, 2,3,1 - - -for i, s, m, e, c in zip(index, simulators, avg_speed_ups, speed_up_err, colors): - #axis.plot([i-0.4,i+0.4],[m*14,m*14], color='red',) - axis.bar(i, m, width=0.8, color=c, yerr=e, error_kw=ekw) - axis.text(i, m+e+text_gap, '{0:.1f}'.format(m)+'x', ha='center', va='bottom', color=c) # , color='gray' - -axis.tick_params(axis='both', which='both', length=0) -axis.set_yticks([], []) -#axis.set_ylim([0, np.max(measurements)*1.5])#1.3 -axis.set_xticks(np.array(index), simulators, rotation=30, ha="right") -axis.spines[['left', 'right', 'top']].set_visible(False) -axis.set_title('Simple LIF with\nOne Step STDP', fontweight='bold') #Simple LIF with\nOne Step STDP\n optimized vs naive - -for xtick, color in zip(axis.get_xticklabels(), colors): - xtick.set_color(color) - -axis.plot([3.5, 3.5], [0.0, 850], '--', c='black', linewidth=1) - -axis.text(2, 750, 'optimized', size=12, ha='center', va='bottom', color=(0,0,0,1), fontweight='bold') -axis.text(5, 750, 'naive', size=12, ha='center', va='bottom', color=(0,0,0,1), fontweight='bold') - -fig.tight_layout() -plt.savefig('measurements_x2.png', dpi=600) -plt.show() - - -lookup = {'brian_LIF.py': [ '— Brian2', color2], - 'brian_LIF_cpp.py': ['-- Brian2 C++', color2], - 'brian_LIF_gpu.py': ['⋅⋅⋅ Brian2 GPU', color2], - 'nest_native_LIF.py': ['— Nest', color3], - 'pymonnto_fast_LIF.py': ['— PymoNNto', color11], - 'pymonnto_slow_LIF.py': ['— PymoNNto', color11],# naive - 'pymonntorch_fast_LIF_cpu.py': ['-- PymoNNtorch CPU ', color1], - 
'pymonntorch_fast_LIF_cuda.py': ['⋅⋅⋅ PymoNNtorch GPU', color1], - 'pymonntorch_slow_LIF_cpu.py': ['-- PymoNNtorch CPU', color1],# naive - 'pymonntorch_slow_LIF_cuda.py': ['⋅⋅⋅ PymoNNtorch GPU', color1],# naive - - 'brian_izh.py': ['— Brian2', color2], - 'brian_izh_cpp.py': ['-- Brian2 C++', color2], - 'brian_izh_cuda.py': ['⋅⋅⋅ Brian2 GPU', color2], - 'pymonnto_izh.py': ['— PymoNNto', color11], - 'pymonntorch_izh_cpu.py': ['-- PymoNNtorch CPU', color1], - 'pymonntorch_izh_cuda.py': ['⋅⋅⋅ PymoNNtorch GPU', color1], - 'pynn_nest_izh.py': ['— Nest (PyNN)', color3] - } - - - -fig, ax = plt.subplots(1, 2) -fig.set_figwidth(8)#12 -fig.set_figheight(4) - - -axis = ax[0] -sim_col, data = load('Results/Swift-SF315-51G/Simple.csv') - -avg_measurements = np.mean(data, axis=0) -avg_speed_ups = np.max(avg_measurements)/avg_measurements -speed_up_err = np.std(np.max(avg_measurements)/data, axis=0) - -text_gap = np.max(avg_speed_ups)*0.01 - -#measurements = np.mean(data, axis=0) -#err = np.std(data, axis=0) -simulators = [s for s, c in sim_col] -colors = [c for s, c in sim_col] -#index = list(range(len(sim_col))) -index = [3,4,2,1,5,6,7] - -for i, s, m, e, c in zip(index, simulators, avg_speed_ups, speed_up_err, colors): - axis.bar(i, m, width=0.8, color=c, yerr=e, error_kw=ekw) - axis.text(i, m + e+text_gap, '{0:.1f}'.format(m)+'x', ha='center', va='bottom', color=c) - - -axis.tick_params(axis='both', which='both', length=0) -axis.set_yticks([], []) -axis.set_ylim([0, np.max(avg_speed_ups)*1.3]) -axis.set_xticks(index, simulators, rotation=30, ha="right") -axis.spines[['left', 'right', 'top']].set_visible(False) -axis.set_title('Simple LIF with\nOne Step STDP', fontweight='bold') - -for xtick, color in zip(axis.get_xticklabels(), colors): - xtick.set_color(color) - - - - - - - -axis = ax[1] -sim_col, data = load('Results/Swift-SF315-51G/Izhikevich.csv') - -avg_measurements = np.mean(data, axis=0) -avg_speed_ups = np.max(avg_measurements)/avg_measurements -speed_up_err = np.std(np.max(avg_measurements)/data, axis=0) - -text_gap = np.max(avg_speed_ups)*0.01 - -simulators = [s for s, c in sim_col] -colors = [c for s, c in sim_col] -#index = list(range(len(sim_col))) -index = [3,4,2,5,6,7,1] - -for i, s, m, e, c in zip(index, simulators, avg_speed_ups, speed_up_err, colors): - axis.bar(i, m, width=0.8, color=c, yerr=e, error_kw=ekw) - axis.text(i, m + e+text_gap, '{0:.1f}'.format(m)+'x', ha='center', va='bottom', color=c) - - -axis.tick_params(axis='both', which='both', length=0) -axis.set_yticks([], []) -axis.set_ylim([0, np.max(avg_speed_ups)*1.3]) -axis.set_xticks(index, simulators, rotation=30, ha="right") -axis.spines[['left', 'right', 'top']].set_visible(False) -axis.set_title('Izhikevich with\nTrace STDP', fontweight='bold') - -for xtick, color in zip(axis.get_xticklabels(), colors): - xtick.set_color(color) - - - -ax[0].text(x=-0.2, y=145, s='A', size=20, weight='bold') -ax[1].text(x=-0.2, y=12.6, s='B', size=20, weight='bold') -#ax[0].text(x=100, y=0, s=' ', size=20, weight='bold') - -#ax[0].set_ylabel('compute time') - -fig.tight_layout() -plt.savefig('measurements_x.png', dpi=600) -plt.show() - - - -############################################################################### - - -def load2(filename): - sim = [] - col = [] - num = [] - mes = [] - with open(filename, newline='') as csvfile: - reader = csv.reader(csvfile, delimiter=',') - for i, row in enumerate(reader): - if i>0: - sim.append(lookup[row[1]][0]) - col.append(lookup[row[1]][1]) - num.append(int(row[2])) - 
mes.append(float(row[3])) - return np.array(sim), np.array(col), np.array(num), np.array(mes) - #print(i, row) - #if i==0: - # sim_col = [lookup[s] for s in row[1:]] - #else: - # measurements.append([float(s) for s in row]) - - #return sim_col, np.array(measurements)[:,1:] #remove enumeration in first column - - -fig, ax = plt.subplots(1, 2) -fig.set_figwidth(8)#12 -fig.set_figheight(3.25) - -axis = ax[0] -axis.axvline(7500, c='lightgray', linestyle='--') -sim, col, num, mes = load2('Results/Swift-SF315-51G/LIF.csv') -for s in np.flip(np.unique(sim)): - idx = sim==s - x = np.sort(np.unique(num[idx])) - y = np.array([np.mean(mes[idx][num[idx]==n]) for n in x]) - e = np.array([np.std(mes[idx][num[idx]==n]) for n in x]) - axis.fill_between(x, y - e, y + e, alpha=0.5, edgecolor=col[idx][0], facecolor=col[idx][0], linewidth=0) #, c=col[idx][0] - # axis.plot(x, y, c=col[idx][0]) - axis.plot(x, y, c=col[idx][0], linestyle=markers_lookup[s], label=s) -handles, labels = axis.get_legend_handles_labels() -axis.semilogy() -axis.tick_params(axis='x', which='both', length=3) -axis.tick_params(axis='y', which='both', length=0) -axis.set_yticks([], []) -axis.set_ylabel('simulation time (log scale)') -axis.set_xticks([0, 2500, 5000, 7500, 10000, 12500, 15000], [0, 2500, 5000, 7500, 7500, 12500, 15000]) -axis.set_title('number of neurons', x=0.85, y=0.1, pad=-14, fontsize=10) -axis.spines[['left', 'right', 'top']].set_visible(False) -axis.set_xlim([0, 15000]) - -axis = ax[1] -axis.axvline(2500, c='lightgray', linestyle='--') -sim, col, num, mes = load2('Results/Swift-SF315-51G/IZH.csv') -for s in np.flip(np.unique(sim)): - idx = sim==s - x = np.sort(np.unique(num[idx])) - y = np.array([np.mean(mes[idx][num[idx]==n]) for n in x]) - e = np.array([np.std(mes[idx][num[idx]==n]) for n in x]) - axis.fill_between(x, y - e, y + e, alpha=0.5, edgecolor=col[idx][0], facecolor=col[idx][0], linewidth=0) #, c=col[idx][0] - # axis.plot(x, y, c=col[idx][0]) - axis.plot(x, y, c=col[idx][0], linestyle=markers_lookup[s], label=s) -axis.semilogy() -axis.tick_params(axis='x', which='both', length=3) -axis.tick_params(axis='y', which='both', length=0) -axis.set_yticks([], []) -axis.set_ylabel('simulation time (log scale)') - -axis.set_xticks([0, 2500, 5000, 7500, 10000, 12500, 15000], [0, 2500, 5000, 7500, 10000, 12500, 15000]) -axis.spines[['left', 'right', 'top']].set_visible(False) -axis.set_xlim([0, 15000]) -axis.set_title('number of neurons', x=0.85, y=0.1, pad=-14, fontsize=10) -# fig.legend(handles, labels, ncol=5, loc='lower center', bbox_to_anchor=(0.5, 0.01), prop={'size':8} ) - -fig.tight_layout() -# plt.subplots_adjust(left=None, bottom=0.2, right=None, top=None, wspace=None, hspace=0.1) - -ax[0].text(x=-0.2, y=15, s='C', size=20, weight='bold') -ax[1].text(x=-0.2, y=125, s='D', size=20, weight='bold') - -plt.savefig('measurements_n.png', dpi=600) -plt.show() - - diff --git a/benchmark/Plot/comparison3.py b/benchmark/Plot/comparison3.py deleted file mode 100644 index 5ddf95d..0000000 --- a/benchmark/Plot/comparison3.py +++ /dev/null @@ -1,319 +0,0 @@ -import matplotlib.pyplot as plt -import numpy as np -import csv - -def load2(filename): - sim = [] - col = [] - num = [] - mes = [] - with open(filename, newline='') as csvfile: - reader = csv.reader(csvfile, delimiter=',') - for i, row in enumerate(reader): - if i>0: - sim.append(lookup[row[1]][0]) - col.append(lookup[row[1]][1]) - num.append(int(row[2])) - mes.append(float(row[3])) - return np.array(sim), np.array(col), np.array(num), np.array(mes) - -ekw = 
dict(ecolor=(0, 0, 0, 1.0), lw=1, capsize=3, capthick=1) - -color1 = (0, 176/255, 80/255, 1) -color2 = (1/255, 127/255, 157/255, 1) -color3 = (120/255,110/255,120/255,1)#(117/255,117/255,117/255,1)#(253/255, 97/255, 0, 1) - -#color11 = (0, 176/255, 80/255, 0.7) -color11 = (0, 176/255*0.5, 80/255*0.5, 0.7) -#color11 = (0, 0, 0, 1.0) - -lookup = {'brian_LIF.py': ['Brian2', color2], - 'brian_LIF_cpp.py': ['Brian2 C++', color2], - 'brian_LIF_gpu.py': ['Brian2 GPU', color2], - 'nest_native_LIF.py': ['Nest', color3], - 'pymonnto_fast_LIF.py': ['PymoNNto', color11], - 'pymonnto_slow_LIF.py': ['PymoNNto', color11],# naive - 'pymonntorch_fast_LIF_cpu.py': ['PymoNNtorch CPU ', color1], - 'pymonntorch_fast_LIF_cuda.py': ['PymoNNtorch GPU', color1], - 'pymonntorch_slow_LIF_cpu.py': ['PymoNNtorch CPU', color1],# naive - 'pymonntorch_slow_LIF_cuda.py': ['PymoNNtorch GPU', color1],# naive - - 'brian_izh.py': ['Brian2', color2], - 'brian_izh_cpp.py': ['Brian2 C++', color2], - 'brian_izh_cuda.py': ['Brian2 GPU', color2], - 'pymonnto_izh.py': ['PymoNNto', color11], - 'pymonntorch_izh_cpu.py': ['PymoNNtorch CPU', color1], - 'pymonntorch_izh_cuda.py': ['PymoNNtorch GPU', color1], - 'pynn_nest_izh.py': ['Nest (PyNN)', color3] - } - -markers_lookup = {"⋅⋅⋅ Pymonntorch GPU": ':', -"⋅⋅⋅ PymoNNtorch GPU": ':', - "-- Pymonntorch CPU": '--', - "-- Pymonntorch CPU ": '--', - "-- PymoNNtorch CPU": '--', - "-- PymoNNtorch CPU ": '--', - "— PymoNNto": '-', -"— Nest": '-', -"⋅⋅⋅ Brian2 GPU": ":", -"-- Brian2 C++": "--", -"— Brian2": "-", -'— Nest (PyNN)': "-", -} - -def load(filename): - sim_col = [] - measurements = [] - with open(filename, newline='') as csvfile: - reader = csv.reader(csvfile, delimiter=',') - for i, row in enumerate(reader): - if i==0: - sim_col = [lookup[s] for s in row[1:]] - else: - measurements.append([float(s) for s in row]) - - return sim_col, np.array(measurements)[:,1:] #remove enumeration in first column - - -fig, ax = plt.subplots(1, 1) -fig.set_figwidth(5)#12 -fig.set_figheight(4) - -axis = ax -sim_col, data = load('Results/Swift-SF315-51G/Simple2.csv') - -avg_measurements = np.mean(data, axis=0) -avg_speed_ups = np.max(avg_measurements)/avg_measurements -speed_up_err = np.std(np.max(avg_measurements)/data, axis=0) - -text_gap = np.max(avg_speed_ups)*0.01 - -simulators = [s for s, c in sim_col] -colors = [c for s, c in sim_col] -index = list(range(len(sim_col))) -index = [6,5,4,2,1,3]#5,6,7, 2,3,1 - - -for i, s, m, e, c in zip(index, simulators, avg_speed_ups, speed_up_err, colors): - #axis.plot([i-0.4,i+0.4],[m*14,m*14], color='red',) - # axis.barh(i, m, width=0.8, color=c, yerr=e, error_kw=ekw) - axis.barh(i, m , color=c, xerr=e, error_kw=ekw) - axis.text(m+e+text_gap+20, i-0.02, '{0:.1f}'.format(m)+'x', ha='left', va='center', color=c) # , color='gray' - -axis.tick_params(axis='both', which='both', length=0) -axis.set_xticks([], []) -#axis.set_ylim([0, np.max(measurements)*1.5])#1.3 -# axis.set_xticks(np.array(index), simulators, rotation=30, ha="right") -axis.set_yticks(np.array(index), simulators) -# axis.spines[['left', 'right', 'top']].set_visible(False) -axis.spines[['bottom', 'right', 'top']].set_visible(False) -axis.set_title('Simple LIF with\nOne Step STDP', size=10, fontweight='bold') #Simple LIF with\nOne Step STDP\n optimized vs naive - -for ytick, color in zip(axis.get_yticklabels(), colors): - ytick.set_color(color) - -axis.hlines(3.5, 0, 1000, linestyles='--', color='black', linewidth=1) - -axis.text(1000, 3.55, 'optimized', ha='right', va='bottom', color=(0,0,0,1)) 
-axis.text(1000, 3.15, 'naive', ha='right', va='bottom', color=(0,0,0,1)) - -fig.tight_layout() -plt.savefig('new_measurements_x2.png', dpi=600) -plt.show() - - -lookup = {'brian_LIF.py': [ '— Brian2', color2], - 'brian_LIF_cpp.py': ['-- Brian2 C++', color2], - 'brian_LIF_gpu.py': ['⋅⋅⋅ Brian2 GPU', color2], - 'nest_native_LIF.py': ['— Nest', color3], - 'pymonnto_fast_LIF.py': ['— PymoNNto', color11], - 'pymonnto_slow_LIF.py': ['— PymoNNto', color11],# naive - 'pymonntorch_fast_LIF_cpu.py': ['-- PymoNNtorch CPU ', color1], - 'pymonntorch_fast_LIF_cuda.py': ['⋅⋅⋅ PymoNNtorch GPU', color1], - 'pymonntorch_slow_LIF_cpu.py': ['-- PymoNNtorch CPU', color1],# naive - 'pymonntorch_slow_LIF_cuda.py': ['⋅⋅⋅ PymoNNtorch GPU', color1],# naive - - 'brian_izh.py': ['— Brian2', color2], - 'brian_izh_cpp.py': ['-- Brian2 C++', color2], - 'brian_izh_cuda.py': ['⋅⋅⋅ Brian2 GPU', color2], - 'pymonnto_izh.py': ['— PymoNNto', color11], - 'pymonntorch_izh_cpu.py': ['-- PymoNNtorch CPU', color1], - 'pymonntorch_izh_cuda.py': ['⋅⋅⋅ PymoNNtorch GPU', color1], - 'pynn_nest_izh.py': ['— Nest (PyNN)', color3] - } - - -#1 -############################################################################### -#2 - -fig, ax = plt.subplots(1, 2, width_ratios=[55, 45]) -fig.set_figwidth(8)#12 -fig.set_figheight(4) - - - -fig.suptitle('Izhikevich with\nTrace STDP', fontweight='bold') - - -axis = ax[0] -axis.axvline(2500, c='lightgray', linestyle='--') -sim, col, num, mes = load2('Results/Swift-SF315-51G/IZH.csv') -for s in np.flip(np.unique(sim)): - idx = sim==s - x = np.sort(np.unique(num[idx])) - y = np.array([np.mean(mes[idx][num[idx]==n]) for n in x]) - e = np.array([np.std(mes[idx][num[idx]==n]) for n in x]) - axis.fill_between(x, y - e, y + e, alpha=0.5, edgecolor=col[idx][0], facecolor=col[idx][0], linewidth=0) #, c=col[idx][0] - # axis.plot(x, y, c=col[idx][0]) - axis.plot(x, y, c=col[idx][0], linestyle=markers_lookup[s], label=s) -axis.semilogy() -axis.tick_params(axis='x', which='both', length=3) -axis.tick_params(axis='y', which='both', length=0) -axis.set_yticks([], []) -axis.set_ylabel('Simulation Time (log scale)') - -axis.set_xticks([0, 2500, 5000, 7500, 10000, 12500, 15000], [0, 2500, 5000, 7500, 10000, 12500, 15000]) -axis.spines[['left', 'right', 'top']].set_visible(False) -axis.set_xlim([0, 15000]) -axis.set_title('Number of Neurons', x=0.85, y=0, pad=-14, fontsize=10) -# fig.legend(handles, labels, ncol=5, loc='lower center', bbox_to_anchor=(0.5, 0.01), prop={'size':8} ) - - - - - -axis = ax[1] -sim_col, data = load('Results/Swift-SF315-51G/Izhikevich.csv') - -avg_measurements = np.mean(data, axis=0) -avg_speed_ups = np.max(avg_measurements)/avg_measurements -speed_up_err = np.std(np.max(avg_measurements)/data, axis=0) - -text_gap = np.max(avg_speed_ups)*0.01 - -simulators = [s for s, c in sim_col] -colors = [c for s, c in sim_col] -#index = list(range(len(sim_col))) -index = [3,4,2,5,6,7,1] - -for i, s, m, e, c in zip(index, simulators, avg_speed_ups, speed_up_err, colors): - axis.barh(i, m, color=c, xerr=e, error_kw=ekw) - axis.text(m + e+text_gap + 0.2, i-0.02, '{0:.1f}'.format(m)+'x', ha='left', va='center', color=c) - - -axis.tick_params(axis='both', which='both', length=0) -axis.set_xticks([], []) -axis.set_xlim([0, np.max(avg_speed_ups)*1.3]) -axis.set_yticks(index, simulators, ha="right") -axis.spines[['bottom', 'right', 'top']].set_visible(False) - -for ytick, color in zip(axis.get_yticklabels(), colors): - ytick.set_color(color) - - - -# ax[0].text(x=0, y=160, s='C', size=17) -# ax[1].text(x=0, 
y=7.5, s='D', size=17) -ax[0].set_title("C", loc='left') -ax[1].set_title("D", loc='left') - -#ax[0].text(x=100, y=0, s=' ', size=20, weight='bold') - -#ax[0].set_ylabel('compute time') - -fig.tight_layout() -plt.savefig('new_measurements_x.png', dpi=600) -plt.show() - - -#2 -############################################################################### -#3 - - #print(i, row) - #if i==0: - # sim_col = [lookup[s] for s in row[1:]] - #else: - # measurements.append([float(s) for s in row]) - - #return sim_col, np.array(measurements)[:,1:] #remove enumeration in first column - -#2 -############################################################################### -#3 - -fig, ax = plt.subplots(1, 2, width_ratios=[55, 45]) -fig.set_figwidth(8)#12 -fig.set_figheight(4) - -fig.suptitle('Simple LIF with\nOne Step STDP', fontweight='bold') -axis = ax[0] -axis.axvline(7500, c='lightgray', linestyle='--') -sim, col, num, mes = load2('Results/Swift-SF315-51G/LIF.csv') -for s in np.flip(np.unique(sim)): - idx = sim==s - x = np.sort(np.unique(num[idx])) - y = np.array([np.mean(mes[idx][num[idx]==n]) for n in x]) - e = np.array([np.std(mes[idx][num[idx]==n]) for n in x]) - axis.fill_between(x, y - e, y + e, alpha=0.5, edgecolor=col[idx][0], facecolor=col[idx][0], linewidth=0) #, c=col[idx][0] - # axis.plot(x, y, c=col[idx][0]) - axis.plot(x, y, c=col[idx][0], linestyle=markers_lookup[s], label=s) -handles, labels = axis.get_legend_handles_labels() -axis.semilogy() -axis.tick_params(axis='x', which='both', length=3) -axis.tick_params(axis='y', which='both', length=0) -axis.set_yticks([], []) -axis.set_ylabel('Simulation Time (log scale)') -axis.set_xticks([0, 2500, 5000, 7500, 10000, 12500, 15000], [0, 2500, 5000, 7500, 7500, 12500, 15000]) -axis.set_title('Number of Neurons', x=0.85, y=0, pad=-14, fontsize=10) -axis.spines[['left', 'right', 'top']].set_visible(False) -axis.set_xlim([0, 15000]) - - -axis = ax[1] -sim_col, data = load('Results/Swift-SF315-51G/Simple.csv') - -avg_measurements = np.mean(data, axis=0) -avg_speed_ups = np.max(avg_measurements)/avg_measurements -speed_up_err = np.std(np.max(avg_measurements)/data, axis=0) - -text_gap = np.max(avg_speed_ups)*0.01 - -#measurements = np.mean(data, axis=0) -#err = np.std(data, axis=0) -simulators = [s for s, c in sim_col] -colors = [c for s, c in sim_col] -#index = list(range(len(sim_col))) -index = [3,4,2,1,5,6,7] - -for i, s, m, e, c in zip(index, simulators, avg_speed_ups, speed_up_err, colors): - axis.barh(i, m, color=c, xerr=e, error_kw=ekw) - axis.text(m + e+text_gap+0.2, i-0.02, '{0:.1f}'.format(m)+'x', ha='left', va='center', color=c) - - - -axis.tick_params(axis='both', which='both', length=0) -axis.set_xticks([], []) -axis.set_xlim([0, np.max(avg_speed_ups)*1.3]) -axis.set_yticks(index, simulators, ha="right") -axis.spines[['bottom', 'right', 'top']].set_visible(False) - -for ytick, color in zip(axis.get_yticklabels(), colors): - ytick.set_color(color) - - - -ax[0].set_title("A", loc='left') -ax[1].set_title("B", loc='left') - -fig.tight_layout() -# plt.subplots_adjust(left=None, bottom=0.2, right=None, top=None, wspace=None, hspace=0.1) - -# ax[0].text(x=0, y=15, s='A', size=17) -# ax[1].text(x=0, y=7.5, s='B', size=17) - -plt.savefig('new_measurements_n.png', dpi=600) -plt.show() - - diff --git a/benchmark/Plot/test.csv b/benchmark/Plot/test.csv deleted file mode 100644 index af39688..0000000 --- a/benchmark/Plot/test.csv +++ /dev/null @@ -1,5 +0,0 @@ -,brian_LIF.py,pymonnto_fast_LIF.py,pymonntorch_fast_LIF_cpu.py 
-0,8.777140140533447,0.24930357933044434,0.3555452823638916 -1,6.681550979614258,0.2197866439819336,0.28249645233154297 -2,8.733273029327393,0.32936787605285645,0.29815196990966797 -3,7.276941299438477,0.2546813488006592,0.3293635845184326 diff --git a/benchmark/automation.py b/benchmark/automation.py deleted file mode 100644 index dff43c7..0000000 --- a/benchmark/automation.py +++ /dev/null @@ -1,63 +0,0 @@ -import os -import sys -import platform -import subprocess -import pandas as pd -from tqdm import tqdm - - -def get_simulation_time(byte_out): - str_out = byte_out.decode("utf-8") - result = [ - line for line in str_out.split("\n") if line.startswith("simulation time:") - ] - return result - - -def get_number(line, script_loc): - try: - time = float(line.lstrip("simulation time:")) - except RuntimeError: - print(f"ERROR: couldn't get the time from string on script {script_loc}, line:") - print(line) - return None - return time - - -def get_script_time(script_loc): - result = get_simulation_time(subprocess.check_output(["python3", script_loc, 'no_plot'])) - if len(result) == 0: - print(f"ERROR: no simulation time on {script_loc}") - elif len(result) > 1: - print(f"ERROR: multiple simulation time on {script_loc}") - else: - return get_number(result[0], script_loc) - return None - - -if __name__ == "__main__": - n = int(sys.argv[1]) # repeat - out_loc = ( - f"{platform.node()}.csv" if sys.argv[2] == "_" else sys.argv[2] - ) # csv output location - scripts_loc = sys.argv[3:] # list of script to run - - result = {} - - for _ in tqdm(range(n)): - for script_loc in tqdm(scripts_loc): - script_name = os.path.basename(script_loc) - if script_name not in result: - result[script_name] = [] - t = get_script_time(script_loc) - if t is not None: - result[script_name].append(t) - # result[script_name].append(get_script_time(script_loc)) - - new_df = pd.DataFrame(result) - df = pd.DataFrame({}) - if os.path.exists(out_loc): - df = pd.read_csv(out_loc, index_col=0) - - result_df = pd.concat([df, new_df], ignore_index=True, sort=False) - result_df.to_csv(out_loc) diff --git a/pymonntorch/NetworkCore/Base.py b/pymonntorch/NetworkCore/Base.py index 94d5ba7..9afd284 100644 --- a/pymonntorch/NetworkCore/Base.py +++ b/pymonntorch/NetworkCore/Base.py @@ -14,15 +14,19 @@ class NetworkObject(TaggableObject): analysis_modules (list): List of analysis modules. """ - def __init__(self, tag, network, behavior, device="cpu"): + def __init__(self, network, tag=None, behavior=None, device=None): """Initialize the object. Args: tag (str): Tag to add to the object. It can also be a comma-separated string of multiple tags. network (Network): The parent network object. behavior (list or dict): List or dictionary of behaviors. If a dictionary is used, the keys must be integers. - device (str): Device on which the object is located. The default is "cpu". + device (str): Device on which the object is located. If not provided, the object resides on the network's device.
""" + if device is None: + device = network.device + if tag is None: + tag = self.__class__.__name__ + "_" + str(len(network.Structures)+1) super().__init__(tag, device) self.network = network @@ -44,8 +48,34 @@ def __init__(self, tag, network, behavior, device="cpu"): self.analysis_modules = [] + self.sub_structures = [] + self.parent_structure = network + if network != self: + network.Structures.append(self) + self.recording = True + def add_sub_structure(self, new_structure): + """Add a new structure as a sub-structure of this object. + + Args: + new_structure (NetworkObject): The new structure to add. + """ + if new_structure.network == self.network: + self.sub_structures.append(new_structure) + new_structure.parent_structure = self + else: + print(f"ERROR: {new_structure.tags[0]} cannot be a sub-structure of {self.tags[0]} because they do not belong to the same network.") + + def all_sub_structure(self): + """Returns a list of all direct and nested sub-structures. + """ + result = [] + for sub_struc in self.sub_structures: + result.append(sub_struc) + result.extend(sub_struc.all_sub_structure()) + return result + def add_behavior(self, key, behavior, initialize=True): """Add a single behavior to the network object. diff --git a/pymonntorch/NetworkCore/Network.py b/pymonntorch/NetworkCore/Network.py index dda7e42..8a728b6 100644 --- a/pymonntorch/NetworkCore/Network.py +++ b/pymonntorch/NetworkCore/Network.py @@ -55,6 +55,7 @@ def __init__( self.NeuronGroups = [] self.SynapseGroups = [] + self.Structures = [] self._iteration = 0 @@ -62,8 +63,14 @@ def __init__( [] ) # stores (key, beh_parent, behavior) triplets - super().__init__(tag, self, behavior, device=self.device) + super().__init__(self, tag, behavior, device=self.device) + def fill_substructures(self): + self.sub_structures = [] + for struc in self.Structures: + if struc.parent_structure == self: + self.sub_structures.append(struc) + def set_behaviors(self, tag, enabled): """Set behaviors of specific tag to be enabled or disabled. @@ -92,8 +99,7 @@ def recording_on(self): def all_objects(self): """Return a list of all objects in the network.""" l = [self] - l.extend(self.NeuronGroups) - l.extend(self.SynapseGroups) + l.extend(self.Structures) return l def all_behaviors(self): @@ -116,7 +122,7 @@ def clear_recorder(self, keys=None): def __repr__(self): neuron_count = torch.sum(torch.tensor([ng.size for ng in self.NeuronGroups])) synapse_count = torch.sum( - torch.tensor([sg.src.size * sg.dst.size for sg in self.SynapseGroups]) + torch.tensor([sg.src.size * sg.dst.size for sg in self.SynapseGroups if (sg.src is not None and sg.dst is not None)]) ) basic_info = ( @@ -146,6 +152,10 @@ def __repr__(self): result += str(sg) + "\r\n" used_tags.append(tags) + for struc in self.Structures: + if struc not in self.NeuronGroups and struc not in self.SynapseGroups: + result += str(struc) + "\r\n" + return result[:-2] def find_objects(self, key): @@ -159,11 +169,8 @@ def find_objects(self, key): """ result = super().find_objects(key) - for ng in self.NeuronGroups: - result.extend(ng[key]) - - for sg in self.SynapseGroups: - result.extend(sg[key]) + for struc in self.Structures: + result.extend(struc[key]) for am in self.analysis_modules: result.extend(am[key]) @@ -186,6 +193,7 @@ def initialize(self, info=True, warnings=True, storage_manager=None): self.initialize_behaviors() self.check_unique_tags(warnings) + self.fill_substructures() def initialize_behaviors(self): for key, parent, behavior in self.sorted_behavior_execution_list: @@ -245,6 +253,7 @@ def check_unique_tags(self, warnings=True): + new_tag + '". 
Multiple Tags can be separated with a "," (NeuronGroup(..., tag="tag1,tag2,..."))' ) + ng.tags.append(ng.tags[0]) ng.tags[0] = new_tag else: diff --git a/pymonntorch/NetworkCore/NeuronGroup.py b/pymonntorch/NetworkCore/NeuronGroup.py index 9c32073..5ea26c5 100644 --- a/pymonntorch/NetworkCore/NeuronGroup.py +++ b/pymonntorch/NetworkCore/NeuronGroup.py @@ -22,7 +22,7 @@ class NeuronGroup(NetworkObject): id (torch.Tensor): The integer id of the population. """ - def __init__(self, size, behavior, net, tag=None): + def __init__(self, size, net, behavior=None, tag=None): """Initialize the neuronal population. Args: @@ -47,7 +47,7 @@ def __init__(self, size, behavior, net, tag=None): self.size = size - super().__init__(tag, net, behavior, net.device) + super().__init__(net, tag, behavior, net.device) self.add_tag("ng") self.BaseNeuronGroup = self # used for subgroup reconstruction diff --git a/pymonntorch/NetworkCore/SynapseGroup.py b/pymonntorch/NetworkCore/SynapseGroup.py index 0f499c1..c195cb4 100644 --- a/pymonntorch/NetworkCore/SynapseGroup.py +++ b/pymonntorch/NetworkCore/SynapseGroup.py @@ -16,7 +16,7 @@ class SynapseGroup(NetworkObject): group_weighting (float): The weighting of the synapse group. """ - def __init__(self, src, dst, net, tag=None, behavior=None): + def __init__(self, net, src=None, dst=None, tag=None, behavior=None): """This is the constructor of the SynapseGroup class. Args: @@ -35,12 +35,9 @@ def __init__(self, src, dst, net, tag=None, behavior=None): if tag is None and net is not None: tag = "SynapseGroup_" + str(len(net.SynapseGroups) + 1) - super().__init__(tag, net, behavior, net.device) + super().__init__(net, tag, behavior, net.device) self.add_tag("syn") - if len(src.tags) > 0 and len(dst.tags) > 0: - self.add_tag(src.tags[0] + " => " + dst.tags[0]) - if net is not None: net.SynapseGroups.append(self) setattr(net, self.tags[0], self) @@ -57,22 +54,57 @@ def __init__(self, src, dst, net, tag=None, behavior=None): if tag not in ng.efferent_synapses: ng.efferent_synapses[tag] = [] - if ( - self.dst.BaseNeuronGroup == self.dst - ): # only add to NeuronGroup not to NeuronSubGroup - for tag in self.tags + ["All"]: - self.dst.afferent_synapses[tag].append(self) + if self.dst is not None: + self.connect_dst(self.dst) + if self.src is not None: + self.connect_src(self.src) + + def set_connection_tag(self): + """Adds the connection tag + """ + if self.src is not None and self.dst is not None: + tag = self.src.tags[0] + " => " + self.dst.tags[0] + if tag not in self.tags: + self.add_tag(tag) + for ng in self.network.NeuronGroups: + if tag not in ng.afferent_synapses: + ng.afferent_synapses[tag] = [] + if tag not in ng.efferent_synapses: + ng.efferent_synapses[tag] = [] + + def connect_src(self, src): + """Connects the pre-synaptic neuron group to the synapse. + + Args: + src (NeuronGroup): The pre-synaptic neuron group. + """ + self.src = src + # only add to NeuronGroup not to NeuronSubGroup if self.src.BaseNeuronGroup == self.src: for tag in self.tags + ["All"]: self.src.efferent_synapses[tag].append(self) + self.set_connection_tag() + + def connect_dst(self, dst): + """Connects the post-synaptic neuron group to the synapse. + + Args: + dst (NeuronGroup): The post-synaptic neuron group. 
+ """ + self.dst = dst + if self.dst.BaseNeuronGroup == self.dst: + for tag in self.tags + ["All"]: + self.dst.afferent_synapses[tag].append(self) + self.set_connection_tag() def __repr__(self): result = "SynapseGroup" + str(self.tags) - if self.network.transposed_synapse_matrix_mode: - result = result + "(S" + str(self.src.size) + "xD" + str(self.dst.size) - else: - result = result + "(D" + str(self.dst.size) + "xS" + str(self.src.size) + if self.src is not None and self.dst is not None: + if self.network.transposed_synapse_matrix_mode: + result = result + "(S" + str(self.src.size) + "xD" + str(self.dst.size) + else: + result = result + "(D" + str(self.dst.size) + "xS" + str(self.src.size) result = result + "){" for k in sorted(list(self.behavior.keys())): diff --git a/pymonntorch/__init__.py b/pymonntorch/__init__.py index a2e434b..53bdb96 100644 --- a/pymonntorch/__init__.py +++ b/pymonntorch/__init__.py @@ -2,7 +2,7 @@ __author__ = """Computational Neuroscience Research Laboratory""" __email__ = "ashenatena@gmail.com" -__version__ = "0.1.1" +__version__ = "0.1.4" from pymonntorch.NetworkCore.Network import * from pymonntorch.NetworkCore.Behavior import * diff --git a/setup.py b/setup.py index 0c8a70c..3a9c971 100644 --- a/setup.py +++ b/setup.py @@ -49,6 +49,6 @@ test_suite="tests", tests_require=test_requirements, url="https://github.com/cnrl/PymoNNtorch", - version="0.1.3", + version="0.1.4", zip_safe=False, )
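Usage note (editor's addition, not part of the diff): the hunks above introduce hierarchical structures (NetworkObject.add_sub_structure, Network.Structures, fill_substructures) and allow a SynapseGroup to be created without src/dst and wired up later via connect_src/connect_dst. The sketch below shows how these pieces are meant to fit together; the group names, sizes, empty behavior dictionaries, and the simulate_iterations call are illustrative assumptions, not code taken from the repository.

    from pymonntorch import Network, NeuronGroup, SynapseGroup

    net = Network()  # assumes a default-constructed Network
    exc = NeuronGroup(size=100, net=net, behavior={}, tag="exc")
    inh = NeuronGroup(size=25, net=net, behavior={}, tag="inh")

    # src and dst may now be omitted at construction time and attached later.
    sg = SynapseGroup(net=net, behavior={}, tag="GLUTAMATE")
    sg.connect_src(exc)  # registers sg in exc.efferent_synapses
    sg.connect_dst(inh)  # registers sg in inh.afferent_synapses; with both ends
                         # set, set_connection_tag() adds the "exc => inh" tag

    # Structures can be nested; net.initialize() calls fill_substructures(), so
    # net.sub_structures afterwards lists only top-level structures (exc and sg).
    exc.add_sub_structure(inh)

    net.initialize()
    net.simulate_iterations(100)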