Commit

Merge pull request #299 from brian-team/fix-variable-set-host-device-copy

Copy variables modified on device correctly to host
denisalevi authored Jun 10, 2022
2 parents 369ae0b + 3d89de9 commit 22a935b
Showing 5 changed files with 80 additions and 13 deletions.
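For context, the core of the fix is the standard device-to-host copy that the templates below emit after the set-variable kernel: a cudaMemcpy with cudaMemcpyDeviceToHost for statically sized arrays, and a thrust host/device vector assignment for dynamically sized ones. A minimal, self-contained CUDA sketch of that pattern follows; the array names and the simplified CUDA_SAFE_CALL macro are illustrative assumptions, not the generated code.

#include <cstdio>
#include <cuda_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>

// Simplified stand-in for brian2cuda's error-checking macro (assumption)
#define CUDA_SAFE_CALL(call)                                               \
    do {                                                                   \
        cudaError_t err = (call);                                          \
        if (err != cudaSuccess)                                            \
            fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(err));  \
    } while (0)

int main()
{
    const int num_v = 4;

    // Statically sized array: raw device buffer copied back with cudaMemcpy
    double array_v[num_v] = {0.0};
    double* dev_array_v = nullptr;
    CUDA_SAFE_CALL(cudaMalloc((void**)&dev_array_v, sizeof(double) * num_v));
    // ... a kernel would write dev_array_v on the device here ...
    CUDA_SAFE_CALL(cudaMemcpy(array_v, dev_array_v,
                              sizeof(double) * num_v,
                              cudaMemcpyDeviceToHost));

    // Dynamically sized array: thrust vectors, host copy refreshed by assignment
    thrust::device_vector<double> dev_dynamic_delay(num_v, 0.0);
    thrust::host_vector<double> dynamic_delay;
    // ... a kernel would write dev_dynamic_delay on the device here ...
    dynamic_delay = dev_dynamic_delay;  // device-to-host copy

    CUDA_SAFE_CALL(cudaFree(dev_array_v));
    return 0;
}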
10 changes: 10 additions & 0 deletions brian2cuda/device.py
@@ -330,6 +330,16 @@ def code_object(self, owner, name, abstract_code, variables, template_name,
owner=owner))
if idx == '_syaptic_post':
self.delete_synaptic_post[synaptic_post_array_name] = False

        if template_name in ['group_variable_set_conditional', 'group_variable_set']:
            read, write = self.get_array_read_write(abstract_code, variables)
            written_variables = {}
            for variable_name in write:
                var = variables[variable_name]
                varname = self.get_array_name(var, access_data=False)
                written_variables[var] = varname
            template_kwds['written_variables'] = written_variables

if template_name == "synapses":
prepost = template_kwds['pathway'].prepost
synaptic_effects = "synapse"
22 changes: 22 additions & 0 deletions brian2cuda/templates/group_variable_set.cu
@@ -20,6 +20,28 @@
///// endblock kernel_maincode /////
{% endblock %}

{% block extra_kernel_call_post %}
{# We need to copy modified variables back to the host in case they are used in
code objects that run on the host, i.e. synapse connect calls (e.g. in the
connect condition) and the before-run part of synapses push spikes, which
initializes synaptic variables.
#}
{% for var, varname in written_variables.items() %}
{% if var.dynamic %}
{{varname}} = dev{{varname}};
{% else %}
CUDA_SAFE_CALL(
        cudaMemcpy(
            {{varname}},
            dev{{varname}},
            sizeof({{c_data_type(var.dtype)}})*_num_{{varname}},
            cudaMemcpyDeviceToHost
        )
    );
{% endif %}
{% endfor %}
{% endblock %}

{# _num_group_idx is defined in HOST_CONSTANTS, so we can't set _N before #}
{% block define_N %}
{% endblock %}
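As an illustration of what the extra_kernel_call_post block above could render to, here is a hypothetical expansion for one statically sized variable (v) and one dynamic variable (delay). The object names mimic brian2cuda's naming scheme but are examples only, and the stubbed CUDA_SAFE_CALL stands in for the real error-checking macro.

#include <cuda_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>

// Stub for brian2cuda's error-checking macro (assumption, not the real one)
#define CUDA_SAFE_CALL(call) (void)(call)

// Hypothetical objects as they might appear in the generated objects.cu:
const int _num__array_neurongroup_v = 1;
double _array_neurongroup_v[_num__array_neurongroup_v];
double* dev_array_neurongroup_v;
thrust::host_vector<double> _dynamic_array_synapses_delay;
thrust::device_vector<double> dev_dynamic_array_synapses_delay;

// Roughly what the block renders to after the set-variable kernel call
void _copy_written_variables_to_host()
{
    // {% else %} branch: statically sized array, explicit device-to-host memcpy
    CUDA_SAFE_CALL(
            cudaMemcpy(
                _array_neurongroup_v,
                dev_array_neurongroup_v,
                sizeof(double)*_num__array_neurongroup_v,
                cudaMemcpyDeviceToHost
            )
        );

    // {% if var.dynamic %} branch: thrust host vector refreshed by assignment
    _dynamic_array_synapses_delay = dev_dynamic_array_synapses_delay;
}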
22 changes: 16 additions & 6 deletions brian2cuda/templates/group_variable_set_conditional.cu
@@ -26,13 +26,23 @@


{% block extra_kernel_call_post %}
-{% for var in variables.values() %}
-{# We want to copy only those variables that were potentially modified in aboves kernel call. #}
-{% if var is not callable and var.array and not var.constant and not var.dynamic %}
-{% set varname = get_array_name(var, access_data=False) %}
+{# We need to copy modified variables back to the host in case they are used in
+code objects that run on the host, i.e. synapse connect calls (e.g. in the
+connect condition) and the before-run part of synapses push spikes, which
+initializes synaptic variables.
+#}
+{% for var, varname in written_variables.items() %}
+{% if var.dynamic %}
+{{varname}} = dev{{varname}};
+{% else %}
CUDA_SAFE_CALL(
-        cudaMemcpy({{varname}}, dev{{varname}}, sizeof({{c_data_type(var.dtype)}})*_num_{{varname}}, cudaMemcpyDeviceToHost)
-    );
+        cudaMemcpy(
+            {{varname}},
+            dev{{varname}},
+            sizeof({{c_data_type(var.dtype)}})*_num_{{varname}},
+            cudaMemcpyDeviceToHost
+        )
+    );
{% endif %}
{% endfor %}
{% endblock %}
6 changes: 0 additions & 6 deletions brian2cuda/templates/synapses_push_spikes.cu
@@ -184,12 +184,6 @@ __global__ void _before_run_kernel_{{codeobj_name}}(
// number of neurons in target group
int target_N = {{constant_or_scalar('_n_targets', variables['_n_targets'])}};

-// TODO: for multiple SynapticPathways for the same Synapses object (on_pre and on_post) the following copy is identical in both pathways initialise templates
-{% if not no_or_const_delay_mode %}
-// delay (on device) was potentially set in group_variable_set_conditional and needs to be copied to host
-{{_dynamic_delay}} = dev{{_dynamic_delay}};
-{% endif %}

//////////////////////
// Scalar variables //
//////////////////////
33 changes: 32 additions & 1 deletion brian2cuda/tests/test_neurongroup.py
@@ -6,9 +6,11 @@
from brian2.core.clocks import defaultclock
from brian2.core.magic import run
from brian2.groups.neurongroup import NeuronGroup
from brian2.synapses import Synapses
from brian2.tests import make_argv
from brian2.tests.utils import assert_allclose
from brian2.utils.logger import catch_logs
from brian2.units import second

# Adapted from brian2/tests/test_neurongroup.py::test_semantics_floor_division
# (brian2 test asserts for 0 warnings, brian2cuda warns for int to float64 conversion)
@@ -43,7 +45,36 @@ def test_semantics_floor_division():
    assert_allclose(G.y[:], float_values // 3)


@pytest.mark.standalone_compatible
def test_group_variable_set_copy_to_host():

    G = NeuronGroup(1, 'v : 1')
    # uses group_variable_set template
    G.v[:1] = '50'
    # connect template runs on host, requiring G.v on host after the group_variable_set
    # template call above (this tests that data is copied from device to host)
    S = Synapses(G, G)
    S.connect(condition='v_pre == 50')
    run(0*second)
    assert len(S) == 1, len(S)


@pytest.mark.standalone_compatible
def test_group_variable_set_conditional_copy_to_host():

    G = NeuronGroup(1, 'v : 1')
    # uses group_variable_set_conditional template
    G.v['i < 1'] = '50'
    # connect template runs on host, requiring G.v on host after the group_variable_set
    # template call above (this tests that data is copied from device to host)
    S = Synapses(G, G)
    S.connect(condition='v_pre == 50')
    run(0*second)
    assert len(S) == 1, len(S)


if __name__ == '__main__':
    import brian2
    brian2.set_device('cuda_standalone')
    test_semantics_floor_division()
    test_group_variable_set_copy_to_host()
    #test_group_variable_set_conditional_copy_to_host()
