From f456afb0bf51eacd8c462926dd8e63248323da48 Mon Sep 17 00:00:00 2001 From: PhilippPlank Date: Fri, 12 Nov 2021 14:53:17 +0100 Subject: [PATCH 01/14] - Initial enablement of RefPort and VarPorts --- src/lava/magma/compiler/builder.py | 101 ++++++++-- src/lava/magma/compiler/compiler.py | 103 +++++++++- src/lava/magma/compiler/utils.py | 28 ++- src/lava/magma/core/model/interfaces.py | 23 +-- src/lava/magma/core/model/py/model.py | 61 +++--- src/lava/magma/core/model/py/ports.py | 190 ++++++++++++++++--- src/lava/magma/core/process/ports/ports.py | 17 +- src/lava/magma/runtime/runtime_service.py | 122 ++++++------ tests/lava/magma/compiler/test_compiler.py | 93 ++++++++- tests/lava/magma/core/model/test_py_model.py | 98 +++++++++- tests/lava/magma/core/process/test_ports.py | 65 ++++++- tests/lava/magma/runtime/test_get_set_var.py | 4 + 12 files changed, 738 insertions(+), 167 deletions(-) diff --git a/src/lava/magma/compiler/builder.py b/src/lava/magma/compiler/builder.py index db74adda6..b9f223f44 100644 --- a/src/lava/magma/compiler/builder.py +++ b/src/lava/magma/compiler/builder.py @@ -19,13 +19,17 @@ import numpy as np from dataclasses import dataclass - from lava.magma.compiler.channels.pypychannel import CspSendPort, CspRecvPort from lava.magma.core.model.py.model import AbstractPyProcessModel from lava.magma.core.model.py.type import LavaPyType -from lava.magma.compiler.utils import VarInitializer, PortInitializer -from lava.magma.core.model.py.ports import AbstractPyPort, \ - PyInPort, PyOutPort, PyRefPort +from lava.magma.compiler.utils import VarInitializer, PortInitializer, \ + VarPortInitializer +from lava.magma.core.model.py.ports import ( + AbstractPyPort, + PyInPort, + PyOutPort, + PyRefPort, PyVarPort, +) from lava.magma.compiler.channels.interfaces import AbstractCspPort, Channel, \ ChannelType @@ -91,6 +95,8 @@ def __init__( self._model_id = model_id self.vars: ty.Dict[str, VarInitializer] = {} self.py_ports: ty.Dict[str, PortInitializer] = {} + 
self.ref_ports: ty.Dict[str, PortInitializer] = {} + self.var_ports: ty.Dict[str, VarPortInitializer] = {} self.csp_ports: ty.Dict[str, ty.List[AbstractCspPort]] = {} self.csp_rs_send_port: ty.Dict[str, CspSendPort] = {} self.csp_rs_recv_port: ty.Dict[str, CspRecvPort] = {} @@ -167,6 +173,8 @@ def check_all_vars_and_ports_set(self): if ( attr_name not in self.vars and attr_name not in self.py_ports + and attr_name not in self.ref_ports + and attr_name not in self.var_ports ): raise AssertionError( f"No LavaPyType '{attr_name}' found in ProcModel " @@ -235,6 +243,29 @@ def set_py_ports(self, py_ports: ty.List[PortInitializer], check=True): self._check_not_assigned_yet(self.py_ports, new_ports.keys(), "ports") self.py_ports.update(new_ports) + def set_ref_ports(self, ref_ports: ty.List[PortInitializer]): + """Set py_ports + + Parameters + ---------- + ref_ports : ty.List[PortInitializer] + """ + self._check_members_exist(ref_ports, "Port") + new_ports = {p.name: p for p in ref_ports} + self._check_not_assigned_yet(self.ref_ports, new_ports.keys(), "ports") + self.ref_ports.update(new_ports) + + def set_var_ports(self, var_ports: ty.List[VarPortInitializer]): + """Set var_ports + + Parameters + ---------- + var_ports : ty.List[VarPortInitializer] + """ + new_ports = {p.name: p for p in var_ports} + self._check_not_assigned_yet(self.var_ports, new_ports.keys(), "ports") + self.var_ports.update(new_ports) + def set_csp_ports(self, csp_ports: ty.List[AbstractCspPort]): """Set CSP Ports @@ -253,22 +284,18 @@ def set_csp_ports(self, csp_ports: ty.List[AbstractCspPort]): new_ports.setdefault(p.name, []).extend( p if isinstance(p, list) else [p] ) - self._check_not_assigned_yet( - self.csp_ports, new_ports.keys(), "csp_ports" - ) + # Check that there's a PyPort for each new CspPort proc_name = self.proc_model.implements_process.__name__ for port_name in new_ports: if not hasattr(self.proc_model, port_name): - raise AssertionError( - "PyProcessModel '{}' has \ - no port named 
'{}'.".format( - proc_name, port_name - ) - ) - # Set new CspPorts - for key, ports in new_ports.items(): - self.csp_ports.setdefault(key, []).extend(ports) + raise AssertionError("PyProcessModel '{}' has \ + no port named '{}'.".format(proc_name, port_name)) + + if port_name in self.csp_ports: + self.csp_ports[port_name].extend(new_ports[port_name]) + else: + self.csp_ports[port_name] = new_ports[port_name] def set_rs_csp_ports(self, csp_ports: ty.List[AbstractCspPort]): """Set RS CSP Ports @@ -326,13 +353,53 @@ def build(self): csp_ports = self.csp_ports[name] if not isinstance(csp_ports, list): csp_ports = [csp_ports] - port = port_cls(pm, csp_ports, p.shape, lt.d_type) + port = port_cls(csp_ports, pm, p.shape, lt.d_type) # Create dynamic PyPort attribute on ProcModel setattr(pm, name, port) # Create private attribute for port precision # setattr(pm, "_" + name + "_p", lt.precision) + # Initialize RefPorts + for name, p in self.ref_ports.items(): + # Build PyPort + lt = self._get_lava_type(name) + port_cls = ty.cast(ty.Type[PyRefPort], lt.cls) + csp_recv = None + csp_send = None + if name in self.csp_ports: + csp_ports = self.csp_ports[name] + csp_recv = csp_ports[0] if isinstance( + csp_ports[0], CspRecvPort) else csp_ports[1] + csp_send = csp_ports[0] if isinstance( + csp_ports[0], CspSendPort) else csp_ports[1] + + port = port_cls(csp_send, csp_recv, pm, p.shape, lt.d_type) + + # Create dynamic PyPort attribute on ProcModel + setattr(pm, name, port) + + # Initialize VarPorts + for name, p in self.var_ports.items(): + # Build PyPort + if p.port_cls is None: + # VarPort is not connected + continue + port_cls = ty.cast(ty.Type[PyVarPort], p.port_cls) + csp_recv = None + csp_send = None + if name in self.csp_ports: + csp_ports = self.csp_ports[name] + csp_recv = csp_ports[0] if isinstance( + csp_ports[0], CspRecvPort) else csp_ports[1] + csp_send = csp_ports[0] if isinstance( + csp_ports[0], CspSendPort) else csp_ports[1] + port = port_cls( + p.var_name, 
csp_send, csp_recv, pm, p.shape, p.d_type) + + # Create dynamic PyPort attribute on ProcModel + setattr(pm, name, port) + for port in self.csp_rs_recv_port.values(): if "service_to_process_cmd" in port.name: pm.service_to_process_cmd = port diff --git a/src/lava/magma/compiler/compiler.py b/src/lava/magma/compiler/compiler.py index 2948fa0d9..36ded48a8 100644 --- a/src/lava/magma/compiler/compiler.py +++ b/src/lava/magma/compiler/compiler.py @@ -24,13 +24,17 @@ from lava.magma.compiler.channels.interfaces import ChannelType from lava.magma.compiler.executable import Executable from lava.magma.compiler.node import NodeConfig, Node -from lava.magma.compiler.utils import VarInitializer, PortInitializer +from lava.magma.compiler.utils import VarInitializer, PortInitializer, \ + VarPortInitializer from lava.magma.core import resources from lava.magma.core.model.c.model import AbstractCProcessModel from lava.magma.core.model.model import AbstractProcessModel from lava.magma.core.model.nc.model import AbstractNcProcessModel from lava.magma.core.model.py.model import AbstractPyProcessModel +from lava.magma.core.model.py.ports import PyVarPort, RefVarTypeMapping from lava.magma.core.model.sub.model import AbstractSubProcessModel +from lava.magma.core.process.ports.ports import AbstractPort, VarPort, \ + ImplicitVarPort from lava.magma.core.process.process import AbstractProcess from lava.magma.core.resources import CPU, NeuroCore from lava.magma.core.run_configs import RunConfig @@ -49,7 +53,6 @@ def __init__(self, compile_cfg: ty.Optional[ty.Dict[str, ty.Any]] = None): self._compile_config.update(compile_cfg) # ToDo: (AW) Clean this up by avoiding redundant search paths - # ToDo: (AW) @PP Please include RefPorts/VarPorts in connection tracing def _find_processes(self, proc: AbstractProcess, seen_procs: ty.List[AbstractProcess] = None) \ @@ -69,14 +72,14 @@ def _find_processes(self, new_list: ty.List[AbstractProcess] = [] # add processes connecting to the main process - for 
in_port in proc.in_ports: + for in_port in proc.in_ports.members + proc.var_ports.members: for con in in_port.in_connections: new_list.append(con.process) for con in in_port.out_connections: new_list.append(con.process) # add processes connecting from the main process - for out_port in proc.out_ports: + for out_port in proc.out_ports.members + proc.ref_ports.members: for con in out_port.in_connections: new_list.append(con.process) for con in out_port.out_connections: @@ -251,6 +254,35 @@ def _group_proc_by_model(proc_map: PROC_MAP) \ return grouped_models + # TODO: (PP) This currently only works for PyPorts - needs general solution + # TODO: (PP) Currently does not support 1:many/many:1 connections + @staticmethod + def _map_var_port_class(port: VarPort, + proc_groups: ty.Dict[ty.Type[AbstractProcessModel], + ty.List[AbstractProcess]]): + """Derives the port class of a given VarPort from its source RefPort.""" + + # Get the source RefPort of the VarPort + rp = port.get_src_ports() + if len(rp) > 0: + rp = rp[0] + else: + # VarPort is not connect, hence there is no LavaType + return None + + # Get the ProcessModel of the source RefPort + r_pm = None + for pm in proc_groups: + if rp.process in proc_groups[pm]: + r_pm = pm + + # Get the LavaType of the RefPort from its ProcessModel + lt = getattr(r_pm, rp.name) + + # Return mapping of the RefPort class to VarPort class + return RefVarTypeMapping.get(lt.cls) + + # TODO: (PP) possible shorten creation of PortInitializers def _compile_proc_models( self, proc_groups: ty.Dict[ty.Type[AbstractProcessModel], @@ -271,16 +303,42 @@ def _compile_proc_models( # and Ports v = [VarInitializer(v.name, v.shape, v.init, v.id) for v in p.vars] - ports = (list(p.in_ports) + list(p.out_ports) - + list(p.ref_ports)) + ports = (list(p.in_ports) + list(p.out_ports)) ports = [PortInitializer(pt.name, pt.shape, - getattr(pm, pt.name).d_type, + self._get_port_dtype(pt, pm), pt.__class__.__name__, pp_ch_size) for pt in ports] + # Create 
RefPort (also use PortInitializers) + ref_ports = list(p.ref_ports) + ref_ports = [ + PortInitializer(pt.name, + pt.shape, + self._get_port_dtype(pt, pm), + pt.__class__.__name__, + pp_ch_size) for pt in ref_ports] + # Create VarPortInitializers (contain also the Var name) + var_ports = [] + for pt in list(p.var_ports): + var_ports.append( + VarPortInitializer( + pt.name, + pt.shape, + pt.var.name, + self._get_port_dtype(pt, pm), + pt.__class__.__name__, + pp_ch_size, + self._map_var_port_class(pt, proc_groups))) + + # Set implicit VarPorts as attribute to ProcessModel + if isinstance(pt, ImplicitVarPort): + setattr(pm, pt.name, pt) + # Assigns initializers to builder b.set_variables(v) b.set_py_ports(ports) + b.set_ref_ports(ref_ports) + b.set_var_ports(var_ports) b.check_all_vars_and_ports_set() py_builders[p] = b elif issubclass(pm, AbstractCProcessModel): @@ -496,6 +554,26 @@ def _get_channel_type(src: ty.Type[AbstractProcessModel], f"'({src.__name__}, {dst.__name__})' yet." ) + @staticmethod + def _get_port_dtype(port: AbstractPort, + proc_model: ty.Type[AbstractProcessModel]) -> type: + """Returns the type of a port, as specified in the corresponding + ProcessModel.""" + + # In-, Out-, Ref- and explicit VarPorts + if hasattr(proc_model, port.name): + # Handle VarPorts (use dtype of corresponding Var) + if isinstance(port, VarPort): + return getattr(proc_model, port.var.name).d_type + return getattr(proc_model, port.name).d_type + # Implicitly created VarPorts + elif isinstance(port, ImplicitVarPort): + return getattr(proc_model, port.var.name).d_type + # Port has different name in Process and ProcessModel + else: + raise AssertionError("Port {!r} not found in " + "ProcessModel {!r}".format(port, proc_model)) + # ToDo: (AW) Fix hard-coded hacks in this method and extend to other # channel types def _create_channel_builders(self, proc_map: PROC_MAP) \ @@ -525,7 +603,7 @@ def _create_channel_builders(self, proc_map: PROC_MAP) \ # Find destination ports for each 
source port for src_pt in src_ports: # Create PortInitializer for source port - src_pt_dtype = getattr(src_pm, src_pt.name).d_type + src_pt_dtype = self._get_port_dtype(src_pt, src_pm) src_pt_init = PortInitializer( src_pt.name, src_pt.shape, src_pt_dtype, src_pt.__class__.__name__, ch_size) @@ -540,7 +618,7 @@ def _create_channel_builders(self, proc_map: PROC_MAP) \ # Find appropriate channel type ch_type = self._get_channel_type(src_pm, dst_pm) # Create PortInitializer for destination port - dst_pt_d_type = getattr(dst_pm, dst_pt.name).d_type + dst_pt_d_type = self._get_port_dtype(dst_pt, dst_pm) dst_pt_init = PortInitializer( dst_pt.name, dst_pt.shape, dst_pt_d_type, dst_pt.__class__.__name__, ch_size) @@ -548,6 +626,13 @@ def _create_channel_builders(self, proc_map: PROC_MAP) \ chb = ChannelBuilderMp( ch_type, src_p, dst_p, src_pt_init, dst_pt_init) channel_builders.append(chb) + # Create additional channel builder for every VarPort + if isinstance(dst_pt, VarPort): + # RefPort to VarPort connections need channels for + # read and write + rv_chb = ChannelBuilderMp( + ch_type, dst_p, src_p, dst_pt_init, src_pt_init) + channel_builders.append(rv_chb) return channel_builders diff --git a/src/lava/magma/compiler/utils.py b/src/lava/magma/compiler/utils.py index fbf76d07b..3a20c51f0 100644 --- a/src/lava/magma/compiler/utils.py +++ b/src/lava/magma/compiler/utils.py @@ -1,8 +1,6 @@ import typing as ty from dataclasses import dataclass -import numpy as np - @dataclass class VarInitializer: @@ -16,6 +14,30 @@ class VarInitializer: class PortInitializer: name: str shape: ty.Tuple[int, ...] - d_type: ty.Type[np.intc] + d_type: type + port_type: str + size: int + + +# check if can be a subclass of PortInitializer +@dataclass +class VarPortInitializer: + name: str + shape: ty.Tuple[int, ...] 
+ var_name: str + d_type: type + port_type: str + size: int + + +# check if can be a subclass of PortInitializer +@dataclass +class VarPortInitializer: + name: str + shape: ty.Tuple[int, ...] + var_name: str + d_type: type port_type: str size: int + port_cls: type + port_cls: type diff --git a/src/lava/magma/core/model/interfaces.py b/src/lava/magma/core/model/interfaces.py index d8e3b918d..6b03f01cd 100644 --- a/src/lava/magma/core/model/interfaces.py +++ b/src/lava/magma/core/model/interfaces.py @@ -2,33 +2,26 @@ # SPDX-License-Identifier: BSD-3-Clause # See: https://spdx.org/licenses/ import typing as ty -from abc import ABC +from abc import ABCMeta, abstractmethod -from lava.magma.compiler.channels.interfaces import AbstractCspPort - -# ToDo: (AW) This type hierarchy is still not clean. csp_port could be a -# CspSendPort or CspRecvPort so down-stream classes can't do proper type -# inference to determine if there is a send/peek/recv/probe method. -class AbstractPortImplementation(ABC): +class AbstractPortImplementation(metaclass=ABCMeta): def __init__( self, process_model: "AbstractProcessModel", # noqa: F821 - csp_ports: ty.List[AbstractCspPort] = [], shape: ty.Tuple[int, ...] = tuple(), d_type: type = int, ): self._process_model = process_model - self._csp_ports = ( - csp_ports if isinstance(csp_ports, list) else [csp_ports] - ) self._shape = shape self._d_type = d_type + @abstractmethod def start(self): - for csp_port in self._csp_ports: - csp_port.start() + # start all csp ports + ... + @abstractmethod def join(self): - for csp_port in self._csp_ports: - csp_port.join() + # join all csp ports + ... 
diff --git a/src/lava/magma/core/model/py/model.py b/src/lava/magma/core/model/py/model.py index 4652eace5..7a1daa764 100644 --- a/src/lava/magma/core/model/py/model.py +++ b/src/lava/magma/core/model/py/model.py @@ -8,7 +8,7 @@ from lava.magma.compiler.channels.pypychannel import CspSendPort, CspRecvPort from lava.magma.core.model.model import AbstractProcessModel -from lava.magma.core.model.py.ports import AbstractPyPort +from lava.magma.core.model.py.ports import AbstractPyPort, PyVarPort from lava.magma.runtime.mgmt_token_enums import ( enum_to_np, MGMT_COMMAND, @@ -37,12 +37,15 @@ def __init__(self): self.process_to_service_data: ty.Optional[CspSendPort] = None self.service_to_process_data: ty.Optional[CspRecvPort] = None self.py_ports: ty.List[AbstractPyPort] = [] + self.var_ports: ty.List[PyVarPort] = [] self.var_id_to_var_map: ty.Dict[int, ty.Any] = {} def __setattr__(self, key: str, value: ty.Any): self.__dict__[key] = value if isinstance(value, AbstractPyPort): self.py_ports.append(value) + if isinstance(value, PyVarPort): + self.var_ports.append(value) def start(self): self.service_to_process_cmd.start() @@ -92,9 +95,6 @@ def run_lrn(self): def run_post_mgmt(self): pass - def run_host_mgmt(self): - pass - def pre_guard(self): pass @@ -104,9 +104,7 @@ def lrn_guard(self): def post_guard(self): pass - def host_guard(self): - pass - + # TODO: (PP) need to handle PAUSE command def run(self): while True: if self.service_to_process_cmd.probe(): @@ -118,36 +116,43 @@ def run(self): if np.array_equal(phase, PyLoihiProcessModel.Phase.SPK): self.current_ts += 1 self.run_spk() + self.process_to_service_ack.send(MGMT_RESPONSE.DONE) elif np.array_equal(phase, PyLoihiProcessModel.Phase.PRE_MGMT): if self.pre_guard(): self.run_pre_mgmt() - self._handle_get_set_var() + self.process_to_service_ack.send(MGMT_RESPONSE.DONE) + if len(self.var_ports) > 0: + self._handle_var_ports() elif np.array_equal(phase, PyLoihiProcessModel.Phase.LRN): if self.lrn_guard(): self.run_lrn() 
+ self.process_to_service_ack.send(MGMT_RESPONSE.DONE) elif np.array_equal(phase, PyLoihiProcessModel.Phase.POST_MGMT): if self.post_guard(): self.run_post_mgmt() - self._handle_get_set_var() + self.process_to_service_ack.send(MGMT_RESPONSE.DONE) + if len(self.var_ports) > 0: + self._handle_var_ports() elif np.array_equal(phase, PyLoihiProcessModel.Phase.HOST): - if self.host_guard(): - self.run_host_mgmt() + self._handle_get_set_var() else: raise ValueError(f"Wrong Phase Info Received : {phase}") - self.process_to_service_ack.send(MGMT_RESPONSE.DONE) - else: - self._handle_get_set_var() + # FIXME: (PP) might not be able to perform get/set during pause def _handle_get_set_var(self): - while self.service_to_process_req.probe(): - req_port: CspRecvPort = self.service_to_process_req - request: np.ndarray = req_port.recv() - if np.array_equal(request, REQ_TYPE.GET): - self._handle_get_var() - elif np.array_equal(request, REQ_TYPE.SET): - self._handle_set_var() - else: - raise RuntimeError(f"Unknown request type {request}") + while True: + if self.service_to_process_req.probe(): + req_port: CspRecvPort = self.service_to_process_req + request: np.ndarray = req_port.recv() + if np.array_equal(request, REQ_TYPE.GET): + self._handle_get_var() + elif np.array_equal(request, REQ_TYPE.SET): + self._handle_set_var() + else: + raise RuntimeError(f"Unknown request type {request}") + + if self.service_to_process_cmd.probe(): + return def _handle_get_var(self): # 1. Recv Var ID @@ -162,6 +167,7 @@ def _handle_get_var(self): data_port.send(enum_to_np(1)) data_port.send(enum_to_np(var)) elif isinstance(var, np.ndarray): + # FIXME: send a whole vector (also runtime_service.py) var_iter = np.nditer(var) num_items: np.integer = np.prod(var.shape) data_port.send(enum_to_np(num_items)) @@ -195,3 +201,12 @@ def _handle_set_var(self): i[...] 
= data_port.recv()[0] else: raise RuntimeError("Unsupported type") + + def _handle_var_ports(self): + """Check if a VarPort either receives data from a RefPort or needs to + send data to a RefPort.""" + while True: + for vp in self.var_ports: + vp.service() + if self.service_to_process_cmd.probe(): + return diff --git a/src/lava/magma/core/model/py/ports.py b/src/lava/magma/core/model/py/ports.py index 4169e0966..666d7279b 100644 --- a/src/lava/magma/core/model/py/ports.py +++ b/src/lava/magma/core/model/py/ports.py @@ -3,12 +3,12 @@ # See: https://spdx.org/licenses/ import typing as ty from abc import abstractmethod -from enum import Enum import functools as ft - import numpy as np +from lava.magma.compiler.channels.pypychannel import CspSendPort, CspRecvPort from lava.magma.core.model.interfaces import AbstractPortImplementation +from lava.magma.runtime.mgmt_token_enums import enum_to_np class AbstractPyPort(AbstractPortImplementation): @@ -20,11 +20,23 @@ class PyInPort(AbstractPyPort): If buffer is empty, recv() will be blocking. 
""" + def __init__(self, csp_recv_ports: ty.List[CspRecvPort], *args): + self._csp_recv_ports = csp_recv_ports + super().__init__(*args) + VEC_DENSE: ty.Type["PyInPortVectorDense"] = None VEC_SPARSE: ty.Type["PyInPortVectorSparse"] = None SCALAR_DENSE: ty.Type["PyInPortScalarDense"] = None SCALAR_SPARSE: ty.Type["PyInPortScalarSparse"] = None + def start(self): + for csp_port in self._csp_recv_ports: + csp_port.start() + + def join(self): + for csp_port in self._csp_recv_ports: + csp_port.join() + @abstractmethod def recv(self): pass @@ -41,14 +53,14 @@ class PyInPortVectorDense(PyInPort): def recv(self) -> np.ndarray: return ft.reduce( lambda acc, csp_port: acc + csp_port.recv(), - self._csp_ports, + self._csp_recv_ports, np.zeros(self._shape, self._d_type), ) def peek(self) -> np.ndarray: return ft.reduce( lambda acc, csp_port: acc + csp_port.peek(), - self._csp_ports, + self._csp_recv_ports, np.zeros(self._shape, self._d_type), ) @@ -83,22 +95,26 @@ def peek(self) -> ty.Tuple[int, int]: PyInPort.SCALAR_SPARSE = PyInPortScalarSparse -# ToDo: Remove... 
not needed anymore -class _PyInPort(Enum): - VEC_DENSE = PyInPortVectorDense - VEC_SPARSE = PyInPortVectorSparse - SCALAR_DENSE = PyInPortScalarDense - SCALAR_SPARSE = PyInPortScalarSparse - - class PyOutPort(AbstractPyPort): """Python implementation of OutPort used within AbstractPyProcessModels.""" + def __init__(self, csp_send_ports: ty.List[CspSendPort], *args): + self._csp_send_ports = csp_send_ports + super().__init__(*args) + VEC_DENSE: ty.Type["PyOutPortVectorDense"] = None VEC_SPARSE: ty.Type["PyOutPortVectorSparse"] = None SCALAR_DENSE: ty.Type["PyOutPortScalarDense"] = None SCALAR_SPARSE: ty.Type["PyOutPortScalarSparse"] = None + def start(self): + for csp_port in self._csp_send_ports: + csp_port.start() + + def join(self): + for csp_port in self._csp_send_ports: + csp_port.join() + @abstractmethod def send(self, data: ty.Union[np.ndarray, int]): pass @@ -110,7 +126,7 @@ def flush(self): class PyOutPortVectorDense(PyOutPort): def send(self, data: np.ndarray): """Sends data only if port is not dangling.""" - for csp_port in self._csp_ports: + for csp_port in self._csp_send_ports: csp_port.send(data) @@ -135,22 +151,38 @@ def send(self, data: int, idx: int): PyOutPort.SCALAR_SPARSE = PyOutPortScalarSparse -# ToDo: Remove... 
not needed anymore -class _PyOutPort(Enum): - VEC_DENSE = PyOutPortVectorDense - VEC_SPARSE = PyOutPortVectorSparse - SCALAR_DENSE = PyOutPortScalarDense - SCALAR_SPARSE = PyOutPortScalarSparse +class VarPortCmd: + GET = enum_to_np(0) + SET = enum_to_np(1) class PyRefPort(AbstractPyPort): """Python implementation of RefPort used within AbstractPyProcessModels.""" + def __init__(self, + csp_send_port: ty.Optional[CspSendPort], + csp_recv_port: ty.Optional[CspRecvPort], *args): + self._csp_recv_port = csp_recv_port + self._csp_send_port = csp_send_port + super().__init__(*args) + VEC_DENSE: ty.Type["PyRefPortVectorDense"] = None VEC_SPARSE: ty.Type["PyRefPortVectorSparse"] = None SCALAR_DENSE: ty.Type["PyRefPortScalarDense"] = None SCALAR_SPARSE: ty.Type["PyRefPortScalarSparse"] = None + def start(self): + if self._csp_send_port is not None: + self._csp_send_port.start() + if self._csp_recv_port is not None: + self._csp_recv_port.start() + + def join(self): + if self._csp_send_port is not None: + self._csp_send_port.join() + if self._csp_recv_port is not None: + self._csp_recv_port.join() + def read( self, ) -> ty.Union[ @@ -172,10 +204,21 @@ def write( class PyRefPortVectorDense(PyRefPort): def read(self) -> np.ndarray: - pass + """Requests the data from a VarPort and returns the data.""" + if self._csp_send_port and self._csp_recv_port: + header = np.ones(self._csp_send_port.shape) * VarPortCmd.GET + self._csp_send_port.send(header) + + return self._csp_recv_port.recv() + + return np.zeros(self._shape, self._d_type) def write(self, data: np.ndarray): - pass + """Sends the data to a VarPort to set its Var.""" + if self._csp_send_port: + header = np.ones(self._csp_send_port.shape) * VarPortCmd.SET + self._csp_send_port.send(header) + self._csp_send_port.send(data) class PyRefPortVectorSparse(PyRefPort): @@ -208,9 +251,102 @@ def write(self, data: int, idx: int): PyRefPort.SCALAR_SPARSE = PyRefPortScalarSparse -# ToDo: Remove... 
not needed anymore -class _PyRefPort(Enum): - VEC_DENSE = PyRefPortVectorDense - VEC_SPARSE = PyRefPortVectorSparse - SCALAR_DENSE = PyRefPortScalarDense - SCALAR_SPARSE = PyRefPortScalarSparse +class PyVarPort(AbstractPyPort): + """Python implementation of InPort used within AbstractPyProcessModel. + If buffer is empty, recv() will be blocking. + """ + + def __init__(self, + var_name: str, + csp_send_port: ty.Optional[CspSendPort], + csp_recv_port: ty.Optional[CspRecvPort], *args): + self._csp_recv_port = csp_recv_port + self._csp_send_port = csp_send_port + self.var_name = var_name + super().__init__(*args) + + VEC_DENSE: ty.Type["PyVarPortVectorDense"] = None + VEC_SPARSE: ty.Type["PyVarPortVectorSparse"] = None + SCALAR_DENSE: ty.Type["PyVarPortScalarDense"] = None + SCALAR_SPARSE: ty.Type["PyVarPortScalarSparse"] = None + + def start(self): + if self._csp_send_port is not None: + self._csp_send_port.start() + if self._csp_recv_port is not None: + self._csp_recv_port.start() + + def join(self): + if self._csp_send_port is not None: + self._csp_send_port.join() + if self._csp_recv_port is not None: + self._csp_recv_port.join() + + def service(self): + pass + + +class PyVarPortVectorDense(PyVarPort): + def service(self): + """Sets the received value to the given var or sends the value of the + var to the csp_send_port, depending on the received header information + of the csp_recv_port.""" + + # Inspect incoming data + if self._csp_send_port is not None and self._csp_recv_port is not None: + while self._csp_recv_port.probe(): + cmd = enum_to_np(self._csp_recv_port.recv()[0]) + + # Set the value of the Var with the given data + if np.array_equal(cmd, VarPortCmd.SET): + data = self._csp_recv_port.recv() + setattr(self._process_model, self.var_name, data) + elif np.array_equal(cmd, VarPortCmd.GET): + data = getattr(self._process_model, self.var_name) + self._csp_send_port.send(data) + else: + raise ValueError(f"Wrong Command Info Received : {cmd}") + + +class 
PyVarPortVectorSparse(PyVarPort): + def recv(self) -> ty.Tuple[np.ndarray, np.ndarray]: + pass + + def peek(self) -> ty.Tuple[np.ndarray, np.ndarray]: + pass + + +class PyVarPortScalarDense(PyVarPort): + def recv(self) -> int: + pass + + def peek(self) -> int: + pass + + +class PyVarPortScalarSparse(PyVarPort): + def recv(self) -> ty.Tuple[int, int]: + pass + + def peek(self) -> ty.Tuple[int, int]: + pass + + +PyVarPort.VEC_DENSE = PyVarPortVectorDense +PyVarPort.VEC_SPARSE = PyVarPortVectorSparse +PyVarPort.SCALAR_DENSE = PyVarPortScalarDense +PyVarPort.SCALAR_SPARSE = PyVarPortScalarSparse + + +class RefVarTypeMapping: + """Class to get the mapping of PyRefPort types to PyVarPortTypes.""" + + mapping: ty.Dict[PyRefPort, PyVarPort] = { + PyRefPortVectorDense: PyVarPortVectorDense, + PyRefPortVectorSparse: PyVarPortVectorSparse, + PyRefPortScalarDense: PyVarPortScalarDense, + PyRefPortScalarSparse: PyVarPortScalarSparse} + + @classmethod + def get(cls, ref_port: PyRefPort): + return cls.mapping[ref_port] diff --git a/src/lava/magma/core/process/ports/ports.py b/src/lava/magma/core/process/ports/ports.py index 32f609e36..1c55f07e3 100644 --- a/src/lava/magma/core/process/ports/ports.py +++ b/src/lava/magma/core/process/ports/ports.py @@ -389,12 +389,19 @@ def connect_var(self, variables: ty.Union[Var, ty.List[Var]]): if var_shape != v.shape: raise AssertionError("All 'vars' must have same shape.") # Create a VarPort to wrap Var - vp = VarPort(v) + vp = ImplicitVarPort(v) # Propagate name and parent process of Var to VarPort - vp.name = v.name + "_port" + vp.name = "_" + v.name + "_implicit_port" if v.process is not None: # Only assign when parent process is already assigned vp.process = v.process + # VarPort Name could shadow existing attribute + if hasattr(v.process, vp.name): + raise AssertionError( + "Name of implicit VarPort might conflict" + " with existing attribute.") + setattr(v.process, vp.name, vp) + v.process.var_ports.add_members({vp.name: vp}) 
var_ports.append(vp) # Connect RefPort to VarPorts that wrap Vars self.connect(var_ports) @@ -455,6 +462,12 @@ def connect_from( self._connect_backward(to_list(ports), AbstractRVPort) +class ImplicitVarPort(VarPort): + """Wrapper class for VarPort to identify implicitly created VarPorts when + a RefPort connects directly to a Var.""" + pass + + class AbstractVirtualPort(ABC): """Abstract base class interface for any type of port that merely serves to transforms the properties of a user-defined port. diff --git a/src/lava/magma/runtime/runtime_service.py b/src/lava/magma/runtime/runtime_service.py index 871b2b397..ee70543c4 100644 --- a/src/lava/magma/runtime/runtime_service.py +++ b/src/lava/magma/runtime/runtime_service.py @@ -91,15 +91,19 @@ class Phase: POST_MGMT = enum_to_np(4) HOST = enum_to_np(5) - def _next_phase(self, curr_phase): + def _next_phase(self, curr_phase, is_last_time_step: bool): if curr_phase == LoihiPyRuntimeService.Phase.SPK: return LoihiPyRuntimeService.Phase.PRE_MGMT elif curr_phase == LoihiPyRuntimeService.Phase.PRE_MGMT: return LoihiPyRuntimeService.Phase.LRN elif curr_phase == LoihiPyRuntimeService.Phase.LRN: return LoihiPyRuntimeService.Phase.POST_MGMT - elif curr_phase == LoihiPyRuntimeService.Phase.POST_MGMT: + elif curr_phase == LoihiPyRuntimeService.Phase.POST_MGMT and \ + is_last_time_step: return LoihiPyRuntimeService.Phase.HOST + elif curr_phase == LoihiPyRuntimeService.Phase.POST_MGMT and not \ + is_last_time_step: + return LoihiPyRuntimeService.Phase.SPK elif curr_phase == LoihiPyRuntimeService.Phase.HOST: return LoihiPyRuntimeService.Phase.SPK @@ -113,13 +117,12 @@ def _send_pm_req_given_model_id(self, model_id: int, *requests): for request in requests: req_port.send(request) - def _get_pm_resp(self, phase) -> ty.Iterable[MGMT_RESPONSE]: + def _get_pm_resp(self) -> ty.Iterable[MGMT_RESPONSE]: rcv_msgs = [] num_responses_expected: int = len(self.model_ids) counter: int = 0 while counter < num_responses_expected: ptos_recv_port 
= self.process_to_service_ack[counter] - self._handle_get_set(phase) if ptos_recv_port.probe(): rcv_msgs.append(ptos_recv_port.recv()) counter += 1 @@ -158,13 +161,13 @@ def _relay_pm_ack_given_model_id(self, model_id: int): ack_relay_port.send(ack_recv_port.recv()) def run(self): - phase = LoihiPyRuntimeService.Phase.SPK + phase = LoihiPyRuntimeService.Phase.HOST while True: if self.runtime_to_service_cmd.probe(): command = self.runtime_to_service_cmd.recv() if np.array_equal(command, MGMT_COMMAND.STOP): self._send_pm_cmd(command) - rsps = self._get_pm_resp(phase) + rsps = self._get_pm_resp() for rsp in rsps: if not np.array_equal(rsp, MGMT_RESPONSE.TERMINATED): raise ValueError(f"Wrong Response Received : {rsp}") @@ -173,7 +176,7 @@ def run(self): return elif np.array_equal(command, MGMT_COMMAND.PAUSE): self._send_pm_cmd(command) - rsps = self._get_pm_resp(phase) + rsps = self._get_pm_resp() for rsp in rsps: if not np.array_equal(rsp, MGMT_RESPONSE.PAUSED): raise ValueError(f"Wrong Response Received : {rsp}") @@ -181,65 +184,70 @@ def run(self): break else: curr_time_step = 0 - phase = LoihiPyRuntimeService.Phase.SPK - while not np.array_equal(enum_to_np(curr_time_step), - command): + phase = LoihiPyRuntimeService.Phase.HOST + while True: + is_last_ts = np.array_equal(enum_to_np(curr_time_step), + command) + phase = self._next_phase(phase, is_last_ts) if np.array_equal(phase, LoihiPyRuntimeService.Phase.SPK): curr_time_step += 1 self._send_pm_cmd(phase) - rsps = self._get_pm_resp(phase) - for rsp in rsps: - if not np.array_equal(rsp, MGMT_RESPONSE.DONE): - raise ValueError( - f"Wrong Response Received : {rsp}") - is_last_ts = np.array_equal(enum_to_np(curr_time_step), - command) - is_last_phase = np.array_equal(phase, - LoihiPyRuntimeService. 
- Phase.POST_MGMT) - if not (is_last_ts and is_last_phase): - phase = self._next_phase(phase) + if not np.array_equal( + phase, LoihiPyRuntimeService.Phase.HOST): + rsps = self._get_pm_resp() + for rsp in rsps: + if not np.array_equal(rsp, MGMT_RESPONSE.DONE): + raise ValueError( + f"Wrong Response Received : {rsp}") + + if np.array_equal( + phase, LoihiPyRuntimeService.Phase.HOST): + break + self.service_to_runtime_ack.send(MGMT_RESPONSE.DONE) self._handle_get_set(phase) def _handle_get_set(self, phase): - if np.array_equal(phase, LoihiPyRuntimeService.Phase.PRE_MGMT) or \ - np.array_equal(phase, LoihiPyRuntimeService.Phase.POST_MGMT): - while self.runtime_to_service_req.probe(): - request = self.runtime_to_service_req.recv() - if np.array_equal(request, REQ_TYPE.GET): - requests: ty.List[np.ndarray] = [request] - # recv model_id - model_id: int = \ - self.runtime_to_service_req.recv()[ - 0].item() - # recv var_id - requests.append( - self.runtime_to_service_req.recv()) - self._send_pm_req_given_model_id(model_id, - *requests) - - self._relay_to_runtime_data_given_model_id( - model_id) - elif np.array_equal(request, REQ_TYPE.SET): - requests: ty.List[np.ndarray] = [request] - # recv model_id - model_id: int = \ - self.runtime_to_service_req.recv()[ - 0].item() - # recv var_id - requests.append( - self.runtime_to_service_req.recv()) - self._send_pm_req_given_model_id(model_id, - *requests) - - self._relay_to_pm_data_given_model_id( - model_id) - else: - raise RuntimeError( - f"Unknown request {request}") + if np.array_equal(phase, LoihiPyRuntimeService.Phase.HOST): + while True: + if self.runtime_to_service_req.probe(): + request = self.runtime_to_service_req.recv() + if np.array_equal(request, REQ_TYPE.GET): + requests: ty.List[np.ndarray] = [request] + # recv model_id + model_id: int = \ + self.runtime_to_service_req.recv()[ + 0].item() + # recv var_id + requests.append( + self.runtime_to_service_req.recv()) + self._send_pm_req_given_model_id(model_id, + 
*requests) + + self._relay_to_runtime_data_given_model_id( + model_id) + elif np.array_equal(request, REQ_TYPE.SET): + requests: ty.List[np.ndarray] = [request] + # recv model_id + model_id: int = \ + self.runtime_to_service_req.recv()[ + 0].item() + # recv var_id + requests.append( + self.runtime_to_service_req.recv()) + self._send_pm_req_given_model_id(model_id, + *requests) + + self._relay_to_pm_data_given_model_id( + model_id) + else: + raise RuntimeError( + f"Unknown request {request}") + + if self.runtime_to_service_cmd.probe(): + return class LoihiCRuntimeService(AbstractRuntimeService): diff --git a/tests/lava/magma/compiler/test_compiler.py b/tests/lava/magma/compiler/test_compiler.py index 385a2dfc0..1a5c8f968 100644 --- a/tests/lava/magma/compiler/test_compiler.py +++ b/tests/lava/magma/compiler/test_compiler.py @@ -14,8 +14,9 @@ from lava.magma.core.sync.domain import SyncDomain from lava.magma.core.sync.protocol import AbstractSyncProtocol from lava.magma.core.sync.protocols.async_protocol import AsyncProtocol -from lava.magma.core.process.ports.ports import InPort, OutPort -from lava.magma.core.model.py.ports import PyInPort, PyOutPort +from lava.magma.core.process.ports.ports import ( + InPort, OutPort, RefPort, VarPort) +from lava.magma.core.model.py.ports import PyInPort, PyOutPort, PyRefPort from lava.magma.core.run_configs import RunConfig from lava.magma.core.model.py.type import LavaPyType from lava.magma.core.process.variable import Var, VarServer @@ -29,6 +30,7 @@ def __init__(self, **kwargs): # Use ReduceOp to allow for multiple input connections self.inp = InPort(shape=(1,), reduce_op=ReduceSum) self.out = OutPort(shape=(1,)) + self.ref = RefPort(shape=(10,)) # Another minimal process (does not matter that it's identical to ProcA) @@ -39,6 +41,7 @@ def __init__(self, **kwargs): self.inp = InPort(shape=(1,), reduce_op=ReduceSum) self.out = OutPort(shape=(1,)) self.some_var = Var((10,), init=10) + self.var_port = VarPort(self.some_var) # 
Another minimal process (does not matter that it's identical to ProcA) @@ -74,6 +77,7 @@ def runtime_service(self): class PyProcModelA(AbstractPyProcessModel): inp: PyInPort = LavaPyType(PyInPort.VEC_DENSE, int) out: PyOutPort = LavaPyType(PyOutPort.VEC_DENSE, int) + ref: PyRefPort = LavaPyType(PyRefPort.VEC_DENSE, int) def run(self): pass @@ -86,6 +90,7 @@ class PyProcModelB(AbstractPyProcessModel): inp: PyInPort = LavaPyType(PyInPort.VEC_DENSE, int) out: PyOutPort = LavaPyType(PyOutPort.VEC_DENSE, int) some_var: int = LavaPyType(int, int) + var_port: PyInPort = LavaPyType(PyInPort.VEC_DENSE, int) def run(self): pass @@ -262,6 +267,30 @@ def test_find_process_circular(self): self.assertEqual(set(procs4), all_procs) self.assertEqual(set(procs6), all_procs) + def test_find_process_ref_ports(self): + """Checks finding all processes for RefPort connection. + [p1 -> ref/var -> p2 -> in/out -> p3]""" + + # Create processes + p1, p2, p3 = ProcA(), ProcB(), ProcC() + + # Connect p1 (RefPort) with p2 (VarPort) + p1.ref.connect(p2.var_port) + # Connect p2 (OutPort) with p3 (InPort) + p2.out.connect(p3.inp) + + # Regardless where we start searching... 
+ c = Compiler() + procs1 = c._find_processes(p1) + procs2 = c._find_processes(p2) + procs3 = c._find_processes(p3) + + # ...we will find all of them + all_procs = {p1, p2, p3} + self.assertEqual(set(procs1), all_procs) + self.assertEqual(set(procs2), all_procs) + self.assertEqual(set(procs3), all_procs) + def test_find_proc_models(self): """Check finding of ProcModels that implement a Process.""" @@ -658,6 +687,66 @@ def test_create_channel_builders_hierarchical_process(self): self.assertEqual(chb[0].src_process, p.procs.proc1) self.assertEqual(chb[0].dst_process, p.procs.proc2) + def test_create_channel_builders_ref_ports(self): + """Checks creation of channel builders when a process is connected + using a RefPort to another process VarPort.""" + + # Create a process with a RefPort (source) + src = ProcA() + + # Create a process with a var (destination) + dst = ProcB() + + # Connect them using RefPort and VarPort + src.ref.connect(dst.var_port) + + # Create a manual proc_map + proc_map = { + src: PyProcModelA, + dst: PyProcModelB + } + + # Create channel builders + c = Compiler() + cbs = c._create_channel_builders(proc_map) + + # This should result in 2 channel builder + from lava.magma.compiler.builder import ChannelBuilderMp + self.assertEqual(len(cbs), 2) + self.assertIsInstance(cbs[0], ChannelBuilderMp) + self.assertEqual(cbs[0].src_process, src) + self.assertEqual(cbs[0].dst_process, dst) + + def test_create_channel_builders_ref_ports_implicit(self): + """Checks creation of channel builders when a process is connected + using a RefPort to another process Var (implicit VarPort).""" + + # Create a process with a RefPort (source) + src = ProcA() + + # Create a process with a var (destination) + dst = ProcB() + + # Connect them using RefPort and Var (creates implicitly a VarPort) + src.ref.connect_var(dst.some_var) + + # Create a manual proc_map + proc_map = { + src: PyProcModelA, + dst: PyProcModelB + } + + # Create channel builders + c = Compiler() + cbs = 
c._create_channel_builders(proc_map) + + # This should result in 2 channel builder + from lava.magma.compiler.builder import ChannelBuilderMp + self.assertEqual(len(cbs), 2) + self.assertIsInstance(cbs[0], ChannelBuilderMp) + self.assertEqual(cbs[0].src_process, src) + self.assertEqual(cbs[0].dst_process, dst) + # ToDo: (AW) @YS/@JM Please fix unit test by passing run_srv_builders to # _create_exec_vars when ready def test_create_py_exec_vars(self): diff --git a/tests/lava/magma/core/model/test_py_model.py b/tests/lava/magma/core/model/test_py_model.py index 6f9b4b083..1edb5d565 100644 --- a/tests/lava/magma/core/model/test_py_model.py +++ b/tests/lava/magma/core/model/test_py_model.py @@ -8,14 +8,17 @@ from lava.magma.core.process.process import AbstractProcess from lava.magma.core.process.variable import Var -from lava.magma.core.process.ports.ports import InPort, OutPort +from lava.magma.core.process.ports.ports import InPort, OutPort, RefPort, \ + VarPort from lava.magma.core.decorator import implements, requires from lava.magma.core.resources import CPU from lava.magma.core.model.py.model import AbstractPyProcessModel from lava.magma.core.model.py.type import LavaPyType -from lava.magma.core.model.py.ports import PyInPort, PyOutPort +from lava.magma.core.model.py.ports import PyInPort, PyOutPort, PyRefPort, \ + PyVarPort -from lava.magma.compiler.utils import VarInitializer, PortInitializer +from lava.magma.compiler.utils import VarInitializer, PortInitializer, \ + VarPortInitializer from lava.magma.compiler.builder import PyProcessBuilder from lava.magma.compiler.channels.interfaces import AbstractCspPort @@ -104,7 +107,7 @@ class ProcModelForLavaPyType1(AbstractPyProcessModel): port: PyInPort = LavaPyType(123, int) # type: ignore -# A wrong ProcessModel with wrong syb type +# A wrong ProcessModel with wrong sub type @implements(proc=ProcForLavaPyType) @requires(CPU) class ProcModelForLavaPyType2(AbstractPyProcessModel): @@ -118,6 +121,24 @@ class 
ProcModelForLavaPyType3(AbstractPyProcessModel): port: PyInPort = LavaPyType(PyOutPort, int) +# A minimal process to test RefPorts and VarPorts +class ProcRefVar(AbstractProcess): + def __init__(self, **kwargs): + super().__init__(**kwargs) + self.ref = RefPort(shape=(3,)) + self.var = Var(shape=(3,), init=4) + self.var_port = VarPort(self.var) + + +# A minimal PyProcModel implementing ProcRefVar +@implements(proc=ProcRefVar) +@requires(CPU) +class PyProcModelRefVar(AbstractPyProcessModel): + ref: PyRefPort = LavaPyType(PyRefPort.VEC_DENSE, int) + var: np.ndarray = LavaPyType(np.ndarray, np.int32) + var_port: PyVarPort = LavaPyType(PyVarPort.VEC_DENSE, int) + + class TestPyProcessBuilder(unittest.TestCase): """ProcessModels are not not created directly but through a corresponding PyProcessBuilder. Therefore, we test both classes together.""" @@ -234,7 +255,7 @@ def test_check_lava_py_types(self): InPort called 'port' """ - # Create univeral PortInitializer reflecting the 'port' in + # Create universal PortInitializer reflecting the 'port' in # ProcForLavaPyType pi = PortInitializer("port", (1,), np.intc, "InPort", 32) @@ -411,14 +432,73 @@ def test_build_with_dangling_ports(self): # Validate that the Process with no OutPorts indeed has no output # CspPort self.assertIsInstance( - pm_with_no_out_ports.in_port._csp_ports[0], FakeCspPort) - self.assertEqual(pm_with_no_out_ports.out_port._csp_ports, []) + pm_with_no_out_ports.in_port._csp_recv_ports[0], FakeCspPort) + self.assertEqual(pm_with_no_out_ports.out_port._csp_send_ports, []) # Validate that the Process with no InPorts indeed has no input # CspPort - self.assertEqual(pm_with_no_in_ports.in_port._csp_ports, []) + self.assertEqual(pm_with_no_in_ports.in_port._csp_recv_ports, []) self.assertIsInstance( - pm_with_no_in_ports.out_port._csp_ports[0], FakeCspPort) + pm_with_no_in_ports.out_port._csp_send_ports[0], FakeCspPort) + + def test_set_ref_var_ports(self): + """Check RefPorts and VarPorts can be set.""" + + # 
Create a new ProcBuilder + b = PyProcessBuilder(PyProcModelRefVar, 0) + + # Create Process for which we want to build PyProcModel + proc = ProcRefVar() + + # Normally, the Compiler would create PortInitializers from all + # ref ports holding only its name and shape + ports = list(proc.ref_ports) + ref_ports = [PortInitializer( + pt.name, + pt.shape, + getattr(PyProcModelRefVar, pt.name).d_type, + pt.__class__.__name__, 32) + for pt in ports] + # Similarly, the Compiler would create VarPortInitializers from all + # var ports holding only its name, shape and var_name + ports = list(proc.var_ports) + var_ports = [VarPortInitializer( + pt.name, + pt.shape, + pt.var.name, + getattr(PyProcModelRefVar, pt.name).d_type, + pt.__class__.__name__, 32, PyRefPort.VEC_DENSE) + for pt in ports] + # Later, the Runtime, would normally create CspPorts that implements + # the actual message passing via channels between RefPorts and + # VarPorts. Here we just create some fake CspPorts for each Ref- and + # VarPort. 2 CspChannels per Ref-/VarPort. 
+ csp_ports = [] + for port in list(ref_ports): + csp_ports.append(FakeCspPort(port.name)) + csp_ports.append(FakeCspPort(port.name)) + for port in list(var_ports): + csp_ports.append(FakeCspPort(port.name)) + csp_ports.append(FakeCspPort(port.name)) + + # During compilation, the Compiler creates and then sets + # PortInitializers and VarPortInitializers + b.set_ref_ports(ref_ports) + b.set_var_ports(var_ports) + # The Runtime sets CspPorts + b.set_csp_ports(csp_ports) + + # All the objects are converted into dictionaries to retrieve them by + # name + self.assertEqual(list(b.py_ports.values()), []) + self.assertEqual(list(b.ref_ports.values()), ref_ports) + self.assertEqual(list(b.var_ports.values()), var_ports) + self.assertEqual(list(v for vv in b.csp_ports.values() + for v in vv), csp_ports) + self.assertEqual(b.ref_ports["ref"], ref_ports[0]) + self.assertEqual(b.csp_ports["ref"], [csp_ports[0], csp_ports[1]]) + self.assertEqual(b.var_ports["var_port"], var_ports[0]) + self.assertEqual(b.csp_ports["var_port"], [csp_ports[2], csp_ports[3]]) if __name__ == "__main__": diff --git a/tests/lava/magma/core/process/test_ports.py b/tests/lava/magma/core/process/test_ports.py index a395b22a8..7c611f25a 100644 --- a/tests/lava/magma/core/process/test_ports.py +++ b/tests/lava/magma/core/process/test_ports.py @@ -1,7 +1,7 @@ # Copyright (C) 2021 Intel Corporation # SPDX-License-Identifier: BSD-3-Clause import unittest - +from lava.magma.core.process.process import AbstractProcess from lava.magma.core.process.ports.ports import ( InPort, OutPort, @@ -245,9 +245,68 @@ def test_connect_RefPort_to_Var(self): # In this case, the VarPort inherits its name and parent process from # the Var it wraps - self.assertEqual(vp.name, v.name + "_port") + self.assertEqual(vp.name, "_" + v.name + "_implicit_port") # (We can't check for the same parent process here because it has not - # been assigned ot the Var yet) + # been assigned to the Var yet) + + def 
test_connect_RefPort_to_Var_process(self):
+        """Checks connecting RefPort implicitly to Var, with registered
+        processes."""
+
+        # Create a mock parent process
+        class VarProcess(AbstractProcess):
+            ...
+
+        # Create a Var and RefPort...
+        v = Var((1, 2, 3))
+        rp = RefPort((1, 2, 3))
+
+        # ...register a process for the Var
+        v.process = VarProcess()
+
+        # ...then connect them directly via connect_var(..)
+        rp.connect_var(v)
+
+        # This has the same effect as connecting a RefPort explicitly via a
+        # VarPort to a Var...
+        self.assertEqual(rp.get_dst_vars(), [v])
+        # ... but still creates a VarPort implicitly
+        vp = rp.get_dst_ports()[0]
+        self.assertIsInstance(vp, VarPort)
+        # ... which wraps the original Var
+        self.assertEqual(vp.var, v)
+
+        # In this case, the VarPort inherits its name and parent process from
+        # the Var it wraps
+        self.assertEqual(vp.name, "_" + v.name + "_implicit_port")
+        self.assertEqual(vp.process, v.process)
+
+    def test_connect_RefPort_to_Var_process_conflict(self):
+        """Checks connecting RefPort implicitly to Var, with registered
+        processes and conflicting names. -> AssertionError"""
+
+        # Create a mock parent process
+        class VarProcess(AbstractProcess):
+            # Attribute is named like our implicit VarPort after creation
+            _existing_attr_implicit_port = None
+
+        # Create a Var and RefPort...
+        v = Var((1, 2, 3))
+        rp = RefPort((1, 2, 3))
+
+        # Create a Var and RefPort...
+        v = Var((1, 2, 3))
+        rp = RefPort((1, 2, 3))
+
+        # ...register a process for the Var and name it so it conflicts with
+        # the attribute of VarProcess (very unlikely to happen)
+        v.process = VarProcess()
+        v.name = "existing_attr"
+
+        # ... and connect it directly via connect_var(..) 
+ # The naming conflict should raise an AssertionError + with self.assertRaises(AssertionError): + rp.connect_var(v) def test_connect_RefPort_to_many_Vars(self): """Checks that RefPort can be connected to many Vars.""" diff --git a/tests/lava/magma/runtime/test_get_set_var.py b/tests/lava/magma/runtime/test_get_set_var.py index 02f33ee69..ba11929a4 100644 --- a/tests/lava/magma/runtime/test_get_set_var.py +++ b/tests/lava/magma/runtime/test_get_set_var.py @@ -1,3 +1,7 @@ +# Copyright (C) 2021 Intel Corporation +# SPDX-License-Identifier: BSD-3-Clause +# See: https://spdx.org/licenses/ + import numpy as np import unittest From 2d0ec745bcc5d350ca0d6a13ae1aac8aba6c2188 Mon Sep 17 00:00:00 2001 From: PhilippPlank Date: Fri, 12 Nov 2021 14:54:05 +0100 Subject: [PATCH 02/14] - Initial enablement of RefPort and VarPorts --- .../lava/magma/runtime/test_ref_var_ports.py | 239 ++++++++++++++++++ 1 file changed, 239 insertions(+) create mode 100644 tests/lava/magma/runtime/test_ref_var_ports.py diff --git a/tests/lava/magma/runtime/test_ref_var_ports.py b/tests/lava/magma/runtime/test_ref_var_ports.py new file mode 100644 index 000000000..70c135647 --- /dev/null +++ b/tests/lava/magma/runtime/test_ref_var_ports.py @@ -0,0 +1,239 @@ +# Copyright (C) 2021 Intel Corporation +# SPDX-License-Identifier: BSD-3-Clause +# See: https://spdx.org/licenses/ + +import numpy as np +import unittest + +from lava.magma.core.decorator import implements, requires +from lava.magma.core.model.py.model import PyLoihiProcessModel +from lava.magma.core.model.py.ports import PyRefPort, PyVarPort +from lava.magma.core.model.py.type import LavaPyType +from lava.magma.core.process.ports.ports import RefPort, VarPort +from lava.magma.core.process.process import AbstractProcess +from lava.magma.core.process.variable import Var +from lava.magma.core.resources import CPU +from lava.magma.core.sync.domain import SyncDomain +from lava.magma.core.sync.protocols.loihi_protocol import LoihiProtocol +from 
lava.magma.core.run_configs import RunConfig
+from lava.magma.core.run_conditions import RunSteps
+
+
+# A minimal process with a RefPort and a Var wrapped by a VarPort
+class P1(AbstractProcess):
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+        self.ref = RefPort(shape=(3,))
+        self.v = Var(shape=(2,), init=17)
+        self.var_port_read = VarPort(self.v)
+
+
+# A minimal process with a VarPort and a RefPort
+class P2(AbstractProcess):
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+        self.var = Var(shape=(3,), init=4)
+        self.var_port = VarPort(self.var)
+        self.ref_read = RefPort(shape=(2,))
+        self.var_read = Var(shape=(2,), init=1)
+
+
+# A minimal PyProcModel implementing P1
+@implements(proc=P1, protocol=LoihiProtocol)
+@requires(CPU)
+class PyProcModel1(PyLoihiProcessModel):
+    ref: PyRefPort = LavaPyType(PyRefPort.VEC_DENSE, int)
+    v: np.ndarray = LavaPyType(np.ndarray, np.int32)
+    var_port_read: PyVarPort = LavaPyType(PyVarPort.VEC_DENSE, int)
+
+    def pre_guard(self):
+        return True
+
+    def run_pre_mgmt(self):
+        if self.current_ts > 1:
+            ref_data = np.array([5, 5, 5]) + self.current_ts
+            self.ref.write(ref_data)
+
+
+# A minimal PyProcModel implementing P2
+@implements(proc=P2, protocol=LoihiProtocol)
+@requires(CPU)
+class PyProcModel2(PyLoihiProcessModel):
+    ref_read: PyRefPort = LavaPyType(PyRefPort.VEC_DENSE, int)
+    var: np.ndarray = LavaPyType(np.ndarray, np.int32)
+    var_port: PyVarPort = LavaPyType(PyVarPort.VEC_DENSE, int)
+    var_read: np.ndarray = LavaPyType(np.ndarray, np.int32)
+
+    def pre_guard(self):
+        return True
+
+    def run_pre_mgmt(self):
+        if self.current_ts > 1:
+            self.var_read = self.ref_read.read()
+
+
+class TestRefVarPorts(unittest.TestCase):
+    def test_unconnected_Ref_Var_ports(self):
+        """RefPorts and VarPorts defined in ProcessModels, but not connected
+        should not lead to an error."""
+        sender = P1()
+
+        # No connections are made
+
+        class MyRunCfg(RunConfig):
+            def select(self, proc, proc_models):
+                return proc_models[0]
+
+        simple_sync_domain = 
SyncDomain("simple", LoihiProtocol(), + [sender]) + + # should run without error (not doing anything) + sender.run(RunSteps(num_steps=3, blocking=True), + MyRunCfg(custom_sync_domains=[simple_sync_domain])) + sender.stop() + + def test_explicit_Ref_Var_port_write(self): + """Tests the connection of a RefPort to a explicitly created VarPort. + The RefPort sends data after the first time step to the VarPort, + starting with (5 + current time step) = 7). The initial value of the + var is 4. We read out the value after each time step.""" + + sender = P1() + recv = P2() + + # Connect RefPort with explicit VarPort + sender.ref.connect(recv.var_port) + + class MyRunCfg(RunConfig): + def select(self, proc, proc_models): + return proc_models[0] + + simple_sync_domain = SyncDomain("simple", LoihiProtocol(), + [sender, recv]) + + # first time step, no data is sent + sender.run(RunSteps(num_steps=1, blocking=True), + MyRunCfg(custom_sync_domains=[simple_sync_domain])) + # initial value is expected + self.assertTrue(np.all(recv.var.get() == np.array([4., 4., 4.]))) + # second time step, data is sent (7) + sender.run(RunSteps(num_steps=1, blocking=True), + MyRunCfg(custom_sync_domains=[simple_sync_domain])) + self.assertTrue(np.all(recv.var.get() == np.array([7., 7., 7.]))) + # third time step, data is sent (8) + sender.run(RunSteps(num_steps=1, blocking=True), + MyRunCfg(custom_sync_domains=[simple_sync_domain])) + self.assertTrue(np.all(recv.var.get() == np.array([8., 8., 8.]))) + # fourth time step, data is sent (9) + sender.run(RunSteps(num_steps=1, blocking=True), + MyRunCfg(custom_sync_domains=[simple_sync_domain])) + self.assertTrue(np.all(recv.var.get() == np.array([9., 9., 9.]))) + sender.stop() + + def test_implicit_Ref_Var_port_write(self): + """Tests the connection of a RefPort to a implicitly created VarPort. + The RefPort sends data after the first time step to the VarPort, + starting with (5 + current time step) = 7). The initial value of the + var is 4. 
We read out the value after each time step.""" + + sender = P1() + recv = P2() + + # Connect RefPort with Var using an implicit VarPort + sender.ref.connect_var(recv.var) + + class MyRunCfg(RunConfig): + def select(self, proc, proc_models): + return proc_models[0] + + simple_sync_domain = SyncDomain("simple", LoihiProtocol(), + [sender, recv]) + + # first time step, no data is sent + sender.run(RunSteps(num_steps=1, blocking=True), + MyRunCfg(custom_sync_domains=[simple_sync_domain])) + # initial value is expected + self.assertTrue(np.all(recv.var.get() == np.array([4., 4., 4.]))) + # second time step, data is sent (7) + sender.run(RunSteps(num_steps=1, blocking=True), + MyRunCfg(custom_sync_domains=[simple_sync_domain])) + self.assertTrue(np.all(recv.var.get() == np.array([7., 7., 7.]))) + # third time step, data is sent (8) + sender.run(RunSteps(num_steps=1, blocking=True), + MyRunCfg(custom_sync_domains=[simple_sync_domain])) + self.assertTrue(np.all(recv.var.get() == np.array([8., 8., 8.]))) + # fourth time step, data is sent (9) + sender.run(RunSteps(num_steps=1, blocking=True), + MyRunCfg(custom_sync_domains=[simple_sync_domain])) + self.assertTrue(np.all(recv.var.get() == np.array([9., 9., 9.]))) + sender.stop() + + def test_explicit_Ref_Var_port_read(self): + """Tests the connection of a RefPort to a explicitly created VarPort. + The RefPort "ref_read" reads data after the first time step of the + VarPort "var_port_read" which has the value of the Var "v" (= 17) and + writes this value into the Var "var_read". The initial value of the var + "var_read" is 1. 
At time step 2 the value of "var_read" is 17.""" + + sender = P1() + recv = P2() + + # Connect RefPort with explicit VarPort + recv.ref_read.connect(sender.var_port_read) + + class MyRunCfg(RunConfig): + def select(self, proc, proc_models): + return proc_models[0] + + simple_sync_domain = SyncDomain("simple", LoihiProtocol(), + [sender, recv]) + + # first time step, no read + sender.run(RunSteps(num_steps=1, blocking=True), + MyRunCfg(custom_sync_domains=[simple_sync_domain])) + # initial value (1) is expected + self.assertTrue(np.all(recv.var_read.get() == np.array([1., 1.]))) + # second time step, the RefPort read from the VarPort and wrote the + # result in "var_read" (= 17) + sender.run(RunSteps(num_steps=1, blocking=True), + MyRunCfg(custom_sync_domains=[simple_sync_domain])) + self.assertTrue( + np.all(recv.var_read.get() == np.array([17., 17.]))) + sender.stop() + + def test_implicit_Ref_Var_port_read(self): + """Tests the connection of a RefPort to a implicitly created VarPort. + The RefPort "ref_read" reads data after the first time step of the + of the Var "v" (= 17) using an implicit VarPort and writes this value + into the Var "var_read". The initial value of the var "var_read" is 1. 
+ At time step 2 the value of "var_read" is 17.""" + + sender = P1() + recv = P2() + + # Connect RefPort with explicit VarPort + recv.ref_read.connect_var(sender.v) + + class MyRunCfg(RunConfig): + def select(self, proc, proc_models): + return proc_models[0] + + simple_sync_domain = SyncDomain("simple", LoihiProtocol(), + [sender, recv]) + + # first time step, no read + recv.run(RunSteps(num_steps=1, blocking=True), + MyRunCfg(custom_sync_domains=[simple_sync_domain])) + # initial value (1) is expected + self.assertTrue(np.all(recv.var_read.get() == np.array([1., 1.]))) + # second time step, the RefPort read from the VarPort and wrote the + # result in "var_read" (= 17) + recv.run(RunSteps(num_steps=1, blocking=True), + MyRunCfg(custom_sync_domains=[simple_sync_domain])) + self.assertTrue( + np.all(recv.var_read.get() == np.array([17., 17.]))) + recv.stop() + + +if __name__ == '__main__': + unittest.main() From 25a3a6807ed25d35ad0651224a487a2570cc1559 Mon Sep 17 00:00:00 2001 From: PhilippPlank Date: Fri, 12 Nov 2021 15:03:41 +0100 Subject: [PATCH 03/14] - Initial enablement of RefPort and VarPorts --- src/lava/magma/compiler/compiler.py | 2 +- src/lava/magma/compiler/utils.py | 13 +------------ tests/lava/magma/core/model/test_py_model.py | 4 ++-- 3 files changed, 4 insertions(+), 15 deletions(-) diff --git a/src/lava/magma/compiler/compiler.py b/src/lava/magma/compiler/compiler.py index 36ded48a8..42d906655 100644 --- a/src/lava/magma/compiler/compiler.py +++ b/src/lava/magma/compiler/compiler.py @@ -31,7 +31,7 @@ from lava.magma.core.model.model import AbstractProcessModel from lava.magma.core.model.nc.model import AbstractNcProcessModel from lava.magma.core.model.py.model import AbstractPyProcessModel -from lava.magma.core.model.py.ports import PyVarPort, RefVarTypeMapping +from lava.magma.core.model.py.ports import RefVarTypeMapping from lava.magma.core.model.sub.model import AbstractSubProcessModel from lava.magma.core.process.ports.ports import AbstractPort, 
VarPort, \ ImplicitVarPort diff --git a/src/lava/magma/compiler/utils.py b/src/lava/magma/compiler/utils.py index 3a20c51f0..fb52b8ce7 100644 --- a/src/lava/magma/compiler/utils.py +++ b/src/lava/magma/compiler/utils.py @@ -28,16 +28,5 @@ class VarPortInitializer: d_type: type port_type: str size: int - - -# check if can be a subclass of PortInitializer -@dataclass -class VarPortInitializer: - name: str - shape: ty.Tuple[int, ...] - var_name: str - d_type: type - port_type: str - size: int - port_cls: type port_cls: type + diff --git a/tests/lava/magma/core/model/test_py_model.py b/tests/lava/magma/core/model/test_py_model.py index 1edb5d565..1cd6affb9 100644 --- a/tests/lava/magma/core/model/test_py_model.py +++ b/tests/lava/magma/core/model/test_py_model.py @@ -458,7 +458,7 @@ def test_set_ref_var_ports(self): pt.shape, getattr(PyProcModelRefVar, pt.name).d_type, pt.__class__.__name__, 32) - for pt in ports] + for pt in ports] # Similarly, the Compiler would create VarPortInitializers from all # var ports holding only its name, shape and var_name ports = list(proc.var_ports) @@ -468,7 +468,7 @@ def test_set_ref_var_ports(self): pt.var.name, getattr(PyProcModelRefVar, pt.name).d_type, pt.__class__.__name__, 32, PyRefPort.VEC_DENSE) - for pt in ports] + for pt in ports] # Later, the Runtime, would normally create CspPorts that implements # the actual message passing via channels between RefPorts and # VarPorts. 
Here we just create some fake CspPorts for each Ref- and From a2d1765be6eed4009756db4492f2e7bcaaf96dab Mon Sep 17 00:00:00 2001 From: PhilippPlank Date: Fri, 12 Nov 2021 15:13:06 +0100 Subject: [PATCH 04/14] - Initial enablement of RefPort and VarPorts --- src/lava/magma/compiler/utils.py | 1 - src/lava/magma/core/process/ports/ports.py | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/src/lava/magma/compiler/utils.py b/src/lava/magma/compiler/utils.py index fb52b8ce7..14da90baf 100644 --- a/src/lava/magma/compiler/utils.py +++ b/src/lava/magma/compiler/utils.py @@ -29,4 +29,3 @@ class VarPortInitializer: port_type: str size: int port_cls: type - diff --git a/src/lava/magma/core/process/ports/ports.py b/src/lava/magma/core/process/ports/ports.py index 1c55f07e3..bef744a0d 100644 --- a/src/lava/magma/core/process/ports/ports.py +++ b/src/lava/magma/core/process/ports/ports.py @@ -556,7 +556,7 @@ def _get_new_shape(ports: ty.List[AbstractPort], axis): # Compute total size along concatenation axis total_size += shape[axis] # Extract shape dimensions other than concatenation axis - shapes_ex_axis.append(shape[:axis] + shape[axis + 1 :]) + shapes_ex_axis.append(shape[:axis] + shape[axis + 1:]) if len(shapes_ex_axis) > 1: shapes_incompatible = shapes_ex_axis[-2] != shapes_ex_axis[-1] From 1daa9af3da02b95bd229ab55d9870f3151a8c9a4 Mon Sep 17 00:00:00 2001 From: PhilippPlank Date: Tue, 16 Nov 2021 13:00:51 +0100 Subject: [PATCH 05/14] - Enablement of RefPorts and VarPorts - addressed change requests from PR #46 --- src/lava/magma/compiler/builder.py | 32 +++-- src/lava/magma/compiler/compiler.py | 12 +- src/lava/magma/core/model/interfaces.py | 21 ++- src/lava/magma/core/model/py/model.py | 75 +++++++--- src/lava/magma/core/model/py/ports.py | 104 +++++++------- src/lava/magma/core/process/ports/ports.py | 33 ++++- src/lava/magma/runtime/runtime_service.py | 73 +++++++--- tests/lava/magma/compiler/test_compiler.py | 13 +- 
tests/lava/magma/core/model/test_py_model.py | 8 +- .../magma/core/process/test_lif_dense_lif.py | 2 +- tests/lava/magma/core/process/test_ports.py | 5 +- .../lava/magma/runtime/test_ref_var_ports.py | 132 ++++++++---------- 12 files changed, 304 insertions(+), 206 deletions(-) diff --git a/src/lava/magma/compiler/builder.py b/src/lava/magma/compiler/builder.py index b9f223f44..11fed95d5 100644 --- a/src/lava/magma/compiler/builder.py +++ b/src/lava/magma/compiler/builder.py @@ -215,7 +215,9 @@ def check_lava_py_types(self): # ToDo: Also check that Vars are initializable with var.value provided def set_variables(self, variables: ty.List[VarInitializer]): - """Set variables list + """Appends the given list of variables to the ProcessModel. Used by the + compiler to create a ProcessBuilder during the compilation of + ProcessModels. Parameters ---------- @@ -228,7 +230,9 @@ def set_variables(self, variables: ty.List[VarInitializer]): self.vars.update(new_vars) def set_py_ports(self, py_ports: ty.List[PortInitializer], check=True): - """Set py_ports + """Appends the given list of PyPorts to the ProcessModel. Used by the + compiler to create a ProcessBuilder during the compilation of + ProcessModels. Parameters ---------- @@ -244,7 +248,9 @@ def set_py_ports(self, py_ports: ty.List[PortInitializer], check=True): self.py_ports.update(new_ports) def set_ref_ports(self, ref_ports: ty.List[PortInitializer]): - """Set py_ports + """Appends the given list of RefPorts to the ProcessModel. Used by the + compiler to create a ProcessBuilder during the compilation of + ProcessModels. Parameters ---------- @@ -256,7 +262,9 @@ def set_ref_ports(self, ref_ports: ty.List[PortInitializer]): self.ref_ports.update(new_ports) def set_var_ports(self, var_ports: ty.List[VarPortInitializer]): - """Set var_ports + """Appends the given list of VarPorts to the ProcessModel. Used by the + compiler to create a ProcessBuilder during the compilation of + ProcessModels. 
Parameters ---------- @@ -267,7 +275,8 @@ def set_var_ports(self, var_ports: ty.List[VarPortInitializer]): self.var_ports.update(new_ports) def set_csp_ports(self, csp_ports: ty.List[AbstractCspPort]): - """Set CSP Ports + """Appends the given list of CspPorts to the ProcessModel. Used by the + runtime to configure csp ports during initialization (_build_channels). Parameters ---------- @@ -290,7 +299,7 @@ def set_csp_ports(self, csp_ports: ty.List[AbstractCspPort]): for port_name in new_ports: if not hasattr(self.proc_model, port_name): raise AssertionError("PyProcessModel '{}' has \ - no port named '{}'.".format(proc_name, port_name)) + no port named '{}'.".format(proc_name, port_name)) if port_name in self.csp_ports: self.csp_ports[port_name].extend(new_ports[port_name]) @@ -315,6 +324,9 @@ def _get_lava_type(self, name: str) -> LavaPyType: return getattr(self.proc_model, name) # ToDo: Need to differentiate signed and unsigned variable precisions + # TODO: (PP) Combine PyPort/RefPort/VarPort initialization + # TODO: (PP) Find a cleaner way to find/address csp_send/csp_recv ports (in + # Ref/VarPort initialization) def build(self): """Builds a PyProcModel at runtime within Runtime. 
@@ -362,7 +374,7 @@ def build(self): # Initialize RefPorts for name, p in self.ref_ports.items(): - # Build PyPort + # Build RefPort lt = self._get_lava_type(name) port_cls = ty.cast(ty.Type[PyRefPort], lt.cls) csp_recv = None @@ -376,12 +388,12 @@ def build(self): port = port_cls(csp_send, csp_recv, pm, p.shape, lt.d_type) - # Create dynamic PyPort attribute on ProcModel + # Create dynamic RefPort attribute on ProcModel setattr(pm, name, port) # Initialize VarPorts for name, p in self.var_ports.items(): - # Build PyPort + # Build VarPort if p.port_cls is None: # VarPort is not connected continue @@ -397,7 +409,7 @@ def build(self): port = port_cls( p.var_name, csp_send, csp_recv, pm, p.shape, p.d_type) - # Create dynamic PyPort attribute on ProcModel + # Create dynamic VarPort attribute on ProcModel setattr(pm, name, port) for port in self.csp_rs_recv_port.values(): diff --git a/src/lava/magma/compiler/compiler.py b/src/lava/magma/compiler/compiler.py index 42d906655..2ea6a4ae8 100644 --- a/src/lava/magma/compiler/compiler.py +++ b/src/lava/magma/compiler/compiler.py @@ -260,7 +260,9 @@ def _group_proc_by_model(proc_map: PROC_MAP) \ def _map_var_port_class(port: VarPort, proc_groups: ty.Dict[ty.Type[AbstractProcessModel], ty.List[AbstractProcess]]): - """Derives the port class of a given VarPort from its source RefPort.""" + """Maps the port class of a given VarPort from its source RefPort. 
This + is needed as implicitly created VarPorts created by connecting RefPorts + directly to Vars, have no LavaType.""" # Get the source RefPort of the VarPort rp = port.get_src_ports() @@ -330,7 +332,8 @@ def _compile_proc_models( pp_ch_size, self._map_var_port_class(pt, proc_groups))) - # Set implicit VarPorts as attribute to ProcessModel + # Set implicit VarPorts (created by connecting a RefPort + # directly to a Var) as attribute to ProcessModel if isinstance(pt, ImplicitVarPort): setattr(pm, pt.name, pt) @@ -557,8 +560,9 @@ def _get_channel_type(src: ty.Type[AbstractProcessModel], @staticmethod def _get_port_dtype(port: AbstractPort, proc_model: ty.Type[AbstractProcessModel]) -> type: - """Returns the type of a port, as specified in the corresponding - ProcessModel.""" + """Returns the d_type of a Process Port, as specified in the + corresponding PortImplementation of the ProcessModel implementing the + Process""" # In-, Out-, Ref- and explicit VarPorts if hasattr(proc_model, port.name): diff --git a/src/lava/magma/core/model/interfaces.py b/src/lava/magma/core/model/interfaces.py index 6b03f01cd..4170f89a6 100644 --- a/src/lava/magma/core/model/interfaces.py +++ b/src/lava/magma/core/model/interfaces.py @@ -2,10 +2,11 @@ # SPDX-License-Identifier: BSD-3-Clause # See: https://spdx.org/licenses/ import typing as ty -from abc import ABCMeta, abstractmethod +from abc import ABC, abstractmethod +from lava.magma.compiler.channels.interfaces import AbstractCspPort -class AbstractPortImplementation(metaclass=ABCMeta): +class AbstractPortImplementation(ABC): def __init__( self, process_model: "AbstractProcessModel", # noqa: F821 @@ -16,12 +17,18 @@ def __init__( self._shape = shape self._d_type = d_type + @property @abstractmethod + def csp_ports(self) -> ty.List[AbstractCspPort]: + """Returns all csp ports of the port.""" + pass + def start(self): - # start all csp ports - ... 
+ """Start all csp ports.""" + for csp_port in self.csp_ports: + csp_port.start() - @abstractmethod def join(self): - # join all csp ports - ... + """Join all csp ports""" + for csp_port in self.csp_ports: + csp_port.join() diff --git a/src/lava/magma/core/model/py/model.py b/src/lava/magma/core/model/py/model.py index 7a1daa764..8f2e24ec3 100644 --- a/src/lava/magma/core/model/py/model.py +++ b/src/lava/magma/core/model/py/model.py @@ -44,6 +44,7 @@ def __setattr__(self, key: str, value: ty.Any): self.__dict__[key] = value if isinstance(value, AbstractPyPort): self.py_ports.append(value) + # Store all VarPorts for efficient RefPort -> VarPort handling if isinstance(value, PyVarPort): self.var_ports.append(value) @@ -106,44 +107,67 @@ def post_guard(self): # TODO: (PP) need to handle PAUSE command def run(self): + """Retrieves commands from the runtime service to iterate through the + phases of Loihi and calls their corresponding methods of the + ProcessModels. The phase is retrieved from runtime service + (service_to_process_cmd). After calling the method of a phase of all + ProcessModels the runtime service is informed about completion. 
The + loop ends when the STOP command is received.""" while True: + # Probe if there is a new command from the runtime service if self.service_to_process_cmd.probe(): phase = self.service_to_process_cmd.recv() if np.array_equal(phase, MGMT_COMMAND.STOP): self.process_to_service_ack.send(MGMT_RESPONSE.TERMINATED) self.join() return + # Spiking phase - increase time step if np.array_equal(phase, PyLoihiProcessModel.Phase.SPK): self.current_ts += 1 self.run_spk() self.process_to_service_ack.send(MGMT_RESPONSE.DONE) + # Pre-management phase elif np.array_equal(phase, PyLoihiProcessModel.Phase.PRE_MGMT): + # Enable via guard method if self.pre_guard(): self.run_pre_mgmt() self.process_to_service_ack.send(MGMT_RESPONSE.DONE) + # Handle VarPort requests from RefPorts if len(self.var_ports) > 0: self._handle_var_ports() + # Learning phase elif np.array_equal(phase, PyLoihiProcessModel.Phase.LRN): + # Enable via guard method if self.lrn_guard(): self.run_lrn() self.process_to_service_ack.send(MGMT_RESPONSE.DONE) + # Post-management phase elif np.array_equal(phase, PyLoihiProcessModel.Phase.POST_MGMT): + # Enable via guard method if self.post_guard(): self.run_post_mgmt() self.process_to_service_ack.send(MGMT_RESPONSE.DONE) + # Handle VarPort requests from RefPorts if len(self.var_ports) > 0: self._handle_var_ports() + # Host phase - called at the last time step before STOP elif np.array_equal(phase, PyLoihiProcessModel.Phase.HOST): + # Handle get/set Var requests from runtime service self._handle_get_set_var() else: raise ValueError(f"Wrong Phase Info Received : {phase}") # FIXME: (PP) might not be able to perform get/set during pause def _handle_get_set_var(self): + """Handles all get/set Var requests from the runtime service and calls + the corresponding handling methods. 
The loop ends upon a + new command from runtime service after all get/set Var requests have + been handled.""" while True: + # Probe if there is a get/set Var request from runtime service if self.service_to_process_req.probe(): - req_port: CspRecvPort = self.service_to_process_req - request: np.ndarray = req_port.recv() + # Get the type of the request + request = self.service_to_process_req.recv() if np.array_equal(request, REQ_TYPE.GET): self._handle_get_var() elif np.array_equal(request, REQ_TYPE.SET): @@ -151,18 +175,21 @@ def _handle_get_set_var(self): else: raise RuntimeError(f"Unknown request type {request}") + # End if another command from runtime service arrives if self.service_to_process_cmd.probe(): return def _handle_get_var(self): - # 1. Recv Var ID - req_port: CspRecvPort = self.service_to_process_req - var_id: int = req_port.recv()[0].item() - var_name: str = self.var_id_to_var_map[var_id] - var: ty.Any = getattr(self, var_name) + """Handles the get Var command from runtime service.""" + # 1. Receive Var ID and retrieve the Var + var_id = self.service_to_process_req.recv()[0].item() + var_name = self.var_id_to_var_map[var_id] + var = getattr(self, var_name) # 2. Send Var data - data_port: CspSendPort = self.process_to_service_data + data_port = self.process_to_service_data + # Header corresponds to number of values + # Data is either send once (for int) or one by one (array) if isinstance(var, int) or isinstance(var, np.integer): data_port.send(enum_to_np(1)) data_port.send(enum_to_np(var)) @@ -175,25 +202,28 @@ def _handle_get_var(self): data_port.send(enum_to_np(value)) def _handle_set_var(self): - # 1. Recv Var ID - req_port: CspRecvPort = self.service_to_process_req - var_id: int = req_port.recv()[0].item() - var_name: str = self.var_id_to_var_map[var_id] - var: ty.Any = getattr(self, var_name) - - # 2. Recv Var data - data_port: CspRecvPort = self.service_to_process_data + """Handles the set Var command from runtime service.""" + # 1. 
Receive Var ID and retrieve the Var + var_id = self.service_to_process_req.recv()[0].item() + var_name = self.var_id_to_var_map[var_id] + var = getattr(self, var_name) + + # 2. Receive Var data + data_port = self.service_to_process_data if isinstance(var, int) or isinstance(var, np.integer): - data_port.recv() # Ignore as this will be 1 (num_items) + # First item is number of items (1) - not needed + data_port.recv() + # Data to set buffer = data_port.recv()[0] if isinstance(var, int): setattr(self, var_name, buffer.item()) else: setattr(self, var_name, buffer.astype(var.dtype)) elif isinstance(var, np.ndarray): + # First item is number of items num_items = data_port.recv()[0] var_iter = np.nditer(var, op_flags=['readwrite']) - + # Set data one by one for i in var_iter: if num_items == 0: break @@ -202,11 +232,16 @@ def _handle_set_var(self): else: raise RuntimeError("Unsupported type") + # TODO: (PP) use select(..) to service VarPorts instead of a loop def _handle_var_ports(self): - """Check if a VarPort either receives data from a RefPort or needs to - send data to a RefPort.""" + """Handles read/write requests on any VarPorts. 
The loop ends upon a + new command from runtime service after all VarPort service requests have + been handled.""" while True: + # Loop through read/write requests of each VarPort for vp in self.var_ports: vp.service() + + # End if another command from runtime service arrives if self.service_to_process_cmd.probe(): return diff --git a/src/lava/magma/core/model/py/ports.py b/src/lava/magma/core/model/py/ports.py index 666d7279b..0b9377e24 100644 --- a/src/lava/magma/core/model/py/ports.py +++ b/src/lava/magma/core/model/py/ports.py @@ -6,13 +6,18 @@ import functools as ft import numpy as np +from lava.magma.compiler.channels.interfaces import AbstractCspPort from lava.magma.compiler.channels.pypychannel import CspSendPort, CspRecvPort from lava.magma.core.model.interfaces import AbstractPortImplementation from lava.magma.runtime.mgmt_token_enums import enum_to_np class AbstractPyPort(AbstractPortImplementation): - pass + @property + @abstractmethod + def csp_ports(self) -> ty.List[AbstractCspPort]: + """Returns all csp ports of the port.""" + pass class PyInPort(AbstractPyPort): @@ -20,22 +25,19 @@ class PyInPort(AbstractPyPort): If buffer is empty, recv() will be blocking. 
""" - def __init__(self, csp_recv_ports: ty.List[CspRecvPort], *args): - self._csp_recv_ports = csp_recv_ports - super().__init__(*args) - VEC_DENSE: ty.Type["PyInPortVectorDense"] = None VEC_SPARSE: ty.Type["PyInPortVectorSparse"] = None SCALAR_DENSE: ty.Type["PyInPortScalarDense"] = None SCALAR_SPARSE: ty.Type["PyInPortScalarSparse"] = None - def start(self): - for csp_port in self._csp_recv_ports: - csp_port.start() + def __init__(self, csp_recv_ports: ty.List[CspRecvPort], *args): + self._csp_recv_ports = csp_recv_ports + super().__init__(*args) - def join(self): - for csp_port in self._csp_recv_ports: - csp_port.join() + @property + def csp_ports(self) -> ty.List[AbstractCspPort]: + """Returns all csp ports of the port.""" + return self._csp_recv_ports @abstractmethod def recv(self): @@ -98,22 +100,19 @@ def peek(self) -> ty.Tuple[int, int]: class PyOutPort(AbstractPyPort): """Python implementation of OutPort used within AbstractPyProcessModels.""" - def __init__(self, csp_send_ports: ty.List[CspSendPort], *args): - self._csp_send_ports = csp_send_ports - super().__init__(*args) - VEC_DENSE: ty.Type["PyOutPortVectorDense"] = None VEC_SPARSE: ty.Type["PyOutPortVectorSparse"] = None SCALAR_DENSE: ty.Type["PyOutPortScalarDense"] = None SCALAR_SPARSE: ty.Type["PyOutPortScalarSparse"] = None - def start(self): - for csp_port in self._csp_send_ports: - csp_port.start() + def __init__(self, csp_send_ports: ty.List[CspSendPort], *args): + self._csp_send_ports = csp_send_ports + super().__init__(*args) - def join(self): - for csp_port in self._csp_send_ports: - csp_port.join() + @property + def csp_ports(self) -> ty.List[AbstractCspPort]: + """Returns all csp ports of the port.""" + return self._csp_send_ports @abstractmethod def send(self, data: ty.Union[np.ndarray, int]): @@ -159,6 +158,11 @@ class VarPortCmd: class PyRefPort(AbstractPyPort): """Python implementation of RefPort used within AbstractPyProcessModels.""" + VEC_DENSE: ty.Type["PyRefPortVectorDense"] = 
None + VEC_SPARSE: ty.Type["PyRefPortVectorSparse"] = None + SCALAR_DENSE: ty.Type["PyRefPortScalarDense"] = None + SCALAR_SPARSE: ty.Type["PyRefPortScalarSparse"] = None + def __init__(self, csp_send_port: ty.Optional[CspSendPort], csp_recv_port: ty.Optional[CspRecvPort], *args): @@ -166,22 +170,14 @@ def __init__(self, self._csp_send_port = csp_send_port super().__init__(*args) - VEC_DENSE: ty.Type["PyRefPortVectorDense"] = None - VEC_SPARSE: ty.Type["PyRefPortVectorSparse"] = None - SCALAR_DENSE: ty.Type["PyRefPortScalarDense"] = None - SCALAR_SPARSE: ty.Type["PyRefPortScalarSparse"] = None - - def start(self): - if self._csp_send_port is not None: - self._csp_send_port.start() - if self._csp_recv_port is not None: - self._csp_recv_port.start() - - def join(self): - if self._csp_send_port is not None: - self._csp_send_port.join() - if self._csp_recv_port is not None: - self._csp_recv_port.join() + @property + def csp_ports(self) -> ty.List[AbstractCspPort]: + """Returns all csp ports of the port.""" + if self._csp_send_port is not None and self._csp_recv_port is not None: + return [self._csp_send_port, self._csp_recv_port] + else: + # In this case the port was not connected + return [] def read( self, @@ -252,10 +248,14 @@ def write(self, data: int, idx: int): class PyVarPort(AbstractPyPort): - """Python implementation of InPort used within AbstractPyProcessModel. - If buffer is empty, recv() will be blocking. + """Python implementation of VarPort used within AbstractPyProcessModel. 
""" + VEC_DENSE: ty.Type["PyVarPortVectorDense"] = None + VEC_SPARSE: ty.Type["PyVarPortVectorSparse"] = None + SCALAR_DENSE: ty.Type["PyVarPortScalarDense"] = None + SCALAR_SPARSE: ty.Type["PyVarPortScalarSparse"] = None + def __init__(self, var_name: str, csp_send_port: ty.Optional[CspSendPort], @@ -265,22 +265,14 @@ def __init__(self, self.var_name = var_name super().__init__(*args) - VEC_DENSE: ty.Type["PyVarPortVectorDense"] = None - VEC_SPARSE: ty.Type["PyVarPortVectorSparse"] = None - SCALAR_DENSE: ty.Type["PyVarPortScalarDense"] = None - SCALAR_SPARSE: ty.Type["PyVarPortScalarSparse"] = None - - def start(self): - if self._csp_send_port is not None: - self._csp_send_port.start() - if self._csp_recv_port is not None: - self._csp_recv_port.start() - - def join(self): - if self._csp_send_port is not None: - self._csp_send_port.join() - if self._csp_recv_port is not None: - self._csp_recv_port.join() + @property + def csp_ports(self) -> ty.List[AbstractCspPort]: + """Returns all csp ports of the port.""" + if self._csp_send_port is not None and self._csp_recv_port is not None: + return [self._csp_send_port, self._csp_recv_port] + else: + # In this case the port was not connected + return [] def service(self): pass @@ -294,7 +286,7 @@ def service(self): # Inspect incoming data if self._csp_send_port is not None and self._csp_recv_port is not None: - while self._csp_recv_port.probe(): + if self._csp_recv_port.probe(): cmd = enum_to_np(self._csp_recv_port.recv()[0]) # Set the value of the Var with the given data diff --git a/src/lava/magma/core/process/ports/ports.py b/src/lava/magma/core/process/ports/ports.py index bef744a0d..803347d89 100644 --- a/src/lava/magma/core/process/ports/ports.py +++ b/src/lava/magma/core/process/ports/ports.py @@ -347,6 +347,14 @@ def connect( ----------- :param ports: The AbstractRVPort(s) to connect to. 
""" + for p in to_list(ports): + if not isinstance(p, RefPort) and not isinstance(p, VarPort): + raise TypeError( + "RefPorts can only be connected to RefPorts or " + "VarPorts: {!r}: {!r} -> {!r}: {!r} To connect a RefPort " + "to a Var, use ".format( + self.process.__class__.__name__, self.name, + p.process.__class__.__name__, p.name)) self._connect_forward(to_list(ports), AbstractRVPort) def connect_from(self, ports: ty.Union["RefPort", ty.List["RefPort"]]): @@ -357,6 +365,13 @@ def connect_from(self, ports: ty.Union["RefPort", ty.List["RefPort"]]): ---------- :param ports: The RefPort(s) that connect to this RefPort. """ + for p in to_list(ports): + if not isinstance(p, RefPort): + raise TypeError( + "RefPorts can only receive connections from RefPorts: " + "{!r}: {!r} -> {!r}: {!r}".format( + self.process.__class__.__name__, self.name, + p.process.__class__.__name__, p.name)) self._connect_backward(to_list(ports), RefPort) def connect_var(self, variables: ty.Union[Var, ty.List[Var]]): @@ -395,7 +410,7 @@ def connect_var(self, variables: ty.Union[Var, ty.List[Var]]): if v.process is not None: # Only assign when parent process is already assigned vp.process = v.process - # VarPort Name could shadow existing attribute + # VarPort name could shadow existing attribute if hasattr(v.process, vp.name): raise AssertionError( "Name of implicit VarPort might conflict" @@ -447,6 +462,13 @@ def connect(self, ports: ty.Union["VarPort", ty.List["VarPort"]]): ---------- :param ports: The VarPort(s) to connect to. """ + for p in to_list(ports): + if not isinstance(p, VarPort): + raise TypeError( + "VarPorts can only be connected to VarPorts: " + "{!r}: {!r} -> {!r}: {!r}".format( + self.process.__class__.__name__, self.name, + p.process.__class__.__name__, p.name)) self._connect_forward(to_list(ports), VarPort) def connect_from( @@ -459,11 +481,18 @@ def connect_from( ---------- :param ports: The AbstractRVPort(s) that connect to this VarPort. 
""" + for p in to_list(ports): + if not isinstance(p, RefPort) and not isinstance(p, VarPort): + raise TypeError( + "VarPorts can only receive connections from RefPorts or " + "VarPorts: {!r}: {!r} -> {!r}: {!r}".format( + self.process.__class__.__name__, self.name, + p.process.__class__.__name__, p.name)) self._connect_backward(to_list(ports), AbstractRVPort) class ImplicitVarPort(VarPort): - """Wrapper class for VarPort to identify implicitly created VarPorts when + """Sub class for VarPort to identify implicitly created VarPorts when a RefPort connects directly to a Var.""" pass diff --git a/src/lava/magma/runtime/runtime_service.py b/src/lava/magma/runtime/runtime_service.py index ee70543c4..819825886 100644 --- a/src/lava/magma/runtime/runtime_service.py +++ b/src/lava/magma/runtime/runtime_service.py @@ -92,6 +92,10 @@ class Phase: HOST = enum_to_np(5) def _next_phase(self, curr_phase, is_last_time_step: bool): + """Advances the current phase to the next phase. + On the first time step it starts with HOST phase and advances to SPK. 
+ Afterwards it loops: SPK -> PRE_MGMT -> LRN -> POST_MGMT -> SPK + On the last time step POST_MGMT advances to HOST phase.""" if curr_phase == LoihiPyRuntimeService.Phase.SPK: return LoihiPyRuntimeService.Phase.PRE_MGMT elif curr_phase == LoihiPyRuntimeService.Phase.PRE_MGMT: @@ -108,19 +112,22 @@ def _next_phase(self, curr_phase, is_last_time_step: bool): return LoihiPyRuntimeService.Phase.SPK def _send_pm_cmd(self, phase: MGMT_COMMAND): + """Sends a command (phase information) to all ProcessModels.""" for send_port in self.service_to_process_cmd: send_port.send(phase) def _send_pm_req_given_model_id(self, model_id: int, *requests): - process_idx: int = self.model_ids.index(model_id) - req_port: CspSendPort = self.service_to_process_req[process_idx] + """Sends requests to a ProcessModel given by the model id.""" + process_idx = self.model_ids.index(model_id) + req_port = self.service_to_process_req[process_idx] for request in requests: req_port.send(request) def _get_pm_resp(self) -> ty.Iterable[MGMT_RESPONSE]: + """Retrieves responses of all ProcessModels.""" rcv_msgs = [] - num_responses_expected: int = len(self.model_ids) - counter: int = 0 + num_responses_expected = len(self.model_ids) + counter = 0 while counter < num_responses_expected: ptos_recv_port = self.process_to_service_ack[counter] if ptos_recv_port.probe(): @@ -129,70 +136,91 @@ def _get_pm_resp(self) -> ty.Iterable[MGMT_RESPONSE]: return rcv_msgs def _relay_to_runtime_data_given_model_id(self, model_id: int): - """Relays data received from pm to runtime""" - process_idx: int = self.model_ids.index(model_id) + """Relays data received from ProcessModel given by model id to the + runtime""" + process_idx = self.model_ids.index(model_id) - data_recv_port: CspRecvPort = self.process_to_service_data[process_idx] - data_relay_port: CspSendPort = self.service_to_runtime_data - num_items: np.ndarray = data_recv_port.recv() + data_recv_port = self.process_to_service_data[process_idx] + data_relay_port = 
self.service_to_runtime_data + num_items = data_recv_port.recv() data_relay_port.send(num_items) for i in range(num_items[0]): data_relay_port.send(data_recv_port.recv()) def _relay_to_pm_data_given_model_id(self, model_id: int): - """Relays data received from runtime to pm""" - process_idx: int = self.model_ids.index(model_id) - - data_recv_port: CspRecvPort = self.runtime_to_service_data - data_relay_port: CspSendPort = self.service_to_process_data[process_idx] - # recv and relay num_items - num_items: np.ndarray = data_recv_port.recv() + """Relays data received from the runtime to the ProcessModel given by + the model id.""" + process_idx = self.model_ids.index(model_id) + + data_recv_port = self.runtime_to_service_data + data_relay_port = self.service_to_process_data[process_idx] + # Receive and relay number of items + num_items = data_recv_port.recv() data_relay_port.send(num_items) - # recv and relay data1, data2, ... + # Receive and relay data1, data2, ... for i in range(num_items[0].item()): data_relay_port.send(data_recv_port.recv()) def _relay_pm_ack_given_model_id(self, model_id: int): - """Relays ack received from pm to runtime""" - process_idx: int = self.model_ids.index(model_id) + """Relays ack received from ProcessModel given by model id to the + runtime.""" + process_idx = self.model_ids.index(model_id) - ack_recv_port: CspRecvPort = self.process_to_service_ack[process_idx] - ack_relay_port: CspSendPort = self.service_to_runtime_ack + ack_recv_port = self.process_to_service_ack[process_idx] + ack_relay_port = self.service_to_runtime_ack ack_relay_port.send(ack_recv_port.recv()) def run(self): + """Retrieves commands from the runtime. On STOP or PAUSE commands all + ProcessModels are notified and expected to TERMINATE or PAUSE, + respectively. Otherwise the number of time steps is received as command. + In this case iterate through the phases of the Loihi protocol until the + last time step is reached. 
The runtime is informed after the last time + step. The loop ends when receiving the STOP command from the runtime.""" phase = LoihiPyRuntimeService.Phase.HOST while True: + # Probe if there is a new command from the runtime if self.runtime_to_service_cmd.probe(): command = self.runtime_to_service_cmd.recv() if np.array_equal(command, MGMT_COMMAND.STOP): + # Inform all ProcessModels about the STOP command self._send_pm_cmd(command) rsps = self._get_pm_resp() for rsp in rsps: if not np.array_equal(rsp, MGMT_RESPONSE.TERMINATED): raise ValueError(f"Wrong Response Received : {rsp}") + # Inform the runtime about successful termination self.service_to_runtime_ack.send(MGMT_RESPONSE.TERMINATED) self.join() return elif np.array_equal(command, MGMT_COMMAND.PAUSE): + # Inform all ProcessModels about the PAUSE command self._send_pm_cmd(command) rsps = self._get_pm_resp() for rsp in rsps: if not np.array_equal(rsp, MGMT_RESPONSE.PAUSED): raise ValueError(f"Wrong Response Received : {rsp}") + # Inform the runtime about successful pausing self.service_to_runtime_ack.send(MGMT_RESPONSE.PAUSED) break else: + # The number of time steps was received ("command") + # Start iterating through Loihi phases curr_time_step = 0 phase = LoihiPyRuntimeService.Phase.HOST while True: + # Check if it is the last time step is_last_ts = np.array_equal(enum_to_np(curr_time_step), command) + # Advance to the next phase phase = self._next_phase(phase, is_last_ts) + # Increase time step if spiking phase if np.array_equal(phase, LoihiPyRuntimeService.Phase.SPK): curr_time_step += 1 + # Inform ProcessModels about current phase self._send_pm_cmd(phase) + # ProcessModels respond with DONE if not HOST phase if not np.array_equal( phase, LoihiPyRuntimeService.Phase.HOST): rsps = self._get_pm_resp() @@ -201,12 +229,15 @@ def run(self): raise ValueError( f"Wrong Response Received : {rsp}") + # If HOST phase (last time step ended) break the loop if np.array_equal( phase, LoihiPyRuntimeService.Phase.HOST): 
break + # Inform the runtime that last time step was reached self.service_to_runtime_ack.send(MGMT_RESPONSE.DONE) + # Handle get/set Var self._handle_get_set(phase) def _handle_get_set(self, phase): diff --git a/tests/lava/magma/compiler/test_compiler.py b/tests/lava/magma/compiler/test_compiler.py index 1a5c8f968..7bcfe3476 100644 --- a/tests/lava/magma/compiler/test_compiler.py +++ b/tests/lava/magma/compiler/test_compiler.py @@ -16,14 +16,15 @@ from lava.magma.core.sync.protocols.async_protocol import AsyncProtocol from lava.magma.core.process.ports.ports import ( InPort, OutPort, RefPort, VarPort) -from lava.magma.core.model.py.ports import PyInPort, PyOutPort, PyRefPort +from lava.magma.core.model.py.ports import PyInPort, PyOutPort, PyRefPort, \ + PyVarPort from lava.magma.core.run_configs import RunConfig from lava.magma.core.model.py.type import LavaPyType from lava.magma.core.process.variable import Var, VarServer from lava.magma.core.resources import CPU -# minimal process with an InPort and OutPortA +# A minimal process (A) with an InPort, OutPort and RefPort class ProcA(AbstractProcess): def __init__(self, **kwargs): super().__init__(**kwargs) @@ -33,7 +34,7 @@ def __init__(self, **kwargs): self.ref = RefPort(shape=(10,)) -# Another minimal process (does not matter that it's identical to ProcA) +# Another minimal process (B) with a Var and an InPort, OutPort and VarPort class ProcB(AbstractProcess): def __init__(self, **kwargs): super().__init__(**kwargs) @@ -44,7 +45,7 @@ def __init__(self, **kwargs): self.var_port = VarPort(self.some_var) -# Another minimal process (does not matter that it's identical to ProcA) +# Another minimal process (C) with an InPort and OutPort class ProcC(AbstractProcess): def __init__(self, **kwargs): super().__init__(**kwargs) @@ -90,7 +91,7 @@ class PyProcModelB(AbstractPyProcessModel): inp: PyInPort = LavaPyType(PyInPort.VEC_DENSE, int) out: PyOutPort = LavaPyType(PyOutPort.VEC_DENSE, int) some_var: int = LavaPyType(int, 
int) - var_port: PyInPort = LavaPyType(PyInPort.VEC_DENSE, int) + var_port: PyVarPort = LavaPyType(PyVarPort.VEC_DENSE, int) def run(self): pass @@ -269,7 +270,7 @@ def test_find_process_circular(self): def test_find_process_ref_ports(self): """Checks finding all processes for RefPort connection. - [p1 -> ref/var -> p2 -> in/out -> p3]""" + [p1 -> ref/var -> p2 -> out/in -> p3]""" # Create processes p1, p2, p3 = ProcA(), ProcB(), ProcC() diff --git a/tests/lava/magma/core/model/test_py_model.py b/tests/lava/magma/core/model/test_py_model.py index 1cd6affb9..4f9b7a9a4 100644 --- a/tests/lava/magma/core/model/test_py_model.py +++ b/tests/lava/magma/core/model/test_py_model.py @@ -469,10 +469,10 @@ def test_set_ref_var_ports(self): getattr(PyProcModelRefVar, pt.name).d_type, pt.__class__.__name__, 32, PyRefPort.VEC_DENSE) for pt in ports] - # Later, the Runtime, would normally create CspPorts that implements - # the actual message passing via channels between RefPorts and - # VarPorts. Here we just create some fake CspPorts for each Ref- and - # VarPort. 2 CspChannels per Ref-/VarPort. + # The Runtime, would normally create CspPorts that implement the actual + # message passing via channels between RefPorts and VarPorts. Here we + # just create some fake CspPorts for each Ref- and VarPort. + # 2 CspChannels per Ref-/VarPort. 
csp_ports = [] for port in list(ref_ports): csp_ports.append(FakeCspPort(port.name)) diff --git a/tests/lava/magma/core/process/test_lif_dense_lif.py b/tests/lava/magma/core/process/test_lif_dense_lif.py index 879918023..3d63f36b2 100644 --- a/tests/lava/magma/core/process/test_lif_dense_lif.py +++ b/tests/lava/magma/core/process/test_lif_dense_lif.py @@ -30,7 +30,7 @@ def test_lif_dense_lif(self): self.lif1 = LIF() self.dense = Dense() self.lif2 = LIF() - self.lif1.out_ports.s_out.connect(self.dense.in_ports.s_in) + #self.lif1.out_ports.s_out.connect(self.dense.in_ports.s_in) self.dense.out_ports.a_out.connect(self.lif2.in_ports.a_in) self.lif1.run(condition=RunSteps(num_steps=10), run_cfg=SimpleRunConfig(sync_domains=[])) diff --git a/tests/lava/magma/core/process/test_ports.py b/tests/lava/magma/core/process/test_ports.py index 7c611f25a..2e55da7df 100644 --- a/tests/lava/magma/core/process/test_ports.py +++ b/tests/lava/magma/core/process/test_ports.py @@ -1,5 +1,6 @@ # Copyright (C) 2021 Intel Corporation -# SPDX-License-Identifier: BSD-3-Clause +# SPDX-License-Identifier: BSD-3-Clause +# See: https://spdx.org/licenses/ import unittest from lava.magma.core.process.process import AbstractProcess from lava.magma.core.process.ports.ports import ( @@ -299,7 +300,7 @@ class VarProcess(AbstractProcess): rp = RefPort((1, 2, 3)) # ...register a process for the Var and name it so it conflicts with - # the attribute ov VarProcess (very unlikely to happen) + # the attribute of VarProcess (very unlikely to happen) v.process = VarProcess() v.name = "existing_attr" diff --git a/tests/lava/magma/runtime/test_ref_var_ports.py b/tests/lava/magma/runtime/test_ref_var_ports.py index 70c135647..30c95d219 100644 --- a/tests/lava/magma/runtime/test_ref_var_ports.py +++ b/tests/lava/magma/runtime/test_ref_var_ports.py @@ -19,32 +19,32 @@ from lava.magma.core.run_conditions import RunSteps -# minimal process with an OutPort +# A minimal process with a Var and a RefPort, VarPort class 
P1(AbstractProcess): def __init__(self, **kwargs): super().__init__(**kwargs) - self.ref = RefPort(shape=(3,)) - self.v = Var(shape=(2,), init=17) - self.var_port_read = VarPort(self.v) + self.ref1 = RefPort(shape=(3,)) + self.var1 = Var(shape=(2,), init=17) + self.var_port_var1 = VarPort(self.var1) -# minimal process with an InPort +# A minimal process with 2 Vars and a RefPort, VarPort class P2(AbstractProcess): def __init__(self, **kwargs): super().__init__(**kwargs) - self.var = Var(shape=(3,), init=4) - self.var_port = VarPort(self.var) - self.ref_read = RefPort(shape=(2,)) - self.var_read = Var(shape=(2,), init=1) + self.var2 = Var(shape=(3,), init=4) + self.var_port_var2 = VarPort(self.var2) + self.ref2 = RefPort(shape=(2,)) + self.var3 = Var(shape=(2,), init=1) # A minimal PyProcModel implementing P1 @implements(proc=P1, protocol=LoihiProtocol) @requires(CPU) class PyProcModel1(PyLoihiProcessModel): - ref: PyRefPort = LavaPyType(PyRefPort.VEC_DENSE, int) - v: np.ndarray = LavaPyType(np.ndarray, np.int32) - var_port_read: PyVarPort = LavaPyType(PyVarPort.VEC_DENSE, int) + ref1: PyRefPort = LavaPyType(PyRefPort.VEC_DENSE, int) + var1: np.ndarray = LavaPyType(np.ndarray, np.int32) + var_port_var1: PyVarPort = LavaPyType(PyVarPort.VEC_DENSE, int) def pre_guard(self): return True @@ -52,24 +52,30 @@ def pre_guard(self): def run_pre_mgmt(self): if self.current_ts > 1: ref_data = np.array([5, 5, 5]) + self.current_ts - self.ref.write(ref_data) + self.ref1.write(ref_data) # A minimal PyProcModel implementing P2 @implements(proc=P2, protocol=LoihiProtocol) @requires(CPU) class PyProcModel2(PyLoihiProcessModel): - ref_read: PyRefPort = LavaPyType(PyRefPort.VEC_DENSE, int) - var: np.ndarray = LavaPyType(np.ndarray, np.int32) - var_port: PyVarPort = LavaPyType(PyVarPort.VEC_DENSE, int) - var_read: np.ndarray = LavaPyType(np.ndarray, np.int32) + ref2: PyRefPort = LavaPyType(PyRefPort.VEC_DENSE, int) + var2: np.ndarray = LavaPyType(np.ndarray, np.int32) + var_port_var2: 
PyVarPort = LavaPyType(PyVarPort.VEC_DENSE, int) + var3: np.ndarray = LavaPyType(np.ndarray, np.int32) def pre_guard(self): return True def run_pre_mgmt(self): if self.current_ts > 1: - self.var_read = self.ref_read.read() + self.var3 = self.ref2.read() + + +# A simple RunConfig selecting always the first found process model +class MyRunCfg(RunConfig): + def select(self, proc, proc_models): + return proc_models[0] class TestRefVarPorts(unittest.TestCase): @@ -80,14 +86,10 @@ def test_unconnected_Ref_Var_ports(self): # No connections are made - class MyRunCfg(RunConfig): - def select(self, proc, proc_models): - return proc_models[0] - simple_sync_domain = SyncDomain("simple", LoihiProtocol(), [sender]) - # should run without error (not doing anything) + # The process should compile and run without error (not doing anything) sender.run(RunSteps(num_steps=3, blocking=True), MyRunCfg(custom_sync_domains=[simple_sync_domain])) sender.stop() @@ -102,32 +104,28 @@ def test_explicit_Ref_Var_port_write(self): recv = P2() # Connect RefPort with explicit VarPort - sender.ref.connect(recv.var_port) - - class MyRunCfg(RunConfig): - def select(self, proc, proc_models): - return proc_models[0] + sender.ref1.connect(recv.var_port_var2) simple_sync_domain = SyncDomain("simple", LoihiProtocol(), [sender, recv]) - # first time step, no data is sent + # First time step, no data is sent sender.run(RunSteps(num_steps=1, blocking=True), MyRunCfg(custom_sync_domains=[simple_sync_domain])) - # initial value is expected - self.assertTrue(np.all(recv.var.get() == np.array([4., 4., 4.]))) - # second time step, data is sent (7) + # Initial value is expected + self.assertTrue(np.all(recv.var2.get() == np.array([4., 4., 4.]))) + # Second time step, data is sent (7) sender.run(RunSteps(num_steps=1, blocking=True), MyRunCfg(custom_sync_domains=[simple_sync_domain])) - self.assertTrue(np.all(recv.var.get() == np.array([7., 7., 7.]))) - # third time step, data is sent (8) + 
self.assertTrue(np.all(recv.var2.get() == np.array([7., 7., 7.]))) + # Third time step, data is sent (8) sender.run(RunSteps(num_steps=1, blocking=True), MyRunCfg(custom_sync_domains=[simple_sync_domain])) - self.assertTrue(np.all(recv.var.get() == np.array([8., 8., 8.]))) - # fourth time step, data is sent (9) + self.assertTrue(np.all(recv.var2.get() == np.array([8., 8., 8.]))) + # Fourth time step, data is sent (9) sender.run(RunSteps(num_steps=1, blocking=True), MyRunCfg(custom_sync_domains=[simple_sync_domain])) - self.assertTrue(np.all(recv.var.get() == np.array([9., 9., 9.]))) + self.assertTrue(np.all(recv.var2.get() == np.array([9., 9., 9.]))) sender.stop() def test_implicit_Ref_Var_port_write(self): @@ -140,32 +138,28 @@ def test_implicit_Ref_Var_port_write(self): recv = P2() # Connect RefPort with Var using an implicit VarPort - sender.ref.connect_var(recv.var) - - class MyRunCfg(RunConfig): - def select(self, proc, proc_models): - return proc_models[0] + sender.ref1.connect_var(recv.var2) simple_sync_domain = SyncDomain("simple", LoihiProtocol(), [sender, recv]) - # first time step, no data is sent + # First time step, no data is sent sender.run(RunSteps(num_steps=1, blocking=True), MyRunCfg(custom_sync_domains=[simple_sync_domain])) - # initial value is expected - self.assertTrue(np.all(recv.var.get() == np.array([4., 4., 4.]))) - # second time step, data is sent (7) + # Initial value is expected + self.assertTrue(np.all(recv.var2.get() == np.array([4., 4., 4.]))) + # Second time step, data is sent (7) sender.run(RunSteps(num_steps=1, blocking=True), MyRunCfg(custom_sync_domains=[simple_sync_domain])) - self.assertTrue(np.all(recv.var.get() == np.array([7., 7., 7.]))) - # third time step, data is sent (8) + self.assertTrue(np.all(recv.var2.get() == np.array([7., 7., 7.]))) + # Third time step, data is sent (8) sender.run(RunSteps(num_steps=1, blocking=True), MyRunCfg(custom_sync_domains=[simple_sync_domain])) - self.assertTrue(np.all(recv.var.get() == 
np.array([8., 8., 8.]))) - # fourth time step, data is sent (9) + self.assertTrue(np.all(recv.var2.get() == np.array([8., 8., 8.]))) + # Fourth time step, data is sent (9) sender.run(RunSteps(num_steps=1, blocking=True), MyRunCfg(custom_sync_domains=[simple_sync_domain])) - self.assertTrue(np.all(recv.var.get() == np.array([9., 9., 9.]))) + self.assertTrue(np.all(recv.var2.get() == np.array([9., 9., 9.]))) sender.stop() def test_explicit_Ref_Var_port_read(self): @@ -179,26 +173,22 @@ def test_explicit_Ref_Var_port_read(self): recv = P2() # Connect RefPort with explicit VarPort - recv.ref_read.connect(sender.var_port_read) - - class MyRunCfg(RunConfig): - def select(self, proc, proc_models): - return proc_models[0] + recv.ref2.connect(sender.var_port_var1) simple_sync_domain = SyncDomain("simple", LoihiProtocol(), [sender, recv]) - # first time step, no read + # First time step, no read sender.run(RunSteps(num_steps=1, blocking=True), MyRunCfg(custom_sync_domains=[simple_sync_domain])) - # initial value (1) is expected - self.assertTrue(np.all(recv.var_read.get() == np.array([1., 1.]))) - # second time step, the RefPort read from the VarPort and wrote the - # result in "var_read" (= 17) + # Initial value (1) is expected + self.assertTrue(np.all(recv.var3.get() == np.array([1., 1.]))) + # Second time step, the RefPort read from the VarPort and wrote the + # Result in "var_read" (= 17) sender.run(RunSteps(num_steps=1, blocking=True), MyRunCfg(custom_sync_domains=[simple_sync_domain])) self.assertTrue( - np.all(recv.var_read.get() == np.array([17., 17.]))) + np.all(recv.var3.get() == np.array([17., 17.]))) sender.stop() def test_implicit_Ref_Var_port_read(self): @@ -212,26 +202,22 @@ def test_implicit_Ref_Var_port_read(self): recv = P2() # Connect RefPort with explicit VarPort - recv.ref_read.connect_var(sender.v) - - class MyRunCfg(RunConfig): - def select(self, proc, proc_models): - return proc_models[0] + recv.ref2.connect_var(sender.var1) simple_sync_domain = 
SyncDomain("simple", LoihiProtocol(), [sender, recv]) - # first time step, no read + # First time step, no read recv.run(RunSteps(num_steps=1, blocking=True), MyRunCfg(custom_sync_domains=[simple_sync_domain])) - # initial value (1) is expected - self.assertTrue(np.all(recv.var_read.get() == np.array([1., 1.]))) - # second time step, the RefPort read from the VarPort and wrote the - # result in "var_read" (= 17) + # Initial value (1) is expected + self.assertTrue(np.all(recv.var3.get() == np.array([1., 1.]))) + # Second time step, the RefPort read from the VarPort and wrote the + # Result in "var_read" (= 17) recv.run(RunSteps(num_steps=1, blocking=True), MyRunCfg(custom_sync_domains=[simple_sync_domain])) self.assertTrue( - np.all(recv.var_read.get() == np.array([17., 17.]))) + np.all(recv.var3.get() == np.array([17., 17.]))) recv.stop() From 5908401f36e0212625b035b69cdd0b374578811f Mon Sep 17 00:00:00 2001 From: PhilippPlank Date: Tue, 16 Nov 2021 13:07:09 +0100 Subject: [PATCH 06/14] - Enablement of RefPorts and VarPorts - addressed change requests from PR #46 --- tests/lava/magma/core/process/test_lif_dense_lif.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/lava/magma/core/process/test_lif_dense_lif.py b/tests/lava/magma/core/process/test_lif_dense_lif.py index 3d63f36b2..879918023 100644 --- a/tests/lava/magma/core/process/test_lif_dense_lif.py +++ b/tests/lava/magma/core/process/test_lif_dense_lif.py @@ -30,7 +30,7 @@ def test_lif_dense_lif(self): self.lif1 = LIF() self.dense = Dense() self.lif2 = LIF() - #self.lif1.out_ports.s_out.connect(self.dense.in_ports.s_in) + self.lif1.out_ports.s_out.connect(self.dense.in_ports.s_in) self.dense.out_ports.a_out.connect(self.lif2.in_ports.a_in) self.lif1.run(condition=RunSteps(num_steps=10), run_cfg=SimpleRunConfig(sync_domains=[])) From 3df2847e7611a0002ac71f263f8bcebc014e7ed1 Mon Sep 17 00:00:00 2001 From: PhilippPlank Date: Tue, 16 Nov 2021 14:45:24 +0100 Subject: [PATCH 07/14] - 
Enablement of RefPorts and VarPorts - addressed change requests from PR #46 --- tests/lava/magma/runtime/test_ref_var_ports.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/lava/magma/runtime/test_ref_var_ports.py b/tests/lava/magma/runtime/test_ref_var_ports.py index 30c95d219..fbd62c576 100644 --- a/tests/lava/magma/runtime/test_ref_var_ports.py +++ b/tests/lava/magma/runtime/test_ref_var_ports.py @@ -95,7 +95,7 @@ def test_unconnected_Ref_Var_ports(self): sender.stop() def test_explicit_Ref_Var_port_write(self): - """Tests the connection of a RefPort to a explicitly created VarPort. + """Tests the connection of a RefPort to an explicitly created VarPort. The RefPort sends data after the first time step to the VarPort, starting with (5 + current time step) = 7). The initial value of the var is 4. We read out the value after each time step.""" @@ -129,7 +129,7 @@ def test_explicit_Ref_Var_port_write(self): sender.stop() def test_implicit_Ref_Var_port_write(self): - """Tests the connection of a RefPort to a implicitly created VarPort. + """Tests the connection of a RefPort to an implicitly created VarPort. The RefPort sends data after the first time step to the VarPort, starting with (5 + current time step) = 7). The initial value of the var is 4. We read out the value after each time step.""" @@ -163,7 +163,7 @@ def test_implicit_Ref_Var_port_write(self): sender.stop() def test_explicit_Ref_Var_port_read(self): - """Tests the connection of a RefPort to a explicitly created VarPort. + """Tests the connection of a RefPort to an explicitly created VarPort. The RefPort "ref_read" reads data after the first time step of the VarPort "var_port_read" which has the value of the Var "v" (= 17) and writes this value into the Var "var_read". 
The initial value of the var @@ -192,7 +192,7 @@ def test_explicit_Ref_Var_port_read(self): sender.stop() def test_implicit_Ref_Var_port_read(self): - """Tests the connection of a RefPort to a implicitly created VarPort. + """Tests the connection of a RefPort to an implicitly created VarPort. The RefPort "ref_read" reads data after the first time step of the of the Var "v" (= 17) using an implicit VarPort and writes this value into the Var "var_read". The initial value of the var "var_read" is 1. From 6e4716ad71b651a70dad634d8d19c424ce2931a6 Mon Sep 17 00:00:00 2001 From: PhilippPlank Date: Tue, 16 Nov 2021 22:23:27 +0100 Subject: [PATCH 08/14] - Enablement of RefPorts and VarPorts - addressed change requests from PR #46 --- tests/lava/magma/core/process/test_ports.py | 62 +++++++++++++++++++-- 1 file changed, 58 insertions(+), 4 deletions(-) diff --git a/tests/lava/magma/core/process/test_ports.py b/tests/lava/magma/core/process/test_ports.py index 2e55da7df..eed505f4a 100644 --- a/tests/lava/magma/core/process/test_ports.py +++ b/tests/lava/magma/core/process/test_ports.py @@ -295,10 +295,6 @@ class VarProcess(AbstractProcess): v = Var((1, 2, 3)) rp = RefPort((1, 2, 3)) - # Create a Var and RefPort... - v = Var((1, 2, 3)) - rp = RefPort((1, 2, 3)) - # ...register a process for the Var and name it so it conflicts with # the attribute of VarProcess (very unlikely to happen) v.process = VarProcess() @@ -351,6 +347,64 @@ def test_connect_RefPort_to_non_sharable_Var(self): with self.assertRaises(VarNotSharableError): rp.connect_var(v) + def test_connect_RefPort_to_InPort_OutPort(self): + """Checks connecting RefPort to an InPort or OutPort. -> TypeError""" + + # Create an InPort, OutPort, RefPort... + ip = InPort((1, 2, 3)) + op = OutPort((1, 2, 3)) + rp = RefPort((1, 2, 3)) + + # ... and connect them via connect(..) 
+ # The type conflict should raise an TypeError + with self.assertRaises(TypeError): + rp.connect(ip) + + with self.assertRaises(TypeError): + rp.connect(op) + + # Connect them via connect_from(..) + # The type conflict should raise an TypeError + with self.assertRaises(TypeError): + rp.connect_from(ip) + + with self.assertRaises(TypeError): + rp.connect_from(op) + + def test_connect_VarPort_to_InPort_OutPort_RefPort(self): + """Checks connecting VarPort to an InPort, OutPort or RefPort. + -> TypeError (RefPort can only be connected via connect_from(..) to + VarPort.""" + + # Create an InPort, OutPort, RefPort, Var with VarPort... + ip = InPort((1, 2, 3)) + op = OutPort((1, 2, 3)) + rp = RefPort((1, 2, 3)) + v = Var((1, 2, 3)) + vp = VarPort(v) + + # ... and connect them via connect(..) + # The type conflict should raise an TypeError + with self.assertRaises(TypeError): + vp.connect(ip) + + with self.assertRaises(TypeError): + vp.connect(op) + + with self.assertRaises(TypeError): + vp.connect(rp) + + # Connect them via connect_from(..) + # The type conflict should raise an TypeError + with self.assertRaises(TypeError): + vp.connect_from(ip) + + with self.assertRaises(TypeError): + vp.connect_from(op) + + # Connect RefPort via connect_from(..) raises no error + vp.connect_from(rp) + class TestVirtualPorts(unittest.TestCase): """Contains unit tests around virtual ports. 
Virtual ports are derived From bf934ef75d40b4950a78249e7d4e88b20e476e90 Mon Sep 17 00:00:00 2001 From: Philipp Plank Date: Fri, 26 Nov 2021 08:00:21 -0800 Subject: [PATCH 09/14] modified connection tutorial for release 0.2.0 --- .../tutorial05_connect_processes.ipynb | 252 +++++++----------- 1 file changed, 91 insertions(+), 161 deletions(-) diff --git a/tutorials/in_depth/tutorial05_connect_processes.ipynb b/tutorials/in_depth/tutorial05_connect_processes.ipynb index b660dce64..903c672bb 100644 --- a/tutorials/in_depth/tutorial05_connect_processes.ipynb +++ b/tutorials/in_depth/tutorial05_connect_processes.ipynb @@ -2,7 +2,6 @@ "cells": [ { "cell_type": "markdown", - "id": "19a680fd", "metadata": { "pycharm": { "name": "#%% md\n" @@ -40,8 +39,7 @@ }, { "cell_type": "code", - "execution_count": null, - "id": "6f7a5e6a", + "execution_count": 1, "metadata": {}, "outputs": [], "source": [ @@ -51,7 +49,6 @@ }, { "cell_type": "markdown", - "id": "66d488fc", "metadata": {}, "source": [ "As first step we define the _Processes_ _P1_ and _P2_ with their respective _Ports_ _out_ and _inp_." 
@@ -59,8 +56,7 @@ }, { "cell_type": "code", - "execution_count": null, - "id": "94fcc5b4", + "execution_count": 2, "metadata": {}, "outputs": [], "source": [ @@ -82,7 +78,6 @@ }, { "cell_type": "markdown", - "id": "3336d5e4", "metadata": {}, "source": [ "_Process_ _P1_ and _P2_ require a corresponding _ProcessModel_, which implements their _Ports_ and a simple run configuration for sending and receiving data.\n", @@ -94,52 +89,52 @@ }, { "cell_type": "code", - "execution_count": null, - "id": "d9abdcda", + "execution_count": 3, "metadata": {}, "outputs": [], "source": [ "import numpy as np\n", - "from lava.magma.core.model.py.model import AbstractPyProcessModel\n", - "from lava.magma.core.decorator import implements, requires\n", + "from lava.magma.core.model.py.model import PyLoihiProcessModel\n", + "from lava.magma.core.decorator import implements, requires, tag\n", "from lava.magma.core.resources import CPU\n", "from lava.magma.core.model.py.type import LavaPyType\n", - "from lava.magma.core.model.py.ports import PyInPort, PyOutPort" + "from lava.magma.core.model.py.ports import PyInPort, PyOutPort\n", + "from lava.magma.core.sync.protocols.loihi_protocol import LoihiProtocol" ] }, { "cell_type": "code", - "execution_count": null, - "id": "617266c1", + "execution_count": 4, "metadata": {}, "outputs": [], "source": [ "# A minimal PyProcModel implementing P1\n", - "@implements(proc=P1)\n", + "@implements(proc=P1, protocol=LoihiProtocol)\n", "@requires(CPU)\n", - "class PyProcModelA(AbstractPyProcessModel):\n", + "@tag('floating_pt')\n", + "class PyProcModelA(PyLoihiProcessModel):\n", " out: PyOutPort = LavaPyType(PyOutPort.VEC_DENSE, int)\n", "\n", - " def run(self):\n", + " def run_spk(self):\n", " data = np.array([1, 2])\n", + " print(\"Sent output data of P1: {}\".format(data))\n", " self.out.send(data)\n", - " print(\"Sent output data of P1: \", str(data))\n", "\n", "\n", "# A minimal PyProcModel implementing P2\n", - "@implements(proc=P2)\n", + 
"@implements(proc=P2, protocol=LoihiProtocol)\n", "@requires(CPU)\n", - "class PyProcModelB(AbstractPyProcessModel):\n", + "@tag('floating_pt')\n", + "class PyProcModelB(PyLoihiProcessModel):\n", " inp: PyInPort = LavaPyType(PyInPort.VEC_DENSE, int)\n", "\n", - " def run(self):\n", + " def run_spk(self):\n", " in_data = self.inp.recv()\n", - " print(\"Received input data for P2: \", str(in_data))" + " print(\"Received input data for P2: {}\".format(in_data))" ] }, { "cell_type": "markdown", - "id": "9c566e74", "metadata": {}, "source": [ "Next the processes _P1_ and _P2_ are instantiated and the output _Port_ _out_ from _Process_ _P1_ is connected with the input _Port_ _inp_ of _Process_ _P2_." @@ -147,8 +142,7 @@ }, { "cell_type": "code", - "execution_count": null, - "id": "ba4e032e", + "execution_count": 5, "metadata": {}, "outputs": [], "source": [ @@ -167,7 +161,6 @@ }, { "cell_type": "markdown", - "id": "6ef166b8", "metadata": {}, "source": [ "Calling `run()` on either of these _Processes_ will first call the _Compiler_. During compilation the specified connection is setup by creating a channel between _P1_ and _P2_. Now data can be transfered during execution as seen by the output print statements." 
@@ -175,48 +168,48 @@ }, { "cell_type": "code", - "execution_count": null, - "id": "d9abc8cc", + "execution_count": 6, "metadata": {}, "outputs": [], "source": [ - "from lava.magma.core.run_configs import RunConfig\n", + "from lava.magma.core.run_configs import Loihi1SimCfg\n", "from lava.magma.core.run_conditions import RunSteps" ] }, { "cell_type": "code", - "execution_count": null, - "id": "d4745d9b", - "metadata": {}, - "outputs": [], - "source": [ - "class MyRunCfg(RunConfig):\n", - " def select(self, proc, proc_models):\n", - " return proc_models[0]" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "310954ce", + "execution_count": 7, "metadata": { "scrolled": true }, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Sent output data of P1: [1 2]\n", + "Received input data for P2: [1 2]\n" + ] + } + ], "source": [ - "\n", - "sender.run(RunSteps(num_steps=42, blocking=False), MyRunCfg())\n", + "sender.run(RunSteps(num_steps=1), Loihi1SimCfg())\n", "sender.stop()" ] }, { "cell_type": "markdown", - "id": "bf122ff7", + "metadata": {}, + "source": [ + "The instance `sender` of P1 sent the data `[1 2]` via its _OutPort_ `out` to the _InPort_ `in` of the instance `recv` of P2, where the data is received." + ] + }, + { + "cell_type": "markdown", "metadata": {}, "source": [ "## Possible connections\n", - "This first example was very simple. In principle _Processes_ can have multiple input and output _Ports_, which can be freely connected with each other. Also _Processes_ which execute on different devices can be connected in the same way.\n", + "This first example was very simple. In principle _Processes_ can have multiple input and output _Ports_ which can be freely connected with each other. 
Also, _Processes_ which execute on different compute resources can be connected in the same way.\n", "\n", "\n", "\n", @@ -224,9 +217,13 @@ "- _InPorts_ cannot connect to _OutPorts_\n", "- Shape and datatype of connect _Ports_ must match\n", "- An _InPort_ might get data from multiple _OutPorts_ - default behavior is a summation of the incoming data\n", - "- An _OutPort_ might send data to multiple _InPorts_ - all _InPorts_ receive the same data\n", - "\n", - "\n", + "- An _OutPort_ might send data to multiple _InPorts_ - all _InPorts_ receive the same data\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ "## Connect multiple _InPorts_ from a single _OutPort_\n", "\n", "" @@ -234,8 +231,7 @@ }, { "cell_type": "code", - "execution_count": null, - "id": "2c237a96", + "execution_count": 8, "metadata": {}, "outputs": [], "source": [ @@ -261,30 +257,46 @@ }, { "cell_type": "code", - "execution_count": null, - "id": "5d5c0e38", + "execution_count": 9, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Sent output data of P1: [1 2]\n", + "Received input data for P2: [1 2]Received input data for P2: [1 2]\n", + "Received input data for P2: [1 2]\n", + "\n" + ] + } + ], + "source": [ + "sender.run(RunSteps(num_steps=1), Loihi1SimCfg())\n", + "sender.stop()" + ] + }, + { + "cell_type": "markdown", "metadata": {}, - "outputs": [], "source": [ - "# TODO: run it" + "The instance `sender` of P1 sent the data `[1 2]` to the 3 instances `recv1, recv2, recv3` of P2." ] }, { "cell_type": "markdown", - "id": "b2edbeaf", "metadata": {}, "source": [ "## Connecting multiple _InPorts_ to a single _OutPort_\n", "\n", - "If multiple input _Ports_ connect to the same output _Port_, the default behavior is that the data from each input _Port_ is added up at the output _Port_. 
Another reduce operation would be multiplication.\n", + "If multiple input _Ports_ connect to the same output _Port_ the default behavior is that the data from each input _Port_ is added up at the output _Port_.\n", "\n", "" ] }, { "cell_type": "code", - "execution_count": null, - "id": "523d8a27", + "execution_count": 10, "metadata": {}, "outputs": [], "source": [ @@ -310,116 +322,34 @@ }, { "cell_type": "code", - "execution_count": null, - "id": "347de914", - "metadata": {}, - "outputs": [], - "source": [ - "# TODO: show run" - ] - }, - { - "cell_type": "markdown", - "id": "7694793b", - "metadata": {}, - "source": [ - "## Connecting _Ports_ with different shapes\n", - "\n", - "It cannot be assumed that every _Process_ sends or receives data in the same shape. There might be _Processes_ from different users which work with different vector shapes for various reasons. Still, it is possible to connect such _Processes_ using the reshape feature of _Ports_.\n", - "\n", - "" - ] - }, - { - "cell_type": "markdown", - "id": "56aaf1ef", - "metadata": {}, - "source": [] - }, - { - "cell_type": "markdown", - "id": "b551d9f8", - "metadata": {}, - "source": [ - "First, the _Processes_ _P1_ and _P2_ are instantiated with new shapes (1, 6) and (2, 3). The output port of _P1_ can still be conncected with the input port of _P2_, if appropriate reshape is used. The total number of elements needs to be the same for both ports." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "4c8cceb7", - "metadata": {}, - "outputs": [], - "source": [ - "sender = P1(shape=(1,6))\n", - "recv = P2(shape=(2, 3))\n", - "\n", - "# Using reshape(..), ports with different shape can be connected as\n", - "# long as total number of elements does not change\n", - "sender.out.reshape((2, 3)).connect(recv.inp)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "d8af85b6", - "metadata": {}, - "outputs": [], - "source": [ - "# TODO: show run" - ] - }, - { - "cell_type": "markdown", - "id": "95422ef6", + "execution_count": 11, "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Sent output data of P1: [1 2]Sent output data of P1: [1 2]Sent output data of P1: [1 2]\n", + "\n", + "\n", + "Received input data for P2: [3 6]\n" + ] + } + ], "source": [ - "## Concatenation of _Ports_\n", - "\n", - "" + "sender1.run(RunSteps(num_steps=1), Loihi1SimCfg())\n", + "sender1.stop()" ] }, { "cell_type": "markdown", - "id": "63ff3046", - "metadata": {}, - "source": [ - "The _Processes_ _P1_ and _P2_ are instantiated with new shapes (1, 2) and (3, 2). Now the output ports of the sending processes are concatenated before connected to the receiving port." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "c330df46", "metadata": {}, - "outputs": [], - "source": [ - "sender1 = P1(shape=(1, 2))\n", - "sender2 = P1(shape=(1, 2))\n", - "sender3 = P1(shape=(1, 2))\n", - "recv = P2(shape=(3, 2))\n", - "\n", - "# concat_with(..) 
concatenates calling port (sender1.out) with other ports\n", - "# (sender2.out, sender3.out) along given axis\n", - "cp = sender1.out.concat_with([sender2.out, sender3.out], axis=0)\n", - "\n", - " # The return value is a virtual ConcatPort which can be connected to the input port\n", - "cp.connect(recv.inp)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "fcfbce52", - "metadata": {}, - "outputs": [], "source": [ - "# TODO: run" + "The 3 instances `sender1, sender2, sender3` of P1 sent the data `[1 2]` to the instance `recv` of P2, where the data was summed up to `[3 6]`." ] }, { "cell_type": "markdown", - "id": "d6ff9409", "metadata": {}, "source": [ "## How to learn more?\n", @@ -434,9 +364,9 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3 (ipykernel)", + "display_name": "local-venv", "language": "python", - "name": "python3" + "name": "local-venv" }, "language_info": { "codemirror_mode": { From f08a35c8efcc2110bafbf86b17458cc1a9563aa3 Mon Sep 17 00:00:00 2001 From: Philipp Plank Date: Fri, 26 Nov 2021 13:28:47 -0800 Subject: [PATCH 10/14] fixed typos --- .../in_depth/tutorial05_connect_processes.ipynb | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/tutorials/in_depth/tutorial05_connect_processes.ipynb b/tutorials/in_depth/tutorial05_connect_processes.ipynb index 903c672bb..db8b9c4ae 100644 --- a/tutorials/in_depth/tutorial05_connect_processes.ipynb +++ b/tutorials/in_depth/tutorial05_connect_processes.ipynb @@ -60,7 +60,7 @@ "metadata": {}, "outputs": [], "source": [ - "# minimal process with an OutPort\n", + "# Minimal process with an OutPort\n", "class P1(AbstractProcess):\n", " def __init__(self, **kwargs):\n", " super().__init__(**kwargs)\n", @@ -68,7 +68,7 @@ " self.out = OutPort(shape=shape)\n", "\n", "\n", - "# minimal process with an InPort\n", + "# Minimal process with an InPort\n", "class P2(AbstractProcess):\n", " def __init__(self, **kwargs):\n", " super().__init__(**kwargs)\n", 
@@ -80,11 +80,11 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "_Process_ _P1_ and _P2_ require a corresponding _ProcessModel_, which implements their _Ports_ and a simple run configuration for sending and receiving data.\n", + "_Process_ _P1_ and _P2_ require a corresponding _ProcessModel_ which implements their _Ports_ and a simple RunConfig for sending and receiving data.\n", "\n", - "In the _ProcessModels_ python code should be exectued on a CPU. The input and output _Port_ should be able to receive/send a vector of integers and print the transferred data.\n", + "In the _ProcessModels_ Python code should be exectued on a CPU. The input and output _Port_ should be able to receive/send a vector of integers and print the transferred data.\n", "\n", - "So the _ProcessModel_ inherits form _AbstractPyProcessModel_ in order to execute python code and the configured _ComputeResource_ is a CPU. A _LavaPyType_ is used for the _Ports_. The _LavaPyType_ specifies the expected data format for the _Port_. A dense vector of type integer is chosen with the parameters _PyOutPort.VEC_DENSE_ and _int_. The _Ports_ can be used to send and receive data by calling _send_ or _recv_. The sent and received data is afterwards printed out." + "So the _ProcessModel_ inherits form _AbstractPyProcessModel_ in order to execute Python code and the configured _ComputeResource_ is a CPU. A _LavaPyType_ is used for the _Ports_. The _LavaPyType_ specifies the expected data format for the _Port_. A dense vector of type integer is chosen with the parameters _PyOutPort.VEC_DENSE_ and _int_. The _Ports_ can be used to send and receive data by calling _send_ or _recv_. The sent and received data is afterwards printed out." ] }, { @@ -209,7 +209,7 @@ "metadata": {}, "source": [ "## Possible connections\n", - "This first example was very simple. In principle _Processes_ can have multiple input and output _Ports_ which can be freely connected with each other. 
Also, _Processes_ which execute on different compute resources can be connected in the same way.\n", + "This first example was very simple. In principle, _Processes_ can have multiple input and output _Ports_ which can be freely connected with each other. Also, _Processes_ which execute on different compute resources can be connected in the same way.\n", "\n", "\n", "\n", From be9aa262ed86f9bfa14eca6151de68e891330bb2 Mon Sep 17 00:00:00 2001 From: PhilippPlank Date: Tue, 14 Jun 2022 19:29:23 +0200 Subject: [PATCH 11/14] Windows fix for tutorials - draft --- tutorials/in_depth/tutorial02_processes.ipynb | 92 +++++++++++++++++-- tutorials/in_depth/win_exec.py | 37 ++++++++ 2 files changed, 119 insertions(+), 10 deletions(-) create mode 100644 tutorials/in_depth/win_exec.py diff --git a/tutorials/in_depth/tutorial02_processes.ipynb b/tutorials/in_depth/tutorial02_processes.ipynb index 8f2b29b5c..0840186bd 100644 --- a/tutorials/in_depth/tutorial02_processes.ipynb +++ b/tutorials/in_depth/tutorial02_processes.ipynb @@ -90,7 +90,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 1, "id": "685af4da", "metadata": {}, "outputs": [], @@ -165,7 +165,35 @@ "execution_count": 2, "id": "bb4835e1", "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "C:\\Users\\pplank\\AppData\\Local\\Temp\\ipykernel_3388\\2851907654.py:13: DeprecationWarning: `np.float` is a deprecated alias for the builtin `float`. To silence this warning, use `float` by itself. Doing this will not modify any behavior and is safe. 
If you specifically wanted the numpy scalar type, use `np.float64` here.\n", + "Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations\n", + " a_in: PyInPort = LavaPyType(PyInPort.VEC_DENSE, np.float)\n", + "C:\\Users\\pplank\\AppData\\Local\\Temp\\ipykernel_3388\\2851907654.py:15: DeprecationWarning: `np.float` is a deprecated alias for the builtin `float`. To silence this warning, use `float` by itself. Doing this will not modify any behavior and is safe. If you specifically wanted the numpy scalar type, use `np.float64` here.\n", + "Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations\n", + " u: np.ndarray = LavaPyType(np.ndarray, np.float)\n", + "C:\\Users\\pplank\\AppData\\Local\\Temp\\ipykernel_3388\\2851907654.py:16: DeprecationWarning: `np.float` is a deprecated alias for the builtin `float`. To silence this warning, use `float` by itself. Doing this will not modify any behavior and is safe. If you specifically wanted the numpy scalar type, use `np.float64` here.\n", + "Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations\n", + " v: np.ndarray = LavaPyType(np.ndarray, np.float)\n", + "C:\\Users\\pplank\\AppData\\Local\\Temp\\ipykernel_3388\\2851907654.py:17: DeprecationWarning: `np.float` is a deprecated alias for the builtin `float`. To silence this warning, use `float` by itself. Doing this will not modify any behavior and is safe. If you specifically wanted the numpy scalar type, use `np.float64` here.\n", + "Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations\n", + " bias: np.ndarray = LavaPyType(np.ndarray, np.float)\n", + "C:\\Users\\pplank\\AppData\\Local\\Temp\\ipykernel_3388\\2851907654.py:18: DeprecationWarning: `np.float` is a deprecated alias for the builtin `float`. 
To silence this warning, use `float` by itself. Doing this will not modify any behavior and is safe. If you specifically wanted the numpy scalar type, use `np.float64` here.\n", + "Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations\n", + " du: float = LavaPyType(float, np.float)\n", + "C:\\Users\\pplank\\AppData\\Local\\Temp\\ipykernel_3388\\2851907654.py:19: DeprecationWarning: `np.float` is a deprecated alias for the builtin `float`. To silence this warning, use `float` by itself. Doing this will not modify any behavior and is safe. If you specifically wanted the numpy scalar type, use `np.float64` here.\n", + "Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations\n", + " dv: float = LavaPyType(float, np.float)\n", + "C:\\Users\\pplank\\AppData\\Local\\Temp\\ipykernel_3388\\2851907654.py:20: DeprecationWarning: `np.float` is a deprecated alias for the builtin `float`. To silence this warning, use `float` by itself. Doing this will not modify any behavior and is safe. If you specifically wanted the numpy scalar type, use `np.float64` here.\n", + "Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations\n", + " vth: float = LavaPyType(float, np.float)\n" + ] + } + ], "source": [ "import numpy as np\n", "from lava.magma.core.sync.protocols.loihi_protocol import LoihiProtocol\n", @@ -199,6 +227,31 @@ " self.s_out.send(s_out)" ] }, + { + "cell_type": "markdown", + "id": "e62a6fd4", + "metadata": {}, + "source": [ + "#### Exception for Windows\n", + "\n", + "The next cell is only needed for systems running on Windows. This is not part of the tutorial or Lava, but to allow this jupyter notebook to execute on Windows without errors. 
The class LIF and PyLifModel need to be imported from a script in order to run this tutorial on Windows due to an issue with the multiprocessing package on Windows. Therefore we create a python script from this jupyter notebook and import the classes defined above from there.\n", + "With this solution you can still modify the classes above and rerun the notebook to see the changes!" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "2a1837e9", + "metadata": { + "scrolled": true + }, + "outputs": [], + "source": [ + "from win_exec import enable_win, cleanup\n", + "enable_win(\"tutorial02_processes\")\n", + "from tutorial02_processes import LIF" + ] + }, { "cell_type": "markdown", "id": "a27ae936", @@ -211,7 +264,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 4, "id": "2d14869c", "metadata": {}, "outputs": [], @@ -237,7 +290,7 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 5, "id": "27e749ec", "metadata": {}, "outputs": [ @@ -273,7 +326,7 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 6, "id": "6ae84fef", "metadata": {}, "outputs": [ @@ -309,7 +362,7 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 7, "id": "f25e0cd9", "metadata": {}, "outputs": [], @@ -330,7 +383,7 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 8, "id": "110b9196", "metadata": {}, "outputs": [ @@ -358,7 +411,7 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 9, "id": "4cb4d6bb", "metadata": {}, "outputs": [ @@ -389,7 +442,7 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 10, "id": "31b51fb7", "metadata": {}, "outputs": [], @@ -410,6 +463,25 @@ "\n", "To receive regular updates on the latest developments and releases of the Lava Software Framework please subscribe to the [INRC newsletter](http://eepurl.com/hJCyhb \"INRC Newsletter\")." 
] + }, + { + "cell_type": "markdown", + "id": "663b1e84", + "metadata": {}, + "source": [ + "#### Exception for Windows\n", + "Delete the created python script again." + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "326cc579", + "metadata": {}, + "outputs": [], + "source": [ + "cleanup(\"tutorial02_processes\")" + ] } ], "metadata": { @@ -428,7 +500,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.8.10" + "version": "3.9.13" } }, "nbformat": 4, diff --git a/tutorials/in_depth/win_exec.py b/tutorials/in_depth/win_exec.py new file mode 100644 index 000000000..a1d24b1d3 --- /dev/null +++ b/tutorials/in_depth/win_exec.py @@ -0,0 +1,37 @@ +# Copyright (C) 2022 Intel Corporation +# SPDX-License-Identifier: BSD-3-Clause +# See: https://spdx.org/licenses/ + +from sys import platform +import os + + +def enable_win(nb_name: str): + """Enables execution of Lava tutorials on Windows systems. Processes + defined within the tutorial jupyter notebook need to be written to a python + script and imported in the notebook again.""" + if platform == "win32" or platform == "cygwin": + # Convert the ipython notebook to a python script + os.system("jupyter nbconvert --to script " + nb_name + ".ipynb") + + # Remove code after definition to avoid execution during import + with open(nb_name + ".py", "r+") as f: + d = f.readlines() + f.seek(0) + for i in d: + if i.strip("\n") != "# #### Exception for Windows": + f.write(i) + elif i.strip("\n") == "# #### Exception for Windows": + break + f.truncate() + + +def cleanup(nb_name: str): + """Removes previously created python script for tutorial execution on + Windows systems.""" + if platform == "win32" or platform == "cygwin": + os.system("del " + nb_name + ".py") + + +if __name__ == "__main__": + pass From b89d6f140c725699260f0d5f7603edf2909f175d Mon Sep 17 00:00:00 2001 From: weidel-p Date: Thu, 16 Jun 2022 11:42:27 +0200 Subject: [PATCH 12/14] moved win_exec.py to utils 
and changed text --- .../in_depth => src/lava/utils}/win_exec.py | 4 +- tutorials/in_depth/tutorial02_processes.ipynb | 39 ++++++++++++------- 2 files changed, 27 insertions(+), 16 deletions(-) rename {tutorials/in_depth => src/lava/utils}/win_exec.py (93%) diff --git a/tutorials/in_depth/win_exec.py b/src/lava/utils/win_exec.py similarity index 93% rename from tutorials/in_depth/win_exec.py rename to src/lava/utils/win_exec.py index a1d24b1d3..32fbf96a8 100644 --- a/tutorials/in_depth/win_exec.py +++ b/src/lava/utils/win_exec.py @@ -6,7 +6,7 @@ import os -def enable_win(nb_name: str): +def export_notebook(nb_name: str): """Enables execution of Lava tutorials on Windows systems. Processes defined within the tutorial jupyter notebook need to be written to a python script and imported in the notebook again.""" @@ -31,6 +31,8 @@ def cleanup(nb_name: str): Windows systems.""" if platform == "win32" or platform == "cygwin": os.system("del " + nb_name + ".py") + else: + os.remove(nb_name + ".py") if __name__ == "__main__": diff --git a/tutorials/in_depth/tutorial02_processes.ipynb b/tutorials/in_depth/tutorial02_processes.ipynb index 0840186bd..d91372c11 100644 --- a/tutorials/in_depth/tutorial02_processes.ipynb +++ b/tutorials/in_depth/tutorial02_processes.ipynb @@ -170,25 +170,25 @@ "name": "stderr", "output_type": "stream", "text": [ - "C:\\Users\\pplank\\AppData\\Local\\Temp\\ipykernel_3388\\2851907654.py:13: DeprecationWarning: `np.float` is a deprecated alias for the builtin `float`. To silence this warning, use `float` by itself. Doing this will not modify any behavior and is safe. If you specifically wanted the numpy scalar type, use `np.float64` here.\n", + "/tmp/ipykernel_7164/2851907654.py:13: DeprecationWarning: `np.float` is a deprecated alias for the builtin `float`. To silence this warning, use `float` by itself. Doing this will not modify any behavior and is safe. 
If you specifically wanted the numpy scalar type, use `np.float64` here.\n", "Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations\n", " a_in: PyInPort = LavaPyType(PyInPort.VEC_DENSE, np.float)\n", - "C:\\Users\\pplank\\AppData\\Local\\Temp\\ipykernel_3388\\2851907654.py:15: DeprecationWarning: `np.float` is a deprecated alias for the builtin `float`. To silence this warning, use `float` by itself. Doing this will not modify any behavior and is safe. If you specifically wanted the numpy scalar type, use `np.float64` here.\n", + "/tmp/ipykernel_7164/2851907654.py:15: DeprecationWarning: `np.float` is a deprecated alias for the builtin `float`. To silence this warning, use `float` by itself. Doing this will not modify any behavior and is safe. If you specifically wanted the numpy scalar type, use `np.float64` here.\n", "Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations\n", " u: np.ndarray = LavaPyType(np.ndarray, np.float)\n", - "C:\\Users\\pplank\\AppData\\Local\\Temp\\ipykernel_3388\\2851907654.py:16: DeprecationWarning: `np.float` is a deprecated alias for the builtin `float`. To silence this warning, use `float` by itself. Doing this will not modify any behavior and is safe. If you specifically wanted the numpy scalar type, use `np.float64` here.\n", + "/tmp/ipykernel_7164/2851907654.py:16: DeprecationWarning: `np.float` is a deprecated alias for the builtin `float`. To silence this warning, use `float` by itself. Doing this will not modify any behavior and is safe. 
If you specifically wanted the numpy scalar type, use `np.float64` here.\n", "Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations\n", " v: np.ndarray = LavaPyType(np.ndarray, np.float)\n", - "C:\\Users\\pplank\\AppData\\Local\\Temp\\ipykernel_3388\\2851907654.py:17: DeprecationWarning: `np.float` is a deprecated alias for the builtin `float`. To silence this warning, use `float` by itself. Doing this will not modify any behavior and is safe. If you specifically wanted the numpy scalar type, use `np.float64` here.\n", + "/tmp/ipykernel_7164/2851907654.py:17: DeprecationWarning: `np.float` is a deprecated alias for the builtin `float`. To silence this warning, use `float` by itself. Doing this will not modify any behavior and is safe. If you specifically wanted the numpy scalar type, use `np.float64` here.\n", "Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations\n", " bias: np.ndarray = LavaPyType(np.ndarray, np.float)\n", - "C:\\Users\\pplank\\AppData\\Local\\Temp\\ipykernel_3388\\2851907654.py:18: DeprecationWarning: `np.float` is a deprecated alias for the builtin `float`. To silence this warning, use `float` by itself. Doing this will not modify any behavior and is safe. If you specifically wanted the numpy scalar type, use `np.float64` here.\n", + "/tmp/ipykernel_7164/2851907654.py:18: DeprecationWarning: `np.float` is a deprecated alias for the builtin `float`. To silence this warning, use `float` by itself. Doing this will not modify any behavior and is safe. 
If you specifically wanted the numpy scalar type, use `np.float64` here.\n", "Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations\n", " du: float = LavaPyType(float, np.float)\n", - "C:\\Users\\pplank\\AppData\\Local\\Temp\\ipykernel_3388\\2851907654.py:19: DeprecationWarning: `np.float` is a deprecated alias for the builtin `float`. To silence this warning, use `float` by itself. Doing this will not modify any behavior and is safe. If you specifically wanted the numpy scalar type, use `np.float64` here.\n", + "/tmp/ipykernel_7164/2851907654.py:19: DeprecationWarning: `np.float` is a deprecated alias for the builtin `float`. To silence this warning, use `float` by itself. Doing this will not modify any behavior and is safe. If you specifically wanted the numpy scalar type, use `np.float64` here.\n", "Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations\n", " dv: float = LavaPyType(float, np.float)\n", - "C:\\Users\\pplank\\AppData\\Local\\Temp\\ipykernel_3388\\2851907654.py:20: DeprecationWarning: `np.float` is a deprecated alias for the builtin `float`. To silence this warning, use `float` by itself. Doing this will not modify any behavior and is safe. If you specifically wanted the numpy scalar type, use `np.float64` here.\n", + "/tmp/ipykernel_7164/2851907654.py:20: DeprecationWarning: `np.float` is a deprecated alias for the builtin `float`. To silence this warning, use `float` by itself. Doing this will not modify any behavior and is safe. If you specifically wanted the numpy scalar type, use `np.float64` here.\n", "Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations\n", " vth: float = LavaPyType(float, np.float)\n" ] @@ -234,21 +234,30 @@ "source": [ "#### Exception for Windows\n", "\n", - "The next cell is only needed for systems running on Windows. 
This is not part of the tutorial or Lava, but to allow this jupyter notebook to execute on Windows without errors. The class LIF and PyLifModel need to be imported from a script in order to run this tutorial on Windows due to an issue with the multiprocessing package on Windows. Therefore we create a python script from this jupyter notebook and import the classes defined above from there.\n", - "With this solution you can still modify the classes above and rerun the notebook to see the changes!" + "The next cell is only needed for systems running on Windows due to an issue with the multiprocessing package. In order to execute the notebook flawlessly on Windows systems, the LIF class needs to be imported from a script and can not be defined in the notebook itself. Therefore we export a python script from this jupyter notebook and import the LIF class defined above from there. \n", + "With this temporary solution you can still modify the classes above and rerun the notebook to see the changes. In the meantime, we are working on a permanent fix for the issue of the multiprocessing package under Windows." 
] }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 12, "id": "2a1837e9", "metadata": { "scrolled": true }, - "outputs": [], + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "[NbConvertApp] Converting notebook tutorial02_processes.ipynb to script\n", + "[NbConvertApp] Writing 12771 bytes to tutorial02_processes.py\n" + ] + } + ], "source": [ - "from win_exec import enable_win, cleanup\n", - "enable_win(\"tutorial02_processes\")\n", + "from lava.utils.win_exec import export_notebook, cleanup\n", + "export_notebook(\"tutorial02_processes\")\n", "from tutorial02_processes import LIF" ] }, @@ -475,7 +484,7 @@ }, { "cell_type": "code", - "execution_count": 11, + "execution_count": 13, "id": "326cc579", "metadata": {}, "outputs": [], @@ -500,7 +509,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.9.13" + "version": "3.10.4" } }, "nbformat": 4, From b3970800263a4cafdcd07024a0e7f83047877f7e Mon Sep 17 00:00:00 2001 From: weidel-p Date: Thu, 16 Jun 2022 18:23:19 +0200 Subject: [PATCH 13/14] used magic to export single cells instead of complete script --- src/lava/utils/win_exec.py | 45 +- tutorials/in_depth/tutorial02_processes.ipynb | 321 ++++-- tutorials/in_depth/tutorial04_execution.ipynb | 924 +++++++++--------- .../tutorial05_connect_processes.ipynb | 136 ++- .../tutorial06_hierarchical_processes.ipynb | 455 +++++++-- .../tutorial07_remote_memory_access.ipynb | 234 ++++- 6 files changed, 1392 insertions(+), 723 deletions(-) diff --git a/src/lava/utils/win_exec.py b/src/lava/utils/win_exec.py index 32fbf96a8..35a8986bd 100644 --- a/src/lava/utils/win_exec.py +++ b/src/lava/utils/win_exec.py @@ -3,37 +3,30 @@ # See: https://spdx.org/licenses/ from sys import platform -import os -def export_notebook(nb_name: str): - """Enables execution of Lava tutorials on Windows systems. 
Processes - defined within the tutorial jupyter notebook need to be written to a python - script and imported in the notebook again.""" - if platform == "win32" or platform == "cygwin": - # Convert the ipython notebook to a python script - os.system("jupyter nbconvert --to script " + nb_name + ".ipynb") - - # Remove code after definition to avoid execution during import - with open(nb_name + ".py", "r+") as f: - d = f.readlines() - f.seek(0) - for i in d: - if i.strip("\n") != "# #### Exception for Windows": - f.write(i) - elif i.strip("\n") == "# #### Exception for Windows": - break - f.truncate() - - -def cleanup(nb_name: str): - """Removes previously created python script for tutorial execution on - Windows systems.""" +try: + from IPython import get_ipython +except ModuleNotFoundError: + pass + +def export(filename, cell): + '''Exports the cell to the given file name if system is windows.''' if platform == "win32" or platform == "cygwin": - os.system("del " + nb_name + ".py") + with open(f"{filename}.py", "a") as f: + f.write(cell) else: - os.remove(nb_name + ".py") + get_ipython().ex(cell) + + +def load_ipython_extension(shell): + '''Registers the magic function when the extension loads.''' + shell.register_magic_function(export, 'cell') + +def unload_ipython_extension(shell): + '''Unregisters the magic function when the extension unloads.''' + del shell.magics_manager.magics['cell']['export'] if __name__ == "__main__": pass diff --git a/tutorials/in_depth/tutorial02_processes.ipynb b/tutorials/in_depth/tutorial02_processes.ipynb index d91372c11..e05fab7a0 100644 --- a/tutorials/in_depth/tutorial02_processes.ipynb +++ b/tutorials/in_depth/tutorial02_processes.ipynb @@ -3,7 +3,11 @@ { "cell_type": "markdown", "id": "415e14e3", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "*Copyright (C) 2021 Intel Corporation*
\n", "*SPDX-License-Identifier: BSD-3-Clause*
\n", @@ -19,7 +23,11 @@ { "cell_type": "markdown", "id": "919fe51c", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "## Recommended tutorials before starting:\n", "\n", @@ -50,7 +58,11 @@ { "cell_type": "markdown", "id": "1b068a15", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "## How to build a _Process_?\n", "\n", @@ -63,7 +75,11 @@ { "cell_type": "markdown", "id": "2fa70f25", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "#### _AbstractProcess_: Defining _Vars_, _Ports_, and the API\n", "\n", @@ -91,10 +107,29 @@ { "cell_type": "code", "execution_count": 1, - "id": "685af4da", - "metadata": {}, + "id": "257f08da", + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, + "outputs": [], + "source": [ + "%load_ext lava.utils.win_exec" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "7f61a438", + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ + "%%export tutorial_02\n", "import numpy as np\n", "\n", "from lava.magma.core.process.process import AbstractProcess\n", @@ -128,14 +163,17 @@ " print(sp + \"du: {}\".format(str(self.du.get())))\n", " print(sp + \"dv: {}\".format(str(self.dv.get())))\n", " print(sp + \"bias: {}\".format(str(self.bias.get())))\n", - " print(sp + \"vth: {}\".format(str(self.vth.get())))\n", - " " + " print(sp + \"vth: {}\".format(str(self.vth.get())))\n" ] }, { "cell_type": "markdown", - "id": "27b56891", - "metadata": {}, + "id": "ff24ec14", + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "You may have noticed that most of the _Vars_ were initialized by scalar integers. But the synaptic current _u_ illustrates that _Vars_ can in general be initialized with numeric objects that have a dimensionality equal or less than specified by its _shape_ argument. 
The initial value will be scaled up to match the _Var_ dimension at run time.\n", "\n", @@ -148,8 +186,12 @@ }, { "cell_type": "markdown", - "id": "6799aedb", - "metadata": {}, + "id": "520d55e7", + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "#### _ProcessModel_: Defining the behavior of a _Process_\n", "\n", @@ -162,39 +204,38 @@ }, { "cell_type": "code", - "execution_count": 2, - "id": "bb4835e1", - "metadata": {}, + "execution_count": 4, + "id": "423eadf0", + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ - "/tmp/ipykernel_7164/2851907654.py:13: DeprecationWarning: `np.float` is a deprecated alias for the builtin `float`. To silence this warning, use `float` by itself. Doing this will not modify any behavior and is safe. If you specifically wanted the numpy scalar type, use `np.float64` here.\n", - "Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations\n", - " a_in: PyInPort = LavaPyType(PyInPort.VEC_DENSE, np.float)\n", - "/tmp/ipykernel_7164/2851907654.py:15: DeprecationWarning: `np.float` is a deprecated alias for the builtin `float`. To silence this warning, use `float` by itself. Doing this will not modify any behavior and is safe. If you specifically wanted the numpy scalar type, use `np.float64` here.\n", + ":14: DeprecationWarning: `np.float` is a deprecated alias for the builtin `float`. To silence this warning, use `float` by itself. Doing this will not modify any behavior and is safe. If you specifically wanted the numpy scalar type, use `np.float64` here.\n", "Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations\n", - " u: np.ndarray = LavaPyType(np.ndarray, np.float)\n", - "/tmp/ipykernel_7164/2851907654.py:16: DeprecationWarning: `np.float` is a deprecated alias for the builtin `float`. 
To silence this warning, use `float` by itself. Doing this will not modify any behavior and is safe. If you specifically wanted the numpy scalar type, use `np.float64` here.\n", + ":16: DeprecationWarning: `np.float` is a deprecated alias for the builtin `float`. To silence this warning, use `float` by itself. Doing this will not modify any behavior and is safe. If you specifically wanted the numpy scalar type, use `np.float64` here.\n", "Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations\n", - " v: np.ndarray = LavaPyType(np.ndarray, np.float)\n", - "/tmp/ipykernel_7164/2851907654.py:17: DeprecationWarning: `np.float` is a deprecated alias for the builtin `float`. To silence this warning, use `float` by itself. Doing this will not modify any behavior and is safe. If you specifically wanted the numpy scalar type, use `np.float64` here.\n", + ":17: DeprecationWarning: `np.float` is a deprecated alias for the builtin `float`. To silence this warning, use `float` by itself. Doing this will not modify any behavior and is safe. If you specifically wanted the numpy scalar type, use `np.float64` here.\n", "Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations\n", - " bias: np.ndarray = LavaPyType(np.ndarray, np.float)\n", - "/tmp/ipykernel_7164/2851907654.py:18: DeprecationWarning: `np.float` is a deprecated alias for the builtin `float`. To silence this warning, use `float` by itself. Doing this will not modify any behavior and is safe. If you specifically wanted the numpy scalar type, use `np.float64` here.\n", + ":18: DeprecationWarning: `np.float` is a deprecated alias for the builtin `float`. To silence this warning, use `float` by itself. Doing this will not modify any behavior and is safe. 
If you specifically wanted the numpy scalar type, use `np.float64` here.\n", "Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations\n", - " du: float = LavaPyType(float, np.float)\n", - "/tmp/ipykernel_7164/2851907654.py:19: DeprecationWarning: `np.float` is a deprecated alias for the builtin `float`. To silence this warning, use `float` by itself. Doing this will not modify any behavior and is safe. If you specifically wanted the numpy scalar type, use `np.float64` here.\n", + ":19: DeprecationWarning: `np.float` is a deprecated alias for the builtin `float`. To silence this warning, use `float` by itself. Doing this will not modify any behavior and is safe. If you specifically wanted the numpy scalar type, use `np.float64` here.\n", "Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations\n", - " dv: float = LavaPyType(float, np.float)\n", - "/tmp/ipykernel_7164/2851907654.py:20: DeprecationWarning: `np.float` is a deprecated alias for the builtin `float`. To silence this warning, use `float` by itself. Doing this will not modify any behavior and is safe. If you specifically wanted the numpy scalar type, use `np.float64` here.\n", + ":20: DeprecationWarning: `np.float` is a deprecated alias for the builtin `float`. To silence this warning, use `float` by itself. Doing this will not modify any behavior and is safe. If you specifically wanted the numpy scalar type, use `np.float64` here.\n", "Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations\n", - " vth: float = LavaPyType(float, np.float)\n" + ":21: DeprecationWarning: `np.float` is a deprecated alias for the builtin `float`. To silence this warning, use `float` by itself. Doing this will not modify any behavior and is safe. 
If you specifically wanted the numpy scalar type, use `np.float64` here.\n", + "Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations\n" ] } ], "source": [ + "%%export tutorial_02\n", + "\n", "import numpy as np\n", "from lava.magma.core.sync.protocols.loihi_protocol import LoihiProtocol\n", "from lava.magma.core.model.py.ports import PyInPort, PyOutPort\n", @@ -229,8 +270,12 @@ }, { "cell_type": "markdown", - "id": "e62a6fd4", - "metadata": {}, + "id": "e0213c87", + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "#### Exception for Windows\n", "\n", @@ -240,31 +285,28 @@ }, { "cell_type": "code", - "execution_count": 12, - "id": "2a1837e9", + "execution_count": 14, + "id": "a51a83fb", "metadata": { - "scrolled": true - }, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "[NbConvertApp] Converting notebook tutorial02_processes.ipynb to script\n", - "[NbConvertApp] Writing 12771 bytes to tutorial02_processes.py\n" - ] + "pycharm": { + "name": "#%%\n" } - ], + }, + "outputs": [], "source": [ - "from lava.utils.win_exec import export_notebook, cleanup\n", - "export_notebook(\"tutorial02_processes\")\n", - "from tutorial02_processes import LIF" + "from sys import platform\n", + "if platform == \"win32\" or platform == \"cygwin\":\n", + " from tutorial_02 import LIF, PyLifModel" ] }, { "cell_type": "markdown", - "id": "a27ae936", - "metadata": {}, + "id": "5826569e", + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "#### Instantiating the _Process_\n", "\n", @@ -273,9 +315,13 @@ }, { "cell_type": "code", - "execution_count": 4, - "id": "2d14869c", - "metadata": {}, + "execution_count": 15, + "id": "ff02526a", + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ "n_neurons = 3\n", @@ -285,8 +331,12 @@ }, { "cell_type": "markdown", - "id": "180d1185", - "metadata": {}, + "id": "8d435219", + 
"metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "## Interacting with _Processes_\n", "\n", @@ -299,9 +349,13 @@ }, { "cell_type": "code", - "execution_count": 5, - "id": "27e749ec", - "metadata": {}, + "execution_count": 16, + "id": "576bf1d7", + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [ { "name": "stdout", @@ -317,16 +371,24 @@ }, { "cell_type": "markdown", - "id": "13a0c779", - "metadata": {}, + "id": "d9e6c1f1", + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "As described above, the _Var_ _v_ has in this example been initialized as a scalar value that describes the membrane voltage of all three neurons simultaneously." ] }, { "cell_type": "markdown", - "id": "a12a9667", - "metadata": {}, + "id": "5cf3d055", + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "#### Using custom APIs\n", "\n", @@ -335,9 +397,13 @@ }, { "cell_type": "code", - "execution_count": 6, - "id": "6ae84fef", - "metadata": {}, + "execution_count": 17, + "id": "54c5205b", + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [ { "name": "stdout", @@ -359,8 +425,12 @@ }, { "cell_type": "markdown", - "id": "c2eaabca", - "metadata": {}, + "id": "64da3ce6", + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "#### Executing a _Process_\n", "\n", @@ -371,9 +441,13 @@ }, { "cell_type": "code", - "execution_count": 7, - "id": "f25e0cd9", - "metadata": {}, + "execution_count": 18, + "id": "caba0760", + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ "from lava.magma.core.run_configs import Loihi1SimCfg\n", @@ -384,17 +458,25 @@ }, { "cell_type": "markdown", - "id": "589ef1c2", - "metadata": {}, + "id": "69a07f2d", + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "The voltage of each LIF neuron should now have increased by the bias value, 3, from their initial values of 0. 
Check if the neurons have evolved as expected." ] }, { "cell_type": "code", - "execution_count": 8, - "id": "110b9196", - "metadata": {}, + "execution_count": 19, + "id": "b2cf1b19", + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [ { "name": "stdout", @@ -410,8 +492,12 @@ }, { "cell_type": "markdown", - "id": "169bbf54", - "metadata": {}, + "id": "f6ff13d8", + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "#### Update _Vars_\n", "\n", @@ -420,9 +506,13 @@ }, { "cell_type": "code", - "execution_count": 9, - "id": "4cb4d6bb", - "metadata": {}, + "execution_count": 20, + "id": "2a9fe84c", + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [ { "name": "stdout", @@ -433,14 +523,20 @@ } ], "source": [ + "import numpy as np\n", + "\n", "lif.v.set(np.array([1, 2, 3]) )\n", "print(lif.v.get())" ] }, { "cell_type": "markdown", - "id": "abf94e11", - "metadata": {}, + "id": "8fa3b475", + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "Note that the _set()_ method becomes available once the _Process_ has been run. 
Prior to the first run, use the *\\_\\_init\\_\\_* function of the _Process_ to set _Vars_.\n", "\n", @@ -451,9 +547,13 @@ }, { "cell_type": "code", - "execution_count": 10, - "id": "31b51fb7", - "metadata": {}, + "execution_count": 12, + "id": "c84b1b2d", + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ "lif.stop()" @@ -461,22 +561,12 @@ }, { "cell_type": "markdown", - "id": "ddb06944", - "metadata": {}, - "source": [ - "## How to learn more?\n", - "\n", - "Learn how to implement the behavior of _Processes_ in the [next tutorial on ProcessModels](./tutorial03_process_models.ipynb \"Tutorial on ProcessModels\").\n", - "\n", - "If you want to find out more about _Processes_, have a look at the [Lava documentation](https://lava-nc.org/ \"Lava Documentation\") or dive into the [source code](https://github.com/lava-nc/lava/tree/main/src/lava/magma/core/process/process.py \"Process Source Code\").\n", - "\n", - "To receive regular updates on the latest developments and releases of the Lava Software Framework please subscribe to the [INRC newsletter](http://eepurl.com/hJCyhb \"INRC Newsletter\")." - ] - }, - { - "cell_type": "markdown", - "id": "663b1e84", - "metadata": {}, + "id": "e4b696cd", + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "#### Exception for Windows\n", "Delete the created python script again." 
@@ -485,11 +575,34 @@ { "cell_type": "code", "execution_count": 13, - "id": "326cc579", - "metadata": {}, + "id": "dc6272f5", + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ - "cleanup(\"tutorial02_processes\")" + "if platform == \"win32\" or platform == \"cygwin\":\n", + " !del tutorial_02.py" + ] + }, + { + "cell_type": "markdown", + "id": "4b2eedbd", + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, + "source": [ + "## How to learn more?\n", + "\n", + "Learn how to implement the behavior of _Processes_ in the [next tutorial on ProcessModels](./tutorial03_process_models.ipynb \"Tutorial on ProcessModels\").\n", + "\n", + "If you want to find out more about _Processes_, have a look at the [Lava documentation](https://lava-nc.org/ \"Lava Documentation\") or dive into the [source code](https://github.com/lava-nc/lava/tree/main/src/lava/magma/core/process/process.py \"Process Source Code\").\n", + "\n", + "To receive regular updates on the latest developments and releases of the Lava Software Framework please subscribe to the [INRC newsletter](http://eepurl.com/hJCyhb \"INRC Newsletter\")." ] } ], diff --git a/tutorials/in_depth/tutorial04_execution.ipynb b/tutorials/in_depth/tutorial04_execution.ipynb index 141204953..2302e3d48 100644 --- a/tutorials/in_depth/tutorial04_execution.ipynb +++ b/tutorials/in_depth/tutorial04_execution.ipynb @@ -1,442 +1,482 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": { - "pycharm": { - "name": "#%% md\n" - } - }, - "source": [ - "*Copyright (C) 2021 Intel Corporation*
\n", - "*SPDX-License-Identifier: BSD-3-Clause*
\n", - "*See: https://spdx.org/licenses/*\n", - "\n", - "---\n", - "\n", - "# Execution\n", - "\n", - "This tutorial covers how to execute single _Processes_ and networks of _Processes_, how to configure execution, how to pause, resume, and stop execution, and how to manually set up a _Compiler_ and _RunTime_ for more fine-grained control.\n", - "\n", - "## Recommended tutorials before starting:\n", - "\n", - "- [Installing Lava](./tutorial01_installing_lava.ipynb \"Tutorial on Installing Lava\")\n", - "- [Processes](./tutorial02_processes.ipynb \"Tutorial on Processes\")\n", - "- [ProcessModel](./tutorial03_process_models.ipynb \"Tutorial on ProcessModels\")\n", - "\n", - "## Configuring and starting execution\n", - "To start executing a _Process_ call its method `run(condition=..., run_cfg=...)`. The execution must be configured by passing in both a _RunCondition_ and a _RunConfiguration_.\n", - "\n", - "#### Run conditions\n", - "A _RunCondition_ specifies how long a _Process_ is executed.\n", - "\n", - "The run condition _RunSteps_ executes a _Process_ for a specified number time steps, here 42 in the example below. The execution will automatically pause after the specified number of time steps.\n", - "You can also specify whether or not the call to `run()` will block the program flow." - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": { - "pycharm": { - "name": "#%%\n" - } - }, - "outputs": [], - "source": [ - "from lava.magma.core.run_conditions import RunSteps\n", - "\n", - "run_condition = RunSteps(num_steps=42, blocking=False)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The run condition _RunContinuous_ enables you to run a _Process_ continuously. In this case, the _Process_ will run indefinitely until you explicitly call `pause()` or `stop()` (see below). This call never blocks the program flow (blocking=False)." 
- ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": { - "pycharm": { - "name": "#%%\n" - } - }, - "outputs": [], - "source": [ - "from lava.magma.core.run_conditions import RunContinuous\n", - "\n", - "run_condition = RunContinuous()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "#### Run configurations\n", - "A _RunConfig_ specifies on what devices the _Processes_ should be executed.\n", - "Based on the _RunConfig_, a _Process_ selects and initializes exactly one\n", - "of its associated [_ProcessModels_](./tutorial03_process_models.ipynb \"Tutorial on ProcessModels\"), which implement the behavior of the _Process_ in a particular programming language and for a particular computing resource.\n", - "If the _Process_ has a _SubProcessModel_ composed of other _Processes_, the _RunConfig_ chooses the appropriate _ProcessModel_ implementation of the child _Process_.\n", - "\n", - "Since Lava currently only supports execution in simulation on a single CPU,\n", - "the only predefined _RunConfig_ is _Loihi1SimCfg_, which simulates executing _Processes_ on Loihi.\n", - "We will make more predefined run configurations available with the upcoming support for Loihi 1 and 2 and\n", - "other devices such as GPUs.\n", - "\n", - "The example below specifies that the _Process_ (and all its connected _Processes_\n", - "and _SubProcesses_) are executed in Python on a CPU." - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": { - "pycharm": { - "name": "#%%\n" - } - }, - "outputs": [], - "source": [ - "from lava.magma.core.run_configs import Loihi1SimCfg\n", - "\n", - "run_cfg = Loihi1SimCfg()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We can now use both a _RunCondition_ and a _RunConfig_ to execute a simple leaky integrate-and-fire (LIF) neuron." 
- ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [], - "source": [ - "from lava.proc.lif.process import LIF\n", - "from lava.magma.core.run_conditions import RunSteps\n", - "from lava.magma.core.run_configs import Loihi1SimCfg\n", - "\n", - "# create a Process for a LIF neuron\n", - "lif = LIF()\n", - "\n", - "# execute that Process for 42 time steps in simulation\n", - "lif.run(condition=RunSteps(num_steps=42), run_cfg=Loihi1SimCfg())" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Running multiple _Processes_\n", - "\n", - "Calling `run()` on a _Process_ will also execute all _Processes_ that are connected to it. In the example below, three _Processes_ _lif1_, _dense_, and _lif2_ are connected in a sequence. We call `run()` on _Process_ _lif2_. Since _lif2_ is connected to _dense_ and _dense_ is connected to _lif1_, all three _Processes_ will be executed. As demonstrated here, the execution will cover the entire connected network of _Processes_, irrespective of the direction in which the _Processes_ are connected." 
- ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [], - "source": [ - "from lava.proc.lif.process import LIF\n", - "from lava.proc.dense.process import Dense\n", - "from lava.magma.core.run_conditions import RunSteps\n", - "from lava.magma.core.run_configs import Loihi1SimCfg\n", - "\n", - "# create processes\n", - "lif1 = LIF()\n", - "dense = Dense()\n", - "lif2 = LIF()\n", - "\n", - "# connect the OutPort of lif1 to the InPort of dense\n", - "lif1.s_out.connect(dense.s_in)\n", - "# connect the OutPort of dense to the InPort of lif2\n", - "dense.a_out.connect(lif2.a_in)\n", - "\n", - "# execute Process lif2 and all Processes connected to it (dense, lif1)\n", - "lif2.run(condition=RunSteps(num_steps=42), run_cfg=Loihi1SimCfg())" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We will add more on running multiple _Processes_ in the future, including synchronization and running networks of _Processes_ on different devices." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Pausing, resuming, and stopping execution\n", - "\n", - "> **Important Note**:\n", - ">\n", - "> Right now, Lava does not support `pause()` and _RunContinuous_. These features will be enabled soon in a feature release.\n", - "> Nevertheless, the following example illustrates how to pause, resume, and stop a process in Lava.\n", - "\n", - "Calling the `pause()` method of a _Process_ pauses execution but preserves its state.\n", - "The _Process_ can then be inspected and manipulated by the user, as shown in the example below.\n", - "\n", - "Afterward, execution can be resumed by calling `run()` again.\n", - "\n", - "Calling the `stop()` method of a _Process_ completely terminates its execution.\n", - "Contrary to pausing execution, `stop()` does not preserve the state of the\n", - "_Process_. 
If a _Process_ executed on a hardware device, the connection between\n", - "the _Process_ and the device is terminated as well." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "pycharm": { - "name": "#%%\n" - } - }, - "outputs": [], - "source": [ - "\"\"\"\n", - "from lava.proc.lif.process import LIF\n", - "from lava.magma.core.run_conditions import RunContinuous\n", - "from lava.magma.core.run_configs import Loihi1SimCfg\n", - "\n", - "lif3 = LIF()\n", - "\n", - "# start continuous execution\n", - "lif3.run(condition=RunContinuous(), run_cfg=Loihi1SimCfg())\n", - "\n", - "# pause execution\n", - "lif3.pause()\n", - "\n", - "# inspect the state of the Process, here, the voltage variable 'v'\n", - "print(lif.v.get())\n", - "# manipulate the state of the Process, here, resetting the voltage to zero\n", - "lif3.v.set(0)\n", - "\n", - "# resume continuous execution\n", - "lif3.run(condition=RunContinuous(), run_cfg=Loihi1SimCfg())\n", - "\n", - "# terminate execution;\n", - "# after this, you no longer have access to the state of lif\n", - "lif3.stop()\n", - "\"\"\"" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Manual compilation and execution\n", - "\n", - "In many cases, creating an instance of a _Process_ and calling its `run()`\n", - "method is all you need to do. Calling `run()` internally first compiles\n", - "the _Process_ and then starts execution. These steps can also be manually\n", - "invoked in sequence, for instance to inspect or manipulate the _Process_ before\n", - "starting execution.\n", - "\n", - "1. Instantiation stage: This is the call to the init-method of a _Process_,\n", - "which instantiates an object of the _Process_." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "pycharm": { - "name": "#%%\n" - } - }, - "outputs": [], - "source": [ - "from lava.proc.lif.process import LIF\n", - "from lava.proc.dense.process import Dense\n", - "\n", - "lif1 = LIF()\n", - "dense = Dense()\n", - "lif2 = LIF()" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "pycharm": { - "name": "#%% md\n" - } - }, - "source": [ - "2. Configuration stage: After a _Process_ has been instantiated, it can be\n", - "configured further through its public API and connected to other _Processes_ via\n", - "its _Ports_. In addition, probes can be defined for Lava _Vars_ in order to\n", - "record a time series of its evolution during execution (probing will be \n", - "supported in an upcoming Lava release)." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "pycharm": { - "name": "#%%\n" - } - }, - "outputs": [], - "source": [ - "# connect the processes\n", - "lif1.s_out.connect(dense.s_in)\n", - "dense.a_out.connect(lif2.a_in)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "pycharm": { - "name": "#%% md\n" - } - }, - "source": [ - "3. Compile stage: After a _Process_ has been configured, it needs to be compiled to\n", - "become executable. After the compilation stage, the state of the _Process_ can\n", - "still be manipulated and inspected." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "pycharm": { - "name": "#%%\n" - } - }, - "outputs": [], - "source": [ - "from lava.magma.compiler.compiler import Compiler\n", - "from lava.magma.core.run_configs import Loihi1SimCfg\n", - "\n", - "# create a compiler\n", - "compiler = Compiler()\n", - "\n", - "# compile the Process (and all connected Processes) into an executable\n", - "executable = compiler.compile(lif2, run_cfg=Loihi1SimCfg())" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "pycharm": { - "name": "#%% md\n" - } - }, - "source": [ - "4. 
Execution stage: When compilation is complete, _Processes_ can be\n", - "executed. The execution stage ensures that the (prior) compilation stage has\n", - "been completed and otherwise invokes it." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "pycharm": { - "name": "#%%\n" - } - }, - "outputs": [], - "source": [ - "from lava.magma.runtime.runtime import Runtime\n", - "from lava.magma.core.run_conditions import RunSteps\n", - "from lava.magma.core.process.message_interface_enum import ActorType\n", - "\n", - "# create and initialize a runtime\n", - "runtime = Runtime(message_infrastructure_type=ActorType.MultiProcessing, exe=executable)\n", - "runtime.initialize()\n", - "\n", - "# start execution\n", - "runtime.start(run_condition=RunSteps(num_steps=42))\n", - "\n", - "# stop execution\n", - "runtime.stop()" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "pycharm": { - "name": "#%% md\n" - } - }, - "source": [ - "The following does all of the above automatically:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "pycharm": { - "name": "#%%\n" - } - }, - "outputs": [], - "source": [ - "from lava.proc.lif.process import LIF\n", - "from lava.proc.dense.process import Dense\n", - "from lava.magma.core.run_conditions import RunSteps\n", - "from lava.magma.core.run_configs import Loihi1SimCfg\n", - "\n", - "# create Processes\n", - "lif = LIF()\n", - "dense = Dense()\n", - "\n", - "# connect Processes\n", - "lif.s_out.connect(dense.s_in)\n", - "\n", - "# execute Processes\n", - "lif.run(condition=RunSteps(num_steps=42), run_cfg=Loihi1SimCfg())\n", - "\n", - "# stop Processes\n", - "lif.stop()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## How to learn more?\n", - "\n", - "In upcoming releases, we will continually publish more and more tutorials, covering, for example, how to transfer data between _Processes_ and how to compose the behavior of a process using 
other processes.\n", - "\n", - "If you want to find out more about how to compile and execute _Processes_, have a look at the [Lava documentation](https://lava-nc.org/ \"Lava Documentation\") or dive into the [Compiler](https://github.com/lava-nc/lava/tree/main/src/lava/magma/compiler/ \"Compiler Source Code\") and [RunTime source code](https://github.com/lava-nc/lava/tree/main/src/lava/magma/runtime/ \"Runtime Source Code\").\n", - "\n", - "To receive regular updates on the latest developments and releases of the Lava Software Framework please subscribe to the [INRC newsletter](http://eepurl.com/hJCyhb \"INRC Newsletter\")." - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.10" - } - }, - "nbformat": 4, - "nbformat_minor": 1 -} +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, + "source": [ + "*Copyright (C) 2021 Intel Corporation*
\n", + "*SPDX-License-Identifier: BSD-3-Clause*
\n", + "*See: https://spdx.org/licenses/*\n", + "\n", + "---\n", + "\n", + "# Execution\n", + "\n", + "This tutorial covers how to execute single _Processes_ and networks of _Processes_, how to configure execution, how to pause, resume, and stop execution, and how to manually set up a _Compiler_ and _RunTime_ for more fine-grained control.\n", + "\n", + "## Recommended tutorials before starting:\n", + "\n", + "- [Installing Lava](./tutorial01_installing_lava.ipynb \"Tutorial on Installing Lava\")\n", + "- [Processes](./tutorial02_processes.ipynb \"Tutorial on Processes\")\n", + "- [ProcessModel](./tutorial03_process_models.ipynb \"Tutorial on ProcessModels\")\n", + "\n", + "## Configuring and starting execution\n", + "To start executing a _Process_ call its method `run(condition=..., run_cfg=...)`. The execution must be configured by passing in both a _RunCondition_ and a _RunConfiguration_.\n", + "\n", + "#### Run conditions\n", + "A _RunCondition_ specifies how long a _Process_ is executed.\n", + "\n", + "The run condition _RunSteps_ executes a _Process_ for a specified number time steps, here 42 in the example below. The execution will automatically pause after the specified number of time steps.\n", + "You can also specify whether or not the call to `run()` will block the program flow." + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, + "outputs": [], + "source": [ + "from lava.magma.core.run_conditions import RunSteps\n", + "\n", + "run_condition = RunSteps(num_steps=42, blocking=False)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, + "source": [ + "The run condition _RunContinuous_ enables you to run a _Process_ continuously. In this case, the _Process_ will run indefinitely until you explicitly call `pause()` or `stop()` (see below). This call never blocks the program flow (blocking=False)." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, + "outputs": [], + "source": [ + "from lava.magma.core.run_conditions import RunContinuous\n", + "\n", + "run_condition = RunContinuous()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, + "source": [ + "#### Run configurations\n", + "A _RunConfig_ specifies on what devices the _Processes_ should be executed.\n", + "Based on the _RunConfig_, a _Process_ selects and initializes exactly one\n", + "of its associated [_ProcessModels_](./tutorial03_process_models.ipynb \"Tutorial on ProcessModels\"), which implement the behavior of the _Process_ in a particular programming language and for a particular computing resource.\n", + "If the _Process_ has a _SubProcessModel_ composed of other _Processes_, the _RunConfig_ chooses the appropriate _ProcessModel_ implementation of the child _Process_.\n", + "\n", + "Since Lava currently only supports execution in simulation on a single CPU,\n", + "the only predefined _RunConfig_ is _Loihi1SimCfg_, which simulates executing _Processes_ on Loihi.\n", + "We will make more predefined run configurations available with the upcoming support for Loihi 1 and 2 and\n", + "other devices such as GPUs.\n", + "\n", + "The example below specifies that the _Process_ (and all its connected _Processes_\n", + "and _SubProcesses_) are executed in Python on a CPU." + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, + "outputs": [], + "source": [ + "from lava.magma.core.run_configs import Loihi1SimCfg\n", + "\n", + "run_cfg = Loihi1SimCfg()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, + "source": [ + "We can now use both a _RunCondition_ and a _RunConfig_ to execute a simple leaky integrate-and-fire (LIF) neuron." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, + "outputs": [], + "source": [ + "from lava.proc.lif.process import LIF\n", + "from lava.magma.core.run_conditions import RunSteps\n", + "from lava.magma.core.run_configs import Loihi1SimCfg\n", + "\n", + "# create a Process for a LIF neuron\n", + "lif = LIF()\n", + "\n", + "# execute that Process for 42 time steps in simulation\n", + "lif.run(condition=RunSteps(num_steps=42), run_cfg=Loihi1SimCfg())" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, + "source": [ + "## Running multiple _Processes_\n", + "\n", + "Calling `run()` on a _Process_ will also execute all _Processes_ that are connected to it. In the example below, three _Processes_ _lif1_, _dense_, and _lif2_ are connected in a sequence. We call `run()` on _Process_ _lif2_. Since _lif2_ is connected to _dense_ and _dense_ is connected to _lif1_, all three _Processes_ will be executed. As demonstrated here, the execution will cover the entire connected network of _Processes_, irrespective of the direction in which the _Processes_ are connected." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, + "outputs": [], + "source": [ + "from lava.proc.lif.process import LIF\n", + "from lava.proc.dense.process import Dense\n", + "from lava.magma.core.run_conditions import RunSteps\n", + "from lava.magma.core.run_configs import Loihi1SimCfg\n", + "\n", + "# create processes\n", + "lif1 = LIF()\n", + "dense = Dense()\n", + "lif2 = LIF()\n", + "\n", + "# connect the OutPort of lif1 to the InPort of dense\n", + "lif1.s_out.connect(dense.s_in)\n", + "# connect the OutPort of dense to the InPort of lif2\n", + "dense.a_out.connect(lif2.a_in)\n", + "\n", + "# execute Process lif2 and all Processes connected to it (dense, lif1)\n", + "lif2.run(condition=RunSteps(num_steps=42), run_cfg=Loihi1SimCfg())" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, + "source": [ + "We will add more on running multiple _Processes_ in the future, including synchronization and running networks of _Processes_ on different devices." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, + "source": [ + "## Pausing, resuming, and stopping execution\n", + "\n", + "> **Important Note**:\n", + ">\n", + "> Right now, Lava does not support `pause()` and _RunContinuous_. 
These features will be enabled soon in a feature release.\n", + "> Nevertheless, the following example illustrates how to pause, resume, and stop a process in Lava.\n", + "\n", + "Calling the `pause()` method of a _Process_ pauses execution but preserves its state.\n", + "The _Process_ can then be inspected and manipulated by the user, as shown in the example below.\n", + "\n", + "Afterward, execution can be resumed by calling `run()` again.\n", + "\n", + "Calling the `stop()` method of a _Process_ completely terminates its execution.\n", + "Contrary to pausing execution, `stop()` does not preserve the state of the\n", + "_Process_. If a _Process_ executed on a hardware device, the connection between\n", + "the _Process_ and the device is terminated as well." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, + "outputs": [], + "source": [ + "\"\"\"\n", + "from lava.proc.lif.process import LIF\n", + "from lava.magma.core.run_conditions import RunContinuous\n", + "from lava.magma.core.run_configs import Loihi1SimCfg\n", + "\n", + "lif3 = LIF()\n", + "\n", + "# start continuous execution\n", + "lif3.run(condition=RunContinuous(), run_cfg=Loihi1SimCfg())\n", + "\n", + "# pause execution\n", + "lif3.pause()\n", + "\n", + "# inspect the state of the Process, here, the voltage variable 'v'\n", + "print(lif.v.get())\n", + "# manipulate the state of the Process, here, resetting the voltage to zero\n", + "lif3.v.set(0)\n", + "\n", + "# resume continuous execution\n", + "lif3.run(condition=RunContinuous(), run_cfg=Loihi1SimCfg())\n", + "\n", + "# terminate execution;\n", + "# after this, you no longer have access to the state of lif\n", + "lif3.stop()\n", + "\"\"\"" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, + "source": [ + "## Manual compilation and execution\n", + "\n", + "In many cases, creating an instance of a _Process_ and calling its 
`run()`\n", + "method is all you need to do. Calling `run()` internally first compiles\n", + "the _Process_ and then starts execution. These steps can also be manually\n", + "invoked in sequence, for instance to inspect or manipulate the _Process_ before\n", + "starting execution.\n", + "\n", + "1. Instantiation stage: This is the call to the init-method of a _Process_,\n", + "which instantiates an object of the _Process_." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, + "outputs": [], + "source": [ + "from lava.proc.lif.process import LIF\n", + "from lava.proc.dense.process import Dense\n", + "\n", + "lif1 = LIF()\n", + "dense = Dense()\n", + "lif2 = LIF()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, + "source": [ + "2. Configuration stage: After a _Process_ has been instantiated, it can be\n", + "configured further through its public API and connected to other _Processes_ via\n", + "its _Ports_. In addition, probes can be defined for Lava _Vars_ in order to\n", + "record a time series of its evolution during execution (probing will be \n", + "supported in an upcoming Lava release)." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, + "outputs": [], + "source": [ + "# connect the processes\n", + "lif1.s_out.connect(dense.s_in)\n", + "dense.a_out.connect(lif2.a_in)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, + "source": [ + "3. Compile stage: After a _Process_ has been configured, it needs to be compiled to\n", + "become executable. After the compilation stage, the state of the _Process_ can\n", + "still be manipulated and inspected." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, + "outputs": [], + "source": [ + "from lava.magma.compiler.compiler import Compiler\n", + "from lava.magma.core.run_configs import Loihi1SimCfg\n", + "\n", + "# create a compiler\n", + "compiler = Compiler()\n", + "\n", + "# compile the Process (and all connected Processes) into an executable\n", + "executable = compiler.compile(lif2, run_cfg=Loihi1SimCfg())" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, + "source": [ + "4. Execution stage: When compilation is complete, _Processes_ can be\n", + "executed. The execution stage ensures that the (prior) compilation stage has\n", + "been completed and otherwise invokes it." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, + "outputs": [], + "source": [ + "from lava.magma.runtime.runtime import Runtime\n", + "from lava.magma.core.run_conditions import RunSteps\n", + "from lava.magma.core.process.message_interface_enum import ActorType\n", + "\n", + "# create and initialize a runtime\n", + "runtime = Runtime(message_infrastructure_type=ActorType.MultiProcessing, exe=executable)\n", + "runtime.initialize()\n", + "\n", + "# start execution\n", + "runtime.start(run_condition=RunSteps(num_steps=42))\n", + "\n", + "# stop execution\n", + "runtime.stop()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, + "source": [ + "The following does all of the above automatically:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, + "outputs": [], + "source": [ + "from lava.proc.lif.process import LIF\n", + "from lava.proc.dense.process import Dense\n", + "from lava.magma.core.run_conditions import RunSteps\n", + "from lava.magma.core.run_configs import Loihi1SimCfg\n", + 
"\n", + "# create Processes\n", + "lif = LIF()\n", + "dense = Dense()\n", + "\n", + "# connect Processes\n", + "lif.s_out.connect(dense.s_in)\n", + "\n", + "# execute Processes\n", + "lif.run(condition=RunSteps(num_steps=42), run_cfg=Loihi1SimCfg())\n", + "\n", + "# stop Processes\n", + "lif.stop()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, + "source": [ + "## How to learn more?\n", + "\n", + "In upcoming releases, we will continually publish more and more tutorials, covering, for example, how to transfer data between _Processes_ and how to compose the behavior of a process using other processes.\n", + "\n", + "If you want to find out more about how to compile and execute _Processes_, have a look at the [Lava documentation](https://lava-nc.org/ \"Lava Documentation\") or dive into the [Compiler](https://github.com/lava-nc/lava/tree/main/src/lava/magma/compiler/ \"Compiler Source Code\") and [RunTime source code](https://github.com/lava-nc/lava/tree/main/src/lava/magma/runtime/ \"Runtime Source Code\").\n", + "\n", + "To receive regular updates on the latest developments and releases of the Lava Software Framework please subscribe to the [INRC newsletter](http://eepurl.com/hJCyhb \"INRC Newsletter\")." 
+ ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.10" + } + }, + "nbformat": 4, + "nbformat_minor": 1 +} \ No newline at end of file diff --git a/tutorials/in_depth/tutorial05_connect_processes.ipynb b/tutorials/in_depth/tutorial05_connect_processes.ipynb index 47dd63398..ab5b4ba93 100644 --- a/tutorials/in_depth/tutorial05_connect_processes.ipynb +++ b/tutorials/in_depth/tutorial05_connect_processes.ipynb @@ -2,6 +2,7 @@ "cells": [ { "cell_type": "markdown", + "id": "d34bf688", "metadata": { "pycharm": { "name": "#%% md\n" @@ -37,29 +38,46 @@ "\n" ] }, + { + "cell_type": "markdown", + "id": "6039a847", + "metadata": {}, + "source": [ + "As first step we define the _Processes_ _P1_ and _P2_ with their respective _Ports_ _out_ and _inp_." + ] + }, { "cell_type": "code", "execution_count": 1, + "id": "54a5da6c", "metadata": {}, "outputs": [], "source": [ - "from lava.magma.core.process.process import AbstractProcess\n", - "from lava.magma.core.process.ports.ports import InPort, OutPort" + "#%load_ext lava.utils.win_exec" ] }, { - "cell_type": "markdown", + "cell_type": "code", + "execution_count": 2, + "id": "d63504fa", "metadata": {}, + "outputs": [], "source": [ - "As first step we define the _Processes_ _P1_ and _P2_ with their respective _Ports_ _out_ and _inp_." 
+ "#%%export tutorial_05\n", + "\n", + "from lava.magma.core.process.process import AbstractProcess\n", + "from lava.magma.core.process.ports.ports import InPort, OutPort" ] }, { "cell_type": "code", - "execution_count": 2, + "execution_count": 3, + "id": "81edf2e4", "metadata": {}, "outputs": [], "source": [ + "#%%export tutorial_05\n", + "\n", "# Minimal process with an OutPort\n", "class P1(AbstractProcess):\n", " def __init__(self, **kwargs):\n", @@ -78,6 +96,7 @@ }, { "cell_type": "markdown", + "id": "c4adc7cb", "metadata": {}, "source": [ "_Process_ _P1_ and _P2_ require a corresponding _ProcessModel_ which implements their _Ports_ and a simple RunConfig for sending and receiving data.\n", @@ -89,10 +108,12 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 4, + "id": "8ebfa70c", "metadata": {}, "outputs": [], "source": [ + "#%%export tutorial_05\n", "import numpy as np\n", "from lava.magma.core.model.py.model import PyLoihiProcessModel\n", "from lava.magma.core.decorator import implements, requires, tag\n", @@ -104,10 +125,13 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 5, + "id": "b4774648", "metadata": {}, "outputs": [], "source": [ + "#%%export tutorial_05\n", + "\n", "# A minimal PyProcModel implementing P1\n", "@implements(proc=P1, protocol=LoihiProtocol)\n", "@requires(CPU)\n", @@ -135,6 +159,39 @@ }, { "cell_type": "markdown", + "id": "bdbeeeb7", + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, + "source": [ + "#### Exception for Windows\n", + "\n", + "The next cell is only needed for systems running on Windows due to an issue with the multiprocessing package. In order to execute the notebook flawlessly on Windows systems, the _Processes_ needs to be imported from a script and can not be defined in the notebook itself. Therefore we export a python script from this jupyter notebook and import the _Processes_ defined above from there. 
\n", + "With this temporary solution you can still modify the classes above and rerun the notebook to see the changes. In the meantime, we are working on a permanent fix for the issue of the multiprocessing package under Windows." + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "d242d4e4", + "metadata": { + "pycharm": { + "name": "#%%\n" + }, + "scrolled": true + }, + "outputs": [], + "source": [ + "from sys import platform\n", + "if platform == \"win32\" or platform == \"cygwin\":\n", + " from tutorial_05 import P1, P2, PyProcModelA, PyProcModelB" + ] + }, + { + "cell_type": "markdown", + "id": "d81f1886", "metadata": {}, "source": [ "Next the processes _P1_ and _P2_ are instantiated and the output _Port_ _out_ from _Process_ _P1_ is connected with the input _Port_ _inp_ of _Process_ _P2_." @@ -142,7 +199,8 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 7, + "id": "448c5acf", "metadata": {}, "outputs": [], "source": [ @@ -161,6 +219,7 @@ }, { "cell_type": "markdown", + "id": "c78eac45", "metadata": {}, "source": [ "Calling `run()` on either of these _Processes_ will first call the _Compiler_. During compilation the specified connection is setup by creating a channel between _P1_ and _P2_. Now data can be transfered during execution as seen by the output print statements." @@ -168,7 +227,8 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 8, + "id": "aca0fdac", "metadata": {}, "outputs": [], "source": [ @@ -178,7 +238,8 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 9, + "id": "28038689", "metadata": { "scrolled": true }, @@ -199,6 +260,7 @@ }, { "cell_type": "markdown", + "id": "2cf0eeb2", "metadata": {}, "source": [ "The instance `sender` of P1 sent the data `[1 2]` via its _OutPort_ `out` to the _InPort_ `in` of the instance `recv` of P2, where the data is received." 
@@ -206,6 +268,7 @@ }, { "cell_type": "markdown", + "id": "8350828f", "metadata": {}, "source": [ "## Possible connections\n", @@ -222,6 +285,7 @@ }, { "cell_type": "markdown", + "id": "5c73dea7", "metadata": {}, "source": [ "## Connect multiple _InPorts_ from a single _OutPort_\n", @@ -231,7 +295,8 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 10, + "id": "a11117bb", "metadata": {}, "outputs": [], "source": [ @@ -257,7 +322,8 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 11, + "id": "f20b5578", "metadata": {}, "outputs": [ { @@ -265,8 +331,8 @@ "output_type": "stream", "text": [ "Sent output data of P1: [1 2]\n", - "Received input data for P2: [1 2]Received input data for P2: [1 2]\n", - "Received input data for P2: [1 2]\n", + "Received input data for P2: [1 2]Received input data for P2: [1 2]Received input data for P2: [1 2]\n", + "\n", "\n" ] } @@ -278,6 +344,7 @@ }, { "cell_type": "markdown", + "id": "93fcc7a0", "metadata": {}, "source": [ "The instance `sender` of P1 sent the data `[1 2]` to the 3 instances `recv1, recv2, recv3` of P2." @@ -285,6 +352,7 @@ }, { "cell_type": "markdown", + "id": "cc8e94de", "metadata": {}, "source": [ "## Connecting multiple _InPorts_ to a single _OutPort_\n", @@ -296,7 +364,8 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": 12, + "id": "0d92eca8", "metadata": {}, "outputs": [], "source": [ @@ -322,7 +391,8 @@ }, { "cell_type": "code", - "execution_count": 11, + "execution_count": 13, + "id": "8d42777e", "metadata": {}, "outputs": [ { @@ -343,6 +413,7 @@ }, { "cell_type": "markdown", + "id": "bbc2d914", "metadata": {}, "source": [ "The 3 instances `sender1, sender2, sender3` of P1 sent the data `[1 2]` to the instance `recv` of P2, where the data was summed up to `[3 6]`." 
@@ -350,6 +421,35 @@ }, { "cell_type": "markdown", + "id": "663b1e84", + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, + "source": [ + "#### Exception for Windows\n", + "Delete the created python script again." + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "id": "326cc579", + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, + "outputs": [], + "source": [ + "if platform == \"win32\" or platform == \"cygwin\":\n", + " !del tutorial_05.py" + ] + }, + { + "cell_type": "markdown", + "id": "db222d84", "metadata": {}, "source": [ "## How to learn more?\n", @@ -364,7 +464,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, @@ -378,7 +478,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.8.10" + "version": "3.10.4" } }, "nbformat": 4, diff --git a/tutorials/in_depth/tutorial06_hierarchical_processes.ipynb b/tutorials/in_depth/tutorial06_hierarchical_processes.ipynb index 29c7f51ef..6793c07fd 100644 --- a/tutorials/in_depth/tutorial06_hierarchical_processes.ipynb +++ b/tutorials/in_depth/tutorial06_hierarchical_processes.ipynb @@ -50,6 +50,16 @@ "The [ProcessModel Tutorial](#tutorial03_process_models.ipynb) walks through the creation of a LIF _Process_ and an implementing _PyLoihiProcessModel_. Our DenseLayer _Process_ also requires a Dense Lava _Process_ and _ProcessModel_ that have the behavior of a dense set of synaptic connections and weights. The Dense Connection _Process_ can be used to connect neural _Processes_. For completeness, we'll first briefly show an example LIF and Dense _Process_ and _PyLoihiProcessModel_." 
] }, + { + "cell_type": "code", + "execution_count": 1, + "id": "9d6e9d67", + "metadata": {}, + "outputs": [], + "source": [ + "%load_ext lava.utils.win_exec" + ] + }, { "cell_type": "markdown", "id": "a41d3af0", @@ -60,11 +70,16 @@ }, { "cell_type": "code", - "execution_count": 1, - "id": "c44a34ac", - "metadata": {}, + "execution_count": 2, + "id": "bbc5fe35", + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ + "%%export tutorial_06\n", "from lava.magma.core.process.process import AbstractProcess\n", "from lava.magma.core.process.variable import Var\n", "from lava.magma.core.process.ports.ports import InPort, OutPort\n", @@ -86,19 +101,40 @@ }, { "cell_type": "markdown", - "id": "3388c481", - "metadata": {}, + "id": "e58d1c00", + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "#### Create a Python Dense connection _ProcessModel_ implementing the Loihi Sync Protocol and requiring a CPU compute resource" ] }, { "cell_type": "code", - "execution_count": 2, - "id": "cf921be5", - "metadata": {}, - "outputs": [], + "execution_count": 3, + "id": "99cf3149", + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + ":17: DeprecationWarning: `np.float` is a deprecated alias for the builtin `float`. To silence this warning, use `float` by itself. Doing this will not modify any behavior and is safe. If you specifically wanted the numpy scalar type, use `np.float64` here.\n", + "Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations\n", + ":18: DeprecationWarning: `np.float` is a deprecated alias for the builtin `float`. To silence this warning, use `float` by itself. Doing this will not modify any behavior and is safe. 
If you specifically wanted the numpy scalar type, use `np.float64` here.\n", + "Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations\n" + ] + } + ], "source": [ + "%%export tutorial_06\n", + "\n", "import numpy as np\n", "\n", "from lava.magma.core.sync.protocols.loihi_protocol import LoihiProtocol\n", @@ -125,19 +161,29 @@ }, { "cell_type": "markdown", - "id": "af429f49-0493-4096-a914-a9972f7c5fcb", - "metadata": {}, + "id": "e7ea474f", + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "#### Create a LIF neuron _Process_" ] }, { "cell_type": "code", - "execution_count": 3, - "id": "f909ec9e-2d54-44e0-a095-d2ccc19506a3", - "metadata": {}, + "execution_count": 4, + "id": "77fd7d0e", + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ + "%%export tutorial_06\n", + "\n", "from lava.magma.core.process.process import AbstractProcess\n", "from lava.magma.core.process.variable import Var\n", "from lava.magma.core.process.ports.ports import InPort, OutPort\n", @@ -179,19 +225,50 @@ }, { "cell_type": "markdown", - "id": "6c58564f-9fd4-4560-ad53-d2ec4b7c52a5", - "metadata": {}, + "id": "f33ba053", + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "#### Create a Python LIF neuron _ProcessModel_ implementing the Loihi Sync Protocol and requiring a CPU compute resource" ] }, { "cell_type": "code", - "execution_count": 4, - "id": "54b48aa4-a35a-496f-9b07-0dc41d03cb9b", - "metadata": {}, - "outputs": [], + "execution_count": 5, + "id": "89018a8d", + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + ":15: DeprecationWarning: `np.float` is a deprecated alias for the builtin `float`. To silence this warning, use `float` by itself. Doing this will not modify any behavior and is safe. 
If you specifically wanted the numpy scalar type, use `np.float64` here.\n", + "Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations\n", + ":17: DeprecationWarning: `np.float` is a deprecated alias for the builtin `float`. To silence this warning, use `float` by itself. Doing this will not modify any behavior and is safe. If you specifically wanted the numpy scalar type, use `np.float64` here.\n", + "Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations\n", + ":18: DeprecationWarning: `np.float` is a deprecated alias for the builtin `float`. To silence this warning, use `float` by itself. Doing this will not modify any behavior and is safe. If you specifically wanted the numpy scalar type, use `np.float64` here.\n", + "Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations\n", + ":19: DeprecationWarning: `np.float` is a deprecated alias for the builtin `float`. To silence this warning, use `float` by itself. Doing this will not modify any behavior and is safe. If you specifically wanted the numpy scalar type, use `np.float64` here.\n", + "Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations\n", + ":20: DeprecationWarning: `np.float` is a deprecated alias for the builtin `float`. To silence this warning, use `float` by itself. Doing this will not modify any behavior and is safe. If you specifically wanted the numpy scalar type, use `np.float64` here.\n", + "Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations\n", + ":21: DeprecationWarning: `np.float` is a deprecated alias for the builtin `float`. To silence this warning, use `float` by itself. Doing this will not modify any behavior and is safe. 
If you specifically wanted the numpy scalar type, use `np.float64` here.\n", + "Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations\n", + ":22: DeprecationWarning: `np.float` is a deprecated alias for the builtin `float`. To silence this warning, use `float` by itself. Doing this will not modify any behavior and is safe. If you specifically wanted the numpy scalar type, use `np.float64` here.\n", + "Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations\n" + ] + } + ], "source": [ + "%%export tutorial_06\n", + "\n", "import numpy as np\n", "from lava.magma.core.sync.protocols.loihi_protocol import LoihiProtocol\n", "from lava.magma.core.model.py.ports import PyInPort, PyOutPort\n", @@ -228,27 +305,41 @@ }, { "cell_type": "markdown", - "id": "639dc805", - "metadata": {}, + "id": "aa32bafd", + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "## Create a DenseLayer Hierarchical _Process_ that encompasses Dense and LIF _Process_ behavior" ] }, { "cell_type": "markdown", - "id": "c0a6aa43", - "metadata": {}, + "id": "be020b1a", + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "Now we create a DenseLayer _Hierarchical Process_ combining LIF neural _Processes_ and Dense connection _Processes_. Our _Hierarchical Process_ contains all of the variables (`u`, `v`, `bias`, `du`, `dv`, `vth`, and `s_out`) native to the LIF _Process_ plus the `weights` variable native to the Dense _Process_. The InPort to our _Hierarchical Process_ is `s_in`, which represents the spike inputs to our Dense synaptic connections. These Dense connections synapse onto a population of LIF neurons. The OutPort of our _Hierarchical Process_ is `s_out`, which represents the spikes output by the layer of LIF neurons." 
] }, { "cell_type": "code", - "execution_count": 5, - "id": "3035e530", - "metadata": {}, + "execution_count": 6, + "id": "32363de8", + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ + "%%export tutorial_06\n", + "\n", "class DenseLayer(AbstractProcess):\n", " \"\"\"Combines Dense and LIF Processes.\n", " \"\"\"\n", @@ -280,16 +371,24 @@ }, { "cell_type": "markdown", - "id": "fd62de90", - "metadata": {}, + "id": "63fd361b", + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "## Create a _SubProcessModel_ that implements the DenseLayer _Process_ using Dense and LIF child _Processes_" ] }, { "cell_type": "markdown", - "id": "97ee227b", - "metadata": {}, + "id": "5372a5cc", + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "Now we will create the _SubProcessModel_ that implements our DenseLayer _Process_. This inherits from the _AbstractSubProcessModel_ class. Recall that _SubProcessModels_ also inherit the compute resource requirements from the _ProcessModels_ of their child _Processes_. In this example, we will use the LIF and Dense _ProcessModels_ requiring a CPU compute resource that were defined earlier in the tutorial, and `SubDenseLayerModel` will therefore implicitly require the CPU compute resource. 
\n", "\n", @@ -302,11 +401,17 @@ }, { "cell_type": "code", - "execution_count": 6, - "id": "ddd9daba", - "metadata": {}, + "execution_count": 7, + "id": "d9196df4", + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ + "%%export tutorial_06\n", + "\n", "import numpy as np\n", "\n", "from lava.proc.lif.process import LIF\n", @@ -350,25 +455,68 @@ }, { "cell_type": "markdown", - "id": "0e38afa1", - "metadata": {}, + "id": "5a900468", + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, + "source": [ + "#### Exception for Windows\n", + "\n", + "The next cell is only needed for systems running on Windows due to an issue with the multiprocessing package. In order to execute the notebook flawlessly on Windows systems, the LIF class needs to be imported from a script and can not be defined in the notebook itself. Therefore we export a python script from this jupyter notebook and import the LIF class defined above from there.\n", + "With this temporary solution you can still modify the classes above and rerun the notebook to see the changes. In the meantime, we are working on a permanent fix for the issue of the multiprocessing package under Windows." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "c307b156", + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, + "outputs": [], + "source": [ + "from sys import platform\n", + "if platform == \"win32\" or platform == \"cygwin\":\n", + " from tutorial_06 import DenseLayer" + ] + }, + { + "cell_type": "markdown", + "id": "9dfb62a5", + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "## Run the DenseLayer _Process_" ] }, { "cell_type": "markdown", - "id": "763a4881-0e8d-4d0a-84c4-c2bde204247a", - "metadata": {}, + "id": "103ea53c", + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "#### Run Connected DenseLayer _Processes_" ] }, { "cell_type": "code", - "execution_count": 12, - "id": "657dc72a-4507-4db4-8364-a4be54779bd5", - "metadata": {}, + "execution_count": 9, + "id": "cfa0ddfa", + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [ { "name": "stdout", @@ -381,71 +529,138 @@ "\n", "\n", " ----- \n", - "\n", - "t: 0\n", - "Layer 0 v: [4. 4. 4.]\n", - "Layer 1 u: [0. 0. 0.]\n", - "Layer 1 v: [4. 4. 4.]\n", - "\n", - " ----- \n", - "\n", - "t: 1\n", - "Layer 0 v: [8. 8. 8.]\n", - "Layer 1 u: [0. 0. 0.]\n", - "Layer 1 v: [8. 8. 8.]\n", - "\n", - " ----- \n", - "\n", - "t: 2\n", - "Layer 0 v: [0. 0. 0.]\n", - "Layer 1 u: [0. 1. 0.]\n", - "Layer 1 v: [0. 0. 0.]\n", - "\n", - " ----- \n", - "\n", - "t: 3\n", - "Layer 0 v: [4. 4. 4.]\n", - "Layer 1 u: [0. 1. 0.]\n", - "Layer 1 v: [4. 5. 4.]\n", - "\n", - " ----- \n", - "\n", - "t: 4\n", - "Layer 0 v: [8. 8. 8.]\n", - "Layer 1 u: [0. 1. 0.]\n", - "Layer 1 v: [8. 0. 8.]\n", - "\n", - " ----- \n", - "\n", - "t: 5\n", - "Layer 0 v: [0. 0. 0.]\n", - "Layer 1 u: [0. 2. 0.]\n", - "Layer 1 v: [0. 6. 0.]\n", - "\n", - " ----- \n", - "\n", - "t: 6\n", - "Layer 0 v: [4. 4. 4.]\n", - "Layer 1 u: [0. 2. 0.]\n", - "Layer 1 v: [4. 0. 4.]\n", - "\n", - " ----- \n", - "\n", - "t: 7\n", - "Layer 0 v: [8. 8. 8.]\n", - "Layer 1 u: [0. 2. 
0.]\n", - "Layer 1 v: [8. 6. 8.]\n", - "\n", - " ----- \n", - "\n", - "t: 8\n", - "Layer 0 v: [0. 0. 0.]\n", - "Layer 1 u: [0. 3. 0.]\n", - "Layer 1 v: [0. 0. 0.]\n", - "\n", - " ----- \n", "\n" ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Process SystemProcess-4:\n", + "Process SystemProcess-3:\n", + "Process SystemProcess-5:\n", + "Process SystemProcess-6:\n", + "Traceback (most recent call last):\n", + "Traceback (most recent call last):\n", + "Traceback (most recent call last):\n", + "Traceback (most recent call last):\n", + " File \"/usr/lib/python3.10/multiprocessing/process.py\", line 315, in _bootstrap\n", + " self.run()\n", + " File \"/usr/lib/python3.10/multiprocessing/process.py\", line 315, in _bootstrap\n", + " self.run()\n", + " File \"/usr/lib/python3.10/multiprocessing/process.py\", line 315, in _bootstrap\n", + " self.run()\n", + " File \"/usr/lib/python3.10/multiprocessing/process.py\", line 315, in _bootstrap\n", + " self.run()\n", + " File \"/home/philipp/lava-nc/lava/src/lava/magma/runtime/message_infrastructure/multiprocessing.py\", line 39, in run\n", + " mp.Process.run(self)\n", + " File \"/home/philipp/lava-nc/lava/src/lava/magma/runtime/message_infrastructure/multiprocessing.py\", line 39, in run\n", + " mp.Process.run(self)\n", + " File \"/home/philipp/lava-nc/lava/src/lava/magma/runtime/message_infrastructure/multiprocessing.py\", line 39, in run\n", + " mp.Process.run(self)\n", + " File \"/home/philipp/lava-nc/lava/src/lava/magma/runtime/message_infrastructure/multiprocessing.py\", line 39, in run\n", + " mp.Process.run(self)\n", + " File \"/usr/lib/python3.10/multiprocessing/process.py\", line 108, in run\n", + " self._target(*self._args, **self._kwargs)\n", + " File \"/usr/lib/python3.10/multiprocessing/process.py\", line 108, in run\n", + " self._target(*self._args, **self._kwargs)\n", + " File \"/usr/lib/python3.10/multiprocessing/process.py\", line 108, in run\n", + " self._target(*self._args, 
**self._kwargs)\n", + " File \"/usr/lib/python3.10/multiprocessing/process.py\", line 108, in run\n", + " self._target(*self._args, **self._kwargs)\n", + " File \"/home/philipp/lava-nc/lava/src/lava/magma/runtime/runtime.py\", line 92, in target_fn\n", + " actor.start(*args, **kwargs)\n", + " File \"/home/philipp/lava-nc/lava/src/lava/magma/runtime/runtime.py\", line 92, in target_fn\n", + " actor.start(*args, **kwargs)\n", + " File \"/home/philipp/lava-nc/lava/src/lava/magma/runtime/runtime.py\", line 92, in target_fn\n", + " actor.start(*args, **kwargs)\n", + " File \"/home/philipp/lava-nc/lava/src/lava/magma/runtime/runtime.py\", line 92, in target_fn\n", + " actor.start(*args, **kwargs)\n", + " File \"/home/philipp/lava-nc/lava/src/lava/magma/core/model/py/model.py\", line 85, in start\n", + " self.run()\n", + " File \"/home/philipp/lava-nc/lava/src/lava/magma/core/model/py/model.py\", line 85, in start\n", + " self.run()\n", + " File \"/home/philipp/lava-nc/lava/src/lava/magma/runtime/runtime_services/runtime_service.py\", line 94, in start\n", + " self.run()\n", + " File \"/home/philipp/lava-nc/lava/src/lava/magma/core/model/py/model.py\", line 85, in start\n", + " self.run()\n", + " File \"/home/philipp/lava-nc/lava/src/lava/magma/core/model/py/model.py\", line 172, in run\n", + " self._cmd_handlers[cmd]()\n", + " File \"/home/philipp/lava-nc/lava/src/lava/magma/core/model/py/model.py\", line 191, in run\n", + " self._action = self._selector.select(*self._channel_actions)\n", + " File \"/home/philipp/lava-nc/lava/src/lava/magma/runtime/runtime_services/runtime_service.py\", line 356, in run\n", + " self._get_pm_resp()\n", + " File \"/home/philipp/lava-nc/lava/src/lava/magma/core/model/py/model.py\", line 313, in _spike\n", + " self.run_spk()\n", + " File \"/home/philipp/lava-nc/lava/src/lava/magma/core/model/py/model.py\", line 172, in run\n", + " self._cmd_handlers[cmd]()\n" + ] + }, + { + "ename": "KeyboardInterrupt", + "evalue": "", + "output_type": 
"error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)", + "Input \u001b[0;32mIn [9]\u001b[0m, in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 19\u001b[0m rcfg \u001b[38;5;241m=\u001b[39m Loihi1SimCfg(select_tag\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mfloating_pt\u001b[39m\u001b[38;5;124m'\u001b[39m, select_sub_proc_model\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mTrue\u001b[39;00m)\n\u001b[1;32m 21\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m t \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mrange\u001b[39m(\u001b[38;5;241m9\u001b[39m):\n\u001b[1;32m 22\u001b[0m \u001b[38;5;66;03m#running layer 1 runs all connected layers (layer 0)\u001b[39;00m\n\u001b[0;32m---> 23\u001b[0m \u001b[43mlayer1\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mrun\u001b[49m\u001b[43m(\u001b[49m\u001b[43mcondition\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mRunSteps\u001b[49m\u001b[43m(\u001b[49m\u001b[43mnum_steps\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;241;43m1\u001b[39;49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\u001b[43mrun_cfg\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mrcfg\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 24\u001b[0m \u001b[38;5;28mprint\u001b[39m(\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mt: \u001b[39m\u001b[38;5;124m'\u001b[39m,t)\n\u001b[1;32m 25\u001b[0m \u001b[38;5;28mprint\u001b[39m(\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mLayer 0 v: \u001b[39m\u001b[38;5;124m'\u001b[39m, layer0\u001b[38;5;241m.\u001b[39mv\u001b[38;5;241m.\u001b[39mget())\n", + "File \u001b[0;32m~/lava-nc/lava/src/lava/magma/core/process/process.py:424\u001b[0m, in \u001b[0;36mAbstractProcess.run\u001b[0;34m(self, condition, run_cfg)\u001b[0m\n\u001b[1;32m 419\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_runtime \u001b[38;5;241m=\u001b[39m 
Runtime(executable,\n\u001b[1;32m 420\u001b[0m ActorType\u001b[38;5;241m.\u001b[39mMultiProcessing,\n\u001b[1;32m 421\u001b[0m loglevel\u001b[38;5;241m=\u001b[39m\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mloglevel)\n\u001b[1;32m 422\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_runtime\u001b[38;5;241m.\u001b[39minitialize()\n\u001b[0;32m--> 424\u001b[0m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_runtime\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mstart\u001b[49m\u001b[43m(\u001b[49m\u001b[43mcondition\u001b[49m\u001b[43m)\u001b[49m\n", + "File \u001b[0;32m~/lava-nc/lava/src/lava/magma/runtime/runtime.py:318\u001b[0m, in \u001b[0;36mRuntime.start\u001b[0;34m(self, run_condition)\u001b[0m\n\u001b[1;32m 315\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_is_initialized:\n\u001b[1;32m 316\u001b[0m \u001b[38;5;66;03m# Start running\u001b[39;00m\n\u001b[1;32m 317\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_is_started \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mTrue\u001b[39;00m\n\u001b[0;32m--> 318\u001b[0m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_run\u001b[49m\u001b[43m(\u001b[49m\u001b[43mrun_condition\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 319\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m 320\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mlog\u001b[38;5;241m.\u001b[39minfo(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mRuntime not initialized yet.\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n", + "File \u001b[0;32m~/lava-nc/lava/src/lava/magma/runtime/runtime.py:336\u001b[0m, in \u001b[0;36mRuntime._run\u001b[0;34m(self, run_condition)\u001b[0m\n\u001b[1;32m 334\u001b[0m send_port\u001b[38;5;241m.\u001b[39msend(enum_to_np(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mnum_steps))\n\u001b[1;32m 335\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m 
run_condition\u001b[38;5;241m.\u001b[39mblocking:\n\u001b[0;32m--> 336\u001b[0m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_get_resp_for_run\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 337\u001b[0m \u001b[38;5;28;01melif\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(run_condition, RunContinuous):\n\u001b[1;32m 338\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mnum_steps \u001b[38;5;241m=\u001b[39m sys\u001b[38;5;241m.\u001b[39mmaxsize\n", + "File \u001b[0;32m~/lava-nc/lava/src/lava/magma/runtime/runtime.py:279\u001b[0m, in \u001b[0;36mRuntime._get_resp_for_run\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 277\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_is_running:\n\u001b[1;32m 278\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m recv_port \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mservice_to_runtime:\n\u001b[0;32m--> 279\u001b[0m data \u001b[38;5;241m=\u001b[39m \u001b[43mrecv_port\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mrecv\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 280\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m enum_equal(data, MGMT_RESPONSE\u001b[38;5;241m.\u001b[39mREQ_PAUSE):\n\u001b[1;32m 281\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_req_paused \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mTrue\u001b[39;00m\n", + "File \u001b[0;32m~/lava-nc/lava/src/lava/magma/compiler/channels/pypychannel.py:265\u001b[0m, in \u001b[0;36mCspRecvPort.recv\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 261\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mrecv\u001b[39m(\u001b[38;5;28mself\u001b[39m):\n\u001b[1;32m 262\u001b[0m \u001b[38;5;124;03m\"\"\"\u001b[39;00m\n\u001b[1;32m 263\u001b[0m \u001b[38;5;124;03m Receive from the channel. 
Blocks if there is no data on the channel.\u001b[39;00m\n\u001b[1;32m 264\u001b[0m \u001b[38;5;124;03m \"\"\"\u001b[39;00m\n\u001b[0;32m--> 265\u001b[0m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_queue\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 266\u001b[0m result \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_array[\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_idx]\u001b[38;5;241m.\u001b[39mcopy()\n\u001b[1;32m 267\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_idx \u001b[38;5;241m=\u001b[39m (\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_idx \u001b[38;5;241m+\u001b[39m \u001b[38;5;241m1\u001b[39m) \u001b[38;5;241m%\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_size\n", + "File \u001b[0;32m~/lava-nc/lava/src/lava/magma/compiler/channels/pypychannel.py:149\u001b[0m, in \u001b[0;36mCspRecvQueue.get\u001b[0;34m(self, block, timeout, peek)\u001b[0m\n\u001b[1;32m 147\u001b[0m \u001b[38;5;28;01melif\u001b[39;00m timeout \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[1;32m 148\u001b[0m \u001b[38;5;28;01mwhile\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_qsize():\n\u001b[0;32m--> 149\u001b[0m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mnot_empty\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mwait\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 150\u001b[0m \u001b[38;5;28;01melif\u001b[39;00m timeout \u001b[38;5;241m<\u001b[39m \u001b[38;5;241m0\u001b[39m:\n\u001b[1;32m 151\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mtimeout\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124m must be a non-negative 
number\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n", + "File \u001b[0;32m/usr/lib/python3.10/threading.py:320\u001b[0m, in \u001b[0;36mCondition.wait\u001b[0;34m(self, timeout)\u001b[0m\n\u001b[1;32m 318\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m: \u001b[38;5;66;03m# restore state no matter what (e.g., KeyboardInterrupt)\u001b[39;00m\n\u001b[1;32m 319\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m timeout \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[0;32m--> 320\u001b[0m \u001b[43mwaiter\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43macquire\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 321\u001b[0m gotit \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mTrue\u001b[39;00m\n\u001b[1;32m 322\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n", + "\u001b[0;31mKeyboardInterrupt\u001b[0m: " + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + " File \"/home/philipp/lava-nc/lava/src/lava/magma/compiler/channels/pypychannel.py\", line 310, in select\n", + " self._cv.wait()\n", + " File \"/home/philipp/lava-nc/lava/src/lava/magma/runtime/runtime_services/runtime_service.py\", line 208, in _get_pm_resp\n", + " rcv_msgs.append(ptos_recv_port.recv())\n", + " File \"/home/philipp/lava-nc/lava/src/lava/magma/core/model/py/model.py\", line 313, in _spike\n", + " self.run_spk()\n", + " File \"/home/philipp/lava-nc/lava/src/lava/proc/lif/models.py\", line 55, in run_spk\n", + " a_in_data = self.a_in.recv()\n", + " File \"/usr/lib/python3.10/threading.py\", line 320, in wait\n", + " waiter.acquire()\n", + " File \"/home/philipp/lava-nc/lava/src/lava/magma/compiler/channels/pypychannel.py\", line 265, in recv\n", + " self._queue.get()\n", + " File \"/home/philipp/lava-nc/lava/src/lava/proc/dense/models.py\", line 45, in run_spk\n", + " s_in = self.s_in.recv().astype(bool)\n", + "KeyboardInterrupt\n", + " File \"/home/philipp/lava-nc/lava/src/lava/magma/core/model/py/ports.py\", line 343, in recv\n", + " return 
ft.reduce(\n", + " File \"/home/philipp/lava-nc/lava/src/lava/magma/compiler/channels/pypychannel.py\", line 149, in get\n", + " self.not_empty.wait()\n", + " File \"/home/philipp/lava-nc/lava/src/lava/magma/core/model/py/ports.py\", line 344, in \n", + " lambda acc, port: acc + self._transformer.transform(port.recv(),\n", + " File \"/home/philipp/lava-nc/lava/src/lava/magma/core/model/py/ports.py\", line 343, in recv\n", + " return ft.reduce(\n", + " File \"/usr/lib/python3.10/threading.py\", line 320, in wait\n", + " waiter.acquire()\n", + " File \"/home/philipp/lava-nc/lava/src/lava/magma/core/model/py/ports.py\", line 344, in \n", + " lambda acc, port: acc + self._transformer.transform(port.recv(),\n", + " File \"/home/philipp/lava-nc/lava/src/lava/magma/compiler/channels/pypychannel.py\", line 265, in recv\n", + " self._queue.get()\n", + " File \"/home/philipp/lava-nc/lava/src/lava/magma/compiler/channels/pypychannel.py\", line 265, in recv\n", + " self._queue.get()\n", + "KeyboardInterrupt\n", + " File \"/home/philipp/lava-nc/lava/src/lava/magma/compiler/channels/pypychannel.py\", line 149, in get\n", + " self.not_empty.wait()\n", + " File \"/home/philipp/lava-nc/lava/src/lava/magma/compiler/channels/pypychannel.py\", line 149, in get\n", + " self.not_empty.wait()\n", + " File \"/usr/lib/python3.10/threading.py\", line 320, in wait\n", + " waiter.acquire()\n", + " File \"/usr/lib/python3.10/threading.py\", line 320, in wait\n", + " waiter.acquire()\n", + "KeyboardInterrupt\n", + "KeyboardInterrupt\n" + ] } ], "source": [ @@ -480,6 +695,36 @@ " print('\\n ----- \\n')" ] }, + { + "cell_type": "code", + "execution_count": null, + "id": "fde367f7", + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, + "outputs": [], + "source": [ + "#### Exception for Windows\n", + "Delete the created python script again." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c77799b9", + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, + "outputs": [], + "source": [ + "if platform == \"win32\" or platform == \"cygwin\":\n", + " !del tutorial_06.py" + ] + }, { "cell_type": "markdown", "id": "3e7349aa-56cf-4759-9af2-15bebd63e399", @@ -509,7 +754,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.8.10" + "version": "3.10.4" } }, "nbformat": 4, diff --git a/tutorials/in_depth/tutorial07_remote_memory_access.ipynb b/tutorials/in_depth/tutorial07_remote_memory_access.ipynb index 4a933d82b..ab949aa10 100644 --- a/tutorials/in_depth/tutorial07_remote_memory_access.ipynb +++ b/tutorials/in_depth/tutorial07_remote_memory_access.ipynb @@ -3,7 +3,11 @@ { "cell_type": "markdown", "id": "8ff39dda", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "*Copyright (C) 2021 Intel Corporation*
\n", "*SPDX-License-Identifier: BSD-3-Clause*
\n", @@ -30,7 +34,11 @@ { "cell_type": "markdown", "id": "d3568645", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "## Recommended tutorials before starting: \n", "\n", @@ -45,7 +53,11 @@ { "cell_type": "markdown", "id": "b2e5ed15", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "## Create a minimal _Process_ and _ProcessModel_ with a _RefPort_" ] @@ -53,7 +65,11 @@ { "cell_type": "markdown", "id": "9407a4f8", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "The [ProcessModel Tutorial](#tutorial03_process_models.ipynb) walks through the creation of _Processes_ and corresponding _ProcessModels_. In order to demonstrate RefPorts we create a minimal process P1 with a _RefPort_ `ref` and a minimal process P2 with a _Var_ `var`. \n", "\n", @@ -63,10 +79,26 @@ { "cell_type": "code", "execution_count": 1, - "id": "b72d269a", + "id": "5f5e6f2a", "metadata": {}, "outputs": [], "source": [ + "%load_ext lava.utils.win_exec" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "b72d269a", + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, + "outputs": [], + "source": [ + "%%export tutorial_07\n", + "\n", "from lava.magma.core.process.process import AbstractProcess\n", "from lava.magma.core.process.variable import Var\n", "from lava.magma.core.process.ports.ports import RefPort\n", @@ -89,7 +121,11 @@ { "cell_type": "markdown", "id": "d3beee93", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "#### Create a Python Process Model implementing the Loihi Sync Protocol and requiring a CPU compute resource\n", "We also create the corresponding _ProcessModels_ PyProcModel1 and PyProcModel2 which implement the process P1 and P2. The value of the _Var_ of P2 `var` is initialized with the value 5. 
The behavior we implement prints out the value of the `var` in P1 every time step, demonstrating the **read** ability of a _RefPort_ `ref`. Afterwards we set the value of `var` by adding the current time step to it and write it with `ref`, demonstrating the **write** abiltity of a _RefPort_." @@ -97,11 +133,17 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": 3, "id": "b9b8bad0", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ + "%%export tutorial_07\n", + "\n", "import numpy as np\n", "\n", "from lava.magma.core.sync.protocols.loihi_protocol import LoihiProtocol\n", @@ -139,10 +181,45 @@ " var: np.ndarray = LavaPyType(np.ndarray, np.int32)" ] }, + { + "cell_type": "markdown", + "id": "6624fa69", + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, + "source": [ + "#### Exception for Windows\n", + "\n", + "The next cell is only needed for systems running on Windows due to an issue with the multiprocessing package. In order to execute the notebook flawlessly on Windows systems, the _Processes_ needs to be imported from a script and can not be defined in the notebook itself. Therefore we export a python script from this jupyter notebook and import the _Processes_ defined above from there. \n", + "With this temporary solution you can still modify the classes above and rerun the notebook to see the changes. In the meantime, we are working on a permanent fix for the issue of the multiprocessing package under Windows." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "62e23c47", + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, + "outputs": [], + "source": [ + "from sys import platform\n", + "if platform == \"win32\" or platform == \"cygwin\":\n", + " from tutorial_07 import P1, P2, PyProcModel1, PyProcModel2" + ] + }, { "cell_type": "markdown", "id": "614aa4be", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "## Run the _Processes_\n", "The _RefPort_ `ref` needs to be connected with the _Var_ `var`, before execution. The expected output will be the initial value 5 of `var` at the beginning, followed by 6 (5+1), 8 (6+2), 11 (8+3), 15 (11+4)." @@ -150,9 +227,13 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 5, "id": "47698fc0", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [ { "name": "stdout", @@ -185,7 +266,11 @@ { "cell_type": "markdown", "id": "b9bf440f", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "## Implicit and explicit VarPorts\n", "In the example above we demonstrated the read and write ability of a _RefPort_ which used an **implicit** _VarPort_ to connect to the _Var_. An implicit _VarPort_ is created when `connect_var(..)` is used to connect a _RefPort_ with a _Var_. A _RefPort_ can also be connected to a _VarPort_ **explicitly** defined in a _Process_ using `connect(..)`. In order to demonstrate explicit _VarPorts_ we redefine _Process_ P2 and the corresponding _ProcessModel_." 
@@ -193,15 +278,23 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 6, "id": "d9dd7405", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ + "%%export tutorial_07_a\n", + "\n", + "from lava.magma.core.process.process import AbstractProcess\n", "from lava.magma.core.process.ports.ports import VarPort\n", + "from lava.magma.core.process.variable import Var\n", "\n", "# A minimal process with a Var and an explicit VarPort\n", - "class P2(AbstractProcess):\n", + "class P2Explicit(AbstractProcess):\n", " def __init__(self, **kwargs):\n", " super().__init__(**kwargs)\n", " self.var = Var(shape=(1,), init=5)\n", @@ -210,34 +303,78 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 7, "id": "e2215c6d", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [ + "%%export tutorial_07_a\n", + "\n", + "import numpy as np\n", + "from lava.magma.core.sync.protocols.loihi_protocol import LoihiProtocol\n", + "from lava.magma.core.model.py.ports import PyRefPort\n", + "from lava.magma.core.model.py.type import LavaPyType\n", + "from lava.magma.core.resources import CPU\n", + "from lava.magma.core.decorator import implements, requires\n", + "from lava.magma.core.model.py.model import PyLoihiProcessModel\n", "from lava.magma.core.model.py.ports import PyVarPort\n", "\n", "# A minimal PyProcModel implementing P2\n", - "@implements(proc=P2, protocol=LoihiProtocol)\n", + "@implements(proc=P2Explicit, protocol=LoihiProtocol)\n", "@requires(CPU)\n", - "class PyProcModel2(PyLoihiProcessModel):\n", + "class PyProcModel2Explicit(PyLoihiProcessModel):\n", " var: np.ndarray = LavaPyType(np.ndarray, np.int32)\n", " var_port: PyVarPort = LavaPyType(PyVarPort.VEC_DENSE, int)" ] }, { "cell_type": "markdown", - "id": "683df3ff", + "id": "ea62fc26", + "metadata": {}, + "source": [ + "#### Exception for Windows\n", + "\n", + "The next cell is only needed 
for systems running on Windows due to an issue with the multiprocessing package. In order to execute the notebook flawlessly on Windows systems, the _Processes_ needs to be imported from a script and can not be defined in the notebook itself. Therefore we export a python script from this jupyter notebook and import the _Processes_ defined above from there. \n", + "With this temporary solution you can still modify the classes above and rerun the notebook to see the changes. In the meantime, we are working on a permanent fix for the issue of the multiprocessing package under Windows." + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "8ddad965", "metadata": {}, + "outputs": [], + "source": [ + "from sys import platform\n", + "if platform == \"win32\" or platform == \"cygwin\":\n", + " from tutorial_07_a import P2Explicit, PyProcModel2Explicit" + ] + }, + { + "cell_type": "markdown", + "id": "683df3ff", + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "This time the _RefPort_ `ref` is connected to the explicitly defined _VarPort_ `var_port`. The output is the same as before." ] }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 11, "id": "bfc0ec3b", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [ { "name": "stdout", @@ -254,7 +391,7 @@ "source": [ "# Create process P1 and P2\n", "proc1 = P1()\n", - "proc2 = P2()\n", + "proc2 = P2Explicit()\n", "\n", "# Connect RefPort 'ref' of P1 with VarPort 'var_port' of P2\n", "proc1.ref.connect(proc2.var_port)\n", @@ -267,7 +404,11 @@ { "cell_type": "markdown", "id": "ff8db42e", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "## Options to connect RefPorts and VarPorts\n", "_RefPorts_ can be connected in different ways to _Vars_ and _VarPorts_. _RefPorts_ and _VarPorts_ can also be connected to themselves in case of hierarchical processes. 
\n", @@ -282,10 +423,43 @@ "* _VarPorts_ can receive connections from _VarPorts_ or _RefPorts_ using `connect_from(..)`" ] }, + { + "cell_type": "markdown", + "id": "ae1e974b", + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, + "source": [ + "#### Exception for Windows\n", + "Delete the created python script again." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2a72e73f", + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, + "outputs": [], + "source": [ + "if platform == \"win32\" or platform == \"cygwin\":\n", + " !del tutorial_07.py\n", + " !del tutorial_07_a.py" + ] + }, { "cell_type": "markdown", "id": "1eb2d987", - "metadata": {}, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "## How to learn more?\n", "\n", @@ -297,8 +471,12 @@ { "cell_type": "code", "execution_count": null, - "id": "2bf6cbaa", - "metadata": {}, + "id": "a242b90e", + "metadata": { + "pycharm": { + "name": "#%%\n" + } + }, "outputs": [], "source": [] } @@ -319,7 +497,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.9.13" + "version": "3.10.4" } }, "nbformat": 4, From 771ae8505b4b1c932bb1410ca7000cb62fe0c78c Mon Sep 17 00:00:00 2001 From: weidel-p Date: Mon, 27 Jun 2022 13:35:41 +0200 Subject: [PATCH 14/14] lint --- src/lava/utils/win_exec.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/lava/utils/win_exec.py b/src/lava/utils/win_exec.py index 35a8986bd..623048f44 100644 --- a/src/lava/utils/win_exec.py +++ b/src/lava/utils/win_exec.py @@ -3,13 +3,12 @@ # See: https://spdx.org/licenses/ from sys import platform - - try: from IPython import get_ipython except ModuleNotFoundError: pass + def export(filename, cell): '''Exports the cell to the given file name if system is windows.''' if platform == "win32" or platform == "cygwin": @@ -28,5 +27,6 @@ def unload_ipython_extension(shell): '''Unregisters the magic function when the extension unloads.''' 
del shell.magics_manager.magics['cell']['export'] + if __name__ == "__main__": pass