diff --git a/.github/workflows/python_actions.yml b/.github/workflows/python_actions.yml index 54052632b3..72bf8c77e0 100644 --- a/.github/workflows/python_actions.yml +++ b/.github/workflows/python_actions.yml @@ -72,6 +72,7 @@ jobs: package: ${{ env.BASE_PKG }} exitcheck: 31 # Action fails on any message language: en_GB + rcfile: global_strict - name: Lint with mypy run: mypy $BASE_PKG diff --git a/.pylint_dict.txt b/.pylint_dict.txt index 211aee4278..734b056d82 100644 --- a/.pylint_dict.txt +++ b/.pylint_dict.txt @@ -12,101 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -# Our abbreviations/names -DDL -eieio -HBP -scp -# Our special words -borrowable -collab -cpu -iobuf -iptag -iptags -params -precompiled -malloc -malloc'ed -reentrant -regex -replacer -sdram -sql -txrx -url -xy - -# Our types -AbstractEdge -AbstractEIEIOMessage -AbstractMulticastRoutingTable -AbstractSendsBuffersFromHost -AbstractVertex -ApplicationEdge -ApplicationGraph -ApplicationVertex -BMPConnectionData -BufferedSendingRegion -CommandSender -CoreToFill -CoreSubset -CoreSubsets -CPUState -DatabaseReader -DataSpecificationExecutor -DataSpecificationGenerator -DataSpeedUpPacketGatherMachineVertex -DataWritten -EIEIOPrefix -EIEIOType -ExecutableTargets -ExecutableType -ExtraMonitorSupportMachineVertex -IOBuffer -IPTag -IPtagResource -LiveOutputDevice -LivePacketGatherMachineVertex -LivePacketGatherParameters -MachineEdge -MachineGraph -MachineVertex -MultiCastCommand -MulticastRoutingTable -MulticastRoutingTables -PacmanConfigurationException -ProgressBar -ProvenanceDataItem -ReInjectionStatus -ReverseIPTag -RouterDiagnostics -RoutingTableEntry -SCAMPConnection -SDPMessage -SpallocClient -SpallocJob -UDPConnection -UnCompressedMulticastRoutingTable -VariableSDRAM - -# Python packages -pacman -PyNN -spalloc -spinnman -SpiNNakerGraphFrontEnd -struct - -# Python types -FileIO -RawIOBase -Struct -TextIOBase - -# sqlite3.cursor methods 
mentioned in docs -fetchone -lastrowid -rowcount -WAL \ No newline at end of file +# We use a single exception files for all the main repsitories +# It can be found at: +# https://github.com/SpiNNakerManchester/SupportScripts/blob/master/actions/pylint/default_dict.txt diff --git a/.pylintrc b/.pylintrc deleted file mode 100644 index 2a9da1fab9..0000000000 --- a/.pylintrc +++ /dev/null @@ -1,473 +0,0 @@ -# Copyright (c) 2019 The University of Manchester -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -[MASTER] - -# A comma-separated list of package or module names from where C extensions may -# be loaded. Extensions are loading into the active Python interpreter and may -# run arbitrary code -extension-pkg-whitelist=numpy - -# Add files or directories to the blacklist. They should be base names, not -# paths. -ignore=CVS - -# Add files or directories matching the regex patterns to the blacklist. The -# regex matches against base names, not paths. -ignore-patterns= - -# Python code to execute, usually for sys.path manipulation such as -# pygtk.require(). -#init-hook= - -# Use multiple processes to speed up Pylint. -jobs=1 - -# List of plugins (as comma separated values of python modules names) to load, -# usually to register additional checkers. -load-plugins= - -# Pickle collected data for later comparisons. -persistent=yes - -# Specify a configuration file. 
-#rcfile= - -# When enabled, pylint would attempt to guess common misconfiguration and emit -# user-friendly hints instead of false-positive error messages -suggestion-mode=yes - -# Allow loading of arbitrary C extensions. Extensions are imported into the -# active Python interpreter and may run arbitrary code. -unsafe-load-any-extension=no - - -[MESSAGES CONTROL] - -# Only show warnings with the listed confidence levels. Leave empty to show -# all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED -confidence= - -# Disable the message, report, category or checker with the given id(s). You -# can either give multiple identifiers separated by comma (,) or put this -# option multiple times (only on the command line, not in the configuration -# file where it should appear only once).You can also use "--disable=all" to -# disable everything first and then reenable specific checks. For example, if -# you want to run only the similarities checker, you can use "--disable=all -# --enable=similarities". If you want to run only the classes checker, but have -# no Warning level messages displayed, use"--disable=all --enable=classes -# --disable=W" -disable=R,C,unsubscriptable-object -# False positives for unsubscriptable-object. Mypy better at this class of issue -# See https://github.com/pylint-dev/pylint/issues/1498 - -# Enable the message, report, category or checker with the given id(s). You can -# either give multiple identifier separated by comma (,) or put this option -# multiple time (only on the command line, not in the configuration file where -# it should appear only once). See also the "--disable" option for examples. -enable=c-extension-no-member,C0402,C0403 - - -[REPORTS] - -# Python expression which should return a note less than 10 (10 is the highest -# note). You have access to the variables errors warning, statement which -# respectively contain the number of errors / warnings messages and the total -# number of statements analyzed. 
This is used by the global evaluation report -# (RP0004). -evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10) - -# Template used to display messages. This is a python new-style format string -# used to format the message information. See doc for all details -#msg-template= - -# Set the output format. Available formats are text, parseable, colorized, json -# and msvs (visual studio).You can also give a reporter class, eg -# mypackage.mymodule.MyReporterClass. -output-format=text - -# Tells whether to display a full report or only the messages -reports=no - -# Activate the evaluation score. -score=yes - - -[REFACTORING] - -# Maximum number of nested blocks for function / method body -max-nested-blocks=5 - -# Complete name of functions that never returns. When checking for -# inconsistent-return-statements if a never returning function is called then -# it will be considered as an explicit return statement and no message will be -# printed. -never-returning-functions=optparse.Values,sys.exit - - -[BASIC] - -# Naming style matching correct argument names -argument-naming-style=snake_case - -# Regular expression matching correct argument names. Overrides argument- -# naming-style -#argument-rgx= - -# Naming style matching correct attribute names -attr-naming-style=snake_case - -# Regular expression matching correct attribute names. Overrides attr-naming- -# style -#attr-rgx= - -# Bad variable names which should always be refused, separated by a comma -bad-names=foo, - bar, - baz, - toto, - tutu, - tata - -# Naming style matching correct class attribute names -class-attribute-naming-style=any - -# Regular expression matching correct class attribute names. Overrides class- -# attribute-naming-style -#class-attribute-rgx= - -# Naming style matching correct class names -class-naming-style=PascalCase - -# Regular expression matching correct class names. 
Overrides class-naming-style -#class-rgx= - -# Naming style matching correct constant names -const-naming-style=UPPER_CASE - -# Regular expression matching correct constant names. Overrides const-naming- -# style -#const-rgx= - -# Minimum line length for functions/classes that require docstrings, shorter -# ones are exempt. -docstring-min-length=-1 - -# Naming style matching correct function names -function-naming-style=snake_case - -# Regular expression matching correct function names. Overrides function- -# naming-style -#function-rgx= - -# Good variable names which should always be accepted, separated by a comma -good-names=i, - j, - k, - ex, - Run, - _ - -# Include a hint for the correct naming format with invalid-name -include-naming-hint=no - -# Naming style matching correct inline iteration names -inlinevar-naming-style=any - -# Regular expression matching correct inline iteration names. Overrides -# inlinevar-naming-style -#inlinevar-rgx= - -# Naming style matching correct method names -method-naming-style=snake_case - -# Regular expression matching correct method names. Overrides method-naming- -# style -#method-rgx= - -# Naming style matching correct module names -module-naming-style=snake_case - -# Regular expression matching correct module names. Overrides module-naming- -# style -#module-rgx= - -# Colon-delimited sets of names that determine each other's naming style when -# the name regexes allow several styles. -name-group= - -# Regular expression which should only match function or class names that do -# not require a docstring. -no-docstring-rgx=^_ - -# List of decorators that produce properties, such as abc.abstractproperty. Add -# to this list to register other decorators that produce valid properties. -property-classes=abc.abstractproperty - -# Naming style matching correct variable names -variable-naming-style=snake_case - -# Regular expression matching correct variable names. 
Overrides variable- -# naming-style -#variable-rgx= - - -[FORMAT] - -# Expected format of line ending, e.g. empty (any line ending), LF or CRLF. -expected-line-ending-format= - -# Regexp for a line that is allowed to be longer than the limit. -ignore-long-lines=^\s*(# )??$ - -# Number of spaces of indent required inside a hanging or continued line. -indent-after-paren=4 - -# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 -# tab). -indent-string=' ' - -# Maximum number of characters on a single line. -max-line-length=100 - -# Maximum number of lines in a module -max-module-lines=1000 - -# Allow the body of a class to be on the same line as the declaration if body -# contains single statement. -single-line-class-stmt=no - -# Allow the body of an if to be on the same line as the test if there is no -# else. -single-line-if-stmt=no - - -[LOGGING] - -# Logging modules to check that the string format arguments are in logging -# function parameter format -logging-modules=logging,FormatAdapter - - -[MISCELLANEOUS] - -# List of note tags to take in consideration, separated by a comma. -notes=FIXME, - XXX - - - -[SIMILARITIES] - -# Ignore comments when computing similarities. -ignore-comments=yes - -# Ignore docstrings when computing similarities. -ignore-docstrings=yes - -# Ignore imports when computing similarities. -ignore-imports=no - -# Minimum lines number of a similarity. -min-similarity-lines=4 - - -[SPELLING] - -# Limits count of emitted suggestions for spelling mistakes -max-spelling-suggestions=4 - -# Spelling dictionary name. Available dictionaries: none. To make it working -# install python-enchant package. -spelling-dict= - -# List of comma separated words that should not be checked. -spelling-ignore-words=pragma - -# A path to a file that contains private dictionary; one word per line. 
-spelling-private-dict-file= - -# Tells whether to store unknown words to indicated private dictionary in -# --spelling-private-dict-file option instead of raising a message. -spelling-store-unknown-words=no - - -[TYPECHECK] - -# List of decorators that produce context managers, such as -# contextlib.contextmanager. Add to this list to register other decorators that -# produce valid context managers. -contextmanager-decorators=contextlib.contextmanager - -# List of members which are set dynamically and missed by pylint inference -# system, and so shouldn't trigger E1101 when accessed. Python regular -# expressions are accepted. -generated-members=numpy.* - -# Tells whether missing members accessed in mixin class should be ignored. A -# mixin class is detected if its name ends with "mixin" (case insensitive). -ignore-mixin-members=yes - -# This flag controls whether pylint should warn about no-member and similar -# checks whenever an opaque object is returned when inferring. The inference -# can return multiple potential results while evaluating a Python object, but -# some branches might not be evaluated, which results in partial inference. In -# that case, it might be useful to still emit no-member and other checks for -# the rest of the inferred objects. -ignore-on-opaque-inference=yes - -# List of class names for which member attributes should not be checked (useful -# for classes with dynamically set attributes). This supports the use of -# qualified names. -ignored-classes=optparse.Values,thread._local,_thread._local,numpy - -# List of module names for which member attributes should not be checked -# (useful for modules/projects where namespaces are manipulated during runtime -# and thus existing member attributes cannot be deduced by static analysis. It -# supports qualified module names, as well as Unix pattern matching. -ignored-modules=numpy - -# Show a hint with possible names when a member name was not found. 
The aspect -# of finding the hint is based on edit distance. -missing-member-hint=yes - -# The minimum edit distance a name should have in order to be considered a -# similar match for a missing member name. -missing-member-hint-distance=1 - -# The total number of similar names that should be taken in consideration when -# showing a hint for a missing member. -missing-member-max-choices=1 - - -[VARIABLES] - -# List of additional names supposed to be defined in builtins. Remember that -# you should avoid to define new builtins when possible. -additional-builtins= - -# Tells whether unused global variables should be treated as a violation. -allow-global-unused-variables=yes - -# List of strings which can identify a callback function by name. A callback -# name must start or end with one of those strings. -callbacks=cb_, - _cb - -# A regular expression matching the name of dummy variables (i.e. expectedly -# not used). -dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_ - -# Argument names that match this expression will be ignored. Default to name -# with leading underscore -ignored-argument-names=_.*|^ignored_|^unused_ - -# Tells whether we should check for unused import in __init__ files. -init-import=no - -[CLASSES] - -# List of method names used to declare (i.e. assign) instance attributes. -defining-attr-methods=__init__, - __new__, - _new_run_clear, - _machine_clear, - _hard_reset - -# List of member names, which should be excluded from the protected access -# warning. -exclude-protected=_asdict, - _fields, - _replace, - _source, - _make - -# List of valid names for the first argument in a class method. -valid-classmethod-first-arg=cls - -# List of valid names for the first argument in a metaclass class method. -valid-metaclass-classmethod-first-arg=mcs - - -[DESIGN] - -# Maximum number of arguments for function / method -max-args=5 - -# Maximum number of attributes for a class (see R0902). 
-max-attributes=7 - -# Maximum number of boolean expressions in a if statement -max-bool-expr=5 - -# Maximum number of branch for function / method body -max-branches=12 - -# Maximum number of locals for function / method body -max-locals=15 - -# Maximum number of parents for a class (see R0901). -max-parents=7 - -# Maximum number of public methods for a class (see R0904). -max-public-methods=20 - -# Maximum number of return / yield for function / method body -max-returns=6 - -# Maximum number of statements in function / method body -max-statements=50 - -# Minimum number of public methods for a class (see R0903). -min-public-methods=2 - - -[IMPORTS] - -# Allow wildcard imports from modules that define __all__. -allow-wildcard-with-all=no - -# Analyse import fallback blocks. This can be used to support both Python 2 and -# 3 compatible code, which means that the block might have code that exists -# only in one or another interpreter, leading to false positives when analysed. -analyse-fallback-blocks=no - -# Deprecated modules which should not be used, separated by a comma -deprecated-modules=optparse,tkinter.tix - -# Create a graph of external dependencies in the given file (report RP0402 must -# not be disabled) -ext-import-graph= - -# Create a graph of every (i.e. internal and external) dependencies in the -# given file (report RP0402 must not be disabled) -import-graph= - -# Create a graph of internal dependencies in the given file (report RP0402 must -# not be disabled) -int-import-graph= - -# Force import order to recognize a module as part of the standard -# compatibility libraries. -known-standard-library= - -# Force import order to recognize a module as part of a third party library. -known-third-party=enchant - - -[EXCEPTIONS] - -# Exceptions that will emit a warning when being caught. 
Defaults to -# "Exception" -overgeneral-exceptions=builtins.Exception diff --git a/c_common/front_end_common_lib/include/simulation.h b/c_common/front_end_common_lib/include/simulation.h index 148d8aa475..00e6b737b0 100644 --- a/c_common/front_end_common_lib/include/simulation.h +++ b/c_common/front_end_common_lib/include/simulation.h @@ -220,8 +220,8 @@ void simulation_dma_transfer_done_callback_off(uint tag); //! or not (false) void simulation_set_uses_timer(bool sim_uses_timer); -//! \brief sets the simulation to enter a synchronization barrier repeatedly -//! during the simulation. The synchronization message must be sent +//! \brief sets the simulation to enter a synchronisation barrier repeatedly +//! during the simulation. The synchronisation message must be sent //! from the host. Note simulation_is_finished() must be used each //! timestep to cause the pause to happen. //! \param[in] n_steps: The number of steps of simulation between synchronisations diff --git a/c_common/front_end_common_lib/src/simulation.c b/c_common/front_end_common_lib/src/simulation.c index aa57c3fe6a..5f631d3611 100644 --- a/c_common/front_end_common_lib/src/simulation.c +++ b/c_common/front_end_common_lib/src/simulation.c @@ -66,10 +66,10 @@ static callback_t dma_complete_callbacks[MAX_DMA_CALLBACK_TAG]; //! Whether the simulation uses the timer or not (default true) static bool uses_timer = true; -//! The number of steps to run before synchronization +//! The number of steps to run before synchronisation static uint32_t n_sync_steps; -//! The number simulation timestep at the next synchronization +//! The number simulation timestep at the next synchronisation static uint32_t next_sync_step; //! 
\brief Store basic provenance data diff --git a/fec_integration_tests/file_convertor_tests/test_write_json_machine.py b/fec_integration_tests/file_convertor_tests/test_write_json_machine.py index d1bbdcf299..d9b8e07943 100644 --- a/fec_integration_tests/file_convertor_tests/test_write_json_machine.py +++ b/fec_integration_tests/file_convertor_tests/test_write_json_machine.py @@ -142,7 +142,7 @@ def testSpin2(self): if not Ping.host_is_reachable(self.spalloc): raise unittest.SkipTest(self.spalloc + " appears to be down") set_config( - "Machine", "spalloc_user", "Integration testing ok to kill") + "Machine", "spalloc_user", "Integration testing OK to kill") set_config("Machine", "spalloc_server", self.spalloc) set_config("Machine", "spalloc_port", self.spin2Port) diff --git a/fec_integration_tests/interface/interface_functions/test_front_end_common_load_executable_images.py b/fec_integration_tests/interface/interface_functions/test_front_end_common_load_executable_images.py index 37ea3c4b26..cba9118d58 100644 --- a/fec_integration_tests/interface/interface_functions/test_front_end_common_load_executable_images.py +++ b/fec_integration_tests/interface/interface_functions/test_front_end_common_load_executable_images.py @@ -39,8 +39,7 @@ def __init__(self, test_case): def execute_flood( self, core_subsets: CoreSubsets, executable: Union[BinaryIO, bytes, str], app_id: int, *, - n_bytes: Optional[int] = None, wait: bool = False - ): # @UnusedVariable + n_bytes: Optional[int] = None, wait: bool = False): for core_subset in core_subsets.core_subsets: x, y = core_subset.x, core_subset.y for p in core_subset.processor_ids: @@ -52,8 +51,7 @@ def execute_flood( @overrides(MockableTransceiver.get_core_state_count) def get_core_state_count( self, app_id: int, state: CPUState, - xys: Optional[Iterable[Tuple[int, int]]] = None - ) -> int: # @UnusedVariable + xys: Optional[Iterable[Tuple[int, int]]] = None) -> int: return self._n_cores_in_app[app_id] diff --git a/import_hook.py 
b/import_hook.py new file mode 100644 index 0000000000..ef781088fe --- /dev/null +++ b/import_hook.py @@ -0,0 +1,28 @@ +# Copyright (c) 2017 The University of Manchester +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +This file is imported by init-hook in the rcfile +https://github.com/SpiNNakerManchester/SupportScripts/blob/master/actions/pylint/strict_rcfile + +It allows you to temporarily add the other spinnaker repositories without making them part of the permemnant python path + +Intended for use when running pylint.bash +""" +import sys +sys.path.append("../SpiNNUtils") +sys.path.append("../SpiNNMachine") +sys.path.append("../SpiNNMan") +sys.path.append("../PACMAN") +sys.path.append("../spalloc") diff --git a/pylint.bash b/pylint.bash new file mode 100644 index 0000000000..746cb8c751 --- /dev/null +++ b/pylint.bash @@ -0,0 +1,37 @@ +#!/bin/bash + +# Copyright (c) 2024 The University of Manchester +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# This bash assumes that SupportScripts has been installed in parallel + +# requires the latest pylint and pyenchant +# pip install --upgrade pylint pyenchant + +# requires the spelling dicts +# sudo apt-get -o Dpkg::Use-Pty=0 install --fix-missing enchant-2 hunspell hunspell-en-gb + +# check all +pylint --output-format=colorized --disable=R --persistent=no --jobs=1 --rcfile=../SupportScripts/actions/pylint/strict_rcfile --spelling-dict=en_GB --spelling-private-dict-file="../SupportScripts/actions/pylint/default_dict.txt" --disable=import-error spinn_front_end_common + +# +# check one test +# pylint --enable=consider-iterating-dictionary --output-format=colorized --disable=R --persistent=no --jobs=1 --rcfile=../SupportScripts/actions/pylint/strict_rcfile --spelling-dict=en_GB --spelling-private-dict-file="../SupportScripts/actions/pylint/default_dict.txt" --disable=all spinn_front_end_common + +# check spelling +# pylint --enable=invalid-characters-in-docstring,wrong-spelling-in-comment,wrong-spelling-in-docstring --output-format=colorized --disable=R --persistent=no --jobs=1 --rcfile=../SupportScripts/actions/pylint/strict_rcfile --spelling-dict=en_GB --spelling-private-dict-file="../SupportScripts/actions/pylint/default_dict.txt" --disable=all spinn_front_end_common + +# check docs including spelling +# pylint --enable=missing-function-docstring,missing-class-docstring,invalid-characters-in-docstring,wrong-spelling-in-comment,wrong-spelling-in-docstring --output-format=colorized --disable=R --persistent=no --jobs=1 --rcfile=../SupportScripts/actions/pylint/strict_rcfile --spelling-dict=en_GB --spelling-private-dict-file="../SupportScripts/actions/pylint/default_dict.txt" --disable=all spinn_front_end_common + diff --git a/spinn_front_end_common/abstract_models/impl/machine_allocation_controller.py b/spinn_front_end_common/abstract_models/impl/machine_allocation_controller.py index c74c606d44..057a28937c 100644 --- 
a/spinn_front_end_common/abstract_models/impl/machine_allocation_controller.py +++ b/spinn_front_end_common/abstract_models/impl/machine_allocation_controller.py @@ -32,7 +32,7 @@ class MachineAllocationController(object, metaclass=AbstractBase): neatly when the script dies. """ __slots__ = ( - #: boolean flag for telling this thread when the system has ended + #: Boolean flag for telling this thread when the system has ended "_exited", #: the address of the root board of the allocation "__hostname", @@ -124,6 +124,11 @@ def create_transceiver(self) -> Transceiver: return txrx def can_create_transceiver(self) -> bool: + """ + Detects if a call to create_transceiver could work. + + :rtype: bool + """ return self.__hostname is not None def __host(self, chip_x: int, chip_y: int) -> Optional[str]: diff --git a/spinn_front_end_common/abstract_models/live_output_device.py b/spinn_front_end_common/abstract_models/live_output_device.py index 7a7de5861d..4ed4e6b307 100644 --- a/spinn_front_end_common/abstract_models/live_output_device.py +++ b/spinn_front_end_common/abstract_models/live_output_device.py @@ -12,10 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. +from typing import Dict, Tuple, List from spinn_utilities.abstract_base import ( AbstractBase, abstractmethod) from pacman.model.graphs.machine.machine_vertex import MachineVertex -from typing import Dict, Tuple, List class LiveOutputDevice(object, metaclass=AbstractBase): @@ -31,7 +31,7 @@ def get_device_output_keys(self) -> Dict[MachineVertex, """ Get the atom key mapping to be output for each machine vertex received by the device to be output. Note that the device may change the keys - as they pass through it, and this needs to be recognized here. + as they pass through it, and this needs to be recognised here. 
:rtype: Dict[MachineVertex, List[Tuple[int, int]]] """ diff --git a/spinn_front_end_common/data/fec_data_view.py b/spinn_front_end_common/data/fec_data_view.py index cf2f395df2..ec28dfc84a 100644 --- a/spinn_front_end_common/data/fec_data_view.py +++ b/spinn_front_end_common/data/fec_data_view.py @@ -16,18 +16,22 @@ import os from typing import ( Dict, Iterable, Iterator, Optional, Set, Tuple, Union, List, TYPE_CHECKING) + from spinn_utilities.log import FormatAdapter from spinn_utilities.socket_address import SocketAddress from spinn_utilities.typing.coords import XY + from spinn_machine import Chip, CoreSubsets, FixedRouteEntry from spinnman.data import SpiNNManDataView from spinnman.model import ExecutableTargets from spinnman.model.enums import ExecutableType from spinnman.messages.scp.enums.signal import Signal from spinnman.spalloc import SpallocJob + from pacman.data import PacmanDataView from pacman.model.graphs.application import ApplicationEdge, ApplicationVertex from pacman.model.routing_tables import MulticastRoutingTables + if TYPE_CHECKING: # May be circular references in here; it's OK from spinn_front_end_common.abstract_models.impl import ( @@ -668,8 +672,9 @@ def get_data_in_multicast_key_to_chip_map(cls) -> Dict[XY, int]: """ Retrieve the data_in_multicast_key_to_chip_map if known. Keys are the coordinates of chips. - Values are the base keys for multicast comms received by the Data In - streaming module of the extra monitor running on those chips. + Values are the base keys for multicast communication + received by the Data In streaming module + of the extra monitor running on those chips. :rtype: dict(tuple(int,int), int) :raises ~spinn_utilities.exceptions.SpiNNUtilsException: @@ -698,7 +703,7 @@ def get_system_multicast_router_timeout_keys(cls) -> Dict[XY, int]: """ Retrieve the system_multicast_router_timeout_keys if known. Keys are the coordinates of chips. 
- Values are the base keys for multicast comms received by the + Values are the base keys for multicast communications received by the re-injector module of the extra monitor running on those chips. :rtype: dict(tuple(int,int), int) @@ -710,7 +715,7 @@ def get_system_multicast_router_timeout_keys(cls) -> Dict[XY, int]: raise cls._exception("system_multicast_router_timeout_keys") return cls.__fec_data._system_multicast_router_timeout_keys - # ipaddress + # IP address @classmethod def has_ipaddress(cls) -> bool: @@ -1001,6 +1006,7 @@ def add_live_packet_gatherer_parameters( lpg_vertex = cls.__fec_data._live_packet_recorder_params.get( live_packet_gatherer_params) if lpg_vertex is None: + # pylint: disable=import-outside-toplevel # UGLY import due to circular reference from spinn_front_end_common.utility_models import ( LivePacketGather as LPG) diff --git a/spinn_front_end_common/data/fec_data_writer.py b/spinn_front_end_common/data/fec_data_writer.py index 2d41682fc8..49fbf16b9b 100644 --- a/spinn_front_end_common/data/fec_data_writer.py +++ b/spinn_front_end_common/data/fec_data_writer.py @@ -530,7 +530,7 @@ def set_gatherer_map(self, gatherer_map: Dict[ if not isinstance( vertex, DataSpeedUpPacketGatherMachineVertex): raise self.__gatherer_map_error() - break # assume if first is ok all are + break # assume if first is OK all are except Exception as ex: # pylint: disable=broad-except raise self.__gatherer_map_error() from ex self.__fec_data._gatherer_map = gatherer_map @@ -557,7 +557,7 @@ def set_monitor_map(self, monitor_map: Dict[ raise self.__monitor_map_error() if not isinstance(vertex, ExtraMonitorSupportMachineVertex): raise self.__monitor_map_error() - break # assume if first is ok all are + break # assume if first is OK all are except TypeError: raise except Exception as ex: # pylint: disable=broad-except diff --git a/spinn_front_end_common/interface/abstract_spinnaker_base.py b/spinn_front_end_common/interface/abstract_spinnaker_base.py index 
7538b9c83b..334fcaafc2 100644 --- a/spinn_front_end_common/interface/abstract_spinnaker_base.py +++ b/spinn_front_end_common/interface/abstract_spinnaker_base.py @@ -23,14 +23,14 @@ import sys import threading import types -import requests from threading import Condition from typing import ( - Dict, Final, Iterable, Optional, Sequence, Tuple, Type, + Dict, Iterable, Optional, Sequence, Tuple, Type, TypeVar, Union, cast, final) -from numpy import __version__ as numpy_version import ebrains_drive # type: ignore[import] +from numpy import __version__ as numpy_version +import requests from spinn_utilities import __version__ as spinn_utils_version from spinn_utilities.config_holder import ( @@ -141,7 +141,7 @@ except ImportError: scipy_version = "scipy not installed" -logger: Final = FormatAdapter(logging.getLogger(__name__)) +logger = FormatAdapter(logging.getLogger(__name__)) _T = TypeVar("_T") SHARED_PATH = re.compile(r".*\/shared\/([^\/]+)") @@ -167,7 +167,7 @@ class AbstractSpinnakerBase(ConfigHandler): # "_raise_keyboard_interrupt", - # original sys.excepthook Used in exception handling and control c + # original value which is used in exception handling and control c "__sys_excepthook", # All beyond this point new for no extractor @@ -898,7 +898,7 @@ def _execute_splitter_reset(self) -> None: with FecTimer("Splitter reset", TimerWork.OTHER): splitter_reset() - # Overriden by spynaker to choose an extended algorithm + # Overridden by sPyNNaker to choose an extended algorithm def _execute_splitter_selector(self) -> None: """ Runs, times and logs the SplitterSelector. @@ -911,7 +911,7 @@ def _execute_delay_support_adder(self) -> None: Stub to allow sPyNNaker to add delay supports. """ - # Overriden by spynaker to choose a different algorithm + # Overridden by sPyNNaker to choose a different algorithm def _execute_splitter_partitioner(self) -> None: """ Runs, times and logs the SplitterPartitioner if required. 
@@ -1373,9 +1373,9 @@ def _execute_sdram_outgoing_partition_allocator(self) -> None: def _execute_control_sync(self, do_sync: bool) -> None: """ - Control synchronization on board. + Control synchronisation on board. - :param bool do_sync: Whether to enable synchronization + :param bool do_sync: Whether to enable synchronisation """ with FecTimer("Control Sync", TimerWork.CONTROL) as timer: if timer.skip_if_virtual_board(): @@ -2089,7 +2089,7 @@ def _execute_create_database_interface( """ with FecTimer("Create database interface", TimerWork.OTHER): # Used to used compressed routing tables if available on host - # TODO consider not saving router tabes. + # TODO consider not saving router tables. self._data_writer.set_database_file_path( database_interface(run_time)) @@ -2220,7 +2220,7 @@ def _do_run( except Exception as run_e: self._recover_from_error(run_e) - # reraise exception + # re-raise exception raise run_e def _recover_from_error(self, exception: Exception) -> None: diff --git a/spinn_front_end_common/interface/buffer_management/buffer_manager.py b/spinn_front_end_common/interface/buffer_management/buffer_manager.py index e3de78babd..3fc95d90df 100644 --- a/spinn_front_end_common/interface/buffer_management/buffer_manager.py +++ b/spinn_front_end_common/interface/buffer_management/buffer_manager.py @@ -51,7 +51,7 @@ eieio_type=EIEIOType.KEY_32_BIT, is_timestamp=True) # The number of bytes in each key to be sent -_N_BYTES_PER_KEY = EIEIOType.KEY_32_BIT.key_bytes # @UndefinedVariable +_N_BYTES_PER_KEY = EIEIOType.KEY_32_BIT.key_bytes _SDP_MAX_PACKAGE_SIZE = 272 @@ -332,6 +332,7 @@ def _send_initial_messages( if (not vertex.is_next_timestamp(region) and bytes_to_go >= EventStopRequest.get_min_packet_length()): data = EventStopRequest().bytestring + # pylint: disable=wrong-spelling-in-comment # logger.debug( # "Writing stop message of {} bytes to {} on {}, {}, {}" # len(data), hex(region_base_address), diff --git 
a/spinn_front_end_common/interface/buffer_management/buffer_models/sends_buffers_from_host_pre_buffered_impl.py b/spinn_front_end_common/interface/buffer_management/buffer_models/sends_buffers_from_host_pre_buffered_impl.py index eb0a7097b1..139758fbac 100644 --- a/spinn_front_end_common/interface/buffer_management/buffer_models/sends_buffers_from_host_pre_buffered_impl.py +++ b/spinn_front_end_common/interface/buffer_management/buffer_models/sends_buffers_from_host_pre_buffered_impl.py @@ -12,11 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. from typing import Collection, Dict + from spinn_utilities.abstract_base import abstractmethod from spinn_utilities.overrides import overrides -from .abstract_sends_buffers_from_host import AbstractSendsBuffersFromHost + from spinn_front_end_common.interface.buffer_management.storage_objects \ import BufferedSendingRegion +from .abstract_sends_buffers_from_host import AbstractSendsBuffersFromHost + # mypy: disable-error-code=empty-body diff --git a/spinn_front_end_common/interface/buffer_management/storage_objects/buffer_database.py b/spinn_front_end_common/interface/buffer_management/storage_objects/buffer_database.py index 6cb2990539..6fe05b339b 100644 --- a/spinn_front_end_common/interface/buffer_management/storage_objects/buffer_database.py +++ b/spinn_front_end_common/interface/buffer_management/storage_objects/buffer_database.py @@ -273,6 +273,9 @@ def _set_core_name(self, x: int, y: int, p: int, core_name: Optional[str]): """, (core_name, x, y, p)) def store_vertex_labels(self) -> None: + """ + Goes through all placement and monitor cores to set a label.
+ """ for placement in FecDataView.iterate_placemements(): self._set_core_name( placement.x, placement.y, placement.p, placement.vertex.label) @@ -284,6 +287,16 @@ def store_vertex_labels(self) -> None: f"SCAMP(OS)_{chip.x}:{chip.y}") def get_core_name(self, x: int, y: int, p: int) -> Optional[str]: + """ + Gets the label (typically vertex label) for this core. + + Returns None if the core at x, y, p is not known. + + :param int x: core x + :param int y: core y + :param int p: core p + :rtype: str or None + """ for row in self.execute( """ SELECT core_name diff --git a/spinn_front_end_common/interface/buffer_management/storage_objects/buffered_sending_region.py b/spinn_front_end_common/interface/buffer_management/storage_objects/buffered_sending_region.py index 1cedb70a19..8f0a06203f 100644 --- a/spinn_front_end_common/interface/buffer_management/storage_objects/buffered_sending_region.py +++ b/spinn_front_end_common/interface/buffer_management/storage_objects/buffered_sending_region.py @@ -24,7 +24,7 @@ EIEIOType.KEY_32_BIT, is_payload_base=True) # The number of bytes in each key to be sent -_N_BYTES_PER_KEY = EIEIOType.KEY_32_BIT.key_bytes # @UndefinedVariable +_N_BYTES_PER_KEY = EIEIOType.KEY_32_BIT.key_bytes # The number of keys allowed (different from the actual number as there is # an additional header) diff --git a/spinn_front_end_common/interface/config_handler.py b/spinn_front_end_common/interface/config_handler.py index 806e5d1613..687615732d 100644 --- a/spinn_front_end_common/interface/config_handler.py +++ b/spinn_front_end_common/interface/config_handler.py @@ -183,7 +183,7 @@ def _remove_excess_folders( pass def _set_up_report_specifics(self) -> None: - # clear and clean out folders considered not useful anymore + # clear and clean out folders considered not useful any more report_dir_path = self._data_writer.get_report_dir_path() if os.listdir(report_dir_path): self._remove_excess_folders( diff --git 
a/spinn_front_end_common/interface/ds/data_specification_base.py b/spinn_front_end_common/interface/ds/data_specification_base.py index f4b6defe46..4e2170a628 100644 --- a/spinn_front_end_common/interface/ds/data_specification_base.py +++ b/spinn_front_end_common/interface/ds/data_specification_base.py @@ -12,12 +12,16 @@ # See the License for the specific language governing permissions and # limitations under the License. -import numpy from typing import Optional, Sequence, TextIO, Union + +import numpy + from spinn_utilities.abstract_base import AbstractBase, abstractmethod +from spinn_front_end_common.utilities.exceptions import DataSpecException + from .data_type import DataType from .ds_sqllite_database import DsSqlliteDatabase -from spinn_front_end_common.utilities.exceptions import DataSpecException + BYTES_PER_WORD = 4 diff --git a/spinn_front_end_common/interface/ds/ds_sqllite_database.py b/spinn_front_end_common/interface/ds/ds_sqllite_database.py index 7aa10ebc6c..8ce2305705 100644 --- a/spinn_front_end_common/interface/ds/ds_sqllite_database.py +++ b/spinn_front_end_common/interface/ds/ds_sqllite_database.py @@ -13,20 +13,25 @@ # limitations under the License. 
import logging -import numpy import os import sqlite3 from typing import Dict, Iterable, List, Optional, Tuple, cast, TYPE_CHECKING + +import numpy + from spinn_utilities.log import FormatAdapter from spinn_utilities.typing.coords import XYP + from spinnman.model.enums import ExecutableType from spinnman.spalloc.spalloc_job import SpallocJob + from spinn_front_end_common.data import FecDataView from spinn_front_end_common.abstract_models import AbstractHasAssociatedBinary from spinn_front_end_common.utilities.constants import ( APP_PTR_TABLE_BYTE_SIZE) from spinn_front_end_common.utilities.exceptions import DsDatabaseException from spinn_front_end_common.utilities.sqlite_db import SQLiteDB + if TYPE_CHECKING: from spinn_front_end_common.interface.interface_functions.\ spalloc_allocator import SpallocJobController # @UnusedImport @@ -44,7 +49,7 @@ class DsSqlliteDatabase(SQLiteDB): """ A database for holding data specification details. """ - __slots__ = ("_init_file") + __slots__ = ["_init_file"] def __init__(self, database_file: Optional[str] = None): """ @@ -453,6 +458,15 @@ def get_start_address(self, x: int, y: int, p: int) -> int: def set_region_pointer( self, x: int, y: int, p: int, region_num: int, pointer: int): + """ + Sets the pointer to the start of the address for this x, y, p region. + + :param int x: + :param int y: + :param int p: + :param int region_num: + :param int pointer: start address + """ self.execute( """ UPDATE region diff --git a/spinn_front_end_common/interface/ds/dse.sql b/spinn_front_end_common/interface/ds/dse.sql index 4ec6b8e8e9..a658398384 100644 --- a/spinn_front_end_common/interface/ds/dse.sql +++ b/spinn_front_end_common/interface/ds/dse.sql @@ -21,7 +21,7 @@ PRAGMA main.synchronous = OFF; PRAGMA foreign_keys = ON; -- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - --- A table describing the ethernets. +-- A table describing the Ethernets. 
CREATE TABLE IF NOT EXISTS ethernet( ethernet_x INTEGER NOT NULL, ethernet_y INTEGER NOT NULL, @@ -29,7 +29,7 @@ CREATE TABLE IF NOT EXISTS ethernet( PRIMARY KEY (ethernet_x, ethernet_y)); -- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - --- A table describing the chips and their ethernet. +-- A table describing the chips and their Ethernet. CREATE TABlE IF NOT EXISTS chip( x INTEGER NOT NULL, y INTEGER NOT NULL, diff --git a/spinn_front_end_common/interface/interface_functions/chip_provenance_updater.py b/spinn_front_end_common/interface/interface_functions/chip_provenance_updater.py index b2ab1fb537..c335147853 100644 --- a/spinn_front_end_common/interface/interface_functions/chip_provenance_updater.py +++ b/spinn_front_end_common/interface/interface_functions/chip_provenance_updater.py @@ -29,6 +29,11 @@ def chip_provenance_updater(all_core_subsets: CoreSubsets): + """ + Forces all cores to generate provenance data, and then exit. + + :param CoreSubsets all_core_subsets: + """ updater = _ChipProvenanceUpdater(all_core_subsets) # pylint: disable=protected-access updater._run() diff --git a/spinn_front_end_common/interface/interface_functions/command_sender_adder.py b/spinn_front_end_common/interface/interface_functions/command_sender_adder.py index b66567baf5..27f47befbd 100644 --- a/spinn_front_end_common/interface/interface_functions/command_sender_adder.py +++ b/spinn_front_end_common/interface/interface_functions/command_sender_adder.py @@ -36,6 +36,9 @@ def add_command_senders(system_placements: Placements) -> List[CommandSender]: class CommandSenderAdder(object): + """ + Code to add CommandSender vertices and their placements. 
+ """ __slots__ = ( "__command_sender_for_chip", "__general_command_sender", diff --git a/spinn_front_end_common/interface/interface_functions/compute_energy_used.py b/spinn_front_end_common/interface/interface_functions/compute_energy_used.py index 8eeef71891..5f014f7484 100644 --- a/spinn_front_end_common/interface/interface_functions/compute_energy_used.py +++ b/spinn_front_end_common/interface/interface_functions/compute_energy_used.py @@ -49,6 +49,7 @@ #: measured from the loading of the column and extrapolated MILLIWATTS_PER_FRAME_ACTIVE_COST: Final = 0.154163558 +# pylint: disable=invalid-name #: measured from the real power meter and timing between the photos #: for a day powered off MILLIWATTS_FOR_BOXED_48_CHIP_FRAME_IDLE_COST: Final = 0.0045833333 @@ -56,7 +57,7 @@ # TODO needs filling in MILLIWATTS_PER_UNBOXED_48_CHIP_FRAME_IDLE_COST: Final = 0.01666667 -# TODO verify this is correct when doing multiboard comms +# TODO verify this is correct when doing multi board communications N_MONITORS_ACTIVE_DURING_COMMS = 2 @@ -252,17 +253,17 @@ def _calculate_fpga_energy( # if not spalloc, then could be any type of board if (is_config_none("Machine", "spalloc_server") and is_config_none("Machine", "remote_spinnaker_url")): - # if a spinn2 or spinn3 (4 chip boards) then they have no fpgas + # if a spinn2 or spinn3 (4 chip boards) then they have no FPGAs if machine.n_chips <= 4: return 0, 0 # if the spinn4 or spinn5 board, need to verify if wrap-arounds - # are there, if not then assume fpgas are turned off. + # are there, if not then assume FPGAs are turned off. 
- # how many fpgas are active + # how many FPGAs are active total_fpgas = __board_n_operational_fpgas( machine.ethernet_connected_chips[0]) - # active fpgas + # active FPGAs if total_fpgas == 0: return 0, 0 else: # spalloc machine, need to check each board @@ -363,7 +364,7 @@ def _calculate_loading_energy( # min between chips that are active and fixed monitor, as when 1 # chip is used its one monitor, if more than 1 chip, - # the ethernet connected chip and the monitor handling the read/write + # the Ethernet connected chip and the monitor handling the read/write # this is checked by min n_monitors_active = min(N_MONITORS_ACTIVE_DURING_COMMS, n_monitors) energy_cost = ( @@ -412,7 +413,7 @@ def _calculate_data_extraction_energy( # min between chips that are active and fixed monitor, as when 1 # chip is used its one monitor, if more than 1 chip, - # the ethernet connected chip and the monitor handling the read/write + # the Ethernet connected chip and the monitor handling the read/write # this is checked by min energy_cost = ( total_time_ms * @@ -456,7 +457,7 @@ def _calculate_power_down_energy( :return: energy in joules :rtype: float """ - # if spalloc or hbp + # if spalloc or HBP if FecDataView.has_allocation_controller(): return time * n_frames * MILLIWATTS_FOR_FRAME_IDLE_COST # if 4 chip diff --git a/spinn_front_end_common/interface/interface_functions/graph_data_specification_writer.py b/spinn_front_end_common/interface/interface_functions/graph_data_specification_writer.py index 2d3f85defb..9593f79487 100644 --- a/spinn_front_end_common/interface/interface_functions/graph_data_specification_writer.py +++ b/spinn_front_end_common/interface/interface_functions/graph_data_specification_writer.py @@ -175,6 +175,7 @@ def __generate_data_spec_for_vertices( # creating the error message which contains the memory usage of # what each core within the chip uses and its original estimate. 
+ # pylint: disable=consider-using-f-string memory_usage = "\n".join( " {}: {} (total={}, estimated={})".format( vert, region_size, total_est_size, diff --git a/spinn_front_end_common/interface/interface_functions/hbp_allocator.py b/spinn_front_end_common/interface/interface_functions/hbp_allocator.py index c6ae8cb58b..b602e3963d 100644 --- a/spinn_front_end_common/interface/interface_functions/hbp_allocator.py +++ b/spinn_front_end_common/interface/interface_functions/hbp_allocator.py @@ -13,15 +13,19 @@ # limitations under the License. import logging -import requests from typing import Optional, Tuple, cast + +import requests + from spinn_utilities.config_holder import get_config_str from spinn_utilities.overrides import overrides from spinn_utilities.typing.json import JsonArray, JsonObject + +from pacman.exceptions import PacmanConfigurationException + from spinn_front_end_common.abstract_models.impl import ( MachineAllocationController) from spinn_front_end_common.data import FecDataView -from pacman.exceptions import PacmanConfigurationException class _HBPJobController(MachineAllocationController): @@ -94,9 +98,19 @@ def _teardown(self) -> None: @property def power(self) -> bool: + """ + The last power state set. + + :rtype: bool + """ return self._power_on def set_power(self, power: bool): + """ + Sets the power to the new state. 
+ + :param bool power: + """ self._set_power(self._machine_name, power) self._power_on = power diff --git a/spinn_front_end_common/interface/interface_functions/host_bit_field_router_compressor.py b/spinn_front_end_common/interface/interface_functions/host_bit_field_router_compressor.py index 2d40cc77ad..ed2c0e44f2 100644 --- a/spinn_front_end_common/interface/interface_functions/host_bit_field_router_compressor.py +++ b/spinn_front_end_common/interface/interface_functions/host_bit_field_router_compressor.py @@ -227,7 +227,7 @@ class HostBasedBitFieldRouterCompressor(object): # for router report _LOWER_16_BITS = 0xFFFF - # rob paul to pay sam threshold starting point at 1ms time step + # threshold starting point at 1ms time step _N_PACKETS_PER_SECOND = 100000 # convert between milliseconds and second @@ -315,7 +315,7 @@ def compress_bitfields( if self._best_routing_entries: for entry in self._best_routing_entries: best_router_table.add_multicast_routing_entry( - entry.to_MulticastRoutingEntry()) + entry.to_multicast_routing_entry()) compressed_pacman_router_tables.add_routing_table(best_router_table) @@ -673,18 +673,18 @@ def _create_table_report( report_out.write("The final routing table entries are as follows:\n\n") report_out.write( - "{: <5s} {: <10s} {: <10s} {: <10s} {: <7s} {}\n".format( - "Index", "Key", "Mask", "Route", "Default", "[Cores][Links]")) + f'{"Index": <5s} {"Key": <10s} {"Mask": <10s} {"Route": <10s} ' + f'#{"Default": <7s} [Cores][Links]\n') report_out.write( - "{:-<5s} {:-<10s} {:-<10s} {:-<10s} {:-<7s} {:-<14s}\n".format( - "", "", "", "", "", "")) + f"{'':-<5s} {'':-<10s} {'':-<10s} {'':-<10s} " + f"{'':-<7s} {'':-<14s}\n") entry_count = 0 n_defaultable = 0 # Note: _best_routing_table is a list(), router_table is not for entry in self._best_routing_entries: index = entry_count & self._LOWER_16_BITS - entry_str = format_route(entry.to_MulticastRoutingEntry()) + entry_str = format_route(entry.to_multicast_routing_entry()) entry_count += 1 if 
entry.defaultable: n_defaultable += 1 diff --git a/spinn_front_end_common/interface/interface_functions/host_no_bitfield_router_compression.py b/spinn_front_end_common/interface/interface_functions/host_no_bitfield_router_compression.py index 5a3fc08cb6..6051472fa6 100644 --- a/spinn_front_end_common/interface/interface_functions/host_no_bitfield_router_compression.py +++ b/spinn_front_end_common/interface/interface_functions/host_no_bitfield_router_compression.py @@ -97,7 +97,7 @@ def __init__( self._binary_path = binary_path self._compress_as_much_as_possible = get_config_bool( "Mapping", "router_table_compress_as_far_as_possible") - # Only used by mundy compressor we can not rebuild + # Only used by Mundy compressor we can not rebuild self._compress_only_when_needed = None self._routing_tables = FecDataView.get_precompressed() self._progresses_text = progress_text @@ -222,7 +222,7 @@ def _build_data(self, table: AbstractMulticastRoutingTable) -> bytes: # Write the size of the table table.number_of_entries) else: - # Mundy's compressor can not be changed so uses it own structure + # Mundy compressor can not be changed so uses its own structure data += _FOUR_WORDS.pack( FecDataView.get_app_id(), int(self._compress_only_when_needed), diff --git a/spinn_front_end_common/interface/interface_functions/load_data_specification.py b/spinn_front_end_common/interface/interface_functions/load_data_specification.py index 1742d28e4f..9709c2dcd6 100644 --- a/spinn_front_end_common/interface/interface_functions/load_data_specification.py +++ b/spinn_front_end_common/interface/interface_functions/load_data_specification.py @@ -13,13 +13,17 @@ # limitations under the License.
import logging -import numpy from typing import Any, Callable + +import numpy from typing_extensions import TypeAlias + from spinn_utilities.config_holder import get_config_bool from spinn_utilities.progress_bar import ProgressBar from spinn_utilities.log import FormatAdapter + from spinnman.model.enums import UserRegister + from spinn_front_end_common.data import FecDataView from spinn_front_end_common.utilities.constants import ( APPDATA_MAGIC_NUM, APP_PTR_TABLE_BYTE_SIZE, BYTES_PER_WORD, diff --git a/spinn_front_end_common/interface/interface_functions/router_provenance_gatherer.py b/spinn_front_end_common/interface/interface_functions/router_provenance_gatherer.py index f8d7ff83fa..3195ee5c9f 100644 --- a/spinn_front_end_common/interface/interface_functions/router_provenance_gatherer.py +++ b/spinn_front_end_common/interface/interface_functions/router_provenance_gatherer.py @@ -28,7 +28,10 @@ logger = FormatAdapter(logging.getLogger(__name__)) -def router_provenance_gatherer(): +def router_provenance_gatherer() -> None: + """ + Gathers diagnostics from the routers. + """ _RouterProvenanceGatherer().add_router_provenance_data() diff --git a/spinn_front_end_common/interface/interface_functions/routing_setup.py b/spinn_front_end_common/interface/interface_functions/routing_setup.py index 3300732353..a5b827c310 100644 --- a/spinn_front_end_common/interface/interface_functions/routing_setup.py +++ b/spinn_front_end_common/interface/interface_functions/routing_setup.py @@ -13,14 +13,16 @@ # limitations under the License. 
from spinn_utilities.progress_bar import ProgressBar + from spinnman.constants import ROUTER_REGISTER_REGISTERS from spinnman.model import DiagnosticFilter from spinnman.model.enums import ( DiagnosticFilterDefaultRoutingStatus, DiagnosticFilterPacketType, DiagnosticFilterSource) -from spinn_front_end_common.data import FecDataView from spinnman.transceiver import Transceiver +from spinn_front_end_common.data import FecDataView + def routing_setup() -> None: """ diff --git a/spinn_front_end_common/interface/interface_functions/sdram_outgoing_partition_allocator.py b/spinn_front_end_common/interface/interface_functions/sdram_outgoing_partition_allocator.py index 41f4c3b5e7..bf10a42d20 100644 --- a/spinn_front_end_common/interface/interface_functions/sdram_outgoing_partition_allocator.py +++ b/spinn_front_end_common/interface/interface_functions/sdram_outgoing_partition_allocator.py @@ -25,13 +25,16 @@ def sdram_outgoing_partition_allocator() -> None: + """ + Goes through all vertices to see if SDRAM has to be allocated. + """ virtual_usage: Optional[Dict[XY, int]] transceiver: Optional[Transceiver] if FecDataView.has_transceiver(): transceiver = FecDataView.get_transceiver() virtual_usage = None else: - # Ok if transceiver = None + # OK if transceiver = None transceiver = None virtual_usage = defaultdict(int) diff --git a/spinn_front_end_common/interface/interface_functions/spalloc_allocator.py b/spinn_front_end_common/interface/interface_functions/spalloc_allocator.py index 99a5aa44af..a90d8a425a 100644 --- a/spinn_front_end_common/interface/interface_functions/spalloc_allocator.py +++ b/spinn_front_end_common/interface/interface_functions/spalloc_allocator.py @@ -11,35 +11,44 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
+ from contextlib import AbstractContextManager, ExitStack import logging import math from typing import ContextManager, Dict, Tuple, Optional, Union, cast + from spinn_utilities.config_holder import ( get_config_bool, get_config_str_or_none, get_config_str_list) from spinn_utilities.log import FormatAdapter from spinn_utilities.overrides import overrides from spinn_utilities.typing.coords import XY from spinn_utilities.config_holder import get_config_int, get_config_str + from spalloc_client import Job # type: ignore[import] from spalloc_client.states import JobState # type: ignore[import] + +from spinnman.connections.udp_packet_connections import ( SCAMPConnection, EIEIOConnection) from spinnman.constants import SCP_SCAMP_PORT from spinnman.spalloc import ( is_server_address, SpallocClient, SpallocJob, SpallocState) +from spinnman.transceiver import Transceiver + from spinn_front_end_common.abstract_models.impl import ( MachineAllocationController) from spinn_front_end_common.data import FecDataView from spinn_front_end_common.interface.provenance import ProvenanceWriter from spinn_front_end_common.utilities.utility_calls import parse_old_spalloc -from spinnman.transceiver import Transceiver -from spinnman.connections.udp_packet_connections import ( SCAMPConnection, EIEIOConnection) logger = FormatAdapter(logging.getLogger(__name__)) _MACHINE_VERSION = 5 # Spalloc only ever works with v5 boards class SpallocJobController(MachineAllocationController): + """ + A class to create and support Transceivers specific to Spalloc. + """ + __slots__ = ( # the spalloc job object "_job", @@ -72,6 +81,11 @@ def __init__( @property def job(self) -> SpallocJob: + """ + The job value passed into the init.
+ + :rtype: SpallocJob + """ return self._job @overrides(MachineAllocationController.extend_allocation) diff --git a/spinn_front_end_common/interface/interface_functions/system_multicast_routing_generator.py b/spinn_front_end_common/interface/interface_functions/system_multicast_routing_generator.py index 3d1a1d22b7..ec1aad47b0 100644 --- a/spinn_front_end_common/interface/interface_functions/system_multicast_routing_generator.py +++ b/spinn_front_end_common/interface/interface_functions/system_multicast_routing_generator.py @@ -11,16 +11,21 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. + from collections import defaultdict import logging from typing import Dict, Tuple, Set, Optional, cast + from spinn_utilities.log import FormatAdapter from spinn_utilities.typing.coords import XY +from spinn_utilities.progress_bar import ProgressBar + +from spinn_machine import MulticastRoutingEntry, Chip + from pacman.exceptions import (PacmanRoutingException) from pacman.model.routing_tables import ( MulticastRoutingTables, UnCompressedMulticastRoutingTable) -from spinn_machine import MulticastRoutingEntry, Chip -from spinn_utilities.progress_bar import ProgressBar + from spinn_front_end_common.data import FecDataView # ADDRESS_KEY, DATA_KEY, BOUNDARY_KEY @@ -223,7 +228,7 @@ def _add_routing_entries( self._add_routing_entry(chip, key, link_ids=[link]) key += N_KEYS_PER_PARTITION_ID - # accum links to make a broadcast + # accumulate links to make a broadcast links_per_chip = defaultdict(list) for chip_key in tree: chip, link = tree[chip_key] diff --git a/spinn_front_end_common/interface/java_caller.py b/spinn_front_end_common/interface/java_caller.py index 24ee7b5ce4..593ed9ba40 100644 --- a/spinn_front_end_common/interface/java_caller.py +++ b/spinn_front_end_common/interface/java_caller.py @@ -18,6 +18,7 @@ import os import subprocess from 
typing import Dict, Iterable, List, Optional, cast + from spinn_utilities.config_holder import ( get_config_str, get_config_str_or_none) from spinn_utilities.log import FormatAdapter @@ -26,11 +27,11 @@ from spinn_machine.tags import IPTag from pacman.exceptions import PacmanExternalAlgorithmFailedToCompleteException from pacman.model.graphs import AbstractVirtual -from spinn_front_end_common.data import FecDataView from pacman.model.placements import Placement + +from spinn_front_end_common.data import FecDataView from spinn_front_end_common.utilities.report_functions.write_json_machine \ - import ( - write_json_machine) # Argh! Mypy + import write_json_machine from spinn_front_end_common.utilities.exceptions import ( ConfigurationException, SpinnFrontEndException) from spinn_front_end_common.interface.buffer_management.buffer_models import ( @@ -52,9 +53,9 @@ class JavaCaller(object): """ __slots__ = ( "_chip_by_ethernet", - # The call to get java to work. Including the path if required. + # The call to get Java to work. Including the path if required. 
"_java_call", - # The location of the java jar file + # The location of the Java jar file "_jar_file", # The location where the machine json is written "_machine_json_path", @@ -62,9 +63,9 @@ class JavaCaller(object): "_monitor_cores", # Flag to indicate if at least one placement is recording "_recording", - # Dict of ethernet (x, y) and the packetGather IPtago + # Dict of Ethernet (x, y) and the packetGather IPtags "_gatherer_iptags", - # Dict of ethernet (x, y) to the p of the packetGather vertex + # Dict of Ethernet (x, y) to the p of the packetGather vertex "_gatherer_cores", # The location where the latest placement json is written "__placement_json", @@ -118,7 +119,7 @@ def _find_java_jar(self) -> None: parent = os.path.dirname(github_checkout_dir) java_spinnaker_path = os.path.join(parent, "JavaSpiNNaker") else: - # As I don't know how to write pwd and /JavaSpiNNaker to one line + # As I don't know how to write this to one line indirect_path = os.path.join( java_spinnaker_path, "JavaSpiNNaker") if os.path.isdir(indirect_path): diff --git a/spinn_front_end_common/interface/profiling/profile_data.py b/spinn_front_end_common/interface/profiling/profile_data.py index e7dd12fe2f..4d4a818895 100644 --- a/spinn_front_end_common/interface/profiling/profile_data.py +++ b/spinn_front_end_common/interface/profiling/profile_data.py @@ -13,10 +13,12 @@ # limitations under the License. 
import logging -import numpy import math -import scipy.stats # type: ignore[import] from typing import Dict, Iterable, Mapping, Tuple + +import numpy +import scipy.stats # type: ignore[import] + from spinn_utilities.log import FormatAdapter from spinn_front_end_common.data import FecDataView diff --git a/spinn_front_end_common/interface/profiling/profile_utils.py b/spinn_front_end_common/interface/profiling/profile_utils.py index e6c36221a9..21ebe35801 100644 --- a/spinn_front_end_common/interface/profiling/profile_utils.py +++ b/spinn_front_end_common/interface/profiling/profile_utils.py @@ -12,14 +12,17 @@ # See the License for the specific language governing permissions and # limitations under the License. from typing import Mapping + from pacman.model.placements import Placement + from spinn_front_end_common.utilities.helpful_functions import ( locate_memory_region_for_placement) from spinn_front_end_common.utilities.constants import BYTES_PER_WORD from spinn_front_end_common.data import FecDataView -from .profile_data import ProfileData from spinn_front_end_common.interface.ds import DataSpecificationGenerator +from .profile_data import ProfileData + _PROFILE_HEADER_SIZE_BYTES = BYTES_PER_WORD _SIZE_OF_PROFILE_DATA_ENTRY_IN_BYTES = 2 * BYTES_PER_WORD _BYTE_OFFSET_OF_PROFILE_DATA_IN_PROFILE_REGION = BYTES_PER_WORD diff --git a/spinn_front_end_common/interface/provenance/fec_timer.py b/spinn_front_end_common/interface/provenance/fec_timer.py index c9e85e368c..16aa20b94f 100644 --- a/spinn_front_end_common/interface/provenance/fec_timer.py +++ b/spinn_front_end_common/interface/provenance/fec_timer.py @@ -61,6 +61,11 @@ class FecTimer(object): @classmethod def setup(cls, simulator: AbstractSpinnakerBase): + """ + Checks and saves cfg values so they don't have to be read each time + + :param AbstractSpinnakerBase simulator: Not actually used + """ # pylint: disable=global-statement, protected-access cls._simulator = simulator if get_config_bool("Reports", 
"write_algorithm_timings"): @@ -97,12 +102,31 @@ def _insert_timing( time_taken, skip_reason) def skip(self, reason: str): + """ + Records that the algorithms is being skipped and ends the timer. + + :param str reason: Why the algorithm is being skipped + """ message = f"{self._algorithm} skipped as {reason}" time_taken = self._stop_timer() self._insert_timing(time_taken, reason) self._report(message) def skip_if_has_not_run(self) -> bool: + """ + Skips if the simulation has not run. + + If the simulation has run used this methods + keep the timer running and returns False (did not skip). + + If there was no run this method records the reason, + ends the timing and returns True (it skipped). + + Currently not used as a better check is skip_if_empty on the data + needed for the algorithm. + + :rtype: bool + """ if FecDataView.is_ran_ever(): return False else: @@ -110,6 +134,19 @@ def skip_if_has_not_run(self) -> bool: return True def skip_if_virtual_board(self) -> bool: + """ + Skips if a virtual board is being used. + + If a real board is being used this methods + keep the timer running and returns False (did not skip). + + If a virtual board is being used this method records the reason, + ends the timing and returns True (it skipped). + + Typically called for algorithms that require a real board to run. + + :rtype: bool + """ if get_config_bool("Machine", "virtual_board"): self.skip("virtual_board") return True @@ -118,6 +155,19 @@ def skip_if_virtual_board(self) -> bool: def skip_if_empty(self, value: Optional[ Union[bool, int, str, Sized]], name: str) -> bool: + """ + Skips if the value is one that evaluates to False. + + If the value is considered True (if value) this methods + keep the timer running and returns False (did not skip). + + If the value is False this method records the reason, + ends the timing and returns True (it skipped). 
+ + :param value: Value to check if True + :param str name: Name to record for that value if skipping + :rtype: bool + """ if value: return False if value is None: @@ -129,6 +179,23 @@ def skip_if_empty(self, value: Optional[ return True def skip_if_cfg_false(self, section: str, option: str) -> bool: + """ + Skips if a Boolean cfg value is False. + + If this cfg value is True this method keeps the timer running and + returns False (did not skip). + + If the cfg value is False this method records the reason, + ends the timing and returns True (it skipped). + + Typically called if the algorithm should run if the cfg value + is set True. + + :param str section: Section level containing the option + :param str option: The option to check + :returns: True if the algorithm was skipped + :rtype: bool + """ if get_config_bool(section, option): return False else: @@ -137,6 +204,23 @@ def skip_if_cfg_false(self, section: str, option: str) -> bool: def skip_if_cfgs_false( self, section: str, option1: str, option2: str) -> bool: + """ + Skips if two Boolean cfg values are both False. + + If either cfg value is True this method keeps the timer running and + returns False (did not skip). + + If both cfg values are False this method records the reason, + ends the timing and returns True (it skipped). + + Typically called if the algorithm should run if either cfg value + is set True. + + :param str section: Section level to be applied to both options + :param str option1: One of the options to check + :param str option2: The other option to check + :rtype: bool + """ if get_config_bool(section, option1): return False elif get_config_bool(section, option2): @@ -146,6 +230,11 @@ def skip_if_cfgs_false( return True def error(self, reason: str): + """ + Ends an algorithm timing and records that it failed. 
+ + :param str reason: What caused the error + """ time_taken = self._stop_timer() message = f"{self._algorithm} failed after {time_taken} as {reason}" self._insert_timing(time_taken, reason) @@ -169,7 +258,7 @@ def __convert_to_timedelta(time_diff: int) -> timedelta: """ Have to convert to a timedelta for rest of code to read. - As perf_counter_ns is nano seconds, and time delta lowest is micro, + As perf_counter_ns is nanoseconds, and time delta lowest is micro, need to convert. """ return timedelta(microseconds=time_diff / _NANO_TO_MICRO) @@ -260,6 +349,11 @@ def end_category(cls, category: TimerCategory): @classmethod def stop_category_timing(cls) -> None: + """ + Stops all the timing. + + Typically only called during simulator shutdown + """ cls.__stop_category() cls._previous = [] cls._category = None diff --git a/spinn_front_end_common/interface/provenance/global_provenance.py b/spinn_front_end_common/interface/provenance/global_provenance.py index d203c9bb68..5011a52829 100644 --- a/spinn_front_end_common/interface/provenance/global_provenance.py +++ b/spinn_front_end_common/interface/provenance/global_provenance.py @@ -18,14 +18,17 @@ import re from sqlite3 import Row from typing import Iterable, List, Optional, Tuple, Union + from spinn_utilities.log import FormatAdapter + from spinn_front_end_common.data import FecDataView from spinn_front_end_common.utilities.constants import ( MICRO_TO_MILLISECOND_CONVERSION) from spinn_front_end_common.utilities.sqlite_db import SQLiteDB -from .timer_category import TimerCategory from spinn_front_end_common.interface.provenance.timer_work import TimerWork +from .timer_category import TimerCategory + logger = FormatAdapter(logging.getLogger(__name__)) _DDL_FILE = os.path.join(os.path.dirname(__file__), "global.sql") @@ -237,7 +240,7 @@ def get_timer_provenance(self, algorithm: str) -> str: The value to LIKE search for in the algorithm column. Can be the full name, or have ``%`` and ``_`` wildcards. 
:return: - A possibly multiline string with for each row which matches the + A possibly multi line string with for each row which matches the like a line ``algorithm: value`` :rtype: str """ @@ -256,7 +259,7 @@ def get_run_times(self) -> str: invoked multiple times in the run, its times are summed. :return: - A possibly multiline string with for each row which matches the + A possibly multi line string with for each row which matches the like a line ``description_name: time``. The times are in seconds. :rtype: str """ @@ -271,12 +274,12 @@ def get_run_times(self) -> str: f"{row[0].replace('_', ' ')}: {row[1]} s" for row in self.run_query(query)) - def get_run_time_of_BufferExtractor(self) -> str: + def get_run_time_of_buffer_extractor(self) -> str: """ Gets the buffer extractor provenance item(s) from the last run :return: - A possibly multiline string with for each row which matches the + A possibly multi line string with for each row which matches the ``LIKE %BufferExtractor`` :rtype: str """ @@ -359,7 +362,7 @@ def get_timer_sum_by_work(self, work: TimerWork) -> int: Get the total runtime for one work type of algorithms :param TimerWork work: - :return: total off all runtimes with this category + :return: total off all run times with this category :rtype: int """ query = """ @@ -381,7 +384,7 @@ def get_timer_sum_by_algorithm(self, algorithm: str) -> int: Get the total runtime for one algorithm :param str algorithm: - :return: total off all runtimes with this algorithm + :return: total off all run times with this algorithm :rtype: int """ query = """ diff --git a/spinn_front_end_common/interface/provenance/log_store_db.py b/spinn_front_end_common/interface/provenance/log_store_db.py index b68d5002ae..0a80b02cd5 100644 --- a/spinn_front_end_common/interface/provenance/log_store_db.py +++ b/spinn_front_end_common/interface/provenance/log_store_db.py @@ -35,7 +35,7 @@ def store_log( db.store_log(level, message, timestamp) except sqlite3.OperationalError as ex: if 
"database is locked" in ex.args: - # Ok ignore this one + # OK ignore this one # DO NOT log this error here or you will loop forever! return # all others are bad diff --git a/spinn_front_end_common/interface/provenance/provides_provenance_data_from_machine_impl.py b/spinn_front_end_common/interface/provenance/provides_provenance_data_from_machine_impl.py index 366c929c35..26db12b980 100644 --- a/spinn_front_end_common/interface/provenance/provides_provenance_data_from_machine_impl.py +++ b/spinn_front_end_common/interface/provenance/provides_provenance_data_from_machine_impl.py @@ -13,17 +13,22 @@ # limitations under the License. from typing import Sequence, Tuple + from spinn_utilities.abstract_base import abstractmethod from spinn_utilities.overrides import overrides + from spinnman.transceiver import Transceiver + from pacman.model.placements import Placement + from spinn_front_end_common.utilities.helpful_functions import ( get_region_base_address_offset) -from .abstract_provides_provenance_data_from_machine import ( - AbstractProvidesProvenanceDataFromMachine) from spinn_front_end_common.data import FecDataView from spinn_front_end_common.utilities.constants import BYTES_PER_WORD from spinn_front_end_common.utilities.helpful_functions import n_word_struct + +from .abstract_provides_provenance_data_from_machine import ( + AbstractProvidesProvenanceDataFromMachine) from .provenance_writer import ProvenanceWriter # mypy: disable-error-code=empty-body diff --git a/spinn_front_end_common/interface/provenance/router_prov_mapper.py b/spinn_front_end_common/interface/provenance/router_prov_mapper.py index 57f0095c35..2713d3ffc0 100644 --- a/spinn_front_end_common/interface/provenance/router_prov_mapper.py +++ b/spinn_front_end_common/interface/provenance/router_prov_mapper.py @@ -14,16 +14,16 @@ import argparse import os -import numpy -import sqlite3 -from types import ModuleType from typing import ( Any, ContextManager, FrozenSet, Iterable, List, Optional, Tuple, 
cast) +import sqlite3 +from types import ModuleType + +import numpy from typing_extensions import Literal + from spinn_front_end_common.utilities.sqlite_db import SQLiteDB from spinn_front_end_common.utilities.exceptions import ConfigurationException -# import matplotlib.pyplot as plot -# import seaborn # The types of router provenance that we'll plot ROUTER_PLOTTABLES = ( @@ -52,6 +52,9 @@ class Plotter(ContextManager[SQLiteDB]): + """ + Code to plot provenance data from the database + """ __slots__ = ("cmap", "_db", "__have_insertion_order", "__verbose") __pyplot: Optional[ModuleType] = None @@ -96,6 +99,11 @@ def __do_chip_query(self, description: str) -> Iterable[sqlite3.Row]: """, (description, )) def get_per_chip_prov_types(self) -> FrozenSet[str]: + """ + Get a set of the descriptions available at chip level + + :rtype: set(str) + """ query = """ SELECT DISTINCT description_name AS "description" FROM provenance_view @@ -106,6 +114,14 @@ def get_per_chip_prov_types(self) -> FrozenSet[str]: def get_per_chip_prov_details(self, info: str) -> Tuple[ str, int, int, numpy.ndarray]: + """ + Gets the provenance of a per chip basis + + :param str info: + The name of the metadata to sum + :return: name, max x, max y and data + :rtype: tuple(str, int, int, numpy.ndarray) + """ data = [] xs = [] ys = [] @@ -161,6 +177,11 @@ def __do_sum_query(self, description: str) -> Iterable[sqlite3.Row]: """, (description, )) def get_per_core_prov_types(self) -> FrozenSet[str]: + """ + Get a set of the descriptions available at core level + + :rtype: set(str) + """ query = """ SELECT DISTINCT description_name AS "description" FROM provenance_view @@ -172,6 +193,14 @@ def get_per_core_prov_types(self) -> FrozenSet[str]: def get_sum_chip_prov_details(self, info: str) -> Tuple[ str, int, int, numpy.ndarray]: + """ + Gets the sum of the provenance + + :param str info: + The name of the metadata to sum + :return: name, max x, max y and data + :rtype: tuple(str, int, int, numpy.ndarray) + 
""" data: List[Tuple[int, int, Any]] = [] xs: List[int] = [] ys: List[int] = [] @@ -191,7 +220,7 @@ def get_sum_chip_prov_details(self, info: str) -> Tuple[ @classmethod def __plotter_apis(cls) -> Tuple[ModuleType, ModuleType]: # Import here because otherwise CI fails - # pylint: disable=import-error + # pylint: disable=import-error,import-outside-toplevel if not cls.__pyplot: import matplotlib.pyplot as plot # type: ignore[import] cls.__pyplot = plot @@ -205,6 +234,13 @@ def __plotter_apis(cls) -> Tuple[ModuleType, ModuleType]: return cls.__pyplot, cls.__seaborn def plot_per_core_data(self, key: str, output_filename: str): + """ + Plots the metadata for this key/term to the file at a core level + + :param str key: + The name of the metadata to plot, or a unique fragment of it + :param str output_filename: + """ plot, seaborn = self.__plotter_apis() if self.__verbose: print("creating " + output_filename) @@ -222,6 +258,13 @@ def plot_per_core_data(self, key: str, output_filename: str): plot.close() def plot_per_chip_data(self, key: str, output_filename: str): + """ + Plots the metadata for this key/term to the file at a chip level + + :param str key: + The name of the metadata to plot, or a unique fragment of it + :param str output_filename: + """ plot, seaborn = self.__plotter_apis() if self.__verbose: print("creating " + output_filename) @@ -240,6 +283,9 @@ def plot_per_chip_data(self, key: str, output_filename: str): def main() -> None: + """ + Generate heat maps from SpiNNaker provenance databases + """ ap = argparse.ArgumentParser( description="Generate heat maps from SpiNNaker provenance databases.") ap.add_argument("-c", "--colourmap", nargs="?", default="plasma", diff --git a/spinn_front_end_common/interface/provenance/timer_category.py b/spinn_front_end_common/interface/provenance/timer_category.py index 4276947160..17f62f67f6 100644 --- a/spinn_front_end_common/interface/provenance/timer_category.py +++ 
b/spinn_front_end_common/interface/provenance/timer_category.py @@ -42,4 +42,9 @@ def __init__(self, __, category_name: str): @property def category_name(self) -> str: + """ + The category name as passed into the init. + + :rtype: str + """ return self._category_name diff --git a/spinn_front_end_common/interface/provenance/timer_work.py b/spinn_front_end_common/interface/provenance/timer_work.py index 78ffa22cb1..3c696b7a78 100644 --- a/spinn_front_end_common/interface/provenance/timer_work.py +++ b/spinn_front_end_common/interface/provenance/timer_work.py @@ -45,4 +45,9 @@ def __init__(self, __, work_name: str): @property def work_name(self) -> str: + """ + The name for this timer distinction. + + :rtype: str + """ return self._work_name diff --git a/spinn_front_end_common/interface/spinnaker.cfg b/spinn_front_end_common/interface/spinnaker.cfg index 2a28bede2d..373eac6249 100644 --- a/spinn_front_end_common/interface/spinnaker.cfg +++ b/spinn_front_end_common/interface/spinnaker.cfg @@ -202,7 +202,7 @@ minimum_auto_time_steps = 1000 [Mode] # mode = Production or Debug -# In Debug mode all report boolean config values are automitcally overwritten to True +# In Debug mode all report Boolean config values are automatically overwritten to True mode = Production violate_1ms_wall_clock_restriction = False @@ -219,7 +219,7 @@ n_samples_per_recording_entry = 100 [Java] use_java = False -# call to start a java process. +# call to start a Java process. # If there is no jdk../bin in your class path this must include the full path java_call = java @@ -236,7 +236,7 @@ java_spinnaker_path = None # Only use this if you have not built JavaSpiNNaker java_jar_path = None -# Properties flag to be passed into every java call. +# Properties flag to be passed into every Java call. 
# Default logging level is info so NO properties needed java_properties = None # Each Property must start with the -D flag diff --git a/spinn_front_end_common/utilities/base_database.py b/spinn_front_end_common/utilities/base_database.py index bf941dcd8e..f0d6136acd 100644 --- a/spinn_front_end_common/utilities/base_database.py +++ b/spinn_front_end_common/utilities/base_database.py @@ -68,6 +68,11 @@ def __init__(self, database_file: Optional[str] = None, *, @classmethod def default_database_file(cls) -> str: + """ + The path to the standard place where the data.sqlite3 file will be stored + + :rtype: str + """ return os.path.join(FecDataView.get_run_dir_path(), f"data{FecDataView.get_reset_str()}.sqlite3") diff --git a/spinn_front_end_common/utilities/connections/live_event_connection.py b/spinn_front_end_common/utilities/connections/live_event_connection.py index 4516ec0e1c..8ae8f063fd 100644 --- a/spinn_front_end_common/utilities/connections/live_event_connection.py +++ b/spinn_front_end_common/utilities/connections/live_event_connection.py @@ -72,7 +72,7 @@ # The expected flags from a RAW SCP packet in response _SCP_RESPONSE_FLAGS = 7 -# The expected destination cpu from a RAW SCP packet in repsonse +# The expected destination cpu from a RAW SCP packet in response _SCP_RESPONSE_DEST = 0xFF @@ -187,6 +187,11 @@ def __init__(self, live_packet_gather_label: Optional[str], self.__scp_response_received: Optional[bytes] = None def add_send_label(self, label: str): + """ + Adds a send label. + + :param str label: + """ if self.__send_labels is None: self.__send_labels = list() if label not in self.__send_labels: @@ -197,6 +202,11 @@ def add_send_label(self, label: str): self.__init_callbacks[label] = list() def add_receive_label(self, label: str): + """ + Adds a receive label if possible. 
+ + :param str label: + """ if self.__live_packet_gather_label is None: raise ConfigurationException( "no live packet gather label given; " diff --git a/spinn_front_end_common/utilities/constants.py b/spinn_front_end_common/utilities/constants.py index a5696b491f..e6e491d138 100644 --- a/spinn_front_end_common/utilities/constants.py +++ b/spinn_front_end_common/utilities/constants.py @@ -17,6 +17,7 @@ # conversion from words to bytes BYTES_PER_WORD = 4 +# pylint: disable=invalid-name BYTES_PER_4_WORDS = 16 BYTES_PER_SHORT = 2 BYTES_PER_KB = 1024 @@ -37,7 +38,7 @@ #: the ITCM max limit for a binary MAX_POSSIBLE_BINARY_SIZE = 33 * BYTES_PER_KB -# converts between micro and milli seconds +# converts between microseconds and milliseconds MICRO_TO_MILLISECOND_CONVERSION = 1000.0 MICRO_TO_SECOND_CONVERSION = 1000000.0 # (1e6) @@ -104,7 +105,7 @@ # output buffering operations -class BUFFERING_OPERATIONS(Enum): +class BufferingOperations(Enum): """ A listing of what SpiNNaker specific EIEIO commands there are. 
""" diff --git a/spinn_front_end_common/utilities/database/database_connection.py b/spinn_front_end_common/utilities/database/database_connection.py index 62ee3837c5..55294efa53 100644 --- a/spinn_front_end_common/utilities/database/database_connection.py +++ b/spinn_front_end_common/utilities/database/database_connection.py @@ -75,8 +75,9 @@ def __init__( super().__init__( local_host=local_host, local_port=local_port, remote_host=None, remote_port=None) - thread = Thread(name="SpyNNakerDatabaseConnection:{}:{}".format( - self.local_ip_address, self.local_port), target=self.__run) + thread = Thread(name=f"SpyNNakerDatabaseConnection:" + f"{self.local_ip_address}:{self.local_port}", + target=self.__run) self.__database_callbacks: List[_DBCB] = list() self.__start_resume_callback = start_resume_callback_function self.__pause_and_stop_callback = stop_pause_callback_function diff --git a/spinn_front_end_common/utilities/database/database_reader.py b/spinn_front_end_common/utilities/database/database_reader.py index ecdfb37041..cf4a6ebfa3 100644 --- a/spinn_front_end_common/utilities/database/database_reader.py +++ b/spinn_front_end_common/utilities/database/database_reader.py @@ -12,8 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. from typing import Dict, List, Optional, Tuple -from spinn_front_end_common.utilities.sqlite_db import SQLiteDB from spinnman.spalloc import SpallocClient, SpallocJob +from spinn_front_end_common.utilities.sqlite_db import SQLiteDB class DatabaseReader(SQLiteDB): diff --git a/spinn_front_end_common/utilities/emergency_recovery.py b/spinn_front_end_common/utilities/emergency_recovery.py index fa62454d5d..d37fa853a3 100644 --- a/spinn_front_end_common/utilities/emergency_recovery.py +++ b/spinn_front_end_common/utilities/emergency_recovery.py @@ -13,13 +13,17 @@ # limitations under the License. 
import logging from typing import List, Optional, Tuple + from spinn_utilities.log import FormatAdapter + from spinnman.model import ExecutableTargets, CPUInfos from spinnman.model.enums import CPUState + from pacman.model.placements import Placement + from spinn_front_end_common.abstract_models import AbstractHasAssociatedBinary -from .iobuf_extractor import IOBufExtractor from spinn_front_end_common.data import FecDataView +from .iobuf_extractor import IOBufExtractor logger = FormatAdapter(logging.getLogger(__name__)) _bad_states = frozenset((CPUState.RUN_TIME_EXCEPTION, CPUState.WATCHDOG)) diff --git a/spinn_front_end_common/utilities/helpful_functions.py b/spinn_front_end_common/utilities/helpful_functions.py index cc43af7c88..2b72cf18bc 100644 --- a/spinn_front_end_common/utilities/helpful_functions.py +++ b/spinn_front_end_common/utilities/helpful_functions.py @@ -206,7 +206,7 @@ def determine_flow_states( expected_start_states[ExecutableType.SYNC] = (CPUState.SYNC0,) expected_end_states[ExecutableType.SYNC] = (CPUState.FINISHED,) - # cores that use our sim interface + # cores that use our simulation interface elif start_type == ExecutableType.USES_SIMULATION_INTERFACE: if no_sync_changes % 2 == 0: expected_start_states[start_type] = (CPUState.SYNC0,) diff --git a/spinn_front_end_common/utilities/iobuf_extractor.py b/spinn_front_end_common/utilities/iobuf_extractor.py index 713fa77f06..80f86847bd 100644 --- a/spinn_front_end_common/utilities/iobuf_extractor.py +++ b/spinn_front_end_common/utilities/iobuf_extractor.py @@ -16,15 +16,19 @@ import logging import os import re -from typing import List, Optional, Pattern, Sequence, Set, Tuple, Union +from typing import ( + Iterable, List, Optional, Pattern, Sequence, Set, Tuple, TypeVar, Union) + +from spinn_utilities.config_holder import get_config_str_or_none from spinn_utilities.log import FormatAdapter from spinn_utilities.make_tools.replacer import Replacer from spinn_utilities.progress_bar import ProgressBar + 
from spinn_machine.core_subsets import CoreSubsets + from spinnman.model import ExecutableTargets from spinnman.model.enums import ExecutableType from spinnman.model.io_buffer import IOBuffer -from spinn_utilities.config_holder import get_config_str_or_none from spinn_front_end_common.data import FecDataView from spinn_front_end_common.utilities.helpful_functions import ( @@ -37,9 +41,25 @@ ENTRY_FILE = 1 ENTRY_TEXT = 2 +#: :meta private: +T = TypeVar("T") + class _DummyProgress(object): - def over(self, values): + """ + An alternative to the Progress bar so the over can be called. + """ + + def over(self, values: Iterable[T]) -> Iterable[T]: + """ + Simple wrapper for the cases where a progress bar is being used + to show progress through the iteration over a single collection. + + :param ~collections.abc.Iterable values: + The base collection (any iterable) being iterated over + :return: The passed in collection unchanged. + :rtype: ~collections.abc.Iterable + """ return values diff --git a/spinn_front_end_common/utilities/math_constants.py b/spinn_front_end_common/utilities/math_constants.py index eb14ed557f..e79d7ecb29 100644 --- a/spinn_front_end_common/utilities/math_constants.py +++ b/spinn_front_end_common/utilities/math_constants.py @@ -16,6 +16,7 @@ random math constants """ +# pylint: disable=wrong-spelling-in-comment, invalid-name # Scaling factors for FP arithmetic, see Xin Jin et al. (2008) LOG_P1 = 8.0 LOG_P2 = 16.0 diff --git a/spinn_front_end_common/utilities/notification_protocol/notification_protocol.py b/spinn_front_end_common/utilities/notification_protocol/notification_protocol.py index 95022a6b23..93d77e0cc1 100644 --- a/spinn_front_end_common/utilities/notification_protocol/notification_protocol.py +++ b/spinn_front_end_common/utilities/notification_protocol/notification_protocol.py @@ -150,7 +150,6 @@ def _send_read_notification(self) -> None: database has been written. 
Message also includes the path to the database """ - # noinspection PyBroadException try: self.__do_read_notify(FecDataView.get_database_file_path()) except Exception: # pylint: disable=broad-except @@ -166,7 +165,6 @@ def __do_read_notify(self, database_path: Optional[str]) -> None: "** Notifying external sources that the database is ready for " "reading **") - # noinspection PyBroadException for c in self.__database_message_connections: try: c.send_eieio_message(message) diff --git a/spinn_front_end_common/utilities/report_functions/bit_field_compressor_report.py b/spinn_front_end_common/utilities/report_functions/bit_field_compressor_report.py index 8d46d70cc0..1317a7ff22 100644 --- a/spinn_front_end_common/utilities/report_functions/bit_field_compressor_report.py +++ b/spinn_front_end_common/utilities/report_functions/bit_field_compressor_report.py @@ -194,8 +194,8 @@ def _write_report(writer: TextIO) -> BitFieldSummary: if isinstance(total_bit_fields_merged, str): writer.write(f"\nNone of the {total_to_merge} bitfields merged") else: - writer.write("\nIn total {:.2%} of the bitfields merged".format( - total_bit_fields_merged / total_to_merge)) + percent = total_bit_fields_merged / total_to_merge + writer.write(f"\nIn total {percent:.2%} of the bitfields merged") return BitFieldSummary( lowest_per_chip=min_bit_field, max_per_chip=top_bit_field, diff --git a/spinn_front_end_common/utilities/report_functions/board_chip_report.py b/spinn_front_end_common/utilities/report_functions/board_chip_report.py index 63ae4e4a4a..43fe1e1ac7 100644 --- a/spinn_front_end_common/utilities/report_functions/board_chip_report.py +++ b/spinn_front_end_common/utilities/report_functions/board_chip_report.py @@ -35,7 +35,7 @@ def board_chip_report() -> None: len(machine.ethernet_connected_chips), "Writing the board chip report") - # iterate over ethernet chips and then the chips on that board + # iterate over Ethernet chips and then the chips on that board with open(directory_name, 
"w", encoding="utf-8") as writer: _write_report(writer, machine, progress_bar) diff --git a/spinn_front_end_common/utilities/report_functions/drift_report.py b/spinn_front_end_common/utilities/report_functions/drift_report.py index 156cb045f2..51ba29a0d3 100644 --- a/spinn_front_end_common/utilities/report_functions/drift_report.py +++ b/spinn_front_end_common/utilities/report_functions/drift_report.py @@ -55,7 +55,7 @@ def drift_report() -> None: # create the progress bar for end users progress = ProgressBar(n_chips, "Writing clock drift report") - # iterate over ethernet chips and then the chips on that board + # iterate over Ethernet chips and then the chips on that board txrx = FecDataView.get_transceiver() with open(directory_name, "a", encoding="utf-8") as writer: if ethernet_only: diff --git a/spinn_front_end_common/utilities/report_functions/energy_report.py b/spinn_front_end_common/utilities/report_functions/energy_report.py index 6bf9285d9d..c97c09cd30 100644 --- a/spinn_front_end_common/utilities/report_functions/energy_report.py +++ b/spinn_front_end_common/utilities/report_functions/energy_report.py @@ -121,6 +121,7 @@ def _write_summary_report(cls, f: TextIO, power_used: PowerUsed): "Energy used during the data extraction process is " f"{power_used.saving_joules} Joules " f"{cls.__report_time(power_used.saving_time_secs)}\n") + # pylint: disable=consider-using-f-string f.write( "Total energy used by the simulation over {} milliseconds is:\n" " {} Joules, or\n" @@ -213,7 +214,7 @@ def _write_fpga_cost(self, power_used: PowerUsed, f: TextIO): # if not spalloc, then could be any type of board if (is_config_none("Machine", "spalloc_server") and is_config_none("Machine", "remote_spinnaker_url")): - # if a spinn2 or spinn3 (4 chip boards) then they have no fpgas + # if a spinn2 or spinn3 (4 chip boards) then they have no FPGAs if version in (2, 3): f.write( f"A SpiNN-{version} board does not contain any FPGA's, " @@ -226,14 +227,14 @@ def 
_write_fpga_cost(self, power_used: PowerUsed, f: TextIO): "SpiNNaker machine.") # if a spinn4 or spinn5 board, need to verify if wrap-arounds - # are there, if not then assume fpgas are turned off. + # are there, if not then assume FPGAs are turned off. if power_used.num_fpgas == 0: - # no active fpgas + # no active FPGAs f.write( f"The FPGA's on the SpiNN-{version} board are turned off " f"and therefore the energy used by the FPGA is 0\n") return - # active fpgas; fall through to shared main part report + # active FPGAs; fall through to shared main part report # print out as needed for spalloc and non-spalloc versions if version is None: diff --git a/spinn_front_end_common/utilities/report_functions/fixed_route_from_machine_report.py b/spinn_front_end_common/utilities/report_functions/fixed_route_from_machine_report.py index 5311cfe315..4121ee17d2 100644 --- a/spinn_front_end_common/utilities/report_functions/fixed_route_from_machine_report.py +++ b/spinn_front_end_common/utilities/report_functions/fixed_route_from_machine_report.py @@ -35,6 +35,7 @@ def fixed_route_from_machine_report() -> None: for chip in progress.over(machine.chips): fixed_route = transceiver.read_fixed_route( chip.x, chip.y, app_id) + # pylint: disable=consider-using-f-string f.write("{: <3s}:{: <3s} contains route {: <10s} {}\n".format( str(chip.x), str(chip.y), _reduce_route_value( diff --git a/spinn_front_end_common/utilities/report_functions/reports.py b/spinn_front_end_common/utilities/report_functions/reports.py index cd2b181be8..98fee3a083 100644 --- a/spinn_front_end_common/utilities/report_functions/reports.py +++ b/spinn_front_end_common/utilities/report_functions/reports.py @@ -16,23 +16,26 @@ import os import time from typing import Iterable, Optional, TextIO, Tuple + from spinn_utilities.ordered_set import OrderedSet from spinn_utilities.progress_bar import ProgressBar from spinn_utilities.log import FormatAdapter + from spinn_machine import Chip, MulticastRoutingEntry, Router + 
from pacman.model.graphs.application import ( ApplicationEdgePartition, ApplicationVertex) from pacman.model.graphs.machine import ( - MachineFPGAVertex, MachineSpiNNakerLinkVertex) + MachineFPGAVertex, MachineSpiNNakerLinkVertex, MachineVertex) from pacman.model.routing_tables import ( AbstractMulticastRoutingTable, MulticastRoutingTables) from pacman.model.routing_info import BaseKeyAndMask, RoutingInfo from pacman.utilities.algorithm_utilities.routing_algorithm_utilities import ( get_app_partitions) from pacman.utilities.algorithm_utilities.routes_format import format_route + from spinn_front_end_common.data import FecDataView from .router_summary import RouterSummary -from pacman.model.graphs.machine import MachineVertex logger = FormatAdapter(logging.getLogger(__name__)) @@ -333,15 +336,14 @@ def _write_one_vertex_application_placement( key=lambda vert: vert.vertex_slice.lo_atom) for sv in machine_vertices: if isinstance(sv, MachineSpiNNakerLinkVertex): - f.write(" Slice {} on SpiNNaker Link {}, board {}," - " linked to chip {}\n" - .format(sv.vertex_slice, sv.spinnaker_link_id, - sv.board_address, sv.linked_chip_coordinates)) + f.write(f" Slice {sv.vertex_slice} on " + f"SpiNNaker Link {sv.spinnaker_link_id}, " + f"board {sv.board_address}, " + f"linked to chip {sv.linked_chip_coordinates}\n") elif isinstance(sv, MachineFPGAVertex): - f.write(" Slice {} on FGPA {}, FPGA link {}, board {}," - " linked to chip {}\n" - .format(sv.vertex_slice, sv.fpga_id, sv.fpga_link_id, - sv.board_address, sv.linked_chip_coordinates)) + f.write(f" Slice {sv.vertex_slice} on FPGA {sv.fpga_id}, " + f"FPGA link {sv.fpga_link_id}, board {sv.board_address}, " + f"linked to chip {sv.linked_chip_coordinates}\n") else: cur_placement = FecDataView.get_placement_of_vertex(sv) x, y, p = cur_placement.x, cur_placement.y, cur_placement.p @@ -399,29 +401,27 @@ def _write_one_chip_application_placement(f: TextIO, chip: Chip): vertex_label = app_vertex.label vertex_model = 
app_vertex.__class__.__name__ vertex_atoms = app_vertex.n_atoms - f.write(" Processor {}: Vertex: '{}', pop size: {}\n".format( - pro_id, vertex_label, vertex_atoms)) + f.write(f" Processor {pro_id}: Vertex: '{vertex_label}', " + f"pop size: {vertex_atoms}\n") f.write(f" Slice: {vertex.vertex_slice}") f.write(f" {vertex.label}\n") - f.write(" Model: {}\n".format(vertex_model)) + f.write(f" Model: {vertex_model}\n") else: - f.write(" Processor {}: System Vertex: '{}'\n".format( - pro_id, vertex.label)) - f.write(" Model: {}\n".format( - vertex.__class__.__name__)) + f.write(f" Processor {pro_id}: System Vertex: '{vertex.label}'\n") + f.write(f" Model: {vertex.__class__.__name__}\n") sdram = vertex.sdram_required - f.write(" SDRAM required: {}; {} per timestep\n\n" - .format(sdram.fixed, sdram.per_timestep)) + f.write(f" SDRAM required: {sdram.fixed}; " + f"{sdram.per_timestep} per timestep\n\n") if total_sdram is None: total_sdram = sdram else: total_sdram += sdram if total_sdram is not None: - f.write("Total SDRAM on chip ({} available): {}; {} per-timestep\n\n" - .format(chip, total_sdram.fixed, - total_sdram.per_timestep)) + f.write(f"Total SDRAM on chip ({chip} available): " + f"{total_sdram.fixed}; {total_sdram.per_timestep} " + f"per-timestep\n\n") def sdram_usage_report_per_chip() -> None: @@ -479,9 +479,9 @@ def _sdram_usage_report_per_chip_with_timesteps( preamble=f"core ({x},{y},{p})", target=f) else: f.write( - "SDRAM reqs for core ({},{},{}) is {} KB ({} bytes) for {}\n" - "".format(x, y, p, int(core_sdram / 1024.0), core_sdram, - placement)) + f"SDRAM reqs for core ({x},{y},{p}) is " + f"{int(core_sdram / 1024.0)} KB ({core_sdram} bytes)" + f" for {placement}\n") key = (x, y) if key not in used_sdram_by_chip: used_sdram_by_chip[key] = core_sdram @@ -492,12 +492,10 @@ used_sdram = used_sdram_by_chip[chip.x, chip.y] if used_sdram: f.write( - "**** Chip: ({}, {}) has total memory usage of" - " {} KB ({} 
bytes) out of a max of " - "{} KB ({} bytes)\n\n".format( - chip.x, chip.y, - int(used_sdram / 1024.0), used_sdram, - int(chip.sdram / 1024.0), chip.sdram)) + f"**** Chip: ({chip.x}, {chip.y}) has total memory usage " + f"of {int(used_sdram / 1024.0)} KB ({used_sdram} bytes) " + f"out of a max of " + f"{int(chip.sdram / 1024.0)} KB ({chip.sdram} bytes)\n\n") except KeyError: # Do Nothing pass @@ -543,18 +541,18 @@ def _write_vertex_virtual_keys( rinfo = routing_infos.get_routing_info_from_pre_vertex( pre_vertex, part_id) # Might be None if the partition has no outgoing vertices e.g. a Poisson - # source replaced by SDRAM comms + # source replaced by SDRAM data passing if rinfo is not None: f.write(f"Vertex: {pre_vertex}\n") - f.write(" Partition: {}, Routing Info: {}\n".format( - part_id, rinfo.key_and_mask)) + f.write(f" Partition: {part_id}, " + f"Routing Info: {rinfo.key_and_mask}\n") for m_vertex in pre_vertex.splitter.get_out_going_vertices(part_id): r_info = routing_infos.get_routing_info_from_pre_vertex( m_vertex, part_id) if r_info is not None: - f.write(" Machine Vertex: {}, Slice: {}, Routing Info: {}\n" - .format(m_vertex, m_vertex.vertex_slice, - r_info.key_and_mask)) + f.write(f" Machine Vertex: {m_vertex}, " + f"Slice: {m_vertex.vertex_slice}, " + f"Routing Info: {r_info.key_and_mask}\n") def router_report_from_router_tables() -> None: @@ -599,19 +597,18 @@ def generate_routing_table( ~pacman.model.routing_tables.AbstractMulticastRoutingTable :param str top_level_folder: """ - file_name = "routing_table_{}_{}.rpt".format( - routing_table.x, routing_table.y) + file_name = f"routing_table_{routing_table.x}_{routing_table.y}.rpt" file_path = os.path.join(top_level_folder, file_name) try: with open(file_path, "w", encoding="utf-8") as f: - f.write("Router contains {} entries\n".format( - routing_table.number_of_entries)) + f.write( + f"Router contains {routing_table.number_of_entries} entries\n") - f.write("{: <5s} {: <10s} {: <10s} {: <10s} {: <7s} 
{}\n".format( - "Index", "Key", "Mask", "Route", "Default", "[Cores][Links]")) + f.write(f'{"Index": <5s} {"Key": <10s} {"Mask": <10s} ' + f'{"Route": <10s} {"Default": <7s} {"[Cores][Links]"}\n') f.write( - "{:-<5s} {:-<10s} {:-<10s} {:-<10s} {:-<7s} {:-<14s}\n".format( - "", "", "", "", "", "")) + f'{"":-<5s} {"":-<10s} {"":-<10s} ' + f'{"":-<10s} {"":-<7s} {"":-<14s}\n') line_format = "{: >5d} {}\n" entry_count = 0 @@ -731,8 +728,8 @@ def _search_route( source_placement = FecDataView.get_placement_of_vertex(source_vertex) x = source_placement.x y = source_placement.y - text = " {}:{}:{} -> ".format( - source_placement.x, source_placement.y, source_placement.p) + text = f" {source_placement.x}:{source_placement.y}:" \ + f"{source_placement.p} -> " text += _recursive_trace_to_destinations( machine[x, y], key_and_mask, pre_space=" ") diff --git a/spinn_front_end_common/utilities/report_functions/write_json_machine.py b/spinn_front_end_common/utilities/report_functions/write_json_machine.py index d0d4c693d7..8763c55e3f 100644 --- a/spinn_front_end_common/utilities/report_functions/write_json_machine.py +++ b/spinn_front_end_common/utilities/report_functions/write_json_machine.py @@ -58,7 +58,7 @@ def write_json_machine( def _progress(progress_bar: bool) -> ProgressBar: - # Steps are tojson, validate and writefile + # Steps are create json object, validate json and write json to a file if progress_bar: return ProgressBar(3, "Converting to JSON machine") else: diff --git a/spinn_front_end_common/utilities/report_functions/write_json_placements.py b/spinn_front_end_common/utilities/report_functions/write_json_placements.py index b3b768ddfe..f190c48181 100644 --- a/spinn_front_end_common/utilities/report_functions/write_json_placements.py +++ b/spinn_front_end_common/utilities/report_functions/write_json_placements.py @@ -32,7 +32,7 @@ def write_json_placements() -> None: """ file_path = os.path.join( FecDataView.get_json_dir_path(), _PLACEMENTS_FILENAME) - # Steps are 
tojson, validate and writefile + # Steps are create json object, validate json and write json to a file with ProgressBar(3, "Converting to JSON Placements") as progress: json_obj = placements_to_json() diff --git a/spinn_front_end_common/utilities/report_functions/write_json_routing_tables.py b/spinn_front_end_common/utilities/report_functions/write_json_routing_tables.py index 3a0df9b1e6..9652b7b3d5 100644 --- a/spinn_front_end_common/utilities/report_functions/write_json_routing_tables.py +++ b/spinn_front_end_common/utilities/report_functions/write_json_routing_tables.py @@ -32,7 +32,7 @@ def write_json_routing_tables(router_tables: MulticastRoutingTables) -> str: """ file_path = os.path.join( FecDataView.get_json_dir_path(), _ROUTING_TABLES_FILENAME) - # Steps are tojson, validate and writefile + # Steps are create json object, validate json and write json to a file with ProgressBar(3, "Converting to JSON RouterTables") as progress: json_obj = to_json(router_tables) diff --git a/spinn_front_end_common/utilities/sqlite_db.py b/spinn_front_end_common/utilities/sqlite_db.py index 9876406466..93bf4a0da5 100644 --- a/spinn_front_end_common/utilities/sqlite_db.py +++ b/spinn_front_end_common/utilities/sqlite_db.py @@ -44,7 +44,7 @@ class SQLiteDB(object): for how to do parameter binding. .. note:: - If you plan to use the WAL journaling mode for the DB, you are + If you plan to use the WAL journal mode for the DB, you are *recommended* to set this up in the DDL file via:: PRAGMA journal_mode=WAL; diff --git a/spinn_front_end_common/utilities/system_control_logic.py b/spinn_front_end_common/utilities/system_control_logic.py index 664c3bced1..3dcff8159b 100644 --- a/spinn_front_end_common/utilities/system_control_logic.py +++ b/spinn_front_end_common/utilities/system_control_logic.py @@ -36,7 +36,7 @@ def run_system_application( timeout: Optional[float] = None): """ Executes the given _system_ application. - Used for on-chip expanders, compressors, etc. 
+ Used for on-chip expander, compressors, etc. :param ~spinnman.model.ExecutableTargets executable_cores: the cores to run the executable on. diff --git a/spinn_front_end_common/utilities/utility_objs/extra_monitor_scp_messages/get_reinjection_status_message.py b/spinn_front_end_common/utilities/utility_objs/extra_monitor_scp_messages/get_reinjection_status_message.py index 7be7fb20b0..9057e090bf 100644 --- a/spinn_front_end_common/utilities/utility_objs/extra_monitor_scp_messages/get_reinjection_status_message.py +++ b/spinn_front_end_common/utilities/utility_objs/extra_monitor_scp_messages/get_reinjection_status_message.py @@ -76,5 +76,11 @@ def read_data_bytestring(self, data: bytes, offset: int): @property def reinjection_functionality_status(self) -> ReInjectionStatus: + """ + Gets the reinjection functionality status + + :rtype: ReInjectionStatus + :raises AssertionError: If not yet read + """ assert self._reinjection_status is not None, "response not yet read" return self._reinjection_status diff --git a/spinn_front_end_common/utilities/utility_objs/live_packet_gather_parameters.py b/spinn_front_end_common/utilities/utility_objs/live_packet_gather_parameters.py index 10dc4a70b8..327e9c6bdd 100644 --- a/spinn_front_end_common/utilities/utility_objs/live_packet_gather_parameters.py +++ b/spinn_front_end_common/utilities/utility_objs/live_packet_gather_parameters.py @@ -13,8 +13,8 @@ # limitations under the License. 
from typing import Optional from spinnman.messages.eieio import EIEIOType, EIEIOPrefix -from spinn_front_end_common.utilities.exceptions import ConfigurationException from pacman.model.resources.iptag_resource import IPtagResource +from spinn_front_end_common.utilities.exceptions import ConfigurationException _HAS_PAYLOAD = (EIEIOType.KEY_PAYLOAD_32_BIT, EIEIOType.KEY_PAYLOAD_16_BIT) _NO_PAYLOAD = (EIEIOType.KEY_32_BIT, EIEIOType.KEY_16_BIT) diff --git a/spinn_front_end_common/utilities/utility_objs/reinjection_status.py b/spinn_front_end_common/utilities/utility_objs/reinjection_status.py index 6224d606d0..299b66a953 100644 --- a/spinn_front_end_common/utilities/utility_objs/reinjection_status.py +++ b/spinn_front_end_common/utilities/utility_objs/reinjection_status.py @@ -223,10 +223,20 @@ def is_reinjecting_fixed_route(self) -> bool: @property def links_dropped_from(self) -> Sequence[int]: + """ + Ids of links where packets were dropped / reinjected + + :rtype: list(int) + """ return [ link for link in range(6) if self._link_proc_bits & (1 << link)] @property def processors_dropped_from(self) -> Sequence[int]: + """ + Ids of processors which failed to accept packets. 
+ + :rtype: list(int) + """ return [ p for p in range(18) if self._link_proc_bits & (1 << p + 6)] diff --git a/spinn_front_end_common/utility_models/chip_power_monitor_machine_vertex.py b/spinn_front_end_common/utility_models/chip_power_monitor_machine_vertex.py index 80c9de8b0c..2ad9952b70 100644 --- a/spinn_front_end_common/utility_models/chip_power_monitor_machine_vertex.py +++ b/spinn_front_end_common/utility_models/chip_power_monitor_machine_vertex.py @@ -15,15 +15,20 @@ import math import logging from enum import IntEnum -import numpy from typing import List + +import numpy + from spinn_utilities.config_holder import get_config_int from spinn_utilities.log import FormatAdapter from spinn_utilities.overrides import overrides + from spinnman.model.enums import ExecutableType + from pacman.model.graphs.machine import MachineVertex from pacman.model.resources import AbstractSDRAM, VariableSDRAM from pacman.model.placements import Placement + from spinn_front_end_common.abstract_models import ( AbstractGeneratesDataSpecification, AbstractHasAssociatedBinary) from spinn_front_end_common.data import FecDataView @@ -126,8 +131,7 @@ def binary_file_name() -> str: @overrides(AbstractGeneratesDataSpecification.generate_data_specification) def generate_data_specification( - self, spec: DataSpecificationGenerator, - placement: Placement): # @UnusedVariable + self, spec: DataSpecificationGenerator, placement: Placement): spec.comment("\n*** Spec for ChipPowerMonitor Instance ***\n\n") # Construct the data images needed for the Neuron: diff --git a/spinn_front_end_common/utility_models/command_sender_machine_vertex.py b/spinn_front_end_common/utility_models/command_sender_machine_vertex.py index 915a204ed5..0148ce7b0e 100644 --- a/spinn_front_end_common/utility_models/command_sender_machine_vertex.py +++ b/spinn_front_end_common/utility_models/command_sender_machine_vertex.py @@ -17,13 +17,18 @@ from typing import ( Callable, Dict, Iterable, List, Sequence, Set, Tuple, 
Type, TypeVar, TYPE_CHECKING) + from spinn_utilities.overrides import overrides + from spinnman.model.enums import ExecutableType + +from pacman.model.graphs.abstract_edge import AbstractEdge from pacman.model.graphs import AbstractVertex from pacman.model.graphs.machine import MachineVertex, MachineEdge from pacman.model.placements import Placement from pacman.model.resources import AbstractSDRAM, ConstantSDRAM from pacman.model.routing_info import BaseKeyAndMask + from spinn_front_end_common.abstract_models import ( AbstractHasAssociatedBinary, AbstractGeneratesDataSpecification) from spinn_front_end_common.data import FecDataView @@ -34,7 +39,6 @@ from spinn_front_end_common.utilities.constants import ( SYSTEM_BYTES_REQUIREMENT, SIMULATION_N_BYTES, BYTES_PER_WORD) from spinn_front_end_common.utilities.exceptions import ConfigurationException -from pacman.model.graphs.abstract_edge import AbstractEdge from spinn_front_end_common.interface.ds import DataSpecificationGenerator if TYPE_CHECKING: from .command_sender import CommandSender @@ -62,7 +66,10 @@ class CommandSenderMachineVertex( "_vertex_to_key_map") # Regions for populations - class DATA_REGIONS(IntEnum): + class DataRegions(IntEnum): + """ + The ids for each region of the data this Population used. 
+ """ SYSTEM_REGION = 0 COMMANDS_WITH_ARBITRARY_TIMES = 1 COMMANDS_AT_START_RESUME = 2 @@ -161,7 +168,7 @@ def get_fixed_key_and_mask(self, partition_id: str) -> BaseKeyAndMask: @property @overrides(ProvidesProvenanceDataFromMachineImpl._provenance_region_id) def _provenance_region_id(self) -> int: - return self.DATA_REGIONS.PROVENANCE_REGION + return self.DataRegions.PROVENANCE_REGION @property @overrides(ProvidesProvenanceDataFromMachineImpl._n_additional_data_items) @@ -188,7 +195,7 @@ def generate_data_specification( routing_infos = FecDataView.get_routing_infos() av = self.app_vertex assert av is not None - for mc_key in self._keys_to_partition_id.keys(): + for mc_key in self._keys_to_partition_id: allocated_mc_key = routing_infos.get_first_key_from_pre_vertex( av, self._keys_to_partition_id[mc_key]) if allocated_mc_key != mc_key: @@ -213,21 +220,21 @@ def generate_data_specification( # Write system region spec.comment("\n*** Spec for multicast source ***\n\n") - spec.switch_write_focus(self.DATA_REGIONS.SYSTEM_REGION) + spec.switch_write_focus(self.DataRegions.SYSTEM_REGION) spec.write_array(get_simulation_header_array( self.get_binary_file_name())) # write commands to spec for timed commands spec.switch_write_focus( - region=self.DATA_REGIONS.COMMANDS_WITH_ARBITRARY_TIMES) + region=self.DataRegions.COMMANDS_WITH_ARBITRARY_TIMES) self._write_timed_commands(self._timed_commands, spec) # write commands fired off during a start or resume - spec.switch_write_focus(self.DATA_REGIONS.COMMANDS_AT_START_RESUME) + spec.switch_write_focus(self.DataRegions.COMMANDS_AT_START_RESUME) self._write_basic_commands(self._commands_at_start_resume, spec) # write commands fired off during a pause or end - spec.switch_write_focus(self.DATA_REGIONS.COMMANDS_AT_STOP_PAUSE) + spec.switch_write_focus(self.DataRegions.COMMANDS_AT_STOP_PAUSE) self._write_basic_commands(self._commands_at_pause_stop, spec) # End-of-Spec: @@ -298,19 +305,19 @@ def _reserve_memory_regions( # Reserve memory: 
spec.reserve_memory_region( - region=self.DATA_REGIONS.SYSTEM_REGION, + region=self.DataRegions.SYSTEM_REGION, size=SIMULATION_N_BYTES, label='system') spec.reserve_memory_region( - region=self.DATA_REGIONS.COMMANDS_WITH_ARBITRARY_TIMES, + region=self.DataRegions.COMMANDS_WITH_ARBITRARY_TIMES, size=time_command_size, label='commands with arbitrary times') spec.reserve_memory_region( - region=self.DATA_REGIONS.COMMANDS_AT_START_RESUME, + region=self.DataRegions.COMMANDS_AT_START_RESUME, size=start_command_size, label='commands with start resume times') spec.reserve_memory_region( - region=self.DATA_REGIONS.COMMANDS_AT_STOP_PAUSE, + region=self.DataRegions.COMMANDS_AT_STOP_PAUSE, size=end_command_size, label='commands with stop pause times') self.reserve_provenance_data_region(spec) diff --git a/spinn_front_end_common/utility_models/data_speed_up_packet_gatherer_machine_vertex.py b/spinn_front_end_common/utility_models/data_speed_up_packet_gatherer_machine_vertex.py index 8ca8171ded..8390d60477 100644 --- a/spinn_front_end_common/utility_models/data_speed_up_packet_gatherer_machine_vertex.py +++ b/spinn_front_end_common/utility_models/data_speed_up_packet_gatherer_machine_vertex.py @@ -19,20 +19,26 @@ import struct from enum import Enum, IntEnum from typing import ( - Any, BinaryIO, Iterable, List, Optional, Set, Tuple, Union, TYPE_CHECKING) + Any, BinaryIO, Final, Iterable, List, Optional, Set, Tuple, Union, + TYPE_CHECKING) + from spinn_utilities.config_holder import get_config_bool from spinn_utilities.overrides import overrides from spinn_utilities.log import FormatAdapter from spinn_utilities.typing.coords import XY + from spinn_machine import Chip + from spinnman.exceptions import SpinnmanTimeoutException from spinnman.messages.sdp import SDPMessage, SDPHeader, SDPFlag from spinnman.model.enums import ( CPUState, ExecutableType, SDP_PORTS, UserRegister) from spinnman.connections.udp_packet_connections import SCAMPConnection + from pacman.model.graphs.machine 
import MachineVertex from pacman.model.resources import ConstantSDRAM, IPtagResource from pacman.model.placements import Placement + from spinn_front_end_common.data import FecDataView from spinn_front_end_common.interface.provenance import ProvenanceWriter from spinn_front_end_common.utilities.helpful_functions import ( @@ -57,7 +63,7 @@ log = FormatAdapter(logging.getLogger(__name__)) -# shift by for the destination x coord in the word. +# shift by for the destination x coordinate in the word. DEST_X_SHIFT = 16 TIMEOUT_RETRY_LIMIT = 100 @@ -70,8 +76,8 @@ SDP_RETRANSMISSION_HEADER_SIZE = 2 #: size of config region in bytes -#: 1.new seq key, 2.first data key, 3. transaction id key 4.end flag key, -# 5.base key, 6.iptag tag +#: 1.new sequence key, 2.first data key, 3. transaction id key +# 4.end flag key, 5.base key, 6.iptag tag CONFIG_SIZE = 6 * BYTES_PER_WORD #: items of data a SDP packet can hold when SCP header removed @@ -86,10 +92,10 @@ #: the size in words of the command flag COMMAND_SIZE_IN_ITEMS = 1 -#: offset for missing seq starts in first packet +#: offset for missing sequence starts in first packet WORDS_FOR_COMMAND_N_MISSING_TRANSACTION = 3 -#: offset for missing seq starts in more packet +#: offset for missing sequence starts in more packet WORDS_FOR_COMMAND_TRANSACTION = ( COMMAND_SIZE_IN_ITEMS + TRANSACTION_ID_SIZE_IN_ITEMS) @@ -120,12 +126,12 @@ WORDS_IN_FULL_PACKET_WITH_KEY * BYTES_PER_WORD) #: size of data in key space -#: x, y, key (all ints) for possible 48 chips, plus n chips to read, +#: x, y, key (all int values) for possible 48 chips, plus n chips to read, # the reinjector base key. SIZE_DATA_IN_CHIP_TO_KEY_SPACE = ((3 * 48) + 2) * BYTES_PER_WORD -class _DATA_REGIONS(IntEnum): +class _DataRegions(IntEnum): """ DSG data regions. 
""" @@ -134,14 +140,14 @@ class _DATA_REGIONS(IntEnum): PROVENANCE_REGION = 2 -class _PROV_LABELS(str, Enum): +class _ProvLabels(str, Enum): SENT = "Sent_SDP_Packets" RECEIVED = "Received_SDP_Packets" IN_STREAMS = "Speed_Up_Input_Streams" OUT_STREAMS = "Speed_Up_Output_Streams" -class DATA_OUT_COMMANDS(IntEnum): +class _DataOutCommands(IntEnum): """ Command IDs for the SDP packets for data out. """ @@ -151,7 +157,7 @@ class DATA_OUT_COMMANDS(IntEnum): CLEAR = 2000 -class DATA_IN_COMMANDS(IntEnum): +class _DataInCommands(IntEnum): """ Command IDs for the SDP packets for data in. """ @@ -175,7 +181,7 @@ class DATA_IN_COMMANDS(IntEnum): VERIFY_SENT_DATA = False # provenance data size -_PROVENANCE_DATA_SIZE = _FOUR_WORDS.size +_PROVENANCE_DATA_SIZE: Final = _FOUR_WORDS.size def ceildiv(dividend, divisor) -> int: @@ -187,7 +193,7 @@ def ceildiv(dividend, divisor) -> int: return int(q) + (r != 0) -# SDRAM requirement for storing missing SDP packets seq nums +# SDRAM requirement for storing missing SDP packets sequence numbers SDRAM_FOR_MISSING_SDP_SEQ_NUMS = ceildiv( 120.0 * 1024 * BYTES_PER_KB, WORDS_PER_FULL_PACKET_WITH_SEQUENCE_NUM * BYTES_PER_WORD) @@ -218,13 +224,13 @@ class DataSpeedUpPacketGatherMachineVertex( "_transaction_id", # path for the data in report "_in_report_path", - # ipaddress + # IP address "_ip_address", # store for the last reinjection status "_last_status", - # the max seq num expected given a data retrieval + # the max sequence number expected given a data retrieval "_max_seq_num", - # holder for missing seq nums for data in + # holder for missing sequence numbers for data in "_missing_seq_nums_data_in", # holder of data from out "_output", @@ -245,7 +251,8 @@ class DataSpeedUpPacketGatherMachineVertex( END_FLAG_KEY = 0xFFFFFFF6 TRANSACTION_ID_KEY = 0xFFFFFFF5 - #: to use with multicast stuff (reinjection acks have to be fixed route) + #: to use with multicast stuff + # (reinjection acknowledgements have to be fixed route) BASE_MASK = 0xFFFFFFFB 
NEW_SEQ_KEY_OFFSET = 1 FIRST_DATA_KEY_OFFSET = 2 @@ -269,10 +276,10 @@ class DataSpeedUpPacketGatherMachineVertex( _TIMEOUT_PER_RECEIVE_IN_SECONDS = 2 _TIMEOUT_FOR_SENDING_IN_SECONDS = 0.01 - # end flag for missing seq nums + # end flag for missing sequence numbers _MISSING_SEQ_NUMS_END_FLAG = 0xFFFFFFFF - # flag for saying missing all SEQ numbers + # flag for saying missing all sequence numbers FLAG_FOR_MISSING_ALL_SEQUENCES = 0xFFFFFFFE _ADDRESS_PACKET_BYTE_FORMAT = struct.Struct( @@ -382,7 +389,7 @@ def generate_data_specification( base_key = self.BASE_KEY transaction_id_key = self.TRANSACTION_ID_KEY - spec.switch_write_focus(_DATA_REGIONS.CONFIG) + spec.switch_write_focus(_DataRegions.CONFIG) spec.write_value(new_seq_key) spec.write_value(first_data_key) spec.write_value(transaction_id_key) @@ -398,9 +405,9 @@ def generate_data_specification( spec.write_value(iptag.tag) self._remote_tag = iptag.tag - # write mc chip key map + # write multi cast chip key map machine = FecDataView.get_machine() - spec.switch_write_focus(_DATA_REGIONS.CHIP_TO_KEY_SPACE) + spec.switch_write_focus(_DataRegions.CHIP_TO_KEY_SPACE) chip_xys_on_board = list(machine.get_existing_xys_on_board( machine[placement.xy])) @@ -441,15 +448,15 @@ def _reserve_memory_regions(self, spec: DataSpecificationGenerator): :param ~.DataSpecificationGenerator spec: spec file """ spec.reserve_memory_region( - region=_DATA_REGIONS.CONFIG, + region=_DataRegions.CONFIG, size=CONFIG_SIZE, label="config") spec.reserve_memory_region( - region=_DATA_REGIONS.CHIP_TO_KEY_SPACE, + region=_DataRegions.CHIP_TO_KEY_SPACE, size=SIZE_DATA_IN_CHIP_TO_KEY_SPACE, label="mc_key_map") spec.reserve_memory_region( - region=_DATA_REGIONS.PROVENANCE_REGION, + region=_DataRegions.PROVENANCE_REGION, size=_PROVENANCE_DATA_SIZE, label="Provenance") @overrides(AbstractHasAssociatedBinary.get_binary_file_name) @@ -664,11 +671,11 @@ def _send_data_via_extra_monitors( continue # Decide what to do with the packet - if cmd == 
DATA_IN_COMMANDS.RECEIVE_FINISHED: + if cmd == _DataInCommands.RECEIVE_FINISHED: received_confirmation = True break - if cmd != DATA_IN_COMMANDS.RECEIVE_MISSING_SEQ_DATA: + if cmd != _DataInCommands.RECEIVE_MISSING_SEQ_DATA: raise ValueError(f"Unknown command {cmd} received") # The currently received packet has missing sequence @@ -729,7 +736,7 @@ def _read_in_missing_seq_nums( new_seq_nums = n_word_struct(n_elements).unpack_from( data, position) - # add missing seqs accordingly + # add missing sequence numbers accordingly seen_last = False seen_all = False if new_seq_nums[-1] == self._MISSING_SEQ_NUMS_END_FLAG: @@ -759,7 +766,7 @@ def _outgoing_retransmit_missing_seq_nums( missing_seqs_as_list = list(missing) missing_seqs_as_list.sort() - # send seq data + # send sequence data for missing_seq_num in missing_seqs_as_list: message, _length = self.__make_data_in_stream_message( data_to_write, missing_seq_num, None) @@ -813,7 +820,7 @@ def __make_data_in_stream_message( # create message body packet_data = _THREE_WORDS.pack( - DATA_IN_COMMANDS.SEND_SEQ_DATA, self._transaction_id, + _DataInCommands.SEND_SEQ_DATA, self._transaction_id, seq_num) + data_to_write[position:position+packet_data_length] # return message for sending, and the length in data sent @@ -827,7 +834,7 @@ def __send_location(self, start_address: int, connection: SCAMPConnection): """ connection.send_sdp_message(self.__make_data_in_message( _FIVE_WORDS.pack( - DATA_IN_COMMANDS.SEND_DATA_TO_LOCATION, + _DataInCommands.SEND_DATA_TO_LOCATION, self._transaction_id, start_address, self._coord_word, self._max_seq_num - 1))) log.debug( @@ -840,7 +847,7 @@ def __send_tell_flag(self, connection: SCAMPConnection) -> None: """ connection.send_sdp_message(self.__make_data_in_message( _TWO_WORDS.pack( - DATA_IN_COMMANDS.SEND_TELL, self._transaction_id))) + _DataInCommands.SEND_TELL, self._transaction_id))) def _send_all_data_based_packets( self, data_to_write: bytes, start_address: int, @@ -859,14 +866,14 @@ def 
_send_all_data_based_packets( # send rest of data for seq_num in range(self._max_seq_num or 0): - # put in command flag and seq num + # put in command flag and sequence number message, length_to_send = self.__make_data_in_stream_message( data_to_write, seq_num, position_in_data) position_in_data += length_to_send # send the message self.__throttled_send(message, connection) - log.debug("sent seq {} of {} bytes", seq_num, length_to_send) + log.debug("sent sequence {} of {} bytes", seq_num, length_to_send) # check for end flag self.__send_tell_flag(connection) @@ -1042,7 +1049,7 @@ def get_data( # send connection.send_sdp_message(self.__make_data_out_message( placement, _FOUR_WORDS.pack( - DATA_OUT_COMMANDS.START_SENDING, transaction_id, + _DataOutCommands.START_SENDING, transaction_id, memory_address, length_in_bytes))) # receive @@ -1055,7 +1062,7 @@ def get_data( # Stop anything else getting through (and reduce traffic) connection.send_sdp_message(self.__make_data_out_message( placement, _TWO_WORDS.pack( - DATA_OUT_COMMANDS.CLEAR, transaction_id))) + _DataOutCommands.CLEAR, transaction_id))) end = float(time.time()) with ProvenanceWriter() as db: @@ -1189,7 +1196,7 @@ def _determine_and_retransmit_missing_seq_nums( lost_seq_nums.append(len(missing_seq_nums)) # for seq_num in sorted(seq_nums): - # log.debug("from list I'm missing sequence num {}", seq_num) + # log.debug("from list I'm missing sequence number {}", seq_num) if not missing_seq_nums: return True @@ -1225,7 +1232,7 @@ def _determine_and_retransmit_missing_seq_nums( # pack flag and n packets _THREE_WORDS.pack_into( - data, 0, DATA_OUT_COMMANDS.START_MISSING_SEQ, + data, 0, _DataOutCommands.START_MISSING_SEQ, transaction_id, n_packets) # update state @@ -1248,7 +1255,7 @@ def _determine_and_retransmit_missing_seq_nums( # pack flag _TWO_WORDS.pack_into( - data, offset, DATA_OUT_COMMANDS.MISSING_SEQ, + data, offset, _DataOutCommands.MISSING_SEQ, transaction_id) offset += BYTES_PER_WORD * 
WORDS_FOR_COMMAND_TRANSACTION length_left_in_packet -= WORDS_FOR_COMMAND_TRANSACTION @@ -1300,7 +1307,7 @@ def _process_data( is_end_of_stream = ( first_packet_element & self._LAST_MESSAGE_FLAG_BIT_MASK) != 0 - # check seq num not insane + # check sequence number not insane if seq_num > self._max_seq_num: raise ValueError( f"got an insane sequence number. got {seq_num} when " @@ -1312,7 +1319,7 @@ def _process_data( # write data - # read offset from data is at byte 8. as first 4 is seq num, + # read offset from data is at byte 8. as first 4 is sequence number, # second 4 is transaction id true_data_length = ( offset + length_of_data - BYTES_FOR_SEQ_AND_TRANSACTION_ID) @@ -1322,7 +1329,7 @@ def _process_data( offset, true_data_length, data, BYTES_FOR_SEQ_AND_TRANSACTION_ID, length_of_data) - # add seq num to list + # add sequence number to list seq_nums.add(seq_num) # if received a last flag on its own, its during retransmission. @@ -1404,7 +1411,7 @@ def __provenance_address(x: int, y: int, p: int) -> int: # Get the provenance region base address prov_region_entry_address = get_region_base_address_offset( - region_table, _DATA_REGIONS.PROVENANCE_REGION) + region_table, _DataRegions.PROVENANCE_REGION) return txrx.read_word(x, y, prov_region_entry_address) @overrides(AbstractProvidesProvenanceDataFromMachine @@ -1417,7 +1424,7 @@ def get_provenance_data_from_machine(self, placement: Placement): n_sdp_sent, n_sdp_recvd, n_in_streams, n_out_streams = ( _FOUR_WORDS.unpack_from(data)) with ProvenanceWriter() as db: - db.insert_core(x, y, p, _PROV_LABELS.SENT, n_sdp_sent) - db.insert_core(x, y, p, _PROV_LABELS.RECEIVED, n_sdp_recvd) - db.insert_core(x, y, p, _PROV_LABELS.IN_STREAMS, n_in_streams) - db.insert_core(x, y, p, _PROV_LABELS.OUT_STREAMS, n_out_streams) + db.insert_core(x, y, p, _ProvLabels.SENT, n_sdp_sent) + db.insert_core(x, y, p, _ProvLabels.RECEIVED, n_sdp_recvd) + db.insert_core(x, y, p, _ProvLabels.IN_STREAMS, n_in_streams) + db.insert_core(x, y, p, 
_ProvLabels.OUT_STREAMS, n_out_streams) diff --git a/spinn_front_end_common/utility_models/eieio_parameters.py b/spinn_front_end_common/utility_models/eieio_parameters.py index 1627c0a23f..060ab788ad 100644 --- a/spinn_front_end_common/utility_models/eieio_parameters.py +++ b/spinn_front_end_common/utility_models/eieio_parameters.py @@ -12,11 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. -import numpy from typing import List, Optional, Union from dataclasses import dataclass + +import numpy + from spinnman.messages.eieio import EIEIOPrefix from spinnman.model.enums import SDP_PORTS + _SendBufferTimes = Optional[Union[numpy.ndarray, List[numpy.ndarray]]] diff --git a/spinn_front_end_common/utility_models/extra_monitor_support_machine_vertex.py b/spinn_front_end_common/utility_models/extra_monitor_support_machine_vertex.py index db1b7d9623..66bef5e0d5 100644 --- a/spinn_front_end_common/utility_models/extra_monitor_support_machine_vertex.py +++ b/spinn_front_end_common/utility_models/extra_monitor_support_machine_vertex.py @@ -16,15 +16,21 @@ import logging import struct from typing import Dict, Iterable, Optional, ContextManager + from typing_extensions import Literal + from spinn_utilities.log import FormatAdapter from spinn_utilities.overrides import overrides from spinn_utilities.config_holder import get_config_bool + from spinn_machine import Chip, CoreSubsets, MulticastRoutingEntry, Router + from spinnman.model.enums import ExecutableType, UserRegister + from pacman.model.graphs.machine import MachineVertex from pacman.model.resources import AbstractSDRAM, ConstantSDRAM from pacman.model.placements import Placement + from spinn_front_end_common.abstract_models import ( AbstractHasAssociatedBinary, AbstractGeneratesDataSpecification) from spinn_front_end_common.data import FecDataView @@ -38,18 +44,18 @@ from spinn_front_end_common.utilities.emergency_recovery import ( 
emergency_recover_state_from_failure) from spinn_front_end_common.utilities.utility_objs import ReInjectionStatus -from .data_speed_up_packet_gatherer_machine_vertex import ( - DataSpeedUpPacketGatherMachineVertex as - Gatherer) from spinn_front_end_common.interface.provenance import ( AbstractProvidesProvenanceDataFromMachine, ProvenanceWriter) from spinn_front_end_common.interface.ds import DataSpecificationGenerator +from .data_speed_up_packet_gatherer_machine_vertex import ( + DataSpeedUpPacketGatherMachineVertex as Gatherer) + log = FormatAdapter(logging.getLogger(__name__)) _CONFIG_REGION_REINJECTOR_SIZE_IN_BYTES = 5 * BYTES_PER_WORD -#: 1.new seq key, 2.first data key, 3. transaction id key 4.end flag key, -# 5.base key +#: 1.new sequence key, 2.first data key, 3. transaction id key +# 4.end flag key, 5.base key _CONFIG_DATA_SPEED_UP_SIZE_IN_BYTES = 5 * BYTES_PER_WORD _CONFIG_MAX_EXTRA_SEQ_NUM_SIZE_IN_BYTES = 460 * BYTES_PER_KB _CONFIG_DATA_IN_KEYS_SDRAM_IN_BYTES = 3 * BYTES_PER_WORD @@ -57,6 +63,7 @@ _BIT_SHIFT_TO_MOVE_APP_ID = 24 _ONE_WORD = struct.Struct(" bool: @property def transaction_id(self) -> int: + """ + The current transaction id. 
+ + :rtype: int + """ return self._transaction_id def update_transaction_id(self) -> None: @@ -271,10 +283,10 @@ def _generate_data_speed_up_out_config( :param ~.DataSpecificationGenerator spec: spec file """ spec.reserve_memory_region( - region=_DSG_REGIONS.DATA_OUT_CONFIG, + region=_DsgRegions.DATA_OUT_CONFIG, size=_CONFIG_DATA_SPEED_UP_SIZE_IN_BYTES, label="data speed-up out config region") - spec.switch_write_focus(_DSG_REGIONS.DATA_OUT_CONFIG) + spec.switch_write_focus(_DsgRegions.DATA_OUT_CONFIG) spec.write_value(Gatherer.BASE_KEY) spec.write_value(Gatherer.NEW_SEQ_KEY) spec.write_value(Gatherer.FIRST_DATA_KEY) @@ -288,11 +300,11 @@ def _generate_reinjection_config( :param ~.Chip chip: """ spec.reserve_memory_region( - region=_DSG_REGIONS.REINJECT_CONFIG, + region=_DsgRegions.REINJECT_CONFIG, size=_CONFIG_REGION_REINJECTOR_SIZE_IN_BYTES, label="re-injection config region") - spec.switch_write_focus(_DSG_REGIONS.REINJECT_CONFIG) + spec.switch_write_focus(_DsgRegions.REINJECT_CONFIG) for value in [ self._reinject_multicast, self._reinject_point_to_point, self._reinject_fixed_route, @@ -300,10 +312,10 @@ def _generate_reinjection_config( # Note that this is inverted! Why... I dunno! 
spec.write_value(int(not value)) - # add the reinjection mc interface + # add the reinjection multicast interface router_timeout_keys = \ FecDataView.get_system_multicast_router_timeout_keys() - # Write the base key for multicast comms + # Write the base key for multicast communication # pylint: disable=unsubscriptable-object spec.write_value(router_timeout_keys[ chip.nearest_ethernet_x, chip.nearest_ethernet_y]) @@ -315,20 +327,20 @@ def _generate_data_speed_up_in_config( :param ~.Chip chip: the chip where this monitor will run """ spec.reserve_memory_region( - region=_DSG_REGIONS.DATA_IN_CONFIG, + region=_DsgRegions.DATA_IN_CONFIG, size=(_MAX_DATA_SIZE_FOR_DATA_IN_MULTICAST_ROUTING + _CONFIG_DATA_IN_KEYS_SDRAM_IN_BYTES), label="data speed-up in config region") - spec.switch_write_focus(_DSG_REGIONS.DATA_IN_CONFIG) + spec.switch_write_focus(_DsgRegions.DATA_IN_CONFIG) # write address key and data key mc_data_chips_to_keys = \ FecDataView.get_data_in_multicast_key_to_chip_map() # pylint: disable=unsubscriptable-object base_key = mc_data_chips_to_keys[chip.x, chip.y] - spec.write_value(base_key + _KEY_OFFSETS.ADDRESS_KEY_OFFSET) - spec.write_value(base_key + _KEY_OFFSETS.DATA_KEY_OFFSET) - spec.write_value(base_key + _KEY_OFFSETS.BOUNDARY_KEY_OFFSET) + spec.write_value(base_key + _KeyOffsets.ADDRESS_KEY_OFFSET) + spec.write_value(base_key + _KeyOffsets.DATA_KEY_OFFSET) + spec.write_value(base_key + _KeyOffsets.BOUNDARY_KEY_OFFSET) # write table entries data_in_routing_tables = \ @@ -357,7 +369,7 @@ def _generate_provenance_area(self, spec: DataSpecificationGenerator): :param ~.DataSpecificationGenerator spec: spec file """ spec.reserve_memory_region( - region=_DSG_REGIONS.PROVENANCE_AREA, size=_PROVENANCE_FORMAT.size, + region=_DsgRegions.PROVENANCE_AREA, size=_PROVENANCE_FORMAT.size, label="provenance collection region") def __provenance_address(self, place: Placement) -> int: @@ -372,7 +384,7 @@ def __provenance_address(self, place: Placement) -> int: 
region_table_addr = txrx.get_region_base_address( place.x, place.y, place.p) region_entry_addr = get_region_base_address_offset( - region_table_addr, _DSG_REGIONS.PROVENANCE_AREA) + region_table_addr, _DsgRegions.PROVENANCE_AREA) r = txrx.read_word(place.x, place.y, region_entry_addr) self.__prov_region = r return r @@ -389,10 +401,10 @@ def get_provenance_data_from_machine(self, placement: Placement): (n_sdp_packets, n_in_streams, n_out_streams, n_router_changes) = \ _PROVENANCE_FORMAT.unpack_from(data) with ProvenanceWriter() as db: - db.insert_monitor(x, y, _PROV_LABELS.N_CHANGES, n_router_changes) - db.insert_monitor(x, y, _PROV_LABELS.N_PACKETS, n_sdp_packets) - db.insert_monitor(x, y, _PROV_LABELS.N_IN_STREAMS, n_in_streams) - db.insert_monitor(x, y, _PROV_LABELS.N_OUT_STREAMS, n_out_streams) + db.insert_monitor(x, y, _ProvLabels.N_CHANGES, n_router_changes) + db.insert_monitor(x, y, _ProvLabels.N_PACKETS, n_sdp_packets) + db.insert_monitor(x, y, _ProvLabels.N_IN_STREAMS, n_in_streams) + db.insert_monitor(x, y, _ProvLabels.N_OUT_STREAMS, n_out_streams) def __recover(self) -> ContextManager[Placement]: """ diff --git a/spinn_front_end_common/utility_models/live_packet_gather.py b/spinn_front_end_common/utility_models/live_packet_gather.py index 6eee5576a0..948b64777f 100644 --- a/spinn_front_end_common/utility_models/live_packet_gather.py +++ b/spinn_front_end_common/utility_models/live_packet_gather.py @@ -156,4 +156,9 @@ def n_atoms(self) -> int: # type: ignore[override] @property def params(self) -> LivePacketGatherParameters: + """ + The params value passed into the init. 
+ + :rtype: LivePacketGatherParameters + """ return self.__params diff --git a/spinn_front_end_common/utility_models/live_packet_gather_machine_vertex.py b/spinn_front_end_common/utility_models/live_packet_gather_machine_vertex.py index 2465f49d02..ad9dc69588 100644 --- a/spinn_front_end_common/utility_models/live_packet_gather_machine_vertex.py +++ b/spinn_front_end_common/utility_models/live_packet_gather_machine_vertex.py @@ -15,12 +15,17 @@ from enum import IntEnum import struct from typing import List, Optional, Sequence, Tuple, TYPE_CHECKING + from spinn_utilities.overrides import overrides from spinn_machine.tags import IPTag + +from spinnman.model.enums import ExecutableType + +from pacman.model.graphs.common.mdslice import MDSlice from pacman.model.graphs.machine import MachineVertex from pacman.model.resources import AbstractSDRAM, ConstantSDRAM, IPtagResource from pacman.model.placements import Placement -from spinnman.model.enums import ExecutableType + from spinn_front_end_common.data import FecDataView from spinn_front_end_common.interface.provenance import ( ProvidesProvenanceDataFromMachineImpl, ProvenanceWriter) @@ -31,8 +36,8 @@ from spinn_front_end_common.utilities.constants import ( SYSTEM_BYTES_REQUIREMENT, SIMULATION_N_BYTES, BYTES_PER_WORD) from spinn_front_end_common.utilities.exceptions import ConfigurationException -from pacman.model.graphs.common.mdslice import MDSlice from spinn_front_end_common.interface.ds import DataSpecificationGenerator + if TYPE_CHECKING: from spinn_front_end_common.utilities.utility_objs import ( LivePacketGatherParameters) @@ -164,8 +169,7 @@ def get_binary_start_type(self) -> ExecutableType: @overrides( AbstractGeneratesDataSpecification.generate_data_specification) def generate_data_specification( - self, spec: DataSpecificationGenerator, - placement: Placement): # @UnusedVariable + self, spec: DataSpecificationGenerator, placement: Placement): tags = FecDataView.get_tags().get_ip_tags_for_vertex(self) assert 
tags is not None @@ -279,4 +283,9 @@ def get_sdram_usage(cls) -> int: @property def params(self) -> LivePacketGatherParameters: + """ + The lpg_params as passed into the init. + + :rtype: LivePacketGatherParameters + """ return self._lpg_params diff --git a/spinn_front_end_common/utility_models/reverse_ip_tag_multi_cast_source.py b/spinn_front_end_common/utility_models/reverse_ip_tag_multi_cast_source.py index 269e9faffc..687bd366d5 100644 --- a/spinn_front_end_common/utility_models/reverse_ip_tag_multi_cast_source.py +++ b/spinn_front_end_common/utility_models/reverse_ip_tag_multi_cast_source.py @@ -13,22 +13,29 @@ # limitations under the License. import sys -import numpy from typing import List, Optional, Union, Tuple + +import numpy + from spinn_utilities.overrides import overrides + from spinn_machine.tags import IPTag + from spinnman.messages.eieio import EIEIOPrefix from spinnman.model.enums import SDP_PORTS + from pacman.model.partitioner_interfaces import LegacyPartitionerAPI from pacman.model.graphs.common import Slice from pacman.model.graphs.application import ApplicationVertex from pacman.model.routing_info.base_key_and_mask import BaseKeyAndMask from pacman.model.partitioner_splitters import AbstractSplitterCommon from pacman.model.resources import AbstractSDRAM -from .reverse_ip_tag_multicast_source_machine_vertex import ( - ReverseIPTagMulticastSourceMachineVertex, is_array_list) + from spinn_front_end_common.utilities.exceptions import ConfigurationException + from .eieio_parameters import EIEIOParameters +from .reverse_ip_tag_multicast_source_machine_vertex import ( + ReverseIPTagMulticastSourceMachineVertex, is_array_list) _SendBufferTimes = Optional[Union[numpy.ndarray, List[numpy.ndarray]]] @@ -188,6 +195,11 @@ def send_buffer_times(self, send_buffer_times: _SendBufferTimes): vertex.send_buffer_times = send_buffer_times_to_set def enable_recording(self, new_state: bool = True): + """ + Turns on or off the recording for this vertex. 
+ + :param bool new_state: True if recording should be done + """ self._is_recording = new_state @overrides(LegacyPartitionerAPI.create_machine_vertex) diff --git a/spinn_front_end_common/utility_models/reverse_ip_tag_multicast_source_machine_vertex.py b/spinn_front_end_common/utility_models/reverse_ip_tag_multicast_source_machine_vertex.py index 1752e8d962..f44d4fb28a 100644 --- a/spinn_front_end_common/utility_models/reverse_ip_tag_multicast_source_machine_vertex.py +++ b/spinn_front_end_common/utility_models/reverse_ip_tag_multicast_source_machine_vertex.py @@ -11,27 +11,33 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. + from __future__ import annotations +from enum import IntEnum import logging import math import struct -import numpy -from numpy.typing import NDArray -from enum import IntEnum from typing import ( Collection, Dict, List, Optional, Sequence, Union, TYPE_CHECKING) + +import numpy +from numpy.typing import NDArray from typing_extensions import TypeGuard + from spinn_utilities.log import FormatAdapter from spinn_utilities.overrides import overrides + from spinnman.messages.eieio import EIEIOPrefix, EIEIOType from spinnman.messages.eieio.data_messages import EIEIODataHeader from spinnman.model.enums import ExecutableType -from pacman.model.resources import ( - ReverseIPtagResource, AbstractSDRAM, VariableSDRAM) + from pacman.model.graphs.common import Slice from pacman.model.graphs.machine import MachineVertex from pacman.model.placements import Placement +from pacman.model.resources import ( + ReverseIPtagResource, AbstractSDRAM, VariableSDRAM) from pacman.utilities.utility_calls import get_keys + from spinn_front_end_common.data import FecDataView from spinn_front_end_common.utilities.helpful_functions import ( locate_memory_region_for_placement) @@ -58,7 +64,9 @@ from 
spinn_front_end_common.interface.buffer_management.recording_utilities \ import (get_recording_header_array, get_recording_header_size, get_recording_data_constant_size) + from .eieio_parameters import EIEIOParameters + if TYPE_CHECKING: from typing_extensions import TypeAlias from .reverse_ip_tag_multi_cast_source import ReverseIpTagMultiCastSource @@ -73,7 +81,8 @@ # The microseconds per timestep will be divided by this for the max offset _MAX_OFFSET_DENOMINATOR = 10 -# The max offset modulo to stop spikes in simple cases moving to the next ts +# The max offset modulo to stop spikes in simple cases +# moving to the next timestep _MAX_OFFSET_MODULO = 1000 @@ -123,21 +132,21 @@ class ReverseIPTagMulticastSourceMachineVertex( "_injection_partition_id", "_virtual_key", "_mask", "_prefix", "_prefix_type", "_check_keys") - class _REGIONS(IntEnum): + class _Regions(IntEnum): SYSTEM = 0 CONFIGURATION = 1 RECORDING = 2 SEND_BUFFER = 3 PROVENANCE_REGION = 4 - class _PROVENANCE_ITEMS(IntEnum): + class _ProvenanceItems(IntEnum): N_RECEIVED_PACKETS = 0 N_SENT_PACKETS = 1 INCORRECT_KEYS = 2 INCORRECT_PACKETS = 3 LATE_PACKETS = 4 - # 13 ints (1. has prefix, 2. prefix, 3. prefix type, 4. check key flag, + # 13 int (1. has prefix, 2. prefix, 3. prefix type, 4. check key flag, # 5. has key, 6. key, 7. mask, 8. buffer space, # 9. send buffer flag before notify, 10. tag, # 11. tag destination (y, x), 12. 
receive SDP port, @@ -331,7 +340,7 @@ def _install_send_buffer(self, send_buffer_times: _SBT): self._send_buffer = BufferedSendingRegion() self._send_buffer_times = send_buffer_times self._send_buffers = { - self._REGIONS.SEND_BUFFER: self._send_buffer + self._Regions.SEND_BUFFER: self._send_buffer } def _clear_send_buffer(self) -> None: @@ -369,7 +378,7 @@ def _install_virtual_key(self, n_keys: int): @property @overrides(ProvidesProvenanceDataFromMachineImpl._provenance_region_id) def _provenance_region_id(self) -> int: - return self._REGIONS.PROVENANCE_REGION + return self._Regions.PROVENANCE_REGION @property @overrides(ProvidesProvenanceDataFromMachineImpl._n_additional_data_items) @@ -565,15 +574,15 @@ def _reserve_regions(self, spec: DataSpecificationGenerator): """ # Reserve system and configuration memory regions: spec.reserve_memory_region( - region=self._REGIONS.SYSTEM, + region=self._Regions.SYSTEM, size=SIMULATION_N_BYTES, label='SYSTEM') spec.reserve_memory_region( - region=self._REGIONS.CONFIGURATION, + region=self._Regions.CONFIGURATION, size=self._CONFIGURATION_REGION_SIZE, label='CONFIGURATION') # Reserve recording buffer regions if required spec.reserve_memory_region( - region=self._REGIONS.RECORDING, + region=self._Regions.RECORDING, size=get_recording_header_size(1), label="RECORDING") @@ -585,12 +594,15 @@ def _reserve_regions(self, spec: DataSpecificationGenerator): FecDataView.get_max_run_time_steps()) if self._send_buffer_size: spec.reserve_memory_region( - region=self._REGIONS.SEND_BUFFER, + region=self._Regions.SEND_BUFFER, size=self._send_buffer_size, label="SEND_BUFFER") self.reserve_provenance_data_region(spec) def update_virtual_key(self) -> None: + """ + Copy the key from the pre vertex as the virtual key if possible. 
+ """ routing_info = FecDataView.get_routing_infos() if self._virtual_key is None: rinfo = None @@ -614,7 +626,7 @@ def _write_configuration(self, spec: DataSpecificationGenerator): """ :param ~.DataSpecificationGenerator spec: """ - spec.switch_write_focus(region=self._REGIONS.CONFIGURATION) + spec.switch_write_focus(region=self._Regions.CONFIGURATION) # Write apply_prefix and prefix and prefix_type if self._prefix is None: @@ -673,20 +685,19 @@ def _write_configuration(self, spec: DataSpecificationGenerator): @overrides(AbstractGeneratesDataSpecification.generate_data_specification) def generate_data_specification( - self, spec: DataSpecificationGenerator, - placement: Placement): # @UnusedVariable + self, spec: DataSpecificationGenerator, placement: Placement): self.update_virtual_key() # Reserve regions self._reserve_regions(spec) # Write the system region - spec.switch_write_focus(self._REGIONS.SYSTEM) + spec.switch_write_focus(self._Regions.SYSTEM) spec.write_array(get_simulation_header_array( self.get_binary_file_name())) # Write the additional recording information - spec.switch_write_focus(self._REGIONS.RECORDING) + spec.switch_write_focus(self._Regions.RECORDING) recording_size = 0 if self._is_recording: per_timestep = self._recording_sdram_per_timestep( @@ -748,7 +759,7 @@ def get_recorded_region_ids(self) -> List[int]: @overrides(AbstractReceiveBuffersToHost.get_recording_region_base_address) def get_recording_region_base_address(self, placement: Placement) -> int: return locate_memory_region_for_placement( - placement, self._REGIONS.RECORDING) + placement, self._Regions.RECORDING) @property # type: ignore[override] def send_buffers(self) -> Dict[int, BufferedSendingRegion]: @@ -790,7 +801,7 @@ def get_region_buffer_size(self, region: int) -> int: :return: Size of buffer, in bytes. 
:rtype: int """ - if region == self._REGIONS.SEND_BUFFER: + if region == self._Regions.SEND_BUFFER: return self._send_buffer_size return 0 diff --git a/unittests/interface/ds/test_ds.py b/unittests/interface/ds/test_ds.py index be6d0c148d..647fca73c9 100644 --- a/unittests/interface/ds/test_ds.py +++ b/unittests/interface/ds/test_ds.py @@ -130,7 +130,7 @@ def test_reserve_memory_region(self): # check reloading dsr = DataSpecificationReloader(0, 1, 2, db) - # ok to repeat serve as long as the size is the same + # OK to repeat reserve as long as the size is the same dsr.reserve_memory_region(10, 123456, "different_name") # But the wrong size foes BOOM! with self.assertRaises(DataSpecException):