diff --git a/.github/workflows/python_actions.yml b/.github/workflows/python_actions.yml
index cad95681..b21cbb95 100644
--- a/.github/workflows/python_actions.yml
+++ b/.github/workflows/python_actions.yml
@@ -52,7 +52,7 @@ jobs:
tests: unittests
coverage: ${{ matrix.python-version == 3.8 }}
cover-packages: ${{ env.ROOT_PKG }}
- coveralls-token: ${{ secrets.COVERALLS_REPO_TOKEN }}
+ coveralls-token: ${{ secrets.GITHUB_TOKEN }}
- name: Lint with flake8
run: flake8 $ROOT_PKG unittests
diff --git a/doc/source/conf.py b/doc/source/conf.py
index b0b23267..ded1235d 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -47,9 +47,15 @@
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
+ 'sphinx.ext.intersphinx',
'sphinx.ext.autosummary',
]
+intersphinx_mapping = {
+ 'python': ('https://docs.python.org/3.8', None),
+ 'numpy': ("https://numpy.org/doc/stable/", None),
+}
+
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
diff --git a/doc/source/index.rst b/doc/source/index.rst
index 1d7223c7..bc1cf724 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -15,9 +15,9 @@ Contents
--------
.. toctree::
- :maxdepth: 10
+ :maxdepth: 10
- modules
+ modules
Indices and tables
------------------
diff --git a/spinn_utilities/citation/__init__.py b/spinn_utilities/citation/__init__.py
index c23cc532..27d98246 100644
--- a/spinn_utilities/citation/__init__.py
+++ b/spinn_utilities/citation/__init__.py
@@ -14,6 +14,9 @@
# along with this program. If not, see .
from .citation_updater_and_doi_generator import CitationUpdaterAndDoiGenerator
-from .citation_aggregator import CitationAggregator
+from .citation_aggregator import (
+ CitationAggregator, generate_aggregate)
-__all__ = ["CitationUpdaterAndDoiGenerator", "CitationAggregator"]
+__all__ = [
+ "CitationAggregator", "CitationUpdaterAndDoiGenerator",
+ "generate_aggregate"]
diff --git a/spinn_utilities/citation/citation_aggregator.py b/spinn_utilities/citation/citation_aggregator.py
index 2f5fea59..964694ad 100644
--- a/spinn_utilities/citation/citation_aggregator.py
+++ b/spinn_utilities/citation/citation_aggregator.py
@@ -46,9 +46,6 @@ class CitationAggregator(object):
dependencies
"""
- def __init__(self):
- pass
-
def create_aggregated_citation_file(
self, module_to_start_at, aggregated_citation_file):
""" Entrance method for building the aggregated citation file
@@ -56,9 +53,8 @@ def create_aggregated_citation_file(
:param module_to_start_at:
the top level module to figure out its citation file for
:type module_to_start_at: python module
- :param aggregated_citation_file: file name of aggregated citation file
- :type file_path_of_aggregated_citation_file: str
- :rtype: None
+ :param str aggregated_citation_file:
+ file name of aggregated citation file
"""
# get the top citation file to add references to
@@ -128,9 +124,9 @@ def create_aggregated_citation_file(
def _read_pypi_import_map(aggregated_citation_file):
""" Read the PYPI to import name map
- :param aggregated_citation_file: file path to the PYPI map
+ :param str aggregated_citation_file: path to the PYPI map file
:return: map between PYPI names and import names
- :rtype: dict
+ :rtype: dict(str,str)
"""
pypi_to_import_map = dict()
with open(aggregated_citation_file) as f:
@@ -141,11 +137,11 @@ def _read_pypi_import_map(aggregated_citation_file):
def _handle_c_dependency(
self, top_citation_file, module, modules_seen_so_far):
- """ Handle a c code dependency
+ """ Handle a C code dependency
- :param top_citation_file: YAML file for the top citation file
+ :param str top_citation_file: YAML file for the top citation file
:param str module: module to find
- :type top_citation_file: YAML file
+ :param set(str) modules_seen_so_far:
"""
cleaned_path = self.locate_path_for_c_dependency(module)
if cleaned_path is not None:
@@ -163,6 +159,10 @@ def _handle_c_dependency(
@staticmethod
def locate_path_for_c_dependency(true_software_name):
+ """
+ :param str true_software_name:
+ :rtype: str or None
+ """
environment_path_variable = os.environ.get('PATH')
if environment_path_variable is not None:
software_paths = environment_path_variable.split(":")
@@ -185,9 +185,10 @@ def _search_for_other_c_references(
""" Go though the top level path and tries to locate other cff \
files that need to be added to the references pile
- :param reference_entry:
+ :param dict(str,list(str)) reference_entry:
The reference entry to add new dependencies as references for.
- :param software_path: the path to search in
+ :param str software_path: the path to search in
+ :param set(str) modules_seen_so_far:
"""
for possible_extra_citation_file in os.listdir(software_path):
if possible_extra_citation_file.endswith(".cff"):
@@ -206,16 +207,14 @@ def _handle_python_dependency(
module_name):
""" Handle a python dependency
- :param top_citation_file: YAML file for the top citation file
- :type top_citation_file: YAML file
+ :param dict(str,list(str)) top_citation_file:
+ YAML file for the top citation file
:param imported_module: the actual imported module
:type imported_module: ModuleType
- :param modules_seen_so_far:
+ :param set(str) modules_seen_so_far:
list of names of dependencies already processed
- :type modules_seen_so_far: list
- :param module_name: the name of this module to consider as a dependency
- :type module_name: str
- :rtype: None
+ :param str module_name:
+ the name of this module to consider as a dependency
"""
# get modules citation file
citation_level_dir = os.path.abspath(imported_module.__file__)
@@ -244,11 +243,11 @@ def _process_reference(
""" Take a module level and tries to locate and process a citation file
:param str citation_level_dir:
- the expected level where the CITATION.cff should be
+ the expected level where the ``CITATION.cff`` should be
:param imported_module: the module after being imported
:type imported_module: python module
- :param modules_seen_so_far: list of dependencies already processed
- :type modules_seen_so_far: list
+ :param set(str) modules_seen_so_far:
+ list of dependencies already processed
:return: the reference entry in JSON format
:rtype: dict
"""
@@ -281,7 +280,7 @@ def _try_to_find_version(imported_module, module_name):
""" Try to locate a version file or version data to auto-generate \
minimal citation data.
- :param imported_module:\
+ :param imported_module:
the module currently trying to find the version of
:type imported_module: python module
:return: reference entry for this python module
@@ -312,11 +311,10 @@ def _try_to_find_version(imported_module, module_name):
@staticmethod
def _read_and_process_reference_entry(dependency_citation_file_path):
- """ Read a CITATION.cff and makes it a reference for a higher level \
- citation file.
+ """ Read a ``CITATION.cff`` and makes it a reference for a higher \
+ level citation file.
- :param dependency_citation_file_path: path to a CITATION.cff file
- :type dependency_citation_file_path: str
+ :param str dependency_citation_file_path: path to a CITATION.cff file
:return: reference entry for the higher level citation.cff
:rtype: dict
"""
@@ -344,14 +342,20 @@ def _read_and_process_reference_entry(dependency_citation_file_path):
def generate_aggregate(arguments=None):
- """ Generates a single citation.cff from others
-
- :param output_path: Where to write the aggregate file
- :param top_module: the module to start aggregating the citation.cffs from
- :param doi_title: the title of the DOI
- :param zenodo_access_token: the access token for Zenodo
- :param tools_doi: the DOI of the tools
- :rtype: None
+ """ Command-line tool to generate a single ``citation.cff`` from others.
+
+ :param list(str) arguments: Command line arguments.
+
+ * ``--output_path``: \
+ Where to write the aggregate file
+ * ``--top_module``: \
+ The module to start aggregating the citation.cffs from
+ * ``--doi_title``: \
+ The title of the DOI
+ * ``--zenodo_access_token``: \
+ The access token for Zenodo
+ * ``--tools_doi``: \
+ The DOI of the tools
"""
parser = argparse.ArgumentParser(description="Aggregate Citations")
parser.add_argument("output_path", help="The file to store the result in")
diff --git a/spinn_utilities/citation/citation_updater_and_doi_generator.py b/spinn_utilities/citation/citation_updater_and_doi_generator.py
index 4c9caaee..8f9649f8 100644
--- a/spinn_utilities/citation/citation_updater_and_doi_generator.py
+++ b/spinn_utilities/citation/citation_updater_and_doi_generator.py
@@ -28,18 +28,9 @@
CITATION_AUTHOR_SURNAME = "family-names"
CITATION_FILE_DESCRIPTION = "title"
-ZENODO_DEPOSIT_GET_URL = "https://zenodo.org/api/deposit/depositions"
-ZENODO_DEPOSIT_PUT_URL = \
- 'https://zenodo.org/api/deposit/depositions/{}/files'
-ZENODO_PUBLISH_URL = \
- 'https://zenodo.org/api/deposit/depositions/{}/actions/publish'
-
ZENODO_RELATION_FIELD = "relation"
ZENODO_NEWER_VERSION_OF = 'isNewVersionOf'
ZENODO_SIBLING_OF = "cites"
-ZENODO_ACCESS_TOKEN = 'access_token'
-ZENODO_RELATED_IDENTIFIERS = 'related_identifiers'
-ZENODO_CONTENT_TYPE = "Content-Type"
ZENODO_METADATA = 'metadata'
ZENODO_PRE_RESERVED_DOI = "prereserve_doi"
ZENODO_DOI_VALUE = "doi"
@@ -54,33 +45,116 @@
AUTHOR_ORCID = "orcid"
IDENTIFIER = 'identifier'
-ZENODO_VALID_STATUS_REQUEST_GET = 200
-ZENODO_VALID_STATUS_REQUEST_POST = 201
-ZENODO_VALID_STATUS_REQUEST_PUBLISH = 202
+class _ZenodoException(Exception):
+ """ Exception from a call to Zenodo.
+ """
+
+ def __init__(self, operation, expected, request):
+ Exception.__init__(
+ self,
+ "don't know what went wrong. got wrong status code when trying "
+ "to {}. Got error code {} (when expecting {}) with response "
+ "content {}".format(
+ operation, request.status_code, expected, request.content))
+ self.request = request
+ self.expected = expected
+
+
+class _Zenodo(object):
+ """ Manages low level access to Zenodo.
+ """
+
+ # pragma: no cover
+ __slots__ = ("__zenodo_token", )
+
+ _BASE_URI = "https://zenodo.org/api"
+ _DEPOSIT_GET_URL = _BASE_URI + "/deposit/depositions"
+ _DEPOSIT_PUT_URL = _BASE_URI + "/deposit/depositions/{}/files"
+ _PUBLISH_URL = _BASE_URI + "/deposit/depositions/{}/actions/publish"
+ _CONTENT_TYPE = "Content-Type"
+ _JSON = "application/json"
+ _ACCESS_TOKEN = 'access_token'
+ _RELATED_IDENTIFIERS = 'related_identifiers'
+ _VALID_STATUS_REQUEST_GET = 200
+ _VALID_STATUS_REQUEST_POST = 201
+ _VALID_STATUS_REQUEST_PUBLISH = 202
+
+ def __init__(self, token):
+ self.__zenodo_token = token
+
+ @staticmethod
+ def _json(r):
+ try:
+ return r.json()
+ except Exception: # pylint: disable=broad-except
+ return None
+
+ def get_verify(self, related):
+ r = requests.get(
+ self._DEPOSIT_GET_URL,
+ params={self._ACCESS_TOKEN: self.__zenodo_token,
+ self._RELATED_IDENTIFIERS: related},
+ json={}, headers={self._CONTENT_TYPE: self._JSON})
+ if r.status_code != self._VALID_STATUS_REQUEST_GET:
+ raise _ZenodoException(
+ "request a DOI", self._VALID_STATUS_REQUEST_GET, r)
+ return self._json(r)
+
+ def post_create(self, related):
+ r = requests.post(
+ self._DEPOSIT_GET_URL,
+ params={self._ACCESS_TOKEN: self.__zenodo_token,
+ self._RELATED_IDENTIFIERS: related},
+ json={}, headers={self._CONTENT_TYPE: self._JSON})
+ if r.status_code != self._VALID_STATUS_REQUEST_POST:
+ raise _ZenodoException(
+ "get an empty upload", self._VALID_STATUS_REQUEST_POST, r)
+ return self._json(r)
+
+ def post_upload(self, deposit_id, data, files):
+ r = requests.post(
+ self._DEPOSIT_PUT_URL.format(deposit_id),
+ params={self._ACCESS_TOKEN: self.__zenodo_token},
+ data=data, files=files)
+ if r.status_code != self._VALID_STATUS_REQUEST_POST:
+ raise _ZenodoException(
+ "to put files and data into the preallocated DOI",
+ self._VALID_STATUS_REQUEST_POST, r)
+ return self._json(r)
+
+ def post_publish(self, deposit_id):
+ r = requests.post(
+ self._PUBLISH_URL.format(deposit_id),
+ params={self._ACCESS_TOKEN: self.__zenodo_token})
+ if r.status_code != self._VALID_STATUS_REQUEST_PUBLISH:
+ raise _ZenodoException(
+ "publish the DOI", self._VALID_STATUS_REQUEST_PUBLISH, r)
+ return self._json(r)
-class CitationUpdaterAndDoiGenerator(object):
+class CitationUpdaterAndDoiGenerator(object):
def __init__(self):
- pass
+ self.__zenodo = None
def update_citation_file_and_create_doi(
self, citation_file_path, doi_title, create_doi, publish_doi,
previous_doi, zenodo_access_token, module_path):
""" Take a CITATION.cff file and updates the version and \
- date-released fields, and rewrites the CITATION.cff file.
+ date-released fields, and rewrites the ``CITATION.cff`` file.
- :param str citation_file_path: The file path to the CITATION.cff file
+ :param str citation_file_path: File path to the ``CITATION.cff`` file
:param bool create_doi:
Whether to use Zenodo DOI interface to grab a DOI
- :param str zenodo_access_token: the access token for Zenodo
+ :param str zenodo_access_token: Access token for Zenodo
:param bool publish_doi: Whether to publish the DOI on Zenodo
- :param str previous_doi: the DOI to append the created DOI to
- :param str doi_title: the title for the created DOI
- :param str module_path: path to the module to zip up
+ :param str previous_doi: DOI to append the created DOI to
+ :param str doi_title: Title for the created DOI
+ :param str module_path: Path to the module to zip up
:param bool update_version:
Whether we should update the citation version
"""
+ self.__zenodo = _Zenodo(zenodo_access_token)
# data holders
deposit_id = None
@@ -91,8 +165,7 @@ def update_citation_file_and_create_doi(
# if creating a DOI, go and request one
if create_doi:
- doi_id, deposit_id = self._request_doi(
- zenodo_access_token, previous_doi)
+ doi_id, deposit_id = self._request_doi(previous_doi)
yaml_file[IDENTIFIER] = doi_id
# rewrite citation file with updated fields
@@ -103,16 +176,15 @@ def update_citation_file_and_create_doi(
# if creating a DOI, finish the request and possibly publish it
if create_doi:
self._finish_doi(
- deposit_id, zenodo_access_token, publish_doi, doi_title,
+ deposit_id, publish_doi, doi_title,
yaml_file[CITATION_FILE_DESCRIPTION], yaml_file, module_path)
- def _request_doi(self, zenodo_access_token, previous_doi):
+ def _request_doi(self, previous_doi):
""" Go to zenodo and requests a DOI
- :param str zenodo_access_token: zenodo access token
:param str previous_doi: the previous DOI for this module, if exists
:return: the DOI id, and deposit id
- :rtype: str, str
+ :rtype: tuple(str, str)
"""
# create link to previous version (if applicable)
@@ -122,95 +194,52 @@ def _request_doi(self, zenodo_access_token, previous_doi):
IDENTIFIER: previous_doi})
# get a request for a DOI
- request = requests.get(
- ZENODO_DEPOSIT_GET_URL,
- params={ZENODO_ACCESS_TOKEN: zenodo_access_token,
- ZENODO_RELATED_IDENTIFIERS: related},
- json={}, headers={ZENODO_CONTENT_TYPE: "application/json"})
-
- # verify the DOI is valid
- if (request.status_code !=
- ZENODO_VALID_STATUS_REQUEST_GET): # pragma: no cover
- raise Exception(
- "don't know what went wrong. got wrong status code when "
- "trying to request a DOI. Got error code {} with response "
- "content {}".format(request.status_code, request.content))
+ self.__zenodo.get_verify(related)
# get empty upload
- request = requests.post(
- ZENODO_DEPOSIT_GET_URL,
- params={ZENODO_ACCESS_TOKEN: zenodo_access_token,
- ZENODO_RELATED_IDENTIFIERS: related},
- json={}, headers={ZENODO_CONTENT_TYPE: "application/json"})
-
- # verify the DOI is valid
- if (request.status_code !=
- ZENODO_VALID_STATUS_REQUEST_POST): # pragma: no cover
- raise Exception(
- "don't know what went wrong. got wrong status code when "
- "trying to get a empty upload. Got error code {} with response"
- " content {}".format(request.status_code, request.content))
+ request_data = self.__zenodo.post_create(related)
# get DOI and deposit id
doi_id = unicodedata.normalize(
'NFKD',
- (request.json()[ZENODO_METADATA][ZENODO_PRE_RESERVED_DOI]
+ (request_data[ZENODO_METADATA][ZENODO_PRE_RESERVED_DOI]
[ZENODO_DOI_VALUE])).encode('ascii', 'ignore')
- deposition_id = request.json()[ZENODO_DEPOSIT_ID]
+ deposition_id = request_data[ZENODO_DEPOSIT_ID]
return doi_id, deposition_id
def _finish_doi(
- self, deposit_id, access_token, publish_doi, title,
+ self, deposit_id, publish_doi, title,
doi_description, yaml_file, module_path):
""" Finishes the DOI on zenodo
:param str deposit_id: the deposit id to publish
- :param str access_token: the access token needed to publish
+ :param bool publish_doi: whether we should publish the DOI
:param str title: the title of this DOI
:param str doi_description: the description for the DOI
:param yaml_file: the citation file after its been read it
- :param bool publish_doi: whether we should publish the DOI
- :param files: the zipped up file for the zenodo DOI request
:param module_path: the path to the module to DOI
"""
-
- zipped_file = self._zip_up_module(module_path)
- zipped_open_file = open(zipped_file, "rb")
- files = {ZENODO_FILE: zipped_open_file}
-
- data = self._fill_in_data(title, doi_description, yaml_file)
-
- r = requests.post(
- ZENODO_DEPOSIT_PUT_URL.format(deposit_id),
- params={ZENODO_ACCESS_TOKEN: access_token}, data=data, files=files)
- zipped_open_file.close()
- os.remove('module.zip')
-
- if (r.status_code !=
- ZENODO_VALID_STATUS_REQUEST_POST): # pragma: no cover
- raise Exception(
- "don't know what went wrong. got wrong status code when "
- "trying to put files and data into the pre allocated DOI. "
- "Got error code {} with response content {}".format(
- r.status_code, r.content))
+ zipped_file = None
+ try:
+ zipped_file = self._zip_up_module(module_path)
+ with open(zipped_file, "rb") as zipped_open_file:
+ files = {ZENODO_FILE: zipped_open_file}
+ data = self._fill_in_data(title, doi_description, yaml_file)
+ self.__zenodo.post_upload(deposit_id, data, files)
+ finally:
+ if zipped_file:
+ os.remove(zipped_file)
# publish DOI
if publish_doi:
- request = requests.post(
- ZENODO_PUBLISH_URL.format(deposit_id),
- params={ZENODO_ACCESS_TOKEN: access_token})
- if (request.status_code !=
- ZENODO_VALID_STATUS_REQUEST_PUBLISH): # pragma: no cover
- raise Exception(
- "don't know what went wrong. got wrong status code when "
- "trying to publish the DOI")
+ self.__zenodo.post_publish(deposit_id)
def _zip_up_module(self, module_path):
""" Zip up a module
- :param module_path: the path to the module to zip up
- :return: a opened reader for the zip file generated
+ :param str module_path: the path to the module to zip up
+ :return: the filename to the zip file
"""
if os.path.isfile('module.zip'):
os.remove('module.zip')
@@ -219,10 +248,9 @@ def _zip_up_module(self, module_path):
".github", "model_binaries", "common_model_binaries",
".coveragerc", ".idea"]
- module_zip_file = zipfile.ZipFile(
- 'module.zip', 'w', zipfile.ZIP_DEFLATED)
- self._zip_walker(module_path, avoids, module_zip_file)
- module_zip_file.close()
+ with zipfile.ZipFile(
+ 'module.zip', 'w', zipfile.ZIP_DEFLATED) as module_zip_file:
+ self._zip_walker(module_path, avoids, module_zip_file)
return 'module.zip'
@staticmethod
@@ -230,18 +258,16 @@ def _zip_walker(module_path, avoids, module_zip_file):
""" Traverse the module and its subdirectories and only adds to the \
files to the zip which are not within a avoid directory that.
- :param module_path: the path to start the search at
- :param avoids: the set of avoids to avoid
- :param module_zip_file: the zip file to put into
+ :param str module_path: the path to start the search at
+ :param set(str) avoids: the set of avoids to avoid
+ :param ~zipfile.ZipFile module_zip_file: the zip file to put into
"""
for directory_path, _, files in os.walk(module_path):
- avoid = False
for directory_name in directory_path.split(os.sep):
if directory_name in avoids:
- avoid = True
break
- if not avoid:
+ else:
for potential_zip_file in files:
# if safe to zip, zip
if potential_zip_file not in avoids:
@@ -254,31 +280,30 @@ def _fill_in_data(doi_title, doi_description, yaml_file):
:param str doi_title: the title of the DOI
:param str doi_description: the description of the DOI
- :param yaml_file: the citation file once read into the system
- :type yaml_file: dict
+ :param dict yaml_file: the citation file once read into the system
:return: dict containing zenodo metadata
:rtype: dict
"""
- data = dict()
- data[ZENODO_METADATA] = dict()
-
# add basic meta data
- data[ZENODO_METADATA][ZENODO_METADATA_TITLE] = doi_title
- data[ZENODO_METADATA][ZENODO_METATDATA_DESC] = doi_description
- data[ZENODO_METADATA][ZENODO_METADATA_CREATORS] = list()
+ metadata = {
+ ZENODO_METADATA_TITLE: doi_title,
+ ZENODO_METATDATA_DESC: doi_description,
+ ZENODO_METADATA_CREATORS: []
+ }
# get author data from the citation file
for author in yaml_file[CITATION_AUTHORS_TYPE]:
- author_data = dict()
- author_data[ZENODO_AUTHOR_NAME] = (
- author[CITATION_AUTHOR_SURNAME] + ", " +
- author[CITATION_AUTHOR_FIRST_NAME] + ", ")
+ author_data = {
+ ZENODO_AUTHOR_NAME: (
+ author[CITATION_AUTHOR_SURNAME] + ", " +
+ author[CITATION_AUTHOR_FIRST_NAME])
+ }
if AUTHOR_AFFILIATION in author:
author_data[AUTHOR_AFFILIATION] = author[AUTHOR_AFFILIATION]
if AUTHOR_ORCID in author:
author_data[AUTHOR_ORCID] = author[AUTHOR_ORCID]
- data[ZENODO_METADATA][ZENODO_METADATA_CREATORS].append(author_data)
- return data
+ metadata[ZENODO_METADATA_CREATORS].append(author_data)
+ return {ZENODO_METADATA: metadata}
@staticmethod
def convert_text_date_to_date(
@@ -290,6 +315,7 @@ def convert_text_date_to_date(
:param int version_year: version year
:param int version_day: version day of month
:return: the string representation for the cff file
+ :rtype: str
"""
return "{}-{}-{}".format(
version_year,
@@ -302,7 +328,7 @@ def convert_month_name_to_number(version_month):
""" Convert a python month in text form to a number form
:param version_month: the text form of the month
- :type version_month: string or int
+ :type version_month: str or int
:return: the month int value
:rtype: int
:raises: Exception when the month name is not recognised
diff --git a/spinn_utilities/make_tools/__init__.py b/spinn_utilities/make_tools/__init__.py
index 2c75a833..cbd4e480 100644
--- a/spinn_utilities/make_tools/__init__.py
+++ b/spinn_utilities/make_tools/__init__.py
@@ -12,8 +12,9 @@
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
-
+from .converter import Converter
from .file_converter import FileConverter
+from .replacer import Replacer
__all__ = [
- "FileConverter"]
+ "Converter", "FileConverter", "Replacer"]
diff --git a/spinn_utilities/make_tools/converter.py b/spinn_utilities/make_tools/converter.py
index e85d0ee4..cb68cbca 100644
--- a/spinn_utilities/make_tools/converter.py
+++ b/spinn_utilities/make_tools/converter.py
@@ -48,12 +48,9 @@ class Converter(object):
def __init__(self, src, dest, dict_file):
""" Converts a whole directory including sub directories
- :param src: Full source directory
- :type src: str
- :param dest: Full destination directory
- :type dest: str
- :param dict_file: Full path to dictionary file
- :type dict_file: str
+ :param str src: Full source directory
+ :param str dest: Full destination directory
+ :param str dict_file: Full path to dictionary file
"""
self._src = os.path.abspath(src)
if not os.path.exists(self._src):
@@ -72,15 +69,15 @@ def __init__(self, src, dest, dict_file):
self._dict = os.path.abspath(dict_file)
def run(self):
- """ Runs the file converter on a whole directory including sub \
- directories
-
- WARNING. This code is absolutely not thread safe.
- Interwoven calls even on different FileConverter objects is dangerous!
- It is highly likely that dict files become corrupted and the same
- message_id is used multiple times.
-
- :return:
+ """ Runs the file converter on a whole directory including \
+ sub-directories.
+
+ .. warning::
+ This code is absolutely not thread safe.
+ Interwoven calls even on different FileConverter objects is
+ dangerous!
+ It is highly likely that dict files become corrupted and the same
+ ``message_id`` is used multiple times.
"""
self._mkdir(self._dest)
with open(self._dict, 'w') as dict_f:
@@ -160,6 +157,8 @@ def _find_common_based_on_environ(self):
@staticmethod
def convert(src, dest, dict_file):
+ """ Wrapper function around this class.
+ """
converter = Converter(src, dest, dict_file)
converter.run()
diff --git a/spinn_utilities/make_tools/file_converter.py b/spinn_utilities/make_tools/file_converter.py
index 3d0e107f..f76590ce 100644
--- a/spinn_utilities/make_tools/file_converter.py
+++ b/spinn_utilities/make_tools/file_converter.py
@@ -12,7 +12,7 @@
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
-
+import enum
import os
import re
import sys
@@ -28,12 +28,6 @@
r"log_((info)|(error)|(debug)|(warning))(\s)*\(")
DOUBLE_HEX = ", double_to_upper({0}), double_to_lower({0})"
-# Status values
-NORMAL_CODE = 0
-COMMENT = NORMAL_CODE + 1
-IN_LOG = COMMENT + 1
-IN_LOG_CLOSE_BRACKET = IN_LOG + 1
-
MINIS = {"log_info(": "log_mini_info(",
"log_error(": "log_mini_error(",
"log_debug(": "log_mini_debug(",
@@ -47,88 +41,99 @@
MAX_LOG_PER_FILE = 100
+class State(enum.Enum):
+ """Status values"""
+ NORMAL_CODE = 0
+ COMMENT = 1
+ IN_LOG = 2
+ IN_LOG_CLOSE_BRACKET = 3
+
+
class FileConverter(object):
__slots__ = [
- # Full destination directory
- "_dest",
- # File to hold dictionary mappings
- "_dict",
- # original c log method found
- # variable created each time a log method found
+ "dest",
+ "dict",
"_log",
- # Log methods found so far
- # variable created each time a log method found
"_log_full",
- # Number of c lines the log method takes
- # variable created each time a log method found
"_log_lines",
- # Any other stuff found before the log method but on same line
- # variable created each time a log method found
"_log_start",
- # Id for next message
"_message_id",
- # The previous state
- # variable created when a comment found
"_previous_status",
- # Full source directory
- "_src",
- # Current status of state machine
+ "src",
"_status",
- # Number of extra lines written to modified not yet recovered
- # Extra lines are caused by the header and possibly log comment
- # Extra lines are recovered by omitting blank lines
"_too_many_lines"
]
def __init__(self, src, dest, dict_file):
""" Creates the file_convertor to convert one file
- :param src: Full source directory
- :type src: str
- :param dest: Full destination directory
- :type dest: str
- :param dict_file: File to hold dictionary mappings
- :type dict_file: str
+ :param str src: Source file
+ :param str dest: Destination file
+ :param str dict_file: File to hold dictionary mappings
"""
- self._src = os.path.abspath(src)
- self._dest = os.path.abspath(dest)
- self._dict = dict_file
+ #: Full source file name
+ #:
+ #: :type: str
+ self.src = os.path.abspath(src)
+ #: Full destination file name
+ #:
+ #: :type: str
+ self.dest = os.path.abspath(dest)
+ #: File to hold dictionary mappings
+ #:
+ #: :type: str
+ self.dict = dict_file
+ #: Id for next message
+ self._message_id = None
+ #: Current status of state machine
+ self._status = None
+ #: Number of extra lines written to modified not yet recovered
+ #: Extra lines are caused by the header and possibly log comment
+ #: Extra lines are recovered by omitting blank lines
+ self._too_many_lines = None
+
+ # Variables created each time a log method found
+ #: original c log method found
self._log = None
+ #: Log methods found so far
self._log_full = None
+ #: Number of c lines the log method takes
self._log_lines = None
+ #: Any other stuff found before the log method but on same line
self._log_start = None
- self._message_id = None
+
+ # variable created when a comment found
+ #: The previous state
self._previous_status = None
- self._status = None
- self._too_many_lines = None
def _run(self, range_start):
""" Runs the file converter
- WARNING. This code is absolutely not thread safe.
- Interwoven calls even on different FileConverter objects is dangerous!
- It is highly likely that dict files become corrupted and the same
- message_id is used multiple times.
+ .. warning::
+ This code is absolutely not thread safe.
+ Interwoven calls even on different FileConverter objects is
+ dangerous if the dict files are the same!
+ It is highly likely that dict files become corrupted and the same
+ ``message_id`` is used multiple times.
-
- :param range_start: id of last dictionary key used
- :type range_start: int
+ :param int range_start: id of last dictionary key used
:return: The last message id use which can in turn be passed into
- the next FileConverter
+ the next FileConverter
+ :rtype: int
"""
self._message_id = range_start
- if not os.path.exists(self._src):
- raise Exception("Unable to locate source {}".format(self._src))
- dest_dir = os.path.dirname(os.path.realpath(self._dest))
+ if not os.path.exists(self.src):
+ raise Exception("Unable to locate source {}".format(self.src))
+ dest_dir = os.path.dirname(os.path.realpath(self.dest))
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
- with open(self._src) as src_f:
- with open(self._dest, 'w') as dest_f:
+ with open(self.src) as src_f:
+ with open(self.dest, 'w') as dest_f:
dest_f.write(
"// DO NOT EDIT! THIS FILE WAS GENERATED FROM {}\n\n"
.format(self.unique_src()))
self._too_many_lines = 2
- self._status = NORMAL_CODE
+ self._status = State.NORMAL_CODE
for line_num, text in enumerate(src_f):
if self._too_many_lines > 0:
# Try to recover the lines added by do not edit
@@ -147,31 +152,33 @@ def _process_line(self, dest_f, line_num, text):
""" Process a single line
:param dest_f: Open file like Object to write modified source to
- :param line_num: Line number in the source c file
- :param text: Text of that line including whitespace
+ :param int line_num: Line number in the source c file
+ :param str text: Text of that line including whitespace
:return: True if and only if the whole line was processed
+ :rtype: bool
"""
- if self._status == COMMENT:
+ if self._status == State.COMMENT:
return self._process_line_in_comment(dest_f, text)
if "/*" in text:
return self._process_line_comment_start(dest_f, line_num, text)
- if self._status == IN_LOG:
+ if self._status == State.IN_LOG:
return self._process_line_in_log(dest_f, line_num, text)
- if self._status == IN_LOG_CLOSE_BRACKET:
+ if self._status == State.IN_LOG_CLOSE_BRACKET:
return self._process_line_in_log_close_bracket(
dest_f, line_num, text)
- assert self._status == NORMAL_CODE
+ assert self._status == State.NORMAL_CODE
return self._process_line_normal_code(dest_f, line_num, text)
def _process_line_in_comment(self, dest_f, text):
- """ Process a single line when in a multi line comment /* .. */
+ """ Process a single line when in a multi-line comment /* .. */
:param dest_f: Open file like Object to write modified source to
- :param text: Text of that line including whitespace
+ :param str text: Text of that line including whitespace
:return: True if and only if the whole line was processed
+ :rtype: bool
"""
if "*/" in text:
stripped = text.strip()
@@ -179,7 +186,7 @@ def _process_line_in_comment(self, dest_f, text):
if match.end(0) == len(stripped):
# OK Comment until end of line
dest_f.write(text)
- self._status = NORMAL_CODE
+ self._status = State.NORMAL_CODE
return True
return False # Stuff after comment so check by character
# Whole line in comment without end
@@ -189,17 +196,18 @@ def _process_line_in_comment(self, dest_f, text):
def _process_line_comment_start(self, dest_f, line_num, text):
""" Processes a line known assumed to contain a /* but not know where
- There is also the assumption that the start status is not COMMENT
+ There is also the assumption that the start status is not ``COMMENT``.
:param dest_f: Open file like Object to write modified source to
- :param line_num: Line number in the source c file
- :param text: Text of that line including whitespace
+ :param int line_num: Line number in the source c file
+ :param str text: Text of that line including whitespace
:return: True if and only if the whole line was processed
+ :rtype: bool
"""
stripped = text.strip()
if stripped.startswith("/*"):
self._previous_status = self._status
- self._status = COMMENT
+ self._status = State.COMMENT
# Comment start so now check for comment end
return self._process_line(dest_f, line_num, text)
# Stuff before comment so check by char
@@ -209,9 +217,10 @@ def _process_line_in_log(self, dest_f, line_num, text):
""" Process a line when the status is a log call has been started
:param dest_f: Open file like Object to write modified source to
- :param line_num: Line number in the source c file
- :param text: Text of that line including whitespace
+ :param int line_num: Line number in the source c file
+ :param str text: Text of that line including whitespace
:return: True if and only if the whole line was processed
+ :rtype: bool
"""
stripped = text.strip()
if stripped.startswith("//"):
@@ -223,7 +232,7 @@ def _process_line_in_log(self, dest_f, line_num, text):
if not match:
if stripped[-1:] == ")":
# possible start of end
- self._status = IN_LOG_CLOSE_BRACKET
+ self._status = State.IN_LOG_CLOSE_BRACKET
self._log_full += stripped
self._log_lines += 1
return True
@@ -234,16 +243,17 @@ def _process_line_in_log(self, dest_f, line_num, text):
self._log_lines += 1
self._log_full += stripped
self._write_log_method(dest_f, line_num)
- self._status = NORMAL_CODE
+ self._status = State.NORMAL_CODE
return True
def _process_line_in_log_close_bracket(self, dest_f, line_num, text):
""" Process where the last log line has the ) but not the ;
:param dest_f: Open file like Object to write modified source to
- :param line_num: Line number in the source c file
- :param text: Text of that line including whitespace
+ :param int line_num: Line number in the source c file
+ :param str text: Text of that line including whitespace
:return: True if and only if the whole line was processed
+ :rtype: bool
"""
stripped = text.strip()
if len(stripped) == 0:
@@ -254,7 +264,7 @@ def _process_line_in_log_close_bracket(self, dest_f, line_num, text):
self._log_full += (";")
self._log_lines += 1
self._write_log_method(dest_f, line_num)
- self._status = NORMAL_CODE
+ self._status = State.NORMAL_CODE
return True
else:
return False
@@ -266,16 +276,17 @@ def _process_line_in_log_close_bracket(self, dest_f, line_num, text):
else:
# so not a closing bracket so set status back
- self._status = IN_LOG
+ self._status = State.IN_LOG
return self._process_line_in_log(dest_f, line_num, text)
def _process_line_normal_code(self, dest_f, line_num, text):
""" Process a line where the status is normal code
:param dest_f: Open file like Object to write modified source to
- :param line_num: Line number in the source c file
- :param text: Text of that line including whitespace
+ :param int line_num: Line number in the source c file
+ :param str text: Text of that line including whitespace
:return: True if and only if the whole line was processed
+ :rtype: bool
"""
stripped = text.strip()
match = LOG_START_REGEX.search(stripped)
@@ -301,19 +312,35 @@ def _process_line_normal_code(self, dest_f, line_num, text):
self._log = "".join(match.group(0).split())
start_len = self._log_start + len(self._log)
- self._status = IN_LOG
+ self._status = State.IN_LOG
self._log_full = "" # text saved in process_line_in_log
self._log_lines = 0
# Now check for the end of log command
return self._process_line_in_log(dest_f, line_num, text[start_len:])
def quote_part(self, text):
+ """ Net count of double quotes in line.
+
+ :param str text:
+ :rtype: int
+ """
return (text.count('"') - text.count('\\"')) % 2 > 0
def bracket_count(self, text):
+ """ Net count of open brackets in line.
+
+ :param str text:
+ :rtype: int
+ """
return (text.count('(') - text.count(')'))
def split_by_comma_plus(self, main, line_num):
+ """ split line by comma and partially parse
+
+ :param str main:
+ :param int line_num:
+ :rtype: list(str)
+ """
try:
parts = main.split(",")
for i, part in enumerate(parts):
@@ -353,24 +380,25 @@ def split_by_comma_plus(self, main, line_num):
parts[0] = parts[0][1:-1]
return parts
- except Exception:
+ except Exception as e:
raise Exception("Unexpected line {} at {} in {}".format(
- self._log_full, line_num, self._src))
+ self._log_full, line_num, self.src)) from e
def _short_log(self, line_num):
""" shortens the log string message and adds the id
- Assumes that self._message_id has already been updated
+ Assumes that ``self._message_id`` has already been updated
- :param original: Source log messages
- :return: new log message and the id
+ :param int line_num: Current line number
+ :return: new log message parts
+ :rtype: tuple(str,str)
"""
try:
match = LOG_END_REGEX.search(self._log_full)
main = self._log_full[:-len(match.group(0))]
- except Exception:
+ except Exception as e:
raise Exception("Unexpected line {} at {} in {}".format(
- self._log_full, line_num, self._src))
+ self._log_full, line_num, self.src)) from e
parts = self.split_by_comma_plus(main, line_num)
original = parts[0]
count = original.count("%") - original.count("%%") * 2
@@ -387,11 +415,11 @@ def _short_log(self, line_num):
if len(parts) < count + 1:
raise Exception(
"Too few parameters in line {} at {} in {}".format(
- self._log_full, line_num, self._src))
+ self._log_full, line_num, self.src))
if len(parts) > count + 1:
raise Exception(
"Too many parameters in line {} at {} in {}".format(
- self._log_full, line_num, self._src))
+ self._log_full, line_num, self.src))
for i, match in enumerate(matches):
front += TOKEN
if match.endswith("f"):
@@ -424,8 +452,8 @@ def _write_log_method(self, dest_f, line_num, tail=""):
- original message
:param dest_f: Open file like Object to write modified source to
- :param line_num: Line number in the source c file
- :param text: Text of that line including whitespace
+ :param int line_num: Line number in the source c file
+ :param str text: Text of that line including whitespace
"""
self._message_id += 1
self._log_full = self._log_full.replace('""', '')
@@ -453,12 +481,12 @@ def _write_log_method(self, dest_f, line_num, tail=""):
dest_f.write(self._log_full)
dest_f.write("*/")
dest_f.write(end * (self._log_lines - 1))
- with open(self._dict, 'a') as mess_f:
+ with open(self.dict, 'a') as mess_f:
# Remove commas from filenames for csv format
# Remove start and end quotes from original
mess_f.write("{},{} ({}: {}): ,{}\n".format(
self._message_id, LEVELS[self._log],
- os.path.basename(self._src).replace(",", ";"),
+ os.path.basename(self.src).replace(",", ";"),
line_num + 1,
original))
@@ -466,13 +494,13 @@ def _process_chars(self, dest_f, line_num, text):
""" Deals with complex lines that can not be handled in one go
:param dest_f: Open file like Object to write modified source to
- :param line_num: Line number in the source c file
- :param text: Text of that line including whitespace
+ :param int line_num: Line number in the source c file
+ :param str text: Text of that line including whitespace
"""
pos = 0
write_flag = 0
while text[pos] != "\n":
- if self._status == COMMENT:
+ if self._status == State.COMMENT:
if text[pos] == "*" and text[pos+1] == "/":
dest_f.write(text[write_flag:pos + 2])
pos = pos + 2
@@ -482,19 +510,19 @@ def _process_chars(self, dest_f, line_num, text):
pos = pos + 1
elif text[pos] == "/":
if text[pos+1] == "*":
- if self._status == IN_LOG:
+ if self._status == State.IN_LOG:
self._log_full += text[write_flag:pos].strip()
if self._log_full[-1] == ")":
- self._status = IN_LOG_CLOSE_BRACKET
+ self._status = State.IN_LOG_CLOSE_BRACKET
# NO change to self._log_lines as newline not removed
else:
dest_f.write(text[write_flag:pos])
write_flag = pos
pos = pos + 2 # leave the /* as not written
self._previous_status = self._status
- self._status = COMMENT
+ self._status = State.COMMENT
elif text[pos+1] == "/":
- if self._status == IN_LOG:
+ if self._status == State.IN_LOG:
self._log_full += text[write_flag:pos].strip()
# NO change to self._log_lines as newline not removed
dest_f.write(text[pos:])
@@ -510,12 +538,12 @@ def _process_chars(self, dest_f, line_num, text):
if text[str_pos] == "\n":
raise Exception(
"Unclosed string literal in {} at line: {}".
- format(self._src, line_num))
+ format(self.src, line_num))
elif text[str_pos] == "\\":
if text[str_pos+1] == "\n":
raise Exception(
"Unclosed string literal in {} at line: {}".
- format(self._src, line_num))
+ format(self.src, line_num))
else:
str_pos += 2 # ignore next char which may be a "
@@ -524,14 +552,14 @@ def _process_chars(self, dest_f, line_num, text):
pos = str_pos + 1
continue
- elif self._status == IN_LOG:
+ elif self._status == State.IN_LOG:
if text[pos] == ")":
match = LOG_END_REGEX.match(text[pos:])
if match:
# include the end
pos = pos + len(match.group(0))
self._log_full += text[write_flag:pos].strip()
- self._status = NORMAL_CODE
+ self._status = State.NORMAL_CODE
if text[pos:].strip(): # Stuff left
write_flag = pos
# self._log_lines not changed as no newline
@@ -551,24 +579,24 @@ def _process_chars(self, dest_f, line_num, text):
else:
pos += 1
- elif self._status == IN_LOG_CLOSE_BRACKET:
+ elif self._status == State.IN_LOG_CLOSE_BRACKET:
stripped = text.strip()
if stripped[0] == ";":
self._log_full += (";")
self._write_log_method(dest_f, line_num)
pos = text.index(";") + 1
write_flag = pos
- self._status = NORMAL_CODE
+ self._status = State.NORMAL_CODE
else:
# Save the ) as not part of the end
- self._status = IN_LOG
+ self._status = State.IN_LOG
elif text[pos] == "l":
match = LOG_START_REGEX.match(text[pos:])
if match:
self._log_start = text.index(match.group(0))
self._log = "".join(match.group(0).split())
- self._status = IN_LOG
+ self._status = State.IN_LOG
self._log_full = "" # text saved after while
self._log_lines = 0
dest_f.write(text[write_flag:pos])
@@ -584,45 +612,45 @@ def _process_chars(self, dest_f, line_num, text):
pos += 1
# after while text[pos] != "\n"
- if self._status == IN_LOG:
+ if self._status == State.IN_LOG:
self._log_full += text[write_flag:].strip()
self._log_lines += 1
else:
dest_f.write(text[write_flag:])
def unique_src(self):
- """ Returns the part of the source path which is different
+ """ Returns the suffix of the source and destination paths which is\
+ the same.
- For example assuming a source of
- /spinnaker/sPyNNaker/neural_modelling/src/common/in_spikes.h
- /spinnaker/sPyNNaker/neural_modelling/modified_src/common/in_spikes.h
- returns src/common/in_spikes.h
+ For example, assuming sources of
+ ``/spinnaker/sPyNNaker/neural_modelling/src/common/in_spikes.h``
+ ``/spinnaker/sPyNNaker/neural_modelling/modified_src/common/in_spikes.h``
+ this returns ``src/common/in_spikes.h``
:return: A pointer to the source relative to the destination
+ :rtype: str
"""
pos = 0
last_sep = 0
- while pos < len(self._src) and pos < len(self._dest) \
- and self._src[pos] == self._dest[pos]:
- if self._src[pos] == os.path.sep:
+ while pos < len(self.src) and pos < len(self.dest) \
+ and self.src[pos] == self.dest[pos]:
+ if self.src[pos] == os.path.sep:
last_sep = pos + 1
pos += 1
- return self._src[last_sep:]
+ return self.src[last_sep:]
@staticmethod
- def convert(src, dest, dict_file, range_start):
+ def convert(src, dest, dict_file, range_start=1):
""" Static method to create Object and do the conversion
- :param src: Full source directory
- :type src: str
- :param dest: Full destination directory
- :type dest: str
- :param dict_file: File to hold dictionary mappings
- :type dict_file: str
- :param range_start:
- :param range_start: id of last dictionary key used
- :type range_start: int
- :return: The last message id use which can in turn be passed into
+ :param str src: Source file
+ :param str dest: Destination file
+ :param str dict_file: File to hold dictionary mappings
+ :param int range_start: id of last dictionary key used
+ :return: The last message ID used, which can in turn be passed into this
+ method again (``range_start``) to get contiguous non-overlapping
+ IDs across many files.
+ :rtype: int
"""
converter = FileConverter(src, dest, dict_file)
return converter._run(range_start) # pylint: disable=protected-access
diff --git a/spinn_utilities/make_tools/replacer.py b/spinn_utilities/make_tools/replacer.py
index a5933bad..a9df6ef9 100644
--- a/spinn_utilities/make_tools/replacer.py
+++ b/spinn_utilities/make_tools/replacer.py
@@ -24,8 +24,17 @@
class Replacer(object):
+ """ Performs replacements.
+ """
+
+ _INT_FMT = struct.Struct("!I")
+ _FLT_FMT = struct.Struct("!f")
+ _DBL_FMT = struct.Struct("!d")
def __init__(self, dict_pointer):
+ """
+ :param str dict_pointer: Where to find the dictionary file
+ """
self._messages = {}
rest, _ = os.path.splitext(dict_pointer)
dict_path = rest + ".dict"
@@ -39,10 +48,16 @@ def __init__(self, dict_pointer):
continue
self._messages[parts[0]] = parts
else:
- logger.error("Unable to find a dictionary file at {}"
- .format(dict_path))
+ logger.error("Unable to find a dictionary file at {}".format(
+ dict_path))
def replace(self, short):
+ """ Apply the replacements to a short message.
+
+ :param str short: The short message to apply the transform to.
+ :return: The expanded message.
+ :rtype: str
+ """
parts = short.split(TOKEN)
if not parts[0].isdigit():
return short
@@ -60,9 +75,9 @@ def replace(self, short):
for match in matches:
i += 1
if match.endswith("f"):
- replacement = str(self.hex_to_float(parts[i]))
+ replacement = str(self._hex_to_float(parts[i]))
elif match.endswith("F"):
- replacement = str(self.hexes_to_double(
+ replacement = str(self._hexes_to_double(
parts[i], parts[i+1]))
i += 1
else:
@@ -73,11 +88,11 @@ def replace(self, short):
return preface + replaced
- def hex_to_float(self, hex_str):
- return struct.unpack('!f', struct.pack("!I", int(hex_str, 16)))[0]
+ def _hex_to_float(self, hex_str):
+ return self._FLT_FMT.unpack(
+ self._INT_FMT.pack(int(hex_str, 16)))[0]
- def hexes_to_double(self, upper, lower):
- return struct.unpack(
- '!d',
- struct.pack("!I", int(upper, 16)) +
- struct.pack("!I", int(lower, 16)))[0]
+ def _hexes_to_double(self, upper, lower):
+ return self._DBL_FMT.unpack(
+ self._INT_FMT.pack(int(upper, 16)) +
+ self._INT_FMT.pack(int(lower, 16)))[0]
diff --git a/spinn_utilities/ranged/ranged_list.py b/spinn_utilities/ranged/ranged_list.py
index 5b8eaf8e..a412409d 100644
--- a/spinn_utilities/ranged/ranged_list.py
+++ b/spinn_utilities/ranged/ranged_list.py
@@ -26,13 +26,15 @@ def function_iterator(function, size, ids=None):
list(function_iterator(lambda x: x * 2 , 3, ids=[2, 4, 6]))
- :param function: A function with one integer parameter that returns a value
- :param size: The number of elements to put in the list. If used, the
- function will be called with ``range(size)``. Ignored if ``ids``
- provided
- :param ids: A list of IDs to call the function for or None to use the size.
- :type ids: list of int
- :return: a list of values
+ :param ~collections.abc.Callable[[int],object] function:
+ A function with one integer parameter that returns a value
+ :param int size:
+ The number of elements to put in the list. If used, the function will
+ be called with ``range(size)``. Ignored if ``ids`` provided
+ :param ~collections.abc.Iterable(int) ids:
+ A list of IDs to call the function for or ``None`` to use the size.
+ :return: a sequence of values returned by the function
+ :rtype: ~collections.abc.Iterable(object)
"""
if ids is None:
ids = range(size)
@@ -50,11 +52,15 @@ class RangedList(AbstractList):
def __init__(
self, size=None, value=None, key=None, use_list_as_value=False):
"""
- :param size: Fixed length of the list
+ :param size:
+ Fixed length of the list;
+ if ``None``, the value must be a sized object.
+ :type size: int or None
:param value: value to given to all elements in the list
+ :type value: object or ~collections.abc.Sized
:param key: The dict key this list covers.
This is used only for better Exception messages
- :param use_list_as_value: True if the value *is* a list
+ :param bool use_list_as_value: True if the value *is* a list
"""
if size is None:
try:
@@ -308,10 +314,8 @@ def set_value_by_id(self, id, value): # @ReservedAssignment
Use ``set`` or ``__set__`` for slices, tuples, lists and negative
indexes.
- :param id: Single ID
- :type id: int
- :param value: The value to save
- :type value: anything
+ :param int id: Single ID
+ :param object value: The value to save
"""
self._check_id_in_range(id)
@@ -369,16 +373,13 @@ def set_value_by_slice(
""" Sets the value for a single range to the new value.
.. note::
- This method only works for a single positive int ID.
+ This method only works for a single positive range.
Use ``set`` or ``__set__`` for slices, tuples, lists and negative
indexes.
- :param slice_start: Start of the range
- :type slice_start: int
- :param slice_stop: Exclusive end of the range
- :type slice_stop: int
- :param value: The value to save
- :type value: anything
+ :param int slice_start: Start of the range
+ :param int slice_stop: Exclusive end of the range
+ :param object value: The value to save
"""
slice_start, slice_stop = self._check_slice_in_range(
slice_start, slice_stop)
@@ -468,8 +469,9 @@ def set_value_by_ids(self, ids, value, use_list_as_value=False):
def set_value_by_selector(self, selector, value, use_list_as_value=False):
""" Support for the ``list[x] =`` format.
- :param id: A single ID, a slice of IDs or a list of IDs
- :param value:
+ :param selector: A single ID, a slice of IDs or a list of IDs
+ :type selector: int or slice or list(int)
+ :param object value:
"""
if selector is None:
@@ -494,7 +496,7 @@ def get_ranges(self):
.. note::
As this is a copy it will not reflect any updates.
- :return:
+ :rtype: list(tuple(int,int,object))
"""
if self._ranged_based:
return list(self._ranges)
@@ -506,7 +508,7 @@ def set_default(self, default):
.. note::
Does not change the value of any element in the list.
- :param default: new default value
+ :param object default: new default value
"""
self._default = default
@@ -515,6 +517,7 @@ def get_default(self):
""" Returns the default value for this list.
:return: Default Value
+ :rtype: object
"""
try:
return self._default
@@ -528,7 +531,7 @@ def copy_into(self, other):
Depth is just enough so that any changes done through the RangedList
API on other will not change self
- :param RangedList; Another Ranged List to copy the values from
+ :param RangedList other: Another Ranged List to copy the values from
"""
# Assume the _default and key remain unchanged
self._ranged_based = other.range_based()
@@ -541,12 +544,13 @@ def copy_into(self, other):
def copy(self):
"""
- Turns this List into a copy of the other
+ Creates a copy of this list.
Depth is just enough so that any changes done through the RangedList
API on other will not change self
- :param RangedList; Another Ranged List to copy the values from
+ :return: The copy
+ :rtype: RangedList
"""
clone = RangedList(self._size, self._default, self._key)
clone.copy_into(self)
diff --git a/unittests/citation/test_citation.py b/unittests/citation/test_citation.py
index e7645d57..4ab3e879 100644
--- a/unittests/citation/test_citation.py
+++ b/unittests/citation/test_citation.py
@@ -19,9 +19,7 @@
import yaml
import httpretty
from spinn_utilities.citation.citation_updater_and_doi_generator import (
- ZENODO_DEPOSIT_GET_URL, ZENODO_VALID_STATUS_REQUEST_GET,
- ZENODO_VALID_STATUS_REQUEST_POST, ZENODO_PUBLISH_URL,
- ZENODO_VALID_STATUS_REQUEST_PUBLISH, ZENODO_DEPOSIT_PUT_URL)
+ _Zenodo as Zenodo)
def test_generate_aggregate():
@@ -29,19 +27,19 @@ def test_generate_aggregate():
f.write("test")
deposit_id = 56789
httpretty.register_uri(
- httpretty.GET, ZENODO_DEPOSIT_GET_URL,
- status=ZENODO_VALID_STATUS_REQUEST_GET)
+ httpretty.GET, Zenodo._DEPOSIT_GET_URL,
+ status=Zenodo._VALID_STATUS_REQUEST_GET)
httpretty.register_uri(
- httpretty.POST, ZENODO_DEPOSIT_GET_URL,
- status=ZENODO_VALID_STATUS_REQUEST_POST,
+ httpretty.POST, Zenodo._DEPOSIT_GET_URL,
+ status=Zenodo._VALID_STATUS_REQUEST_POST,
body=('{{"id": "{}", "metadata": {{'
'"prereserve_doi": {{"doi": "12345"}}}}}}'.format(deposit_id)))
httpretty.register_uri(
- httpretty.POST, ZENODO_DEPOSIT_PUT_URL.format(deposit_id),
- status=ZENODO_VALID_STATUS_REQUEST_POST)
+ httpretty.POST, Zenodo._DEPOSIT_PUT_URL.format(deposit_id),
+ status=Zenodo._VALID_STATUS_REQUEST_POST)
httpretty.register_uri(
- httpretty.POST, ZENODO_PUBLISH_URL.format(deposit_id),
- status=ZENODO_VALID_STATUS_REQUEST_PUBLISH)
+ httpretty.POST, Zenodo._PUBLISH_URL.format(deposit_id),
+ status=Zenodo._VALID_STATUS_REQUEST_PUBLISH)
output_path = tempfile.mktemp(".cff")
os.environ["PATH"] = os.environ["PATH"] + os.pathsep + (
os.path.dirname(__file__) + os.sep + "c_module")
diff --git a/unittests/make_tools/.gitignore b/unittests/make_tools/.gitignore
index 4a69e96b..1e66dd11 100644
--- a/unittests/make_tools/.gitignore
+++ b/unittests/make_tools/.gitignore
@@ -1,3 +1,4 @@
test_ranges.txt
/modified_src/
/log.ranges
+foo
diff --git a/unittests/make_tools/test_replacer.py b/unittests/make_tools/test_replacer.py
index e7f4ce70..d22e75ea 100644
--- a/unittests/make_tools/test_replacer.py
+++ b/unittests/make_tools/test_replacer.py
@@ -79,24 +79,24 @@ def test_hex_to_float(self):
"""
replacer = Replacer(os.path.join(PATH, "test"))
assert self.near_equals(
- -345443332234.13432143, replacer.hex_to_float("d2a0dc0e"))
+ -345443332234.13432143, replacer._hex_to_float("d2a0dc0e"))
assert self.near_equals(
- -2000, replacer.hex_to_float("c4fa0000"))
+ -2000, replacer._hex_to_float("c4fa0000"))
assert self.near_equals(
- -1, replacer.hex_to_float("bf800000"))
+ -1, replacer._hex_to_float("bf800000"))
assert self.near_equals(
- 0, replacer.hex_to_float("0"))
+ 0, replacer._hex_to_float("0"))
assert self.near_equals(
- 0.00014, replacer.hex_to_float("3912ccf7"))
+ 0.00014, replacer._hex_to_float("3912ccf7"))
assert self.near_equals(
- 1, replacer.hex_to_float("3f800000"))
+ 1, replacer._hex_to_float("3f800000"))
assert self.near_equals(
- 200, replacer.hex_to_float("43480000"))
+ 200, replacer._hex_to_float("43480000"))
assert self.near_equals(
- 455424364531.3463460, replacer.hex_to_float("52d412d1"))
- assert float("Inf") == replacer.hex_to_float("7f800000")
- assert 0-float("Inf") == replacer.hex_to_float("ff800000")
- assert math.isnan(replacer.hex_to_float("7fc00000"))
+ 455424364531.3463460, replacer._hex_to_float("52d412d1"))
+ assert float("Inf") == replacer._hex_to_float("7f800000")
+ assert 0-float("Inf") == replacer._hex_to_float("ff800000")
+ assert math.isnan(replacer._hex_to_float("7fc00000"))
def test_hexes_to_double(self):
"""
@@ -105,18 +105,18 @@ def test_hexes_to_double(self):
"""
replacer = Replacer(os.path.join(PATH, "test"))
assert self.near_equals(
- 0, replacer.hexes_to_double("0", "0"))
+ 0, replacer._hexes_to_double("0", "0"))
assert self.near_equals(
455424364531.3463460,
- replacer.hexes_to_double("425a825a", "13fcd62b"))
+ replacer._hexes_to_double("425a825a", "13fcd62b"))
assert self.near_equals(
-455424364531.3463460,
- replacer.hexes_to_double("c25a825a", "13fcd62b"))
+ replacer._hexes_to_double("c25a825a", "13fcd62b"))
assert self.near_equals(
- 23.60, replacer.hexes_to_double("40379999", "9999999a"))
+ 23.60, replacer._hexes_to_double("40379999", "9999999a"))
assert self.near_equals(
- -1, replacer.hexes_to_double("bff00000", "0"))
+ -1, replacer._hexes_to_double("bff00000", "0"))
assert self.near_equals(
- 1, replacer.hexes_to_double("3ff00000", "0"))
+ 1, replacer._hexes_to_double("3ff00000", "0"))
assert self.near_equals(
- 0.0000000004, replacer.hexes_to_double("3dfb7cdf", "d9d7bdbb"))
+ 0.0000000004, replacer._hexes_to_double("3dfb7cdf", "d9d7bdbb"))