diff --git a/.circleci/config.yml b/.circleci/config.yml index 27c16c14c3..263a46c424 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -1,13 +1,13 @@ version: 2.1 orbs: - python: circleci/python@2.1.1 + python: circleci/python@3.0.0 jobs: manylinux2014-aarch64: parameters: - NRN_PYTHON_VERSION: + NRN_PYTHON_VERSION_MINOR: type: string NRN_NIGHTLY_UPLOAD: type: string @@ -31,8 +31,8 @@ jobs: -e NRN_RELEASE_UPLOAD \ -e SETUPTOOLS_SCM_PRETEND_VERSION \ -e NRN_BUILD_FOR_UPLOAD=1 \ - 'neuronsimulator/neuron_wheel:latest-gcc9-aarch64' \ - packaging/python/build_wheels.bash linux << parameters.NRN_PYTHON_VERSION >> coreneuron + 'neuronsimulator/neuron_wheel:latest-aarch64' \ + packaging/python/build_wheels.bash linux 3<< parameters.NRN_PYTHON_VERSION_MINOR >> coreneuron - store_artifacts: path: ./wheelhouse @@ -41,24 +41,16 @@ jobs: - run: name: Test manylinux AArch64 wheel command: | - - # install mpi dependencies sudo apt update + echo "deb http://ppa.launchpad.net/deadsnakes/ppa/ubuntu jammy main" | sudo tee /etc/apt/sources.list.d/deadsnakes-ppa.list && sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys F23C5A6CF475977595C89F51BA6932366A755776 && sudo apt update + sudo apt update + # install mpi dependencies sudo apt install -y mpich openmpi-bin libopenmpi-dev libmpich-dev + version=3.<< parameters.NRN_PYTHON_VERSION_MINOR >> + # install Python from deadsnakes + sudo apt install -y python${version}-venv libpython${version}-dev g++ make - # choose available python versions from pyenv - pyenv_py_ver="" - case << parameters.NRN_PYTHON_VERSION >> in - 39) pyenv_py_ver="3.9" ;; - 310) pyenv_py_ver="3.10" ;; - 311) pyenv_py_ver="3.11" ;; - 312) pyenv_py_ver="3.12" ;; - *) echo "Error: pyenv python version not specified or not supported." 
&& exit 1;; - esac - - env PYTHON_CONFIGURE_OPTS="--enable-shared" pyenv install $pyenv_py_ver --force - pyenv global $pyenv_py_ver - export PYTHON_EXE=$(which python) + export PYTHON_EXE=$(which python3.<< parameters.NRN_PYTHON_VERSION_MINOR >>) # test wheel packaging/python/test_wheels.sh $PYTHON_EXE $(ls -t wheelhouse/*.whl) @@ -86,7 +78,7 @@ workflows: - /circleci\/.*/ matrix: parameters: - NRN_PYTHON_VERSION: ["312"] + NRN_PYTHON_VERSION_MINOR: ["13"] NRN_NIGHTLY_UPLOAD: ["false"] nightly: @@ -101,5 +93,5 @@ workflows: - manylinux2014-aarch64: matrix: parameters: - NRN_PYTHON_VERSION: ["39", "310", "311", "312"] + NRN_PYTHON_VERSION_MINOR: ["9", "10", "11", "12", "13"] NRN_NIGHTLY_UPLOAD: ["true"] diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml index a4e23da01c..f204e36421 100644 --- a/.github/workflows/coverage.yml +++ b/.github/workflows/coverage.yml @@ -24,7 +24,7 @@ on: env: PY_MIN_VERSION: '3.9' PY_MID_VERSION: '3.10' - PY_MAX_VERSION: '3.12' + PY_MAX_VERSION: '3.13' jobs: coverage: diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index 2ef1052dd7..f9cc6e0051 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -17,7 +17,7 @@ on: - release/** env: - DEFAULT_PY_VERSION: '3.12' + DEFAULT_PY_VERSION: '3.13' jobs: documentation: diff --git a/.github/workflows/neuron-ci.yml b/.github/workflows/neuron-ci.yml index 48ec76d28e..adcf08c3b7 100644 --- a/.github/workflows/neuron-ci.yml +++ b/.github/workflows/neuron-ci.yml @@ -39,7 +39,7 @@ jobs: DESIRED_CMAKE_VERSION: 3.17 DYNAMIC_PYTHON_CMAKE_VERSION: 3.18 PY_MIN_VERSION: ${{ matrix.config.python_min_version || '3.9' }} - PY_MAX_VERSION: ${{ matrix.config.python_max_version || '3.12' }} + PY_MAX_VERSION: ${{ matrix.config.python_max_version || '3.13' }} MUSIC_INSTALL_DIR: /opt/MUSIC # hash of commit containing mpi4py 4 fix MUSIC_VERSION: '13f312338dcccebfe74d391b1b24f1b6d816ac6c' diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 02f5f8374a..5970a4243f 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -42,6 +42,9 @@ stages: python.version: '3.11' Python312: python.version: '3.12' + Python313: + python.version: '3.13' + steps: # Secure files documentation: @@ -74,7 +77,7 @@ stages: - script: | sudo apt update sudo apt install -y mpich openmpi-bin libopenmpi-dev libmpich-dev - displayName: 'Install Test System Depdendencies' + displayName: 'Install Test System Dependencies' - template: ci/azure-wheel-test-upload.yml @@ -102,6 +105,10 @@ stages: python.version: '3.12' python.org.version: '3.12.0' python.installer.name: 'macos11.pkg' + Python313: + python.version: '3.13' + python.org.version: '3.13.0' + python.installer.name: 'macos11.pkg' steps: diff --git a/bldnrnmacpkgcmake.sh b/bldnrnmacpkg.sh similarity index 76% rename from bldnrnmacpkgcmake.sh rename to bldnrnmacpkg.sh index 2f0a56af8d..49ed663691 100644 --- a/bldnrnmacpkgcmake.sh +++ b/bldnrnmacpkg.sh @@ -1,19 +1,29 @@ #!/usr/bin/env bash set -ex -default_pythons="python3.8 python3.9 python3.10 python3.11" +default_pythons="python3.9 python3.10 python3.11 python3.12 python3.13" # distribution built with # bash bldnrnmacpkgcmake.sh # without args, default are the pythons above. +if test "$RX3D_OPT" = "" ; then + RX3D_OPT=1 # a value of 2 takes much longer to build +fi + # If all the pythons are universal, then so is NEURON. # Otherwise $CPU -# All pythons must have the same macos version and that will become -# the MACOSX_DEPLOYMENT_TARGET +# Now obsolete... 
 # On my machine, to build nrn-x.x.x-macosx-10.9-universal2-py-38-39-310-311.pkg
 # I built my own versions of 3.8 in $HOME/soft/python3.8, and
-export PATH=$HOME/soft/python3.8/bin:$PATH
+# export PATH=$HOME/soft/python3.8/bin:$PATH
+# All other python versions installed from python.org universal2 installers.
+
+# As of Python-3.13.0, we configure to link universal2 static readline
+# (and ncurses) into libnrniv.dylib.
+# The packages were downloaded and the universal libraries were built with
+# nrn/packaging/python/build_static_readline_osx.bash
+READLINE_LIST="/opt/nrnwheel/arm64/readline;/opt/nrnwheel/arm64/ncurses"
 
 CPU=`uname -m`
 
@@ -24,10 +34,15 @@ if test "$args" = "" ; then
   args="$default_pythons"
 fi
 
+MPI_LIST="/opt/homebrew/opt/openmpi/include;/opt/homebrew/opt/mpich/include"
 
 # sysconfig.get_platform() looks like, e.g. "macosx-12.2-arm64" or
 # "macosx-11-universal2". I.e. encodes MACOSX_DEPLOYMENT_TARGET and archs.
 # Demand all pythons we are building against have same platform.
+# Update: nrn software now requires at least 10.15. All the pythons on this
+# machine are 10.9, except python 3.13 is 10.13. The substantive aspect of
+# the following fragment (exit 1 if not all the same platform) has been
+# commented out.
 mac_platform=""
 for i in $args ; do
   last_py=$i
@@ -38,7 +53,7 @@ for i in $args ; do
   fi
   if test "$mac_platform" != "$mplat" ; then
     echo "$i platform \"$mplat\" differs from previous python \"$mac_platform\"."
-    exit 1
+#    exit 1
   fi
 done
 
@@ -93,13 +108,15 @@ fi
 
 cmake .. -G Ninja -DCMAKE_INSTALL_PREFIX=$NRN_INSTALL \
   -DNRN_ENABLE_MPI_DYNAMIC=ON \
+  -DNRN_MPI_DYNAMIC="$MPI_LIST" \
   -DPYTHON_EXECUTABLE=`which python3` -DNRN_ENABLE_PYTHON_DYNAMIC=ON \
   -DNRN_PYTHON_DYNAMIC="$pythons" \
   -DIV_ENABLE_X11_DYNAMIC=ON \
   -DNRN_ENABLE_CORENEURON=OFF \
-  -DNRN_RX3D_OPT_LEVEL=2 \
+  -DNRN_RX3D_OPT_LEVEL=$RX3D_OPT \
+  -DNRN_WHEEL_STATIC_READLINE=ON \
   $archs_cmake \
-  -DCMAKE_PREFIX_PATH=/usr/X11 \
+  -DCMAKE_PREFIX_PATH="/usr/X11;$READLINE_LIST" \
   -DCMAKE_C_COMPILER=clang -DCMAKE_CXX_COMPILER=clang++
 
 ninja install
@@ -134,22 +151,29 @@ for i in $args ; do
   chk $i
 done
 
+describe="`sh $NRN_SRC/nrnversion.sh describe`"
+macos=macos${MACOSX_DEPLOYMENT_TARGET}
+PACKAGE_FULL_NAME=nrn-${describe}-${mac_platform}-${PYVS}.pkg
+PACKAGE_FILE_NAME=$NRN_BLD/src/mac/build/NEURON.pkg
+
 #/Applications/Packages.app from
 # http://s.sudre.free.fr/Software/Packages/about.html
 # For mac to do a productsign, need my developerID_installer.cer
 # and Neurondev.p12 file. To add to the keychain, double click each
 # of those files. By default, I added my certificates to the login keychain.
-ninja macpkg # will sign the binaries, construct below
-             # mentioned PACKAGE_FILE_NAME, request notarization from
-             # Apple, and staple the package.
+
+# Will sign the binaries, construct above
+# mentioned PACKAGE_FILE_NAME, request notarization from
+# Apple, and staple the package.
+if ! ninja macpkg ; then
+  echo "ninja macpkg failed. (because of notarization failure?)"
+  echo "Not creating $PACKAGE_FULL_NAME"
+  echo " from $PACKAGE_FILE_NAME"
+  exit 1
+fi
 
 # Copy the package to $HOME/$PACKAGE_FULL_NAME
 # You should then manually upload that to github.
-describe="`sh $NRN_SRC/nrnversion.sh describe`" -macos=macos${MACOSX_DEPLOYMENT_TARGET} -PACKAGE_FULL_NAME=nrn-${describe}-${mac_platform}-${PYVS}.pkg -PACKAGE_FILE_NAME=$NRN_BLD/src/mac/build/NEURON.pkg - cp $PACKAGE_FILE_NAME $HOME/$PACKAGE_FULL_NAME echo " diff --git a/ci/win_build_cmake.sh b/ci/win_build_cmake.sh index b82e430444..15cc217323 100755 --- a/ci/win_build_cmake.sh +++ b/ci/win_build_cmake.sh @@ -32,7 +32,7 @@ cd $BUILD_SOURCESDIRECTORY/build -DNRN_BINARY_DIST_BUILD=ON \ -DPYTHON_EXECUTABLE=/c/Python39/python.exe \ -DNRN_ENABLE_PYTHON_DYNAMIC=ON \ - -DNRN_PYTHON_DYNAMIC='c:/Python39/python.exe;c:/Python310/python.exe;c:/Python311/python.exe;c:/Python312/python.exe' \ + -DNRN_PYTHON_DYNAMIC='c:/Python39/python.exe;c:/Python310/python.exe;c:/Python311/python.exe;c:/Python312/python.exe;c:/Python313/python.exe' \ -DCMAKE_INSTALL_PREFIX='/c/nrn-install' \ -DMPI_CXX_LIB_NAMES:STRING=msmpi \ -DMPI_C_LIB_NAMES:STRING=msmpi \ diff --git a/ci/win_download_deps.cmd b/ci/win_download_deps.cmd index 40a9b44f49..166d44461a 100644 --- a/ci/win_download_deps.cmd +++ b/ci/win_download_deps.cmd @@ -7,6 +7,7 @@ pwsh -command Invoke-WebRequest -MaximumRetryCount 4 -OutFile python-3.9.exe htt pwsh -command Invoke-WebRequest -MaximumRetryCount 4 -OutFile python-3.10.exe https://www.python.org/ftp/python/3.10.0/python-3.10.0-amd64.exe || goto :error pwsh -command Invoke-WebRequest -MaximumRetryCount 4 -OutFile python-3.11.exe https://www.python.org/ftp/python/3.11.1/python-3.11.1-amd64.exe || goto :error pwsh -command Invoke-WebRequest -MaximumRetryCount 4 -OutFile python-3.12.exe https://www.python.org/ftp/python/3.12.1/python-3.12.1-amd64.exe || goto :error +pwsh -command Invoke-WebRequest -MaximumRetryCount 4 -OutFile python-3.13.exe https://www.python.org/ftp/python/3.13.0/python-3.13.0-amd64.exe || goto :error :: mpi pwsh -command Invoke-WebRequest -MaximumRetryCount 4 -OutFile msmpisetup.exe https://download.microsoft.com/download/a/5/2/a5207ca5-1203-491a-8fb8-906fd68ae623/msmpisetup.exe || goto :error diff --git a/ci/win_install_deps.cmd b/ci/win_install_deps.cmd index c12f32cdef..82a8c950d1 100644 --- a/ci/win_install_deps.cmd +++ b/ci/win_install_deps.cmd @@ -7,6 +7,7 @@ python-3.9.exe /passive Include_pip=1 Include_test=0 PrependPath=1 DefaultJustFo python-3.10.exe /passive Include_pip=1 Include_test=0 PrependPath=1 DefaultJustForMeTargetDir=C:\Python310 || goto :error python-3.11.exe /passive Include_pip=1 Include_test=0 PrependPath=1 DefaultJustForMeTargetDir=C:\Python311 || goto :error python-3.12.exe /passive Include_pip=1 Include_test=0 PrependPath=1 DefaultJustForMeTargetDir=C:\Python312 || goto :error +python-3.13.exe /passive Include_pip=1 Include_test=0 PrependPath=1 DefaultJustForMeTargetDir=C:\Python313 || goto :error :: fix msvcc version for all python3 pwsh -command "(Get-Content C:\Python39\Lib\distutils\cygwinccompiler.py) -replace 'elif msc_ver == ''1600'':', 'elif msc_ver == ''1927'':' | Out-File C:\Python39\Lib\distutils\cygwinccompiler.py" @@ -24,8 +25,10 @@ C:\Python39\python.exe -m pip install numpy cython || goto :error C:\Python310\python.exe -m pip install numpy cython || goto :error C:\Python311\python.exe -m pip install numpy cython || goto :error C:\Python312\python.exe -m pip install numpy cython || goto :error +C:\Python313\python.exe -m pip install numpy cython || goto :error :: setuptools 70.2 leads to an error C:\Python312\python.exe -m pip install setuptools==70.1.1 || goto :error +C:\Python313\python.exe -m pip install setuptools==70.1.1 || goto :error 
:: install nsis nsis-3.05-setup.exe /S || goto :error diff --git a/ci/win_test_installer.cmd b/ci/win_test_installer.cmd index bda5bbec7b..a6cf59a009 100644 --- a/ci/win_test_installer.cmd +++ b/ci/win_test_installer.cmd @@ -21,18 +21,21 @@ C:\Python39\python -c "import neuron; neuron.test(); quit()" || set "errorfound= C:\Python310\python -c "import neuron; neuron.test(); quit()" || set "errorfound=y" C:\Python311\python -c "import neuron; neuron.test(); quit()" || set "errorfound=y" C:\Python312\python -c "import neuron; neuron.test(); quit()" || set "errorfound=y" +C:\Python313\python -c "import neuron; neuron.test(); quit()" || set "errorfound=y" :: install oldest supported numpy C:\Python39\python.exe -m pip install -r packaging/python/oldest_numpy_requirements.txt || goto :error C:\Python310\python.exe -m pip install -r packaging/python/oldest_numpy_requirements.txt || goto :error C:\Python311\python.exe -m pip install -r packaging/python/oldest_numpy_requirements.txt || goto :error C:\Python312\python.exe -m pip install -r packaging/python/oldest_numpy_requirements.txt || goto :error +C:\Python313\python.exe -m pip install -r packaging/python/oldest_numpy_requirements.txt || goto :error :: test all pythons again C:\Python39\python -c "import neuron; neuron.test(); quit()" || set "errorfound=y" C:\Python310\python -c "import neuron; neuron.test(); quit()" || set "errorfound=y" C:\Python311\python -c "import neuron; neuron.test(); quit()" || set "errorfound=y" C:\Python312\python -c "import neuron; neuron.test(); quit()" || set "errorfound=y" +C:\Python313\python -c "import neuron; neuron.test(); quit()" || set "errorfound=y" :: run also using whatever is system python python -m pip install numpy diff --git a/docs/python/modelspec/programmatic/topology/seclist.rst b/docs/python/modelspec/programmatic/topology/seclist.rst index 3ff618c188..810d145d62 100755 --- a/docs/python/modelspec/programmatic/topology/seclist.rst +++ b/docs/python/modelspec/programmatic/topology/seclist.rst @@ -31,6 +31,10 @@ SectionList for sec in python_iterable_of_sections: sl.append(sec) + ``len(sl)`` returns the number of sections in the SectionList. + + ``list(sl)`` and ``[s for s in sl]`` generate equivalent lists. + .. seealso:: :class:`SectionBrowser`, :class:`Shape`, :meth:`RangeVarPlot.list` diff --git a/packaging/python/Dockerfile b/packaging/python/Dockerfile index 36c7d0ad4f..95931335cc 100644 --- a/packaging/python/Dockerfile +++ b/packaging/python/Dockerfile @@ -93,4 +93,7 @@ COPY Dockerfile . 
# build wheels from there WORKDIR /root +# remove Python 3.13t since we do not support the free-threaded build yet +RUN rm -fr /opt/python/cp313-cp313t + ENV NMODL_PYLIB=/nrnwheel/python/lib/libpython3.10.so.1.0 diff --git a/packaging/python/oldest_numpy_requirements.txt b/packaging/python/oldest_numpy_requirements.txt index 4a77e64c2b..eb2e1a80db 100644 --- a/packaging/python/oldest_numpy_requirements.txt +++ b/packaging/python/oldest_numpy_requirements.txt @@ -3,3 +3,4 @@ numpy==1.21.6;python_version=='3.9' and platform_machine=='arm64' numpy==1.21.6;python_version=='3.10' numpy==1.23.5;python_version=='3.11' numpy==1.26.4;python_version=='3.12' +numpy==2.1.0;python_version=='3.13' diff --git a/setup.py b/setup.py index 05e8ca67e1..25c3fabc2a 100644 --- a/setup.py +++ b/setup.py @@ -4,6 +4,7 @@ import sys from collections import defaultdict import logging +import platform logging.basicConfig(level=logging.INFO) from shutil import copytree, which @@ -461,6 +462,8 @@ def setup_package(): + ["-Wl,-rpath,{}".format(REL_RPATH + "/../../.data/lib/")], ) ) + if platform.system() == "Darwin": + rxd_params["extra_link_args"] += ["-headerpad_max_install_names"] logging.info("RX3D compile flags %s" % str(rxd_params)) diff --git a/src/coreneuron/io/nrn_setup.cpp b/src/coreneuron/io/nrn_setup.cpp index 3fe338348f..8cdd33bee2 100644 --- a/src/coreneuron/io/nrn_setup.cpp +++ b/src/coreneuron/io/nrn_setup.cpp @@ -220,6 +220,12 @@ std::vector nrn_read_filesdat(const char* filesdat) { int iFile; nrn_assert(fscanf(fp, "%d\n", &iFile) == 1); if ((iNum % nrnmpi_numprocs) == nrnmpi_myid) { + // A "-1" entry means that this rank should not be assigned further gid groups. + // It is a way to create files.dat files which deterministically assign gid groups to + // ranks, particularly useful for very large simulations which required load balancing. + if (iFile == -1) { + break; + } rank_cell_groups.push_back(iFile); } } diff --git a/src/mac/nrn_productsign.sh b/src/mac/nrn_productsign.sh index cbe1d78122..84007ad2ee 100755 --- a/src/mac/nrn_productsign.sh +++ b/src/mac/nrn_productsign.sh @@ -1,4 +1,5 @@ #!/usr/bin/env bash +set -ex # Signing the package with an identified developer certificate means the # installer does not have to lower the security settings on their machine. 
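Reviewer note, not part of the patch: the files.dat hunk in src/coreneuron/io/nrn_setup.cpp above hands out gid-group entries round-robin, entry iNum going to rank iNum % nrnmpi_numprocs, and the new "-1" sentinel makes the matching rank stop collecting further groups. As an illustration only (hypothetical group ids, everything preceding the group entries omitted), with 2 ranks the entry sequence

    101  201  102  202  -1  203

deterministically gives rank 0 the groups 101 and 102 (it hits the -1 at position 4 and breaks out of the read loop), while rank 1 skips the sentinel and collects 201, 202 and 203.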
diff --git a/src/nrniv/secbrows.cpp b/src/nrniv/secbrows.cpp index 84263229b6..207a5b6b4f 100644 --- a/src/nrniv/secbrows.cpp +++ b/src/nrniv/secbrows.cpp @@ -121,17 +121,13 @@ OcSectionBrowser::OcSectionBrowser(Object* ob) psec_[scnt_++] = sec; } } else { - struct hoc_Item* qsec; scnt_ = 0; - // ForAllSections(sec) //{ - ITERATE(qsec, section_list) { + for (const Section* sec: range_sec(section_list)) { ++scnt_; } psec_ = new Section*[scnt_]; scnt_ = 0; - // ForAllSections(sec) //{ - ITERATE(qsec, section_list) { - Section* sec = hocSEC(qsec); + for (Section* sec: range_sec(section_list)) { psec_[scnt_++] = sec; } } @@ -359,18 +355,13 @@ void BrowserAccept::execute() { } SectionBrowserImpl::SectionBrowserImpl() { - struct hoc_Item* qsec; scnt_ = 0; - // ForAllSections(sec) //{ - ITERATE(qsec, section_list) { - Section* sec = hocSEC(qsec); + for (const Section* sec: range_sec(section_list)) { ++scnt_; } psec_ = new Section*[scnt_]; scnt_ = 0; - // ForAllSections(sec) //{ - ITERATE(qsec, section_list) { - Section* sec = hocSEC(qsec); + for (Section* sec: range_sec(section_list)) { psec_[scnt_++] = sec; section_ref(sec); } diff --git a/src/nrnoc/cabcode.cpp b/src/nrnoc/cabcode.cpp index fe6de4e3a2..85c4b1b392 100644 --- a/src/nrnoc/cabcode.cpp +++ b/src/nrnoc/cabcode.cpp @@ -450,10 +450,7 @@ Section* chk_access() { Section* sec = secstack[isecstack]; if (!sec || !sec->prop) { /* use any existing section as a default section */ - hoc_Item* qsec; - // ForAllSections(lsec) - ITERATE(qsec, section_list) { - Section* lsec = hocSEC(qsec); + for (Section* lsec: range_sec(section_list)) { if (lsec->prop) { sec = lsec; ++sec->refcount; @@ -479,10 +476,7 @@ Section* nrn_noerr_access(void) /* return 0 if no accessed section */ Section* sec = secstack[isecstack]; if (!sec || !sec->prop) { /* use any existing section as a default section */ - hoc_Item* qsec; - // ForAllSections(lsec) - ITERATE(qsec, section_list) { - Section* lsec = hocSEC(qsec); + for (Section* lsec: range_sec(section_list)) { if (lsec->prop) { sec = lsec; ++sec->refcount; @@ -1639,8 +1633,6 @@ void nrn_parent_info(Section* s) { } void setup_topology(void) { - Item* qsec; - /* use connection info in section property to connect nodes. 
*/ /* for the moment we assume uniform dx and range 0-1 */ @@ -1656,9 +1648,7 @@ void setup_topology(void) { nrn_global_ncell = 0; - // ForAllSections(sec) - ITERATE(qsec, section_list) { - Section* sec = hocSEC(qsec); + for (Section* sec: range_sec(section_list)) { #if 0 if (sec->nnode < 1) { /* last node is not a segment */ hoc_execerror(secname(sec), @@ -2200,15 +2190,11 @@ void hoc_level_pushsec(Section* sec) { } void push_section(void) { - Section* sec; + Section* sec = nullptr; if (hoc_is_str_arg(1)) { - Item* qsec; char* s; - sec = (Section*) 0; s = gargstr(1); - // ForAllSections(sec1) /* I can't imagine a more inefficient way */ - ITERATE(qsec, section_list) { - Section* sec1 = hocSEC(qsec); + for (Section* sec1: range_sec(section_list)) { if (strcmp(s, nrn_sec2pysecname(sec1)) == 0) { sec = sec1; break; diff --git a/src/nrnoc/extcelln.cpp b/src/nrnoc/extcelln.cpp index d807f2bf47..ccfc4250a1 100644 --- a/src/nrnoc/extcelln.cpp +++ b/src/nrnoc/extcelln.cpp @@ -225,9 +225,7 @@ void extnode_free_elements(Extnode* nde) { } static void check_if_extracellular_in_use() { - hoc_Item* qsec; - ITERATE(qsec, section_list) { - Section* sec = hocSEC(qsec); + for (const Section* sec: range_sec(section_list)) { if (sec->pnode[0]->extnode) { hoc_execerror("Cannot change nlayer_extracellular when instances exist", NULL); } @@ -486,14 +484,11 @@ void ext_con_coef(void) /* setup a and b */ { int j, k; double dx, area; - hoc_Item* qsec; Node *nd, **pnd; Extnode* nde; /* temporarily store half segment resistances in rhs */ - // ForAllSections(sec) - ITERATE(qsec, section_list) { - Section* sec = hocSEC(qsec); + for (Section* sec: range_sec(section_list)) { if (sec->pnode[0]->extnode) { dx = section_length(sec) / ((double) (sec->nnode - 1)); for (j = 0; j < sec->nnode - 1; j++) { @@ -529,9 +524,7 @@ void ext_con_coef(void) /* setup a and b */ section connects straight to the point*/ /* for the near future we always have a last node at x=1 with no properties */ - // ForAllSections(sec) - ITERATE(qsec, section_list) { - Section* sec = hocSEC(qsec); + for (const Section* sec: range_sec(section_list)) { if (sec->pnode[0]->extnode) { /* node half resistances in general get added to the node and to the node's "child node in the same section". @@ -549,9 +542,7 @@ void ext_con_coef(void) /* setup a and b */ } } } - // ForAllSections(sec) - ITERATE(qsec, section_list) { - Section* sec = hocSEC(qsec); + for (Section* sec: range_sec(section_list)) { if (sec->pnode[0]->extnode) { /* convert to siemens/cm^2 for all nodes except last and microsiemens for last. This means that a*V = mamps/cm2 @@ -578,9 +569,7 @@ void ext_con_coef(void) /* setup a and b */ } } /* now the effect of parent on node equation. 
*/ - // ForAllSections(sec) - ITERATE(qsec, section_list) { - Section* sec = hocSEC(qsec); + for (const Section* sec: range_sec(section_list)) { if (sec->pnode[0]->extnode) { for (j = 0; j < sec->nnode; j++) { nd = sec->pnode[j]; diff --git a/src/nrnoc/solve.cpp b/src/nrnoc/solve.cpp index a3e55c2535..a42fbe960a 100644 --- a/src/nrnoc/solve.cpp +++ b/src/nrnoc/solve.cpp @@ -100,17 +100,13 @@ Section** secorder; double debugsolve(void) /* returns solution error */ { short inode; - int i; - Section *sec, *psec, *ch; Node *nd, *pnd, **ndP; double err, sum; /* save parts of matrix that will be destroyed */ - assert(0) - /* need to save the rootnodes too */ - // ForAllSections(sec) - ITERATE(qsec, section_list) { - Section* sec = hocSEC(qsec); + assert(0); + /* need to save the rootnodes too */ + for (const Section* sec: range_sec(section_list)) { assert(sec->pnode && sec->nnode); for (inode = sec->nnode - 1; inode >= 0; inode--) { nd = sec->pnode[inode]; @@ -124,9 +120,7 @@ double debugsolve(void) /* returns solution error */ err = 0.; /* need to check the rootnodes too */ - // ForAllSections(sec) - ITERATE(qsec, section_list) { - Section* sec = hocSEC(qsec); + for (const Section* sec: range_sec(section_list)) { for (inode = sec->nnode - 1; inode >= 0; inode--) { ndP = sec->pnode + inode; nd = sec->pnode[inode]; @@ -141,10 +135,9 @@ double debugsolve(void) /* returns solution error */ if (inode < sec->nnode - 1) { sum += NODEA(ndP[1]) * NODERHS(ndP[1]); } - for (ch = nd->child; ch; ch = ch->sibling) { - psec = ch; - pnd = psec->pnode[0]; - assert(pnd && psec->nnode); + for (const Section* ch = nd->child; ch; ch = ch->sibling) { + pnd = ch->pnode[0]; + assert(pnd && ch->nnode); sum += NODEA(pnd) * NODERHS(pnd); } sum -= nd->savrhs; @@ -291,12 +284,9 @@ static void dashes(Section* sec, int offset, int first); void nrnhoc_topology(void) /* print the topology of the branched cable */ { - hoc_Item* q; - v_setup_vectors(); Printf("\n"); - ITERATE(q, section_list) { - Section* sec = (Section*) VOIDITM(q); + for (Section* sec: range_sec(section_list)) { if (sec->parentsec == (Section*) 0) { Printf("|"); dashes(sec, 0, '-'); @@ -434,10 +424,7 @@ void bksub(NrnThread* _nt) { } void nrn_clear_mark(void) { - hoc_Item* qsec; - // ForAllSections(sec) - ITERATE(qsec, section_list) { - Section* sec = hocSEC(qsec); + for (Section* sec: range_sec(section_list)) { sec->volatile_mark = 0; } } @@ -744,15 +731,10 @@ void section_order(void) /* create a section order consistent */ { int order, isec; Section* ch; - Section* sec; - hoc_Item* qsec; /* count the sections */ section_count = 0; - /*SUPPRESS 765*/ - // ForAllSections(sec) - ITERATE(qsec, section_list) { - Section* sec = hocSEC(qsec); + for (Section* sec: range_sec(section_list)) { sec->order = -1; ++section_count; } @@ -765,9 +747,7 @@ void section_order(void) /* create a section order consistent */ secorder = (Section**) emalloc(section_count * sizeof(Section*)); } order = 0; - // ForAllSections(sec) /* all the roots first */ - ITERATE(qsec, section_list) { - Section* sec = hocSEC(qsec); + for (Section* sec: range_sec(section_list)) { if (!sec->parentsec) { secorder[order] = sec; sec->order = order; @@ -777,10 +757,7 @@ void section_order(void) /* create a section order consistent */ for (isec = 0; isec < section_count; isec++) { if (isec >= order) { - // Sections form a loop. 
- // ForAllSections(sec) - ITERATE(qsec, section_list) { - Section* sec = hocSEC(qsec); + for (Section* sec: range_sec(section_list)) { Section *psec, *s = sec; for (psec = sec->parentsec; psec; s = psec, psec = psec->parentsec) { if (!psec || s->order >= 0) { @@ -798,7 +775,7 @@ void section_order(void) /* create a section order consistent */ } } } - sec = secorder[isec]; + Section* sec = secorder[isec]; for (ch = sec->child; ch; ch = ch->sibling) { secorder[order] = ch; ch->order = order; diff --git a/src/nrnoc/treeset.cpp b/src/nrnoc/treeset.cpp index f27b02fa4f..e7fc5c78c0 100644 --- a/src/nrnoc/treeset.cpp +++ b/src/nrnoc/treeset.cpp @@ -814,7 +814,6 @@ void connection_coef(void) /* setup a and b */ { int j; double area; - hoc_Item* qsec; Node* nd; #if RA_WARNING extern int nrn_ra_set; @@ -836,9 +835,7 @@ void connection_coef(void) /* setup a and b */ #endif ++recalc_diam_count_; nrn_area_ri_nocount_ = 1; - // ForAllSections(sec) - ITERATE(qsec, section_list) { - Section* sec = hocSEC(qsec); + for (Section* sec: range_sec(section_list)) { nrn_area_ri(sec); } nrn_area_ri_nocount_ = 0; @@ -858,9 +855,7 @@ void connection_coef(void) /* setup a and b */ std::fill_n(nt.node_a_storage(), nt.end, 0.0); std::fill_n(nt.node_b_storage(), nt.end, 0.0); } - // ForAllSections(sec) - ITERATE(qsec, section_list) { - Section* sec = hocSEC(qsec); + for (Section* sec: range_sec(section_list)) { // Unnecessary because they are unused, but help when looking at fmatrix. if (!sec->parentsec) { if (auto* const ptr = nrn_classicalNodeA(sec->parentnode)) { @@ -889,9 +884,7 @@ void connection_coef(void) /* setup a and b */ } } /* now the effect of parent on node equation. */ - // ForAllSections(sec) - ITERATE(qsec, section_list) { - Section* sec = hocSEC(qsec); + for (const Section* sec: range_sec(section_list)) { for (j = 0; j < sec->nnode; j++) { nd = sec->pnode[j]; *nrn_classicalNodeB(nd) = -1.e2 * NODERINV(nd) / NODEAREA(nd); diff --git a/src/nrnpython/nrnpy_hoc.cpp b/src/nrnpython/nrnpy_hoc.cpp index 1e68124c8c..0ee57d8289 100644 --- a/src/nrnpython/nrnpy_hoc.cpp +++ b/src/nrnpython/nrnpy_hoc.cpp @@ -1646,6 +1646,16 @@ static int araychk(Arrayinfo* a, PyHocObject* po, int ix) { return 0; } +static Py_ssize_t seclist_count(Object* ho) { + assert(ho->ctemplate == hoc_sectionlist_template_); + hoc_List* sl = (hoc_List*) (ho->u.this_pointer); + Py_ssize_t n = 0; + for (hoc_Item* q1 = sl->next; q1 != sl; q1 = q1->next) { + n++; + } + return n; +} + static Py_ssize_t hocobj_len(PyObject* self) { PyHocObject* po = (PyHocObject*) self; if (po->type_ == PyHoc::HocObject) { @@ -1654,8 +1664,7 @@ static Py_ssize_t hocobj_len(PyObject* self) { } else if (po->ho_->ctemplate == hoc_list_template_) { return ivoc_list_count(po->ho_); } else if (po->ho_->ctemplate == hoc_sectionlist_template_) { - PyErr_SetString(PyExc_TypeError, "hoc.SectionList has no len()"); - return -1; + return seclist_count(po->ho_); } } else if (po->type_ == PyHoc::HocArray) { Arrayinfo* a = hocobj_aray(po->sym_, po->ho_); @@ -1682,6 +1691,8 @@ static int hocobj_nonzero(PyObject* self) { b = vector_capacity((Vect*) po->ho_->u.this_pointer) > 0; } else if (po->ho_->ctemplate == hoc_list_template_) { b = ivoc_list_count(po->ho_) > 0; + } else if (po->ho_->ctemplate == hoc_sectionlist_template_) { + b = seclist_count(po->ho_) > 0; } } else if (po->type_ == PyHoc::HocArray) { Arrayinfo* a = hocobj_aray(po->sym_, po->ho_); @@ -1715,13 +1726,16 @@ static PyObject* hocobj_iter(PyObject* raw_self) { nb::object self = nb::borrow(raw_self); PyHocObject* po 
= (PyHocObject*) self.ptr(); - if (po->type_ == PyHoc::HocObject) { + if (po->type_ == PyHoc::HocObject || po->type_ == PyHoc::HocSectionListIterator) { if (po->ho_->ctemplate == hoc_vec_template_) { return PySeqIter_New(self.ptr()); } else if (po->ho_->ctemplate == hoc_list_template_) { return PySeqIter_New(self.ptr()); } else if (po->ho_->ctemplate == hoc_sectionlist_template_) { // need a clone of self so nested loops do not share iteritem_ + // The HocSectionListIter arm of the outer 'if' became necessary + // at Python-3.13.1 upon which the following body is executed + // twice. See https://github.com/python/cpython/issues/127682 auto po2 = nb::steal(nrnpy_ho2po(po->ho_)); PyHocObject* pho2 = (PyHocObject*) po2.ptr(); pho2->type_ = PyHoc::HocSectionListIterator; diff --git a/src/oc/hoclist.h b/src/oc/hoclist.h index de9666efa0..3a7036dadc 100644 --- a/src/oc/hoclist.h +++ b/src/oc/hoclist.h @@ -47,8 +47,33 @@ struct hoc_Item { }; using hoc_List = hoc_Item; -#define ITEM0 (hoc_Item*) 0 -#define LIST0 (hoc_List*) 0 +constexpr auto range_sec(hoc_List* iterable) { + struct iterator { + hoc_Item* iter; + bool operator!=(const iterator& other) const { + return iter != other.iter; + } + void operator++() { + iter = iter->next; + } + Section* operator*() const { + return iter->element.sec; + } + }; + struct iterable_wrapper { + hoc_List* iterable; + auto begin() { + return iterator{iterable->next}; + } + auto end() { + return iterator{iterable}; + } + }; + return iterable_wrapper{iterable}; +} + +#define ITEM0 nullptr +#define LIST0 nullptr #define ITERATE(itm, lst) for (itm = (lst)->next; itm != (lst); itm = itm->next) /* diff --git a/test/hoctests/tests/test_seclist.py b/test/hoctests/tests/test_seclist.py new file mode 100644 index 0000000000..0f3c86696a --- /dev/null +++ b/test/hoctests/tests/test_seclist.py @@ -0,0 +1,18 @@ +from neuron import h + +secs = [h.Section() for _ in range(10)] + + +def test(): + sl = h.SectionList() + sl.allroots() + assert len(sl) == len(list(sl)) + b = [(s1, s2) for s1 in sl for s2 in sl] + n = len(sl) + for i in range(n): + for j in range(n): + assert b[i * n + j][0] == b[i + j * n][1] + return sl + + +sl = test()
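Reviewer note, not part of the patch: a minimal Python sketch of the SectionList behavior introduced by the nrnpy_hoc.cpp and seclist.rst changes above, assuming a build that contains them (the section names are arbitrary):

from neuron import h

sl = h.SectionList()
assert len(sl) == 0        # previously raised TypeError: "hoc.SectionList has no len()"
assert not sl              # an empty SectionList is now falsy

soma = h.Section(name="soma")
dend = h.Section(name="dend")
sl.append(soma)
sl.append(dend)

assert len(sl) == 2
assert bool(sl)            # a non-empty SectionList is truthy
assert list(sl) == [s for s in sl]   # equivalent lists, per the seclist.rst addition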