From 41152a55aff042d9341cc0030574f2597d06320e Mon Sep 17 00:00:00 2001 From: David Gardner Date: Fri, 21 Jun 2024 21:08:25 -0700 Subject: [PATCH 1/8] CMake: remove message wrapper macros (#519) Remove `print_error` and `print_warning` macros --- CMakeLists.txt | 2 +- cmake/SundialsBuildOptionsPre.cmake | 2 +- cmake/SundialsDeprecated.cmake | 78 ++++++++++++-------------- cmake/SundialsExampleOptions.cmake | 12 ++-- cmake/SundialsIndexSize.cmake | 12 ++-- cmake/SundialsSetupCompilers.cmake | 14 ++--- cmake/SundialsSetupFortran.cmake | 2 +- cmake/SundialsSetupHIP.cmake | 2 +- cmake/SundialsSetupTesting.cmake | 4 +- cmake/macros/SundialsCMakeMacros.cmake | 49 ---------------- cmake/macros/SundialsOption.cmake | 13 ++--- cmake/tpl/FindSUPERLUMT.cmake | 5 +- cmake/tpl/SundialsAdiak.cmake | 2 +- cmake/tpl/SundialsCaliper.cmake | 2 +- cmake/tpl/SundialsGinkgo.cmake | 10 ++-- cmake/tpl/SundialsHypre.cmake | 4 +- cmake/tpl/SundialsKLU.cmake | 6 +- cmake/tpl/SundialsLapack.cmake | 2 +- cmake/tpl/SundialsMAGMA.cmake | 6 +- cmake/tpl/SundialsOpenMP.cmake | 6 +- cmake/tpl/SundialsPETSC.cmake | 24 ++++---- cmake/tpl/SundialsPthread.cmake | 4 +- cmake/tpl/SundialsRAJA.cmake | 14 ++--- cmake/tpl/SundialsSuperLUDIST.cmake | 6 +- cmake/tpl/SundialsSuperLUMT.cmake | 4 +- cmake/tpl/SundialsTPL.cmake.template | 2 +- cmake/tpl/SundialsTrilinos.cmake | 2 +- 27 files changed, 116 insertions(+), 173 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 0f8fa7933f..d16ee2e0a9 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -117,7 +117,7 @@ set(sundialslib_SOVERSION "${PACKAGE_VERSION_MAJOR}") # Prohibit in-source build if("${CMAKE_SOURCE_DIR}" STREQUAL "${CMAKE_BINARY_DIR}") - print_error("In-source build prohibited.") + message(FATAL_ERROR "In-source build prohibited.") endif() # Organize targets into folders when using an IDE diff --git a/cmake/SundialsBuildOptionsPre.cmake b/cmake/SundialsBuildOptionsPre.cmake index b78212e720..3c3f6b16cb 100644 --- a/cmake/SundialsBuildOptionsPre.cmake +++ b/cmake/SundialsBuildOptionsPre.cmake @@ -182,7 +182,7 @@ sundials_option(BUILD_FORTRAN_MODULE_INTERFACE BOOL "${DOCSTR}" OFF) if(BUILD_FORTRAN_MODULE_INTERFACE) # F2003 interface only supports double precision if(NOT (SUNDIALS_PRECISION MATCHES "DOUBLE")) - print_error("F2003 interface is not compatible with ${SUNDIALS_PRECISION} precision") + message(FATAL_ERROR "F2003 interface is not compatible with ${SUNDIALS_PRECISION} precision") endif() # Allow a user to set where the Fortran modules will be installed diff --git a/cmake/SundialsDeprecated.cmake b/cmake/SundialsDeprecated.cmake index 702ee00aa5..385a11361b 100644 --- a/cmake/SundialsDeprecated.cmake +++ b/cmake/SundialsDeprecated.cmake @@ -17,9 +17,8 @@ # if(DEFINED F2003_INTERFACE_ENABLE) - print_warning("The CMake option F2003_INTERFACE_ENABLE is deprecated" - "Use BUILD_FORTRAN_MODULE_INTERFACE instead" - MODE DEPRECATION) + message(DEPRECATION "The CMake option F2003_INTERFACE_ENABLE is deprecated. " + "Use BUILD_FORTRAN_MODULE_INTERFACE instead.") set(BUILD_FORTRAN_MODULE_INTERFACE ${F2003_INTERFACE_ENABLE} CACHE BOOL "Enable Fortran 2003 module interfaces") endif() @@ -30,120 +29,115 @@ unset(F2003_INTERFACE_ENABLE CACHE) # if(DEFINED MPI_ENABLE) - print_warning("The CMake option MPI_ENABLE is deprecated" "Use ENABLE_MPI instead" - MODE DEPRECATION) + message(DEPRECATION "The CMake option MPI_ENABLE is deprecated. 
" + "Use ENABLE_MPI instead.") set(ENABLE_MPI ${MPI_ENABLE} CACHE BOOL "Enable MPI support" FORCE) unset(MPI_ENABLE CACHE) endif() if(DEFINED OPENMP_ENABLE) - print_warning("The CMake option OPENMP_ENABLE is deprecated" "Use ENABLE_OPENMP instead" - MODE DEPRECATION) + message(DEPRECATION "The CMake option OPENMP_ENABLE is deprecated. " + "Use ENABLE_OPENMP instead.") set(ENABLE_OPENMP ${OPENMP_ENABLE} CACHE BOOL "Enable OpenMP support" FORCE) unset(OPENMP_ENABLE CACHE) endif() if(DEFINED OPENMP_DEVICE_ENABLE) - print_warning("The CMake option OPENMP_DEVICE_ENABLE is deprecated" - "Use ENABLE_OPENMP_DEVICE instead" - MODE DEPRECATION) + message(DEPRECATION "The CMake option OPENMP_DEVICE_ENABLE is deprecated. " + "Use ENABLE_OPENMP_DEVICE instead.") set(ENABLE_OPENMP_DEVICE ${OPENMP_DEVICE_ENABLE} CACHE BOOL "Enable OpenMP device offloading support" FORCE) unset(OPENMP_DEVICE_ENABLE CACHE) endif() if(DEFINED SKIP_OPENMP_DEVICE_CHECK) - print_warning("The CMake option SKIP_OPENMP_DEVICE_CHECK is deprecated" - "Use OPENMP_DEVICE_WORKS instead" - MODE DEPRECATION) + message(DEPRECATION "The CMake option SKIP_OPENMP_DEVICE_CHECK is deprecated. " + "Use OPENMP_DEVICE_WORKS instead.") set(OPENMP_DEVICE_WORKS ${SKIP_OPENMP_DEVICE_CHECK} CACHE BOOL "Skip the compiler check for OpenMP device offloading" FORCE) unset(SKIP_OPENMP_DEVICE_CHECK CACHE) endif() if(DEFINED PTHREAD_ENABLE) - print_warning("The CMake option PTHREAD_ENABLE is deprecated" "Use ENABLE_PTHREAD instead" - MODE DEPRECATION) + message(DEPRECATION "The CMake option PTHREAD_ENABLE is deprecated. " + "Use ENABLE_PTHREAD instead") set(ENABLE_PTHREAD ${PTHREAD_ENABLE} CACHE BOOL "Enable Pthreads support" FORCE) unset(PTHREAD_ENABLE CACHE) endif() if(DEFINED CUDA_ENABLE) - print_warning("The CMake option CUDA_ENABLE is deprecated" "Use ENABLE_CUDA instead" - MODE DEPRECATION) + message(DEPRECATION "The CMake option CUDA_ENABLE is deprecated. " + "Use ENABLE_CUDA instead.") set(ENABLE_CUDA ${CUDA_ENABLE} CACHE BOOL "Enable CUDA support" FORCE) unset(CUDA_ENABLE CACHE) endif() if(DEFINED LAPACK_ENABLE) - print_warning("The CMake option LAPACK_ENABLE is deprecated" "Use ENABLE_LAPACK instead" - MODE DEPRECATION) + message(DEPRECATION "The CMake option LAPACK_ENABLE is deprecated. " + "Use ENABLE_LAPACK instead.") set(ENABLE_LAPACK ${LAPACK_ENABLE} CACHE BOOL "Enable LAPACK support" FORCE) unset(LAPACK_ENABLE CACHE) endif() if(DEFINED SUPERLUDIST_ENABLE) - print_warning("The CMake option SUPERLUDIST_ENABLE is deprecated" - "Use ENABLE_SUPERLUDIST instead" - MODE DEPRECATION) + message(DEPRECATION "The CMake option SUPERLUDIST_ENABLE is deprecated. " + "Use ENABLE_SUPERLUDIST instead.") set(ENABLE_SUPERLUDIST ${SUPERLUDIST_ENABLE} CACHE BOOL "Enable SuperLU_DIST support" FORCE) unset(SUPERLUDIST_ENABLE CACHE) endif() # Deprecated with SUNDIALS 6.4.0 if(DEFINED SUPERLUDIST_LIBRARY_DIR) - print_warning("The CMake option SUPERLUDIST_LIBRARY_DIR is deprecated" - "Use SUPERLUDIST_DIR instead" - MODE DEPRECATION) + message(DEPRECATION "The CMake option SUPERLUDIST_LIBRARY_DIR is deprecated. " + "Use SUPERLUDIST_DIR instead.") set(SUPERLUDIST_DIR "${SUPERLUDIST_LIBRARY_DIR}/../" CACHE BOOL "SuperLU_DIST root directory" FORCE) unset(SUPERLUDIST_LIBRARY_DIR CACHE) endif() if(DEFINED SUPERLUDIST_INCLUDE_DIR) - print_warning("The CMake option SUPERLUDIST_INCLUDE_DIR is deprecated" - "Use SUPERLUDIST_INCLUDE_DIRS instead" - MODE DEPRECATION) + message(DEPRECATION "The CMake option SUPERLUDIST_INCLUDE_DIR is deprecated. 
" + "Use SUPERLUDIST_INCLUDE_DIRS instead.") set(SUPERLUDIST_INCLUDE_DIRS "${SUPERLUDIST_INCLUDE_DIR}" CACHE BOOL "SuperLU_DIST include directoroes" FORCE) unset(SUPERLUDIST_INCLUDE_DIR CACHE) endif() if(DEFINED SUPERLUMT_ENABLE) - print_warning("The CMake option SUPERLUMT_ENABLE is deprecated" "Use ENABLE_SUPERLUMT instead" - MODE DEPRECATION) + message(DEPRECATION "The CMake option SUPERLUMT_ENABLE is deprecated. " + "Use ENABLE_SUPERLUMT instead.") set(ENABLE_SUPERLUMT ${SUPERLUMT_ENABLE} CACHE BOOL "Enable SuperLU_MT support" FORCE) unset(SUPERLUMT_ENABLE CACHE) endif() if(DEFINED KLU_ENABLE) - print_warning("The CMake option KLU_ENABLE is deprecated" "Use ENABLE_KLU instead" - MODE DEPRECATION) + message(DEPRECATION "The CMake option KLU_ENABLE is deprecated. " + "Use ENABLE_KLU instead.") set(ENABLE_KLU ${KLU_ENABLE} CACHE BOOL "Enable KLU support" FORCE) unset(KLU_ENABLE CACHE) endif() if(DEFINED HYPRE_ENABLE) - print_warning("The CMake option HYPRE_ENABLE is deprecated" "Use ENABLE_HYPRE instead" - MODE DEPRECATION) + message(DEPRECATION "The CMake option HYPRE_ENABLE is deprecated. " + "Use ENABLE_HYPRE instead.") set(ENABLE_HYPRE ${HYPRE_ENABLE} CACHE BOOL "Enable HYPRE support" FORCE) unset(HYPRE_ENABLE CACHE) endif() if(DEFINED PETSC_ENABLE) - print_warning("The CMake option PETSC_ENABLE is deprecated" "Use ENABLE_PETSC instead" - MODE DEPRECATION) + message(DEPRECATION "The CMake option PETSC_ENABLE is deprecated. " + "Use ENABLE_PETSC instead.") set(ENABLE_PETSC ${PETSC_ENABLE} CACHE BOOL "Enable PETSC support" FORCE) unset(PETSC_ENABLE CACHE) endif() if(DEFINED Trilinos_ENABLE) - print_warning("The CMake option Trilinos_ENABLE is deprecated" "Use ENABLE_TRILINOS instead" - MODE DEPRECATION) + message(DEPRECATION "The CMake option Trilinos_ENABLE is deprecated. " + "Use ENABLE_TRILINOS instead.") set(ENABLE_TRILINOS ${Trilinos_ENABLE} CACHE BOOL "Enable Trilinos support" FORCE) unset(Trilinos_ENABLE CACHE) endif() if(DEFINED RAJA_ENABLE) - print_warning("The CMake option RAJA_ENABLE is deprecated" "Use ENABLE_RAJA instead" - MODE DEPRECATION) + message(DEPRECATION "The CMake option RAJA_ENABLE is deprecated. " + "Use ENABLE_RAJA instead.") set(ENABLE_RAJA ${RAJA_ENABLE} CACHE BOOL "Enable RAJA support" FORCE) unset(RAJA_ENABLE CACHE) endif() @@ -153,8 +147,8 @@ endif() # if(DEFINED CUDA_ARCH) - print_warning("The CMake option CUDA_ARCH is deprecated" "Use CMAKE_CUDA_ARCHITECTURES instead" - MODE DEPRECATION) + print_warning("The CMake option CUDA_ARCH is deprecated. " + "Use CMAKE_CUDA_ARCHITECTURES instead.") # convert sm_** to just ** string(REGEX MATCH "[0-9]+" arch_name "${CUDA_ARCH}") set(CMAKE_CUDA_ARCHITECTURES ${arch_name} CACHE STRING "CUDA Architectures" FORCE) diff --git a/cmake/SundialsExampleOptions.cmake b/cmake/SundialsExampleOptions.cmake index 29f099eb1e..40d692771f 100644 --- a/cmake/SundialsExampleOptions.cmake +++ b/cmake/SundialsExampleOptions.cmake @@ -42,16 +42,16 @@ if(BUILD_FORTRAN_MODULE_INTERFACE) # Fortran 2003 examples only support double precision if(EXAMPLES_ENABLE_F2003 AND (NOT (SUNDIALS_PRECISION MATCHES "DOUBLE"))) - print_warning("F2003 examples are not compatible with ${SUNDIALS_PRECISION} precision. " - "Setting EXAMPLES_ENABLE_F2003 to OFF.") + message(WARNING "F2003 examples are not compatible with ${SUNDIALS_PRECISION} precision. 
" + "Setting EXAMPLES_ENABLE_F2003 to OFF.") force_variable(EXAMPLES_ENABLE_F2003 BOOL "${DOCSTR}" OFF) endif() else() # set back to OFF (in case it was ON) if(EXAMPLES_ENABLE_F2003) - print_warning("EXAMPLES_ENABLE_F2003 is ON but BUILD_FORTRAN_MODULE_INTERFACE is OFF. " - "Setting EXAMPLES_ENABLE_F2003 to OFF.") + message(WARNING "EXAMPLES_ENABLE_F2003 is ON but BUILD_FORTRAN_MODULE_INTERFACE is OFF. " + "Setting EXAMPLES_ENABLE_F2003 to OFF.") force_variable(EXAMPLES_ENABLE_F2003 BOOL "${DOCSTR}" OFF) endif() @@ -75,8 +75,8 @@ sundials_option(EXAMPLES_INSTALL_PATH PATH "Output directory for installing exam # If examples are to be exported, check where we should install them. if(EXAMPLES_INSTALL AND NOT EXAMPLES_INSTALL_PATH) - print_warning("The example installation path is empty. " - "Example installation path was reset to its default value") + message(WARNING "The example installation path is empty. Example installation " + "path was reset to its default value") set(EXAMPLES_INSTALL_PATH "${CMAKE_INSTALL_PREFIX}/examples" CACHE STRING "Output directory for installing example files" FORCE) endif() diff --git a/cmake/SundialsIndexSize.cmake b/cmake/SundialsIndexSize.cmake index 88c676b41b..6498637bd3 100644 --- a/cmake/SundialsIndexSize.cmake +++ b/cmake/SundialsIndexSize.cmake @@ -42,9 +42,8 @@ if(SUNDIALS_INDEX_SIZE MATCHES "64") endforeach() if(NOT SUNDIALS_CINDEX_TYPE) - print_error("No integer type of size 8 was found.\n\ - Tried ${POSSIBLE_INT64}.\n\ - Try setting the advanced option SUNDIALS_INDEX_TYPE.") + message(FATAL_ERROR "No integer type of size 8 was found. Tried " + "${POSSIBLE_INT64}. Try setting the advanced option SUNDIALS_INDEX_TYPE.") endif() # set Fortran integer size too @@ -70,13 +69,12 @@ elseif(SUNDIALS_INDEX_SIZE MATCHES "32") endforeach() if(NOT SUNDIALS_CINDEX_TYPE) - print_error("No integer type of size 4 was found.\n\ - Tried ${POSSIBLE_INT32}\n\ - Try setting the advanced option SUNDIALS_INDEX_TYPE.") + message(FATAL_ERROR "No integer type of size 4 was found. Tried " + "${POSSIBLE_INT32}. Try setting the advanced option SUNDIALS_INDEX_TYPE.") endif() # set Fortran integer size too set(SUNDIALS_FINDEX_TYPE "4") else() - print_error("Invalid index size.") + message(FATAL_ERROR "Invalid index size.") endif() diff --git a/cmake/SundialsSetupCompilers.cmake b/cmake/SundialsSetupCompilers.cmake index f4b226e96d..7790f61609 100644 --- a/cmake/SundialsSetupCompilers.cmake +++ b/cmake/SundialsSetupCompilers.cmake @@ -294,12 +294,12 @@ sundials_option(SUNDIALS_LAPACK_UNDERSCORES STRING # If used, both case and underscores must be set if((NOT SUNDIALS_LAPACK_CASE) AND SUNDIALS_LAPACK_UNDERSCORES) - print_error("If SUNDIALS_LAPACK_UNDERSCORES is set, " - "SUNDIALS_LAPACK_CASE must also be set.") + message(FATAL_ERROR "If SUNDIALS_LAPACK_UNDERSCORES is set, " + "SUNDIALS_LAPACK_CASE must also be set.") endif() if(SUNDIALS_LAPACK_CASE AND (NOT SUNDIALS_LAPACK_UNDERSCORES)) - print_error("If SUNDIALS_LAPACK_CASE is set, " - "SUNDIALS_LAPACK_UNDERSCORES must also be set.") + message(FATAL_ERROR "If SUNDIALS_LAPACK_CASE is set, " + "SUNDIALS_LAPACK_UNDERSCORES must also be set.") endif() # Did the user provide a name-mangling scheme? 
@@ -324,7 +324,7 @@ if(SUNDIALS_LAPACK_CASE AND SUNDIALS_LAPACK_UNDERSCORES) set(LAPACK_MANGLE_MACRO1 "#define SUNDIALS_LAPACK_FUNC(name,NAME) name ## __") set(LAPACK_MANGLE_MACRO2 "#define SUNDIALS_LAPACK_FUNC_(name,NAME) name ## __") else() - print_error("Invalid SUNDIALS_LAPACK_UNDERSCORES option.") + message(FATAL_ERROR "Invalid SUNDIALS_LAPACK_UNDERSCORES option.") endif() elseif(SUNDIALS_LAPACK_CASE MATCHES "UPPER") if(SUNDIALS_LAPACK_UNDERSCORES MATCHES "NONE") @@ -337,10 +337,10 @@ if(SUNDIALS_LAPACK_CASE AND SUNDIALS_LAPACK_UNDERSCORES) set(LAPACK_MANGLE_MACRO1 "#define SUNDIALS_LAPACK_FUNC(name,NAME) NAME ## __") set(LAPACK_MANGLE_MACRO2 "#define SUNDIALS_LAPACK_FUNC_(name,NAME) NAME ## __") else() - print_error("Invalid SUNDIALS_LAPACK_UNDERSCORES option.") + message(FATAL_ERROR "Invalid SUNDIALS_LAPACK_UNDERSCORES option.") endif() else() - print_error("Invalid SUNDIALS_LAPACK_CASE option.") + message(FATAL_ERROR "Invalid SUNDIALS_LAPACK_CASE option.") endif() # name-mangling scheme has been manually set diff --git a/cmake/SundialsSetupFortran.cmake b/cmake/SundialsSetupFortran.cmake index 1daee0b7b2..de9beea905 100644 --- a/cmake/SundialsSetupFortran.cmake +++ b/cmake/SundialsSetupFortran.cmake @@ -73,7 +73,7 @@ if(BUILD_FORTRAN_MODULE_INTERFACE) message(STATUS "Checking whether ${CMAKE_Fortran_COMPILER} supports F2003 -- no") message(STATUS "Check output:") message("${COMPILE_OUTPUT}") - print_error("BUILD_FORTRAN_MODULE_INTERFACE is set to ON, but the CMAKE_Fortran_COMPILER does not support F2003") + message(FATAL_ERROR "BUILD_FORTRAN_MODULE_INTERFACE is set to ON, but the CMAKE_Fortran_COMPILER does not support F2003") endif() else() message(STATUS "Skipped F2003 tests, assuming ${CMAKE_Fortran_COMPILER} supports the f2003 standard. To rerun the F2003 tests, set F2003_FOUND to FALSE.") diff --git a/cmake/SundialsSetupHIP.cmake b/cmake/SundialsSetupHIP.cmake index 7c147019a7..bc5a6c3776 100644 --- a/cmake/SundialsSetupHIP.cmake +++ b/cmake/SundialsSetupHIP.cmake @@ -46,7 +46,7 @@ set(CMAKE_PREFIX_PATH "${ROCM_PATH};${HIP_PATH}") find_package(HIP REQUIRED) if("${HIP_COMPILER}" STREQUAL "hcc") - print_error("Deprecated HCC compiler is not supported" "Please update ROCm") + message(FATAL_ERROR "Deprecated HCC compiler is not supported" "Please update ROCm") endif() message(STATUS "HIP version: ${HIP_VERSION}") diff --git a/cmake/SundialsSetupTesting.cmake b/cmake/SundialsSetupTesting.cmake index 57dd7b1635..11a445900d 100644 --- a/cmake/SundialsSetupTesting.cmake +++ b/cmake/SundialsSetupTesting.cmake @@ -25,7 +25,7 @@ if (SUNDIALS_TEST_DEVTESTS OR BUILD_BENCHMARKS) # look for the testRunner script in the test directory find_program(TESTRUNNER testRunner PATHS test NO_DEFAULT_PATH) if(NOT TESTRUNNER) - print_error("Could not locate testRunner. Set SUNDIALS_TEST_DEVTESTS=OFF or BUILD_BENCHMARKS=OFF to continue.") + message(FATAL_ERROR "Could not locate testRunner. 
Set SUNDIALS_TEST_DEVTESTS=OFF or BUILD_BENCHMARKS=OFF to continue.") endif() message(STATUS "Found testRunner: ${TESTRUNNER}") set(TESTRUNNER ${TESTRUNNER} CACHE INTERNAL "") @@ -56,7 +56,7 @@ if(SUNDIALS_TEST_DEVTESTS) if(SUNDIALS_TEST_ANSWER_DIR) message(STATUS "Using non-default test answer directory: ${SUNDIALS_TEST_ANSWER_DIR}") if(NOT EXISTS ${SUNDIALS_TEST_ANSWER_DIR}) - print_error("SUNDIALS_TEST_ANSWER_DIR does not exist!") + message(FATAL_ERROR "SUNDIALS_TEST_ANSWER_DIR does not exist!") endif() endif() diff --git a/cmake/macros/SundialsCMakeMacros.cmake b/cmake/macros/SundialsCMakeMacros.cmake index 237c7c6ac8..20d101c834 100644 --- a/cmake/macros/SundialsCMakeMacros.cmake +++ b/cmake/macros/SundialsCMakeMacros.cmake @@ -40,55 +40,6 @@ macro(ADD_PREFIX prefix rootlist) set(${rootlist} ${outlist}) endmacro(ADD_PREFIX) -# Macro to print warnings. - -macro(print_warning message action) - set(options ) - set(oneValueArgs MODE) - set(multiValueArgs ) - - # parse inputs and create variables print_warning_ - cmake_parse_arguments(print_warning "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN} ) - - if(print_warning_MODE) - set(_mode ${print_warning_MODE}) - else() - set(_mode WARNING) - endif() - - set(MSG - "------------------------------------------------------------------------\n" - "WARNING: ${message}\n" - "${action}\n" - "------------------------------------------------------------------------") - - message(${_mode} ${MSG}) -endmacro() - -# Macro to print error messages. - -macro(print_error message) - set(options ) - set(oneValueArgs MODE) - set(multiValueArgs ) - - # parse inputs and create variables print_warning_ - cmake_parse_arguments(print_error "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN} ) - - if(print_error_MODE) - set(_mode ${print_error_MODE}) - else() - set(_mode FATAL_ERROR) - endif() - - set(MSG - "************************************************************************\n" - "ERROR: ${message}\n" - "************************************************************************") - - message(${_mode} ${MSG}) -endmacro() - # Returns an unquoted string. Note that CMake will readily turn such # strings back into lists, due to the duality of lists and # semicolon-separated strings. So be careful how you use it. diff --git a/cmake/macros/SundialsOption.cmake b/cmake/macros/SundialsOption.cmake index 252db50f69..e80ed5aac8 100644 --- a/cmake/macros/SundialsOption.cmake +++ b/cmake/macros/SundialsOption.cmake @@ -74,15 +74,14 @@ macro(sundials_option NAME TYPE DOCSTR DEFAULT_VALUE) # dependencies were previously met but are no longer satisfied if(DEFINED ${NAME}) string(CONCAT _warn_msg_string - "The variable ${NAME} was set to ${${NAME}} " - "but not all of its dependencies " - "(${depends_on_dependencies_not_met}) evaluate to TRUE." - ) + "The variable ${NAME} was set to ${${NAME}} but not all of its " + "dependencies (${depends_on_dependencies_not_met}) evaluate to TRUE. 
" + "Unsetting ${NAME}.") unset(${NAME} CACHE) if(sundials_option_DEPENDS_ON_THROW_ERROR) - print_error("${_warn_msg_string}" "Unsetting ${NAME}") + message(FATAL_ERROR "${_warn_msg_string}") else() - print_warning("${_warn_msg_string}" "Unsetting ${NAME}") + message(WARNING "${_warn_msg_string}") endif() endif() @@ -93,7 +92,7 @@ macro(sundials_option NAME TYPE DOCSTR DEFAULT_VALUE) foreach(_option ${${NAME}}) if(NOT (${_option} IN_LIST sundials_option_OPTIONS)) list(JOIN sundials_option_OPTIONS ", " _options_msg) - print_error("Value of ${NAME} must be one of ${_options_msg}") + message(FATAL_ERROR "Value of ${NAME} must be one of ${_options_msg}") endif() endforeach() get_property(is_in_cache CACHE ${NAME} PROPERTY TYPE) diff --git a/cmake/tpl/FindSUPERLUMT.cmake b/cmake/tpl/FindSUPERLUMT.cmake index 8c9cbbfb73..4e1acb30b0 100644 --- a/cmake/tpl/FindSUPERLUMT.cmake +++ b/cmake/tpl/FindSUPERLUMT.cmake @@ -38,7 +38,8 @@ force_variable(SUPERLUMT_THREAD_TYPE STRING "SuperLU_MT threading type: OPENMP o if(SUPERLUMT_THREAD_TYPE AND NOT SUPERLUMT_THREAD_TYPE STREQUAL "OPENMP" AND NOT SUPERLUMT_THREAD_TYPE STREQUAL "PTHREAD") - print_error("Unknown thread type: ${SUPERLUMT_THREAD_TYPE}" "Please enter PTHREAD or OPENMP") + message(FATAL_ERROR "Unknown thread type: ${SUPERLUMT_THREAD_TYPE} " + "Please enter PTHREAD or OPENMP") endif() # check if the threading library has been found @@ -51,7 +52,7 @@ if(SUPERLUMT_THREAD_TYPE STREQUAL "PTHREAD") message(STATUS "Using Pthreads") else() set(PTHREADS_FOUND FALSE) - print_error("Could not determine Pthreads compiler flags") + message(FATAL_ERROR "Could not determine Pthreads compiler flags") endif() endif() else(SUPERLUMT_THREAD_TYPE STREQUAL "OPENMP") diff --git a/cmake/tpl/SundialsAdiak.cmake b/cmake/tpl/SundialsAdiak.cmake index a2b48cf1b9..b5342a7216 100644 --- a/cmake/tpl/SundialsAdiak.cmake +++ b/cmake/tpl/SundialsAdiak.cmake @@ -83,7 +83,7 @@ if(adiak_FOUND AND (NOT adiak_WORKS)) message(STATUS "Checking if adiak works with SUNDIALS... FAILED") message(STATUS "Check output: ") message("${COMPILE_OUTPUT}") - print_error("SUNDIALS interface to adiak is not functional.") + message(FATAL_ERROR "SUNDIALS interface to adiak is not functional.") endif() elseif(adiak_FOUND AND adiak_WORKS) diff --git a/cmake/tpl/SundialsCaliper.cmake b/cmake/tpl/SundialsCaliper.cmake index 8a665a3f05..998376266d 100644 --- a/cmake/tpl/SundialsCaliper.cmake +++ b/cmake/tpl/SundialsCaliper.cmake @@ -102,7 +102,7 @@ if(CALIPER_FOUND AND (NOT CALIPER_WORKS)) message(STATUS "Checking if CALIPER works with SUNDIALS... 
FAILED") message(STATUS "Check output: ") message("${COMPILE_OUTPUT}") - print_error("SUNDIALS interface to CALIPER is not functional.") + message(FATAL_ERROR "SUNDIALS interface to CALIPER is not functional.") endif() elseif(CALIPER_FOUND AND CALIPER_WORKS) diff --git a/cmake/tpl/SundialsGinkgo.cmake b/cmake/tpl/SundialsGinkgo.cmake index 595c96aa5a..37dd821a2b 100644 --- a/cmake/tpl/SundialsGinkgo.cmake +++ b/cmake/tpl/SundialsGinkgo.cmake @@ -55,23 +55,23 @@ message(STATUS "GINKGO CXX FLAGS: ${GINKGO_INTERFACE_CXX_FLAGS}") # ----------------------------------------------------------------------------- if(Ginkgo_FOUND AND (NOT GINKGO_WORKS)) if(SUNDIALS_PRECISION MATCHES "extended|EXTENDED") - print_error("SUNDIALS GINKGO interface is not compatible with extended precision") + message(FATAL_ERROR "SUNDIALS GINKGO interface is not compatible with extended precision") endif() if(SUNDIALS_GINKGO_BACKENDS MATCHES "CUDA" AND NOT ENABLE_CUDA) - print_error("SUNDIALS_GINKGO_BACKENDS includes CUDA but CUDA is not enabled. Set ENABLE_CUDA=ON or change the backend.") + message(FATAL_ERROR "SUNDIALS_GINKGO_BACKENDS includes CUDA but CUDA is not enabled. Set ENABLE_CUDA=ON or change the backend.") endif() if(SUNDIALS_GINKGO_BACKENDS MATCHES "HIP" AND NOT ENABLE_HIP) - print_error("SUNDIALS_GINKGO_BACKENDS includes HIP but HIP is not enabled. Set ENABLE_HIP=ON or change the backend.") + message(FATAL_ERROR "SUNDIALS_GINKGO_BACKENDS includes HIP but HIP is not enabled. Set ENABLE_HIP=ON or change the backend.") endif() if(SUNDIALS_GINKGO_BACKENDS MATCHES "SYCL" AND NOT ENABLE_SYCL) - print_error("SUNDIALS_GINKGO_BACKENDS includes SYCL but SYCL is not enabled. Set ENABLE_SYCL=ON or change the backend.") + message(FATAL_ERROR "SUNDIALS_GINKGO_BACKENDS includes SYCL but SYCL is not enabled. Set ENABLE_SYCL=ON or change the backend.") endif() if(SUNDIALS_GINKGO_BACKENDS MATCHES "OMP" AND NOT ENABLE_OPENMP) - print_error("SUNDIALS_GINKGO_BACKENDS includes OMP but OpenMP is not enabled. Set ENABLE_OPENMP=ON or change the backend.") + message(FATAL_ERROR "SUNDIALS_GINKGO_BACKENDS includes OMP but OpenMP is not enabled. Set ENABLE_OPENMP=ON or change the backend.") endif() message(STATUS "Checking if GINKGO works... OK") diff --git a/cmake/tpl/SundialsHypre.cmake b/cmake/tpl/SundialsHypre.cmake index 56a6158165..ea27e32973 100644 --- a/cmake/tpl/SundialsHypre.cmake +++ b/cmake/tpl/SundialsHypre.cmake @@ -39,7 +39,7 @@ endif() if(ENABLE_HYPRE) # Using hypre requres building with MPI enabled if(NOT ENABLE_MPI) - print_error("MPI is required for hypre support. Set ENABLE_MPI to ON.") + message(FATAL_ERROR "MPI is required for hypre support. Set ENABLE_MPI to ON.") endif() # Using hypre requres C99 or newer if(CMAKE_C_STANDARD STREQUAL "90") @@ -112,7 +112,7 @@ if(HYPRE_FOUND AND (NOT HYPRE_WORKS)) message(STATUS "Checking if HYPRE works... 
FAILED") message(STATUS "Check output: ") message("${COMPILE_OUTPUT}") - print_error("SUNDIALS interface to HYPRE is not functional.") + message(FATAL_ERROR "SUNDIALS interface to HYPRE is not functional.") endif() elseif(HYPRE_FOUND AND HYPRE_WORKS) diff --git a/cmake/tpl/SundialsKLU.cmake b/cmake/tpl/SundialsKLU.cmake index aa71405687..f3c006608c 100644 --- a/cmake/tpl/SundialsKLU.cmake +++ b/cmake/tpl/SundialsKLU.cmake @@ -38,7 +38,7 @@ endif() # KLU does not support single or extended precision if(SUNDIALS_PRECISION MATCHES "SINGLE" OR SUNDIALS_PRECISION MATCHES "EXTENDED") - print_error("KLU is not compatible with ${SUNDIALS_PRECISION} precision") + message(FATAL_ERROR "KLU is not compatible with ${SUNDIALS_PRECISION} precision") endif() # ----------------------------------------------------------------------------- @@ -69,7 +69,7 @@ if(KLU_FOUND AND (NOT KLU_WORKS)) set(CMAKE_REQUIRED_INCLUDES ${save_CMAKE_REQUIRED_INCLUDES}) message(STATUS "Size of SuiteSparse_long is ${SIZEOF_SUITESPARSE_LONG}") if(NOT SIZEOF_SUITESPARSE_LONG EQUAL "8") - print_error("Size of 'sunindextype' is 8 but size of 'SuiteSparse_long' is ${SIZEOF_SUITESPARSE_LONG}. KLU cannot be used.") + message(FATAL_ERROR "Size of 'sunindextype' is 8 but size of 'SuiteSparse_long' is ${SIZEOF_SUITESPARSE_LONG}. KLU cannot be used.") endif() endif() @@ -119,7 +119,7 @@ if(KLU_FOUND AND (NOT KLU_WORKS)) message(STATUS "Checking if KLU works... FAILED") message(STATUS "Check output: ") message("${COMPILE_OUTPUT}") - print_error("SUNDIALS interface to KLU is not functional.") + message(FATAL_ERROR "SUNDIALS interface to KLU is not functional.") endif() elseif(KLU_FOUND AND KLU_WORKS) diff --git a/cmake/tpl/SundialsLapack.cmake b/cmake/tpl/SundialsLapack.cmake index 63b8514520..0047d6afd5 100644 --- a/cmake/tpl/SundialsLapack.cmake +++ b/cmake/tpl/SundialsLapack.cmake @@ -355,7 +355,7 @@ if(LAPACK_LIBRARIES AND (NOT LAPACK_WORKS)) message(STATUS "Checking if LAPACK works with SUNDIALS... FAILED") message(STATUS "Check output: ") message("${COMPILE_OUTPUT}") - print_error("SUNDIALS interface to LAPACK is not functional.") + message(FATAL_ERROR "SUNDIALS interface to LAPACK is not functional.") endif() elseif(LAPACK_LIBRARIES AND LAPACK_WORKS) diff --git a/cmake/tpl/SundialsMAGMA.cmake b/cmake/tpl/SundialsMAGMA.cmake index e821506c86..6b11a92b8d 100644 --- a/cmake/tpl/SundialsMAGMA.cmake +++ b/cmake/tpl/SundialsMAGMA.cmake @@ -37,7 +37,7 @@ endif() # ----------------------------------------------------------------------------- if(SUNDIALS_PRECISION MATCHES "extended") - print_error("SUNDIALS MAGMA interface is not compatible with extended precision") + message(FATAL_ERROR "SUNDIALS MAGMA interface is not compatible with extended precision") endif() # ----------------------------------------------------------------------------- @@ -57,10 +57,10 @@ message(STATUS "SUNDIALS_MAGMA_BACKENDS: ${SUNDIALS_MAGMA_BACKENDS}") if(MAGMA_FOUND AND (NOT MAGMA_WORKS)) if(SUNDIALS_MAGMA_BACKENDS MATCHES "CUDA" AND NOT ENABLE_CUDA) - print_error("SUNDIALS_MAGMA_BACKENDS includes CUDA but CUDA is not enabled. Set ENABLE_CUDA=ON or change the backend.") + message(FATAL_ERROR "SUNDIALS_MAGMA_BACKENDS includes CUDA but CUDA is not enabled. Set ENABLE_CUDA=ON or change the backend.") endif() if(SUNDIALS_MAGMA_BACKENDS MATCHES "HIP" AND NOT ENABLE_HIP) - print_error("SUNDIALS_MAGMA_BACKENDS includes HIP but HIP is not enabled. Set ENABLE_HIP=ON or change the backend.") + message(FATAL_ERROR "SUNDIALS_MAGMA_BACKENDS includes HIP but HIP is not enabled. 
Set ENABLE_HIP=ON or change the backend.") endif() set(MAGMA_WORKS TRUE CACHE BOOL "MAGMA works with SUNDIALS as configured" FORCE) diff --git a/cmake/tpl/SundialsOpenMP.cmake b/cmake/tpl/SundialsOpenMP.cmake index aff73d42c4..d845a27888 100644 --- a/cmake/tpl/SundialsOpenMP.cmake +++ b/cmake/tpl/SundialsOpenMP.cmake @@ -69,7 +69,7 @@ if(OPENMP_FOUND AND (ENABLE_OPENMP_DEVICE OR SUPERLUDIST_OpenMP)) # The user has asked for checks to be skipped, assume offloading is supported set(OPENMP45_FOUND TRUE) set(OPENMP_SUPPORTS_DEVICE_OFFLOADING TRUE) - print_warning("Skipping OpenMP device/version check." "SUNDIALS OpenMP functionality dependent on OpenMP 4.5+ is not guaranteed.") + message(WARNING "Skipping OpenMP device/version check." "SUNDIALS OpenMP functionality dependent on OpenMP 4.5+ is not guaranteed.") else() @@ -84,9 +84,9 @@ if(OPENMP_FOUND AND (ENABLE_OPENMP_DEVICE OR SUPERLUDIST_OpenMP)) message(STATUS "Checking whether OpenMP supports device offloading -- no") set(OPENMP45_FOUND FALSE) set(OPENMP_SUPPORTS_DEVICE_OFFLOADING FALSE) - print_error("The found OpenMP version does not support device offloading.") + message(FATAL_ERROR "The found OpenMP version does not support device offloading.") endif() endif() -endif() \ No newline at end of file +endif() diff --git a/cmake/tpl/SundialsPETSC.cmake b/cmake/tpl/SundialsPETSC.cmake index 1dc42ce873..dddcf47180 100644 --- a/cmake/tpl/SundialsPETSC.cmake +++ b/cmake/tpl/SundialsPETSC.cmake @@ -38,11 +38,11 @@ endif() # Using PETSc requires building with MPI enabled if(ENABLE_PETSC AND NOT ENABLE_MPI) - print_error("MPI is required for PETSc support. Set ENABLE_MPI to ON.") + message(FATAL_ERROR "MPI is required for PETSc support. Set ENABLE_MPI to ON.") endif() if(SUNDIALS_PRECISION MATCHES "EXTENDED") - print_error("SUNDIALS is not compatible with PETSc when using ${SUNDIALS_PRECISION} precision") + message(FATAL_ERROR "SUNDIALS is not compatible with PETSc when using ${SUNDIALS_PRECISION} precision") endif() # ----------------------------------------------------------------------------- @@ -68,22 +68,22 @@ if(PETSC_FOUND AND (NOT PETSC_WORKS)) if(NOT ("${SUNDIALS_INDEX_SIZE}" MATCHES "${PETSC_INDEX_SIZE}")) string(CONCAT _err_msg_string - "PETSc not functional due to index size mismatch:\n" - "SUNDIALS_INDEX_SIZE=${SUNDIALS_INDEX_SIZE}, " - "but PETSc was built with ${PETSC_INDEX_SIZE}-bit indices\n" - "PETSC_DIR: ${PETSC_DIR}\n") - print_error("${_err_msg_string}") + "PETSc not functional due to index size mismatch:\n" + "SUNDIALS_INDEX_SIZE=${SUNDIALS_INDEX_SIZE}, " + "but PETSc was built with ${PETSC_INDEX_SIZE}-bit indices\n" + "PETSC_DIR: ${PETSC_DIR}\n") + message(FATAL_ERROR "${_err_msg_string}") endif() string(TOUPPER "${PETSC_PRECISION}" _petsc_precision) string(TOUPPER "${SUNDIALS_PRECISION}" _sundials_precision) if(NOT ("${_sundials_precision}" MATCHES "${_petsc_precision}")) string(CONCAT _err_msg_string - "PETSc not functional due to real type precision mismatch:\n" - "SUNDIALS_PRECISION=${_sundials_precision}, " - "but PETSc was built with ${_petsc_precision} precision\n" - "PETSC_DIR: ${PETSC_DIR}\n") - print_error("${_err_msg_string}") + "PETSc not functional due to real type precision mismatch:\n" + "SUNDIALS_PRECISION=${_sundials_precision}, " + "but PETSc was built with ${_petsc_precision} precision\n" + "PETSC_DIR: ${PETSC_DIR}\n") + message(FATAL_ERROR "${_err_msg_string}") endif() set(PETSC_WORKS TRUE CACHE BOOL "PETSC works with SUNDIALS as configured" FORCE) diff --git a/cmake/tpl/SundialsPthread.cmake 
b/cmake/tpl/SundialsPthread.cmake index c520064d7b..f19b07c09c 100644 --- a/cmake/tpl/SundialsPthread.cmake +++ b/cmake/tpl/SundialsPthread.cmake @@ -53,5 +53,5 @@ if(CMAKE_USE_PTHREADS_INIT) else() set(PTHREADS_FOUND FALSE) message(STATUS "Checking if Pthreads is available... FAILED") - print_error("Could not determine Pthreads compiler flags") -endif() \ No newline at end of file + message(FATAL_ERROR "Could not determine Pthreads compiler flags") +endif() diff --git a/cmake/tpl/SundialsRAJA.cmake b/cmake/tpl/SundialsRAJA.cmake index 6f7ad51809..1084eb9f6f 100644 --- a/cmake/tpl/SundialsRAJA.cmake +++ b/cmake/tpl/SundialsRAJA.cmake @@ -95,24 +95,24 @@ endif() # ----------------------------------------------------------------------------- if((SUNDIALS_RAJA_BACKENDS MATCHES "CUDA") AND - (NOT RAJA_BACKENDS MATCHES "CUDA")) - print_error("Requested that SUNDIALS uses the CUDA RAJA backend, but RAJA was not built with the CUDA backend.") + (NOT RAJA_BACKENDS MATCHES "CUDA")) + message(FATAL_ERROR "Requested that SUNDIALS uses the CUDA RAJA backend, but RAJA was not built with the CUDA backend.") endif() if((SUNDIALS_RAJA_BACKENDS MATCHES "HIP") AND - (NOT RAJA_BACKENDS MATCHES "HIP")) - print_error("Requested that SUNDIALS uses the HIP RAJA backend, but RAJA was not built with the HIP backend.") + (NOT RAJA_BACKENDS MATCHES "HIP")) + message(FATAL_ERROR "Requested that SUNDIALS uses the HIP RAJA backend, but RAJA was not built with the HIP backend.") endif() if(NOT ENABLE_OPENMP AND RAJA_BACKENDS MATCHES "OPENMP") - print_error("RAJA was built with OpenMP, but OpenMP is not enabled. Set ENABLE_OPENMP to ON.") + message(FATAL_ERROR "RAJA was built with OpenMP, but OpenMP is not enabled. Set ENABLE_OPENMP to ON.") endif() if(NOT ENABLE_OPENMP_DEVICE AND RAJA_BACKENDS MATCHES "TARGET_OPENMP") - print_error("RAJA was built with OpenMP device offloading, but OpenMP with device offloading is not enabled. Set ENABLE_OPENMP_DEVICE to ON.") + message(FATAL_ERROR "RAJA was built with OpenMP device offloading, but OpenMP with device offloading is not enabled. Set ENABLE_OPENMP_DEVICE to ON.") endif() if((SUNDIALS_RAJA_BACKENDS MATCHES "SYCL") AND (NOT RAJA_BACKENDS MATCHES "SYCL")) - print_error("Requested that SUNDIALS uses the SYCL RAJA backend, but RAJA was not built with the SYCL backend.") + message(FATAL_ERROR "Requested that SUNDIALS uses the SYCL RAJA backend, but RAJA was not built with the SYCL backend.") endif() diff --git a/cmake/tpl/SundialsSuperLUDIST.cmake b/cmake/tpl/SundialsSuperLUDIST.cmake index c5699a87d5..2dfc84a565 100644 --- a/cmake/tpl/SundialsSuperLUDIST.cmake +++ b/cmake/tpl/SundialsSuperLUDIST.cmake @@ -38,17 +38,17 @@ endif() # SuperLU_DIST only supports double precision if(SUNDIALS_PRECISION MATCHES "SINGLE" OR SUNDIALS_PRECISION MATCHES "EXTENDED") - print_error("SuperLU_DIST is not compatible with ${SUNDIALS_PRECISION} precision") + message(FATAL_ERROR "SuperLU_DIST is not compatible with ${SUNDIALS_PRECISION} precision") endif() # Using SUPERLUDIST requires building with MPI enabled if(ENABLE_SUPERLUDIST AND NOT ENABLE_MPI) - print_error("MPI is required for SuperLU DIST support. Set ENABLE_MPI to ON.") + message(FATAL_ERROR "MPI is required for SuperLU DIST support. Set ENABLE_MPI to ON.") endif() # Using SUPERLUDIST with OpenMP requires building with OpenMP enabled if(ENABLE_SUPERLUDIST AND SUPERLUDIST_OpenMP AND NOT ENABLE_OPENMP) - print_error("OpenMP is required for SuperLU DIST support. 
Set ENABLE_OPENMP to ON.") + message(FATAL_ERROR "OpenMP is required for SuperLU DIST support. Set ENABLE_OPENMP to ON.") endif() # ----------------------------------------------------------------------------- diff --git a/cmake/tpl/SundialsSuperLUMT.cmake b/cmake/tpl/SundialsSuperLUMT.cmake index 4691e94544..a7b6bf863a 100644 --- a/cmake/tpl/SundialsSuperLUMT.cmake +++ b/cmake/tpl/SundialsSuperLUMT.cmake @@ -38,7 +38,7 @@ endif() # SUPERLUMT does not support extended precision if(SUNDIALS_PRECISION MATCHES "EXTENDED") - print_error("SUPERLUMT is not compatible with ${SUNDIALS_PRECISION} precision") + message(FATAL_ERROR "SUPERLUMT is not compatible with ${SUNDIALS_PRECISION} precision") endif() # ----------------------------------------------------------------------------- @@ -106,7 +106,7 @@ if(SUPERLUMT_FOUND AND (NOT SUPERLUMT_WORKS)) message(STATUS "Checking if SuperLU_MT works with SUNDIALS... FAILED") message(STATUS "Check output: ") message("${COMPILE_OUTPUT}") - print_error("SUNDIALS interface to SuperLU_MT is not functional.") + message(FATAL_ERROR "SUNDIALS interface to SuperLU_MT is not functional.") endif() elseif(SUPERLUMT_FOUND AND SUPERLUMT_WORKS) diff --git a/cmake/tpl/SundialsTPL.cmake.template b/cmake/tpl/SundialsTPL.cmake.template index c1036a5e01..e95255b18e 100644 --- a/cmake/tpl/SundialsTPL.cmake.template +++ b/cmake/tpl/SundialsTPL.cmake.template @@ -75,7 +75,7 @@ if(_FOUND AND (NOT _WORKS)) message(STATUS "Checking if works with SUNDIALS... FAILED") message(STATUS "Check output: ") message("${COMPILE_OUTPUT}") - print_error("SUNDIALS interface to is not functional.") + message(FATAL_ERROR "SUNDIALS interface to is not functional.") endif() elseif(_FOUND AND _WORKS) diff --git a/cmake/tpl/SundialsTrilinos.cmake b/cmake/tpl/SundialsTrilinos.cmake index 4b8e7e4593..849eef6319 100644 --- a/cmake/tpl/SundialsTrilinos.cmake +++ b/cmake/tpl/SundialsTrilinos.cmake @@ -143,7 +143,7 @@ if(Trilinos_FOUND AND (NOT Trilinos_WORKS)) message(STATUS "Checking if Trilinos works with SUNDIALS... FAILED") message(STATUS "Check output: ") message("${COMPILE_OUTPUT}") - print_error("SUNDIALS interface to Trilinos is not functional.") + message(FATAL_ERROR "SUNDIALS interface to Trilinos is not functional.") endif() elseif(Trilinos_FOUND AND Trilinos_WORKS) From 9e7a8ff31c2a14b8d020ee78151514872881c1b7 Mon Sep 17 00:00:00 2001 From: Steven Roberts Date: Fri, 21 Jun 2024 23:42:09 -0700 Subject: [PATCH 2/8] Docs: Add missing pointers to KINSOL getter docs (#522) --- doc/kinsol/guide/source/Usage/index.rst | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/doc/kinsol/guide/source/Usage/index.rst b/doc/kinsol/guide/source/Usage/index.rst index 17d89da5c3..dd1bcacec1 100644 --- a/doc/kinsol/guide/source/Usage/index.rst +++ b/doc/kinsol/guide/source/Usage/index.rst @@ -1327,7 +1327,7 @@ different quantities that may be of interest to the user, such as solver workspace requirements and solver performance statistics. These optional output functions are described next. -.. c:function:: int KINGetWorkSpace(void * kin_mem, long int lenrw, long int leniw) +.. c:function:: int KINGetWorkSpace(void * kin_mem, long int * lenrw, long int * leniw) The function :c:func:`KINGetWorkSpace` returns the KINSOL integer and real workspace sizes. @@ -1352,7 +1352,7 @@ functions are described next. :math:`22 + 5 N` (increased by :math:`N` if constraint checking is enabled). -.. c:function:: int KINGetNumFuncEvals(void * kin_mem, long int nfevals) +.. 
c:function:: int KINGetNumFuncEvals(void * kin_mem, long int * nfevals) The function :c:func:`KINGetNumFuncEvals` returns the number of evaluations of the system function. @@ -1366,7 +1366,7 @@ functions are described next. * ``KIN_MEM_NULL`` -- The ``kin_mem`` pointer is ``NULL``. -.. c:function:: int KINGetNumNonlinSolvIters(void * kin_mem, long int nniters) +.. c:function:: int KINGetNumNonlinSolvIters(void * kin_mem, long int * nniters) The function :c:func:`KINGetNumNonlinSolvIters` returns the number of nonlinear iterations. @@ -1380,7 +1380,7 @@ functions are described next. * ``KIN_MEM_NULL`` -- The ``kin_mem`` pointer is ``NULL``. -.. c:function:: int KINGetNumBetaCondFails(void * kin_mem, long int nbcfails) +.. c:function:: int KINGetNumBetaCondFails(void * kin_mem, long int * nbcfails) The function :c:func:`KINGetNumBetaCondFails` returns the number of :math:`\beta`-condition failures. @@ -1394,7 +1394,7 @@ functions are described next. * ``KIN_MEM_NULL`` -- The ``kin_mem`` pointer is ``NULL``. -.. c:function:: int KINGetNumBacktrackOps(void * kin_mem, long int nbacktr) +.. c:function:: int KINGetNumBacktrackOps(void * kin_mem, long int * nbacktr) The function :c:func:`KINGetNumBacktrackOps` returns the number of backtrack operations (step length adjustments) performed by the line search algorithm. @@ -1408,7 +1408,7 @@ functions are described next. * ``KIN_MEM_NULL`` -- The ``kin_mem`` pointer is ``NULL``. -.. c:function:: int KINGetFuncNorm(void * kin_mem, sunrealtype fnorm) +.. c:function:: int KINGetFuncNorm(void * kin_mem, sunrealtype * fnorm) The function :c:func:`KINGetFuncNorm` returns the scaled Euclidean :math:`\ell_2` norm of the nonlinear system function :math:`F(u)` evaluated @@ -1423,7 +1423,7 @@ functions are described next. * ``KIN_MEM_NULL`` -- The ``kin_mem`` pointer is ``NULL``. -.. c:function:: int KINGetStepLength(void * kin_mem, sunrealtype steplength) +.. c:function:: int KINGetStepLength(void * kin_mem, sunrealtype * steplength) The function :c:func:`KINGetStepLength` returns the scaled Euclidean :math:`\ell_2` norm of the step used during the previous iteration. From c3c8490afaeb181c8f1488f026f60492d512d7f6 Mon Sep 17 00:00:00 2001 From: David Gardner Date: Mon, 24 Jun 2024 09:33:46 -0700 Subject: [PATCH 3/8] Maintenance: update change log (#518) Update change logs for next development cycle --- CHANGELOG.md | 10 +++ doc/shared/Changelog.rst | 139 ++++++++++++++++++++++++++++++++++- doc/shared/RecentChanges.rst | 126 ------------------------------- doc/shared/sundials_vars.py | 2 +- scripts/startReleaseCycle.sh | 2 +- 5 files changed, 150 insertions(+), 129 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 059d8d3666..31c94ba897 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,15 @@ # SUNDIALS Changelog +## Changes to SUNDIALS in release X.Y.Z + +### Major Features + +### New Features and Enhancements + +### Bug Fixes + +### Deprecation Notices + ## Changes to SUNDIALS in release 7.1.0 ### Major Features diff --git a/doc/shared/Changelog.rst b/doc/shared/Changelog.rst index 4fa304b8ac..4c3126e836 100644 --- a/doc/shared/Changelog.rst +++ b/doc/shared/Changelog.rst @@ -21,11 +21,148 @@ Changelog .. SED_REPLACEMENT_KEY -Changes to SUNDIALS in release 7.1.0 +Changes to SUNDIALS in release X.Y.Z ==================================== .. 
include:: RecentChanges_link.rst +Changes to SUNDIALS in release 7.1.0 +==================================== + +**Major Features** + +Created shared user interface functions for ARKODE to allow more uniform control +over time-stepping algorithms, improved extensibility, and simplified code +maintenance. The corresponding stepper-specific user-callable functions are now +deprecated and will be removed in a future major release. + +Added CMake infrastructure that enables externally maintained addons/plugins to +be *optionally* built with SUNDIALS. See :ref:`Contributing` for details. + +**New Features and Enhancements** + +Added support for Kokkos Kernels v4. + +Added the following Runge-Kutta Butcher tables + +* ``ARKODE_FORWARD_EULER_1_1`` +* ``ARKODE_RALSTON_EULER_2_1_2`` +* ``ARKODE_EXPLICIT_MIDPOINT_EULER_2_1_2`` +* ``ARKODE_BACKWARD_EULER_1_1`` +* ``ARKODE_IMPLICIT_MIDPOINT_1_2`` +* ``ARKODE_IMPLICIT_TRAPEZOIDAL_2_2`` + +Added the following MRI coupling tables + +* ``ARKODE_MRI_GARK_FORWARD_EULER`` +* ``ARKODE_MRI_GARK_RALSTON2`` +* ``ARKODE_MRI_GARK_RALSTON3`` +* ``ARKODE_MRI_GARK_BACKWARD_EULER`` +* ``ARKODE_MRI_GARK_IMPLICIT_MIDPOINT`` +* ``ARKODE_IMEX_MRI_GARK_EULER`` +* ``ARKODE_IMEX_MRI_GARK_TRAPEZOIDAL`` +* ``ARKODE_IMEX_MRI_GARK_MIDPOINT`` + +Added :c:func:`ARKodeButcherTable_ERKIDToName` and +:c:func:`ARKodeButcherTable_DIRKIDToName` to convert a Butcher table ID to a +string representation. + +Added the function :c:func:`ARKodeSetAutonomous` in ARKODE to indicate that the +implicit right-hand side function does not explicitly depend on time. When using +the trivial predictor, an autonomous problem may reuse implicit function +evaluations across stage solves to reduce the total number of function +evaluations. + +Users may now disable interpolated output in ARKODE by passing +``ARK_INTERP_NONE`` to :c:func:`ARKodeSetInterpolantType`. When interpolation is +disabled, rootfinding is not supported, implicit methods must use the trivial +predictor (the default option), and interpolation at stop times cannot be used +(interpolating at stop times is disabled by default). With interpolation +disabled, calling :c:func:`ARKodeEvolve` in ``ARK_NORMAL`` mode will return at +or past the requested output time (setting a stop time may still be used to halt +the integrator at a specific time). Disabling interpolation will reduce the +memory footprint of an integrator by two or more state vectors (depending on the +interpolant type and degree) which can be beneficial when interpolation is not +needed e.g., when integrating to a final time without output in between or using +an explicit fast time scale integrator with an MRI method. + +Added "Resize" capability to ARKODE's SPRKStep time-stepping module. + +Enabled the Fortran interfaces to build with 32-bit ``sunindextype``. + +**Bug Fixes** + +Updated the CMake variable ``HIP_PLATFORM`` default to ``amd`` as the previous +default, ``hcc``, is no longer recognized in ROCm 5.7.0 or newer. The new +default is also valid in older version of ROCm (at least back to version 4.3.1). + +Renamed the DPCPP value for the :cmakeop:`SUNDIALS_GINKGO_BACKENDS` CMake option +to ``SYCL`` to match Ginkgo's updated naming convention. + +Changed the CMake version compatibility mode for SUNDIALS to ``AnyNewerVersion`` +instead of ``SameMajorVersion``. This fixes the issue seen `here +`_. + +Fixed a CMake bug that caused an MPI linking error for our C++ examples in some +instances. Fixes `GitHub Issue #464 +`_. 
+ +Fixed the runtime library installation path for windows systems. This fix +changes the default library installation path from +``CMAKE_INSTALL_PREFIX/CMAKE_INSTALL_LIBDIR`` to +``CMAKE_INSTALL_PREFIX/CMAKE_INSTALL_BINDIR``. + +Fixed conflicting ``.lib`` files between shared and static libs when using +``MSVC`` on Windows + +Fixed invalid ``SUNDIALS_EXPORT`` generated macro when building both shared and +static libs. + +Fixed a bug in some Fortran examples where ``c_null_ptr`` was passed as an +argument to a function pointer instead of ``c_null_funptr``. This caused +compilation issues with the Cray Fortran compiler. + +Fixed a bug in the HIP execution policies where ``WARP_SIZE`` would not be set +with ROCm 6.0.0 or newer. + +Fixed a bug that caused error messages to be cut off in some cases. Fixes +`GitHub Issue #461 `_. + +Fixed a memory leak when an error handler was added to a +:c:type:`SUNContext`. Fixes `GitHub Issue #466 +`_. + +Fixed a bug where :c:func:`MRIStepEvolve` would not handle a recoverable error +produced from evolving the inner stepper. + +Added missing ``SetRootDirection`` and ``SetNoInactiveRootWarn`` functions to +ARKODE's SPRKStep time-stepping module. + +Fixed a bug in :c:func:`ARKodeSPRKTable_Create` where the coefficient arrays +were not allocated. + +Fix bug on LLP64 platforms (like Windows 64-bit) where ``KLU_INDEXTYPE`` could be +32 bits wide even if ``SUNDIALS_INT64_T`` is defined. + +Check if size of ``SuiteSparse_long`` is 8 if the size of ``sunindextype`` is 8 +when using KLU. + +Fixed several build errors with the Fortran interfaces on Windows systems. + +**Deprecation Notices** + +Numerous ARKODE stepper-specific functions are now deprecated in favor of +ARKODE-wide functions. + +Deprecated the `ARKStepSetOptimalParams` function. Since this function does not have an +ARKODE-wide equivalent, instructions have been added to the user guide for how +to retain the current functionality using other user-callable functions. + +The unsupported implementations of ``N_VGetArrayPointer`` and +``N_VSetArrayPointer`` for the *hypre* and PETSc vectors are now deprecated. +Users should access the underlying wrapped external library vector objects +instead with ``N_VGetVector_ParHyp`` and ``N_VGetVector_Petsc``, respectively. + Changes to SUNDIALS in release 7.0.0 ==================================== diff --git a/doc/shared/RecentChanges.rst b/doc/shared/RecentChanges.rst index adc2691e7b..4f1514700e 100644 --- a/doc/shared/RecentChanges.rst +++ b/doc/shared/RecentChanges.rst @@ -1,133 +1,7 @@ **Major Features** -Created shared user interface functions for ARKODE to allow more uniform control -over time-stepping algorithms, improved extensibility, and simplified code -maintenance. The corresponding stepper-specific user-callable functions are now -deprecated and will be removed in a future major release. - -Added CMake infrastructure that enables externally maintained addons/plugins to -be *optionally* built with SUNDIALS. See :ref:`Contributing` for details. - **New Features and Enhancements** -Added support for Kokkos Kernels v4. 
- -Added the following Runge-Kutta Butcher tables - -* ``ARKODE_FORWARD_EULER_1_1`` -* ``ARKODE_RALSTON_EULER_2_1_2`` -* ``ARKODE_EXPLICIT_MIDPOINT_EULER_2_1_2`` -* ``ARKODE_BACKWARD_EULER_1_1`` -* ``ARKODE_IMPLICIT_MIDPOINT_1_2`` -* ``ARKODE_IMPLICIT_TRAPEZOIDAL_2_2`` - -Added the following MRI coupling tables - -* ``ARKODE_MRI_GARK_FORWARD_EULER`` -* ``ARKODE_MRI_GARK_RALSTON2`` -* ``ARKODE_MRI_GARK_RALSTON3`` -* ``ARKODE_MRI_GARK_BACKWARD_EULER`` -* ``ARKODE_MRI_GARK_IMPLICIT_MIDPOINT`` -* ``ARKODE_IMEX_MRI_GARK_EULER`` -* ``ARKODE_IMEX_MRI_GARK_TRAPEZOIDAL`` -* ``ARKODE_IMEX_MRI_GARK_MIDPOINT`` - -Added :c:func:`ARKodeButcherTable_ERKIDToName` and -:c:func:`ARKodeButcherTable_DIRKIDToName` to convert a Butcher table ID to a -string representation. - -Added the function :c:func:`ARKodeSetAutonomous` in ARKODE to indicate that the -implicit right-hand side function does not explicitly depend on time. When using -the trivial predictor, an autonomous problem may reuse implicit function -evaluations across stage solves to reduce the total number of function -evaluations. - -Users may now disable interpolated output in ARKODE by passing -``ARK_INTERP_NONE`` to :c:func:`ARKodeSetInterpolantType`. When interpolation is -disabled, rootfinding is not supported, implicit methods must use the trivial -predictor (the default option), and interpolation at stop times cannot be used -(interpolating at stop times is disabled by default). With interpolation -disabled, calling :c:func:`ARKodeEvolve` in ``ARK_NORMAL`` mode will return at -or past the requested output time (setting a stop time may still be used to halt -the integrator at a specific time). Disabling interpolation will reduce the -memory footprint of an integrator by two or more state vectors (depending on the -interpolant type and degree) which can be beneficial when interpolation is not -needed e.g., when integrating to a final time without output in between or using -an explicit fast time scale integrator with an MRI method. - -Added "Resize" capability to ARKODE's SPRKStep time-stepping module. - -Enabled the Fortran interfaces to build with 32-bit ``sunindextype``. - **Bug Fixes** -Updated the CMake variable ``HIP_PLATFORM`` default to ``amd`` as the previous -default, ``hcc``, is no longer recognized in ROCm 5.7.0 or newer. The new -default is also valid in older version of ROCm (at least back to version 4.3.1). - -Renamed the DPCPP value for the :cmakeop:`SUNDIALS_GINKGO_BACKENDS` CMake option -to ``SYCL`` to match Ginkgo's updated naming convention. - -Changed the CMake version compatibility mode for SUNDIALS to ``AnyNewerVersion`` -instead of ``SameMajorVersion``. This fixes the issue seen `here -`_. - -Fixed a CMake bug that caused an MPI linking error for our C++ examples in some -instances. Fixes `GitHub Issue #464 -`_. - -Fixed the runtime library installation path for windows systems. This fix -changes the default library installation path from -``CMAKE_INSTALL_PREFIX/CMAKE_INSTALL_LIBDIR`` to -``CMAKE_INSTALL_PREFIX/CMAKE_INSTALL_BINDIR``. - -Fixed conflicting ``.lib`` files between shared and static libs when using -``MSVC`` on Windows - -Fixed invalid ``SUNDIALS_EXPORT`` generated macro when building both shared and -static libs. - -Fixed a bug in some Fortran examples where ``c_null_ptr`` was passed as an -argument to a function pointer instead of ``c_null_funptr``. This caused -compilation issues with the Cray Fortran compiler. 
- -Fixed a bug in the HIP execution policies where ``WARP_SIZE`` would not be set -with ROCm 6.0.0 or newer. - -Fixed a bug that caused error messages to be cut off in some cases. Fixes -`GitHub Issue #461 `_. - -Fixed a memory leak when an error handler was added to a -:c:type:`SUNContext`. Fixes `GitHub Issue #466 -`_. - -Fixed a bug where :c:func:`MRIStepEvolve` would not handle a recoverable error -produced from evolving the inner stepper. - -Added missing ``SetRootDirection`` and ``SetNoInactiveRootWarn`` functions to -ARKODE's SPRKStep time-stepping module. - -Fixed a bug in :c:func:`ARKodeSPRKTable_Create` where the coefficient arrays -were not allocated. - -Fix bug on LLP64 platforms (like Windows 64-bit) where ``KLU_INDEXTYPE`` could be -32 bits wide even if ``SUNDIALS_INT64_T`` is defined. - -Check if size of ``SuiteSparse_long`` is 8 if the size of ``sunindextype`` is 8 -when using KLU. - -Fixed several build errors with the Fortran interfaces on Windows systems. - **Deprecation Notices** - -Numerous ARKODE stepper-specific functions are now deprecated in favor of -ARKODE-wide functions. - -Deprecated the `ARKStepSetOptimalParams` function. Since this function does not have an -ARKODE-wide equivalent, instructions have been added to the user guide for how -to retain the current functionality using other user-callable functions. - -The unsupported implementations of ``N_VGetArrayPointer`` and -``N_VSetArrayPointer`` for the *hypre* and PETSc vectors are now deprecated. -Users should access the underlying wrapped external library vector objects -instead with ``N_VGetVector_ParHyp`` and ``N_VGetVector_Petsc``, respectively. diff --git a/doc/shared/sundials_vars.py b/doc/shared/sundials_vars.py index dca40a56f2..1b9406fa69 100644 --- a/doc/shared/sundials_vars.py +++ b/doc/shared/sundials_vars.py @@ -9,7 +9,7 @@ # SPDX-License-Identifier: BSD-3-Clause # SUNDIALS Copyright End # ---------------------------------------------------------------- -doc_version = 'v7.1.0' +doc_version = 'develop' sundials_version = 'v7.1.0' arkode_version = 'v6.1.0' cvode_version = 'v7.1.0' diff --git a/scripts/startReleaseCycle.sh b/scripts/startReleaseCycle.sh index 5e9f9e86b8..f4ee9075db 100755 --- a/scripts/startReleaseCycle.sh +++ b/scripts/startReleaseCycle.sh @@ -31,7 +31,7 @@ sedi() { # Update versions # ------------------------------------------------------------------------------ -fn="../doc/shared/versions.py" +fn="../doc/shared/sundials_vars.py" sedi "s/doc_version =.*/doc_version = \'develop\'/" $fn # ------------------------------------------------------------------------------ From 8d3dde0ea9007dffde63e02427d12c454025cf43 Mon Sep 17 00:00:00 2001 From: Cody Balos Date: Mon, 24 Jun 2024 22:55:34 -0700 Subject: [PATCH 4/8] Bugfix: Revert change to N_VSpace_Sycl (#523) This change was supposed to be reverted in #447 but it was missed. --- CHANGELOG.md | 2 ++ doc/shared/RecentChanges.rst | 2 ++ src/nvector/sycl/nvector_sycl.cpp | 2 +- 3 files changed, 5 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 31c94ba897..5954359481 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,6 +8,8 @@ ### Bug Fixes +Fixed a [bug](https://github.com/LLNL/sundials/pull/523) in v7.1.0 with the SYCL N_Vector `N_VSpace` function. 
+ ### Deprecation Notices ## Changes to SUNDIALS in release 7.1.0 diff --git a/doc/shared/RecentChanges.rst b/doc/shared/RecentChanges.rst index 4f1514700e..c10634340f 100644 --- a/doc/shared/RecentChanges.rst +++ b/doc/shared/RecentChanges.rst @@ -4,4 +4,6 @@ **Bug Fixes** +Fixed a `bug `_ in v7.1.0 with the SYCL N_Vector ``N_VSpace`` function. + **Deprecation Notices** diff --git a/src/nvector/sycl/nvector_sycl.cpp b/src/nvector/sycl/nvector_sycl.cpp index ce2106e210..2f5b6a0a61 100644 --- a/src/nvector/sycl/nvector_sycl.cpp +++ b/src/nvector/sycl/nvector_sycl.cpp @@ -870,7 +870,7 @@ void N_VDestroy_Sycl(N_Vector v) return; } -void N_VSpace_Sycl(N_Vector X, long int* lrw, long int* liw) +void N_VSpace_Sycl(N_Vector X, sunindextype* lrw, sunindextype* liw) { *lrw = NVEC_SYCL_CONTENT(X)->length; *liw = 2; From c28eaa3764a03705d61decb6025b409360e9d53f Mon Sep 17 00:00:00 2001 From: Cody Balos Date: Wed, 26 Jun 2024 09:46:34 -0700 Subject: [PATCH 5/8] Release/7.1.1 (#529) --- CHANGELOG.md | 8 +------- CITATIONS.md | 12 ++++++------ CMakeLists.txt | 24 ++++++++++++------------ README.md | 2 +- doc/shared/Changelog.rst | 2 +- doc/shared/History.rst | 2 ++ doc/shared/RecentChanges.rst | 6 ------ doc/shared/sundials.bib | 24 ++++++++++++------------ doc/shared/sundials_vars.py | 16 ++++++++-------- doc/sundials/biblio.bib | 24 ++++++++++++------------ doc/sundials/ug.tex | 14 +++++++------- scripts/tarscript | 14 +++++++------- scripts/updateVersion.sh | 2 +- src/arkode/README.md | 6 +++--- src/cvode/README.md | 6 +++--- src/cvodes/README.md | 6 +++--- src/ida/README.md | 6 +++--- src/idas/README.md | 6 +++--- src/kinsol/README.md | 6 +++--- 19 files changed, 88 insertions(+), 98 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5954359481..eee9a428a5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,17 +1,11 @@ # SUNDIALS Changelog -## Changes to SUNDIALS in release X.Y.Z - -### Major Features - -### New Features and Enhancements +## Changes to SUNDIALS in release 7.1.1 ### Bug Fixes Fixed a [bug](https://github.com/LLNL/sundials/pull/523) in v7.1.0 with the SYCL N_Vector `N_VSpace` function. -### Deprecation Notices - ## Changes to SUNDIALS in release 7.1.0 ### Major Features diff --git a/CITATIONS.md b/CITATIONS.md index ef97b2c715..9e93aa7ced 100644 --- a/CITATIONS.md +++ b/CITATIONS.md @@ -69,7 +69,7 @@ they are using rather than the combined SUNDIALS online guide: author = {Daniel R. Reynolds and David J. Gardner and Carol S. Woodward and Cody J. Balos}, title = {User Documentation for ARKODE}, year = {2024}, - note = {v6.1.0} + note = {v6.1.1} } ``` @@ -78,7 +78,7 @@ they are using rather than the combined SUNDIALS online guide: author = {Alan C. Hindmarsh and Radu Serban and Cody J. Balos and David J. Gardner and Daniel R. Reynolds and Carol S. Woodward}, title = {User Documentation for CVODE}, year = {2024}, - note = {v7.1.0} + note = {v7.1.1} } ``` @@ -87,7 +87,7 @@ they are using rather than the combined SUNDIALS online guide: author = {Alan C. Hindmarsh and Radu Serban and Cody J. Balos and David J. Gardner and Daniel R. Reynolds and Carol S. Woodward}, title = {User Documentation for CVODES}, year = {2024}, - note = {v7.1.0} + note = {v7.1.1} } ``` @@ -96,7 +96,7 @@ they are using rather than the combined SUNDIALS online guide: author = {Alan C. Hindmarsh and Radu Serban and Cody J. Balos and David J. Gardner and Daniel R. Reynolds and Carol S. 
Woodward}, title = {User Documentation for IDA}, year = {2024}, - note = {v7.1.0} + note = {v7.1.1} } ``` @@ -105,7 +105,7 @@ they are using rather than the combined SUNDIALS online guide: author = {Radu Serban and Cosmin Petra and Alan C. Hindmarsh and Cody J. Balos and David J. Gardner and Daniel R. Reynolds and Carol S. Woodward}, title = {User Documentation for IDAS}, year = {2024}, - note = {v6.1.0} + note = {v6.1.1} } ``` @@ -114,6 +114,6 @@ they are using rather than the combined SUNDIALS online guide: author = {Alan C. Hindmarsh and Radu Serban and Cody J. Balos and David J. Gardner and Daniel R. Reynolds and Carol S. Woodward}, title = {User Documentation for KINSOL}, year = {2024}, - note = {v7.1.0} + note = {v7.1.1} } ``` diff --git a/CMakeLists.txt b/CMakeLists.txt index d16ee2e0a9..b5cca95afd 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -48,7 +48,7 @@ include(FindPackageHandleStandardArgs) # Set some variables with info on the SUNDIALS project set(PACKAGE_BUGREPORT "sundials-users@llnl.gov") set(PACKAGE_NAME "SUNDIALS") -set(PACKAGE_STRING "SUNDIALS 7.1.0") +set(PACKAGE_STRING "SUNDIALS 7.1.1") set(PACKAGE_TARNAME "sundials") # Set SUNDIALS version numbers @@ -58,7 +58,7 @@ message(STATUS "SUNDIALS_GIT_VERSION: ${SUNDIALS_GIT_VERSION}") # (use "" for the version label if none is needed) set(PACKAGE_VERSION_MAJOR "7") set(PACKAGE_VERSION_MINOR "1") -set(PACKAGE_VERSION_PATCH "0") +set(PACKAGE_VERSION_PATCH "1") set(PACKAGE_VERSION_LABEL "") if(PACKAGE_VERSION_LABEL) @@ -73,37 +73,37 @@ endif() # Specify the VERSION and SOVERSION for shared libraries -set(arkodelib_VERSION "6.1.0") +set(arkodelib_VERSION "6.1.1") set(arkodelib_SOVERSION "6") -set(cvodelib_VERSION "7.1.0") +set(cvodelib_VERSION "7.1.1") set(cvodelib_SOVERSION "7") -set(cvodeslib_VERSION "7.1.0") +set(cvodeslib_VERSION "7.1.1") set(cvodeslib_SOVERSION "7") -set(idalib_VERSION "7.1.0") +set(idalib_VERSION "7.1.1") set(idalib_SOVERSION "7") -set(idaslib_VERSION "6.1.0") +set(idaslib_VERSION "6.1.1") set(idaslib_SOVERSION "6") -set(kinsollib_VERSION "7.1.0") +set(kinsollib_VERSION "7.1.1") set(kinsollib_SOVERSION "7") set(cpodeslib_VERSION "0.0.0") set(cpodeslib_SOVERSION "0") -set(nveclib_VERSION "7.1.0") +set(nveclib_VERSION "7.1.1") set(nveclib_SOVERSION "7") -set(sunmatrixlib_VERSION "5.1.0") +set(sunmatrixlib_VERSION "5.1.1") set(sunmatrixlib_SOVERSION "5") -set(sunlinsollib_VERSION "5.1.0") +set(sunlinsollib_VERSION "5.1.1") set(sunlinsollib_SOVERSION "5") -set(sunnonlinsollib_VERSION "4.1.0") +set(sunnonlinsollib_VERSION "4.1.1") set(sunnonlinsollib_SOVERSION "4") set(sundialslib_VERSION diff --git a/README.md b/README.md index 9a21db1d4d..c410afb6ca 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,5 @@ # SUNDIALS: SUite of Nonlinear and DIfferential/ALgebraic equation Solvers # -### Version 7.1.0 (Jun 2024) ### +### Version 7.1.1 (Jun 2024) ### **Center for Applied Scientific Computing, Lawrence Livermore National Laboratory** diff --git a/doc/shared/Changelog.rst b/doc/shared/Changelog.rst index 4c3126e836..2ce7b9c951 100644 --- a/doc/shared/Changelog.rst +++ b/doc/shared/Changelog.rst @@ -21,7 +21,7 @@ Changelog .. SED_REPLACEMENT_KEY -Changes to SUNDIALS in release X.Y.Z +Changes to SUNDIALS in release 7.1.1 ==================================== .. 
include:: RecentChanges_link.rst diff --git a/doc/shared/History.rst b/doc/shared/History.rst index b5c79886e3..f2f4e0842d 100644 --- a/doc/shared/History.rst +++ b/doc/shared/History.rst @@ -21,6 +21,8 @@ Release History +----------+-------------------+-------------------+-------------------+-------------------+-------------------+-------------------+-------------------+ | Date | SUNDIALS | ARKODE | CVODE | CVODES | IDA | IDAS | KINSOL | +==========+===================+===================+===================+===================+===================+===================+===================+ +| Jun 2024 | 7.1.1 | 6.1.1 | 7.1.1 | 7.1.1 | 7.1.1 | 6.1.1 | 7.1.1 | ++----------+-------------------+-------------------+-------------------+-------------------+-------------------+-------------------+-------------------+ | Jun 2024 | 7.1.0 | 6.1.0 | 7.1.0 | 7.1.0 | 7.1.0 | 6.1.0 | 7.1.0 | +----------+-------------------+-------------------+-------------------+-------------------+-------------------+-------------------+-------------------+ | Feb 2024 | 7.0.0 | 6.0.0 | 7.0.0 | 7.0.0 | 7.0.0 | 6.0.0 | 7.0.0 | diff --git a/doc/shared/RecentChanges.rst b/doc/shared/RecentChanges.rst index c10634340f..0340eed7c8 100644 --- a/doc/shared/RecentChanges.rst +++ b/doc/shared/RecentChanges.rst @@ -1,9 +1,3 @@ -**Major Features** - -**New Features and Enhancements** - **Bug Fixes** Fixed a `bug `_ in v7.1.0 with the SYCL N_Vector ``N_VSpace`` function. - -**Deprecation Notices** diff --git a/doc/shared/sundials.bib b/doc/shared/sundials.bib index afa6de0c0d..1f9d585a91 100644 --- a/doc/shared/sundials.bib +++ b/doc/shared/sundials.bib @@ -27,7 +27,7 @@ % @techreport{arkode_ug, author = {Daniel R. Reynolds and David J. Gardner and Carol S. Woodward and Rujeko Chinomona and Cody J. Balos}, -title = {{User Documentation for ARKODE v6.1.0}}, +title = {{User Documentation for ARKODE v6.1.1}}, institution = {LLNL}, number = {LLNL-SM-668082}, year = 2024 @@ -37,7 +37,7 @@ @techreport{arkode_ug % @techreport{arkode_ex, author = {Daniel R. Reynolds}, -title = {{Example Programs for ARKODE v6.1.0}}, +title = {{Example Programs for ARKODE v6.1.1}}, institution = {Southern Methodist University}, year = 2024 } @@ -46,7 +46,7 @@ @techreport{arkode_ex % @techreport{cvode_ug, author = {Alan C. Hindmarsh and Radu Serban and Cody J. Balos and David J. Gardner and Daniel R. Reynolds and Carol S. Woodward}, -title = {{User Documentation for CVODE v7.1.0}}, +title = {{User Documentation for CVODE v7.1.1}}, institution = {LLNL}, number = {UCRL-SM-208108}, year = 2024 @@ -56,7 +56,7 @@ @techreport{cvode_ug % @techreport{cvode_ex, author = {Alan C. Hindmarsh and Radu Serban}, -title = {{Example Programs for CVODE v7.1.0}}, +title = {{Example Programs for CVODE v7.1.1}}, institution = {LLNL}, note = {UCRL-SM-208110}, year = 2024 @@ -66,7 +66,7 @@ @techreport{cvode_ex % @techreport{cvodes_ug, author = {Alan C. Hindmarsh and Radu Serban and Cody J. Balos and David J. Gardner and Daniel R. Reynolds and Carol S. Woodward}, -title = {{User Documentation for CVODES v7.1.0}}, +title = {{User Documentation for CVODES v7.1.1}}, institution = {LLNL}, note = {UCRL-SM-208111}, year = 2024 @@ -76,7 +76,7 @@ @techreport{cvodes_ug % @techreport{cvodes_ex, author = {Radu Serban and Alan C. Hindmarsh}, -title = {{Example Programs for CVODES v7.1.0}}, +title = {{Example Programs for CVODES v7.1.1}}, institution = {LLNL}, number = {UCRL-SM-208115}, year = 2024 @@ -86,7 +86,7 @@ @techreport{cvodes_ex % @techreport{ida_ug, author = {Alan C. 
Hindmarsh and Radu Serban and Cody J. Balos and David J. Gardner and Daniel R. Reynolds and Carol S. Woodward}, -title = {{User Documentation for IDA v7.1.0}}, +title = {{User Documentation for IDA v7.1.1}}, institution = {LLNL}, number = {UCRL-SM-208112}, year = 2024 @@ -96,7 +96,7 @@ @techreport{ida_ug % @techreport{ida_ex, author = {Alan C. Hindmarsh and Radu Serban and Aaron Collier}, -title = {{Example Programs for IDA v7.1.0}}, +title = {{Example Programs for IDA v7.1.1}}, institution = {LLNL}, number = {UCRL-SM-208113}, year = 2024 @@ -106,7 +106,7 @@ @techreport{ida_ex % @techreport{idas_ug, author = {Radu Serban and Cosmin Petra and Alan C. Hindmarsh and Cody J. Balos and David J. Gardner and Daniel R. Reynolds and Carol S. Woodward}, -title = {{User Documentation for IDAS v6.1.0}}, +title = {{User Documentation for IDAS v6.1.1}}, institution = {LLNL}, number = {UCRL-SM-234051}, year = 2024 @@ -116,7 +116,7 @@ @techreport{idas_ug % @techreport{idas_ex, author = {Radu Serban and Alan C. Hindmarsh}, -title = {{Example Programs for IDAS v6.1.0}}, +title = {{Example Programs for IDAS v6.1.1}}, institution = {LLNL}, number = {LLNL-TR-437091}, year = 2024 @@ -126,7 +126,7 @@ @techreport{idas_ex % @techreport{kinsol_ug, author = {Alan C. Hindmarsh and Radu Serban and Cody J. Balos and David J. Gardner and Daniel R. Reynolds and Carol S. Woodward}, -title = {{User Documentation for KINSOL v7.1.0}}, +title = {{User Documentation for KINSOL v7.1.1}}, institution = {LLNL}, number = {UCRL-SM-208116}, year = 2024 @@ -136,7 +136,7 @@ @techreport{kinsol_ug % @techreport{kinsol_ex, author = {Aaron M. Collier and Radu Serban}, -title = {{Example Programs for KINSOL v7.1.0}}, +title = {{Example Programs for KINSOL v7.1.1}}, institution = {LLNL}, number = {UCRL-SM-208114}, year = 2024 diff --git a/doc/shared/sundials_vars.py b/doc/shared/sundials_vars.py index 1b9406fa69..7fdf28446e 100644 --- a/doc/shared/sundials_vars.py +++ b/doc/shared/sundials_vars.py @@ -9,14 +9,14 @@ # SPDX-License-Identifier: BSD-3-Clause # SUNDIALS Copyright End # ---------------------------------------------------------------- -doc_version = 'develop' -sundials_version = 'v7.1.0' -arkode_version = 'v6.1.0' -cvode_version = 'v7.1.0' -cvodes_version = 'v7.1.0' -ida_version = 'v7.1.0' -idas_version = 'v6.1.0' -kinsol_version = 'v7.1.0' +doc_version = 'v7.1.1' +sundials_version = 'v7.1.1' +arkode_version = 'v6.1.1' +cvode_version = 'v7.1.1' +cvodes_version = 'v7.1.1' +ida_version = 'v7.1.1' +idas_version = 'v6.1.1' +kinsol_version = 'v7.1.1' year = '2024' # Warn about all references where the target cannot be found diff --git a/doc/sundials/biblio.bib b/doc/sundials/biblio.bib index b0470d62e6..56b49481c8 100644 --- a/doc/sundials/biblio.bib +++ b/doc/sundials/biblio.bib @@ -16,7 +16,7 @@ @techreport{arkode_ug, author={Daniel R. Reynolds and David J. Gardner and Alan C. Hindmarsh and Carol S. Woodward and Jean M. Sexton}, -title={{User Documentation for ARKODE v6.1.0}}, +title={{User Documentation for ARKODE v6.1.1}}, institution={LLNL}, number={LLNL-SM-668082}, year = 2024 @@ -26,7 +26,7 @@ @techreport{arkode_ug % @techreport{arkode_ex, author={Daniel R. Reynolds}, -title={{Example Programs for ARKODE v6.1.0}}, +title={{Example Programs for ARKODE v6.1.1}}, institution={Southern Methodist University}, year = 2024 } @@ -35,7 +35,7 @@ @techreport{arkode_ex % @techreport{cvode_ug, author={A. C. Hindmarsh and R. 
Serban}, -title={{User Documentation for CVODE v7.1.0}}, +title={{User Documentation for CVODE v7.1.1}}, institution={LLNL}, number={UCRL-SM-208108}, year = 2024 @@ -45,7 +45,7 @@ @techreport{cvode_ug % @techreport{cvode_ex, author={A. C. Hindmarsh and R. Serban and D. R. Reynolds}, -title={{Example Programs for CVODE v7.1.0}}, +title={{Example Programs for CVODE v7.1.1}}, institution={LLNL}, note={UCRL-SM-208110}, year = 2024 @@ -55,7 +55,7 @@ @techreport{cvode_ex % @techreport{cvodes_ug, author={A. C. Hindmarsh and R. Serban}, -title={{User Documentation for CVODES v7.1.0}}, +title={{User Documentation for CVODES v7.1.1}}, institution={LLNL}, note={UCRL-SM-208111}, year = 2024 @@ -65,7 +65,7 @@ @techreport{cvodes_ug % @techreport{cvodes_ex, author={R. Serban and A. C. Hindmarsh}, -title={{Example Programs for CVODES v7.1.0}}, +title={{Example Programs for CVODES v7.1.1}}, institution={LLNL}, number={UCRL-SM-208115}, year = 2024 @@ -75,7 +75,7 @@ @techreport{cvodes_ex % @techreport{ida_ug, author={A. C. Hindmarsh and R. Serban and A. Collier}, -title={{User Documentation for IDA v7.1.0}}, +title={{User Documentation for IDA v7.1.1}}, institution={LLNL}, number={UCRL-SM-208112}, year = 2024 @@ -85,7 +85,7 @@ @techreport{ida_ug % @techreport{ida_ex, author={A. C. Hindmarsh and R. Serban and A. Collier}, -title={{Example Programs for IDA v7.1.0}}, +title={{Example Programs for IDA v7.1.1}}, institution={LLNL}, number={UCRL-SM-208113}, year = 2024 @@ -95,7 +95,7 @@ @techreport{ida_ex % @techreport{idas_ug, author={R. Serban and C. Petra and A. C. Hindmarsh}, -title={{User Documentation for IDAS v6.1.0}}, +title={{User Documentation for IDAS v6.1.1}}, institution={LLNL}, number={UCRL-SM-234051}, year = 2024 @@ -105,7 +105,7 @@ @techreport{idas_ug % @techreport{idas_ex, author={R. Serban and A. C. Hindmarsh}, -title={{Example Programs for IDAS v6.1.0}}, +title={{Example Programs for IDAS v6.1.1}}, institution={LLNL}, number={LLNL-TR-437091}, year = 2024 @@ -115,7 +115,7 @@ @techreport{idas_ex % @techreport{kinsol_ug, author={A. M. Collier and A. C. Hindmarsh and R. Serban and C.S. Woodward}, -title={{User Documentation for KINSOL v7.1.0}}, +title={{User Documentation for KINSOL v7.1.1}}, institution={LLNL}, number={UCRL-SM-208116}, year = 2024 @@ -125,7 +125,7 @@ @techreport{kinsol_ug % @techreport{kinsol_ex, author={A. M. Collier and R. 
Serban}, -title={{Example Programs for KINSOL v7.1.0}}, +title={{Example Programs for KINSOL v7.1.1}}, institution={LLNL}, number={UCRL-SM-208114}, year = 2024 diff --git a/doc/sundials/ug.tex b/doc/sundials/ug.tex index 721acd879d..aa3d25945c 100644 --- a/doc/sundials/ug.tex +++ b/doc/sundials/ug.tex @@ -59,29 +59,29 @@ %----- VERSIONS AND UCRL NUMBERS OF SUNDIALS CODES -\newcommand{\sunrelease}{v7.1.0} +\newcommand{\sunrelease}{v7.1.1} -\newcommand{\cvrelease}{v7.1.0} +\newcommand{\cvrelease}{v7.1.1} \newcommand{\cvucrlug}{UCRL-SM-208108} \newcommand{\cvucrlex}{UCRL-SM-208110} -\newcommand{\cvsrelease}{v7.1.0} +\newcommand{\cvsrelease}{v7.1.1} \newcommand{\cvsucrlug}{UCRL-SM-208111} \newcommand{\cvsucrlex}{UCRL-SM-208115} -\newcommand{\idarelease}{v7.1.0} +\newcommand{\idarelease}{v7.1.1} \newcommand{\idaucrlug}{UCRL-SM-208112} \newcommand{\idaucrlex}{UCRL-SM-208113} -\newcommand{\idasrelease}{v6.1.0} +\newcommand{\idasrelease}{v6.1.1} \newcommand{\idasucrlug}{UCRL-SM-234051} \newcommand{\idasucrlex}{LLNL-TR-437091} -\newcommand{\kinrelease}{v7.1.0} +\newcommand{\kinrelease}{v7.1.1} \newcommand{\kinucrlug}{UCRL-SM-208116} \newcommand{\kinucrlex}{UCRL-SM-208114} -\newcommand{\arkrelease}{v6.1.0} +\newcommand{\arkrelease}{v6.1.1} \newcommand{\arkucrlug}{LLNL-SM-668082} \newcommand{\arkucrlex}{????-??-??????} diff --git a/scripts/tarscript b/scripts/tarscript index 642dec13aa..8a95748e56 100755 --- a/scripts/tarscript +++ b/scripts/tarscript @@ -57,13 +57,13 @@ function print_usage # VERSION NUMBERS #--------------------------------------------------------- -SUN_VER="7.1.0" -CV_VER="7.1.0" -CVS_VER="7.1.0" -IDA_VER="7.1.0" -IDAS_VER="6.1.0" -KIN_VER="7.1.0" -ARK_VER="6.1.0" +SUN_VER="7.1.1" +CV_VER="7.1.1" +CVS_VER="7.1.1" +IDA_VER="7.1.1" +IDAS_VER="6.1.1" +KIN_VER="7.1.1" +ARK_VER="6.1.1" #--------------------------------------------------------- # Test if the script is executed from within its directory diff --git a/scripts/updateVersion.sh b/scripts/updateVersion.sh index e91e846bde..89d68697e0 100755 --- a/scripts/updateVersion.sh +++ b/scripts/updateVersion.sh @@ -20,7 +20,7 @@ # releases the label string is "". sun_major=${1:-7} sun_minor=${2:-1} -sun_patch=${3:-0} +sun_patch=${3:-1} sun_label=${4:-""} month=${5:-$(date +"%b")} year=${6:-$(date +"%Y")} diff --git a/src/arkode/README.md b/src/arkode/README.md index d71f6b43ef..8ae6ccd1cc 100644 --- a/src/arkode/README.md +++ b/src/arkode/README.md @@ -1,5 +1,5 @@ # ARKODE -### Version 6.1.0 (Jun 2024) +### Version 6.1.1 (Jun 2024) **Daniel R. Reynolds, Department of Mathematics, SMU** @@ -44,8 +44,8 @@ the "SUNDIALS Release History" appendix of the ARKODE User Guide. ## References * D. R. Reynolds, D. J. Gardner, C. S. Woodward, and C. J. Balos, - "User Documentation for ARKODE v6.1.0," LLNL technical report + "User Documentation for ARKODE v6.1.1," LLNL technical report LLNL-SM-668082, Jun 2024. -* D. R. Reynolds, "Example Programs for ARKODE v6.1.0," Technical Report, +* D. R. Reynolds, "Example Programs for ARKODE v6.1.1," Technical Report, Southern Methodist University Center for Scientific Computation, Jun 2024. diff --git a/src/cvode/README.md b/src/cvode/README.md index 7e17d2a68c..04e3c19f74 100644 --- a/src/cvode/README.md +++ b/src/cvode/README.md @@ -1,5 +1,5 @@ # CVODE -### Version 7.1.0 (Jun 2024) +### Version 7.1.1 (Jun 2024) **Alan C. Hindmarsh, Radu Serban, Cody J. Balos, David J. Gardner, and Carol S. 
Woodward, Center for Applied Scientific Computing, LLNL** @@ -47,10 +47,10 @@ the "SUNDIALS Release History" appendix of the CVODE User Guide. ## References * A. C. Hindmarsh, R. Serban, C. J. Balos, D. J. Gardner, D. R. Reynolds - and C. S. Woodward, "User Documentation for CVODE v7.1.0," + and C. S. Woodward, "User Documentation for CVODE v7.1.1," LLNL technical report UCRL-SM-208108, Jun 2024. -* A. C. Hindmarsh and R. Serban, "Example Programs for CVODE v7.1.0," +* A. C. Hindmarsh and R. Serban, "Example Programs for CVODE v7.1.1," LLNL technical report UCRL-SM-208110, Jun 2024. * S.D. Cohen and A.C. Hindmarsh, "CVODE, a Stiff/nonstiff ODE Solver in C," diff --git a/src/cvodes/README.md b/src/cvodes/README.md index 8e4259efdd..0c4171ee58 100644 --- a/src/cvodes/README.md +++ b/src/cvodes/README.md @@ -1,5 +1,5 @@ # CVODES -### Version 7.1.0 (Jun 2024) +### Version 7.1.1 (Jun 2024) **Alan C. Hindmarsh, Radu Serban, Cody J. Balos, David J. Gardner, and Carol S. Woodward, Center for Applied Scientific Computing, LLNL** @@ -44,10 +44,10 @@ the "SUNDIALS Release History" appendix of the CVODES User Guide. ## References * A. C. Hindmarsh, R. Serban, C. J. Balos, D. J. Gardner, D. R. Reynolds - and C. S. Woodward, "User Documentation for CVODES v7.1.0," + and C. S. Woodward, "User Documentation for CVODES v7.1.1," LLNL technical report UCRL-SM-208111, Jun 2024. -* A. C. Hindmarsh and R. Serban, "Example Programs for CVODES v7.1.0," +* A. C. Hindmarsh and R. Serban, "Example Programs for CVODES v7.1.1," LLNL technical report UCRL-SM-208115, Jun 2024. * R. Serban and A. C. Hindmarsh, "CVODES: the Sensitivity-Enabled ODE diff --git a/src/ida/README.md b/src/ida/README.md index 086ea1c262..3e668d9e42 100644 --- a/src/ida/README.md +++ b/src/ida/README.md @@ -1,5 +1,5 @@ # IDA -### Version 7.1.0 (Jun 2024) +### Version 7.1.1 (Jun 2024) **Alan C. Hindmarsh, Radu Serban, Cody J. Balos, David J. Gardner, and Carol S. Woodward, Center for Applied Scientific Computing, LLNL** @@ -47,10 +47,10 @@ the "SUNDIALS Release History" appendix of the IDA User Guide. ## References * A. C. Hindmarsh, R. Serban, C. J. Balos, D. J. Gardner, D. R. Reynolds - and C. S. Woodward, "User Documentation for IDA v7.1.0," + and C. S. Woodward, "User Documentation for IDA v7.1.1," LLNL technical report UCRL-SM-208112, Jun 2024. -* A. C. Hindmarsh, R. Serban, and A. Collier, "Example Programs for IDA v7.1.0," +* A. C. Hindmarsh, R. Serban, and A. Collier, "Example Programs for IDA v7.1.1," LLNL technical report UCRL-SM-208113, Jun 2024. * A. C. Hindmarsh, P. N. Brown, K. E. Grant, S. L. Lee, R. Serban, diff --git a/src/idas/README.md b/src/idas/README.md index 0b34eb22e7..db24f4c494 100644 --- a/src/idas/README.md +++ b/src/idas/README.md @@ -1,5 +1,5 @@ # IDAS -### Version 6.1.0 (Jun 2024) +### Version 6.1.1 (Jun 2024) **Radu Serban, Cosmin Petra, Alan C. Hindmarsh, Cody J. Balos, David J. Gardner, and Carol S. Woodward, Center for Applied Scientific Computing, LLNL** @@ -43,10 +43,10 @@ the "SUNDIALS Release History" appendix of the IDAS User Guide. ## References * R. Serban, C. Petra, A. C. Hindmarsh, C. J. Balos, D. J. Gardner, - D. R. Reynolds and C. S. Woodward, "User Documentation for IDAS v6.1.0," + D. R. Reynolds and C. S. Woodward, "User Documentation for IDAS v6.1.1," LLNL technical report UCRL-SM-234051, Jun 2024. -* R. Serban and A.C. Hindmarsh, "Example Programs for IDAS v6.1.0," +* R. Serban and A.C. Hindmarsh, "Example Programs for IDAS v6.1.1," LLNL technical report LLNL-TR-437091, Jun 2024. * A. C. 
Hindmarsh, P. N. Brown, K. E. Grant, S. L. Lee, R. Serban, diff --git a/src/kinsol/README.md b/src/kinsol/README.md index 84678f7713..37d11bec6d 100644 --- a/src/kinsol/README.md +++ b/src/kinsol/README.md @@ -1,5 +1,5 @@ # KINSOL -### Version 7.1.0 (Jun 2024) +### Version 7.1.1 (Jun 2024) **Alan C. Hindmarsh, Radu Serban, Cody J. Balos, David J. Gardner, and Carol S. Woodward, Center for Applied Scientific Computing, LLNL** @@ -48,10 +48,10 @@ the "SUNDIALS Release History" appendix of the KINSOL User Guide. * A. C. Hindmarsh, R. Serban, C. J. Balos, D. J. Gardner, D. R. Reynolds and C. S. Woodward, - "User Documentation for KINSOL v7.1.0," LLNL technical report + "User Documentation for KINSOL v7.1.1," LLNL technical report UCRL-SM-208116, Jun 2024. -* A. M. Collier and R. Serban, "Example Programs for KINSOL v7.1.0," +* A. M. Collier and R. Serban, "Example Programs for KINSOL v7.1.1," LLNL technical report UCRL-SM-208114, Jun 2024. * A. C. Hindmarsh, P. N. Brown, K. E. Grant, S. L. Lee, R. Serban, From c0555cfba3d85a86a6e39fcd77c17963b4f1a292 Mon Sep 17 00:00:00 2001 From: David Gardner Date: Wed, 26 Jun 2024 14:57:35 -0700 Subject: [PATCH 6/8] Maintenance: Start recent changes for next release (#532) --- CHANGELOG.md | 13 ++++++++++++- doc/shared/Changelog.rst | 10 +++++++++- doc/shared/RecentChanges.rst | 6 +++++- doc/shared/sundials_vars.py | 2 +- 4 files changed, 27 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index eee9a428a5..141b16720c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,10 +1,21 @@ # SUNDIALS Changelog +## Changes to SUNDIALS in release X.Y.Z + +### Major Features + +### New Features and Enhancements + +### Bug Fixes + +### Deprecation Notices + ## Changes to SUNDIALS in release 7.1.1 ### Bug Fixes -Fixed a [bug](https://github.com/LLNL/sundials/pull/523) in v7.1.0 with the SYCL N_Vector `N_VSpace` function. +Fixed a [bug](https://github.com/LLNL/sundials/pull/523) in v7.1.0 with the SYCL +N_Vector `N_VSpace` function. ## Changes to SUNDIALS in release 7.1.0 diff --git a/doc/shared/Changelog.rst b/doc/shared/Changelog.rst index 2ce7b9c951..5bf5c04203 100644 --- a/doc/shared/Changelog.rst +++ b/doc/shared/Changelog.rst @@ -21,11 +21,19 @@ Changelog .. SED_REPLACEMENT_KEY -Changes to SUNDIALS in release 7.1.1 +Changes to SUNDIALS in release X.Y.Z ==================================== .. include:: RecentChanges_link.rst +Changes to SUNDIALS in release 7.1.1 +==================================== + +**Bug Fixes** + +Fixed a `bug `_ in v7.1.0 with the +SYCL N_Vector ``N_VSpace`` function. + Changes to SUNDIALS in release 7.1.0 ==================================== diff --git a/doc/shared/RecentChanges.rst b/doc/shared/RecentChanges.rst index 0340eed7c8..4f1514700e 100644 --- a/doc/shared/RecentChanges.rst +++ b/doc/shared/RecentChanges.rst @@ -1,3 +1,7 @@ +**Major Features** + +**New Features and Enhancements** + **Bug Fixes** -Fixed a `bug `_ in v7.1.0 with the SYCL N_Vector ``N_VSpace`` function. 
+**Deprecation Notices** diff --git a/doc/shared/sundials_vars.py b/doc/shared/sundials_vars.py index 7fdf28446e..4c5f76c563 100644 --- a/doc/shared/sundials_vars.py +++ b/doc/shared/sundials_vars.py @@ -9,7 +9,7 @@ # SPDX-License-Identifier: BSD-3-Clause # SUNDIALS Copyright End # ---------------------------------------------------------------- -doc_version = 'v7.1.1' +doc_version = 'develop' sundials_version = 'v7.1.1' arkode_version = 'v6.1.1' cvode_version = 'v7.1.1' From 0749c816594668e49b6a08f196b49c96256ba6e1 Mon Sep 17 00:00:00 2001 From: David Gardner Date: Wed, 26 Jun 2024 16:16:52 -0700 Subject: [PATCH 7/8] Bugfix: Correct typo in example comment (#531) --- examples/arkode/C_serial/ark_reaction_diffusion_mri.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/arkode/C_serial/ark_reaction_diffusion_mri.c b/examples/arkode/C_serial/ark_reaction_diffusion_mri.c index 65a4ca73bf..56754bb625 100644 --- a/examples/arkode/C_serial/ark_reaction_diffusion_mri.c +++ b/examples/arkode/C_serial/ark_reaction_diffusion_mri.c @@ -135,7 +135,7 @@ int main(void) if (check_retval(&retval, "SetInitialCondition", 1)) { return 1; } /* - * Create the slow integrator and set options + * Create the fast integrator and set options */ /* Initialize the fast integrator. Specify the explicit fast right-hand side From b578eabccd77b7642b04ddda9d8530f05890d1b4 Mon Sep 17 00:00:00 2001 From: David Gardner Date: Sun, 30 Jun 2024 22:50:58 -0700 Subject: [PATCH 8/8] Maintenance: format python scripts with black (#534) --- .github/workflows/check-clang-format.yml | 6 + .../scripts/compare_error.py | 83 +- .../scripts/compute_error.py | 77 +- .../scripts/make_plots.py | 305 +-- .../scripts/pickle_solution_output.py | 64 +- .../plot_nvector_performance_results.py | 345 ++-- .../plot_nvector_performance_speedup.py | 222 ++- .../developers/style_guide/SourceCode.rst | 24 +- .../arkode/CXX_parallel/plot_brusselator1D.py | 63 +- examples/arkode/CXX_parallel/plot_heat2D_p.py | 85 +- examples/arkode/CXX_parhyp/plot_heat2D_p.py | 85 +- examples/arkode/CXX_serial/plot_heat2D.py | 53 +- examples/arkode/CXX_serial/plot_sol.py | 26 +- examples/arkode/CXX_xbraid/plot_heat2D.py | 51 +- .../arkode/C_manyvector/plot_brusselator1D.py | 40 +- .../arkode/C_openmp/plot_brusselator1D.py | 40 +- .../arkode/C_parallel/plot_brusselator1D.py | 63 +- examples/arkode/C_serial/ark_kepler_plot.py | 74 +- .../arkode/C_serial/plot_brusselator1D.py | 40 +- .../arkode/C_serial/plot_brusselator1D_FEM.py | 48 +- examples/arkode/C_serial/plot_heat1D.py | 20 +- examples/arkode/C_serial/plot_heat1D_adapt.py | 32 +- examples/arkode/C_serial/plot_sol.py | 26 +- examples/arkode/C_serial/plot_sol_log.py | 26 +- examples/cvode/CXX_parallel/plot_heat2D_p.py | 81 +- examples/cvode/CXX_parhyp/plot_heat2D_p.py | 81 +- examples/cvode/CXX_serial/plot_heat2D.py | 49 +- examples/cvode/serial/plot_cvParticle.py | 82 +- examples/cvode/serial/plot_cvPendulum.py | 63 +- examples/cvodes/serial/plot_cvsParticle.py | 82 +- examples/cvodes/serial/plot_cvsPendulum.py | 63 +- examples/utilities/plot_data_2d.py | 677 ++++--- examples/utilities/plot_data_time_series.py | 97 +- scripts/format.sh | 2 + test/compare_benchmarks.py | 97 +- test/compare_examples.py | 86 +- test/config_cmake.py | 1732 ++++++++++++----- test/notify.py | 53 +- test/test_install.py | 76 +- 39 files changed, 3220 insertions(+), 1999 deletions(-) diff --git a/.github/workflows/check-clang-format.yml b/.github/workflows/check-clang-format.yml index 2a87a2a35d..24f44b45ee 100644 --- 
a/.github/workflows/check-clang-format.yml +++ b/.github/workflows/check-clang-format.yml @@ -15,6 +15,12 @@ jobs: apt update apt install -y git python3-pip + - name: Install black + run: pip install black + + - name: Print black version + run: black --version + - name: Install fprettify run: pip install fprettify diff --git a/benchmarks/advection_reaction_3D/scripts/compare_error.py b/benchmarks/advection_reaction_3D/scripts/compare_error.py index 2dc66d23fa..4dd1ff7ee1 100755 --- a/benchmarks/advection_reaction_3D/scripts/compare_error.py +++ b/benchmarks/advection_reaction_3D/scripts/compare_error.py @@ -15,7 +15,8 @@ import glob import sys import matplotlib -matplotlib.use('Agg') + +matplotlib.use("Agg") from mpl_toolkits.mplot3d import Axes3D import matplotlib.pyplot as plt import pandas as pd @@ -25,57 +26,57 @@ # load pickled data def load_data(file): data = np.load(file) - m = data['mesh'] - t = data['t'] - u = data['u'] - v = data['v'] - w = data['w'] + m = data["mesh"] + t = data["t"] + u = data["u"] + v = data["v"] + w = data["w"] - hx = m[0,1] - m[0,0] - hy = m[1,1] - m[1,0] - hz = m[2,1] - m[2,0] + hx = m[0, 1] - m[0, 0] + hy = m[1, 1] - m[1, 0] + hz = m[2, 1] - m[2, 0] - return { 'm': m, 'h': (hx,hy,hz), 't': t, 'u': u, 'v': v, 'w': w } + return {"m": m, "h": (hx, hy, hz), "t": t, "u": u, "v": v, "w": w} # grid function norm def norm_3Dgrid(h, x, q=1): - hx,hy,hz = h + hx, hy, hz = h s = np.shape(x) - return (hx*hy*hz*np.sum(np.abs(x)**q, axis=(1,2,3)))**(1./q) + return (hx * hy * hz * np.sum(np.abs(x) ** q, axis=(1, 2, 3))) ** (1.0 / q) # load data files -np111 = load_data('np-111/output-with-h-8.33e-02.npz') -np211 = load_data('np-211/output-with-h-8.33e-02.npz') -np311 = load_data('np-311/output-with-h-8.33e-02.npz') -np131 = load_data('np-131/output-with-h-8.33e-02.npz') -np113 = load_data('np-113/output-with-h-8.33e-02.npz') -np911 = load_data('np-911/output-with-h-8.33e-02.npz') +np111 = load_data("np-111/output-with-h-8.33e-02.npz") +np211 = load_data("np-211/output-with-h-8.33e-02.npz") +np311 = load_data("np-311/output-with-h-8.33e-02.npz") +np131 = load_data("np-131/output-with-h-8.33e-02.npz") +np113 = load_data("np-113/output-with-h-8.33e-02.npz") +np911 = load_data("np-911/output-with-h-8.33e-02.npz") # np133 = load_data('np-133/output-with-h-8.33e-02.npz') -np313 = load_data('np-313/output-with-h-8.33e-02.npz') -np331 = load_data('np-331/output-with-h-8.33e-02.npz') -np333 = load_data('np-333/output-with-h-8.33e-02.npz') +np313 = load_data("np-313/output-with-h-8.33e-02.npz") +np331 = load_data("np-331/output-with-h-8.33e-02.npz") +np333 = load_data("np-333/output-with-h-8.33e-02.npz") # np666 = load_data('np-666/output-with-h-8.33e-02.npz') -for component in ['u', 'v', 'w']: +for component in ["u", "v", "w"]: # Reference solution ref = np111[component] # Now compute E(h) = ||U(h) - \bar{U}(h)|| using the grid-function norm - E_np211 = norm_3Dgrid(np211['h'], np211[component] - ref) - E_np311 = norm_3Dgrid(np311['h'], np311[component] - ref) - E_np131 = norm_3Dgrid(np131['h'], np131[component] - ref) - E_np113 = norm_3Dgrid(np113['h'], np113[component] - ref) - E_np911 = norm_3Dgrid(np911['h'], np911[component] - ref) + E_np211 = norm_3Dgrid(np211["h"], np211[component] - ref) + E_np311 = norm_3Dgrid(np311["h"], np311[component] - ref) + E_np131 = norm_3Dgrid(np131["h"], np131[component] - ref) + E_np113 = norm_3Dgrid(np113["h"], np113[component] - ref) + E_np911 = norm_3Dgrid(np911["h"], np911[component] - ref) # E_np133 = norm_3Dgrid(np133['h'], 
np133[component] - ref) - E_np313 = norm_3Dgrid(np313['h'], np313[component] - ref) - E_np331 = norm_3Dgrid(np331['h'], np331[component] - ref) - E_np333 = norm_3Dgrid(np333['h'], np333[component] - ref) + E_np313 = norm_3Dgrid(np313["h"], np313[component] - ref) + E_np331 = norm_3Dgrid(np331["h"], np331[component] - ref) + E_np333 = norm_3Dgrid(np333["h"], np333[component] - ref) # E_np666 = norm_3Dgrid(np666['h'], np666[component] - ref) # Plot error across time - X, Y = np.meshgrid(np111['m'][0,:], np111['t']) + X, Y = np.meshgrid(np111["m"][0, :], np111["t"]) # fig = plt.figure() # ax = plt.subplot(311, projection='3d') # ax.plot_surface(X, Y, np.abs(np911[component][:,:,0,0] - ref[:,:,0,0])) @@ -83,17 +84,17 @@ def norm_3Dgrid(h, x, q=1): # ax.plot_surface(X, Y, np.abs(np911[component][:,0,:,0] - ref[:,0,:,0])) # ax = plt.subplot(313, projection='3d') # ax.plot_surface(X, Y, np.abs(np911[component][:,0,0,:] - ref[:,0,0,:])) - plt.plot(np111['t'], E_np211) - plt.plot(np111['t'], E_np131) - plt.plot(np111['t'], E_np113) - plt.plot(np111['t'], E_np911) + plt.plot(np111["t"], E_np211) + plt.plot(np111["t"], E_np131) + plt.plot(np111["t"], E_np113) + plt.plot(np111["t"], E_np911) # plt.plot(np111['t'], E_np133) - plt.plot(np111['t'], E_np313) - plt.plot(np111['t'], E_np331) - plt.plot(np111['t'], E_np333) + plt.plot(np111["t"], E_np313) + plt.plot(np111["t"], E_np331) + plt.plot(np111["t"], E_np333) # plt.plot(np111['t'], E_np666) # plt.legend(['2 1 1', '3 1 1', '1 3 3', '3 1 3', '3 3 1', '3 3 3', '6 6 6']) # plt.legend(['3 1 1', '1 3 1', '1 1 3', '9 1 1', '1 3 3', '3 1 3', '3 3 1']) - plt.ylabel('||E(hx,hy,hz)||') - plt.xlabel('time') - plt.savefig('compare-error-plot-%s.png' % component) + plt.ylabel("||E(hx,hy,hz)||") + plt.xlabel("time") + plt.savefig("compare-error-plot-%s.png" % component) diff --git a/benchmarks/advection_reaction_3D/scripts/compute_error.py b/benchmarks/advection_reaction_3D/scripts/compute_error.py index 2c01826b29..85f151ed59 100755 --- a/benchmarks/advection_reaction_3D/scripts/compute_error.py +++ b/benchmarks/advection_reaction_3D/scripts/compute_error.py @@ -15,7 +15,8 @@ import glob import sys import matplotlib -matplotlib.use('Agg') + +matplotlib.use("Agg") import matplotlib.pyplot as plt import pandas as pd import numpy as np @@ -24,65 +25,67 @@ # load pickled data def load_data(file): data = np.load(file) - m = data['mesh'] - t = data['t'] - u = data['u'] - v = data['v'] - w = data['w'] + m = data["mesh"] + t = data["t"] + u = data["u"] + v = data["v"] + w = data["w"] - hx = m[0,1] - m[0,0] - hy = m[1,1] - m[1,0] - hz = m[2,1] - m[2,0] + hx = m[0, 1] - m[0, 0] + hy = m[1, 1] - m[1, 0] + hz = m[2, 1] - m[2, 0] - return { 'm': m, 'h': (hx,hy,hz), 't': t, 'u': u, 'v': v, 'w': w } + return {"m": m, "h": (hx, hy, hz), "t": t, "u": u, "v": v, "w": w} # grid function norm def norm_3Dgrid(h, x, q=1): - hx,hy,hz = h - return (hx*hy*hz*np.sum(np.abs(x)**q, axis=(1,2,3)))**(1/q) + hx, hy, hz = h + return (hx * hy * hz * np.sum(np.abs(x) ** q, axis=(1, 2, 3))) ** (1 / q) # computer order of accuracy p def calc_order(h1, Eh1, h2, Eh2): - return np.log( Eh1/Eh2 ) / np.log( np.prod(h1)/np.prod(h2) ) + return np.log(Eh1 / Eh2) / np.log(np.prod(h1) / np.prod(h2)) # load data files -h_over_8 = load_data('middle-h/output-with-h-1.04e-02.npz') -h_over_4 = load_data('large-h/output-with-h-2.08e-02.npz') +h_over_8 = load_data("middle-h/output-with-h-1.04e-02.npz") +h_over_4 = load_data("large-h/output-with-h-2.08e-02.npz") # h_over_2 = 
load_data('larger-h/output-with-h-4.16e-02.npz') -h_over_1 = load_data('largest-h/output-with-h-8.33e-02.npz') +h_over_1 = load_data("largest-h/output-with-h-8.33e-02.npz") -for component in ['u', 'v', 'w']: +for component in ["u", "v", "w"]: # Restrict reference to the coarsest grid - ref = h_over_8[component][:,::8,::8,::8] + ref = h_over_8[component][:, ::8, ::8, ::8] # Now compute E(h) = ||U(h) - \bar{U}(h)|| using the grid-function norm - Eh_over_4 = norm_3Dgrid(h_over_4['h'], h_over_4[component][:,::4,::4,::4] - ref) - Eh_over_1 = norm_3Dgrid(h_over_1['h'], h_over_1[component][:,:,:,:] - ref) + Eh_over_4 = norm_3Dgrid(h_over_4["h"], h_over_4[component][:, ::4, ::4, ::4] - ref) + Eh_over_1 = norm_3Dgrid(h_over_1["h"], h_over_1[component][:, :, :, :] - ref) # Compute order p as in O(h^p) - p = calc_order(h_over_1['h'], Eh_over_1, h_over_4['h'], Eh_over_4) - print('min p for %s component: %.4f' % (component, np.min(p))) + p = calc_order(h_over_1["h"], Eh_over_1, h_over_4["h"], Eh_over_4) + print("min p for %s component: %.4f" % (component, np.min(p))) # Plot error across time plt.figure() - plt.plot(h_over_8['t'], Eh_over_4, 'r-') - plt.plot(h_over_8['t'], Eh_over_1, 'b-') - plt.ylabel('||E(hx,hy,hz)||') - plt.xlabel('time') - plt.savefig('error-in-time-plot-%s.png' % component) + plt.plot(h_over_8["t"], Eh_over_4, "r-") + plt.plot(h_over_8["t"], Eh_over_1, "b-") + plt.ylabel("||E(hx,hy,hz)||") + plt.xlabel("time") + plt.savefig("error-in-time-plot-%s.png" % component) # Plot error norm with respect to h plt.figure() - x = np.array([np.prod(h_over_4['h']), np.prod(h_over_1['h'])]) - plt.plot(x, x, 'k-') - plt.plot(x, x**2, 'k-') - plt.plot(x, [np.linalg.norm(Eh_over_4, np.Inf), np.linalg.norm(Eh_over_1, np.Inf)], 'r-') - plt.legend(['1st order', '2nd order', 'actual']) - plt.ylabel('|| ||E(hx,hy,hz)|| ||_inf') - plt.xlabel('hx * hy * hz') - plt.yscale('log') - plt.xscale('log') - plt.savefig('error-plot-%s.png' % component) + x = np.array([np.prod(h_over_4["h"]), np.prod(h_over_1["h"])]) + plt.plot(x, x, "k-") + plt.plot(x, x**2, "k-") + plt.plot( + x, [np.linalg.norm(Eh_over_4, np.Inf), np.linalg.norm(Eh_over_1, np.Inf)], "r-" + ) + plt.legend(["1st order", "2nd order", "actual"]) + plt.ylabel("|| ||E(hx,hy,hz)|| ||_inf") + plt.xlabel("hx * hy * hz") + plt.yscale("log") + plt.xscale("log") + plt.savefig("error-plot-%s.png" % component) diff --git a/benchmarks/advection_reaction_3D/scripts/make_plots.py b/benchmarks/advection_reaction_3D/scripts/make_plots.py index 69a0168d79..a4dfa87840 100755 --- a/benchmarks/advection_reaction_3D/scripts/make_plots.py +++ b/benchmarks/advection_reaction_3D/scripts/make_plots.py @@ -22,218 +22,265 @@ # ------------------------------------------------------------------------------ + # utility functions def parallel_coords(rank): - if (rank == 0): + if rank == 0: return [0, 0, 0] - if (rank == 1): + if rank == 1: return [0, 0, 1] - if (rank == 2): + if rank == 2: return [0, 1, 0] - if (rank == 3): + if rank == 3: return [0, 1, 1] - if (rank == 4): + if rank == 4: return [1, 0, 0] - if (rank == 5): + if rank == 5: return [1, 0, 1] - if (rank == 6): + if rank == 6: return [1, 1, 0] - if (rank == 7): + if rank == 7: return [1, 1, 1] -def xslice(u,it,ix): - return u[it,ix,:,:] -def yslice(u,it,iy): - return u[it,:,iy,:] +def xslice(u, it, ix): + return u[it, ix, :, :] + + +def yslice(u, it, iy): + return u[it, :, iy, :] + + +def zslice(u, it, iz): + return u[it, :, :, iz] -def zslice(u,it,iz): - return u[it,:,:,iz] -def xproj(u,it): - return 
np.average(u[it,:,:,:], axis=0) +def xproj(u, it): + return np.average(u[it, :, :, :], axis=0) -def yproj(u,it): - return np.average(u[it,:,:,:], axis=1) -def zproj(u,it): - return np.average(u[it,:,:,:], axis=2) +def yproj(u, it): + return np.average(u[it, :, :, :], axis=1) -def myplot(axis, X, Y, Z, xlabel='none', ylabel='none'): + +def zproj(u, it): + return np.average(u[it, :, :, :], axis=2) + + +def myplot(axis, X, Y, Z, xlabel="none", ylabel="none"): frame = axis.contourf(X, Y, Z) plt.colorbar(frame, ax=axis) - if (xlabel != 'none'): + if xlabel != "none": axis.set_xlabel(xlabel) - if (ylabel != 'none'): + if ylabel != "none": axis.set_ylabel(ylabel) - # read time mesh times = np.loadtxt("t.000000.txt") nt = times.size # read spatial mesh mesh = np.loadtxt("mesh.txt", dtype=float) -x = mesh[0,:] -y = mesh[1,:] -z = mesh[2,:] +x = mesh[0, :] +y = mesh[1, :] +z = mesh[2, :] nx = x.size ny = y.size nz = z.size # ensure that the run used exactly 1 or 8 MPI ranks for i in range(9): - if (exists("u.00000" + str(i) + ".txt" ) and - not exists("u.00000" + str(i+1) + ".txt" )): - nprocs = i+1 -if ((nprocs != 1) and (nprocs != 8)): + if exists("u.00000" + str(i) + ".txt") and not exists( + "u.00000" + str(i + 1) + ".txt" + ): + nprocs = i + 1 +if (nprocs != 1) and (nprocs != 8): print("make_plots.py error: run must have used either 1 or 8 MPI ranks") exit() # load data for run -if (nprocs == 1): - u = np.zeros((nt,nx,ny,nz), dtype=float) - v = np.zeros((nt,nx,ny,nz), dtype=float) - w = np.zeros((nt,nx,ny,nz), dtype=float) +if nprocs == 1: + u = np.zeros((nt, nx, ny, nz), dtype=float) + v = np.zeros((nt, nx, ny, nz), dtype=float) + w = np.zeros((nt, nx, ny, nz), dtype=float) udata = np.loadtxt("u.000000.txt") vdata = np.loadtxt("v.000000.txt") wdata = np.loadtxt("w.000000.txt") - if (nt != udata.shape[0]): + if nt != udata.shape[0]: print("make_plots.py error: mesh and data have incompatible sizes") exit() - if (nx*ny*nz != udata.shape[1]): + if nx * ny * nz != udata.shape[1]: print("make_plots.py error: mesh and data have incompatible sizes") exit() for it in range(nt): - u[it,:,:,:] = np.reshape(udata[it,:], (nx,ny,nz), order='C') - v[it,:,:,:] = np.reshape(vdata[it,:], (nx,ny,nz), order='C') - w[it,:,:,:] = np.reshape(wdata[it,:], (nx,ny,nz), order='C') + u[it, :, :, :] = np.reshape(udata[it, :], (nx, ny, nz), order="C") + v[it, :, :, :] = np.reshape(vdata[it, :], (nx, ny, nz), order="C") + w[it, :, :, :] = np.reshape(wdata[it, :], (nx, ny, nz), order="C") else: - u = np.zeros((nt,nx,ny,nz), dtype=float) - v = np.zeros((nt,nx,ny,nz), dtype=float) - w = np.zeros((nt,nx,ny,nz), dtype=float) - nxl = nx//2 - nyl = ny//2 - nzl = nz//2 + u = np.zeros((nt, nx, ny, nz), dtype=float) + v = np.zeros((nt, nx, ny, nz), dtype=float) + w = np.zeros((nt, nx, ny, nz), dtype=float) + nxl = nx // 2 + nyl = ny // 2 + nzl = nz // 2 for ip in range(8): udata = np.loadtxt("u.00000" + str(ip) + ".txt") vdata = np.loadtxt("v.00000" + str(ip) + ".txt") wdata = np.loadtxt("w.00000" + str(ip) + ".txt") - if (nt != udata.shape[0]): + if nt != udata.shape[0]: print("make_plots.py error: mesh and data have incompatible sizes") exit() - if (nxl*nyl*nzl != udata.shape[1]): + if nxl * nyl * nzl != udata.shape[1]: print("make_plots.py error: mesh and data have incompatible sizes") exit() coords = parallel_coords(ip) - ilo = coords[0]*nxl - ihi = (coords[0]+1)*nxl - jlo = coords[1]*nyl - jhi = (coords[1]+1)*nyl - klo = coords[2]*nzl - khi = (coords[2]+1)*nzl + ilo = coords[0] * nxl + ihi = (coords[0] + 1) * nxl + jlo = 
coords[1] * nyl + jhi = (coords[1] + 1) * nyl + klo = coords[2] * nzl + khi = (coords[2] + 1) * nzl for it in range(nt): - u[it,ilo:ihi,jlo:jhi,klo:khi] = np.reshape(udata[it,:], (nxl,nyl,nzl), order='C') - v[it,ilo:ihi,jlo:jhi,klo:khi] = np.reshape(vdata[it,:], (nxl,nyl,nzl), order='C') - w[it,ilo:ihi,jlo:jhi,klo:khi] = np.reshape(wdata[it,:], (nxl,nyl,nzl), order='C') + u[it, ilo:ihi, jlo:jhi, klo:khi] = np.reshape( + udata[it, :], (nxl, nyl, nzl), order="C" + ) + v[it, ilo:ihi, jlo:jhi, klo:khi] = np.reshape( + vdata[it, :], (nxl, nyl, nzl), order="C" + ) + w[it, ilo:ihi, jlo:jhi, klo:khi] = np.reshape( + wdata[it, :], (nxl, nyl, nzl), order="C" + ) # set meshgrid objects -xy0,xy1 = np.meshgrid(x, y) -yz0,yz1 = np.meshgrid(y, z) -xz0,xz1 = np.meshgrid(x, z) +xy0, xy1 = np.meshgrid(x, y) +yz0, yz1 = np.meshgrid(y, z) +xz0, xz1 = np.meshgrid(x, z) # generate plots sliceidx = 25 tslice = [0, 5, 10] -figsize = (9,7) +figsize = (9, 7) # xy slices at various times plt.figure(1) -fig, ((ax1,ax2,ax3), (ax4,ax5,ax6), (ax7,ax8,ax9)) = plt.subplots(3, 3, sharex=True, sharey=True, figsize=figsize) -myplot(ax1, xy0, xy1, zslice(u,tslice[0],sliceidx), ylabel = 'u') -myplot(ax2, xy0, xy1, zslice(u,tslice[1],sliceidx)) -myplot(ax3, xy0, xy1, zslice(u,tslice[2],sliceidx)) -myplot(ax4, xy0, xy1, zslice(v,tslice[0],sliceidx), ylabel = 'v') -myplot(ax5, xy0, xy1, zslice(v,tslice[1],sliceidx)) -myplot(ax6, xy0, xy1, zslice(v,tslice[2],sliceidx)) -myplot(ax7, xy0, xy1, zslice(w,tslice[0],sliceidx), ylabel = 'w', xlabel = 't = ' + str(times[0])) -myplot(ax8, xy0, xy1, zslice(w,tslice[1],sliceidx), xlabel = 't = ' + str(times[1])) -myplot(ax9, xy0, xy1, zslice(w,tslice[2],sliceidx), xlabel = 't = ' + str(times[2])) -plt.savefig('xy-slices.png') +fig, ((ax1, ax2, ax3), (ax4, ax5, ax6), (ax7, ax8, ax9)) = plt.subplots( + 3, 3, sharex=True, sharey=True, figsize=figsize +) +myplot(ax1, xy0, xy1, zslice(u, tslice[0], sliceidx), ylabel="u") +myplot(ax2, xy0, xy1, zslice(u, tslice[1], sliceidx)) +myplot(ax3, xy0, xy1, zslice(u, tslice[2], sliceidx)) +myplot(ax4, xy0, xy1, zslice(v, tslice[0], sliceidx), ylabel="v") +myplot(ax5, xy0, xy1, zslice(v, tslice[1], sliceidx)) +myplot(ax6, xy0, xy1, zslice(v, tslice[2], sliceidx)) +myplot( + ax7, + xy0, + xy1, + zslice(w, tslice[0], sliceidx), + ylabel="w", + xlabel="t = " + str(times[0]), +) +myplot(ax8, xy0, xy1, zslice(w, tslice[1], sliceidx), xlabel="t = " + str(times[1])) +myplot(ax9, xy0, xy1, zslice(w, tslice[2], sliceidx), xlabel="t = " + str(times[2])) +plt.savefig("xy-slices.png") # yz slices at various times plt.figure(2) -fig, ((ax1,ax2,ax3), (ax4,ax5,ax6), (ax7,ax8,ax9)) = plt.subplots(3, 3, sharex=True, sharey=True, figsize=figsize) -myplot(ax1, yz0, yz1, xslice(u,tslice[0],sliceidx), ylabel = 'u') -myplot(ax2, yz0, yz1, xslice(u,tslice[1],sliceidx)) -myplot(ax3, yz0, yz1, xslice(u,tslice[2],sliceidx)) -myplot(ax4, yz0, yz1, xslice(v,tslice[0],sliceidx), ylabel = 'v') -myplot(ax5, yz0, yz1, xslice(v,tslice[1],sliceidx)) -myplot(ax6, yz0, yz1, xslice(v,tslice[2],sliceidx)) -myplot(ax7, yz0, yz1, xslice(w,tslice[0],sliceidx), ylabel = 'w', xlabel = 't = ' + str(times[0])) -myplot(ax8, yz0, yz1, xslice(w,tslice[1],sliceidx), xlabel = 't = ' + str(times[1])) -myplot(ax9, yz0, yz1, xslice(w,tslice[2],sliceidx), xlabel = 't = ' + str(times[2])) -plt.savefig('yz-slices.png') +fig, ((ax1, ax2, ax3), (ax4, ax5, ax6), (ax7, ax8, ax9)) = plt.subplots( + 3, 3, sharex=True, sharey=True, figsize=figsize +) +myplot(ax1, yz0, yz1, xslice(u, tslice[0], sliceidx), ylabel="u") 
+myplot(ax2, yz0, yz1, xslice(u, tslice[1], sliceidx)) +myplot(ax3, yz0, yz1, xslice(u, tslice[2], sliceidx)) +myplot(ax4, yz0, yz1, xslice(v, tslice[0], sliceidx), ylabel="v") +myplot(ax5, yz0, yz1, xslice(v, tslice[1], sliceidx)) +myplot(ax6, yz0, yz1, xslice(v, tslice[2], sliceidx)) +myplot( + ax7, + yz0, + yz1, + xslice(w, tslice[0], sliceidx), + ylabel="w", + xlabel="t = " + str(times[0]), +) +myplot(ax8, yz0, yz1, xslice(w, tslice[1], sliceidx), xlabel="t = " + str(times[1])) +myplot(ax9, yz0, yz1, xslice(w, tslice[2], sliceidx), xlabel="t = " + str(times[2])) +plt.savefig("yz-slices.png") # xz slices at various times plt.figure(3) -fig, ((ax1,ax2,ax3), (ax4,ax5,ax6), (ax7,ax8,ax9)) = plt.subplots(3, 3, sharex=True, sharey=True, figsize=figsize) -myplot(ax1, xz0, xz1, yslice(u,tslice[0],sliceidx), ylabel ='u') -myplot(ax2, xz0, xz1, yslice(u,tslice[1],sliceidx)) -myplot(ax3, xz0, xz1, yslice(u,tslice[2],sliceidx)) -myplot(ax4, xz0, xz1, yslice(v,tslice[0],sliceidx), ylabel = 'v') -myplot(ax5, xz0, xz1, yslice(v,tslice[1],sliceidx)) -myplot(ax6, xz0, xz1, yslice(v,tslice[2],sliceidx)) -myplot(ax7, xz0, xz1, yslice(w,tslice[0],sliceidx), ylabel= 'w', xlabel = 't = ' + str(times[0])) -myplot(ax8, xz0, xz1, yslice(w,tslice[1],sliceidx), xlabel ='t = ' + str(times[1])) -myplot(ax9, xz0, xz1, yslice(w,tslice[2],sliceidx), xlabel = 't = ' + str(times[2])) -plt.savefig('xz-slices.png') +fig, ((ax1, ax2, ax3), (ax4, ax5, ax6), (ax7, ax8, ax9)) = plt.subplots( + 3, 3, sharex=True, sharey=True, figsize=figsize +) +myplot(ax1, xz0, xz1, yslice(u, tslice[0], sliceidx), ylabel="u") +myplot(ax2, xz0, xz1, yslice(u, tslice[1], sliceidx)) +myplot(ax3, xz0, xz1, yslice(u, tslice[2], sliceidx)) +myplot(ax4, xz0, xz1, yslice(v, tslice[0], sliceidx), ylabel="v") +myplot(ax5, xz0, xz1, yslice(v, tslice[1], sliceidx)) +myplot(ax6, xz0, xz1, yslice(v, tslice[2], sliceidx)) +myplot( + ax7, + xz0, + xz1, + yslice(w, tslice[0], sliceidx), + ylabel="w", + xlabel="t = " + str(times[0]), +) +myplot(ax8, xz0, xz1, yslice(w, tslice[1], sliceidx), xlabel="t = " + str(times[1])) +myplot(ax9, xz0, xz1, yslice(w, tslice[2], sliceidx), xlabel="t = " + str(times[2])) +plt.savefig("xz-slices.png") # xy projection at various times plt.figure(4) -fig, ((ax1,ax2,ax3), (ax4,ax5,ax6), (ax7,ax8,ax9)) = plt.subplots(3, 3, sharex=True, sharey=True, figsize=figsize) -myplot(ax1, xy0, xy1, zproj(u,tslice[0]), ylabel = 'u') -myplot(ax2, xy0, xy1, zproj(u,tslice[1])) -myplot(ax3, xy0, xy1, zproj(u,tslice[2])) -myplot(ax4, xy0, xy1, zproj(v,tslice[0]), ylabel = 'v') -myplot(ax5, xy0, xy1, zproj(v,tslice[1])) -myplot(ax6, xy0, xy1, zproj(v,tslice[2])) -myplot(ax7, xy0, xy1, zproj(w,tslice[0]), ylabel = 'w', xlabel = 't = ' + str(times[0])) -myplot(ax8, xy0, xy1, zproj(w,tslice[1]), xlabel = 't = ' + str(times[1])) -myplot(ax9, xy0, xy1, zproj(w,tslice[2]), xlabel = 't = ' + str(times[2])) -plt.savefig('xy-projections.png') +fig, ((ax1, ax2, ax3), (ax4, ax5, ax6), (ax7, ax8, ax9)) = plt.subplots( + 3, 3, sharex=True, sharey=True, figsize=figsize +) +myplot(ax1, xy0, xy1, zproj(u, tslice[0]), ylabel="u") +myplot(ax2, xy0, xy1, zproj(u, tslice[1])) +myplot(ax3, xy0, xy1, zproj(u, tslice[2])) +myplot(ax4, xy0, xy1, zproj(v, tslice[0]), ylabel="v") +myplot(ax5, xy0, xy1, zproj(v, tslice[1])) +myplot(ax6, xy0, xy1, zproj(v, tslice[2])) +myplot(ax7, xy0, xy1, zproj(w, tslice[0]), ylabel="w", xlabel="t = " + str(times[0])) +myplot(ax8, xy0, xy1, zproj(w, tslice[1]), xlabel="t = " + str(times[1])) +myplot(ax9, xy0, xy1, zproj(w, tslice[2]), 
xlabel="t = " + str(times[2])) +plt.savefig("xy-projections.png") # yz projection at various times fig = plt.figure(5) -fig, ((ax1,ax2,ax3), (ax4,ax5,ax6), (ax7,ax8,ax9)) = plt.subplots(3, 3, sharex=True, sharey=True, figsize=figsize) -myplot(ax1, yz0, yz1, xproj(u,tslice[0]), ylabel = 'u') -myplot(ax2, yz0, yz1, xproj(u,tslice[1])) -myplot(ax3, yz0, yz1, xproj(u,tslice[2])) -myplot(ax4, yz0, yz1, xproj(v,tslice[0]), ylabel = 'v') -myplot(ax5, yz0, yz1, xproj(v,tslice[1])) -myplot(ax6, yz0, yz1, xproj(v,tslice[2])) -myplot(ax7, yz0, yz1, xproj(w,tslice[0]), ylabel = 'w', xlabel = 't = ' + str(times[0])) -myplot(ax8, yz0, yz1, xproj(w,tslice[1]), xlabel = 't = ' + str(times[1])) -myplot(ax9, yz0, yz1, xproj(w,tslice[2]), xlabel = 't = ' + str(times[2])) -plt.savefig('yz-projections.png') +fig, ((ax1, ax2, ax3), (ax4, ax5, ax6), (ax7, ax8, ax9)) = plt.subplots( + 3, 3, sharex=True, sharey=True, figsize=figsize +) +myplot(ax1, yz0, yz1, xproj(u, tslice[0]), ylabel="u") +myplot(ax2, yz0, yz1, xproj(u, tslice[1])) +myplot(ax3, yz0, yz1, xproj(u, tslice[2])) +myplot(ax4, yz0, yz1, xproj(v, tslice[0]), ylabel="v") +myplot(ax5, yz0, yz1, xproj(v, tslice[1])) +myplot(ax6, yz0, yz1, xproj(v, tslice[2])) +myplot(ax7, yz0, yz1, xproj(w, tslice[0]), ylabel="w", xlabel="t = " + str(times[0])) +myplot(ax8, yz0, yz1, xproj(w, tslice[1]), xlabel="t = " + str(times[1])) +myplot(ax9, yz0, yz1, xproj(w, tslice[2]), xlabel="t = " + str(times[2])) +plt.savefig("yz-projections.png") # xz projection at various times fig = plt.figure(6) -fig, ((ax1,ax2,ax3), (ax4,ax5,ax6), (ax7,ax8,ax9)) = plt.subplots(3, 3, sharex=True, sharey=True, figsize=figsize) -myplot(ax1, xz0, xz1, yproj(u,tslice[0]), ylabel = 'u') -myplot(ax2, xz0, xz1, yproj(u,tslice[1])) -myplot(ax3, xz0, xz1, yproj(u,tslice[2])) -myplot(ax4, xz0, xz1, yproj(v,tslice[0]), ylabel = 'v') -myplot(ax5, xz0, xz1, yproj(v,tslice[1])) -myplot(ax6, xz0, xz1, yproj(v,tslice[2])) -myplot(ax7, xz0, xz1, yproj(w,tslice[0]), ylabel = 'w', xlabel = 't = ' + str(times[0])) -myplot(ax8, xz0, xz1, yproj(w,tslice[1]), xlabel = 't = ' + str(times[1])) -myplot(ax9, xz0, xz1, yproj(w,tslice[2]), xlabel = 't = ' + str(times[2])) -plt.savefig('xz-projections.png') - -#plt.show() +fig, ((ax1, ax2, ax3), (ax4, ax5, ax6), (ax7, ax8, ax9)) = plt.subplots( + 3, 3, sharex=True, sharey=True, figsize=figsize +) +myplot(ax1, xz0, xz1, yproj(u, tslice[0]), ylabel="u") +myplot(ax2, xz0, xz1, yproj(u, tslice[1])) +myplot(ax3, xz0, xz1, yproj(u, tslice[2])) +myplot(ax4, xz0, xz1, yproj(v, tslice[0]), ylabel="v") +myplot(ax5, xz0, xz1, yproj(v, tslice[1])) +myplot(ax6, xz0, xz1, yproj(v, tslice[2])) +myplot(ax7, xz0, xz1, yproj(w, tslice[0]), ylabel="w", xlabel="t = " + str(times[0])) +myplot(ax8, xz0, xz1, yproj(w, tslice[1]), xlabel="t = " + str(times[1])) +myplot(ax9, xz0, xz1, yproj(w, tslice[2]), xlabel="t = " + str(times[2])) +plt.savefig("xz-projections.png") + +# plt.show() plt.close() ##### end of script ##### diff --git a/benchmarks/advection_reaction_3D/scripts/pickle_solution_output.py b/benchmarks/advection_reaction_3D/scripts/pickle_solution_output.py index 407c34921a..a51fade40f 100755 --- a/benchmarks/advection_reaction_3D/scripts/pickle_solution_output.py +++ b/benchmarks/advection_reaction_3D/scripts/pickle_solution_output.py @@ -19,39 +19,66 @@ import numpy as np # load mesh data file -mesh = np.loadtxt('mesh.txt', dtype=np.double) +mesh = np.loadtxt("mesh.txt", dtype=np.double) # X,Y,Z = np.meshgrid(mesh[0,:], mesh[1,:], mesh[2,:]) # calculate h -hx = mesh[0,1] - 
mesh[0,0] -hy = mesh[1,1] - mesh[1,0] -hz = mesh[2,1] - mesh[2,0] -nx = len(mesh[0,:]) -ny = len(mesh[1,:]) -nz = len(mesh[2,:]) +hx = mesh[0, 1] - mesh[0, 0] +hy = mesh[1, 1] - mesh[1, 0] +hz = mesh[2, 1] - mesh[2, 0] +nx = len(mesh[0, :]) +ny = len(mesh[1, :]) +nz = len(mesh[2, :]) print("nx, ny, nz = %d, %d, %d" % (nx, ny, nz)) print("hx, hy, hz = %g, %g, %g" % (hx, hy, hz)) # load output time file -times = np.loadtxt('t.000000.txt', dtype=np.double) +times = np.loadtxt("t.000000.txt", dtype=np.double) # load solution data files -ufiles = glob.glob('u.' + ('[0-9]'*6) + '.txt'); ufiles.sort() -vfiles = glob.glob('v.' + ('[0-9]'*6) + '.txt'); vfiles.sort() -wfiles = glob.glob('w.' + ('[0-9]'*6) + '.txt'); wfiles.sort() +ufiles = glob.glob("u." + ("[0-9]" * 6) + ".txt") +ufiles.sort() +vfiles = glob.glob("v." + ("[0-9]" * 6) + ".txt") +vfiles.sort() +wfiles = glob.glob("w." + ("[0-9]" * 6) + ".txt") +wfiles.sort() udata = [] vdata = [] wdata = [] sys.stdout.write("reading 1/%d...\r" % len(ufiles)) sys.stdout.flush() -for idx in range(0,len(ufiles)): - sys.stdout.write("reading %d/%d...\r" % (idx+1,len(ufiles))) +for idx in range(0, len(ufiles)): + sys.stdout.write("reading %d/%d...\r" % (idx + 1, len(ufiles))) sys.stdout.flush() - udata.append(pd.read_csv(ufiles[idx], header=None, delimiter=' ', skipinitialspace=True, dtype=np.double)) - vdata.append(pd.read_csv(vfiles[idx], header=None, delimiter=' ', skipinitialspace=True, dtype=np.double)) - wdata.append(pd.read_csv(wfiles[idx], header=None, delimiter=' ', skipinitialspace=True, dtype=np.double)) + udata.append( + pd.read_csv( + ufiles[idx], + header=None, + delimiter=" ", + skipinitialspace=True, + dtype=np.double, + ) + ) + vdata.append( + pd.read_csv( + vfiles[idx], + header=None, + delimiter=" ", + skipinitialspace=True, + dtype=np.double, + ) + ) + wdata.append( + pd.read_csv( + wfiles[idx], + header=None, + delimiter=" ", + skipinitialspace=True, + dtype=np.double, + ) + ) sys.stdout.write("\n") sys.stdout.flush() @@ -69,5 +96,6 @@ # save data to pickle print("saving...") -np.savez_compressed('output-with-h-%.2e.npz' % hx, t=times, u=udata, v=vdata, w=wdata, mesh=mesh) - +np.savez_compressed( + "output-with-h-%.2e.npz" % hx, t=times, u=udata, v=vdata, w=wdata, mesh=mesh +) diff --git a/benchmarks/nvector/plot_nvector_performance_results.py b/benchmarks/nvector/plot_nvector_performance_results.py index 02c45665e6..55bb5ae32e 100755 --- a/benchmarks/nvector/plot_nvector_performance_results.py +++ b/benchmarks/nvector/plot_nvector_performance_results.py @@ -20,6 +20,7 @@ # indicates if timing was enabled. 
# ----------------------------------------------------------------------------- + def main(): import argparse @@ -35,42 +36,60 @@ def main(): import matplotlib.ticker as mtick parser = argparse.ArgumentParser( - description='Plot data from NVector performance tests') - - parser.add_argument('op', type=str, - help='Which NVector operation to plot') - - parser.add_argument('datadir', type=str, - help='Directory where test output files are located') - - parser.add_argument('--timevelem', dest='timevelem', action='store_true', - help='Turn on plots for time vs number of elements') - - parser.add_argument('--noheatmap', dest='heatmap', action='store_false', - help='Turn off heatmap plots') - - parser.add_argument('--loglog', dest='loglog', action='store_true', - help='Generate loglog plots for time vs number of elements') - - parser.add_argument('--show', dest='show', action='store_true', - help='Display plots rather than saving to file') - - parser.add_argument('--debug', dest='debug', action='store_true', - help='Turn on debugging output') + description="Plot data from NVector performance tests" + ) + + parser.add_argument("op", type=str, help="Which NVector operation to plot") + + parser.add_argument( + "datadir", type=str, help="Directory where test output files are located" + ) + + parser.add_argument( + "--timevelem", + dest="timevelem", + action="store_true", + help="Turn on plots for time vs number of elements", + ) + + parser.add_argument( + "--noheatmap", + dest="heatmap", + action="store_false", + help="Turn off heatmap plots", + ) + + parser.add_argument( + "--loglog", + dest="loglog", + action="store_true", + help="Generate loglog plots for time vs number of elements", + ) + + parser.add_argument( + "--show", + dest="show", + action="store_true", + help="Display plots rather than saving to file", + ) + + parser.add_argument( + "--debug", dest="debug", action="store_true", help="Turn on debugging output" + ) # parse command line args args = parser.parse_args() - if (args.debug): + if args.debug: print(args) # check for test data directory - if (not os.path.isdir(args.datadir)): - print("ERROR:",args.datadir,"does not exist") + if not os.path.isdir(args.datadir): + print("ERROR:", args.datadir, "does not exist") sys.exit() # sort output files - output = sorted(glob.glob(args.datadir+'/output*.txt')) + output = sorted(glob.glob(args.datadir + "/output*.txt")) # if (args.debug): # print("output files") @@ -80,8 +99,8 @@ def main(): # figure out vector sizes, number of vectors, and number of sums nelem = [] - nvec = [] - nsum = [] + nvec = [] + nsum = [] ntest = [] # parse file names to get input parameters @@ -95,32 +114,32 @@ def main(): ns = int(split_fout[3]) nt = int(split_fout[4]) - if (not ne in nelem): + if not ne in nelem: nelem.append(ne) - if (not nv in nvec): + if not nv in nvec: nvec.append(nv) - if (not ns in nsum): + if not ns in nsum: nsum.append(ns) - if (not nt in ntest): + if not nt in ntest: ntest.append(nt) - if (len(ntest) != 1): + if len(ntest) != 1: print("Warning: Unequal numbers of tests") - if (args.debug): - print("nelem:",nelem, len(nelem)) - print("nvec: ",nvec, len(nvec)) - print("nsum: ",nsum, len(nsum)) - print("ntest:",ntest, len(ntest)) + if args.debug: + print("nelem:", nelem, len(nelem)) + print("nvec: ", nvec, len(nvec)) + print("nsum: ", nsum, len(nsum)) + print("ntest:", ntest, len(ntest)) # allocate numpy arrays for timing data - avg_fused = np.zeros([len(nvec), len(nelem)]) + avg_fused = np.zeros([len(nvec), len(nelem)]) sdev_fused = 
np.zeros([len(nvec), len(nelem)]) - avg_unfused = np.zeros([len(nvec), len(nelem)]) + avg_unfused = np.zeros([len(nvec), len(nelem)]) sdev_unfused = np.zeros([len(nvec), len(nelem)]) avg_ratio = np.zeros([len(nvec), len(nelem)]) @@ -131,8 +150,8 @@ def main(): # read output files for f in output: - if (args.debug): - print("Reading:",f) + if args.debug: + print("Reading:", f) # get test inputs from file name split_fout = f.split("/")[-1] @@ -149,15 +168,15 @@ def main(): split_line = shlex.split(line) # skip blank lines - if (not split_line): + if not split_line: continue # tests finished, stop reading file - if (split_line[0] == "Finished"): + if split_line[0] == "Finished": break # check if the operation is the one we want and get data - if (args.op == split_line[0]): + if args.op == split_line[0]: i = nvec.index(nv) j = nelem.index(ne) @@ -165,15 +184,15 @@ def main(): # NVEC[i][j] = nv # NELM[i][j] = ne - avg_fused[i][j] = float(split_line[1]) + avg_fused[i][j] = float(split_line[1]) sdev_fused[i][j] = float(split_line[2]) - avg_unfused[i][j] = float(split_line[5]) + avg_unfused[i][j] = float(split_line[5]) sdev_unfused[i][j] = float(split_line[6]) avg_ratio[i][j] = avg_fused[i][j] / avg_unfused[i][j] - if (args.debug): + if args.debug: print(avg_fused) print(avg_unfused) print(avg_ratio) @@ -185,35 +204,37 @@ def main(): # -------------------------------------------------------------------------- # allocate arrays for the upper and lower bounds of the confidence interval - lower_fused = np.zeros([len(nvec), len(nelem)]) - upper_fused = np.zeros([len(nvec), len(nelem)]) + lower_fused = np.zeros([len(nvec), len(nelem)]) + upper_fused = np.zeros([len(nvec), len(nelem)]) lower_unfused = np.zeros([len(nvec), len(nelem)]) upper_unfused = np.zeros([len(nvec), len(nelem)]) # critical value for 99% confidence interval - if (ntest[0] < 30): + if ntest[0] < 30: # student's t distribution - cv = st.t.interval(0.99, ntest[0]-1)[1] + cv = st.t.interval(0.99, ntest[0] - 1)[1] else: # normal distribution cv = st.norm.ppf(0.995) # confidence intervals - cdev_fused = cv * sdev_fused / np.sqrt(ntest[0]) + cdev_fused = cv * sdev_fused / np.sqrt(ntest[0]) lower_fused = avg_fused - cdev_fused upper_fused = avg_fused + cdev_fused - cdev_unfused = cv * sdev_unfused / np.sqrt(ntest[0]) + cdev_unfused = cv * sdev_unfused / np.sqrt(ntest[0]) lower_unfused = avg_unfused - cdev_unfused upper_unfused = avg_unfused + cdev_unfused # check if the fused average times are within the unfused confidence interval - fused_in = np.where(np.logical_and(avg_fused < upper_unfused, - avg_fused > lower_unfused)) + fused_in = np.where( + np.logical_and(avg_fused < upper_unfused, avg_fused > lower_unfused) + ) # check if the unfused average times are within the fused confidence interval - unfused_in = np.where(np.logical_and(avg_unfused < upper_fused, - avg_unfused > lower_fused)) + unfused_in = np.where( + np.logical_and(avg_unfused < upper_fused, avg_unfused > lower_fused) + ) # get which numbers of vectors and elements for fused tests are in the # confidence interval of the unfused times @@ -226,7 +247,7 @@ def main(): ef[i] = np.log2(nelem[fused_in[1][i]]) df[i] = 1 - if (args.debug): + if args.debug: print(vf) print(ef) @@ -241,7 +262,7 @@ def main(): eu[i] = np.log2(nelem[unfused_in[1][i]]) du[i] = 1 - if (args.debug): + if args.debug: print(vu) print(eu) @@ -266,20 +287,20 @@ def main(): # print(NELM) # print(avg_ratio) for i in reversed(range(len(nvec))): - print('%2d' % int(i+1), str(avg_ratio[i]).replace('\n', '')) + 
print("%2d" % int(i + 1), str(avg_ratio[i]).replace("\n", "")) print # -------------------------------------------------------------------------- # Heat Map # -------------------------------------------------------------------------- - if (args.heatmap): + if args.heatmap: - x = np.arange(len(nelem)+1)-0.5 # x = log2(number of elements) = 0,1,2,... - y = np.arange(len(nvec)+1)+1.5 # y = number of vectors = 2,3,4,... + x = np.arange(len(nelem) + 1) - 0.5 # x = log2(number of elements) = 0,1,2,... + y = np.arange(len(nvec) + 1) + 1.5 # y = number of vectors = 2,3,4,... # y = np.arange(len(nvec)+1)+0.5 # y = number of vectors = 1,2,3,... X, Y = np.meshgrid(x, y) - if (args.debug): + if args.debug: print(x) print(y) @@ -287,67 +308,79 @@ def main(): rmax = np.amax(avg_ratio) rmin = np.amin(avg_ratio) - ext = 'neither' - if (rmin > 1): - cmap='Reds' - norm = mpl.colors.Normalize(vmin=rmin, vmax=min(rmax,2)) - v = np.linspace(rmin, min(rmax,2), 10, endpoint=True) - if (rmax > 2): - ext = 'max' + ext = "neither" + if rmin > 1: + cmap = "Reds" + norm = mpl.colors.Normalize(vmin=rmin, vmax=min(rmax, 2)) + v = np.linspace(rmin, min(rmax, 2), 10, endpoint=True) + if rmax > 2: + ext = "max" else: - cmap='seismic' - if (rmax-1 > 1): + cmap = "seismic" + if rmax - 1 > 1: rrange = 1 - ext = 'max' + ext = "max" else: - rrange = max(abs(rmax-1),abs(rmin-1)) + rrange = max(abs(rmax - 1), abs(rmin - 1)) - v1 = np.linspace(1-rrange, 1, 5, endpoint=True) - v2 = np.linspace(1, 1+rrange, 5, endpoint=True) - v = np.append(v1,v2[1:]) - norm = mpl.colors.Normalize(vmin=1-rrange, vmax=1+rrange) + v1 = np.linspace(1 - rrange, 1, 5, endpoint=True) + v2 = np.linspace(1, 1 + rrange, 5, endpoint=True) + v = np.append(v1, v2[1:]) + norm = mpl.colors.Normalize(vmin=1 - rrange, vmax=1 + rrange) # plot heatmap plt.pcolormesh(X, Y, avg_ratio, cmap=cmap, norm=norm) clb = plt.colorbar(ticks=v, extend=ext) - clb.ax.set_title('Max = {0:.2f}\nMin = {1:.2f}'.format(rmax,rmin)) + clb.ax.set_title("Max = {0:.2f}\nMin = {1:.2f}".format(rmax, rmin)) # aff markers to indicate if the average time falls in a confidence interval - plt.scatter(ef,vf,s=40,marker='^',c=df,label='fused') - plt.scatter(eu,vu,s=40,marker='v',c=du,label='unfused') + plt.scatter(ef, vf, s=40, marker="^", c=df, label="fused") + plt.scatter(eu, vu, s=40, marker="v", c=du, label="unfused") plt.legend(loc=9, bbox_to_anchor=(0.5, -0.1), ncol=2) # add legend for scatter plot art = [] - lgd = plt.legend(loc='lower right', bbox_to_anchor=(1.34, -0.17)) + lgd = plt.legend(loc="lower right", bbox_to_anchor=(1.34, -0.17)) art.append(lgd) # add labels and title plt.xticks(np.log2(nelem)) plt.yticks(nvec) - plt.xlabel('log2(num elements)') - plt.ylabel('num vectors') - plt.title('avg fused time / avg unfused time \n'+args.op) + plt.xlabel("log2(num elements)") + plt.ylabel("num vectors") + plt.title("avg fused time / avg unfused time \n" + args.op) # display or save figure - if (args.show): + if args.show: plt.show() else: - plt.savefig(args.op+'-heatmap.pdf', - additional_artists=art, - bbox_inches="tight") + plt.savefig( + args.op + "-heatmap.pdf", additional_artists=art, bbox_inches="tight" + ) plt.close() # -------------------------------------------------------------------------- # Time vs Number of Elements Plots # -------------------------------------------------------------------------- - if (args.timevelem): - - colors = ['#000000','#a6cee3','#1f78b4','#b2df8a','#33a02c', - '#fb9a99','#e31a1c','#fdbf6f','#ff7f00','#cab2d6', - '#6a3d9a','#ffff99','#b15928'] - - hatch = 
[ '/','\\','-','+','x','o','O','.','*'] + if args.timevelem: + + colors = [ + "#000000", + "#a6cee3", + "#1f78b4", + "#b2df8a", + "#33a02c", + "#fb9a99", + "#e31a1c", + "#fdbf6f", + "#ff7f00", + "#cab2d6", + "#6a3d9a", + "#ffff99", + "#b15928", + ] + + hatch = ["/", "\\", "-", "+", "x", "o", "O", ".", "*"] # -------------------------------------------------------------------------- # Combined Number of Vectors Plots @@ -359,38 +392,45 @@ def main(): i = nvec.index(nv) - if (args.loglog): - ax.loglog(nelem, avg_fused[i], - color=colors[i], linestyle='-', label=nv) - ax.loglog(nelem, avg_unfused[i], - color=colors[i], linestyle='--', label=None) + if args.loglog: + ax.loglog(nelem, avg_fused[i], color=colors[i], linestyle="-", label=nv) + ax.loglog( + nelem, avg_unfused[i], color=colors[i], linestyle="--", label=None + ) else: - ax.plot(nelem, avg_fused[i], - color=colors[i], linestyle='-', label=nv) - ax.plot(nelem, avg_unfused[i], - color=colors[i], linestyle='--', label=None) + ax.plot(nelem, avg_fused[i], color=colors[i], linestyle="-", label=nv) + ax.plot( + nelem, avg_unfused[i], color=colors[i], linestyle="--", label=None + ) # plot confidence interval - ax.fill_between(nelem, lower_fused[i], upper_fused[i], - color=colors[i], alpha=0.3) - ax.fill_between(nelem, lower_unfused[i], upper_unfused[i], - color=colors[i], hatch='.', alpha=0.3) + ax.fill_between( + nelem, lower_fused[i], upper_fused[i], color=colors[i], alpha=0.3 + ) + ax.fill_between( + nelem, + lower_unfused[i], + upper_unfused[i], + color=colors[i], + hatch=".", + alpha=0.3, + ) ax.legend() ax.grid() - plt.title('Average Time Fused vs Unfused \n'+args.op) - plt.xlabel('vector length') - plt.ylabel('time (s)') + plt.title("Average Time Fused vs Unfused \n" + args.op) + plt.xlabel("vector length") + plt.ylabel("time (s)") - if (args.show): + if args.show: plt.show() else: - if (args.loglog): - fname=args.op+'-nvec-all-loglog.pdf' + if args.loglog: + fname = args.op + "-nvec-all-loglog.pdf" else: - fname=args.op+'-nvec-all.pdf' - plt.ticklabel_format(axis='both',style='sci') + fname = args.op + "-nvec-all.pdf" + plt.ticklabel_format(axis="both", style="sci") plt.savefig(fname) plt.close() @@ -400,49 +440,70 @@ def main(): for nv in nvec: fig = plt.figure() - ax = fig.add_subplot(111) + ax = fig.add_subplot(111) idx = nvec.index(nv) # plot run times - if (args.loglog): - ax.loglog(nelem, avg_fused[idx], - color='red', linestyle='-', label='Fused') - ax.loglog(nelem, avg_unfused[idx], - color='blue', linestyle='--', label='Unfused') + if args.loglog: + ax.loglog( + nelem, avg_fused[idx], color="red", linestyle="-", label="Fused" + ) + ax.loglog( + nelem, + avg_unfused[idx], + color="blue", + linestyle="--", + label="Unfused", + ) else: - ax.plot(nelem, avg_fused[idx], - color='red', linestyle='-', label='Fused') - ax.plot(nelem, avg_unfused[idx], - color='blue', linestyle='--', label='Unfused') + ax.plot( + nelem, avg_fused[idx], color="red", linestyle="-", label="Fused" + ) + ax.plot( + nelem, + avg_unfused[idx], + color="blue", + linestyle="--", + label="Unfused", + ) # plot confidence intervals - ax.fill_between(nelem, lower_fused[idx], upper_fused[idx], - color='red', alpha=0.2) - ax.fill_between(nelem, lower_unfused[idx], upper_unfused[idx], - color='blue', hatch='.', alpha=0.2) + ax.fill_between( + nelem, lower_fused[idx], upper_fused[idx], color="red", alpha=0.2 + ) + ax.fill_between( + nelem, + lower_unfused[idx], + upper_unfused[idx], + color="blue", + hatch=".", + alpha=0.2, + ) ax.legend() ax.grid() - 
plt.title('Average Time Fused vs Unfused with '+str(nv)+' vectors\n'+args.op) - plt.xlabel('vector length') - ax.set_ylabel('time (s)') + plt.title( + "Average Time Fused vs Unfused with " + str(nv) + " vectors\n" + args.op + ) + plt.xlabel("vector length") + ax.set_ylabel("time (s)") - if (args.show): + if args.show: plt.show() else: - if (args.loglog): - fname=args.op+'-nvec-'+str(nv)+'-loglog.pdf' + if args.loglog: + fname = args.op + "-nvec-" + str(nv) + "-loglog.pdf" else: - fname=args.op+'-nvec-'+str(nv)+'.pdf' - plt.ticklabel_format(axis='both',style='sci') + fname = args.op + "-nvec-" + str(nv) + ".pdf" + plt.ticklabel_format(axis="both", style="sci") plt.savefig(fname) plt.close() + # =============================================================================== if __name__ == "__main__": main() # EOF - diff --git a/benchmarks/nvector/plot_nvector_performance_speedup.py b/benchmarks/nvector/plot_nvector_performance_speedup.py index fb421f5573..623d716c01 100755 --- a/benchmarks/nvector/plot_nvector_performance_speedup.py +++ b/benchmarks/nvector/plot_nvector_performance_speedup.py @@ -20,6 +20,7 @@ # indicates if timing was enabled. # ----------------------------------------------------------------------------- + def main(): import argparse @@ -35,46 +36,61 @@ def main(): import matplotlib.ticker as mtick parser = argparse.ArgumentParser( - description='Plot data from NVector performance tests') - - parser.add_argument('op', type=str, - help='Which NVector operation to plot') - - parser.add_argument('datadir', type=str, - help='Directory where test output files are located') - - parser.add_argument('--noplots', dest='noplots', action='store_true', - help='Turn on plots for time vs number of elements') - - parser.add_argument('--logx', dest='logx', action='store_true', - help='Generate plots for speedup with log scale for the x axis (number of elements') - - parser.add_argument('--fused', dest='fused', action='store_true', - help='Operation is a fused op') - - parser.add_argument('--show', dest='show', action='store_true', - help='Display plots rather than saving to file') - - parser.add_argument('--debug', dest='debug', action='store_true', - help='Turn on debugging output') + description="Plot data from NVector performance tests" + ) + + parser.add_argument("op", type=str, help="Which NVector operation to plot") + + parser.add_argument( + "datadir", type=str, help="Directory where test output files are located" + ) + + parser.add_argument( + "--noplots", + dest="noplots", + action="store_true", + help="Turn on plots for time vs number of elements", + ) + + parser.add_argument( + "--logx", + dest="logx", + action="store_true", + help="Generate plots for speedup with log scale for the x axis (number of elements", + ) + + parser.add_argument( + "--fused", dest="fused", action="store_true", help="Operation is a fused op" + ) + + parser.add_argument( + "--show", + dest="show", + action="store_true", + help="Display plots rather than saving to file", + ) + + parser.add_argument( + "--debug", dest="debug", action="store_true", help="Turn on debugging output" + ) # parse command line args args = parser.parse_args() - if (args.debug): + if args.debug: print(args) # check for test data directory - if (not os.path.isdir(args.datadir)): - print("ERROR:",args.datadir,"does not exist") + if not os.path.isdir(args.datadir): + print("ERROR:", args.datadir, "does not exist") sys.exit() # sort output files - output_baseline = sorted(glob.glob(args.datadir+'/output*-old.log')) - output_new = 
sorted(glob.glob(args.datadir+'/output*-new.log')) + output_baseline = sorted(glob.glob(args.datadir + "/output*-old.log")) + output_new = sorted(glob.glob(args.datadir + "/output*-new.log")) output = output_baseline + output_new - if (args.debug): + if args.debug: print("output files") print(len(output)) for i in range(len(output)): @@ -82,8 +98,8 @@ def main(): # figure out vector sizes, number of vectors, and number of sums nelem = [] - nvec = [] - nsum = [] + nvec = [] + nsum = [] ntest = [] # parse file names to get input parameters @@ -97,40 +113,40 @@ def main(): ns = int(split_fout[3]) nt = int(split_fout[4]) - if (not ne in nelem): + if not ne in nelem: nelem.append(ne) - if (not nv in nvec): + if not nv in nvec: nvec.append(nv) - if (not ns in nsum): + if not ns in nsum: nsum.append(ns) - if (not nt in ntest): + if not nt in ntest: ntest.append(nt) - if (len(ntest) != 1): + if len(ntest) != 1: print("Warning: Unequal numbers of tests") nelem.sort() - if (args.debug): - print("nelem:",nelem, len(nelem)) - print("nvec: ",nvec, len(nvec)) - print("nsum: ",nsum, len(nsum)) - print("ntest:",ntest, len(ntest)) + if args.debug: + print("nelem:", nelem, len(nelem)) + print("nvec: ", nvec, len(nvec)) + print("nsum: ", nsum, len(nsum)) + print("ntest:", ntest, len(ntest)) # allocate numpy arrays for timing data - avg_denom = np.zeros([len(nvec), len(nelem)]) + avg_denom = np.zeros([len(nvec), len(nelem)]) sdev_denom = np.zeros([len(nvec), len(nelem)]) - avg_numer = np.zeros([len(nvec), len(nelem)]) + avg_numer = np.zeros([len(nvec), len(nelem)]) sdev_numer = np.zeros([len(nvec), len(nelem)]) avg_ratio = np.zeros([len(nvec), len(nelem)]) # read 'baseline' files for f in output_baseline: - if (args.debug): - print("Reading:",f) + if args.debug: + print("Reading:", f) # get test inputs from file name split_fout = f.split("/")[-1] split_fout = split_fout.split("_") @@ -142,22 +158,22 @@ def main(): # split line into list split_line = shlex.split(line) # skip blank lines - if (not split_line): + if not split_line: continue # tests finished, stop reading file - if (split_line[0] == "Finished"): + if split_line[0] == "Finished": break # check if the operation is the one we want and get data - if (args.op == split_line[0]): + if args.op == split_line[0]: i = nvec.index(nv) j = nelem.index(ne) - avg_numer[i][j] = float(split_line[1]) + avg_numer[i][j] = float(split_line[1]) sdev_numer[i][j] = float(split_line[2]) # read output files for f in output_new: - if (args.debug): - print("Reading:",f) + if args.debug: + print("Reading:", f) # get test inputs from file name split_fout = f.split("/")[-1] split_fout = split_fout.split("_") @@ -169,16 +185,16 @@ def main(): # split line into list split_line = shlex.split(line) # skip blank lines - if (not split_line): + if not split_line: continue # tests finished, stop reading file - if (split_line[0] == "Finished"): + if split_line[0] == "Finished": break # check if the operation is the one we want and get data - if (args.op == split_line[0]): + if args.op == split_line[0]: i = nvec.index(nv) j = nelem.index(ne) - avg_denom[i][j] = float(split_line[1]) + avg_denom[i][j] = float(split_line[1]) sdev_denom[i][j] = float(split_line[2]) avg_ratio[i][j] = avg_numer[i][j] / avg_denom[i][j] @@ -187,35 +203,37 @@ def main(): # -------------------------------------------------------------------------- # allocate arrays for the upper and lower bounds of the confidence interval - lower_denom = np.zeros([len(nvec), len(nelem)]) - upper_denom = np.zeros([len(nvec), 
len(nelem)]) + lower_denom = np.zeros([len(nvec), len(nelem)]) + upper_denom = np.zeros([len(nvec), len(nelem)]) lower_numer = np.zeros([len(nvec), len(nelem)]) upper_numer = np.zeros([len(nvec), len(nelem)]) # critical value for 99% confidence interval - if (ntest[0] < 30): + if ntest[0] < 30: # student's t distribution - cv = st.t.interval(0.99, ntest[0]-1)[1] + cv = st.t.interval(0.99, ntest[0] - 1)[1] else: # normal distribution cv = st.norm.ppf(0.995) # confidence intervals - cdev_denom = cv * sdev_denom / np.sqrt(ntest[0]) + cdev_denom = cv * sdev_denom / np.sqrt(ntest[0]) lower_denom = avg_denom - cdev_denom upper_denom = avg_denom + cdev_denom - cdev_numer = cv * sdev_numer / np.sqrt(ntest[0]) + cdev_numer = cv * sdev_numer / np.sqrt(ntest[0]) lower_numer = avg_numer - cdev_numer upper_numer = avg_numer + cdev_numer # check if the new average times are within the baseline confidence interval - denom_in = np.where(np.logical_and(avg_denom < upper_numer, - avg_denom > lower_numer)) + denom_in = np.where( + np.logical_and(avg_denom < upper_numer, avg_denom > lower_numer) + ) # check if the baseline average times are within the new confidence interval - numer_in = np.where(np.logical_and(avg_numer < upper_denom, - avg_numer > lower_denom)) + numer_in = np.where( + np.logical_and(avg_numer < upper_denom, avg_numer > lower_denom) + ) # get which numbers of vectors and elements for new tests are in the # confidence interval of the baseline times @@ -228,9 +246,9 @@ def main(): ef[i] = np.log2(nelem[denom_in[1][i]]) df[i] = 1 - if (args.debug): - print('vf:', vf) - print('ef:', ef) + if args.debug: + print("vf:", vf) + print("ef:", ef) # get which numbers of vectors and elements for baseline tests are in the # confidence interval of the new times @@ -243,9 +261,9 @@ def main(): eu[i] = np.log2(nelem[numer_in[1][i]]) du[i] = 1 - if (args.debug): - print('vu:', vu) - print('eu:', eu) + if args.debug: + print("vu:", vu) + print("eu:", eu) # -------------------------------------------------------------------------- # Output ratios @@ -256,29 +274,41 @@ def main(): print("avg. new") for i in reversed(range(len(nvec))): - print('%2d' % int(i+1), str(avg_denom[i]).replace('\n', '')) + print("%2d" % int(i + 1), str(avg_denom[i]).replace("\n", "")) print() print("avg. baseline") for i in reversed(range(len(nvec))): - print('%2d' % int(i+1), str(avg_numer[i]).replace('\n', '')) + print("%2d" % int(i + 1), str(avg_numer[i]).replace("\n", "")) print() print("avg. ratio (speedup)") for i in reversed(range(len(nvec))): - print('%2d' % int(i+1), str(avg_ratio[i]).replace('\n', '')) + print("%2d" % int(i + 1), str(avg_ratio[i]).replace("\n", "")) print() # -------------------------------------------------------------------------- # Speedup v. 
Number of Elements Plots # -------------------------------------------------------------------------- - if (not args.noplots): - - colors = ['#000000','#a6cee3','#1f78b4','#b2df8a','#33a02c', - '#fb9a99','#e31a1c','#fdbf6f','#ff7f00','#cab2d6', - '#6a3d9a','#ffff99','#b15928'] - - hatch = [ '/','\\','-','+','x','o','O','.','*'] + if not args.noplots: + + colors = [ + "#000000", + "#a6cee3", + "#1f78b4", + "#b2df8a", + "#33a02c", + "#fb9a99", + "#e31a1c", + "#fdbf6f", + "#ff7f00", + "#cab2d6", + "#6a3d9a", + "#ffff99", + "#b15928", + ] + + hatch = ["/", "\\", "-", "+", "x", "o", "O", ".", "*"] # -------------------------------------------------------------------------- # Combined Number of Vectors Plots @@ -287,19 +317,17 @@ def main(): ax = fig.add_subplot(111) if args.fused: - indices = range(0,len(nvec)) + indices = range(0, len(nvec)) else: - indices = range(len(nvec)-1,len(nvec)) + indices = range(len(nvec) - 1, len(nvec)) for i in indices: - lab = 'num. vecs %d' % nvec[i] - if (args.logx): - ax.plot(nelem, avg_ratio[i], - color=colors[i], linestyle='-', label=lab) - ax.set_xscale('log') + lab = "num. vecs %d" % nvec[i] + if args.logx: + ax.plot(nelem, avg_ratio[i], color=colors[i], linestyle="-", label=lab) + ax.set_xscale("log") else: - ax.plot(nelem, avg_ratio[i], - color=colors[i], linestyle='-', label=lab) + ax.plot(nelem, avg_ratio[i], color=colors[i], linestyle="-", label=lab) # # plot confidence interval # ax.fill_between(nelem, lower_denom[i], upper_denom[i], # color=colors[i], alpha=0.3) @@ -309,18 +337,18 @@ def main(): ax.legend() ax.grid() - plt.title('Average Speedup \n'+args.op) - plt.xlabel('vector length') - plt.ylabel('speedup (baseline/new)') + plt.title("Average Speedup \n" + args.op) + plt.xlabel("vector length") + plt.ylabel("speedup (baseline/new)") - if (args.show): + if args.show: plt.show() else: - if (args.logx): - fname=args.op+'-nvec-all-logx.pdf' + if args.logx: + fname = args.op + "-nvec-all-logx.pdf" else: - fname=args.op+'-nvec-all.pdf' - plt.ticklabel_format(axis='both',style='sci') + fname = args.op + "-nvec-all.pdf" + plt.ticklabel_format(axis="both", style="sci") plt.savefig(fname) plt.close() diff --git a/doc/superbuild/source/developers/style_guide/SourceCode.rst b/doc/superbuild/source/developers/style_guide/SourceCode.rst index 473409d987..a01897bdf2 100644 --- a/doc/superbuild/source/developers/style_guide/SourceCode.rst +++ b/doc/superbuild/source/developers/style_guide/SourceCode.rst @@ -184,10 +184,11 @@ not adhere to all of these rules. variable-length arrays. Exceptions are allowed when interfacing with a library which requires a newer standard. -#. All new code added to SUNDIALS should be - formatted with `clang-format `_, - and `fprettify `_. - See :ref:`Style.Formatting` for details. +#. All new code added to SUNDIALS should be formatted with `clang-format + `_ for C/C++, `fprettify + `_ for Fortran, and `black + `_ for Python. See :ref:`Style.Formatting` for + details. #. Spaces not tabs. @@ -378,11 +379,14 @@ Formatting ---------- All new code added to SUNDIALS should be formatted with `clang-format -`_ and -`fprettify `_. The -``.clang-format`` file in the root of the project defines our configuration -for clang-format. We use the default fprettify settings, except we use -2-space indentation. To apply ``clang-format`` and ``fprettify`` you can run: +`_ for C/C++, `fprettify +`_ for Fortran, and `black +`_ for Python. The ``.clang-format`` file in the +root of the project defines our configuration for clang-format. 
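As a rough illustration for the Python sources reformatted in this patch, a minimal sketch of applying ``black`` from the repository root is shown below. This is only an illustration with assumed paths and environment (``black`` installed and on ``PATH``), not the project's prescribed command; the command the project documents appears in the shell block that follows.

.. code-block:: python

   # Minimal sketch (assumptions: ``black`` is installed and on PATH, and the
   # working directory is the repository root). Formats all Python files in
   # place using black's default settings, matching the reformatting applied
   # to the plotting scripts in this patch.
   import subprocess

   subprocess.run(["black", "."], check=True)
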
We use the +default fprettify settings, except we use 2-space indentation. We also use the +default black settings. + +To apply ``clang-format``, ``fprettify``, and ``black`` you can run: .. code-block:: shell @@ -395,7 +399,7 @@ for clang-format. We use the default fprettify settings, except we use that you use version ``17.0.4``, which can be installed from source or with Spack. Alternatively, when you open a pull request on GitHub, an action will run ``clang-format`` on the code. If any formatting is required, the action will fail and produce a git patch artifact that you can download - (from the job artifacts section) and apply with `git apply`. + (from the job artifacts section) and apply with ``git apply``. If clang-format breaks lines in a way that is unreadable, use ``//`` to break the line. For example, diff --git a/examples/arkode/CXX_parallel/plot_brusselator1D.py b/examples/arkode/CXX_parallel/plot_brusselator1D.py index 2bcc7d1af7..087577ff0e 100755 --- a/examples/arkode/CXX_parallel/plot_brusselator1D.py +++ b/examples/arkode/CXX_parallel/plot_brusselator1D.py @@ -22,33 +22,36 @@ import numpy as np # load mesh data file -mesh = np.loadtxt('mesh.txt', dtype=np.double) +mesh = np.loadtxt("mesh.txt", dtype=np.double) # load output time file -times = np.loadtxt('t.000000.txt', dtype=np.double) +times = np.loadtxt("t.000000.txt", dtype=np.double) # load solution data files -ufiles = glob.glob('u.' + ('[0-9]'*6) + '.txt'); ufiles.sort() -vfiles = glob.glob('v.' + ('[0-9]'*6) + '.txt'); vfiles.sort() -wfiles = glob.glob('w.' + ('[0-9]'*6) + '.txt'); wfiles.sort() +ufiles = glob.glob("u." + ("[0-9]" * 6) + ".txt") +ufiles.sort() +vfiles = glob.glob("v." + ("[0-9]" * 6) + ".txt") +vfiles.sort() +wfiles = glob.glob("w." + ("[0-9]" * 6) + ".txt") +wfiles.sort() udata = np.loadtxt(ufiles[0], dtype=np.double) vdata = np.loadtxt(vfiles[0], dtype=np.double) wdata = np.loadtxt(wfiles[0], dtype=np.double) -for idx in range(1,len(ufiles)): +for idx in range(1, len(ufiles)): udata = np.hstack((udata, np.loadtxt(ufiles[idx], dtype=np.double))) vdata = np.hstack((vdata, np.loadtxt(vfiles[idx], dtype=np.double))) wdata = np.hstack((wdata, np.loadtxt(wfiles[idx], dtype=np.double))) # determine number of time steps, mesh size -nt,nx = np.shape(udata) +nt, nx = np.shape(udata) # determine min/max values -umin = 0.9*udata.min() -umax = 1.1*udata.max() -vmin = 0.9*vdata.min() -vmax = 1.1*vdata.max() -wmin = 0.9*wdata.min() -wmax = 1.1*wdata.max() +umin = 0.9 * udata.min() +umax = 1.1 * udata.max() +vmin = 0.9 * vdata.min() +vmax = 1.1 * vdata.max() +wmin = 0.9 * wdata.min() +wmax = 1.1 * wdata.max() xmax = mesh.max() minval = np.array([umin, vmin, wmin]).min() maxval = np.array([umax, vmax, wmax]).max() @@ -57,39 +60,39 @@ for tstep in range(nt): # set string constants for output plots, current time, mesh size - pname = 'solution.' + repr(tstep).zfill(3) + '.png' - tstr = repr(tstep) + pname = "solution." 
+ repr(tstep).zfill(3) + ".png" + tstr = repr(tstep) nxstr = repr(nx) # plot current solution and save to disk plt.figure(1) - plt.plot(mesh,udata[tstep,:],label="u") - plt.plot(mesh,vdata[tstep,:],label="v") - plt.plot(mesh,wdata[tstep,:],label="w") - plt.xlabel('x') - plt.ylabel('solution') - plt.title('Solutions at output ' + tstr + ', mesh = ' + nxstr) + plt.plot(mesh, udata[tstep, :], label="u") + plt.plot(mesh, vdata[tstep, :], label="v") + plt.plot(mesh, wdata[tstep, :], label="w") + plt.xlabel("x") + plt.ylabel("solution") + plt.title("Solutions at output " + tstr + ", mesh = " + nxstr) plt.axis((0.0, xmax, minval, maxval)) plt.grid() - plt.legend(loc='upper right', shadow=True) + plt.legend(loc="upper right", shadow=True) plt.savefig(pname) plt.close() # set string constants for output plots, current time, mesh size -pname = 'solution_at_x0.png' +pname = "solution_at_x0.png" xstr = repr(mesh[0]) # plot current solution and save to disk plt.figure(1) -plt.plot(times,udata[:,0],label="u") -plt.plot(times,vdata[:,0],label="v") -plt.plot(times,wdata[:,0],label="w") -plt.xlabel('t') -plt.ylabel('solution') -plt.title('Solutions at output at x = '+xstr) +plt.plot(times, udata[:, 0], label="u") +plt.plot(times, vdata[:, 0], label="v") +plt.plot(times, wdata[:, 0], label="w") +plt.xlabel("t") +plt.ylabel("solution") +plt.title("Solutions at output at x = " + xstr) plt.axis((times[0], times[-1], minval, maxval)) plt.grid() -plt.legend(loc='upper right', shadow=True) +plt.legend(loc="upper right", shadow=True) plt.savefig(pname) plt.close() diff --git a/examples/arkode/CXX_parallel/plot_heat2D_p.py b/examples/arkode/CXX_parallel/plot_heat2D_p.py index 7b7f83d929..0a99dfbc5e 100755 --- a/examples/arkode/CXX_parallel/plot_heat2D_p.py +++ b/examples/arkode/CXX_parallel/plot_heat2D_p.py @@ -28,7 +28,7 @@ # ------------------------------------------------------------------------------ # read MPI root process problem info file -infofile = 'heat2d_info.00000.txt' +infofile = "heat2d_info.00000.txt" with open(infofile) as fn: @@ -59,7 +59,7 @@ continue # total number of MPI processes - if "np"in line: + if "np" in line: nprocs = int(text[1]) continue @@ -71,11 +71,11 @@ # ------------------------------------------------------------------------------ # load subdomain information, store in table -subdomains = np.zeros((nprocs,4), dtype=np.int) +subdomains = np.zeros((nprocs, 4), dtype=np.int) for i in range(nprocs): - infofile = 'heat2d_info.' + repr(i).zfill(5) + '.txt' + infofile = "heat2d_info." 
+ repr(i).zfill(5) + ".txt" with open(infofile) as fn: @@ -87,62 +87,64 @@ # x-direction starting index if "is" in line: - subdomains[i,0] = float(text[1]) + subdomains[i, 0] = float(text[1]) continue # x-direction ending index if "ie" in line: - subdomains[i,1] = float(text[1]) + subdomains[i, 1] = float(text[1]) continue # y-direction starting index if "js" in line: - subdomains[i,2] = float(text[1]) + subdomains[i, 2] = float(text[1]) continue # y-direction ending index if "je" in line: - subdomains[i,3] = float(text[1]) + subdomains[i, 3] = float(text[1]) continue # ------------------------------------------------------------------------------ # check if the error was output -fname = 'heat2d_error.00000.txt' +fname = "heat2d_error.00000.txt" if os.path.isfile(fname): - plottype = ['solution', 'error'] + plottype = ["solution", "error"] else: - plottype = ['solution'] + plottype = ["solution"] for pt in plottype: # fill array with data - time = np.zeros(nt) + time = np.zeros(nt) result = np.zeros((nt, ny, nx)) for i in range(nprocs): - datafile = 'heat2d_' + pt + '.' + repr(i).zfill(5) + '.txt' + datafile = "heat2d_" + pt + "." + repr(i).zfill(5) + ".txt" # load data data = np.loadtxt(datafile, dtype=np.double) - if (np.shape(data)[0] != nt): - sys.exit('error: subdomain ' + i + ' has an incorrect number of time steps') + if np.shape(data)[0] != nt: + sys.exit("error: subdomain " + i + " has an incorrect number of time steps") # subdomain indices - istart = subdomains[i,0] - iend = subdomains[i,1] - jstart = subdomains[i,2] - jend = subdomains[i,3] - nxl = iend - istart + 1 - nyl = jend - jstart + 1 + istart = subdomains[i, 0] + iend = subdomains[i, 1] + jstart = subdomains[i, 2] + jend = subdomains[i, 3] + nxl = iend - istart + 1 + nyl = jend - jstart + 1 # extract data for i in range(nt): - time[i] = data[i,0] - result[i,jstart:jend+1,istart:iend+1] = np.reshape(data[i,1:], (nyl,nxl)) + time[i] = data[i, 0] + result[i, jstart : jend + 1, istart : iend + 1] = np.reshape( + data[i, 1:], (nyl, nxl) + ) # determine extents of plots maxtemp = 1.1 * result.max() @@ -151,7 +153,7 @@ # set x and y meshgrid objects xspan = np.linspace(0.0, xu, nx) yspan = np.linspace(0.0, yu, ny) - X,Y = np.meshgrid(xspan, yspan) + X, Y = np.meshgrid(xspan, yspan) nxstr = repr(nx) nystr = repr(ny) @@ -160,24 +162,33 @@ for tstep in range(nt): # set string constants for output plots, current time, mesh size - pname = 'heat2d_surf_' + pt + '.' + repr(tstep).zfill(3) + '.png' - tstr = str(time[tstep]) + pname = "heat2d_surf_" + pt + "." 
+ repr(tstep).zfill(3) + ".png" + tstr = str(time[tstep]) # plot surface and save to disk fig = plt.figure(1) - ax = fig.add_subplot(111, projection='3d') - - ax.plot_surface(X, Y, result[tstep,:,:], rstride=1, cstride=1, - cmap=cm.jet, linewidth=0, antialiased=True, shade=True) - - ax.set_xlabel('x') - ax.set_ylabel('y') + ax = fig.add_subplot(111, projection="3d") + + ax.plot_surface( + X, + Y, + result[tstep, :, :], + rstride=1, + cstride=1, + cmap=cm.jet, + linewidth=0, + antialiased=True, + shade=True, + ) + + ax.set_xlabel("x") + ax.set_ylabel("y") ax.set_zlim((mintemp, maxtemp)) - ax.view_init(20,45) - if (pt == 'solution'): - title('u(x,y) at t = ' + tstr) + ax.view_init(20, 45) + if pt == "solution": + title("u(x,y) at t = " + tstr) else: - title('error(x,y) at t = ' + tstr) + title("error(x,y) at t = " + tstr) savefig(pname) plt.close() diff --git a/examples/arkode/CXX_parhyp/plot_heat2D_p.py b/examples/arkode/CXX_parhyp/plot_heat2D_p.py index 7b7f83d929..0a99dfbc5e 100755 --- a/examples/arkode/CXX_parhyp/plot_heat2D_p.py +++ b/examples/arkode/CXX_parhyp/plot_heat2D_p.py @@ -28,7 +28,7 @@ # ------------------------------------------------------------------------------ # read MPI root process problem info file -infofile = 'heat2d_info.00000.txt' +infofile = "heat2d_info.00000.txt" with open(infofile) as fn: @@ -59,7 +59,7 @@ continue # total number of MPI processes - if "np"in line: + if "np" in line: nprocs = int(text[1]) continue @@ -71,11 +71,11 @@ # ------------------------------------------------------------------------------ # load subdomain information, store in table -subdomains = np.zeros((nprocs,4), dtype=np.int) +subdomains = np.zeros((nprocs, 4), dtype=np.int) for i in range(nprocs): - infofile = 'heat2d_info.' + repr(i).zfill(5) + '.txt' + infofile = "heat2d_info." + repr(i).zfill(5) + ".txt" with open(infofile) as fn: @@ -87,62 +87,64 @@ # x-direction starting index if "is" in line: - subdomains[i,0] = float(text[1]) + subdomains[i, 0] = float(text[1]) continue # x-direction ending index if "ie" in line: - subdomains[i,1] = float(text[1]) + subdomains[i, 1] = float(text[1]) continue # y-direction starting index if "js" in line: - subdomains[i,2] = float(text[1]) + subdomains[i, 2] = float(text[1]) continue # y-direction ending index if "je" in line: - subdomains[i,3] = float(text[1]) + subdomains[i, 3] = float(text[1]) continue # ------------------------------------------------------------------------------ # check if the error was output -fname = 'heat2d_error.00000.txt' +fname = "heat2d_error.00000.txt" if os.path.isfile(fname): - plottype = ['solution', 'error'] + plottype = ["solution", "error"] else: - plottype = ['solution'] + plottype = ["solution"] for pt in plottype: # fill array with data - time = np.zeros(nt) + time = np.zeros(nt) result = np.zeros((nt, ny, nx)) for i in range(nprocs): - datafile = 'heat2d_' + pt + '.' + repr(i).zfill(5) + '.txt' + datafile = "heat2d_" + pt + "." 
+ repr(i).zfill(5) + ".txt" # load data data = np.loadtxt(datafile, dtype=np.double) - if (np.shape(data)[0] != nt): - sys.exit('error: subdomain ' + i + ' has an incorrect number of time steps') + if np.shape(data)[0] != nt: + sys.exit("error: subdomain " + i + " has an incorrect number of time steps") # subdomain indices - istart = subdomains[i,0] - iend = subdomains[i,1] - jstart = subdomains[i,2] - jend = subdomains[i,3] - nxl = iend - istart + 1 - nyl = jend - jstart + 1 + istart = subdomains[i, 0] + iend = subdomains[i, 1] + jstart = subdomains[i, 2] + jend = subdomains[i, 3] + nxl = iend - istart + 1 + nyl = jend - jstart + 1 # extract data for i in range(nt): - time[i] = data[i,0] - result[i,jstart:jend+1,istart:iend+1] = np.reshape(data[i,1:], (nyl,nxl)) + time[i] = data[i, 0] + result[i, jstart : jend + 1, istart : iend + 1] = np.reshape( + data[i, 1:], (nyl, nxl) + ) # determine extents of plots maxtemp = 1.1 * result.max() @@ -151,7 +153,7 @@ # set x and y meshgrid objects xspan = np.linspace(0.0, xu, nx) yspan = np.linspace(0.0, yu, ny) - X,Y = np.meshgrid(xspan, yspan) + X, Y = np.meshgrid(xspan, yspan) nxstr = repr(nx) nystr = repr(ny) @@ -160,24 +162,33 @@ for tstep in range(nt): # set string constants for output plots, current time, mesh size - pname = 'heat2d_surf_' + pt + '.' + repr(tstep).zfill(3) + '.png' - tstr = str(time[tstep]) + pname = "heat2d_surf_" + pt + "." + repr(tstep).zfill(3) + ".png" + tstr = str(time[tstep]) # plot surface and save to disk fig = plt.figure(1) - ax = fig.add_subplot(111, projection='3d') - - ax.plot_surface(X, Y, result[tstep,:,:], rstride=1, cstride=1, - cmap=cm.jet, linewidth=0, antialiased=True, shade=True) - - ax.set_xlabel('x') - ax.set_ylabel('y') + ax = fig.add_subplot(111, projection="3d") + + ax.plot_surface( + X, + Y, + result[tstep, :, :], + rstride=1, + cstride=1, + cmap=cm.jet, + linewidth=0, + antialiased=True, + shade=True, + ) + + ax.set_xlabel("x") + ax.set_ylabel("y") ax.set_zlim((mintemp, maxtemp)) - ax.view_init(20,45) - if (pt == 'solution'): - title('u(x,y) at t = ' + tstr) + ax.view_init(20, 45) + if pt == "solution": + title("u(x,y) at t = " + tstr) else: - title('error(x,y) at t = ' + tstr) + title("error(x,y) at t = " + tstr) savefig(pname) plt.close() diff --git a/examples/arkode/CXX_serial/plot_heat2D.py b/examples/arkode/CXX_serial/plot_heat2D.py index 6c97cdc112..c494bc06de 100755 --- a/examples/arkode/CXX_serial/plot_heat2D.py +++ b/examples/arkode/CXX_serial/plot_heat2D.py @@ -28,7 +28,7 @@ # ------------------------------------------------------------------------------ # read problem info file -infofile = 'heat2d_info.txt' +infofile = "heat2d_info.txt" with open(infofile) as fn: @@ -66,26 +66,26 @@ # ------------------------------------------------------------------------------ # check if the error was output -fname = 'heat2d_error.txt' +fname = "heat2d_error.txt" if os.path.isfile(fname): - plottype = ['solution', 'error'] + plottype = ["solution", "error"] else: - plottype = ['solution'] + plottype = ["solution"] for pt in plottype: # fill array with data - time = np.zeros(nt) + time = np.zeros(nt) result = np.zeros((nt, ny, nx)) # load data - data = np.loadtxt('heat2d_' + pt + '.txt', dtype=np.double) + data = np.loadtxt("heat2d_" + pt + ".txt", dtype=np.double) # extract data for i in range(nt): - time[i] = data[i,0] - result[i,0:ny+1,0:nx+1] = np.reshape(data[i,1:], (ny,nx)) + time[i] = data[i, 0] + result[i, 0 : ny + 1, 0 : nx + 1] = np.reshape(data[i, 1:], (ny, nx)) # determine extents of plots 
maxtemp = 1.1 * result.max() @@ -94,7 +94,7 @@ # set x and y meshgrid objects xspan = np.linspace(0.0, xu, nx) yspan = np.linspace(0.0, yu, ny) - X,Y = np.meshgrid(xspan, yspan) + X, Y = np.meshgrid(xspan, yspan) nxstr = repr(nx) nystr = repr(ny) @@ -103,24 +103,33 @@ for tstep in range(nt): # set string constants for output plots, current time, mesh size - pname = 'heat2d_surf_' + pt + '.' + repr(tstep).zfill(3) + '.png' - tstr = str(time[tstep]) + pname = "heat2d_surf_" + pt + "." + repr(tstep).zfill(3) + ".png" + tstr = str(time[tstep]) # plot surface and save to disk fig = plt.figure(1) - ax = fig.add_subplot(111, projection='3d') - - ax.plot_surface(X, Y, result[tstep,:,:], rstride=1, cstride=1, - cmap=cm.jet, linewidth=0, antialiased=True, shade=True) - - ax.set_xlabel('x') - ax.set_ylabel('y') + ax = fig.add_subplot(111, projection="3d") + + ax.plot_surface( + X, + Y, + result[tstep, :, :], + rstride=1, + cstride=1, + cmap=cm.jet, + linewidth=0, + antialiased=True, + shade=True, + ) + + ax.set_xlabel("x") + ax.set_ylabel("y") ax.set_zlim((mintemp, maxtemp)) - ax.view_init(20,45) - if (pt == 'solution'): - title('u(x,y) at t = ' + tstr) + ax.view_init(20, 45) + if pt == "solution": + title("u(x,y) at t = " + tstr) else: - title('error(x,y) at t = ' + tstr) + title("error(x,y) at t = " + tstr) savefig(pname) plt.close() diff --git a/examples/arkode/CXX_serial/plot_sol.py b/examples/arkode/CXX_serial/plot_sol.py index ab463fac6c..fe3d875340 100755 --- a/examples/arkode/CXX_serial/plot_sol.py +++ b/examples/arkode/CXX_serial/plot_sol.py @@ -20,16 +20,16 @@ import numpy as np # load solution data file -data = np.loadtxt('solution.txt', dtype=np.double) +data = np.loadtxt("solution.txt", dtype=np.double) # determine number of time steps, number of fields -nt,nv = np.shape(data) +nt, nv = np.shape(data) # extract time array -times = data[:,0] +times = data[:, 0] # parse comment line to determine solution names -f = open('solution.txt', 'r') +f = open("solution.txt", "r") commentline = f.readline() commentsplit = commentline.split() names = commentsplit[2:] @@ -38,18 +38,16 @@ plt.figure() # add curves to figure -for i in range(nv-1): - plt.plot(times,data[:,i+1],label=names[i]) -plt.xlabel('t') -if (nv > 2): - plt.ylabel('solutions') +for i in range(nv - 1): + plt.plot(times, data[:, i + 1], label=names[i]) +plt.xlabel("t") +if nv > 2: + plt.ylabel("solutions") else: - plt.ylabel('solution') -plt.legend(loc='upper right', shadow=True) + plt.ylabel("solution") +plt.legend(loc="upper right", shadow=True) plt.grid() -plt.savefig('solution.png') - - +plt.savefig("solution.png") ##### end of script ##### diff --git a/examples/arkode/CXX_xbraid/plot_heat2D.py b/examples/arkode/CXX_xbraid/plot_heat2D.py index 72aaa2adea..f24592bfdc 100755 --- a/examples/arkode/CXX_xbraid/plot_heat2D.py +++ b/examples/arkode/CXX_xbraid/plot_heat2D.py @@ -28,7 +28,7 @@ # ------------------------------------------------------------------------------ # read problem info file -infofile = 'heat2d_info.txt' +infofile = "heat2d_info.txt" with open(infofile) as fn: @@ -66,17 +66,17 @@ # ------------------------------------------------------------------------------ # check if the error was output -fname = 'heat2d_error.000000.txt' +fname = "heat2d_error.000000.txt" if os.path.isfile(fname): - plottype = ['solution', 'error'] + plottype = ["solution", "error"] else: - plottype = ['solution'] + plottype = ["solution"] for pt in plottype: # fill array with data - time = np.zeros(nt) + time = np.zeros(nt) result = 
np.zeros((nt, ny, nx)) tindex = range(0, nt) @@ -86,14 +86,14 @@ for t in tindex: # output file name - datafile = 'heat2d_' + pt + '.' + repr(t).zfill(6) + '.txt' + datafile = "heat2d_" + pt + "." + repr(t).zfill(6) + ".txt" # load data data = np.loadtxt(datafile, dtype=np.double) # extract data time[i] = data[0] - result[i,0:ny+1,0:nx+1] = np.reshape(data[1:], (ny,nx)) + result[i, 0 : ny + 1, 0 : nx + 1] = np.reshape(data[1:], (ny, nx)) i += 1 # determine extents of plots @@ -103,7 +103,7 @@ # set x and y meshgrid objects xspan = np.linspace(0.0, xu, nx) yspan = np.linspace(0.0, yu, ny) - X,Y = np.meshgrid(xspan, yspan) + X, Y = np.meshgrid(xspan, yspan) nxstr = repr(nx) nystr = repr(ny) @@ -112,24 +112,33 @@ for tstep in range(nt): # set string constants for output plots, current time, mesh size - pname = 'heat2d_surf_' + pt + '.' + repr(tstep).zfill(6) + '.png' - tstr = str(time[tstep]) + pname = "heat2d_surf_" + pt + "." + repr(tstep).zfill(6) + ".png" + tstr = str(time[tstep]) # plot surface and save to disk fig = plt.figure(1) - ax = fig.add_subplot(111, projection='3d') - - ax.plot_surface(X, Y, result[tstep,:,:], rstride=1, cstride=1, - cmap=cm.jet, linewidth=0, antialiased=True, shade=True) - - ax.set_xlabel('x') - ax.set_ylabel('y') + ax = fig.add_subplot(111, projection="3d") + + ax.plot_surface( + X, + Y, + result[tstep, :, :], + rstride=1, + cstride=1, + cmap=cm.jet, + linewidth=0, + antialiased=True, + shade=True, + ) + + ax.set_xlabel("x") + ax.set_ylabel("y") ax.set_zlim((mintemp, maxtemp)) - ax.view_init(20,45) - if (pt == 'solution'): - title('u(x,y) at t = ' + tstr) + ax.view_init(20, 45) + if pt == "solution": + title("u(x,y) at t = " + tstr) else: - title('error(x,y) at t = ' + tstr) + title("error(x,y) at t = " + tstr) savefig(pname) plt.close() diff --git a/examples/arkode/C_manyvector/plot_brusselator1D.py b/examples/arkode/C_manyvector/plot_brusselator1D.py index 3cc29051e5..72a3402c4a 100755 --- a/examples/arkode/C_manyvector/plot_brusselator1D.py +++ b/examples/arkode/C_manyvector/plot_brusselator1D.py @@ -20,23 +20,23 @@ import numpy as np # load mesh data file -mesh = np.loadtxt('bruss_mesh.txt', dtype=np.double) +mesh = np.loadtxt("bruss_mesh.txt", dtype=np.double) # load solution data files -udata = np.loadtxt('bruss_u.txt', dtype=np.double) -vdata = np.loadtxt('bruss_v.txt', dtype=np.double) -wdata = np.loadtxt('bruss_w.txt', dtype=np.double) +udata = np.loadtxt("bruss_u.txt", dtype=np.double) +vdata = np.loadtxt("bruss_v.txt", dtype=np.double) +wdata = np.loadtxt("bruss_w.txt", dtype=np.double) # determine number of time steps, mesh size -nt,nx = np.shape(udata) +nt, nx = np.shape(udata) # determine min/max values -umin = 0.9*udata.min() -umax = 1.1*udata.max() -vmin = 0.9*vdata.min() -vmax = 1.1*vdata.max() -wmin = 0.9*wdata.min() -wmax = 1.1*wdata.max() +umin = 0.9 * udata.min() +umax = 1.1 * udata.max() +vmin = 0.9 * vdata.min() +vmax = 1.1 * vdata.max() +wmin = 0.9 * wdata.min() +wmax = 1.1 * wdata.max() minval = np.array([umin, vmin, wmin]).min() maxval = np.array([umax, vmax, wmax]).max() @@ -44,21 +44,21 @@ for tstep in range(nt): # set string constants for output plots, current time, mesh size - pname = 'brusselator1D.' + repr(tstep).zfill(3) + '.png' - tstr = repr(tstep) + pname = "brusselator1D." 
+ repr(tstep).zfill(3) + ".png" + tstr = repr(tstep) nxstr = repr(nx) # plot current solution and save to disk plt.figure(1) - plt.plot(mesh,udata[tstep,:],label="u") - plt.plot(mesh,vdata[tstep,:],label="v") - plt.plot(mesh,wdata[tstep,:],label="w") - plt.xlabel('x') - plt.ylabel('solution') - plt.title('Solutions at output ' + tstr + ', mesh = ' + nxstr) + plt.plot(mesh, udata[tstep, :], label="u") + plt.plot(mesh, vdata[tstep, :], label="v") + plt.plot(mesh, wdata[tstep, :], label="w") + plt.xlabel("x") + plt.ylabel("solution") + plt.title("Solutions at output " + tstr + ", mesh = " + nxstr) plt.axis((0.0, 1.0, minval, maxval)) plt.grid() - plt.legend(loc='upper right', shadow=True) + plt.legend(loc="upper right", shadow=True) plt.savefig(pname) plt.close() diff --git a/examples/arkode/C_openmp/plot_brusselator1D.py b/examples/arkode/C_openmp/plot_brusselator1D.py index 3cc29051e5..72a3402c4a 100755 --- a/examples/arkode/C_openmp/plot_brusselator1D.py +++ b/examples/arkode/C_openmp/plot_brusselator1D.py @@ -20,23 +20,23 @@ import numpy as np # load mesh data file -mesh = np.loadtxt('bruss_mesh.txt', dtype=np.double) +mesh = np.loadtxt("bruss_mesh.txt", dtype=np.double) # load solution data files -udata = np.loadtxt('bruss_u.txt', dtype=np.double) -vdata = np.loadtxt('bruss_v.txt', dtype=np.double) -wdata = np.loadtxt('bruss_w.txt', dtype=np.double) +udata = np.loadtxt("bruss_u.txt", dtype=np.double) +vdata = np.loadtxt("bruss_v.txt", dtype=np.double) +wdata = np.loadtxt("bruss_w.txt", dtype=np.double) # determine number of time steps, mesh size -nt,nx = np.shape(udata) +nt, nx = np.shape(udata) # determine min/max values -umin = 0.9*udata.min() -umax = 1.1*udata.max() -vmin = 0.9*vdata.min() -vmax = 1.1*vdata.max() -wmin = 0.9*wdata.min() -wmax = 1.1*wdata.max() +umin = 0.9 * udata.min() +umax = 1.1 * udata.max() +vmin = 0.9 * vdata.min() +vmax = 1.1 * vdata.max() +wmin = 0.9 * wdata.min() +wmax = 1.1 * wdata.max() minval = np.array([umin, vmin, wmin]).min() maxval = np.array([umax, vmax, wmax]).max() @@ -44,21 +44,21 @@ for tstep in range(nt): # set string constants for output plots, current time, mesh size - pname = 'brusselator1D.' + repr(tstep).zfill(3) + '.png' - tstr = repr(tstep) + pname = "brusselator1D." 
+ repr(tstep).zfill(3) + ".png" + tstr = repr(tstep) nxstr = repr(nx) # plot current solution and save to disk plt.figure(1) - plt.plot(mesh,udata[tstep,:],label="u") - plt.plot(mesh,vdata[tstep,:],label="v") - plt.plot(mesh,wdata[tstep,:],label="w") - plt.xlabel('x') - plt.ylabel('solution') - plt.title('Solutions at output ' + tstr + ', mesh = ' + nxstr) + plt.plot(mesh, udata[tstep, :], label="u") + plt.plot(mesh, vdata[tstep, :], label="v") + plt.plot(mesh, wdata[tstep, :], label="w") + plt.xlabel("x") + plt.ylabel("solution") + plt.title("Solutions at output " + tstr + ", mesh = " + nxstr) plt.axis((0.0, 1.0, minval, maxval)) plt.grid() - plt.legend(loc='upper right', shadow=True) + plt.legend(loc="upper right", shadow=True) plt.savefig(pname) plt.close() diff --git a/examples/arkode/C_parallel/plot_brusselator1D.py b/examples/arkode/C_parallel/plot_brusselator1D.py index 2bcc7d1af7..087577ff0e 100755 --- a/examples/arkode/C_parallel/plot_brusselator1D.py +++ b/examples/arkode/C_parallel/plot_brusselator1D.py @@ -22,33 +22,36 @@ import numpy as np # load mesh data file -mesh = np.loadtxt('mesh.txt', dtype=np.double) +mesh = np.loadtxt("mesh.txt", dtype=np.double) # load output time file -times = np.loadtxt('t.000000.txt', dtype=np.double) +times = np.loadtxt("t.000000.txt", dtype=np.double) # load solution data files -ufiles = glob.glob('u.' + ('[0-9]'*6) + '.txt'); ufiles.sort() -vfiles = glob.glob('v.' + ('[0-9]'*6) + '.txt'); vfiles.sort() -wfiles = glob.glob('w.' + ('[0-9]'*6) + '.txt'); wfiles.sort() +ufiles = glob.glob("u." + ("[0-9]" * 6) + ".txt") +ufiles.sort() +vfiles = glob.glob("v." + ("[0-9]" * 6) + ".txt") +vfiles.sort() +wfiles = glob.glob("w." + ("[0-9]" * 6) + ".txt") +wfiles.sort() udata = np.loadtxt(ufiles[0], dtype=np.double) vdata = np.loadtxt(vfiles[0], dtype=np.double) wdata = np.loadtxt(wfiles[0], dtype=np.double) -for idx in range(1,len(ufiles)): +for idx in range(1, len(ufiles)): udata = np.hstack((udata, np.loadtxt(ufiles[idx], dtype=np.double))) vdata = np.hstack((vdata, np.loadtxt(vfiles[idx], dtype=np.double))) wdata = np.hstack((wdata, np.loadtxt(wfiles[idx], dtype=np.double))) # determine number of time steps, mesh size -nt,nx = np.shape(udata) +nt, nx = np.shape(udata) # determine min/max values -umin = 0.9*udata.min() -umax = 1.1*udata.max() -vmin = 0.9*vdata.min() -vmax = 1.1*vdata.max() -wmin = 0.9*wdata.min() -wmax = 1.1*wdata.max() +umin = 0.9 * udata.min() +umax = 1.1 * udata.max() +vmin = 0.9 * vdata.min() +vmax = 1.1 * vdata.max() +wmin = 0.9 * wdata.min() +wmax = 1.1 * wdata.max() xmax = mesh.max() minval = np.array([umin, vmin, wmin]).min() maxval = np.array([umax, vmax, wmax]).max() @@ -57,39 +60,39 @@ for tstep in range(nt): # set string constants for output plots, current time, mesh size - pname = 'solution.' + repr(tstep).zfill(3) + '.png' - tstr = repr(tstep) + pname = "solution." 
+ repr(tstep).zfill(3) + ".png" + tstr = repr(tstep) nxstr = repr(nx) # plot current solution and save to disk plt.figure(1) - plt.plot(mesh,udata[tstep,:],label="u") - plt.plot(mesh,vdata[tstep,:],label="v") - plt.plot(mesh,wdata[tstep,:],label="w") - plt.xlabel('x') - plt.ylabel('solution') - plt.title('Solutions at output ' + tstr + ', mesh = ' + nxstr) + plt.plot(mesh, udata[tstep, :], label="u") + plt.plot(mesh, vdata[tstep, :], label="v") + plt.plot(mesh, wdata[tstep, :], label="w") + plt.xlabel("x") + plt.ylabel("solution") + plt.title("Solutions at output " + tstr + ", mesh = " + nxstr) plt.axis((0.0, xmax, minval, maxval)) plt.grid() - plt.legend(loc='upper right', shadow=True) + plt.legend(loc="upper right", shadow=True) plt.savefig(pname) plt.close() # set string constants for output plots, current time, mesh size -pname = 'solution_at_x0.png' +pname = "solution_at_x0.png" xstr = repr(mesh[0]) # plot current solution and save to disk plt.figure(1) -plt.plot(times,udata[:,0],label="u") -plt.plot(times,vdata[:,0],label="v") -plt.plot(times,wdata[:,0],label="w") -plt.xlabel('t') -plt.ylabel('solution') -plt.title('Solutions at output at x = '+xstr) +plt.plot(times, udata[:, 0], label="u") +plt.plot(times, vdata[:, 0], label="v") +plt.plot(times, wdata[:, 0], label="w") +plt.xlabel("t") +plt.ylabel("solution") +plt.title("Solutions at output at x = " + xstr) plt.axis((times[0], times[-1], minval, maxval)) plt.grid() -plt.legend(loc='upper right', shadow=True) +plt.legend(loc="upper right", shadow=True) plt.savefig(pname) plt.close() diff --git a/examples/arkode/C_serial/ark_kepler_plot.py b/examples/arkode/C_serial/ark_kepler_plot.py index 2d499d850e..7a50dc3ebb 100755 --- a/examples/arkode/C_serial/ark_kepler_plot.py +++ b/examples/arkode/C_serial/ark_kepler_plot.py @@ -19,70 +19,72 @@ import matplotlib.pyplot as plt import argparse -parser = argparse.ArgumentParser(description='Script for plotting the energy, angular momentum, and phase space solution for ark_kepler.c') -parser.add_argument('output_times', help='file with the output times') -parser.add_argument('solution', help='file with the solution') -parser.add_argument('conserved_quantities', help='file with conserved quantities') +parser = argparse.ArgumentParser( + description="Script for plotting the energy, angular momentum, and phase space solution for ark_kepler.c" +) +parser.add_argument("output_times", help="file with the output times") +parser.add_argument("solution", help="file with the solution") +parser.add_argument("conserved_quantities", help="file with conserved quantities") args = parser.parse_args() t = np.loadtxt(args.output_times, dtype=np.float64) y = np.loadtxt(args.solution, dtype=np.float64) -y = np.reshape(y, (y.shape[0]//4, 4)) +y = np.reshape(y, (y.shape[0] // 4, 4)) plt.figure(dpi=200) -plt.plot(y[:,0], y[:,1]) -plt.savefig('ark_kepler_phase.png') +plt.plot(y[:, 0], y[:, 1]) +plt.savefig("ark_kepler_phase.png") plt.close() -conserved = np.loadtxt(args.conserved_quantities, delimiter=',', dtype=np.float64) -energy = conserved[:,0] -energy_0 = conserved[0,0] -L = conserved[:,1] -L_0 = conserved[0,1] +conserved = np.loadtxt(args.conserved_quantities, delimiter=",", dtype=np.float64) +energy = conserved[:, 0] +energy_0 = conserved[0, 0] +L = conserved[:, 1] +L_0 = conserved[0, 1] plt.figure(dpi=200) -plt.title('Energy') +plt.title("Energy") plt.plot(t, np.abs(energy)) -plt.ylabel('H(t,p,q)') -plt.xlabel('<--- t --->') -plt.xscale('log') -plt.savefig('ark_kepler_energy.png') +plt.ylabel("H(t,p,q)") 
+plt.xlabel("<--- t --->") +plt.xscale("log") +plt.savefig("ark_kepler_energy.png") plt.close() plt.figure(dpi=200) -plt.title('Momentum') +plt.title("Momentum") plt.plot(t, L) -plt.ylabel('L(t,p,q)') -plt.xlabel('<--- t --->') -plt.xscale('log') -plt.savefig('ark_kepler_momentum.png') +plt.ylabel("L(t,p,q)") +plt.xlabel("<--- t --->") +plt.xscale("log") +plt.savefig("ark_kepler_momentum.png") plt.close() # # Time plot. # plt.figure(dpi=200) -plt.plot(t, y[:,0], linewidth = 2) -plt.plot(t, y[:,1], linewidth = 2) -plt.plot(t, y[:,2], linewidth = 2) -plt.plot(t, y[:,3], linewidth = 2) +plt.plot(t, y[:, 0], linewidth=2) +plt.plot(t, y[:, 1], linewidth=2) +plt.plot(t, y[:, 2], linewidth=2) +plt.plot(t, y[:, 3], linewidth=2) plt.grid(True) -plt.legend(['P', 'P\'', 'Q', 'Q\'']) -plt.xlabel('<--- t --->') -plt.ylabel('<--- y(1:4) --->') -plt.title('Solution in Time') -plt.savefig('ark_kepler_plot.png') +plt.legend(["P", "P'", "Q", "Q'"]) +plt.xlabel("<--- t --->") +plt.ylabel("<--- y(1:4) --->") +plt.title("Solution in Time") +plt.savefig("ark_kepler_plot.png") plt.close() # # Phase plot. # plt.figure(dpi=200) -plt.plot(y[:,0], y[:,1], linewidth=0.1) +plt.plot(y[:, 0], y[:, 1], linewidth=0.1) plt.grid(True) -plt.xlabel('<--- y1 --->') -plt.ylabel('<--- y2 --->') -plt.title('Phase Plot') -plt.savefig('ark_kepler_phase.png') +plt.xlabel("<--- y1 --->") +plt.ylabel("<--- y2 --->") +plt.title("Phase Plot") +plt.savefig("ark_kepler_phase.png") plt.close() diff --git a/examples/arkode/C_serial/plot_brusselator1D.py b/examples/arkode/C_serial/plot_brusselator1D.py index 3cc29051e5..72a3402c4a 100755 --- a/examples/arkode/C_serial/plot_brusselator1D.py +++ b/examples/arkode/C_serial/plot_brusselator1D.py @@ -20,23 +20,23 @@ import numpy as np # load mesh data file -mesh = np.loadtxt('bruss_mesh.txt', dtype=np.double) +mesh = np.loadtxt("bruss_mesh.txt", dtype=np.double) # load solution data files -udata = np.loadtxt('bruss_u.txt', dtype=np.double) -vdata = np.loadtxt('bruss_v.txt', dtype=np.double) -wdata = np.loadtxt('bruss_w.txt', dtype=np.double) +udata = np.loadtxt("bruss_u.txt", dtype=np.double) +vdata = np.loadtxt("bruss_v.txt", dtype=np.double) +wdata = np.loadtxt("bruss_w.txt", dtype=np.double) # determine number of time steps, mesh size -nt,nx = np.shape(udata) +nt, nx = np.shape(udata) # determine min/max values -umin = 0.9*udata.min() -umax = 1.1*udata.max() -vmin = 0.9*vdata.min() -vmax = 1.1*vdata.max() -wmin = 0.9*wdata.min() -wmax = 1.1*wdata.max() +umin = 0.9 * udata.min() +umax = 1.1 * udata.max() +vmin = 0.9 * vdata.min() +vmax = 1.1 * vdata.max() +wmin = 0.9 * wdata.min() +wmax = 1.1 * wdata.max() minval = np.array([umin, vmin, wmin]).min() maxval = np.array([umax, vmax, wmax]).max() @@ -44,21 +44,21 @@ for tstep in range(nt): # set string constants for output plots, current time, mesh size - pname = 'brusselator1D.' + repr(tstep).zfill(3) + '.png' - tstr = repr(tstep) + pname = "brusselator1D." 
+ repr(tstep).zfill(3) + ".png" + tstr = repr(tstep) nxstr = repr(nx) # plot current solution and save to disk plt.figure(1) - plt.plot(mesh,udata[tstep,:],label="u") - plt.plot(mesh,vdata[tstep,:],label="v") - plt.plot(mesh,wdata[tstep,:],label="w") - plt.xlabel('x') - plt.ylabel('solution') - plt.title('Solutions at output ' + tstr + ', mesh = ' + nxstr) + plt.plot(mesh, udata[tstep, :], label="u") + plt.plot(mesh, vdata[tstep, :], label="v") + plt.plot(mesh, wdata[tstep, :], label="w") + plt.xlabel("x") + plt.ylabel("solution") + plt.title("Solutions at output " + tstr + ", mesh = " + nxstr) plt.axis((0.0, 1.0, minval, maxval)) plt.grid() - plt.legend(loc='upper right', shadow=True) + plt.legend(loc="upper right", shadow=True) plt.savefig(pname) plt.close() diff --git a/examples/arkode/C_serial/plot_brusselator1D_FEM.py b/examples/arkode/C_serial/plot_brusselator1D_FEM.py index d47bf2b40e..61bbf4f069 100755 --- a/examples/arkode/C_serial/plot_brusselator1D_FEM.py +++ b/examples/arkode/C_serial/plot_brusselator1D_FEM.py @@ -20,52 +20,52 @@ import numpy as np # load mesh data file -mesh = np.loadtxt('bruss_FEM_mesh.txt', dtype=np.double) +mesh = np.loadtxt("bruss_FEM_mesh.txt", dtype=np.double) # load solution data files -udata = np.loadtxt('bruss_FEM_u.txt', dtype=np.double) -vdata = np.loadtxt('bruss_FEM_v.txt', dtype=np.double) -wdata = np.loadtxt('bruss_FEM_w.txt', dtype=np.double) +udata = np.loadtxt("bruss_FEM_u.txt", dtype=np.double) +vdata = np.loadtxt("bruss_FEM_v.txt", dtype=np.double) +wdata = np.loadtxt("bruss_FEM_w.txt", dtype=np.double) # determine number of time steps, mesh size -nt,nx = np.shape(udata) +nt, nx = np.shape(udata) # determine min/max values -umin = 0.9*udata.min() -umax = 1.1*udata.max() -vmin = 0.9*vdata.min() -vmax = 1.1*vdata.max() -wmin = 0.9*wdata.min() -wmax = 1.1*wdata.max() +umin = 0.9 * udata.min() +umax = 1.1 * udata.max() +vmin = 0.9 * vdata.min() +vmax = 1.1 * vdata.max() +wmin = 0.9 * wdata.min() +wmax = 1.1 * wdata.max() minval = np.array([umin, vmin, wmin]).min() maxval = np.array([umax, vmax, wmax]).max() # plot the mesh plt.figure(1) -plt.plot(mesh,0.0*mesh,'o') -plt.xlabel('x') -plt.title('FEM mesh') -plt.savefig('brusselator1D_FEM_mesh.png') +plt.plot(mesh, 0.0 * mesh, "o") +plt.xlabel("x") +plt.title("FEM mesh") +plt.savefig("brusselator1D_FEM_mesh.png") # generate plots of results for tstep in range(nt): # set string constants for output plots, current time, mesh size - pname = 'brusselator1D_FEM.' + repr(tstep).zfill(3) + '.png' - tstr = repr(tstep) + pname = "brusselator1D_FEM." 
+ repr(tstep).zfill(3) + ".png" + tstr = repr(tstep) nxstr = repr(nx) # plot current solution and save to disk plt.figure(1) - plt.plot(mesh,udata[tstep,:],label="u") - plt.plot(mesh,vdata[tstep,:],label="v") - plt.plot(mesh,wdata[tstep,:],label="w") - plt.xlabel('x') - plt.ylabel('solution') - plt.title('Solutions at output ' + tstr + ', mesh = ' + nxstr) + plt.plot(mesh, udata[tstep, :], label="u") + plt.plot(mesh, vdata[tstep, :], label="v") + plt.plot(mesh, wdata[tstep, :], label="w") + plt.xlabel("x") + plt.ylabel("solution") + plt.title("Solutions at output " + tstr + ", mesh = " + nxstr) plt.axis((0.0, 1.0, minval, maxval)) plt.grid() - plt.legend(loc='upper right', shadow=True) + plt.legend(loc="upper right", shadow=True) plt.savefig(pname) plt.close() diff --git a/examples/arkode/C_serial/plot_heat1D.py b/examples/arkode/C_serial/plot_heat1D.py index d1c8e2bfdf..7b7b0e3fd2 100755 --- a/examples/arkode/C_serial/plot_heat1D.py +++ b/examples/arkode/C_serial/plot_heat1D.py @@ -20,31 +20,31 @@ import numpy as np # load mesh data file -mesh = np.loadtxt('heat_mesh.txt', dtype=np.double) +mesh = np.loadtxt("heat_mesh.txt", dtype=np.double) # load solution data file -data = np.loadtxt('heat1D.txt', dtype=np.double) +data = np.loadtxt("heat1D.txt", dtype=np.double) # determine number of time steps, mesh size -nt,nx = np.shape(data) +nt, nx = np.shape(data) # determine maximum temperature -maxtemp = 1.1*data.max() +maxtemp = 1.1 * data.max() # generate plots of results for tstep in range(nt): # set string constants for output plots, current time, mesh size - pname = 'heat1d.' + repr(tstep).zfill(3) + '.png' - tstr = repr(tstep) + pname = "heat1d." + repr(tstep).zfill(3) + ".png" + tstr = repr(tstep) nxstr = repr(nx) # plot current solution and save to disk plt.figure(1) - plt.plot(mesh,data[tstep,:]) - plt.xlabel('x') - plt.ylabel('solution') - plt.title('u(x) at output ' + tstr + ', mesh = ' + nxstr) + plt.plot(mesh, data[tstep, :]) + plt.xlabel("x") + plt.ylabel("solution") + plt.title("u(x) at output " + tstr + ", mesh = " + nxstr) plt.axis((0.0, 1.0, 0.0, maxtemp)) plt.grid() plt.savefig(pname) diff --git a/examples/arkode/C_serial/plot_heat1D_adapt.py b/examples/arkode/C_serial/plot_heat1D_adapt.py index fa813fff04..ab361a968e 100755 --- a/examples/arkode/C_serial/plot_heat1D_adapt.py +++ b/examples/arkode/C_serial/plot_heat1D_adapt.py @@ -20,39 +20,41 @@ import numpy as np # load mesh data file as list of NumPy arrays -inp = open('heat_mesh.txt').readlines() +inp = open("heat_mesh.txt").readlines() mesh = [] for line in inp: mesh.append(np.array(str.split(line), dtype=np.double)) # load solution data file as list of NumPy arrays -inp = open('heat1D.txt').readlines() +inp = open("heat1D.txt").readlines() data = [] for line in inp: data.append(np.array(str.split(line), dtype=np.double)) # determine number of time steps -nt = len(mesh) +nt = len(mesh) nt2 = len(data) -if (nt != nt2): - sys.exit('plot_heat1D_adapt.py error: data and mesh files have different numbers of time steps') +if nt != nt2: + sys.exit( + "plot_heat1D_adapt.py error: data and mesh files have different numbers of time steps" + ) # determine minimum/maximum temperature mintemp = 0.0 maxtemp = 0.0 for tstep in range(nt): mx = data[tstep].max() - if (mx > maxtemp): + if mx > maxtemp: maxtemp = mx mn = data[tstep].min() - if (mn < mintemp): + if mn < mintemp: mintemp = mn -if (maxtemp > 0.0): +if maxtemp > 0.0: maxtemp *= 1.1 else: maxtemp *= 0.9 -if (mintemp > 0.0): +if mintemp > 0.0: mintemp *= 0.9 else: mintemp *= 1.1 
@@ -62,16 +64,16 @@ for tstep in range(nt): # set string constants for output plots, current time, mesh size - pname = 'heat1d.' + repr(tstep).zfill(3) + '.png' - tstr = repr(tstep) + pname = "heat1d." + repr(tstep).zfill(3) + ".png" + tstr = repr(tstep) nxstr = repr(len(data[tstep])) # plot current solution and save to disk plt.figure(1) - plt.plot(mesh[tstep],data[tstep],'-o') - plt.xlabel('x') - plt.ylabel('solution') - plt.title('u(x) at output ' + tstr + ', mesh = ' + nxstr) + plt.plot(mesh[tstep], data[tstep], "-o") + plt.xlabel("x") + plt.ylabel("solution") + plt.title("u(x) at output " + tstr + ", mesh = " + nxstr) plt.axis((0.0, 1.0, mintemp, maxtemp)) plt.grid() plt.savefig(pname) diff --git a/examples/arkode/C_serial/plot_sol.py b/examples/arkode/C_serial/plot_sol.py index af783fb053..813f35bec2 100755 --- a/examples/arkode/C_serial/plot_sol.py +++ b/examples/arkode/C_serial/plot_sol.py @@ -20,16 +20,16 @@ import numpy as np # load solution data file -data = np.loadtxt('solution.txt', dtype=np.double) +data = np.loadtxt("solution.txt", dtype=np.double) # determine number of time steps, number of fields -nt,nv = np.shape(data) +nt, nv = np.shape(data) # extract time array -times = data[:,0] +times = data[:, 0] # parse comment line to determine solution names -f = open('solution.txt', 'r') +f = open("solution.txt", "r") commentline = f.readline() commentsplit = commentline.split() names = commentsplit[2:] @@ -38,18 +38,16 @@ plt.figure() # add curves to figure -for i in range(nv-1): - plt.plot(times,data[:,i+1],label=names[i]) -plt.xlabel('t') -if (nv > 2): - plt.ylabel('solutions') +for i in range(nv - 1): + plt.plot(times, data[:, i + 1], label=names[i]) +plt.xlabel("t") +if nv > 2: + plt.ylabel("solutions") else: - plt.ylabel('solution') -plt.legend(loc='upper right', shadow=True) + plt.ylabel("solution") +plt.legend(loc="upper right", shadow=True) plt.grid() -plt.savefig('solution.png') - - +plt.savefig("solution.png") ##### end of script ##### diff --git a/examples/arkode/C_serial/plot_sol_log.py b/examples/arkode/C_serial/plot_sol_log.py index ca27f9eb59..2437cce448 100755 --- a/examples/arkode/C_serial/plot_sol_log.py +++ b/examples/arkode/C_serial/plot_sol_log.py @@ -20,16 +20,16 @@ import numpy as np # load solution data file -data = np.loadtxt('solution.txt', dtype=np.double) +data = np.loadtxt("solution.txt", dtype=np.double) # determine number of time steps, number of fields -nt,nv = np.shape(data) +nt, nv = np.shape(data) # extract time array -times = data[:,0] +times = data[:, 0] # parse comment line to determine solution names -f = open('solution.txt', 'r') +f = open("solution.txt", "r") commentline = f.readline() commentsplit = commentline.split() names = commentsplit[2:] @@ -38,18 +38,16 @@ plt.figure() # add curves to figure -for i in range(nv-1): - plt.loglog(times,data[:,i+1],label=names[i]) -plt.xlabel('t') -if (nv > 2): - plt.ylabel('solutions') +for i in range(nv - 1): + plt.loglog(times, data[:, i + 1], label=names[i]) +plt.xlabel("t") +if nv > 2: + plt.ylabel("solutions") else: - plt.ylabel('solution') -plt.legend(loc='upper right', shadow=True) + plt.ylabel("solution") +plt.legend(loc="upper right", shadow=True) plt.grid() -plt.savefig('solution.png') - - +plt.savefig("solution.png") ##### end of script ##### diff --git a/examples/cvode/CXX_parallel/plot_heat2D_p.py b/examples/cvode/CXX_parallel/plot_heat2D_p.py index 5e5357873a..9f320f609b 100755 --- a/examples/cvode/CXX_parallel/plot_heat2D_p.py +++ b/examples/cvode/CXX_parallel/plot_heat2D_p.py @@ 
-28,7 +28,7 @@ # ------------------------------------------------------------------------------ # read MPI root process problem info file -infofile = 'heat2d_info.00000.txt' +infofile = "heat2d_info.00000.txt" with open(infofile) as fn: @@ -59,7 +59,7 @@ continue # total number of MPI processes - if "np"in line: + if "np" in line: nprocs = int(text[1]) continue @@ -71,11 +71,11 @@ # ------------------------------------------------------------------------------ # load subdomain information, store in table -subdomains = np.zeros((nprocs,4), dtype=np.int) +subdomains = np.zeros((nprocs, 4), dtype=np.int) for i in range(nprocs): - infofile = 'heat2d_info.' + repr(i).zfill(5) + '.txt' + infofile = "heat2d_info." + repr(i).zfill(5) + ".txt" with open(infofile) as fn: @@ -87,56 +87,58 @@ # x-direction starting index if "is" in line: - subdomains[i,0] = float(text[1]) + subdomains[i, 0] = float(text[1]) continue # x-direction ending index if "ie" in line: - subdomains[i,1] = float(text[1]) + subdomains[i, 1] = float(text[1]) continue # y-direction starting index if "js" in line: - subdomains[i,2] = float(text[1]) + subdomains[i, 2] = float(text[1]) continue # y-direction ending index if "je" in line: - subdomains[i,3] = float(text[1]) + subdomains[i, 3] = float(text[1]) continue # ------------------------------------------------------------------------------ -plottype = ['solution', 'error'] +plottype = ["solution", "error"] for pt in plottype: # fill array with data - time = np.zeros(nt) + time = np.zeros(nt) result = np.zeros((nt, ny, nx)) for i in range(nprocs): - datafile = 'heat2d_' + pt + '.' + repr(i).zfill(5) + '.txt' + datafile = "heat2d_" + pt + "." + repr(i).zfill(5) + ".txt" # load data data = np.loadtxt(datafile, dtype=np.double) - if (np.shape(data)[0] != nt): - sys.exit('error: subdomain ' + i + ' has an incorrect number of time steps') + if np.shape(data)[0] != nt: + sys.exit("error: subdomain " + i + " has an incorrect number of time steps") # subdomain indices - istart = subdomains[i,0] - iend = subdomains[i,1] - jstart = subdomains[i,2] - jend = subdomains[i,3] - nxl = iend - istart + 1 - nyl = jend - jstart + 1 + istart = subdomains[i, 0] + iend = subdomains[i, 1] + jstart = subdomains[i, 2] + jend = subdomains[i, 3] + nxl = iend - istart + 1 + nyl = jend - jstart + 1 # extract data for i in range(nt): - time[i] = data[i,0] - result[i,jstart:jend+1,istart:iend+1] = np.reshape(data[i,1:], (nyl,nxl)) + time[i] = data[i, 0] + result[i, jstart : jend + 1, istart : iend + 1] = np.reshape( + data[i, 1:], (nyl, nxl) + ) # determine extents of plots maxtemp = 1.1 * result.max() @@ -145,7 +147,7 @@ # set x and y meshgrid objects xspan = np.linspace(0.0, xu, nx) yspan = np.linspace(0.0, yu, ny) - X,Y = np.meshgrid(xspan, yspan) + X, Y = np.meshgrid(xspan, yspan) nxstr = repr(nx) nystr = repr(ny) @@ -154,24 +156,33 @@ for tstep in range(nt): # set string constants for output plots, current time, mesh size - pname = 'heat2d_surf_' + pt + '.' + repr(tstep).zfill(3) + '.png' - tstr = str(time[tstep]) + pname = "heat2d_surf_" + pt + "." 
+ repr(tstep).zfill(3) + ".png" + tstr = str(time[tstep]) # plot surface and save to disk fig = plt.figure(1) - ax = fig.add_subplot(111, projection='3d') - - ax.plot_surface(X, Y, result[tstep,:,:], rstride=1, cstride=1, - cmap=cm.jet, linewidth=0, antialiased=True, shade=True) - - ax.set_xlabel('x') - ax.set_ylabel('y') + ax = fig.add_subplot(111, projection="3d") + + ax.plot_surface( + X, + Y, + result[tstep, :, :], + rstride=1, + cstride=1, + cmap=cm.jet, + linewidth=0, + antialiased=True, + shade=True, + ) + + ax.set_xlabel("x") + ax.set_ylabel("y") ax.set_zlim((mintemp, maxtemp)) - ax.view_init(20,45) - if (pt == 'solution'): - title('u(x,y) at t = ' + tstr) + ax.view_init(20, 45) + if pt == "solution": + title("u(x,y) at t = " + tstr) else: - title('error(x,y) at t = ' + tstr) + title("error(x,y) at t = " + tstr) savefig(pname) plt.close() diff --git a/examples/cvode/CXX_parhyp/plot_heat2D_p.py b/examples/cvode/CXX_parhyp/plot_heat2D_p.py index 58673d17b2..f567f1c621 100755 --- a/examples/cvode/CXX_parhyp/plot_heat2D_p.py +++ b/examples/cvode/CXX_parhyp/plot_heat2D_p.py @@ -28,7 +28,7 @@ # ------------------------------------------------------------------------------ # read MPI root process problem info file -infofile = 'heat2d_info.00000.txt' +infofile = "heat2d_info.00000.txt" with open(infofile) as fn: @@ -59,7 +59,7 @@ continue # total number of MPI processes - if "np"in line: + if "np" in line: nprocs = int(text[1]) continue @@ -71,11 +71,11 @@ # ------------------------------------------------------------------------------ # load subdomain information, store in table -subdomains = np.zeros((nprocs,4), dtype=np.int) +subdomains = np.zeros((nprocs, 4), dtype=np.int) for i in range(nprocs): - infofile = 'heat2d_info.' + repr(i).zfill(5) + '.txt' + infofile = "heat2d_info." + repr(i).zfill(5) + ".txt" with open(infofile) as fn: @@ -87,56 +87,58 @@ # x-direction starting index if "is" in line: - subdomains[i,0] = float(text[1]) + subdomains[i, 0] = float(text[1]) continue # x-direction ending index if "ie" in line: - subdomains[i,1] = float(text[1]) + subdomains[i, 1] = float(text[1]) continue # y-direction starting index if "js" in line: - subdomains[i,2] = float(text[1]) + subdomains[i, 2] = float(text[1]) continue # y-direction ending index if "je" in line: - subdomains[i,3] = float(text[1]) + subdomains[i, 3] = float(text[1]) continue # ------------------------------------------------------------------------------ -plottype = ['solution', 'error'] +plottype = ["solution", "error"] for pt in plottype: # fill array with data - time = np.zeros(nt) + time = np.zeros(nt) result = np.zeros((nt, ny, nx)) for i in range(nprocs): - datafile = 'heat2d_' + pt + '.' + repr(i).zfill(5) + '.txt' + datafile = "heat2d_" + pt + "." 
+ repr(i).zfill(5) + ".txt" # load data data = np.loadtxt(datafile, dtype=np.double) - if (np.shape(data)[0] != nt): - sys.exit('error: subdomain ' + i + ' has an incorrect number of time steps') + if np.shape(data)[0] != nt: + sys.exit("error: subdomain " + i + " has an incorrect number of time steps") # subdomain indices - istart = subdomains[i,0] - iend = subdomains[i,1] - jstart = subdomains[i,2] - jend = subdomains[i,3] - nxl = iend - istart + 1 - nyl = jend - jstart + 1 + istart = subdomains[i, 0] + iend = subdomains[i, 1] + jstart = subdomains[i, 2] + jend = subdomains[i, 3] + nxl = iend - istart + 1 + nyl = jend - jstart + 1 # extract data for i in range(nt): - time[i] = data[i,0] - result[i,jstart:jend+1,istart:iend+1] = np.reshape(data[i,1:], (nyl,nxl)) + time[i] = data[i, 0] + result[i, jstart : jend + 1, istart : iend + 1] = np.reshape( + data[i, 1:], (nyl, nxl) + ) # determine extents of plots maxtemp = 1.1 * result.max() @@ -145,7 +147,7 @@ # set x and y meshgrid objects xspan = np.linspace(0.0, xu, nx) yspan = np.linspace(0.0, yu, ny) - X,Y = np.meshgrid(xspan, yspan) + X, Y = np.meshgrid(xspan, yspan) nxstr = repr(nx) nystr = repr(ny) @@ -154,24 +156,33 @@ for tstep in range(nt): # set string constants for output plots, current time, mesh size - pname = 'heat2d_surf_' + pt + '.' + repr(tstep).zfill(3) + '.png' - tstr = str(time[tstep]) + pname = "heat2d_surf_" + pt + "." + repr(tstep).zfill(3) + ".png" + tstr = str(time[tstep]) # plot surface and save to disk fig = plt.figure(1) - ax = fig.add_subplot(111, projection='3d') - - ax.plot_surface(X, Y, result[tstep,:,:], rstride=1, cstride=1, - cmap=cm.jet, linewidth=0, antialiased=True, shade=True) - - ax.set_xlabel('x') - ax.set_ylabel('y') + ax = fig.add_subplot(111, projection="3d") + + ax.plot_surface( + X, + Y, + result[tstep, :, :], + rstride=1, + cstride=1, + cmap=cm.jet, + linewidth=0, + antialiased=True, + shade=True, + ) + + ax.set_xlabel("x") + ax.set_ylabel("y") ax.set_zlim((mintemp, maxtemp)) - ax.view_init(20,45) - if (pt == 'solution'): - title('u(x,y) at t = ' + tstr) + ax.view_init(20, 45) + if pt == "solution": + title("u(x,y) at t = " + tstr) else: - title('error(x,y) at t = ' + tstr) + title("error(x,y) at t = " + tstr) savefig(pname) plt.close() diff --git a/examples/cvode/CXX_serial/plot_heat2D.py b/examples/cvode/CXX_serial/plot_heat2D.py index bbadc4de32..90ceba7d96 100755 --- a/examples/cvode/CXX_serial/plot_heat2D.py +++ b/examples/cvode/CXX_serial/plot_heat2D.py @@ -28,7 +28,7 @@ # ------------------------------------------------------------------------------ # read problem info file -infofile = 'heat2d_info.txt' +infofile = "heat2d_info.txt" with open(infofile) as fn: @@ -65,21 +65,21 @@ # ------------------------------------------------------------------------------ -plottype = ['solution', 'error'] +plottype = ["solution", "error"] for pt in plottype: # fill array with data - time = np.zeros(nt) + time = np.zeros(nt) result = np.zeros((nt, ny, nx)) # load data - data = np.loadtxt('heat2d_' + pt + '.txt', dtype=np.double) + data = np.loadtxt("heat2d_" + pt + ".txt", dtype=np.double) # extract data for i in range(nt): - time[i] = data[i,0] - result[i,0:ny+1,0:nx+1] = np.reshape(data[i,1:], (ny,nx)) + time[i] = data[i, 0] + result[i, 0 : ny + 1, 0 : nx + 1] = np.reshape(data[i, 1:], (ny, nx)) # determine extents of plots maxtemp = 1.1 * result.max() @@ -88,7 +88,7 @@ # set x and y meshgrid objects xspan = np.linspace(0.0, xu, nx) yspan = np.linspace(0.0, yu, ny) - X,Y = np.meshgrid(xspan, yspan) + X, 
Y = np.meshgrid(xspan, yspan) nxstr = repr(nx) nystr = repr(ny) @@ -97,24 +97,33 @@ for tstep in range(nt): # set string constants for output plots, current time, mesh size - pname = 'heat2d_surf_' + pt + '.' + repr(tstep).zfill(3) + '.png' - tstr = str(time[tstep]) + pname = "heat2d_surf_" + pt + "." + repr(tstep).zfill(3) + ".png" + tstr = str(time[tstep]) # plot surface and save to disk fig = plt.figure(1) - ax = fig.add_subplot(111, projection='3d') - - ax.plot_surface(X, Y, result[tstep,:,:], rstride=1, cstride=1, - cmap=cm.jet, linewidth=0, antialiased=True, shade=True) - - ax.set_xlabel('x') - ax.set_ylabel('y') + ax = fig.add_subplot(111, projection="3d") + + ax.plot_surface( + X, + Y, + result[tstep, :, :], + rstride=1, + cstride=1, + cmap=cm.jet, + linewidth=0, + antialiased=True, + shade=True, + ) + + ax.set_xlabel("x") + ax.set_ylabel("y") ax.set_zlim((mintemp, maxtemp)) - ax.view_init(20,45) - if (pt == 'solution'): - title('u(x,y) at t = ' + tstr) + ax.view_init(20, 45) + if pt == "solution": + title("u(x,y) at t = " + tstr) else: - title('error(x,y) at t = ' + tstr) + title("error(x,y) at t = " + tstr) savefig(pname) plt.close() diff --git a/examples/cvode/serial/plot_cvParticle.py b/examples/cvode/serial/plot_cvParticle.py index 6557000a0a..f686312a78 100755 --- a/examples/cvode/serial/plot_cvParticle.py +++ b/examples/cvode/serial/plot_cvParticle.py @@ -21,20 +21,26 @@ import matplotlib.pyplot as plt # command line options -parser = argparse.ArgumentParser(description='Plots cvPraticle_dns output') -parser.add_argument('--sfile', type=str, - default='cvParticle_solution.txt', - help='solution output file to read') -parser.add_argument('--efile', type=str, - default='cvParticle_error.txt', - help='error output file to read') -parser.add_argument('--alpha', type=float, nargs=1, - default=1.0, - help='set a non-default alpha value') -parser.add_argument('--slim', type=float, nargs=2, - help='x and y limits for solution plot') -parser.add_argument('--eylim', type=float, nargs=2, - help='y limits for error plot') +parser = argparse.ArgumentParser(description="Plots cvPraticle_dns output") +parser.add_argument( + "--sfile", + type=str, + default="cvParticle_solution.txt", + help="solution output file to read", +) +parser.add_argument( + "--efile", + type=str, + default="cvParticle_error.txt", + help="error output file to read", +) +parser.add_argument( + "--alpha", type=float, nargs=1, default=1.0, help="set a non-default alpha value" +) +parser.add_argument( + "--slim", type=float, nargs=2, help="x and y limits for solution plot" +) +parser.add_argument("--eylim", type=float, nargs=2, help="y limits for error plot") # parse inputs args = parser.parse_args() @@ -48,23 +54,23 @@ y = data[:, 2] # unit circle -tt = np.linspace(0,np.pi*2,10000) +tt = np.linspace(0, np.pi * 2, 10000) xt = np.cos(tt) yt = np.sin(tt) # plot solution fig, ax = plt.subplots() -plt.plot(xt, yt, color='black', linestyle='--') -plt.scatter(x, y, color='red') +plt.plot(xt, yt, color="black", linestyle="--") +plt.scatter(x, y, color="red") -if (args.slim): +if args.slim: plt.xlim((args.slim[0], args.slim[1])) plt.ylim((args.slim[0], args.slim[1])) -plt.xlabel('x') -plt.ylabel('y') -plt.title('Solution') -ax.set_aspect('equal') +plt.xlabel("x") +plt.ylabel("y") +plt.title("Solution") +ax.set_aspect("equal") # true solution xt = np.cos(args.alpha * t) @@ -72,15 +78,15 @@ # plot solution fig, ax = plt.subplots() -plt.plot(t, x, linestyle='-', label='x') -plt.plot(t, xt, linestyle='--', label='x true') 
-plt.plot(t, y, linestyle='-', label='y') -plt.plot(t, yt, linestyle='--', label='y true') +plt.plot(t, x, linestyle="-", label="x") +plt.plot(t, xt, linestyle="--", label="x true") +plt.plot(t, y, linestyle="-", label="y") +plt.plot(t, yt, linestyle="--", label="y true") -plt.xlabel('t') -plt.ylabel('position') -plt.title('Particle Position Over Time') -plt.legend(loc='lower right') +plt.xlabel("t") +plt.ylabel("position") +plt.title("Particle Position Over Time") +plt.legend(loc="lower right") # read error output file data = np.loadtxt(args.efile, dtype=np.double) @@ -93,17 +99,17 @@ # plot solution fig, ax = plt.subplots() -plt.semilogy(t, xerr, label='x err') -plt.semilogy(t, yerr, label='y err') -plt.semilogy(t, cerr, label='c err') +plt.semilogy(t, xerr, label="x err") +plt.semilogy(t, yerr, label="y err") +plt.semilogy(t, cerr, label="c err") -if (args.eylim): +if args.eylim: plt.ylim((args.eylim[0], args.eylim[1])) -plt.xlabel('time') -plt.ylabel('error') -plt.legend(loc='lower right') -plt.title('Error in position and constraint') +plt.xlabel("time") +plt.ylabel("error") +plt.legend(loc="lower right") +plt.title("Error in position and constraint") plt.grid() # display plots diff --git a/examples/cvode/serial/plot_cvPendulum.py b/examples/cvode/serial/plot_cvPendulum.py index 07314f2936..c855c70d6d 100755 --- a/examples/cvode/serial/plot_cvPendulum.py +++ b/examples/cvode/serial/plot_cvPendulum.py @@ -21,9 +21,8 @@ import matplotlib.pyplot as plt # command line options -parser = argparse.ArgumentParser(description='Plots cvPendulum_dns output') -parser.add_argument('sfile', type=str, - help='solution output file to read') +parser = argparse.ArgumentParser(description="Plots cvPendulum_dns output") +parser.add_argument("sfile", type=str, help="solution output file to read") # parse inputs args = parser.parse_args() @@ -32,9 +31,9 @@ data = np.loadtxt(args.sfile, dtype=np.double) # extract times, positions, and velocities -t = data[:, 0] -x = data[:, 1] -y = data[:, 2] +t = data[:, 0] +x = data[:, 1] +y = data[:, 2] vx = data[:, 3] vy = data[:, 4] @@ -42,50 +41,50 @@ ref = np.loadtxt("cvPendulum_dns_ref.txt", dtype=np.double) # extract positions and velocities -xr = ref[:, 1] -yr = ref[:, 2] +xr = ref[:, 1] +yr = ref[:, 2] vxr = ref[:, 3] vyr = ref[:, 4] # lower half of unit circle -tt = np.linspace(np.pi, 2*np.pi, 10000) +tt = np.linspace(np.pi, 2 * np.pi, 10000) xt = np.cos(tt) yt = np.sin(tt) # plot solution in xy plane fig, ax = plt.subplots() -ax.axhline(y=0, color='black', linestyle='--', label=None) -ax.axvline(x=0, color='black', linestyle='--', label=None) -plt.plot(xt, yt, color='black', linestyle='--', label=None) -plt.scatter(x, y, color='red', label='comp') -plt.scatter(xr, yr, color='blue', label='ref') - -plt.xlabel('x') -plt.ylabel('y') -plt.title('Pendulum') -ax.set_aspect('equal') -plt.legend(loc='lower right') +ax.axhline(y=0, color="black", linestyle="--", label=None) +ax.axvline(x=0, color="black", linestyle="--", label=None) +plt.plot(xt, yt, color="black", linestyle="--", label=None) +plt.scatter(x, y, color="red", label="comp") +plt.scatter(xr, yr, color="blue", label="ref") + +plt.xlabel("x") +plt.ylabel("y") +plt.title("Pendulum") +ax.set_aspect("equal") +plt.legend(loc="lower right") # plot position over time fig, ax = plt.subplots() -ax.axhline(y=0, color='black', linestyle='--') -plt.plot(t, x, label='x') -plt.plot(t, y, label='y') +ax.axhline(y=0, color="black", linestyle="--") +plt.plot(t, x, label="x") +plt.plot(t, y, label="y") -plt.xlabel('t') 
-plt.ylabel('position') -plt.title('Pendulum Position') +plt.xlabel("t") +plt.ylabel("position") +plt.title("Pendulum Position") plt.legend() # plot velocity over time fig, ax = plt.subplots() -ax.axhline(y=0, color='black', linestyle='--') -plt.plot(t, vx, label='$v_x$') -plt.plot(t, vy, label='$v_y$') +ax.axhline(y=0, color="black", linestyle="--") +plt.plot(t, vx, label="$v_x$") +plt.plot(t, vy, label="$v_y$") -plt.xlabel('t') -plt.ylabel('velocity') -plt.title('Pendulum Velocity') +plt.xlabel("t") +plt.ylabel("velocity") +plt.title("Pendulum Velocity") plt.legend() # display plots diff --git a/examples/cvodes/serial/plot_cvsParticle.py b/examples/cvodes/serial/plot_cvsParticle.py index fb0c66da1c..72e8736388 100755 --- a/examples/cvodes/serial/plot_cvsParticle.py +++ b/examples/cvodes/serial/plot_cvsParticle.py @@ -21,20 +21,26 @@ import matplotlib.pyplot as plt # command line options -parser = argparse.ArgumentParser(description='Plots cvsPraticle_dns output') -parser.add_argument('--sfile', type=str, - default='cvsParticle_solution.txt', - help='solution output file to read') -parser.add_argument('--efile', type=str, - default='cvsParticle_error.txt', - help='error output file to read') -parser.add_argument('--alpha', type=float, nargs=1, - default=1.0, - help='set a non-default alpha value') -parser.add_argument('--slim', type=float, nargs=2, - help='x and y limits for solution plot') -parser.add_argument('--eylim', type=float, nargs=2, - help='y limits for error plot') +parser = argparse.ArgumentParser(description="Plots cvsPraticle_dns output") +parser.add_argument( + "--sfile", + type=str, + default="cvsParticle_solution.txt", + help="solution output file to read", +) +parser.add_argument( + "--efile", + type=str, + default="cvsParticle_error.txt", + help="error output file to read", +) +parser.add_argument( + "--alpha", type=float, nargs=1, default=1.0, help="set a non-default alpha value" +) +parser.add_argument( + "--slim", type=float, nargs=2, help="x and y limits for solution plot" +) +parser.add_argument("--eylim", type=float, nargs=2, help="y limits for error plot") # parse inputs args = parser.parse_args() @@ -48,23 +54,23 @@ y = data[:, 2] # unit circle -tt = np.linspace(0,np.pi*2,10000) +tt = np.linspace(0, np.pi * 2, 10000) xt = np.cos(tt) yt = np.sin(tt) # plot solution fig, ax = plt.subplots() -plt.plot(xt, yt, color='black', linestyle='--') -plt.scatter(x, y, color='red') +plt.plot(xt, yt, color="black", linestyle="--") +plt.scatter(x, y, color="red") -if (args.slim): +if args.slim: plt.xlim((args.slim[0], args.slim[1])) plt.ylim((args.slim[0], args.slim[1])) -plt.xlabel('x') -plt.ylabel('y') -plt.title('Solution') -ax.set_aspect('equal') +plt.xlabel("x") +plt.ylabel("y") +plt.title("Solution") +ax.set_aspect("equal") # true solution xt = np.cos(args.alpha * t) @@ -72,15 +78,15 @@ # plot solution fig, ax = plt.subplots() -plt.plot(t, x, linestyle='-', label='x') -plt.plot(t, xt, linestyle='--', label='x true') -plt.plot(t, y, linestyle='-', label='y') -plt.plot(t, yt, linestyle='--', label='y true') +plt.plot(t, x, linestyle="-", label="x") +plt.plot(t, xt, linestyle="--", label="x true") +plt.plot(t, y, linestyle="-", label="y") +plt.plot(t, yt, linestyle="--", label="y true") -plt.xlabel('t') -plt.ylabel('position') -plt.title('Particle Position Over Time') -plt.legend(loc='lower right') +plt.xlabel("t") +plt.ylabel("position") +plt.title("Particle Position Over Time") +plt.legend(loc="lower right") # read error output file data = np.loadtxt(args.efile, 
dtype=np.double) @@ -93,17 +99,17 @@ # plot solution fig, ax = plt.subplots() -plt.semilogy(t, xerr, label='x err') -plt.semilogy(t, yerr, label='y err') -plt.semilogy(t, cerr, label='c err') +plt.semilogy(t, xerr, label="x err") +plt.semilogy(t, yerr, label="y err") +plt.semilogy(t, cerr, label="c err") -if (args.eylim): +if args.eylim: plt.ylim((args.eylim[0], args.eylim[1])) -plt.xlabel('time') -plt.ylabel('error') -plt.legend(loc='lower right') -plt.title('Error in position and constraint') +plt.xlabel("time") +plt.ylabel("error") +plt.legend(loc="lower right") +plt.title("Error in position and constraint") plt.grid() # display plots diff --git a/examples/cvodes/serial/plot_cvsPendulum.py b/examples/cvodes/serial/plot_cvsPendulum.py index 0376a755bb..87408f4634 100755 --- a/examples/cvodes/serial/plot_cvsPendulum.py +++ b/examples/cvodes/serial/plot_cvsPendulum.py @@ -21,9 +21,8 @@ import matplotlib.pyplot as plt # command line options -parser = argparse.ArgumentParser(description='Plots cvsPendulum_dns output') -parser.add_argument('sfile', type=str, - help='solution output file to read') +parser = argparse.ArgumentParser(description="Plots cvsPendulum_dns output") +parser.add_argument("sfile", type=str, help="solution output file to read") # parse inputs args = parser.parse_args() @@ -32,9 +31,9 @@ data = np.loadtxt(args.sfile, dtype=np.double) # extract times, positions, and velocities -t = data[:, 0] -x = data[:, 1] -y = data[:, 2] +t = data[:, 0] +x = data[:, 1] +y = data[:, 2] vx = data[:, 3] vy = data[:, 4] @@ -42,50 +41,50 @@ ref = np.loadtxt("cvsPendulum_dns_ref.txt", dtype=np.double) # extract positions and velocities -xr = ref[:, 1] -yr = ref[:, 2] +xr = ref[:, 1] +yr = ref[:, 2] vxr = ref[:, 3] vyr = ref[:, 4] # lower half of unit circle -tt = np.linspace(np.pi, 2*np.pi, 10000) +tt = np.linspace(np.pi, 2 * np.pi, 10000) xt = np.cos(tt) yt = np.sin(tt) # plot solution in xy plane fig, ax = plt.subplots() -ax.axhline(y=0, color='black', linestyle='--', label=None) -ax.axvline(x=0, color='black', linestyle='--', label=None) -plt.plot(xt, yt, color='black', linestyle='--', label=None) -plt.scatter(x, y, color='red', label='comp') -plt.scatter(xr, yr, color='blue', label='ref') - -plt.xlabel('x') -plt.ylabel('y') -plt.title('Pendulum') -ax.set_aspect('equal') -plt.legend(loc='lower right') +ax.axhline(y=0, color="black", linestyle="--", label=None) +ax.axvline(x=0, color="black", linestyle="--", label=None) +plt.plot(xt, yt, color="black", linestyle="--", label=None) +plt.scatter(x, y, color="red", label="comp") +plt.scatter(xr, yr, color="blue", label="ref") + +plt.xlabel("x") +plt.ylabel("y") +plt.title("Pendulum") +ax.set_aspect("equal") +plt.legend(loc="lower right") # plot position over time fig, ax = plt.subplots() -ax.axhline(y=0, color='black', linestyle='--') -plt.plot(t, x, label='x') -plt.plot(t, y, label='y') +ax.axhline(y=0, color="black", linestyle="--") +plt.plot(t, x, label="x") +plt.plot(t, y, label="y") -plt.xlabel('t') -plt.ylabel('position') -plt.title('Pendulum Position') +plt.xlabel("t") +plt.ylabel("position") +plt.title("Pendulum Position") plt.legend() # plot velocity over time fig, ax = plt.subplots() -ax.axhline(y=0, color='black', linestyle='--') -plt.plot(t, vx, label='$v_x$') -plt.plot(t, vy, label='$v_y$') +ax.axhline(y=0, color="black", linestyle="--") +plt.plot(t, vx, label="$v_x$") +plt.plot(t, vy, label="$v_y$") -plt.xlabel('t') -plt.ylabel('velocity') -plt.title('Pendulum Velocity') +plt.xlabel("t") +plt.ylabel("velocity") +plt.title("Pendulum 
Velocity") plt.legend() # display plots diff --git a/examples/utilities/plot_data_2d.py b/examples/utilities/plot_data_2d.py index 0303e252ff..ecf15ab7e6 100755 --- a/examples/utilities/plot_data_2d.py +++ b/examples/utilities/plot_data_2d.py @@ -78,101 +78,111 @@ def main(): import sys import argparse - parser = argparse.ArgumentParser(description='''Plot 2D data files''') + parser = argparse.ArgumentParser(description="""Plot 2D data files""") # List of input data files - parser.add_argument('datafiles', type=str, nargs='+', - help='Data files to plot') + parser.add_argument("datafiles", type=str, nargs="+", help="Data files to plot") # Plot type options - group = parser.add_argument_group('Plot Options', - '''Options to specify the type of plot to - generate and what data to plot''') + group = parser.add_argument_group( + "Plot Options", + """Options to specify the type of plot to + generate and what data to plot""", + ) - group.add_argument('--plottype', type=str, - choices=['surface', 'surface-ani', - 'contour', 'contour-ani', - 'slice', 'point'], - default='surface', - help='''Set the plot type''') + group.add_argument( + "--plottype", + type=str, + choices=["surface", "surface-ani", "contour", "contour-ani", "slice", "point"], + default="surface", + help="""Set the plot type""", + ) - group.add_argument('--plotvars', type=int, nargs='+', - help='''Variable indices to plot''') + group.add_argument( + "--plotvars", type=int, nargs="+", help="""Variable indices to plot""" + ) - group.add_argument('--plottimes', type=int, nargs='+', - help='''Time indices to plot''') + group.add_argument( + "--plottimes", type=int, nargs="+", help="""Time indices to plot""" + ) # Slice plot options - group = parser.add_argument_group('Slice Plot Options', - '''Options specific to the slice plot - type''') + group = parser.add_argument_group( + "Slice Plot Options", + """Options specific to the slice plot + type""", + ) - group.add_argument('--slicetype', type=str, default='var', - choices=['var', 'time'], - help='''The slice plot type''') + group.add_argument( + "--slicetype", + type=str, + default="var", + choices=["var", "time"], + help="""The slice plot type""", + ) mxgroup = group.add_mutually_exclusive_group() - mxgroup.add_argument('--yslice', type=int, default=-1, - help='''y index to plot''') + mxgroup.add_argument("--yslice", type=int, default=-1, help="""y index to plot""") - mxgroup.add_argument('--xslice', type=int, default=-1, - help='''x index to plot''') + mxgroup.add_argument("--xslice", type=int, default=-1, help="""x index to plot""") # Point plot options - group = parser.add_argument_group('Point Plot Options', - '''Options specific to the point plot - type''') + group = parser.add_argument_group( + "Point Plot Options", + """Options specific to the point plot + type""", + ) - group.add_argument('--point', type=int, nargs=2, default=[0, 0], - help='''x and y index to plot''') + group.add_argument( + "--point", type=int, nargs=2, default=[0, 0], help="""x and y index to plot""" + ) # Output options - group = parser.add_argument_group('Output Options', - '''Options for saving plots''') + group = parser.add_argument_group("Output Options", """Options for saving plots""") - group.add_argument('--save', action='store_true', - help='''Save figure to file''') + group.add_argument("--save", action="store_true", help="""Save figure to file""") - group.add_argument('--prefix', type=str, - help='''File name prefix for saving the figure''') + group.add_argument( + "--prefix", type=str, 
help="""File name prefix for saving the figure""" + ) - group.add_argument('--merge', action='store_true', - help='''Merge PDF output files into a single file''') + group.add_argument( + "--merge", + action="store_true", + help="""Merge PDF output files into a single file""", + ) # Figure options - group = parser.add_argument_group('Figure Options', - '''Options to specify various figure - properties''') + group = parser.add_argument_group( + "Figure Options", + """Options to specify various figure + properties""", + ) - group.add_argument('--labels', type=str, nargs='+', - help='''Data labels for the plot legend''') + group.add_argument( + "--labels", type=str, nargs="+", help="""Data labels for the plot legend""" + ) - group.add_argument('--title', type=str, - help='''Plot title''') + group.add_argument("--title", type=str, help="""Plot title""") - group.add_argument('--xlabel', type=str, - help='''x-axis label''') + group.add_argument("--xlabel", type=str, help="""x-axis label""") - group.add_argument('--ylabel', type=str, - help='''y-axis label''') + group.add_argument("--ylabel", type=str, help="""y-axis label""") - group.add_argument('--zlabel', type=str, - help='''z-axis label''') + group.add_argument("--zlabel", type=str, help="""z-axis label""") - group.add_argument('--grid', action='store_true', - help='''Add grid to plot''') + group.add_argument("--grid", action="store_true", help="""Add grid to plot""") # Debugging options - parser.add_argument('--debug', action='store_true', - help='Enable debugging') + parser.add_argument("--debug", action="store_true", help="Enable debugging") # parse command line args args = parser.parse_args() @@ -190,52 +200,53 @@ def main(): plot_settings(args, info, time, xvals, yvals, zdata) # Create plots - if args.plottype == 'surface': + if args.plottype == "surface": plot_surface(args, info, time, xvals, yvals, zdata) - if args.plottype == 'surface-ani': + if args.plottype == "surface-ani": plot_surface_ani(args, info, time, xvals, yvals, zdata) - if args.plottype == 'contour': + if args.plottype == "contour": plot_contour(args, info, time, xvals, yvals, zdata) - if args.plottype == 'contour-ani': + if args.plottype == "contour-ani": plot_contour_ani(args, info, time, xvals, yvals, zdata) - if args.plottype == 'slice': + if args.plottype == "slice": # slice data - if (args.yslice > -1) and (args.yslice < info['ny']): + if (args.yslice > -1) and (args.yslice < info["ny"]): svals = xvals sdata = zdata[:, args.yslice, :, :] if args.xlabel: hlabel = args.xlabel else: - hlabel = 'x' + hlabel = "x" suffix = " at y = {:.4f}".format(yvals[args.yslice]) - elif (args.xslice > -1) and (args.xslice < info['nx']): + elif (args.xslice > -1) and (args.xslice < info["nx"]): svals = yvals sdata = zdata[:, :, args.xslice, :] if args.ylabel: hlabel = args.ylabel else: - hlabel = 'y' + hlabel = "y" suffix = " at x = {:.4f}".format(xvals[args.xslice]) else: print("ERROR: invalid xslice or yslice option") sys.exit() - if args.slicetype == 'var': + if args.slicetype == "var": plot_slice_vars(args, info, time, svals, sdata, hlabel, suffix) else: plot_slice_time(args, info, time, svals, sdata, hlabel, suffix) - if args.plottype == 'point': + if args.plottype == "point": # point data pdata = zdata[:, args.point[1], args.point[0], :] - suffix = " at x = {:.4f}, y = {:.4f}".format(xvals[args.point[0]], - yvals[args.point[1]]) + suffix = " at x = {:.4f}, y = {:.4f}".format( + xvals[args.point[0]], yvals[args.point[1]] + ) plot_point(args, info, time, pdata, suffix) @@ -264,8 
+275,19 @@ def read_header(args): import numpy as np # initialize dictionary of header info variables to None - keys = ['title', 'varnames', 'nprocs', 'nvar', 'nt', 'nx', 'xl', 'xu', - 'ny', 'yl', 'yu'] + keys = [ + "title", + "varnames", + "nprocs", + "nvar", + "nt", + "nx", + "xl", + "xu", + "ny", + "yl", + "yu", + ] info = dict() for k in keys: @@ -290,62 +312,62 @@ def read_header(args): # plot title if "title" in line: - info['title'] = " ".join(text[2:]) + info["title"] = " ".join(text[2:]) continue # plot variable names if "vars" in line: - info['varnames'] = text[2:] + info["varnames"] = text[2:] continue # total number of processes if "nprocs" in line: - info['nprocs'] = int(text[2]) + info["nprocs"] = int(text[2]) continue # number of variables (at each spatial node) if "nvar" in line: - info['nvar'] = int(text[2]) + info["nvar"] = int(text[2]) continue # number of output times if "nt" in line: - info['nt'] = int(text[2]) + info["nt"] = int(text[2]) continue # the global number of nodes in the x-direction, the x lower bound # (west) and the x upper bound (east) if "nx" in line: - info['nx'] = int(text[2]) + info["nx"] = int(text[2]) continue if "xl" in line: - info['xl'] = float(text[2]) + info["xl"] = float(text[2]) continue if "xu" in line: - info['xu'] = float(text[2]) + info["xu"] = float(text[2]) continue # the global number of nodes in the y-direction, the y lower bound # (south) and the y upper bound (north) if "ny" in line: - info['ny'] = int(text[2]) + info["ny"] = int(text[2]) continue if "yl" in line: - info['yl'] = float(text[2]) + info["yl"] = float(text[2]) continue if "yu" in line: - info['yu'] = float(text[2]) + info["yu"] = float(text[2]) continue # load data to deduce values and perform sanity checks data = np.loadtxt(args.datafiles[0], dtype=np.double) # try to fill in missing values - if info['nvar'] is None: - info['nvar'] = 1 + if info["nvar"] is None: + info["nvar"] = 1 print("WARNING: nvar not provided. Using nvar = 1") - if info['nt'] is None or info['nx'] is None or info['ny'] is None: + if info["nt"] is None or info["nx"] is None or info["ny"] is None: # check if data exists if data.ndim != 2: @@ -353,72 +375,76 @@ def read_header(args): sys.exit() # number of output times - if info['nt'] is None: - info['nt'] = np.shape(data)[0] + if info["nt"] is None: + info["nt"] = np.shape(data)[0] # number of spatial nodes - if info['nx'] is None or info['ny'] is None: + if info["nx"] is None or info["ny"] is None: col = np.shape(data)[1] - 1 # exclude output times - if info['nx'] is None and info['ny'] is not None: - info['nx'] = col // (info['nvar'] * info['ny']) - elif info['nx'] is not None and info['ny'] is None: - info['ny'] = col // (info['nvar'] * info['nx']) + if info["nx"] is None and info["ny"] is not None: + info["nx"] = col // (info["nvar"] * info["ny"]) + elif info["nx"] is not None and info["ny"] is None: + info["ny"] = col // (info["nvar"] * info["nx"]) else: - info['nx'] = int(np.sqrt(col // info['nvar'])) - info['ny'] = info['nx'] - print("WARNING: nx and ny not provided. Using nx = ny =", - info['nx']) + info["nx"] = int(np.sqrt(col // info["nvar"])) + info["ny"] = info["nx"] + print("WARNING: nx and ny not provided. 
Using nx = ny =", info["nx"]) # sanity checks - if info['nt'] != np.shape(data)[0]: - print("ERROR: nt != nrows", info['nt'], np.shape(data)[0]) + if info["nt"] != np.shape(data)[0]: + print("ERROR: nt != nrows", info["nt"], np.shape(data)[0]) sys.exit() - if (info['nvar'] * info['nx'] * info['ny']) != (np.shape(data)[1] - 1): + if (info["nvar"] * info["nx"] * info["ny"]) != (np.shape(data)[1] - 1): print("ERROR: nvar * nx * ny != ncols - 1") sys.exit() # check x-dimension lower and upper bounds - if info['xl'] is None: + if info["xl"] is None: print("WARNING: xl not provided, using xl = 0") - info['xl'] = 0.0 + info["xl"] = 0.0 - if info['xu'] is None: + if info["xu"] is None: print("WARNING: xu not provided, using xu = 1") - info['xu'] = 1.0 + info["xu"] = 1.0 # check y-dimension lower and upper bounds - if info['yl'] is None: + if info["yl"] is None: print("WARNING: yl not provided, using yl = 0") - info['yl'] = 0.0 + info["yl"] = 0.0 - if info['yu'] is None: + if info["yu"] is None: print("WARNING: yu not provided, using yu = 1") - info['yu'] = 1.0 + info["yu"] = 1.0 # check number of processes - if info['nprocs'] is None: - info['nprocs'] = len(args.datafiles) - print("WARNING: nprocs not provided, using nprocs =", info['nprocs']) + if info["nprocs"] is None: + info["nprocs"] = len(args.datafiles) + print("WARNING: nprocs not provided, using nprocs =", info["nprocs"]) # check if all the expected input files were provided - if len(args.datafiles) != info['nprocs']: - print("ERROR: number of data files (", len(args.datafiles), - ") does not match number of processes (", info['nprocs'], ")") + if len(args.datafiles) != info["nprocs"]: + print( + "ERROR: number of data files (", + len(args.datafiles), + ") does not match number of processes (", + info["nprocs"], + ")", + ) sys.exit() if args.debug: - print('title = ', info['title']) - print('varnames = ', info['varnames']) - print('nprocs = ', info['nprocs']) - print('nvar = ', info['nvar']) - print('nt = ', info['nt']) - print('nx = ', info['nx']) - print('xl = ', info['xl']) - print('xu = ', info['xu']) - print('ny = ', info['ny']) - print('yl = ', info['yl']) - print('yu = ', info['yu']) + print("title = ", info["title"]) + print("varnames = ", info["varnames"]) + print("nprocs = ", info["nprocs"]) + print("nvar = ", info["nvar"]) + print("nt = ", info["nt"]) + print("nx = ", info["nx"]) + print("xl = ", info["xl"]) + print("xu = ", info["xu"]) + print("ny = ", info["ny"]) + print("yl = ", info["yl"]) + print("yu = ", info["yu"]) return info @@ -435,14 +461,14 @@ def read_subdomains(args, info): import numpy as np # load subdomain information, store in table - subdomains = np.zeros((info['nprocs'], 4), dtype=int) + subdomains = np.zeros((info["nprocs"], 4), dtype=int) # get the spatial subdomain owned by each process - if info['nprocs'] == 1: + if info["nprocs"] == 1: subdomains[0, 0] = 0 - subdomains[0, 1] = info['nx'] - 1 + subdomains[0, 1] = info["nx"] - 1 subdomains[0, 2] = 0 - subdomains[0, 3] = info['ny'] - 1 + subdomains[0, 3] = info["ny"] - 1 else: for idx, datafile in enumerate(args.datafiles): @@ -490,8 +516,7 @@ def read_subdomains(args, info): # check if subdomain indices were found if not (found_is and found_ie and found_js and found_je): - print("ERROR: could not find subdomain indices in", - datafile) + print("ERROR: could not find subdomain indices in", datafile) sys.exit() return subdomains @@ -507,10 +532,10 @@ def read_data(args, info, subdomains): import numpy as np # initialize data arrays - time = 
np.zeros(info['nt']) - xvals = np.linspace(info['xl'], info['xu'], info['nx']) - yvals = np.linspace(info['yl'], info['yu'], info['ny']) - zdata = np.zeros((info['nt'], info['ny'], info['nx'], info['nvar'])) + time = np.zeros(info["nt"]) + xvals = np.linspace(info["xl"], info["xu"], info["nx"]) + yvals = np.linspace(info["yl"], info["yu"], info["ny"]) + zdata = np.zeros((info["nt"], info["ny"], info["nx"], info["nvar"])) # extract data for idx, datafile in enumerate(args.datafiles): @@ -524,10 +549,17 @@ def read_data(args, info, subdomains): if args.debug: print(np.shape(data)) - if np.shape(data)[0] != info['nt']: - print("WARNING: subdomain", str(idx), "has an incorrect number of" - "output times (", np.shape(data)[0], "vs", info['nt'], ")") - info['nt'] = np.shape(data)[0] + if np.shape(data)[0] != info["nt"]: + print( + "WARNING: subdomain", + str(idx), + "has an incorrect number of" "output times (", + np.shape(data)[0], + "vs", + info["nt"], + ")", + ) + info["nt"] = np.shape(data)[0] # x-subdomain indices istart = subdomains[idx, 0] @@ -547,10 +579,11 @@ def read_data(args, info, subdomains): # reshape and save data time[:] = data[:, 0] - for v in range(info['nvar']): - for i in range(info['nt']): - zdata[i, jstart:jend+1, istart:iend+1, v] = \ - np.reshape(data[i, 1+v::info['nvar']], (nyl, nxl)) + for v in range(info["nvar"]): + for i in range(info["nt"]): + zdata[i, jstart : jend + 1, istart : iend + 1, v] = np.reshape( + data[i, 1 + v :: info["nvar"]], (nyl, nxl) + ) return time, xvals, yvals, zdata @@ -565,40 +598,40 @@ def plot_settings(args, info, time, xvals, yvals, zdata): import numpy as np # determine extents of plots - info['zmin'] = np.zeros(info['nvar']) - info['zmax'] = np.zeros(info['nvar']) + info["zmin"] = np.zeros(info["nvar"]) + info["zmax"] = np.zeros(info["nvar"]) - for v in range(info['nvar']): - info['zmin'][v] = np.amin(zdata[:, :, :, v]) - info['zmax'][v] = np.amax(zdata[:, :, :, v]) + for v in range(info["nvar"]): + info["zmin"][v] = np.amin(zdata[:, :, :, v]) + info["zmax"][v] = np.amax(zdata[:, :, :, v]) if args.debug: - print("z max = ", info['zmax']) - print("z min = ", info['zmin']) + print("z max = ", info["zmax"]) + print("z min = ", info["zmin"]) # which variables to plot if args.plotvars: - info['pltvars'] = args.plotvars + info["pltvars"] = args.plotvars else: - info['pltvars'] = range(info['nvar']) + info["pltvars"] = range(info["nvar"]) # which times to plot if args.plottimes: - info['plttimes'] = args.plottimes + info["plttimes"] = args.plottimes else: - info['plttimes'] = range(info['nt']) + info["plttimes"] = range(info["nt"]) # x-axis label if args.xlabel: - info['xlabel'] = args.xlabel + info["xlabel"] = args.xlabel else: - info['xlabel'] = 'x' + info["xlabel"] = "x" # y-axis label if args.ylabel: - info['ylabel'] = args.ylabel + info["ylabel"] = args.ylabel else: - info['ylabel'] = 'y' + info["ylabel"] = "y" # ----------------------------------------------------------------------------- @@ -638,50 +671,58 @@ def plot_surface(args, info, time, xvals, yvals, zdata): X, Y = np.meshgrid(xvals, yvals) # generate plots - for v in info['pltvars']: + for v in info["pltvars"]: if args.merge: mergefiles = list() - for t in info['plttimes']: + for t in info["plttimes"]: # create figure and axes fig = plt.figure() - ax = fig.add_subplot(111, projection='3d') - - ax.plot_surface(X, Y, zdata[t, :, :, v], rstride=1, cstride=1, - cmap=cm.jet, linewidth=0, antialiased=True, - shade=True) + ax = fig.add_subplot(111, projection="3d") + + ax.plot_surface( + 
X, + Y, + zdata[t, :, :, v], + rstride=1, + cstride=1, + cmap=cm.jet, + linewidth=0, + antialiased=True, + shade=True, + ) # set axis limits - ax.set_xlim([info['xl'], info['xu']]) - ax.set_ylim([info['yl'], info['yu']]) - ax.set_zlim(info['zmin'][v], info['zmax'][v]) + ax.set_xlim([info["xl"], info["xu"]]) + ax.set_ylim([info["yl"], info["yu"]]) + ax.set_zlim(info["zmin"][v], info["zmax"][v]) # initial perspective ax.view_init(20, -120) # add axis labels - plt.xlabel(info['xlabel']) - plt.ylabel(info['ylabel']) + plt.xlabel(info["xlabel"]) + plt.ylabel(info["ylabel"]) # add z-axis label if args.zlabel: ax.set_zlabel(args.zlabel) - elif info['varnames']: - ax.set_zlabel(info['varnames'][v]) + elif info["varnames"]: + ax.set_zlabel(info["varnames"][v]) else: - ax.set_zlabel('z') + ax.set_zlabel("z") # add title tstr = str(time[t]) if args.title: title = args.title - elif info['title']: - title = info['title'] + elif info["title"]: + title = info["title"] else: - title = 'Solution' - plt.title(title + '\nt = ' + tstr) + title = "Solution" + plt.title(title + "\nt = " + tstr) # add grid if args.grid: @@ -690,15 +731,15 @@ def plot_surface(args, info, time, xvals, yvals, zdata): # save plot to file if args.save: if args.prefix: - fname = args.prefix + '_fig_surface_' + fname = args.prefix + "_fig_surface_" else: - fname = 'fig_surface_' - if info['varnames']: - fname += info['varnames'][v] + fname = "fig_surface_" + if info["varnames"]: + fname += info["varnames"][v] else: - fname += 'var_' + repr(v).zfill(3) - fname += '_t_' + repr(t).zfill(3) + '.pdf' - plt.savefig(fname, bbox_inches='tight') + fname += "var_" + repr(v).zfill(3) + fname += "_t_" + repr(t).zfill(3) + ".pdf" + plt.savefig(fname, bbox_inches="tight") if args.merge: mergefiles.append(fname) else: @@ -707,14 +748,14 @@ def plot_surface(args, info, time, xvals, yvals, zdata): if args.merge: if args.prefix: - fname = args.prefix + '_fig_surface_' + fname = args.prefix + "_fig_surface_" else: - fname = 'fig_surface_' - if info['varnames']: - fname += info['varnames'][v] + fname = "fig_surface_" + if info["varnames"]: + fname += info["varnames"][v] else: - fname += 'var_' + repr(v).zfill(3) - fname += '.pdf' + fname += "var_" + repr(v).zfill(3) + fname += ".pdf" merge_pdf(mergefiles, fname) @@ -732,38 +773,47 @@ def plot_surface_ani(args, info, time, xvals, yvals, zdata): def update_plot(frame_number, zarray, v, plot): plot[0].remove() - plot[0] = ax.plot_surface(X, Y, zarray[frame_number, :, :, v], - cmap=cm.jet) + plot[0] = ax.plot_surface(X, Y, zarray[frame_number, :, :, v], cmap=cm.jet) tstr = str(time[frame_number]) if args.title: title = args.title - elif info['title']: - title = info['title'] + elif info["title"]: + title = info["title"] else: - title = 'Solution' - plt.title(title + '\nt = ' + tstr) + title = "Solution" + plt.title(title + "\nt = " + tstr) - return plot, + return (plot,) # set x and y meshgrid objects X, Y = np.meshgrid(xvals, yvals) # generate plots - for v in info['pltvars']: + for v in info["pltvars"]: # create figure and axes fig = plt.figure() - ax = plt.axes(projection='3d') - - plot = [ax.plot_surface(X, Y, zdata[0, :, :, v], rstride=1, cstride=1, - cmap=cm.jet, linewidth=0, antialiased=True, - shade=True)] + ax = plt.axes(projection="3d") + + plot = [ + ax.plot_surface( + X, + Y, + zdata[0, :, :, v], + rstride=1, + cstride=1, + cmap=cm.jet, + linewidth=0, + antialiased=True, + shade=True, + ) + ] # set axis limits - ax.set_xlim([info['xl'], info['xu']]) - ax.set_ylim([info['yl'], info['yu']]) - 
ax.set_zlim([info['zmin'][v], info['zmax'][v]]) + ax.set_xlim([info["xl"], info["xu"]]) + ax.set_ylim([info["yl"], info["yu"]]) + ax.set_zlim([info["zmin"][v], info["zmax"][v]]) # initial perspective ax.view_init(20, -120) @@ -772,45 +822,45 @@ def update_plot(frame_number, zarray, v, plot): if args.xlabel: plt.xlabel(args.xlabel) else: - ax.set_xlabel('x') + ax.set_xlabel("x") # add y-axis label if args.ylabel: plt.ylabel(args.ylabel) else: - ax.set_ylabel('y') + ax.set_ylabel("y") # add z-axis label if args.zlabel: ax.set_zlabel(args.zlabel) - elif info['varnames']: - ax.set_zlabel(info['varnames'][v]) + elif info["varnames"]: + ax.set_zlabel(info["varnames"][v]) else: - ax.set_zlabel('z') + ax.set_zlabel("z") # add grid if args.grid: plt.grid() - fps = 2 # frame per sec + fps = 2 # frame per sec frn = len(time) # number of frames in the animation # create animation - ani = animation.FuncAnimation(fig, update_plot, frn, - fargs=(zdata, v, plot), - interval=1000/fps) + ani = animation.FuncAnimation( + fig, update_plot, frn, fargs=(zdata, v, plot), interval=1000 / fps + ) # save animation to file if args.save: if args.prefix: - fname = args.prefix + '_ani_surface_' + fname = args.prefix + "_ani_surface_" else: - fname = 'ani_surface_' - if info['varnames']: - fname += info['varnames'][v] + fname = "ani_surface_" + if info["varnames"]: + fname += info["varnames"][v] else: - fname += 'var_' + repr(v).zfill(3) - ani.save(fname + '.mp4', dpi=200, fps=fps) + fname += "var_" + repr(v).zfill(3) + ani.save(fname + ".mp4", dpi=200, fps=fps) else: plt.show() plt.close() @@ -830,36 +880,37 @@ def plot_contour(args, info, time, xvals, yvals, zdata): X, Y = np.meshgrid(xvals, yvals) # generate plots - for v in info['pltvars']: + for v in info["pltvars"]: - levels = np.linspace(info['zmin'][v], info['zmax'][v], 100) - ticks = np.linspace(info['zmin'][v], info['zmax'][v], 10) + levels = np.linspace(info["zmin"][v], info["zmax"][v], 100) + ticks = np.linspace(info["zmin"][v], info["zmax"][v], 10) - for t in info['plttimes']: + for t in info["plttimes"]: # create figure and axes fig, ax = plt.subplots() - cf = ax.contourf(X, Y, zdata[t, :, :, v], levels=levels, - cmap="coolwarm", extend="both") + cf = ax.contourf( + X, Y, zdata[t, :, :, v], levels=levels, cmap="coolwarm", extend="both" + ) fig.colorbar(cf, ax=ax, fraction=0.046, pad=0.04, ticks=ticks) # set axis limits - ax.set_xlim([info['xl'], info['xu']]) - ax.set_ylim([info['yl'], info['yu']]) + ax.set_xlim([info["xl"], info["xu"]]) + ax.set_ylim([info["yl"], info["yu"]]) # add axis labels - plt.xlabel(info['xlabel']) - plt.ylabel(info['ylabel']) + plt.xlabel(info["xlabel"]) + plt.ylabel(info["ylabel"]) # add title tstr = str(time[t]) if args.title: - plt.title(args.title + ' at t = ' + tstr) - elif info['title']: - plt.title(info['title'] + ' at t = ' + tstr) + plt.title(args.title + " at t = " + tstr) + elif info["title"]: + plt.title(info["title"] + " at t = " + tstr) else: - plt.title('Solution at t = ' + tstr) + plt.title("Solution at t = " + tstr) # add grid if args.grid: @@ -868,15 +919,15 @@ def plot_contour(args, info, time, xvals, yvals, zdata): # save plot to file if args.save: if args.prefix: - fname = args.prefix + '_fig_contour_' + fname = args.prefix + "_fig_contour_" else: - fname = 'fig_contour_' - if info['varnames']: - fname += info['varnames'][v] + fname = "fig_contour_" + if info["varnames"]: + fname += info["varnames"][v] else: - fname += 'var_' + repr(v).zfill(3) - fname += '_t_' + repr(t).zfill(3) + '.pdf' - plt.savefig(fname, 
bbox_inches='tight') + fname += "var_" + repr(v).zfill(3) + fname += "_t_" + repr(t).zfill(3) + ".pdf" + plt.savefig(fname, bbox_inches="tight") else: plt.show() plt.close() @@ -894,67 +945,76 @@ def plot_contour_ani(args, info, time, xvals, yvals, zdata): import matplotlib.animation as animation def update_plot(frame_number, zarray, v, plot): - plot[0] = ax.contourf(X, Y, zdata[frame_number, :, :, v], - levels=levels, cmap="coolwarm", extend="both") + plot[0] = ax.contourf( + X, + Y, + zdata[frame_number, :, :, v], + levels=levels, + cmap="coolwarm", + extend="both", + ) tstr = str(time[frame_number]) if args.title: title = args.title - elif info['title']: - title = info['title'] + elif info["title"]: + title = info["title"] else: - title = 'Solution' - plt.title(title + '\nt = ' + tstr) + title = "Solution" + plt.title(title + "\nt = " + tstr) - return plot, + return (plot,) # set x and y meshgrid objects X, Y = np.meshgrid(xvals, yvals) # generate plots - for v in info['pltvars']: + for v in info["pltvars"]: - levels = np.linspace(info['zmin'][v], info['zmax'][v], 100) - ticks = np.linspace(info['zmin'][v], info['zmax'][v], 10) + levels = np.linspace(info["zmin"][v], info["zmax"][v], 100) + ticks = np.linspace(info["zmin"][v], info["zmax"][v], 10) # create figure and axes fig, ax = plt.subplots() - plot = [ax.contourf(X, Y, zdata[0, :, :, v], levels=levels, - cmap="coolwarm", extend="both")] + plot = [ + ax.contourf( + X, Y, zdata[0, :, :, v], levels=levels, cmap="coolwarm", extend="both" + ) + ] fig.colorbar(plot[0], ax=ax, fraction=0.046, pad=0.04, ticks=ticks) # set axis limits - ax.set_xlim([info['xl'], info['xu']]) - ax.set_ylim([info['yl'], info['yu']]) + ax.set_xlim([info["xl"], info["xu"]]) + ax.set_ylim([info["yl"], info["yu"]]) # add axis labels - plt.xlabel(info['xlabel']) - plt.ylabel(info['ylabel']) + plt.xlabel(info["xlabel"]) + plt.ylabel(info["ylabel"]) # add grid if args.grid: plt.grid() - fps = 2 # frame per sec + fps = 2 # frame per sec frn = len(time) # number of frames in the animation # create animation - ani = animation.FuncAnimation(fig, update_plot, frn, - fargs=(zdata, v, plot), - interval=1000/fps) + ani = animation.FuncAnimation( + fig, update_plot, frn, fargs=(zdata, v, plot), interval=1000 / fps + ) # save animation to file if args.save: if args.prefix: - fname = args.prefix + '_ani_contour_' + fname = args.prefix + "_ani_contour_" else: - fname = 'ani_contour_' - if info['varnames']: - fname += info['varnames'][v] + fname = "ani_contour_" + if info["varnames"]: + fname += info["varnames"][v] else: - fname += 'var_' + repr(v).zfill(3) - ani.save(fname + '.mp4', dpi=200, fps=fps) + fname += "var_" + repr(v).zfill(3) + ani.save(fname + ".mp4", dpi=200, fps=fps) else: plt.show() plt.close() @@ -971,10 +1031,10 @@ def plot_slice_vars(args, info, time, svals, sdata, hlabel, suffix): import matplotlib.pyplot as plt # determine extents of slice plot - smin = np.zeros(info['nvar']) - smax = np.zeros(info['nvar']) + smin = np.zeros(info["nvar"]) + smax = np.zeros(info["nvar"]) - for v in range(info['nvar']): + for v in range(info["nvar"]): smin[v] = np.amin(sdata[:, :, v]) smax[v] = np.amax(sdata[:, :, v]) @@ -989,13 +1049,13 @@ def plot_slice_vars(args, info, time, svals, sdata, hlabel, suffix): label = ["%.2f" % t for t in time] # create plot for each variable - for v in info['pltvars']: + for v in info["pltvars"]: # create figure and axes fig, ax = plt.subplots() # add each output time to the plot - for t in info['plttimes']: + for t in info["plttimes"]: 
ax.plot(svals, sdata[t, :, v], label=label[t]) # set axis limits @@ -1012,19 +1072,19 @@ def plot_slice_vars(args, info, time, svals, sdata, hlabel, suffix): if args.zlabel: ax.set_ylabel(args.zlabel) else: - if info['varnames']: - ax.set_ylabel(info['varnames'][v]) + if info["varnames"]: + ax.set_ylabel(info["varnames"][v]) else: - ax.set_ylabel('variable ' + repr(v)) + ax.set_ylabel("variable " + repr(v)) # add title if args.title: plt.title(args.title + suffix) - elif info['title']: - plt.title(info['title'] + suffix) + elif info["title"]: + plt.title(info["title"] + suffix) else: - if info['varnames']: - plt.title("Evolution of " + info['varnames'][v] + suffix) + if info["varnames"]: + plt.title("Evolution of " + info["varnames"][v] + suffix) else: plt.title("Evolution of variable " + repr(v) + suffix) @@ -1035,14 +1095,14 @@ def plot_slice_vars(args, info, time, svals, sdata, hlabel, suffix): # save plot to file if args.save: if args.prefix: - fname = args.prefix + '_fig_slice_' + fname = args.prefix + "_fig_slice_" else: - fname = 'fig_slice_' - if info['varnames']: - fname += info['varnames'][v] + fname = "fig_slice_" + if info["varnames"]: + fname += info["varnames"][v] else: - fname += 'var_' + repr(v).zfill(3) - plt.savefig(fname + '.pdf', bbox_inches='tight') + fname += "var_" + repr(v).zfill(3) + plt.savefig(fname + ".pdf", bbox_inches="tight") else: plt.show() plt.close() @@ -1069,19 +1129,19 @@ def plot_slice_time(args, info, time, svals, sdata, hlabel, suffix): # set labels for the plot legend if args.labels: label = args.labels - elif info['varnames']: - label = info['varnames'] + elif info["varnames"]: + label = info["varnames"] else: - label = [None] * info['nvar'] + label = [None] * info["nvar"] # create plot for each variable - for t in info['plttimes']: + for t in info["plttimes"]: # create figure and axes fig, ax = plt.subplots() # add each output time to the plot - for v in info['pltvars']: + for v in info["pltvars"]: ax.plot(svals, sdata[t, :, v], label=label[v]) # set axis limits @@ -1101,11 +1161,11 @@ def plot_slice_time(args, info, time, svals, sdata, hlabel, suffix): # add title tstr = str(time[t]) if args.title: - plt.title(args.title + suffix + ' and t = ' + tstr) - elif info['title']: - plt.title(info['title'] + suffix + ' and t = ' + tstr) + plt.title(args.title + suffix + " and t = " + tstr) + elif info["title"]: + plt.title(info["title"] + suffix + " and t = " + tstr) else: - plt.title("Evolution" + suffix + ' and t = ' + tstr) + plt.title("Evolution" + suffix + " and t = " + tstr) # add grid if args.grid: @@ -1114,11 +1174,11 @@ def plot_slice_time(args, info, time, svals, sdata, hlabel, suffix): # save plot to file if args.save: if args.prefix: - fname = args.prefix + '_fig_slice_t_' + fname = args.prefix + "_fig_slice_t_" else: - fname = 'fig_slice_t_' - fname += repr(t).zfill(3) + '.pdf' - plt.savefig(fname, bbox_inches='tight') + fname = "fig_slice_t_" + fname += repr(t).zfill(3) + ".pdf" + plt.savefig(fname, bbox_inches="tight") else: plt.show() plt.close() @@ -1136,16 +1196,16 @@ def plot_point(args, info, time, pdata, suffix): # set labels for the plot legend if args.labels: label = args.labels - elif info['varnames']: - label = info['varnames'] + elif info["varnames"]: + label = info["varnames"] else: - label = [None] * info['nvar'] + label = [None] * info["nvar"] # create figure and axes fig, ax = plt.subplots() # create plot for each variable - for v in info['pltvars']: + for v in info["pltvars"]: ax.plot(time, pdata[:, v], label=label[v]) # 
add legend @@ -1157,8 +1217,8 @@ def plot_point(args, info, time, pdata, suffix): # add title if args.title: plt.title(args.title + suffix) - elif info['title']: - plt.title(info['title'] + suffix) + elif info["title"]: + plt.title(info["title"] + suffix) else: plt.title("Evolution" + suffix) @@ -1169,10 +1229,10 @@ def plot_point(args, info, time, pdata, suffix): # save plot to file if args.save: if args.prefix: - fname = args.prefix + '_fig_point' + fname = args.prefix + "_fig_point" else: - fname = 'fig_point' - plt.savefig(fname + '.pdf', bbox_inches='tight') + fname = "fig_point" + plt.savefig(fname + ".pdf", bbox_inches="tight") else: plt.show() plt.close() @@ -1183,6 +1243,7 @@ def plot_point(args, info, time, pdata, suffix): # ----------------------------------------------------------------------------- -if __name__ == '__main__': +if __name__ == "__main__": import sys + sys.exit(main()) diff --git a/examples/utilities/plot_data_time_series.py b/examples/utilities/plot_data_time_series.py index f96aeec538..02e3c34f7c 100755 --- a/examples/utilities/plot_data_time_series.py +++ b/examples/utilities/plot_data_time_series.py @@ -31,6 +31,7 @@ # output time. # ----------------------------------------------------------------------------- + # ----------------------------------------------------------------------------- # main routine # ----------------------------------------------------------------------------- @@ -41,50 +42,46 @@ def main(): import numpy as np import shlex - parser = argparse.ArgumentParser(description='''Plot data files''') + parser = argparse.ArgumentParser(description="""Plot data files""") - parser.add_argument('quantity', type=str, - help='''Quantity to plot''') + parser.add_argument("quantity", type=str, help="""Quantity to plot""") - parser.add_argument('datafiles', type=str, nargs='+', - help='''Data files to plot''') + parser.add_argument("datafiles", type=str, nargs="+", help="""Data files to plot""") # Plot display options - parser.add_argument('--save', action='store_true', - help='''Save figure to file''') + parser.add_argument("--save", action="store_true", help="""Save figure to file""") - parser.add_argument('--labels', type=str, nargs='+', - help='''Data file labels for plot legend''') + parser.add_argument( + "--labels", type=str, nargs="+", help="""Data file labels for plot legend""" + ) - parser.add_argument('--title', type=str, - help='''Plot title''') + parser.add_argument("--title", type=str, help="""Plot title""") - parser.add_argument('--xlabel', type=str, - help='''x-axis label''') + parser.add_argument("--xlabel", type=str, help="""x-axis label""") - parser.add_argument('--ylabel', type=str, - help='''y-axis label''') + parser.add_argument("--ylabel", type=str, help="""y-axis label""") - parser.add_argument('--grid', action='store_true', - help='''Add grid to plot''') + parser.add_argument("--grid", action="store_true", help="""Add grid to plot""") # Axis scaling logscale = parser.add_mutually_exclusive_group() - logscale.add_argument('--logx', action='store_true', - help='''Plot with log scale x-axis''') + logscale.add_argument( + "--logx", action="store_true", help="""Plot with log scale x-axis""" + ) - logscale.add_argument('--logy', action='store_true', - help='''Plot with log scale y-axis''') + logscale.add_argument( + "--logy", action="store_true", help="""Plot with log scale y-axis""" + ) - logscale.add_argument('--loglog', action='store_true', - help='''Use log scale x and y axes''') + logscale.add_argument( + "--loglog", 
action="store_true", help="""Use log scale x and y axes""" + ) # Debugging options - parser.add_argument('--debug', action='store_true', - help='Enable debugging') + parser.add_argument("--debug", action="store_true", help="Enable debugging") # Parse command line args args = parser.parse_args() @@ -132,33 +129,49 @@ def main(): print(data) # Extract t and q data - tdata = data[:,0] # first column has t values - qdata = data[:,idx] # remaining columns have q values + tdata = data[:, 0] # first column has t values + qdata = data[:, idx] # remaining columns have q values # line colors: matplotlib.org/stable/tutorials/colors/colormaps.html # and colorbrewer2.org) if len(args.datafiles) < 22: - colors = ["#d62728", "#1f77b4", "#2ca02c", "#9467bd", "#ff7f0e", - "#8c564b", "#e377c2", "#7f7f7f", "#bcbd22", "#17becf", - "#000000", "#ff9896", "#aec7e8", "#98df8a", "#c5b0d5", - "#ffbb78", "#c49c94", "#f7b6d2", "#c7c7c7", "#dbdb8d", - "#9edae5"] + colors = [ + "#d62728", + "#1f77b4", + "#2ca02c", + "#9467bd", + "#ff7f0e", + "#8c564b", + "#e377c2", + "#7f7f7f", + "#bcbd22", + "#17becf", + "#000000", + "#ff9896", + "#aec7e8", + "#98df8a", + "#c5b0d5", + "#ffbb78", + "#c49c94", + "#f7b6d2", + "#c7c7c7", + "#dbdb8d", + "#9edae5", + ] else: print("ERROR: ncols > ncolors") sys.exit() # Set plot label for legend - if (args.labels): - label=args.labels[i] + if args.labels: + label = args.labels[i] else: - label=None + label = None if args.logx or args.logy or args.loglog: - ax.plot(tdata, np.abs(qdata), label=label, - color=colors[i]) + ax.plot(tdata, np.abs(qdata), label=label, color=colors[i]) else: - ax.plot(tdata, qdata, label=label, - color=colors[i]) + ax.plot(tdata, qdata, label=label, color=colors[i]) # Change axis scale if args.logx: @@ -183,7 +196,7 @@ def main(): if args.ylabel: plt.ylabel(args.ylabel) else: - plt.ylabel(args.quantity.replace("_"," ")); + plt.ylabel(args.quantity.replace("_", " ")) # Add legend if args.labels: @@ -199,10 +212,12 @@ def main(): else: plt.show() + # ----------------------------------------------------------------------------- # run the main routine # ----------------------------------------------------------------------------- -if __name__ == '__main__': +if __name__ == "__main__": import sys + sys.exit(main()) diff --git a/scripts/format.sh b/scripts/format.sh index b145adcfad..38011b85c0 100755 --- a/scripts/format.sh +++ b/scripts/format.sh @@ -31,3 +31,5 @@ find "${paths[@]}" -iname '*.h' -o -iname '*.hpp' -o \ -iname '*.cuh' -o -iname '*.cu' | grep -v fmod | xargs clang-format -i find "${paths[@]}" -iname '*.f90' | grep -v fmod | xargs fprettify --indent 2 --enable-replacements --c-relations + +find "${paths[@]}" -iname '*.py' -exec black {} ';' diff --git a/test/compare_benchmarks.py b/test/compare_benchmarks.py index 2df5e7249e..1f76c60519 100644 --- a/test/compare_benchmarks.py +++ b/test/compare_benchmarks.py @@ -22,19 +22,55 @@ def main(): - parser = argparse.ArgumentParser(description='Compare Sundials performance results against previous results') - - parser.add_argument('--release', dest='release', action='store_true', help='indicate if the current run to process is a release') - - parser.add_argument('--calidir', dest='caliDir', type=str, help='path to directory containing caliper files', default="/usr/workspace/sundials/califiles") - - parser.add_argument('--releasedir', dest='releaseDir', type=str, help='path to directory containing release caliper files', default="/usr/workspace/sundials/califiles/Release") - - parser.add_argument('--outpath', 
dest='outPath', type=str, help='path to directory to write results to', default="/dev/null") - - parser.add_argument('--jobid', dest='jobID', type=int, help='job id of the current run to identify .cali files') - - parser.add_argument('--threshold', dest="threshold", type=float, help='the percentage threshold in performance difference that indicates a regression', default=2.0) + parser = argparse.ArgumentParser( + description="Compare Sundials performance results against previous results" + ) + + parser.add_argument( + "--release", + dest="release", + action="store_true", + help="indicate if the current run to process is a release", + ) + + parser.add_argument( + "--calidir", + dest="caliDir", + type=str, + help="path to directory containing caliper files", + default="/usr/workspace/sundials/califiles", + ) + + parser.add_argument( + "--releasedir", + dest="releaseDir", + type=str, + help="path to directory containing release caliper files", + default="/usr/workspace/sundials/califiles/Release", + ) + + parser.add_argument( + "--outpath", + dest="outPath", + type=str, + help="path to directory to write results to", + default="/dev/null", + ) + + parser.add_argument( + "--jobid", + dest="jobID", + type=int, + help="job id of the current run to identify .cali files", + ) + + parser.add_argument( + "--threshold", + dest="threshold", + type=float, + help="the percentage threshold in performance difference that indicates a regression", + default=2.0, + ) args = parser.parse_args() @@ -50,38 +86,42 @@ def main(): if not os.path.exists(outPath): os.makedirs(outPath) - outFile = open("%s/benchmark_output.out" % outPath, 'w') + outFile = open("%s/benchmark_output.out" % outPath, "w") # thread per file with mp.Pool() as pool: - for res in pool.starmap(process_benchmark, [(jobID, release, releaseDir, i, threshold) for i in benchFiles]): + for res in pool.starmap( + process_benchmark, + [(jobID, release, releaseDir, i, threshold) for i in benchFiles], + ): if res: outFile.write(res + "\n") outFile.close() - outFile = open("%s/benchmark_output.out" % outPath, 'r') + outFile = open("%s/benchmark_output.out" % outPath, "r") try: outLines = outFile.readlines() finally: outFile.close() - if (len(outLines) == 0): + if len(outLines) == 0: return -1 return 0 + def process_benchmark(jobID, isRelease, releaseDir, benchmarkDir, threshold): # Get the current benchmark run benchmarkFiles = glob.glob("%s/*.cali" % benchmarkDir) # Don't compare if the run didn't include this benchmark - if (len(benchmarkFiles) == 0): + if len(benchmarkFiles) == 0: return th_files = tt.Thicket.from_caliperreader(benchmarkFiles) - curFilter = lambda x: x['job_id'] == jobID + curFilter = lambda x: x["job_id"] == jobID th_current = th_files.filter_metadata(curFilter) # Get the release caliper file - cluster = th_current.metadata['cluster'].values[0] + cluster = th_current.metadata["cluster"].values[0] if isRelease: # Get the last release versionDirs = glob.glob("%s/%s/*" % (releaseDir, cluster)) @@ -89,18 +129,23 @@ def process_benchmark(jobID, isRelease, releaseDir, benchmarkDir, threshold): versionDir = versionDirs[1] else: # Get the release the run is a part of - version = th_current.metadata['sundials_version'].values[0] + version = th_current.metadata["sundials_version"].values[0] versionDir = "%s/%s/%s" % (releaseDir, cluster, version) - benchmarkName = th_current.metadata['env.TEST_NAME'].values[0] - releaseFile = glob.glob("%s/Benchmarking/*/%s/*.cali" % (versionDir, benchmarkName), recursive=True) + benchmarkName = 
th_current.metadata["env.TEST_NAME"].values[0] + releaseFile = glob.glob( + "%s/Benchmarking/*/%s/*.cali" % (versionDir, benchmarkName), recursive=True + ) th_compare = tt.Thicket.from_caliperreader(releaseFile) - metrics = ['Max time/rank'] + metrics = ["Max time/rank"] tt.mean(th_current, columns=metrics) tt.mean(th_compare, columns=metrics) - ratio = th_current.statsframe.dataframe['Max time/rank_mean'] / th_compare.statsframe.dataframe['Max time/rank_mean'] + ratio = ( + th_current.statsframe.dataframe["Max time/rank_mean"] + / th_compare.statsframe.dataframe["Max time/rank_mean"] + ) - tolerance = threshold/100 + tolerance = threshold / 100 if 1 - ratio[0] < tolerance: return benchmarkName diff --git a/test/compare_examples.py b/test/compare_examples.py index 046b9d620b..3383fa91f6 100644 --- a/test/compare_examples.py +++ b/test/compare_examples.py @@ -27,18 +27,50 @@ import hatchet as ht import thicket as tt -def main(): - parser = argparse.ArgumentParser(description='Compare Sundials performance results against previous results') - - parser.add_argument('--release', dest='release', action='store_true', help='indicate if the current run to process is a release') - - parser.add_argument('--calidir', dest='caliDir', type=str, help='path to directory containing caliper files', default="/usr/workspace/sundials/califiles") - - parser.add_argument('--releasedir', dest='releaseDir', type=str, help='path to directory containing release caliper files', default="/usr/workspace/sundials/califiles/Release") - parser.add_argument('--outpath', dest='outPath', type=str, help='path to directory to write results to', default="/dev/null") - - parser.add_argument('--threshold', dest="threshold", type=float, help='the percentage threshold in performance difference that indicates a regression', default=2.0) +def main(): + parser = argparse.ArgumentParser( + description="Compare Sundials performance results against previous results" + ) + + parser.add_argument( + "--release", + dest="release", + action="store_true", + help="indicate if the current run to process is a release", + ) + + parser.add_argument( + "--calidir", + dest="caliDir", + type=str, + help="path to directory containing caliper files", + default="/usr/workspace/sundials/califiles", + ) + + parser.add_argument( + "--releasedir", + dest="releaseDir", + type=str, + help="path to directory containing release caliper files", + default="/usr/workspace/sundials/califiles/Release", + ) + + parser.add_argument( + "--outpath", + dest="outPath", + type=str, + help="path to directory to write results to", + default="/dev/null", + ) + + parser.add_argument( + "--threshold", + dest="threshold", + type=float, + help="the percentage threshold in performance difference that indicates a regression", + default=2.0, + ) args = parser.parse_args() @@ -49,13 +81,13 @@ def main(): threshold = args.threshold # Get the latest test run - runDirs = glob.glob("%s/Testing/*" % caliDir, recursive = True) + runDirs = glob.glob("%s/Testing/*" % caliDir, recursive=True) runDirs.sort(key=os.path.getmtime, reverse=True) runDir = runDirs[0] runFile = glob.glob(runDir)[0] th_temp = tt.Thicket.from_caliperreader(runFile) - cluster = th_temp.metadata['cluster'] + cluster = th_temp.metadata["cluster"] # get machine from the file if release: # Compare against the last release @@ -64,7 +96,7 @@ def main(): versionDir = versionDirs[1] else: # Compare against the release the run is a part of - version = th_temp.metadata['sundials_version'].values[0] + version = 
th_temp.metadata["sundials_version"].values[0] versionDir = "%s/%s/%s" % (releaseDir, cluster, version) # Gather files to process @@ -72,22 +104,24 @@ def main(): if not os.path.exists(outPath): os.makedirs(outPath) - outFile = open("%s/output.out" % outPath, 'w') + outFile = open("%s/output.out" % outPath, "w") # Compare test results against past runs. If a test performs below a threshold, output test name to outFile. with mp.Pool() as pool: - for res in pool.starmap(compare_against_release, [(versionDir, i, threshold) for i in runFiles]): + for res in pool.starmap( + compare_against_release, [(versionDir, i, threshold) for i in runFiles] + ): if res: outFile.write(res + "\n") outFile.close() - outFile = open("%s/example_output.out" % outPath, 'r') + outFile = open("%s/example_output.out" % outPath, "r") try: outLines = outFile.readlines() finally: outFile.close() - if (len(outLines) == 0): + if len(outLines) == 0: return -1 return 0 @@ -95,21 +129,27 @@ def main(): def compare_against_release(releaseDir, file, threshold): th = tt.Thicket.from_caliperreader(file) - testName = th.metadata['env.TEST_NAME'].values[0] + testName = th.metadata["env.TEST_NAME"].values[0] # Gather release run - releaseFile = glob.glob("%s/Testing/*/%s.*.cali" % (releaseDir, testName), recursive=True) + releaseFile = glob.glob( + "%s/Testing/*/%s.*.cali" % (releaseDir, testName), recursive=True + ) th_release = tt.Thicket.from_caliperreader(releaseFile) - metrics = ['Max time/rank'] + metrics = ["Max time/rank"] tt.mean(th_release, columns=metrics) tt.mean(th, columns=metrics) - ratio = th.statsframe.dataframe['Max time/rank_mean'] / th_release.statsframe.dataframe['Max time/rank_mean'] + ratio = ( + th.statsframe.dataframe["Max time/rank_mean"] + / th_release.statsframe.dataframe["Max time/rank_mean"] + ) print(ratio[0]) - tolerance = threshold/100 + tolerance = threshold / 100 if 1 - ratio[0] < tolerance: return testName + if __name__ == "__main__": main() diff --git a/test/config_cmake.py b/test/config_cmake.py index e916d1bcc0..f1574b6257 100644 --- a/test/config_cmake.py +++ b/test/config_cmake.py @@ -21,109 +21,252 @@ def main(): import argparse - parser = argparse.ArgumentParser(description='''Create a SUNDIALS CMake - cache file''') - - parser.add_argument('--filetype', type=str, choices=['cache', 'script'], - default='cache', - help='''Create a CMake cache file or configuration - script (default cache)''') - - parser.add_argument('--filename', type=str, default="sundials.cmake", - help='''Set the cache file or script name (default - sundials.cmake)''') - - parser.add_argument('--readenv', action='store_true', - help='''Read environment variables (command line + parser = argparse.ArgumentParser( + description="""Create a SUNDIALS CMake + cache file""" + ) + + parser.add_argument( + "--filetype", + type=str, + choices=["cache", "script"], + default="cache", + help="""Create a CMake cache file or configuration + script (default cache)""", + ) + + parser.add_argument( + "--filename", + type=str, + default="sundials.cmake", + help="""Set the cache file or script name (default + sundials.cmake)""", + ) + + parser.add_argument( + "--readenv", + action="store_true", + help="""Read environment variables (command line arguments will override any settings from the - environment variables)''') + environment variables)""", + ) - parser.add_argument('--debugscript', action='store_true', - help='Enable debugging output for this script') + parser.add_argument( + "--debugscript", + action="store_true", + help="Enable 
debugging output for this script", + ) # ----------------- # Compiler Options # ----------------- - group = parser.add_argument_group('Compilers and Flags', - '''Options for setting the C, C++, - Fortran, and CUDA compiler and flags.''') + group = parser.add_argument_group( + "Compilers and Flags", + """Options for setting the C, C++, + Fortran, and CUDA compiler and flags.""", + ) # Build type - add_arg(group, '--build-type', 'CMAKE_BUILD_TYPE', 'CMAKE_BUILD_TYPE', - 'RelWithDebInfo', 'STRING', - 'CMake build type (Debug, RelWithDebInfo, Release)') + add_arg( + group, + "--build-type", + "CMAKE_BUILD_TYPE", + "CMAKE_BUILD_TYPE", + "RelWithDebInfo", + "STRING", + "CMake build type (Debug, RelWithDebInfo, Release)", + ) # C compiler - add_arg(group, '--c-compiler', 'CC', 'CMAKE_C_COMPILER', None, 'FILEPATH', - 'C compiler') - - add_arg(group, '--c-flags', 'CFLAGS', 'CMAKE_C_FLAGS', None, 'STRING', - 'C compiler flags') - - add_arg(group, '--c-std', 'CMAKE_C_STANDARD', 'CMAKE_C_STANDARD', '99', - 'STRING', 'C standard') - - add_arg(group, '--c-ext', 'CMAKE_C_EXTENSIONS', 'CMAKE_C_EXTENSIONS', - 'OFF', 'STRING', 'C compiler extensions') + add_arg( + group, "--c-compiler", "CC", "CMAKE_C_COMPILER", None, "FILEPATH", "C compiler" + ) + + add_arg( + group, + "--c-flags", + "CFLAGS", + "CMAKE_C_FLAGS", + None, + "STRING", + "C compiler flags", + ) + + add_arg( + group, + "--c-std", + "CMAKE_C_STANDARD", + "CMAKE_C_STANDARD", + "99", + "STRING", + "C standard", + ) + + add_arg( + group, + "--c-ext", + "CMAKE_C_EXTENSIONS", + "CMAKE_C_EXTENSIONS", + "OFF", + "STRING", + "C compiler extensions", + ) # C++ compiler - add_arg(group, '--cxx-compiler', 'CXX', 'CMAKE_CXX_COMPILER', None, - 'FILEPATH', 'C++ compiler') - - add_arg(group, '--cxx-flags', 'CXXFLAGS', 'CMAKE_CXX_FLAGS', None, - 'STRING', 'C++ compiler flags') - - add_arg(group, '--cxx-std', 'CMAKE_CXX_STANDARD', 'CMAKE_CXX_STANDARD', - '14', 'STRING', 'C++ standard') - - add_arg(group, '--cxx-ext', 'CMAKE_CXX_EXTENSIONS', 'CMAKE_CXX_EXTENSIONS', - 'OFF', 'STRING', 'C++ compiler extensions') + add_arg( + group, + "--cxx-compiler", + "CXX", + "CMAKE_CXX_COMPILER", + None, + "FILEPATH", + "C++ compiler", + ) + + add_arg( + group, + "--cxx-flags", + "CXXFLAGS", + "CMAKE_CXX_FLAGS", + None, + "STRING", + "C++ compiler flags", + ) + + add_arg( + group, + "--cxx-std", + "CMAKE_CXX_STANDARD", + "CMAKE_CXX_STANDARD", + "14", + "STRING", + "C++ standard", + ) + + add_arg( + group, + "--cxx-ext", + "CMAKE_CXX_EXTENSIONS", + "CMAKE_CXX_EXTENSIONS", + "OFF", + "STRING", + "C++ compiler extensions", + ) # Fortran compiler - add_arg(group, '--fortran-compiler', 'FC', 'CMAKE_Fortran_COMPILER', None, - 'FILEPATH', 'Fortran compiler') - - add_arg(group, '--fortran-flags', 'FFLAGS', 'CMAKE_Fortran_FLAGS', None, - 'STRING', 'Fortran compiler flags') + add_arg( + group, + "--fortran-compiler", + "FC", + "CMAKE_Fortran_COMPILER", + None, + "FILEPATH", + "Fortran compiler", + ) + + add_arg( + group, + "--fortran-flags", + "FFLAGS", + "CMAKE_Fortran_FLAGS", + None, + "STRING", + "Fortran compiler flags", + ) # CUDA compiler - add_arg(group, '--cuda-compiler', 'CUDACXX', 'CMAKE_CUDA_COMPILER', None, - 'FILEPATH', 'CUDA compiler') - - add_arg(group, '--cuda-flags', 'CUDAFLAGS', 'CMAKE_CUDA_FLAGS', None, - 'STRING', 'CUDA compiler flags') - - add_arg(group, '--cuda-std', 'CMAKE_CUDA_STANDARD', 'CMAKE_CUDA_STANDARD', - '14', 'STRING', 'CUDA standard') - - add_arg(group, '--cuda-arch', 'CUDAARCHS', 'CMAKE_CUDA_ARCHITECTURES', - None, 'STRING', 'CUDA architecture') + 
add_arg( + group, + "--cuda-compiler", + "CUDACXX", + "CMAKE_CUDA_COMPILER", + None, + "FILEPATH", + "CUDA compiler", + ) + + add_arg( + group, + "--cuda-flags", + "CUDAFLAGS", + "CMAKE_CUDA_FLAGS", + None, + "STRING", + "CUDA compiler flags", + ) + + add_arg( + group, + "--cuda-std", + "CMAKE_CUDA_STANDARD", + "CMAKE_CUDA_STANDARD", + "14", + "STRING", + "CUDA standard", + ) + + add_arg( + group, + "--cuda-arch", + "CUDAARCHS", + "CMAKE_CUDA_ARCHITECTURES", + None, + "STRING", + "CUDA architecture", + ) # Additional compiler options - add_arg(group, '--Wall', 'SUNDIALS_ENABLE_ALL_WARNINGS', - 'ENABLE_ALL_WARNINGS', 'OFF', 'BOOL', - 'Enable all compiler warnings') - - add_arg(group, '--Werror', 'SUNDIALS_ENABLE_WARNINGS_AS_ERRORS', - 'ENABLE_WARNINGS_AS_ERRORS', 'OFF', 'BOOL', - 'Enable compiler warnings as errors') - - add_arg(group, '--address-sanitizer', 'SUNDIALS_ENABLE_ADDRESS_SANITIZER', - 'ENABLE_ADDRESS_SANITIZER', 'OFF', 'BOOL', - 'Enable address sanitizer') + add_arg( + group, + "--Wall", + "SUNDIALS_ENABLE_ALL_WARNINGS", + "ENABLE_ALL_WARNINGS", + "OFF", + "BOOL", + "Enable all compiler warnings", + ) + + add_arg( + group, + "--Werror", + "SUNDIALS_ENABLE_WARNINGS_AS_ERRORS", + "ENABLE_WARNINGS_AS_ERRORS", + "OFF", + "BOOL", + "Enable compiler warnings as errors", + ) + + add_arg( + group, + "--address-sanitizer", + "SUNDIALS_ENABLE_ADDRESS_SANITIZER", + "ENABLE_ADDRESS_SANITIZER", + "OFF", + "BOOL", + "Enable address sanitizer", + ) # ---------------- # Install Options # ---------------- - group = parser.add_argument_group('Install Options', - '''Options for where SUNDIALS should be - installed.''') + group = parser.add_argument_group( + "Install Options", + """Options for where SUNDIALS should be + installed.""", + ) # install prefix - add_arg(group, '--install-prefix', 'SUNDIALS_INSTALL_PREFIX', - 'CMAKE_INSTALL_PREFIX', None, 'PATH', 'SUNDIALS install location') + add_arg( + group, + "--install-prefix", + "SUNDIALS_INSTALL_PREFIX", + "CMAKE_INSTALL_PREFIX", + None, + "PATH", + "SUNDIALS install location", + ) # library directory @@ -131,148 +274,333 @@ def main(): # Debugging Options # ------------------ - group = parser.add_argument_group('Debugging Options', - '''Options debugging SUNDIALS.''') - - add_arg(group, '--debug', 'SUNDIALS_DEBUG', 'SUNDIALS_DEBUG', 'OFF', - 'BOOL', 'SUNDIALS debugging output') - - add_arg(group, '--debug-assert', 'SUNDIALS_DEBUG_ASSERT', - 'SUNDIALS_DEBUG_ASSERT', 'OFF', 'BOOL', - 'SUNDIALS debugging asserts', dependson='--debug') - - add_arg(group, '--debug-cuda', 'SUNDIALS_DEBUG_CUDA_LASTERROR', - 'SUNDIALS_DEBUG_CUDA_LASTERROR', 'OFF', 'BOOL', - 'SUNDIALS debugging cuda errors', dependson='--debug') - - add_arg(group, '--debug-hip', 'SUNDIALS_DEBUG_HIP_LASTERROR', - 'SUNDIALS_DEBUG_HIP_LASTERROR', 'OFF', 'BOOL', - 'SUNDIALS debugging hip errors', dependson='--debug') - - add_arg(group, '--debug-printvec', 'SUNDIALS_DEBUG_PRINTVEC', - 'SUNDIALS_DEBUG_PRINTVEC', 'OFF', 'BOOL', - 'SUNDIALS debugging vector output', dependson='--debug') + group = parser.add_argument_group( + "Debugging Options", """Options debugging SUNDIALS.""" + ) + + add_arg( + group, + "--debug", + "SUNDIALS_DEBUG", + "SUNDIALS_DEBUG", + "OFF", + "BOOL", + "SUNDIALS debugging output", + ) + + add_arg( + group, + "--debug-assert", + "SUNDIALS_DEBUG_ASSERT", + "SUNDIALS_DEBUG_ASSERT", + "OFF", + "BOOL", + "SUNDIALS debugging asserts", + dependson="--debug", + ) + + add_arg( + group, + "--debug-cuda", + "SUNDIALS_DEBUG_CUDA_LASTERROR", + "SUNDIALS_DEBUG_CUDA_LASTERROR", + "OFF", 
+ "BOOL", + "SUNDIALS debugging cuda errors", + dependson="--debug", + ) + + add_arg( + group, + "--debug-hip", + "SUNDIALS_DEBUG_HIP_LASTERROR", + "SUNDIALS_DEBUG_HIP_LASTERROR", + "OFF", + "BOOL", + "SUNDIALS debugging hip errors", + dependson="--debug", + ) + + add_arg( + group, + "--debug-printvec", + "SUNDIALS_DEBUG_PRINTVEC", + "SUNDIALS_DEBUG_PRINTVEC", + "OFF", + "BOOL", + "SUNDIALS debugging vector output", + dependson="--debug", + ) # -------------- # Library Types # -------------- - group = parser.add_argument_group('Library Type Options', - '''Options to specify if shared and/or - static libraries are build.''') - add_arg(group, '--static', 'SUNDIALS_STATIC_LIBRARIES', - 'BUILD_STATIC_LIBS', 'ON', 'BOOL', - 'Build static SUNDIALS libraries') - - add_arg(group, '--shared', 'SUNDIALS_SHARED_LIBRARIES', - 'BUILD_SHARED_LIBS', 'ON', 'BOOL', - 'Build shared SUNDIALS libraries') + group = parser.add_argument_group( + "Library Type Options", + """Options to specify if shared and/or + static libraries are build.""", + ) + add_arg( + group, + "--static", + "SUNDIALS_STATIC_LIBRARIES", + "BUILD_STATIC_LIBS", + "ON", + "BOOL", + "Build static SUNDIALS libraries", + ) + + add_arg( + group, + "--shared", + "SUNDIALS_SHARED_LIBRARIES", + "BUILD_SHARED_LIBS", + "ON", + "BOOL", + "Build shared SUNDIALS libraries", + ) # --------- # Packages # --------- # packages TODO(DJG): Add support for ONLY option - group = parser.add_argument_group('SUNDIALS Packages', - '''Options to specify which SUNDIALS - packages should be built.''') - add_arg(group, '--arkode', 'SUNDIALS_ARKODE', 'BUILD_ARKODE', 'ON', 'BOOL', - 'Build the ARKODE library') - - add_arg(group, '--cvode', 'SUNDIALS_CVODE', 'BUILD_CVODE', 'ON', 'BOOL', - 'Build the CVODE library') - - add_arg(group, '--cvodes', 'SUNDIALS_CVODES', 'BUILD_CVODES', 'ON', 'BOOL', - 'Build the CVODES library') - - add_arg(group, '--ida', 'SUNDIALS_IDA', 'BUILD_IDA', 'ON', 'BOOL', - 'Build the IDA library') - - add_arg(group, '--idas', 'SUNDIALS_IDAS', 'BUILD_IDAS', 'ON', 'BOOL', - 'Build the IDAS library') - - add_arg(group, '--kinsol', 'SUNDIALS_KINSOL', 'BUILD_KINSOL', 'ON', 'BOOL', - 'Build the KINSOL library') + group = parser.add_argument_group( + "SUNDIALS Packages", + """Options to specify which SUNDIALS + packages should be built.""", + ) + add_arg( + group, + "--arkode", + "SUNDIALS_ARKODE", + "BUILD_ARKODE", + "ON", + "BOOL", + "Build the ARKODE library", + ) + + add_arg( + group, + "--cvode", + "SUNDIALS_CVODE", + "BUILD_CVODE", + "ON", + "BOOL", + "Build the CVODE library", + ) + + add_arg( + group, + "--cvodes", + "SUNDIALS_CVODES", + "BUILD_CVODES", + "ON", + "BOOL", + "Build the CVODES library", + ) + + add_arg( + group, + "--ida", + "SUNDIALS_IDA", + "BUILD_IDA", + "ON", + "BOOL", + "Build the IDA library", + ) + + add_arg( + group, + "--idas", + "SUNDIALS_IDAS", + "BUILD_IDAS", + "ON", + "BOOL", + "Build the IDAS library", + ) + + add_arg( + group, + "--kinsol", + "SUNDIALS_KINSOL", + "BUILD_KINSOL", + "ON", + "BOOL", + "Build the KINSOL library", + ) # ----------------- # Packages Options # ----------------- - group = parser.add_argument_group('SUNDIALS Package Options', - '''Options for configuring SUNDIALS types + group = parser.add_argument_group( + "SUNDIALS Package Options", + """Options for configuring SUNDIALS types and enabling special compile time - features.''') + features.""", + ) # index size - add_arg(group, '--indexsize', 'SUNDIALS_INDEX_SIZE', 'SUNDIALS_INDEX_SIZE', - '64', 'STRING', 'index size', choices=['32', '64']) + 
add_arg( + group, + "--indexsize", + "SUNDIALS_INDEX_SIZE", + "SUNDIALS_INDEX_SIZE", + "64", + "STRING", + "index size", + choices=["32", "64"], + ) # precision - add_arg(group, '--precision', 'SUNDIALS_PRECISION', 'SUNDIALS_PRECISION', - 'double', 'STRING', 'real type precision', - choices=['single', 'double', 'extended']) + add_arg( + group, + "--precision", + "SUNDIALS_PRECISION", + "SUNDIALS_PRECISION", + "double", + "STRING", + "real type precision", + choices=["single", "double", "extended"], + ) # monitoring - add_arg(group, '--monitoring', 'SUNDIALS_MONITORING', - 'SUNDIALS_BUILD_WITH_MONITORING', 'OFF', 'BOOL', - 'integrator and solver monitoring') + add_arg( + group, + "--monitoring", + "SUNDIALS_MONITORING", + "SUNDIALS_BUILD_WITH_MONITORING", + "OFF", + "BOOL", + "integrator and solver monitoring", + ) # profiling - add_arg(group, '--profiling', 'SUNDIALS_PROFILING', - 'SUNDIALS_BUILD_WITH_PROFILING', 'OFF', 'BOOL', - 'fine-grained profiling') - - add_arg(group, '--logging-level', 'SUNDIALS_LOGGING_LEVEL', - 'SUNDIALS_LOGGING_LEVEL', '0', 'STRING', - 'logging', choices=['0', '1', '2', '3', '4', '5']) + add_arg( + group, + "--profiling", + "SUNDIALS_PROFILING", + "SUNDIALS_BUILD_WITH_PROFILING", + "OFF", + "BOOL", + "fine-grained profiling", + ) + + add_arg( + group, + "--logging-level", + "SUNDIALS_LOGGING_LEVEL", + "SUNDIALS_LOGGING_LEVEL", + "0", + "STRING", + "logging", + choices=["0", "1", "2", "3", "4", "5"], + ) # fused kernels - add_arg(group, '--fused-kernels', 'SUNDIALS_FUSED_KERNELS', - 'SUNDIALS_BUILD_PACKAGE_FUSED_KERNELS', 'OFF', 'BOOL', - 'package fused kernels') + add_arg( + group, + "--fused-kernels", + "SUNDIALS_FUSED_KERNELS", + "SUNDIALS_BUILD_PACKAGE_FUSED_KERNELS", + "OFF", + "BOOL", + "package fused kernels", + ) # error checks - add_arg(group, '--enable-error-checks', 'SUNDIALS_ENABLE_ERROR_CHECKS', - 'SUNDIALS_ENABLE_ERROR_CHECKS', 'OFF', 'BOOL', - 'enable error checks') - + add_arg( + group, + "--enable-error-checks", + "SUNDIALS_ENABLE_ERROR_CHECKS", + "SUNDIALS_ENABLE_ERROR_CHECKS", + "OFF", + "BOOL", + "enable error checks", + ) # ----------- # Interfaces # ----------- - group = parser.add_argument_group('SUNDIALS Interfaces', - '''These options enable or disable the - SUNDIALS Fortran interfaces.''') + group = parser.add_argument_group( + "SUNDIALS Interfaces", + """These options enable or disable the + SUNDIALS Fortran interfaces.""", + ) # Fortran interfaces - add_arg(group, '--fmod-interface', 'SUNDIALS_FMOD_INTERFACE', - 'BUILD_FORTRAN_MODULE_INTERFACE', 'OFF', 'BOOL', - 'Fortran module interface') + add_arg( + group, + "--fmod-interface", + "SUNDIALS_FMOD_INTERFACE", + "BUILD_FORTRAN_MODULE_INTERFACE", + "OFF", + "BOOL", + "Fortran module interface", + ) # --------- # Examples # --------- - group = parser.add_argument_group('Example and Benchmark Programs', - '''These options enable or disable + group = parser.add_argument_group( + "Example and Benchmark Programs", + """These options enable or disable building and installing the SUNDIALS - example and Benchmark programs.''') - - add_arg(group, '--examples-c', 'SUNDIALS_EXAMPLES_C', - 'EXAMPLES_ENABLE_C', 'ON', 'BOOL', 'C examples') - - add_arg(group, '--examples-cxx', 'SUNDIALS_EXAMPLES_CXX', - 'EXAMPLES_ENABLE_CXX', None, 'BOOL', 'C++ examples') - - add_arg(group, '--examples-f03', 'SUNDIALS_EXAMPLES_F03', - 'EXAMPLES_ENABLE_F2003', None, 'BOOL', - 'Fortran 2003 examples') - - add_arg(group, '--examples-cuda', 'SUNDIALS_EXAMPLES_CUDA', - 'EXAMPLES_ENABLE_CUDA', None, 'BOOL', 'CUDA examples') - 
- add_arg(group, '--benchmarks', 'SUNDIALS_BENCHMARKS', - 'BUILD_BENCHMARKS', 'OFF', 'BOOL', 'Benchmarks') + example and Benchmark programs.""", + ) + + add_arg( + group, + "--examples-c", + "SUNDIALS_EXAMPLES_C", + "EXAMPLES_ENABLE_C", + "ON", + "BOOL", + "C examples", + ) + + add_arg( + group, + "--examples-cxx", + "SUNDIALS_EXAMPLES_CXX", + "EXAMPLES_ENABLE_CXX", + None, + "BOOL", + "C++ examples", + ) + + add_arg( + group, + "--examples-f03", + "SUNDIALS_EXAMPLES_F03", + "EXAMPLES_ENABLE_F2003", + None, + "BOOL", + "Fortran 2003 examples", + ) + + add_arg( + group, + "--examples-cuda", + "SUNDIALS_EXAMPLES_CUDA", + "EXAMPLES_ENABLE_CUDA", + None, + "BOOL", + "CUDA examples", + ) + + add_arg( + group, + "--benchmarks", + "SUNDIALS_BENCHMARKS", + "BUILD_BENCHMARKS", + "OFF", + "BOOL", + "Benchmarks", + ) # ------------ # TPL Options @@ -282,317 +610,745 @@ def main(): # MPI # ---- - group = parser.add_argument_group('MPI Options', - '''Options for enabling MPI support in + group = parser.add_argument_group( + "MPI Options", + """Options for enabling MPI support in SUNDIALS and setting the MPI C, C++, and - Fortran compilers.''') - - add_arg(group, '--mpi', 'SUNDIALS_MPI', 'ENABLE_MPI', 'OFF', - 'FILEPATH', 'SUNDIALS MPI support') - - add_arg(group, '--mpicc', 'MPICC', 'MPI_C_COMPILER', None, - 'FILEPATH', 'MPI C compiler', dependson='--mpi') - - add_arg(group, '--mpicxx', 'MPICXX', 'MPI_CXX_COMPILER', None, - 'FILEPATH', 'MPI C++ compiler', dependson='--mpi') - - add_arg(group, '--mpifort', 'MPIFC', 'MPI_Fortran_COMPILER', None, - 'FILEPATH', 'MPI Fortran compiler', dependson='--mpi') - - add_arg(group, '--mpiexec', 'MPIEXEC', 'MPIEXEC_EXECUTABLE', None, - 'FILEPATH', 'MPI executable', dependson='--mpi') - - add_arg(group, '--mpiexec-pre-flags', 'MPIEXEC_PREFLAGS', 'MPIEXEC_PREFLAGS', None, - 'STRING', 'MPI executable extra flags', dependson='--mpi') + Fortran compilers.""", + ) + + add_arg( + group, + "--mpi", + "SUNDIALS_MPI", + "ENABLE_MPI", + "OFF", + "FILEPATH", + "SUNDIALS MPI support", + ) + + add_arg( + group, + "--mpicc", + "MPICC", + "MPI_C_COMPILER", + None, + "FILEPATH", + "MPI C compiler", + dependson="--mpi", + ) + + add_arg( + group, + "--mpicxx", + "MPICXX", + "MPI_CXX_COMPILER", + None, + "FILEPATH", + "MPI C++ compiler", + dependson="--mpi", + ) + + add_arg( + group, + "--mpifort", + "MPIFC", + "MPI_Fortran_COMPILER", + None, + "FILEPATH", + "MPI Fortran compiler", + dependson="--mpi", + ) + + add_arg( + group, + "--mpiexec", + "MPIEXEC", + "MPIEXEC_EXECUTABLE", + None, + "FILEPATH", + "MPI executable", + dependson="--mpi", + ) + + add_arg( + group, + "--mpiexec-pre-flags", + "MPIEXEC_PREFLAGS", + "MPIEXEC_PREFLAGS", + None, + "STRING", + "MPI executable extra flags", + dependson="--mpi", + ) # ---------- # Threading # ---------- # OpenMP - group = parser.add_argument_group('OpenMP Options', - '''Options for enabling OpenMP support in - SUNDIALS.''') - - add_arg(group, '--openmp', 'SUNDIALS_OPENMP', 'ENABLE_OPENMP', 'OFF', - 'BOOL', 'SUNDIALS OpenMP support') - - add_arg(group, '--openmp-device-works', 'SUNDIALS_OPENMP_DEVICE_WORKS', - 'OPENMP_DEVICE_WORKS', 'OFF', 'BOOL', - 'Disable OpenMP Device Support Checks (assume OpenMP 4.5+)') - + group = parser.add_argument_group( + "OpenMP Options", + """Options for enabling OpenMP support in + SUNDIALS.""", + ) + + add_arg( + group, + "--openmp", + "SUNDIALS_OPENMP", + "ENABLE_OPENMP", + "OFF", + "BOOL", + "SUNDIALS OpenMP support", + ) + + add_arg( + group, + "--openmp-device-works", + "SUNDIALS_OPENMP_DEVICE_WORKS", + 
"OPENMP_DEVICE_WORKS", + "OFF", + "BOOL", + "Disable OpenMP Device Support Checks (assume OpenMP 4.5+)", + ) # Pthread - group = parser.add_argument_group('Pthread Options', - '''Options for enabling - Pthread support in SUNDIALS.''') - - add_arg(group, '--pthread', 'SUNDIALS_PTHREAD', 'ENABLE_PTHREAD', 'OFF', - 'BOOL', 'SUNDIALS PThread support') + group = parser.add_argument_group( + "Pthread Options", + """Options for enabling + Pthread support in SUNDIALS.""", + ) + + add_arg( + group, + "--pthread", + "SUNDIALS_PTHREAD", + "ENABLE_PTHREAD", + "OFF", + "BOOL", + "SUNDIALS PThread support", + ) # ----- # GPUs # ----- # CUDA - group = parser.add_argument_group('CUDA Options', - '''Options for enabling CUDA support in - - SUNDIALS''') - add_arg(group, '--cuda', 'SUNDIALS_CUDA', 'ENABLE_CUDA', 'OFF', 'BOOL', - 'SUNDIALS CUDA support') + group = parser.add_argument_group( + "CUDA Options", + """Options for enabling CUDA support in + + SUNDIALS""", + ) + add_arg( + group, + "--cuda", + "SUNDIALS_CUDA", + "ENABLE_CUDA", + "OFF", + "BOOL", + "SUNDIALS CUDA support", + ) # HIP - group = parser.add_argument_group('HIP Options', - '''Options for enabling HIP support in - SUNDIALS.''') - - add_arg(group, '--hip', 'SUNDIALS_HIP', 'ENABLE_HIP', 'OFF', 'BOOL', - 'SUNDIALS HIP support') + group = parser.add_argument_group( + "HIP Options", + """Options for enabling HIP support in + SUNDIALS.""", + ) + + add_arg( + group, + "--hip", + "SUNDIALS_HIP", + "ENABLE_HIP", + "OFF", + "BOOL", + "SUNDIALS HIP support", + ) # OpenMP Offload - group = parser.add_argument_group('OpenMP Offload Options', - '''Options for enabling OpenMP offload - support in SUNDIALS.''') - - add_arg(group, '--openmp-offload', 'SUNDIALS_OPENMP_OFFLOAD', - 'ENABLE_OPENMP_DEVICE', 'OFF', 'BOOL', - 'SUNDIALS OpenMP offload support') + group = parser.add_argument_group( + "OpenMP Offload Options", + """Options for enabling OpenMP offload + support in SUNDIALS.""", + ) + + add_arg( + group, + "--openmp-offload", + "SUNDIALS_OPENMP_OFFLOAD", + "ENABLE_OPENMP_DEVICE", + "OFF", + "BOOL", + "SUNDIALS OpenMP offload support", + ) # ------------------------ # Performance portability # ------------------------ # Kokkos - group = parser.add_argument_group('Kokkos Options') - - add_arg(group, '--kokkos', 'SUNDIALS_KOKKOS', 'ENABLE_KOKKOS', 'OFF', - 'BOOL', 'SUNDIALS Kokkos support') - - add_arg(group, '--kokkos-dir', 'KOKKOS_ROOT', 'Kokkos_DIR', None, 'PATH', - 'Kokkos install directory', dependson='--kokkos') + group = parser.add_argument_group("Kokkos Options") + + add_arg( + group, + "--kokkos", + "SUNDIALS_KOKKOS", + "ENABLE_KOKKOS", + "OFF", + "BOOL", + "SUNDIALS Kokkos support", + ) + + add_arg( + group, + "--kokkos-dir", + "KOKKOS_ROOT", + "Kokkos_DIR", + None, + "PATH", + "Kokkos install directory", + dependson="--kokkos", + ) # RAJA - group = parser.add_argument_group('RAJA Options') - - add_arg(group, '--raja', 'SUNDIALS_RAJA', 'ENABLE_RAJA', 'OFF', 'BOOL', - 'SUNDIALS Raja support') - - add_arg(group, '--raja-dir', 'RAJA_ROOT', 'RAJA_DIR', None, 'PATH', - 'RAJA install directory', dependson='--raja') - - add_arg(group, '--raja-backends', 'RAJA_BACKENDS', - 'SUNDIALS_RAJA_BACKENDS', None, 'STRING', 'RAJA backends', - choices=['CUDA', 'HIP'], dependson='--raja') + group = parser.add_argument_group("RAJA Options") + + add_arg( + group, + "--raja", + "SUNDIALS_RAJA", + "ENABLE_RAJA", + "OFF", + "BOOL", + "SUNDIALS Raja support", + ) + + add_arg( + group, + "--raja-dir", + "RAJA_ROOT", + "RAJA_DIR", + None, + "PATH", + "RAJA install 
directory", + dependson="--raja", + ) + + add_arg( + group, + "--raja-backends", + "RAJA_BACKENDS", + "SUNDIALS_RAJA_BACKENDS", + None, + "STRING", + "RAJA backends", + choices=["CUDA", "HIP"], + dependson="--raja", + ) # SYCL - group = parser.add_argument_group('SYCL Options') - - add_arg(group, '--sycl', 'SUNDIALS_SYCL', 'ENABLE_SYCL', 'OFF', 'BOOL', - 'SUNDIALS SYCL support') + group = parser.add_argument_group("SYCL Options") + + add_arg( + group, + "--sycl", + "SUNDIALS_SYCL", + "ENABLE_SYCL", + "OFF", + "BOOL", + "SUNDIALS SYCL support", + ) # ------------------------ # Linear solver libraries # ------------------------ # Ginkgo - group = parser.add_argument_group('Ginkgo Options') - - add_arg(group, '--ginkgo', 'SUNDIALS_GINKGO', 'ENABLE_GINKGO', 'OFF', - 'BOOL', 'SUNDIALS Ginkgo support') - - add_arg(group, '--ginkgo-dir', 'GINKGO_ROOT', 'Ginkgo_DIR', None, 'PATH', - 'Ginkgo install directory', dependson='--ginkgo') - - add_arg(group, '--ginkgo-backends', 'GINKGO_BACKENDS', - 'SUNDIALS_GINKGO_BACKENDS', 'REF;OMP', 'STRING', 'Ginkgo backends', - choices=['REF', 'OMP', 'CUDA', 'HIP', 'DPCPP'], dependson='--ginkgo') + group = parser.add_argument_group("Ginkgo Options") + + add_arg( + group, + "--ginkgo", + "SUNDIALS_GINKGO", + "ENABLE_GINKGO", + "OFF", + "BOOL", + "SUNDIALS Ginkgo support", + ) + + add_arg( + group, + "--ginkgo-dir", + "GINKGO_ROOT", + "Ginkgo_DIR", + None, + "PATH", + "Ginkgo install directory", + dependson="--ginkgo", + ) + + add_arg( + group, + "--ginkgo-backends", + "GINKGO_BACKENDS", + "SUNDIALS_GINKGO_BACKENDS", + "REF;OMP", + "STRING", + "Ginkgo backends", + choices=["REF", "OMP", "CUDA", "HIP", "DPCPP"], + dependson="--ginkgo", + ) # LAPACK - group = parser.add_argument_group('LAPACK Options') - - add_arg(group, '--lapack', 'SUNDIALS_LAPACK', 'ENABLE_LAPACK', 'OFF', - 'BOOL', 'SUNDIALS LAPACK support') - - add_arg(group, '--lapack-libs', 'LAPACK_LIBRARIES', 'LAPACK_LIBRARIES', - None, 'STRING', 'LAPACK libraries', dependson='--lapack') + group = parser.add_argument_group("LAPACK Options") + + add_arg( + group, + "--lapack", + "SUNDIALS_LAPACK", + "ENABLE_LAPACK", + "OFF", + "BOOL", + "SUNDIALS LAPACK support", + ) + + add_arg( + group, + "--lapack-libs", + "LAPACK_LIBRARIES", + "LAPACK_LIBRARIES", + None, + "STRING", + "LAPACK libraries", + dependson="--lapack", + ) # KLU - group = parser.add_argument_group('KLU Options') - - add_arg(group, '--klu', 'SUNDIALS_KLU', 'ENABLE_KLU', 'OFF', 'BOOL', - 'SUNDIALS KLU support') - - add_arg(group, '--klu-incdir', 'SUITE_SPARSE_INCLUDE_DIR', - 'KLU_INCLUDE_DIR', None, 'PATH', 'KLU include directory', - dependson='--klu') - - add_arg(group, '--klu-libdir', 'SUITE_SPARSE_LIBRARY_DIR', - 'KLU_LIBRARY_DIR', None, 'PATH', 'KLU library directory', - dependson='--klu') + group = parser.add_argument_group("KLU Options") + + add_arg( + group, + "--klu", + "SUNDIALS_KLU", + "ENABLE_KLU", + "OFF", + "BOOL", + "SUNDIALS KLU support", + ) + + add_arg( + group, + "--klu-incdir", + "SUITE_SPARSE_INCLUDE_DIR", + "KLU_INCLUDE_DIR", + None, + "PATH", + "KLU include directory", + dependson="--klu", + ) + + add_arg( + group, + "--klu-libdir", + "SUITE_SPARSE_LIBRARY_DIR", + "KLU_LIBRARY_DIR", + None, + "PATH", + "KLU library directory", + dependson="--klu", + ) # KokkosKernels - group = parser.add_argument_group('KokkosKernels Options') - - add_arg(group, '--kokkos-kernels', 'SUNDIALS_KOKKOS_KERNELS', - 'ENABLE_KOKKOS_KERNELS', 'OFF', 'BOOL', - 'SUNDIALS Kokkos-Kernels support') - - add_arg(group, '--kokkos-kernels-dir', 
'KOKKOS_KERNELS_ROOT', - 'KokkosKernels_DIR', None, 'PATH', - 'Kokkos-Kernels install directory', dependson='--kokkos-kernels') + group = parser.add_argument_group("KokkosKernels Options") + + add_arg( + group, + "--kokkos-kernels", + "SUNDIALS_KOKKOS_KERNELS", + "ENABLE_KOKKOS_KERNELS", + "OFF", + "BOOL", + "SUNDIALS Kokkos-Kernels support", + ) + + add_arg( + group, + "--kokkos-kernels-dir", + "KOKKOS_KERNELS_ROOT", + "KokkosKernels_DIR", + None, + "PATH", + "Kokkos-Kernels install directory", + dependson="--kokkos-kernels", + ) # SuperLU MT - group = parser.add_argument_group('SuperLU_MT Options') - - add_arg(group, '--superlu-mt', 'SUNDIALS_SUPERLU_MT', 'ENABLE_SUPERLUMT', - 'OFF', 'BOOL', 'SUNDIALS SuperLU MT support') - - add_arg(group, '--superlu-mt-incdir', 'SUPERLU_MT_INCLUDE_DIR', - 'SUPERLUMT_INCLUDE_DIR', None, 'PATH', - 'SuperLU_MT include directory', dependson='--superlu-mt') - - add_arg(group, '--superlu-mt-libdir', 'SUPERLU_MT_LIBRARY_DIR', - 'SUPERLUMT_LIBRARY_DIR', None, 'PATH', - 'SuperLU_MT library directory', dependson='--superlu-mt') - - add_arg(group, '--superlu-mt-libs', 'SUPERLU_MT_LIBRARIES', - 'SUPERLUMT_LIBRARIES', None, 'STRING', - 'SuperLU_MT additional libraries', dependson='--superlu-mt') - - add_arg(group, '--superlu-mt-thread-type', 'SUPERLU_MT_THREAD_TYPE', - 'SUPERLUMT_THREAD_TYPE', None, 'STRING', - 'SuperLU_MT thread type', choices=['OpenMP', 'Pthread'], - dependson='--superlu-mt') + group = parser.add_argument_group("SuperLU_MT Options") + + add_arg( + group, + "--superlu-mt", + "SUNDIALS_SUPERLU_MT", + "ENABLE_SUPERLUMT", + "OFF", + "BOOL", + "SUNDIALS SuperLU MT support", + ) + + add_arg( + group, + "--superlu-mt-incdir", + "SUPERLU_MT_INCLUDE_DIR", + "SUPERLUMT_INCLUDE_DIR", + None, + "PATH", + "SuperLU_MT include directory", + dependson="--superlu-mt", + ) + + add_arg( + group, + "--superlu-mt-libdir", + "SUPERLU_MT_LIBRARY_DIR", + "SUPERLUMT_LIBRARY_DIR", + None, + "PATH", + "SuperLU_MT library directory", + dependson="--superlu-mt", + ) + + add_arg( + group, + "--superlu-mt-libs", + "SUPERLU_MT_LIBRARIES", + "SUPERLUMT_LIBRARIES", + None, + "STRING", + "SuperLU_MT additional libraries", + dependson="--superlu-mt", + ) + + add_arg( + group, + "--superlu-mt-thread-type", + "SUPERLU_MT_THREAD_TYPE", + "SUPERLUMT_THREAD_TYPE", + None, + "STRING", + "SuperLU_MT thread type", + choices=["OpenMP", "Pthread"], + dependson="--superlu-mt", + ) # SuperLU DIST - group = parser.add_argument_group('SuperLU_DIST Options') - - add_arg(group, '--superlu-dist', 'SUNDIALS_SUPERLU_DIST', - 'ENABLE_SUPERLUDIST', 'OFF', 'BOOL', - 'SUNDIALS SuperLU DIST support') - - add_arg(group, '--superlu-dist-dir', 'SUPERLU_DIST_ROOT', - 'SUPERLUDIST_DIR', None, 'PATH', - 'SuperLU_DIST installation directory', dependson='--superlu-dist') - - add_arg(group, '--superlu-dist-incdir', 'SUPERLU_DIST_INCLUDE_DIR', - 'SUPERLUDIST_INCLUDE_DIR', None, 'PATH', - 'SuperLU_DIST include directory', dependson='--superlu-dist') - - add_arg(group, '--superlu-dist-libdir', 'SUPERLU_DIST_LIBRARY_DIR', - 'SUPERLUDIST_LIBRARY_DIR', None, 'PATH', - 'SuperLU_DIST library directory', dependson='--superlu-dist') - - add_arg(group, '--superlu-dist-libs', 'SUPERLU_DIST_LIBRARIES', - 'SUPERLUDIST_LIBRARIES', None, 'STRING', - 'SuperLU_DIST additional libraries', dependson='--superlu-dist') - - add_arg(group, '--superlu-dist-openmp', 'SUPERLU_DIST_OPENMP', - 'SUPERLUDIST_OpenMP', 'OFF', 'BOOL', 'SuperLU_DIST OpenMP enabled', - dependson='--superlu-dist') + group = parser.add_argument_group("SuperLU_DIST 
Options") + + add_arg( + group, + "--superlu-dist", + "SUNDIALS_SUPERLU_DIST", + "ENABLE_SUPERLUDIST", + "OFF", + "BOOL", + "SUNDIALS SuperLU DIST support", + ) + + add_arg( + group, + "--superlu-dist-dir", + "SUPERLU_DIST_ROOT", + "SUPERLUDIST_DIR", + None, + "PATH", + "SuperLU_DIST installation directory", + dependson="--superlu-dist", + ) + + add_arg( + group, + "--superlu-dist-incdir", + "SUPERLU_DIST_INCLUDE_DIR", + "SUPERLUDIST_INCLUDE_DIR", + None, + "PATH", + "SuperLU_DIST include directory", + dependson="--superlu-dist", + ) + + add_arg( + group, + "--superlu-dist-libdir", + "SUPERLU_DIST_LIBRARY_DIR", + "SUPERLUDIST_LIBRARY_DIR", + None, + "PATH", + "SuperLU_DIST library directory", + dependson="--superlu-dist", + ) + + add_arg( + group, + "--superlu-dist-libs", + "SUPERLU_DIST_LIBRARIES", + "SUPERLUDIST_LIBRARIES", + None, + "STRING", + "SuperLU_DIST additional libraries", + dependson="--superlu-dist", + ) + + add_arg( + group, + "--superlu-dist-openmp", + "SUPERLU_DIST_OPENMP", + "SUPERLUDIST_OpenMP", + "OFF", + "BOOL", + "SuperLU_DIST OpenMP enabled", + dependson="--superlu-dist", + ) # Magma - group = parser.add_argument_group('MAGMA Options') - - add_arg(group, '--magma', 'SUNDIALS_MAGMA', 'ENABLE_MAGMA', 'OFF', 'BOOL', - 'SUNDIALS MAGMA support') - - add_arg(group, '--magma-dir', 'MAGMA_ROOT', 'MAGMA_DIR', None, 'PATH', - 'MAGMA install directory', dependson='--magma') - - add_arg(group, '--magma-backends', 'MAGAMA_BACKENDS', - 'SUNDIALS_MAGMA_BACKENDS', None, 'STRING', 'MAGMA backends', - choices=['CUDA', 'HIP'], dependson='--magma') + group = parser.add_argument_group("MAGMA Options") + + add_arg( + group, + "--magma", + "SUNDIALS_MAGMA", + "ENABLE_MAGMA", + "OFF", + "BOOL", + "SUNDIALS MAGMA support", + ) + + add_arg( + group, + "--magma-dir", + "MAGMA_ROOT", + "MAGMA_DIR", + None, + "PATH", + "MAGMA install directory", + dependson="--magma", + ) + + add_arg( + group, + "--magma-backends", + "MAGAMA_BACKENDS", + "SUNDIALS_MAGMA_BACKENDS", + None, + "STRING", + "MAGMA backends", + choices=["CUDA", "HIP"], + dependson="--magma", + ) # ---------------- # Other libraries # ---------------- # hypre - group = parser.add_argument_group('hypre Options') - - add_arg(group, '--hypre', 'SUNDIALS_HYPRE', 'ENABLE_HYPRE', 'OFF', 'BOOL', - 'SUNDIALS hypre support') - - add_arg(group, '--hypre-incdir', 'HYPRE_INCLUDE_DIR', - 'HYPRE_INCLUDE_DIR', None, 'PATH', - 'Hypre include directory', dependson='--hypre') - - add_arg(group, '--hypre-libdir', 'HYPRE_LIBRARY_DIR', - 'HYPRE_LIBRARY_DIR', None, 'PATH', - 'Hypre library directory', dependson='--hypre') + group = parser.add_argument_group("hypre Options") + + add_arg( + group, + "--hypre", + "SUNDIALS_HYPRE", + "ENABLE_HYPRE", + "OFF", + "BOOL", + "SUNDIALS hypre support", + ) + + add_arg( + group, + "--hypre-incdir", + "HYPRE_INCLUDE_DIR", + "HYPRE_INCLUDE_DIR", + None, + "PATH", + "Hypre include directory", + dependson="--hypre", + ) + + add_arg( + group, + "--hypre-libdir", + "HYPRE_LIBRARY_DIR", + "HYPRE_LIBRARY_DIR", + None, + "PATH", + "Hypre library directory", + dependson="--hypre", + ) # PETSc - group = parser.add_argument_group('PTESc Options') - - add_arg(group, '--petsc', 'SUNDIALS_PETSC', 'ENABLE_PETSC', 'OFF', 'BOOL', - 'SUNDIALS PETSc support') - - add_arg(group, '--petsc-dir', 'PETSC_ROOT', 'PETSC_DIR', None, 'PATH', - 'PETSc install directory', dependson='--petsc') + group = parser.add_argument_group("PTESc Options") + + add_arg( + group, + "--petsc", + "SUNDIALS_PETSC", + "ENABLE_PETSC", + "OFF", + "BOOL", + "SUNDIALS 
PETSc support", + ) + + add_arg( + group, + "--petsc-dir", + "PETSC_ROOT", + "PETSC_DIR", + None, + "PATH", + "PETSc install directory", + dependson="--petsc", + ) # Trilinos - group = parser.add_argument_group('Trilinos Options') - - add_arg(group, '--trilinos', 'SUNDIALS_TRILINOS', 'ENABLE_TRILINOS', 'OFF', - 'BOOL', 'SUNDIALS Trilinos support') - - add_arg(group, '--trilinos-dir', 'TRILINOS_ROOT', 'Trilinos_DIR', None, - 'PATH', 'Trilinos install directory', dependson='--trilinos') + group = parser.add_argument_group("Trilinos Options") + + add_arg( + group, + "--trilinos", + "SUNDIALS_TRILINOS", + "ENABLE_TRILINOS", + "OFF", + "BOOL", + "SUNDIALS Trilinos support", + ) + + add_arg( + group, + "--trilinos-dir", + "TRILINOS_ROOT", + "Trilinos_DIR", + None, + "PATH", + "Trilinos install directory", + dependson="--trilinos", + ) # XBraid - group = parser.add_argument_group('XBraid Options') - - add_arg(group, '--xbraid', 'SUNDIALS_XBRAID', 'ENABLE_XBRAID', 'OFF', - 'BOOL', 'SUNDIALS XBraid support') - - add_arg(group, '--xbraid-dir', 'XBRAID_ROOT', 'XBRAID_DIR', None, 'PATH', - 'XBraid install directory', dependson='--xbraid') + group = parser.add_argument_group("XBraid Options") + + add_arg( + group, + "--xbraid", + "SUNDIALS_XBRAID", + "ENABLE_XBRAID", + "OFF", + "BOOL", + "SUNDIALS XBraid support", + ) + + add_arg( + group, + "--xbraid-dir", + "XBRAID_ROOT", + "XBRAID_DIR", + None, + "PATH", + "XBraid install directory", + dependson="--xbraid", + ) # -------- # Testing # -------- - group = parser.add_argument_group('Testing Options') + group = parser.add_argument_group("Testing Options") # development tests - add_arg(group, '--dev-tests', 'SUNDIALS_TEST_DEVTESTS', - 'SUNDIALS_TEST_DEVTESTS', 'OFF', 'BOOL', - 'SUNDIALS development tests') + add_arg( + group, + "--dev-tests", + "SUNDIALS_TEST_DEVTESTS", + "SUNDIALS_TEST_DEVTESTS", + "OFF", + "BOOL", + "SUNDIALS development tests", + ) # unit tests - add_arg(group, '--unit-tests', 'SUNDIALS_TEST_UNITTESTS', - 'SUNDIALS_TEST_UNITTESTS', 'OFF', 'BOOL', - 'SUNDIALS unit tests') - - add_arg(group, '--no-gtest', 'SUNDIALS_TEST_ENABLE_GTEST', - 'SUNDIALS_TEST_ENABLE_GTEST', 'ON', 'BOOL', - 'SUNDIALS GTest unit tests') + add_arg( + group, + "--unit-tests", + "SUNDIALS_TEST_UNITTESTS", + "SUNDIALS_TEST_UNITTESTS", + "OFF", + "BOOL", + "SUNDIALS unit tests", + ) + + add_arg( + group, + "--no-gtest", + "SUNDIALS_TEST_ENABLE_GTEST", + "SUNDIALS_TEST_ENABLE_GTEST", + "ON", + "BOOL", + "SUNDIALS GTest unit tests", + ) # test output directory - add_arg(group, '--test-output-dir', 'SUNDIALS_TEST_OUTPUT_DIR', - 'SUNDIALS_TEST_OUTPUT_DIR', None, 'PATH', - 'SUNDIALS test output directory') + add_arg( + group, + "--test-output-dir", + "SUNDIALS_TEST_OUTPUT_DIR", + "SUNDIALS_TEST_OUTPUT_DIR", + None, + "PATH", + "SUNDIALS test output directory", + ) # test answer directory - add_arg(group, '--test-answer-dir', 'SUNDIALS_TEST_ANSWER_DIR', - 'SUNDIALS_TEST_ANSWER_DIR', None, 'PATH', - 'SUNDIALS test answer directory') + add_arg( + group, + "--test-answer-dir", + "SUNDIALS_TEST_ANSWER_DIR", + "SUNDIALS_TEST_ANSWER_DIR", + None, + "PATH", + "SUNDIALS test answer directory", + ) # test float comparison precision - add_arg(group, '--test-float-precision', 'SUNDIALS_TEST_FLOAT_PRECISION', - 'SUNDIALS_TEST_FLOAT_PRECISION', None, 'STRING', - 'SUNDIALS test float comparison precision') + add_arg( + group, + "--test-float-precision", + "SUNDIALS_TEST_FLOAT_PRECISION", + "SUNDIALS_TEST_FLOAT_PRECISION", + None, + "STRING", + "SUNDIALS test float comparison precision", + 
) # test integer comparison precision - add_arg(group, '--test-integer-precision', - 'SUNDIALS_TEST_INTEGER_PRECISION', - 'SUNDIALS_TEST_INTEGER_PRECISION', None, 'STRING', - 'SUNDIALS test integer comparison precision') - - add_arg(group, '--make-verbose', 'CMAKE_VERBOSE_MAKEFILE', - 'CMAKE_VERBOSE_MAKEFILE', 'OFF', 'BOOL', 'verbose make output') + add_arg( + group, + "--test-integer-precision", + "SUNDIALS_TEST_INTEGER_PRECISION", + "SUNDIALS_TEST_INTEGER_PRECISION", + None, + "STRING", + "SUNDIALS test integer comparison precision", + ) + + add_arg( + group, + "--make-verbose", + "CMAKE_VERBOSE_MAKEFILE", + "CMAKE_VERBOSE_MAKEFILE", + "OFF", + "BOOL", + "verbose make output", + ) # --------------------- # Parse and check args @@ -640,20 +1396,20 @@ def read_env(args): continue # don't overwite options already set at command line - value = args_dict[a]['value'] - default = args_dict[a]['default'] + value = args_dict[a]["value"] + default = args_dict[a]["default"] if value != default: continue # check for environment variable and set value - env_var = args_dict[a]['env_var'] + env_var = args_dict[a]["env_var"] if env_var is None: continue if env_var in os.environ: - args_dict[a]['value'] = os.environ[env_var] + args_dict[a]["value"] = os.environ[env_var] # ----------------------------------------------------------------------------- @@ -661,31 +1417,49 @@ def read_env(args): # ----------------------------------------------------------------------------- -def add_arg(parser, arg, env_var, cmake_var, cmake_default, cmake_type, msg, - choices=None, dependson=None): +def add_arg( + parser, + arg, + env_var, + cmake_var, + cmake_default, + cmake_type, + msg, + choices=None, + dependson=None, +): """Add a command SUNDIALS option command line arg""" # Use underscores in the arg variable name - arg_dest = arg[2:].replace('-', '_') + arg_dest = arg[2:].replace("-", "_") help_msg = msg # Define function to create an argparse SUNDIALS option type - arg_type = cmake_arg(env_var, cmake_var, cmake_default, cmake_type, msg, - choices=choices, dependson=dependson) + arg_type = cmake_arg( + env_var, + cmake_var, + cmake_default, + cmake_type, + msg, + choices=choices, + dependson=dependson, + ) # Replace 'None' with a default string to ensure a dictionary is created # even when a command line input is not provided. This is ensures the # dictionary exists when reading variables from the environment. if cmake_default is None: - cmake_default = '__default_none__' + cmake_default = "__default_none__" # Create command line arg - parser.add_argument(arg, dest=arg_dest, type=arg_type, - default=cmake_default, help=help_msg) + parser.add_argument( + arg, dest=arg_dest, type=arg_type, default=cmake_default, help=help_msg + ) -def cmake_arg(env_var, cmake_var, cmake_default, cmake_type, msg, - choices=None, dependson=None): +def cmake_arg( + env_var, cmake_var, cmake_default, cmake_type, msg, choices=None, dependson=None +): """Function factory for argparse SUNDIALS option type""" def _cmake_arg(str_var): @@ -694,19 +1468,19 @@ def _cmake_arg(str_var): import argparse # check if using None for the default value - if str_var == '__default_none__': + if str_var == "__default_none__": str_var = None # check for valid input options - if cmake_type == 'BOOL' and str_var not in ['ON', 'OFF', None]: - err_msg = 'Invalid option value ' + str_var + '. ' - err_msg += 'Input value must be ON or OFF.' + if cmake_type == "BOOL" and str_var not in ["ON", "OFF", None]: + err_msg = "Invalid option value " + str_var + ". 
" + err_msg += "Input value must be ON or OFF." raise argparse.ArgumentTypeError("Invaid Value for BOOL") if choices is not None and str_var is not None: raise_error = False if ";" in str_var: - for s in str_var.split(';'): + for s in str_var.split(";"): if s not in choices: raise_error = True else: @@ -714,24 +1488,24 @@ def _cmake_arg(str_var): raise_error = True if raise_error: - err_msg = 'Invalid option value ' + str_var + '. ' - err_msg += 'Input value must be ' + err_msg = "Invalid option value " + str_var + ". " + err_msg += "Input value must be " if len(choices) < 3: - err_msg += ' or '.join(choices) + '.' + err_msg += " or ".join(choices) + "." else: - err_msg += ', '.join(choices[:-1]) - err_msg += ', or ' + choices[-1] + '.' + err_msg += ", ".join(choices[:-1]) + err_msg += ", or " + choices[-1] + "." raise argparse.ArgumentTypeError(err_msg) # create dictionary for SUNDIALS option cmake_dict = {} - cmake_dict['env_var'] = env_var - cmake_dict['cmake_var'] = cmake_var - cmake_dict['default'] = cmake_default - cmake_dict['cmake_type'] = cmake_type - cmake_dict['msg'] = msg - cmake_dict['value'] = str_var - cmake_dict['depends_on'] = dependson + cmake_dict["env_var"] = env_var + cmake_dict["cmake_var"] = cmake_var + cmake_dict["default"] = cmake_default + cmake_dict["cmake_type"] = cmake_type + cmake_dict["msg"] = msg + cmake_dict["value"] = str_var + cmake_dict["depends_on"] = dependson return cmake_dict @@ -761,35 +1535,34 @@ def write_cmake(fn, args): # print(a, args_dict[a]) # don't wite output lines if using the default value - value = args_dict[a]['value'] - default = args_dict[a]['default'] + value = args_dict[a]["value"] + default = args_dict[a]["default"] if value is None or value == default: continue # don't wite output if TPL is not enabled - depends_on = args_dict[a]['depends_on'] + depends_on = args_dict[a]["depends_on"] if depends_on is not None: - depends_on = depends_on[2:].replace('-', '_') - depends_on_val = args_dict[depends_on]['value'] + depends_on = depends_on[2:].replace("-", "_") + depends_on_val = args_dict[depends_on]["value"] # print(depends_on, depends_on_val) - if depends_on_val != 'ON': + if depends_on_val != "ON": continue # write CMake output - cmake_var = args_dict[a]['cmake_var'] - cmake_type = args_dict[a]['cmake_type'] - cmake_msg = args_dict[a]['msg'] + cmake_var = args_dict[a]["cmake_var"] + cmake_type = args_dict[a]["cmake_type"] + cmake_msg = args_dict[a]["msg"] - if args.filetype == 'cache': - cmd = (f"set({cmake_var} \"{value}\" CACHE {cmake_type} " - f"\"{cmake_msg}\")\n") + if args.filetype == "cache": + cmd = f'set({cmake_var} "{value}" CACHE {cmake_type} ' f'"{cmake_msg}")\n' else: - cmd = f" \\\n -D {cmake_var}=\"{value}\"" + cmd = f' \\\n -D {cmake_var}="{value}"' fn.write(cmd) @@ -799,13 +1572,15 @@ def setup_file(cmakefile, filename, filetype): import os import stat - if filetype == 'cache': - msg = (f'# CMake cache file for configuring SUNDIALS\n' - f'#\n' - f'# Move this file to your build directory and configure ' - f'SUNDIALS with the\n' - f'# following command:\n' - f'# cmake -C {filename}\n') + if filetype == "cache": + msg = ( + f"# CMake cache file for configuring SUNDIALS\n" + f"#\n" + f"# Move this file to your build directory and configure " + f"SUNDIALS with the\n" + f"# following command:\n" + f"# cmake -C {filename}\n" + ) cmakefile.write(msg) # update permissions to make sure the file is not executable @@ -817,18 +1592,20 @@ def setup_file(cmakefile, filename, filetype): st = os.stat(filename) os.chmod(filename, 
st.st_mode & NO_EXE) else: - msg = (f'#!/bin/bash\n' - f'# Script for configuring SUNDIALS\n' - f'#\n' - f'# Move this file to your build directory and configure ' - f'SUNDIALS with the\n' - f'# following command:\n' - f'# ./{filename} \n' - f'if [ "$#" -lt 1 ]; then\n' - f' echo "ERROR: Path to SUNDIALS source required"\n' - f' exit 1\n' - f'fi\n' - f'cmake $1') + msg = ( + f"#!/bin/bash\n" + f"# Script for configuring SUNDIALS\n" + f"#\n" + f"# Move this file to your build directory and configure " + f"SUNDIALS with the\n" + f"# following command:\n" + f"# ./{filename} \n" + f'if [ "$#" -lt 1 ]; then\n' + f' echo "ERROR: Path to SUNDIALS source required"\n' + f" exit 1\n" + f"fi\n" + f"cmake $1" + ) cmakefile.write(msg) # update permissions to make sure the user can execute the script @@ -858,6 +1635,7 @@ def print_args(args): # ----------------------------------------------------------------------------- -if __name__ == '__main__': +if __name__ == "__main__": import sys + sys.exit(main()) diff --git a/test/notify.py b/test/notify.py index befdd721ab..ee19008d9a 100755 --- a/test/notify.py +++ b/test/notify.py @@ -15,24 +15,29 @@ # Send email notification if a SUNDIALS regression test status # ----------------------------------------------------------------------------- + def main(): import argparse import os parser = argparse.ArgumentParser( - description='Send email notification based on regression test status', - formatter_class=argparse.RawTextHelpFormatter) + description="Send email notification based on regression test status", + formatter_class=argparse.RawTextHelpFormatter, + ) - parser.add_argument('teststatus', type=str, - choices=['passed', 'failed', 'fixed'], - help='Status of regression test') + parser.add_argument( + "teststatus", + type=str, + choices=["passed", "failed", "fixed"], + help="Status of regression test", + ) - parser.add_argument('testname', type=str, - help='Name branch name or pull-request tested') + parser.add_argument( + "testname", type=str, help="Name branch name or pull-request tested" + ) - parser.add_argument('testurl', type=str, - help='URL for viewing test results') + parser.add_argument("testurl", type=str, help="URL for viewing test results") # parse command line args args = parser.parse_args() @@ -41,7 +46,7 @@ def main(): logfile = "suntest.log" # if log file exists add url, otherwise create log file - if (os.path.isfile(logfile)): + if os.path.isfile(logfile): with open(logfile, "a") as log: log.write("View test output at:\n") log.write(args.testurl) @@ -53,7 +58,7 @@ def main(): log.write(args.testurl) # determine notification recipient - special_branches = ['main', 'develop', 'release'] + special_branches = ["main", "develop", "release"] if any(branch in args.testname for branch in special_branches): # SUNDIALS developers list @@ -61,23 +66,23 @@ def main(): else: # author of most recent commit cmd = "git log --format='%ae' -1" - recipient = runCommand(cmd).rstrip().decode('UTF-8') + recipient = runCommand(cmd).rstrip().decode("UTF-8") # check if the last commit was a CI merge - if (recipient == 'nobody@nowhere'): + if recipient == "nobody@nowhere": cmd = "git log HEAD~1 --pretty=format:'%ae' -1" - recipient = runCommand(cmd).rstrip().decode('UTF-8') + recipient = runCommand(cmd).rstrip().decode("UTF-8") # send notification if tests fail, log file not found, or fixed - if (args.teststatus == 'failed'): + if args.teststatus == "failed": - subject = "FAILED: SUNDIALS "+args.testname+" failed regression tests" + subject = "FAILED: SUNDIALS " + 
args.testname + " failed regression tests"

         print("Tests failed, sending notification to", recipient)
         sendEmail(recipient, subject, logfile)

-    elif (args.teststatus == 'fixed'):
+    elif args.teststatus == "fixed":

-        subject = "FIXED: SUNDIALS "+args.testname+" passed regression tests"
+        subject = "FIXED: SUNDIALS " + args.testname + " passed regression tests"

         print("Tests fixed, sending notification to", recipient)
         sendEmail(recipient, subject, logfile)
@@ -94,7 +99,7 @@ def runCommand(cmd):

     cmdout = subprocess.check_output(cmd, shell=True)

-    return(cmdout)
+    return cmdout


 #
@@ -116,13 +121,13 @@ def sendEmail(recipient, subject, message):
     sender = "SUNDIALS.suntest@llnl.gov"

     # email settings
-    msg['Subject'] = subject
-    msg['From'] = sender
-    msg['To'] = recipient
+    msg["Subject"] = subject
+    msg["From"] = sender
+    msg["To"] = recipient

     # Send the message via our own SMTP server, but don't include the
     # envelope header.
-    s = smtplib.SMTP('smtp.llnl.gov')
+    s = smtplib.SMTP("smtp.llnl.gov")
     s.send_message(msg)
     s.quit()

@@ -130,5 +135,5 @@ def sendEmail(recipient, subject, message):
 #
 # just run the main routine
 #
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
diff --git a/test/test_install.py b/test/test_install.py
index 7b2dbe06e6..4dab1c4513 100755
--- a/test/test_install.py
+++ b/test/test_install.py
@@ -19,6 +19,7 @@
 # main routine
 # -----------------------------------------------------------------------------

+
 def main():

     import argparse
@@ -28,29 +29,29 @@ def main():
     import subprocess

     parser = argparse.ArgumentParser(
-        description='Find and build installed examples',
-        formatter_class=argparse.RawTextHelpFormatter)
+        description="Find and build installed examples",
+        formatter_class=argparse.RawTextHelpFormatter,
+    )

-    parser.add_argument('directory', type=str,
-                        help='Directory to search for build files')
+    parser.add_argument(
+        "directory", type=str, help="Directory to search for build files"
+    )

-    parser.add_argument('--cmake', action='store_true',
-                        help='CMake build')
+    parser.add_argument("--cmake", action="store_true", help="CMake build")

-    parser.add_argument('--test', action='store_true',
-                        help='Test builds')
+    parser.add_argument("--test", action="store_true", help="Test builds")

-    parser.add_argument('--clean', action='store_true',
-                        help='Clean builds')
+    parser.add_argument("--clean", action="store_true", help="Clean builds")

-    parser.add_argument('--regex', type=str,
-                        help='Regular expression for filtering example directories')
+    parser.add_argument(
+        "--regex", type=str, help="Regular expression for filtering example directories"
+    )

-    parser.add_argument('-v', '--verbose', action='count', default=0,
-                        help='Verbose output')
+    parser.add_argument(
+        "-v", "--verbose", action="count", default=0, help="Verbose output"
+    )

-    parser.add_argument('--failfast', action='store_true',
-                        help='Stop on first failure')
+    parser.add_argument("--failfast", action="store_true", help="Stop on first failure")

     # parse command line args
     args = parser.parse_args()
@@ -80,7 +81,7 @@ def main():
         # filter files
         if args.regex:
             regex = re.compile(args.regex)
-            buildfiles = [ bf for bf in buildfiles if re.search(regex, bf) ]
+            buildfiles = [bf for bf in buildfiles if re.search(regex, bf)]

         if args.verbose > 0:
             print(f"Total files (filtered): {len(buildfiles)}")
         if args.verbose > 2:
@@ -102,9 +103,12 @@ def main():

         # clean and move on
         if args.clean:
-            ret = subprocess.call('make clean', shell=True,
-                                  stdout=subprocess.DEVNULL,
-                                  stderr=subprocess.DEVNULL)
+            ret = subprocess.call(
+                "make clean",
+                shell=True,
+                stdout=subprocess.DEVNULL,
+                stderr=subprocess.DEVNULL,
+            )
             # return to original directory
             os.chdir(cwd)
             continue
@@ -112,11 +116,14 @@ def main():
         # confgure cmake if necessary
         configfail = False
         if args.cmake:
-            if os.path.isfile('Makefile'):
-                os.remove('Makefile')
-            ret = subprocess.call('cmake -DCMAKE_VERBOSE_MAKEFILE=ON .',
-                                  shell=True, stdout=subprocess.DEVNULL,
-                                  stderr=subprocess.DEVNULL)
+            if os.path.isfile("Makefile"):
+                os.remove("Makefile")
+            ret = subprocess.call(
+                "cmake -DCMAKE_VERBOSE_MAKEFILE=ON .",
+                shell=True,
+                stdout=subprocess.DEVNULL,
+                stderr=subprocess.DEVNULL,
+            )
             if args.verbose > 0:
                 print(f"  Config return: {ret}")
             if ret != 0:
@@ -126,9 +133,9 @@ def main():
         # make examples
         buildfail = False
         if not configfail:
-            ret = subprocess.call('make', shell=True,
-                                  stdout=subprocess.DEVNULL,
-                                  stderr=subprocess.DEVNULL)
+            ret = subprocess.call(
+                "make", shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL
+            )
             if args.verbose > 0:
                 print(f"  Build return: {ret}")
             if ret != 0:
@@ -138,9 +145,12 @@ def main():
         # test examples
         testfail = False
         if not configfail and not buildfail and args.test:
-            ret = subprocess.call('make test', shell=True,
-                                  stdout=subprocess.DEVNULL,
-                                  stderr=subprocess.DEVNULL)
+            ret = subprocess.call(
+                "make test",
+                shell=True,
+                stdout=subprocess.DEVNULL,
+                stderr=subprocess.DEVNULL,
+            )
             if args.verbose > 0:
                 print(f"  Test return: {ret}")
             if ret != 0:
@@ -167,10 +177,12 @@ def main():
     else:
         print("All builds successful.")

+
 # -----------------------------------------------------------------------------
 # run the main routine
 # -----------------------------------------------------------------------------
-if __name__ == '__main__':
+if __name__ == "__main__":
     import sys
+
     sys.exit(main())