diff --git a/.travis.yml b/.travis.yml index 51a546940..3f4f5de17 100644 --- a/.travis.yml +++ b/.travis.yml @@ -4,7 +4,7 @@ language: matrix: include: - os: linux - dist: trusty + dist: xenial compiler: gcc-6 addons: apt: @@ -13,10 +13,13 @@ matrix: - gcc-6 - g++-6 - gfortran-6 - env: CC='gcc-6' CXX='g++-6' FC='gfortran-6' COVERAGE='ON' + - libpython3.5-dev + - cython3 + - python3-numpy + env: CC='gcc-6' CXX='g++-6' FC='gfortran-6' PYTHON_VERSION='3.5' COVERAGE='ON' - os: linux - dist: trusty + dist: xenial compiler: clang-3.6 addons: &clang36 apt: @@ -25,7 +28,10 @@ matrix: - clang-3.6 - g++-7 - gfortran-6 - env: CC='clang-3.6' CXX='clang++-3.6' FC='gfortran-6' + - libpython3.5-dev + - cython3 + - python3-numpy + env: CC='clang-3.6' CXX='clang++-3.6' FC='gfortran-6' PYTHON_VERSION='3.5' - os: linux dist: xenial @@ -39,10 +45,13 @@ matrix: - clang-4.0 - g++-7 - gfortran-6 - env: CC='clang-4.0' CXX='clang++-4.0' FC='gfortran-6' + - libpython3.5-dev + - cython3 + - python3-numpy + env: CC='clang-4.0' CXX='clang++-4.0' FC='gfortran-6' PYTHON_VERSION='3.5' - os: linux - dist: trusty + dist: xenial compiler: gcc-4.4 addons: apt: @@ -51,10 +60,13 @@ matrix: - gcc-4.4 - g++-4.4 - gfortran-4.4 - env: CC='gcc-4.4' CXX='g++-4.4' FC='gfortran-4.4' + - libpython3.5-dev + - cython3 + - python3-numpy + env: CC='gcc-4.4' CXX='g++-4.4' FC='gfortran-4.4' PYTHON_VERSION='3.5' - os: linux - dist: trusty + dist: xenial compiler: gcc-4.7 addons: apt: @@ -63,10 +75,13 @@ matrix: - gcc-4.7 - g++-4.7 - gfortran-4.7 - env: CC='gcc-4.7' CXX='g++-4.7' FC='gfortran-4.7' + - libpython3.5-dev + - cython3 + - python3-numpy + env: CC='gcc-4.7' CXX='g++-4.7' FC='gfortran-4.7' PYTHON_VERSION='3.5' - os: linux - dist: trusty + dist: xenial compiler: gcc-4.8 addons: apt: @@ -75,10 +90,13 @@ matrix: - gcc-4.8 - g++-4.8 - gfortran-4.8 - env: CC='gcc-4.8' CXX='g++-4.8' FC='gfortran-4.8' + - libpython3.5-dev + - cython3 + - python3-numpy + env: CC='gcc-4.8' CXX='g++-4.8' FC='gfortran-4.8' 
PYTHON_VERSION='3.5' - os: linux - dist: trusty + dist: xenial compiler: gcc-4.9 addons: apt: @@ -87,7 +105,10 @@ matrix: - gcc-4.9 - g++-4.9 - gfortran-4.9 - env: CC='gcc-4.9' CXX='g++-4.9' FC='gfortran-4.9' + - libpython3.5-dev + - cython3 + - python3-numpy + env: CC='gcc-4.9' CXX='g++-4.9' FC='gfortran-4.9' PYTHON_VERSION='3.5' - os: linux dist: trusty @@ -99,7 +120,9 @@ matrix: - gcc-5 - g++-5 - gfortran-5 - env: CC='gcc-5' CXX='g++-5' FC='gfortran-5' + - libpython2.7 + - python-pip + env: CC='gcc-5' CXX='g++-5' FC='gfortran-5' PYTHON_VERSION='2.7' - os: linux dist: trusty @@ -111,10 +134,12 @@ matrix: - gcc-6 - g++-6 - gfortran-6 - env: CC='gcc-6' CXX='g++-6' FC='gfortran-6' C_STANDARD='90' + - libpython2.7 + - python-pip + env: CC='gcc-6' CXX='g++-6' FC='gfortran-6' PYTHON_VERSION='2.7' - os: linux - dist: trusty + dist: xenial compiler: gcc-6 addons: apt: @@ -123,10 +148,13 @@ matrix: - gcc-6 - g++-6 - gfortran-6 - env: CC='gcc-6' CXX='g++-6' FC='gfortran-6' C_STANDARD='11' + - libpython3.5-dev + - cython3 + - python3-numpy + env: CC='gcc-6' CXX='g++-6' FC='gfortran-6' PYTHON_VERSION='3.5' C_STANDARD='90' - os: linux - dist: trusty + dist: xenial compiler: gcc-6 addons: apt: @@ -135,10 +163,13 @@ matrix: - gcc-6 - g++-6 - gfortran-6 - env: CC='gcc-6' CXX='g++-6' FC='gfortran-6' CXX_STANDARD='11' + - libpython3.5-dev + - cython3 + - python3-numpy + env: CC='gcc-6' CXX='g++-6' FC='gfortran-6' PYTHON_VERSION='3.5' C_STANDARD='11' - os: linux - dist: trusty + dist: xenial compiler: gcc-6 addons: apt: @@ -147,10 +178,13 @@ matrix: - gcc-6 - g++-6 - gfortran-6 - env: CC='gcc-6' CXX='g++-6' FC='gfortran-6' CXX_STANDARD='14' + - libpython3.5-dev + - cython3 + - python3-numpy + env: CC='gcc-6' CXX='g++-6' FC='gfortran-6' PYTHON_VERSION='3.5' CXX_STANDARD='11' - os: linux - dist: trusty + dist: xenial compiler: gcc-6 addons: apt: @@ -159,29 +193,110 @@ matrix: - gcc-6 - g++-6 - gfortran-6 - env: CC='gcc-6' CXX='g++-6' FC='gfortran-6' + - libpython3.5 + - cython3 + - 
python3-numpy + env: CC='gcc-6' CXX='g++-6' FC='gfortran-6' PYTHON_VERSION='3.5' CXX_STANDARD='14' + + - os: linux + dist: xenial + compiler: gcc-6 + addons: + apt: + sources: ubuntu-toolchain-r-test + packages: + - gcc-6 + - g++-6 + - gfortran-6 + - libpython3.5 + - cython3 + - python3-numpy + env: CC='gcc-6' CXX='g++-6' FC='gfortran-6' PYTHON_VERSION='3.5' + + - os: linux + dist: xenial + compiler: gcc-7 + addons: + apt: + sources: ubuntu-toolchain-r-test + packages: + - gcc-7 + - g++-7 + - gfortran-7 + - libpython3.5 + - cython3 + - python3-numpy + env: CC='gcc-7' CXX='g++-7' FC='gfortran-7' PYTHON_VERSION='3.5' - os: osx osx_image: xcode7.3 compiler: gcc - env: CC='gcc' CXX='g++' + env: CC='gcc' CXX='g++' PYTHON_VERSION='3.5' - os: osx osx_image: xcode8.3 compiler: gcc - env: CC='gcc' CXX='g++' + env: CC='gcc' CXX='g++' PYTHON_VERSION='2.7' - os: osx osx_image: xcode7.3 compiler: clang - env: CC='clang' CXX='clang++' + env: CC='clang' CXX='clang++' PYTHON_VERSION='3.5' - os: osx osx_image: xcode8.3 compiler: clang - env: CC='clang' CXX='clang++' + env: CC='clang' CXX='clang++' PYTHON_VERSION='2.7' script: + - if [ "$TRAVIS_OS_NAME" == "osx" ]; then pyenv root; fi + - | + if [ "$TRAVIS_OS_NAME" = "osx" ] && [ "$PYTHON_VERSION" = "2.7" ]; then + pyenv install 2.7.12; + export PYTHON_INCLUDE_DIR=$(pyenv root)/versions/2.7.12/include/python2.7; + export PYTHON_LIBRARY=$(pyenv root)/versions/2.7.12/lib/libpython2.7.dylib; + export PYTHON_EXECUTABLE=$(pyenv root)/versions/2.7.12/bin/python2.7; + fi + - | + if [ "$TRAVIS_OS_NAME" = "osx" ] && [ "$PYTHON_VERSION" = "3.5" ]; then + pyenv install 3.5.0; + export PYTHON_INCLUDE_DIR=$(pyenv root)/versions/3.5.0/include/python3.5m; + export PYTHON_LIBRARY=$(pyenv root)/versions/3.5.0/lib/libpython3.5m.a; + export PYTHON_EXECUTABLE=$(pyenv root)/versions/3.5.0/bin/python3.5m; + fi + - | + if [ "$TRAVIS_OS_NAME" == "osx" ]; then + $PYTHON_EXECUTABLE -m pip install --upgrade pip; + $PYTHON_EXECUTABLE -m pip install -r 
${TRAVIS_BUILD_DIR}/python/requirements.txt; + fi + + - | + if [ "$TRAVIS_OS_NAME" = "linux" ]; then + export PYTHON_EXECUTABLE=/usr/bin/python$PYTHON_VERSION; + source /etc/lsb-release; + fi + - | + if [ "$TRAVIS_OS_NAME" = "linux" ] && [ "$PYTHON_VERSION" = "2.7" ]; then + export PYTHON_INCLUDE_DIR=/usr/include/python2.7; + export PYTHON_LIBRARY=/usr/lib/x86_64-linux-gnu/libpython2.7.so; + fi + - | + if [ "$TRAVIS_OS_NAME" = "linux" ] && [ "$PYTHON_VERSION" = "3.5" ]; then + export PYTHON_INCLUDE_DIR=/usr/include/python3.5m; + export PYTHON_LIBRARY=/usr/lib/x86_64-linux-gnu/libpython3.5m.so; + fi + - | + if [ "$TRAVIS_OS_NAME" = "linux" ] && [ "$DISTRIB_CODENAME" = "trusty" ] && [ "$PYTHON_VERSION" = "2.7" ]; then + sudo $PYTHON_EXECUTABLE -m pip install --upgrade pip; + sudo $PYTHON_EXECUTABLE -m pip install -r ${TRAVIS_BUILD_DIR}/python/requirements.txt; + fi + - | + if [ "$TRAVIS_OS_NAME" = "linux" ] && [ "$DISTRIB_CODENAME" = "trusty" ] && [ "$PYTHON_VERSION" = "3.5" ]; then + echo "Python 3.5 not supported on Ubuntu Trusty"; + exit 1; + fi + + - printenv | grep PYTHON - ./travis.sh after_success: diff --git a/CMakeLists.txt b/CMakeLists.txt index 91afca8be..c05c8875b 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -206,6 +206,11 @@ endif() #------------------------------------------------------------------------------# # Add source code #------------------------------------------------------------------------------# +include(CTest) +if(BUILD_TESTING) + enable_testing() +endif() + set(ZFP_LIBRARY_PREFIX "" CACHE STRING "Prefix to prepend to the output library name") mark_as_advanced(ZFP_LIBRARY_PREFIX) @@ -227,14 +232,17 @@ if(BUILD_UTILITIES) add_subdirectory(utils) endif() +option(BUILD_PYTHON "Build python bindings for zfp" OFF) +if(BUILD_PYTHON) + add_subdirectory(python) +endif() + option(BUILD_EXAMPLES "Build Examples" OFF) if(BUILD_EXAMPLES) add_subdirectory(examples) endif() -include(CTest) if(BUILD_TESTING) - enable_testing() 
add_subdirectory(tests) endif() diff --git a/cmake/travis.cmake b/cmake/travis.cmake index 83c005437..6d5ab5f7b 100644 --- a/cmake/travis.cmake +++ b/cmake/travis.cmake @@ -11,6 +11,7 @@ set(cfg_options -DCMAKE_C_STANDARD=${C_STANDARD} -DCMAKE_CXX_STANDARD=${CXX_STANDARD} -DBUILD_CFP=${BUILD_CFP} + -DBUILD_PYTHON=${BUILD_PYTHON} -DBUILD_ZFORP=${BUILD_ZFORP} -DZFP_WITH_OPENMP=${BUILD_OPENMP} -DZFP_WITH_CUDA=${BUILD_CUDA} @@ -37,6 +38,15 @@ if(BUILD_CFP) endif() endif() +if(BUILD_PYTHON) + set(CTEST_SITE "${CTEST_SITE}_python") + list(APPEND cfg_options + -DPYTHON_INCLUDE_DIR=$ENV{PYTHON_INCLUDE_DIR} + -DPYTHON_LIBRARY=$ENV{PYTHON_LIBRARY} + -DPYTHON_EXECUTABLE=$ENV{PYTHON_EXECUTABLE} + ) +endif() + if(BUILD_ZFORP) set(CTEST_SITE "${CTEST_SITE}_zforp") endif() diff --git a/docs/source/index.rst b/docs/source/index.rst index 4f6dd42dd..7343a6b04 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -18,6 +18,7 @@ low-level-api bit-stream arrays + python tutorial zfpcmd examples diff --git a/docs/source/python.rst b/docs/source/python.rst new file mode 100644 index 000000000..0806ff10d --- /dev/null +++ b/docs/source/python.rst @@ -0,0 +1,119 @@ +.. include:: defs.rst +.. _python: + +Python +======================= + +.. py:module:: zfp + +Dependencies +------------ + +Minimum Tested Versions: + +* Python: Python 2.7 & Python 3.5 +* Cython: 0.22 +* Numpy: 1.8.0 + +You can install the necessary dependencies using ``pip`` and the zfp +``requirements.txt``:: + + pip install -r $ZFP_ROOT/python/requirements.txt + +Installation +------------ + +To build the python bindings, add ``-DBUILD_PYTHON=on`` to the cmake line. Cmake +will attempt to automatically detect the python installation to use. If cmake +finds multiple python installations, it will use the newest one. To specify a +specific python installation to use, set ``PYTHON_LIBRARY`` and +``PYTHON_INCLUDE_DIR`` in the cmake line. 
Putting it all together:: + + cmake -DBUILD_PYTHON=on -DPYTHON_LIBRARY=/path/to/lib/libpython2.7.so -DPYTHON_INCLUDE_DIR=/path/to/include/python2.7 .. + +Compression +----------- + +.. py:function:: compress_numpy(arr, tolerance = -1, rate = -1, precision = -1, write_header=True) + +Compression through the python bindings currently requires a numpy array +populated with the data to be compressed. The numpy metadata (i.e., shape, +strides, and type) are used to automatically populate the ``zfp_field`` structure. +By default, all that is required to be passed to the compression function is the +numpy array; this will result in a stream that includes a header and is +compressed with the ``reversible`` mode. For example:: + + import zfp + import numpy as np + + my_array = np.arange(1, 20) + compressed_data = zfp.compress_numpy(my_array) + decompressed_array = zfp.decompress_numpy(compressed_data) + + # confirm lossless compression/decompression + np.testing.assert_array_equal(my_array, decompressed_array) + +Using the fixed-accuracy, fixed-rate, or fixed-precision modes simply requires +setting one of the tolerance, rate, or precision arguments, respectively. For example:: + + compressed_data = zfp.compress_numpy(my_array, tolerance=1e-4) + decompressed_array = zfp.decompress_numpy(compressed_data) + + # Note the change from "equal" to "allclose" due to the lossy compression + np.testing.assert_allclose(my_array, decompressed_array, atol=1e-3) + +Since numpy arrays are C-ordered by default and ``zfp_compress`` expects the +fastest changing stride to the first (i.e., Fortran-ordering), +``compress_numpy`` automatically reverses the stride in order to +optimize the compression ratio for C-ordered numpy arrays. Since the +``decompress_numpy`` function also reverses the stride order, data both +compressed and decompressed with the python bindings should have the same shape +before and after. + +.. 
note:: ``decompress_numpy`` requires a header to decompress properly, so do + not use ``write_header=False`` if you intend to decompress the stream with + the python bindings. + +Decompression +------------- + +.. py:function:: decompress_numpy(compressed_data) + +``decompress_numpy`` consumes a compressed stream that includes a header and +produces a numpy array with metadata populated based on the contents of the +header. Stride information is not stored in the zfp header, so the +``decompress_numpy`` function assumes that the array was compressed with the +fastest changing dimension first (typically referred to as Fortran-ordering). +The returned numpy array is in C-ordering (the default for numpy arrays), so the +shape of the returned array is reversed from that of the shape in the +compression header. For example, if the header declares the array to be of +shape (2, 4, 8), then the returned numpy array will have a shape of (8, 4, 2). +Since the ``compress_numpy`` function also reverses the stride order, data both +compressed and decompressed with the python bindings should have the same shape +before and after. + +.. note:: Decompressing a stream without a header requires using the + internal ``_decompress`` python function (or the C API). + +.. py:function:: _decompress(compressed_data, ztype, shape, out=None, tolerance = -1, rate = -1, precision = -1) + +.. warning:: ``_decompress`` is an "experimental" function currently used + internally for testing. It does allow decompression of + streams without headers, but providing too small of an output + buffer or incorrectly specifying the shape or strides can result + in segmentation faults. Use with care. + +Decompresses a compressed stream without a header. If a header is present in +the stream, it will be incorrectly interpreted as compressed data. 
``ztype`` is +a ``zfp_type``, which can be manually specified (e.g., ``zfp.type_int32``) or +generated from a numpy dtype (e.g., ``zfp.dtype_to_ztype(array.dtype)``). If +``out`` is specified, the data is decompressed into the ``out`` buffer. ``out`` +can be a numpy array or a pointer to memory large enough to hold the +decompressed data. Regardless of whether ``out`` is provided or its type, +``_decompress`` always returns a numpy array. If ``out`` is not provided, the +array is allocated for the user, and if ``out`` is provided, then the returned +numpy array is just a pointer to or wrapper around the user-supplied ``out``. If +``out`` is a numpy array, then the shape and type of the numpy array must match +the required arguments ``shape`` and ``ztype``. If you want to avoid this +constraint check, use ``out=ndarray.data`` rather than ``out=ndarray`` when +calling ``_decompress``. diff --git a/python/CMakeLists.txt b/python/CMakeLists.txt new file mode 100644 index 000000000..429fd8fe7 --- /dev/null +++ b/python/CMakeLists.txt @@ -0,0 +1,37 @@ +set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} ${CMAKE_CURRENT_LIST_DIR}/scikit-build-cmake) +set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} ${CMAKE_CURRENT_LIST_DIR}/eyescale-cmake) +include(UseCython) +include(FindPythonExtensions) +include(FindNumPy) + +find_package(PythonInterp REQUIRED) +find_package(PythonLibs REQUIRED) +find_package(PythonExtensions REQUIRED) +find_package(Cython REQUIRED) +find_package(NumPy REQUIRED) + +include_directories(${ZFP_SOURCE_DIR}/include) +include_directories(${PYTHON_NUMPY_INCLUDE_DIR}) + +# cannot reuse the zfp target, use _zfp instead +add_cython_target(_zfp zfp.pyx C) +add_library(_zfp MODULE ${_zfp}) +target_link_libraries(_zfp zfp) +python_extension_module(_zfp) + +# Build to the current binary dir to avoid conflicts with other libraries named zfp +set(PYLIB_BUILD_DIR "${CMAKE_CURRENT_BINARY_DIR}/lib" CACHE PATH "Directory where zfp python library will be built") +set_target_properties(_zfp PROPERTIES 
LIBRARY_OUTPUT_DIRECTORY ${PYLIB_BUILD_DIR}) +# Make sure the final library is called zfp (rather than _zfp) +set_target_properties(_zfp PROPERTIES LIBRARY_OUTPUT_NAME zfp) +# Install to the typical python module directory +set(python_install_lib_dir "lib/python${PYTHON_VERSION_MAJOR}.${PYTHON_VERSION_MINOR}/site-packages/") +install(TARGETS _zfp LIBRARY DESTINATION ${python_install_lib_dir}) + +include_directories(${ZFP_SOURCE_DIR}/tests/utils) +include_directories(${ZFP_SOURCE_DIR}) +add_cython_target(test_utils test_utils.pyx C) +add_library(test_utils MODULE ${test_utils}) +target_link_libraries(test_utils zfp genSmoothRandNumsLib stridedOperationsLib zfpCompressionParamsLib zfpChecksumsLib zfpHashLib) +python_extension_module(test_utils) +set_target_properties(test_utils PROPERTIES LIBRARY_OUTPUT_DIRECTORY ${PYLIB_BUILD_DIR}) diff --git a/python/eyescale-cmake/FindNumPy.cmake b/python/eyescale-cmake/FindNumPy.cmake new file mode 100644 index 000000000..8aba4e696 --- /dev/null +++ b/python/eyescale-cmake/FindNumPy.cmake @@ -0,0 +1,41 @@ +# Find the Python NumPy package +# PYTHON_NUMPY_INCLUDE_DIR +# PYTHON_NUMPY_FOUND +# will be set by this script + +# cmake_minimum_required(VERSION 2.6) + +if(NOT PYTHON_EXECUTABLE) + if(NumPy_FIND_QUIETLY) + find_package(PythonInterp QUIET) + else() + find_package(PythonInterp) + set(__numpy_out 1) + endif() +endif() + +if (PYTHON_EXECUTABLE) + # Find out the include path + execute_process( + COMMAND "${PYTHON_EXECUTABLE}" -c + "from __future__ import print_function\ntry: import numpy; print(numpy.get_include(), end='')\nexcept:pass\n" + OUTPUT_VARIABLE __numpy_path) + # And the version + execute_process( + COMMAND "${PYTHON_EXECUTABLE}" -c + "from __future__ import print_function\ntry: import numpy; print(numpy.__version__, end='')\nexcept:pass\n" + OUTPUT_VARIABLE __numpy_version) +elseif(__numpy_out) + message(STATUS "Python executable not found.") +endif(PYTHON_EXECUTABLE) + +find_path(PYTHON_NUMPY_INCLUDE_DIR 
numpy/arrayobject.h + HINTS "${__numpy_path}" "${PYTHON_INCLUDE_PATH}" NO_DEFAULT_PATH) + +if(PYTHON_NUMPY_INCLUDE_DIR) + set(PYTHON_NUMPY_FOUND 1 CACHE INTERNAL "Python numpy found") +endif(PYTHON_NUMPY_INCLUDE_DIR) + +include(FindPackageHandleStandardArgs) +find_package_handle_standard_args(NumPy REQUIRED_VARS PYTHON_NUMPY_INCLUDE_DIR + VERSION_VAR __numpy_version) diff --git a/python/eyescale-cmake/LICENSE.txt b/python/eyescale-cmake/LICENSE.txt new file mode 100644 index 000000000..307d54e59 --- /dev/null +++ b/python/eyescale-cmake/LICENSE.txt @@ -0,0 +1,26 @@ +Unless otherwise noted in the file, all files in this directory are +licensed under the BSD license, reproduced below. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +- Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. +- Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. +- Neither the name of Eyescale Software GmbH nor the names of its + contributors may be used to endorse or promote products derived from this + software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. diff --git a/python/requirements.txt b/python/requirements.txt new file mode 100644 index 000000000..7f3612988 --- /dev/null +++ b/python/requirements.txt @@ -0,0 +1,3 @@ +cython>=0.22 +numpy>=1.8.0 + diff --git a/python/scikit-build-cmake/FindCython.cmake b/python/scikit-build-cmake/FindCython.cmake new file mode 100644 index 000000000..3d58c4f00 --- /dev/null +++ b/python/scikit-build-cmake/FindCython.cmake @@ -0,0 +1,77 @@ +#.rst: +# +# Find ``cython`` executable. +# +# This module will set the following variables in your project: +# +# ``CYTHON_EXECUTABLE`` +# path to the ``cython`` program +# +# ``CYTHON_VERSION`` +# version of ``cython`` +# +# ``CYTHON_FOUND`` +# true if the program was found +# +# For more information on the Cython project, see http://cython.org/. +# +# *Cython is a language that makes writing C extensions for the Python language +# as easy as Python itself.* +# +#============================================================================= +# Copyright 2011 Kitware, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +#============================================================================= + +# Use the Cython executable that lives next to the Python executable +# if it is a local installation. +find_package(PythonInterp) +if(PYTHONINTERP_FOUND) + get_filename_component(_python_path ${PYTHON_EXECUTABLE} PATH) + find_program(CYTHON_EXECUTABLE + NAMES cython cython.bat cython3 + HINTS ${_python_path} + DOC "path to the cython executable") +else() + find_program(CYTHON_EXECUTABLE + NAMES cython cython.bat cython3 + DOC "path to the cython executable") +endif() + +if(CYTHON_EXECUTABLE) + set(CYTHON_version_command ${CYTHON_EXECUTABLE} --version) + + execute_process(COMMAND ${CYTHON_version_command} + OUTPUT_VARIABLE CYTHON_version_output + ERROR_VARIABLE CYTHON_version_error + RESULT_VARIABLE CYTHON_version_result + OUTPUT_STRIP_TRAILING_WHITESPACE) + + if(NOT ${CYTHON_version_result} EQUAL 0) + set(_error_msg "Command \"${CYTHON_version_command}\" failed with") + set(_error_msg "${_error_msg} output:\n${CYTHON_version_error}") + message(SEND_ERROR "${_error_msg}") + else() + if("${CYTHON_version_output}" MATCHES "^[Cc]ython version ([^,]+)") + set(CYTHON_VERSION "${CMAKE_MATCH_1}") + endif() + endif() +endif() + +include(FindPackageHandleStandardArgs) +FIND_PACKAGE_HANDLE_STANDARD_ARGS(Cython REQUIRED_VARS CYTHON_EXECUTABLE) + +mark_as_advanced(CYTHON_EXECUTABLE) + +include(UseCython) diff --git a/python/scikit-build-cmake/FindPythonExtensions.cmake b/python/scikit-build-cmake/FindPythonExtensions.cmake new file mode 100644 index 000000000..9a3d76a0c --- /dev/null +++ b/python/scikit-build-cmake/FindPythonExtensions.cmake @@ -0,0 +1,573 @@ +#.rst: +# +# This module defines CMake functions to build Python extension modules and +# stand-alone executables. 
+# +# The following variables are defined: +# :: +# +# PYTHON_PREFIX - absolute path to the current Python +# distribution's prefix +# PYTHON_SITE_PACKAGES_DIR - absolute path to the current Python +# distribution's site-packages directory +# PYTHON_RELATIVE_SITE_PACKAGES_DIR - path to the current Python +# distribution's site-packages directory +# relative to its prefix +# PYTHON_SEPARATOR - separator string for file path +# components. Equivalent to ``os.sep`` in +# Python. +# PYTHON_PATH_SEPARATOR - separator string for PATH-style +# environment variables. Equivalent to +# ``os.pathsep`` in Python. +# PYTHON_EXTENSION_MODULE_SUFFIX - suffix of the compiled module. For example, on +# Linux, based on environment, it could be ``.cpython-35m-x86_64-linux-gnu.so``. +# +# +# +# The following functions are defined: +# +# .. cmake:command:: python_extension_module +# +# For libraries meant to be used as Python extension modules, either dynamically +# loaded or directly linked. Amend the configuration of the library target +# (created using ``add_library``) with additional options needed to build and +# use the referenced library as a Python extension module. +# +# python_extension_module( +# [LINKED_MODULES_VAR ] +# [FORWARD_DECL_MODULES_VAR ] +# [MODULE_SUFFIX ]) +# +# Only extension modules that are configured to be built as MODULE libraries can +# be runtime-loaded through the standard Python import mechanism. All other +# modules can only be included in standalone applications that are written to +# expect their presence. In addition to being linked against the libraries for +# these modules, such applications must forward declare their entry points and +# initialize them prior to use. To generate these forward declarations and +# initializations, see ``python_modules_header``. 
+# +# If ```` does not refer to a target, then it is assumed to refer to an +# extension module that is not linked at all, but compiled along with other +# source files directly into an executable. Adding these modules does not cause +# any library configuration modifications, and they are not added to the list of +# linked modules. They still must be forward declared and initialized, however, +# and so are added to the forward declared modules list. +# +# If the associated target is of type ``MODULE_LIBRARY``, the LINK_FLAGS target +# property is used to set symbol visibility and export only the module init function. +# This applies to GNU and MSVC compilers. +# +# Options: +# +# ``LINKED_MODULES_VAR `` +# Name of the variable referencing a list of extension modules whose libraries +# must be linked into the executables of any stand-alone applications that use +# them. By default, the global property ``PY_LINKED_MODULES_LIST`` is used. +# +# ``FORWARD_DECL_MODULES_VAR `` +# Name of the variable referencing a list of extension modules whose entry +# points must be forward declared and called by any stand-alone applications +# that use them. By default, the global property +# ``PY_FORWARD_DECL_MODULES_LIST`` is used. +# +# ``MODULE_SUFFIX `` +# Suffix appended to the python extension module file. +# The default suffix is retrieved using ``sysconfig.get_config_var("SO")"``, +# if not available, the default is then ``.so`` on unix and ``.pyd`` on +# windows. +# Setting the variable ``PYTHON_EXTENSION_MODULE_SUFFIX`` in the caller +# scope defines the value used for all extensions not having a suffix +# explicitly specified using ``MODULE_SUFFIX`` parameter. +# +# +# .. cmake:command:: python_standalone_executable +# +# python_standalone_executable() +# +# For standalone executables that initialize their own Python runtime +# (such as when building source files that include one generated by Cython with +# the --embed option). 
Amend the configuration of the executable target +# (created using ``add_executable``) with additional options needed to properly +# build the referenced executable. +# +# +# .. cmake:command:: python_modules_header +# +# Generate a header file that contains the forward declarations and +# initialization routines for the given list of Python extension modules. +# ```` is the logical name for the header file (no file extensions). +# ```` is the actual destination filename for the header file +# (e.g.: decl_modules.h). +# +# python_modules_header( [HeaderFilename] +# [FORWARD_DECL_MODULES_LIST ] +# [HEADER_OUTPUT_VAR ] +# [INCLUDE_DIR_OUTPUT_VAR ]) +# +# If only ```` is provided, and it ends in the ".h" extension, then it +# is assumed to be the ````. The filename of the header file +# without the extension is used as the logical name. If only ```` is +# provided, and it does not end in the ".h" extension, then the +# ```` is assumed to ``.h``. +# +# The exact contents of the generated header file depend on the logical +# ````. It should be set to a value that corresponds to the target +# application, or for the case of multiple applications, some identifier that +# conveyes its purpose. It is featured in the generated multiple inclusion +# guard as well as the names of the generated initialization routines. +# +# The generated header file includes forward declarations for all listed +# modules, as well as implementations for the following class of routines: +# +# ``int _(void)`` +# Initializes the python extension module, ````. Returns an integer +# handle to the module. +# +# ``void _LoadAllPythonModules(void)`` +# Initializes all listed python extension modules. +# +# ``void CMakeLoadAllPythonModules(void);`` +# Alias for ``_LoadAllPythonModules`` whose name does not depend on +# ````. This function is excluded during preprocessing if the +# preprocessing macro ``EXCLUDE_LOAD_ALL_FUNCTION`` is defined. 
+# +# ``void Py_Initialize_Wrapper();`` +# Wrapper arpund ``Py_Initialize()`` that initializes all listed python +# extension modules. This function is excluded during preprocessing if the +# preprocessing macro ``EXCLUDE_PY_INIT_WRAPPER`` is defined. If this +# function is generated, then ``Py_Initialize()`` is redefined to a macro +# that calls this function. +# +# Options: +# +# ``FORWARD_DECL_MODULES_LIST `` +# List of extension modules for which to generate forward declarations of +# their entry points and their initializations. By default, the global +# property ``PY_FORWARD_DECL_MODULES_LIST`` is used. +# +# ``HEADER_OUTPUT_VAR `` +# Name of the variable to set to the path to the generated header file. By +# default, ```` is used. +# +# ``INCLUDE_DIR_OUTPUT_VAR `` +# Name of the variable to set to the path to the directory containing the +# generated header file. By default, ``_INCLUDE_DIRS`` is used. +# +# Defined variables: +# +# ```` +# The path to the generated header file +# +# ```` +# Directory containing the generated header file +# +# +# Example usage +# ^^^^^^^^^^^^^ +# +# .. 
code-block:: cmake +# +# find_package(PythonInterp) +# find_package(PythonLibs) +# find_package(PythonExtensions) +# find_package(Cython) +# find_package(Boost COMPONENTS python) +# +# # Simple Cython Module -- no executables +# add_cython_target(_module.pyx) +# add_library(_module MODULE ${_module}) +# python_extension_module(_module) +# +# # Mix of Cython-generated code and C++ code using Boost Python +# # Stand-alone executable -- no modules +# include_directories(${Boost_INCLUDE_DIRS}) +# add_cython_target(main.pyx CXX EMBED_MAIN) +# add_executable(main boost_python_module.cxx ${main}) +# target_link_libraries(main ${Boost_LIBRARIES}) +# python_standalone_executable(main) +# +# # stand-alone executable with three extension modules: +# # one statically linked, one dynamically linked, and one loaded at runtime +# # +# # Freely mixes Cython-generated code, code using Boost-Python, and +# # hand-written code using the CPython API. +# +# # module1 -- statically linked +# add_cython_target(module1.pyx) +# add_library(module1 STATIC ${module1}) +# python_extension_module(module1 +# LINKED_MODULES_VAR linked_module_list +# FORWARD_DECL_MODULES_VAR fdecl_module_list) +# +# # module2 -- dynamically linked +# include_directories({Boost_INCLUDE_DIRS}) +# add_library(module2 SHARED boost_module2.cxx) +# target_link_libraries(module2 ${Boost_LIBRARIES}) +# python_extension_module(module2 +# LINKED_MODULES_VAR linked_module_list +# FORWARD_DECL_MODULES_VAR fdecl_module_list) +# +# # module3 -- loaded at runtime +# add_cython_target(module3a.pyx) +# add_library(module1 MODULE ${module3a} module3b.cxx) +# target_link_libraries(module3 ${Boost_LIBRARIES}) +# python_extension_module(module3 +# LINKED_MODULES_VAR linked_module_list +# FORWARD_DECL_MODULES_VAR fdecl_module_list) +# +# # application executable -- generated header file + other source files +# python_modules_header(modules +# FORWARD_DECL_MODULES_LIST ${fdecl_module_list}) +# 
include_directories(${modules_INCLUDE_DIRS}) +# +# add_cython_target(mainA) +# add_cython_target(mainC) +# add_executable(main ${mainA} mainB.cxx ${mainC} mainD.c) +# +# target_link_libraries(main ${linked_module_list} ${Boost_LIBRARIES}) +# python_standalone_executable(main) +# +#============================================================================= +# Copyright 2011 Kitware, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +#============================================================================= + +find_package(PythonInterp REQUIRED) +find_package(PythonLibs) +include(targetLinkLibrariesWithDynamicLookup) + +set(_command " +import distutils.sysconfig +import itertools +import os +import os.path +import site +import sys +import sysconfig + +result = None +rel_result = None +candidate_lists = [] + +try: + candidate_lists.append((distutils.sysconfig.get_python_lib(),)) +except AttributeError: pass + +try: + candidate_lists.append(site.getsitepackages()) +except AttributeError: pass + +try: + candidate_lists.append((site.getusersitepackages(),)) +except AttributeError: pass + +candidates = itertools.chain.from_iterable(candidate_lists) + +for candidate in candidates: + rel_candidate = os.path.relpath( + candidate, sys.prefix) + if not rel_candidate.startswith(\"..\"): + result = candidate + rel_result = rel_candidate + break + +sys.stdout.write(\";\".join(( + os.sep, + os.pathsep, + sys.prefix, + result, + rel_result, + sysconfig.get_config_var('SO') 
+))) +") + +execute_process(COMMAND "${PYTHON_EXECUTABLE}" -c "${_command}" + OUTPUT_VARIABLE _list + RESULT_VARIABLE _result) + +list(GET _list 0 _item) +set(PYTHON_SEPARATOR "${_item}") +mark_as_advanced(PYTHON_SEPARATOR) + +list(GET _list 1 _item) +set(PYTHON_PATH_SEPARATOR "${_item}") +mark_as_advanced(PYTHON_PATH_SEPARATOR) + +list(GET _list 2 _item) +set(PYTHON_PREFIX "${_item}") +mark_as_advanced(PYTHON_PREFIX) + +list(GET _list 3 _item) +set(PYTHON_SITE_PACKAGES_DIR "${_item}") +mark_as_advanced(PYTHON_SITE_PACKAGES_DIR) + +list(GET _list 4 _item) +set(PYTHON_RELATIVE_SITE_PACKAGES_DIR "${_item}") +mark_as_advanced(PYTHON_RELATIVE_SITE_PACKAGES_DIR) + +if(NOT DEFINED PYTHON_EXTENSION_MODULE_SUFFIX) + list(GET _list 5 _item) + set(PYTHON_EXTENSION_MODULE_SUFFIX "${_item}") +endif() + +function(_set_python_extension_symbol_visibility _target) + if(PYTHON_VERSION_MAJOR VERSION_GREATER 2) + set(_modinit_prefix "PyInit_") + else() + set(_modinit_prefix "init") + endif() + message("_modinit_prefix:${_modinit_prefix}") + if("${CMAKE_C_COMPILER_ID}" STREQUAL "MSVC") + set_target_properties(${_target} PROPERTIES LINK_FLAGS + "/EXPORT:${_modinit_prefix}${_target}" + ) + elseif("${CMAKE_C_COMPILER_ID}" STREQUAL "GNU") + set(_script_path + ${CMAKE_CURRENT_BINARY_DIR}/CMakeFiles/${_target}-version-script.map + ) + file(WRITE ${_script_path} + "{global: ${_modinit_prefix}${_target}; local: *; };" + ) + set_property(TARGET ${_target} APPEND_STRING PROPERTY LINK_FLAGS + " -Wl,--version-script=${_script_path}" + ) + endif() +endfunction() + +function(python_extension_module _target) + set(one_ops LINKED_MODULES_VAR FORWARD_DECL_MODULES_VAR MODULE_SUFFIX) + cmake_parse_arguments(_args "" "${one_ops}" "" ${ARGN}) + + set(_lib_type "NA") + if(TARGET ${_target}) + get_property(_lib_type TARGET ${_target} PROPERTY TYPE) + endif() + + set(_is_non_lib TRUE) + + set(_is_static_lib FALSE) + if(_lib_type STREQUAL "STATIC_LIBRARY") + set(_is_static_lib TRUE) + set(_is_non_lib FALSE) + 
endif() + + set(_is_shared_lib FALSE) + if(_lib_type STREQUAL "SHARED_LIBRARY") + set(_is_shared_lib TRUE) + set(_is_non_lib FALSE) + endif() + + set(_is_module_lib FALSE) + if(_lib_type STREQUAL "MODULE_LIBRARY") + set(_is_module_lib TRUE) + set(_is_non_lib FALSE) + endif() + + if(_is_static_lib OR _is_shared_lib OR _is_non_lib) + + if(_is_static_lib OR _is_shared_lib) + if(_args_LINKED_MODULES_VAR) + set(${_args_LINKED_MODULES_VAR} + ${${_args_LINKED_MODULES_VAR}} ${_target} PARENT_SCOPE) + else() + set_property(GLOBAL APPEND PROPERTY PY_LINKED_MODULES_LIST ${_target}) + endif() + endif() + + if(_args_FORWARD_DECL_MODULES_VAR) + set(${_args_FORWARD_DECL_MODULES_VAR} + ${${_args_FORWARD_DECL_MODULES_VAR}} ${_target} PARENT_SCOPE) + else() + set_property(GLOBAL APPEND PROPERTY + PY_FORWARD_DECL_MODULES_LIST ${_target}) + endif() + endif() + + if(NOT _is_non_lib) + include_directories("${PYTHON_INCLUDE_DIRS}") + endif() + + if(_is_module_lib) + set_target_properties(${_target} PROPERTIES + PREFIX "${PYTHON_MODULE_PREFIX}") + endif() + + if(_is_module_lib OR _is_shared_lib) + if(_is_module_lib) + + if(NOT _args_MODULE_SUFFIX) + set(_args_MODULE_SUFFIX "${PYTHON_EXTENSION_MODULE_SUFFIX}") + endif() + + if(_args_MODULE_SUFFIX STREQUAL "" AND WIN32 AND NOT CYGWIN) + set(_args_MODULE_SUFFIX ".pyd") + endif() + + if(NOT _args_MODULE_SUFFIX STREQUAL "") + set_target_properties(${_target} + PROPERTIES SUFFIX ${_args_MODULE_SUFFIX}) + endif() + endif() + + target_link_libraries_with_dynamic_lookup(${_target} ${PYTHON_LIBRARIES}) + + if(_is_module_lib) + #_set_python_extension_symbol_visibility(${_altname}) + endif() + endif() +endfunction() + +function(python_standalone_executable _target) + include_directories(${PYTHON_INCLUDE_DIRS}) + target_link_libraries(${_target} ${PYTHON_LIBRARIES}) +endfunction() + +function(python_modules_header _name) + set(one_ops FORWARD_DECL_MODULES_LIST + HEADER_OUTPUT_VAR + INCLUDE_DIR_OUTPUT_VAR) + cmake_parse_arguments(_args "" "${one_ops}" 
"" ${ARGN}) + + list(GET _args_UNPARSED_ARGUMENTS 0 _arg0) + # if present, use arg0 as the input file path + if(_arg0) + set(_source_file ${_arg0}) + + # otherwise, must determine source file from name, or vice versa + else() + get_filename_component(_name_ext "${_name}" EXT) + + # if extension provided, _name is the source file + if(_name_ext) + set(_source_file ${_name}) + get_filename_component(_name "${_source_file}" NAME_WE) + + # otherwise, assume the source file is ${_name}.h + else() + set(_source_file ${_name}.h) + endif() + endif() + + if(_args_FORWARD_DECL_MODULES_LIST) + set(static_mod_list ${_args_FORWARD_DECL_MODULES_LIST}) + else() + get_property(static_mod_list GLOBAL PROPERTY PY_FORWARD_DECL_MODULES_LIST) + endif() + + string(REPLACE "." "_" _header_name "${_name}") + string(TOUPPER ${_header_name} _header_name_upper) + set(_header_name_upper "_${_header_name_upper}_H") + set(generated_file ${CMAKE_CURRENT_BINARY_DIR}/${_source_file}) + + set(generated_file_tmp "${generated_file}.in") + file(WRITE ${generated_file_tmp} + "/* Created by CMake. DO NOT EDIT; changes will be lost. 
*/\n") + + set(_chunk "") + set(_chunk "${_chunk}#ifndef ${_header_name_upper}\n") + set(_chunk "${_chunk}#define ${_header_name_upper}\n") + set(_chunk "${_chunk}\n") + set(_chunk "${_chunk}#include \n") + set(_chunk "${_chunk}\n") + set(_chunk "${_chunk}#ifdef __cplusplus\n") + set(_chunk "${_chunk}extern \"C\" {\n") + set(_chunk "${_chunk}#endif /* __cplusplus */\n") + set(_chunk "${_chunk}\n") + set(_chunk "${_chunk}#if PY_MAJOR_VERSION < 3\n") + file(APPEND ${generated_file_tmp} "${_chunk}") + + foreach(_module ${static_mod_list}) + file(APPEND ${generated_file_tmp} + "PyMODINIT_FUNC init${PYTHON_MODULE_PREFIX}${_module}(void);\n") + endforeach() + + file(APPEND ${generated_file_tmp} "#else /* PY_MAJOR_VERSION >= 3*/\n") + + foreach(_module ${static_mod_list}) + file(APPEND ${generated_file_tmp} + "PyMODINIT_FUNC PyInit_${PYTHON_MODULE_PREFIX}${_module}(void);\n") + endforeach() + + set(_chunk "") + set(_chunk "${_chunk}#endif /* PY_MAJOR_VERSION >= 3*/\n\n") + set(_chunk "${_chunk}#ifdef __cplusplus\n") + set(_chunk "${_chunk}}\n") + set(_chunk "${_chunk}#endif /* __cplusplus */\n") + set(_chunk "${_chunk}\n") + file(APPEND ${generated_file_tmp} "${_chunk}") + + foreach(_module ${static_mod_list}) + set(_import_function "${_header_name}_${_module}") + set(_prefixed_module "${PYTHON_MODULE_PREFIX}${_module}") + + set(_chunk "") + set(_chunk "${_chunk}int ${_import_function}(void)\n") + set(_chunk "${_chunk}{\n") + set(_chunk "${_chunk} static char name[] = \"${_prefixed_module}\";\n") + set(_chunk "${_chunk} #if PY_MAJOR_VERSION < 3\n") + set(_chunk "${_chunk} return PyImport_AppendInittab(") + set(_chunk "${_chunk}name, init${_prefixed_module});\n") + set(_chunk "${_chunk} #else /* PY_MAJOR_VERSION >= 3 */\n") + set(_chunk "${_chunk} return PyImport_AppendInittab(") + set(_chunk "${_chunk}name, PyInit_${_prefixed_module});\n") + set(_chunk "${_chunk} #endif /* PY_MAJOR_VERSION >= 3 */\n") + set(_chunk "${_chunk}}\n\n") + file(APPEND ${generated_file_tmp} 
"${_chunk}") + endforeach() + + file(APPEND ${generated_file_tmp} + "void ${_header_name}_LoadAllPythonModules(void)\n{\n") + foreach(_module ${static_mod_list}) + file(APPEND ${generated_file_tmp} " ${_header_name}_${_module}();\n") + endforeach() + file(APPEND ${generated_file_tmp} "}\n\n") + + set(_chunk "") + set(_chunk "${_chunk}#ifndef EXCLUDE_LOAD_ALL_FUNCTION\n") + set(_chunk "${_chunk}void CMakeLoadAllPythonModules(void)\n") + set(_chunk "${_chunk}{\n") + set(_chunk "${_chunk} ${_header_name}_LoadAllPythonModules();\n") + set(_chunk "${_chunk}}\n") + set(_chunk "${_chunk}#endif /* !EXCLUDE_LOAD_ALL_FUNCTION */\n\n") + + set(_chunk "${_chunk}#ifndef EXCLUDE_PY_INIT_WRAPPER\n") + set(_chunk "${_chunk}static void Py_Initialize_Wrapper()\n") + set(_chunk "${_chunk}{\n") + set(_chunk "${_chunk} ${_header_name}_LoadAllPythonModules();\n") + set(_chunk "${_chunk} Py_Initialize();\n") + set(_chunk "${_chunk}}\n") + set(_chunk "${_chunk}#define Py_Initialize Py_Initialize_Wrapper\n") + set(_chunk "${_chunk}#endif /* !EXCLUDE_PY_INIT_WRAPPER */\n\n") + + set(_chunk "${_chunk}#endif /* !${_header_name_upper} */\n") + file(APPEND ${generated_file_tmp} "${_chunk}") + + # with configure_file() cmake complains that you may not use a file created + # using file(WRITE) as input file for configure_file() + execute_process(COMMAND ${CMAKE_COMMAND} -E copy_if_different + "${generated_file_tmp}" "${generated_file}" + OUTPUT_QUIET ERROR_QUIET) + + set(_header_output_var ${_name}) + if(_args_HEADER_OUTPUT_VAR) + set(_header_output_var ${_args_HEADER_OUTPUT_VAR}) + endif() + set(${_header_output_var} ${generated_file} PARENT_SCOPE) + + set(_include_dir_var ${_name}_INCLUDE_DIRS) + if(_args_INCLUDE_DIR_OUTPUT_VAR) + set(_include_dir_var ${_args_INCLUDE_DIR_OUTPUT_VAR}) + endif() + set(${_include_dirs_var} ${CMAKE_CURRENT_BINARY_DIR} PARENT_SCOPE) +endfunction() diff --git a/python/scikit-build-cmake/LICENSE b/python/scikit-build-cmake/LICENSE new file mode 100644 index 
000000000..73a9db0f2 --- /dev/null +++ b/python/scikit-build-cmake/LICENSE @@ -0,0 +1,53 @@ +Unless otherwise noted in the file, all files in this directory are +licensed under the MIT license, reproduced below. + +The MIT License (MIT) + +Copyright (c) 2014 Mike Sarahan + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + +This project borrows a great deal from the setup tools of the PyNE project. Here is its license: + +Copyright 2011-2014, the PyNE Development Team. All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, are +permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, this list of + conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright notice, this list + of conditions and the following disclaimer in the documentation and/or other materials + provided with the distribution. 
+ +THIS SOFTWARE IS PROVIDED BY THE PYNE DEVELOPMENT TEAM ``AS IS'' AND ANY EXPRESS OR IMPLIED +WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND +FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL OR +CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF +ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +The views and conclusions contained in the software and documentation are those of the +authors and should not be interpreted as representing official policies, either expressed +or implied, of the stakeholders of the PyNE project or the employers of PyNE developers. diff --git a/python/scikit-build-cmake/UseCython.cmake b/python/scikit-build-cmake/UseCython.cmake new file mode 100644 index 000000000..9a5966488 --- /dev/null +++ b/python/scikit-build-cmake/UseCython.cmake @@ -0,0 +1,389 @@ +#.rst: +# +# The following functions are defined: +# +# .. cmake:command:: add_cython_target +# +# Create a custom rule to generate the source code for a Python extension module +# using cython. +# +# add_cython_target( [] +# [EMBED_MAIN] +# [C | CXX] +# [PY2 | PY3] +# [OUTPUT_VAR ]) +# +# ```` is the name of the new target, and ```` +# is the path to a cython source file. Note that, despite the name, no new +# targets are created by this function. Instead, see ``OUTPUT_VAR`` for +# retrieving the path to the generated source for subsequent targets. +# +# If only ```` is provided, and it ends in the ".pyx" extension, then it +# is assumed to be the ````. The name of the input without the +# extension is used as the target name. 
If only ```` is provided, and it +# does not end in the ".pyx" extension, then the ```` is assumed to +# be ``.pyx``. +# +# The Cython include search path is amended with any entries found in the +# ``INCLUDE_DIRECTORIES`` property of the directory containing the +# ```` file. Use ``include_directories`` to add to the Cython +# include search path. +# +# Options: +# +# ``EMBED_MAIN`` +# Embed a main() function in the generated output (for stand-alone +# applications that initialize their own Python runtime). +# +# ``C | CXX`` +# Force the generation of either a C or C++ file. By default, a C file is +# generated, unless the C language is not enabled for the project; in this +# case, a C++ file is generated by default. +# +# ``PY2 | PY3`` +# Force compilation using either Python-2 or Python-3 syntax and code +# semantics. By default, Python-2 syntax and semantics are used if the major +# version of Python found is 2. Otherwise, Python-3 syntax and sematics are +# used. +# +# ``OUTPUT_VAR `` +# Set the variable ```` in the parent scope to the path to the +# generated source file. By default, ```` is used as the output +# variable name. +# +# Defined variables: +# +# ```` +# The path of the generated source file. +# +# Cache variables that effect the behavior include: +# +# ``CYTHON_ANNOTATE`` +# whether to create an annotated .html file when compiling +# +# ``CYTHON_FLAGS`` +# additional flags to pass to the Cython compiler +# +# Example usage +# ^^^^^^^^^^^^^ +# +# .. code-block:: cmake +# +# find_package(Cython) +# +# # Note: In this case, either one of these arguments may be omitted; their +# # value would have been inferred from that of the other. +# add_cython_target(cy_code cy_code.pyx) +# +# add_library(cy_code MODULE ${cy_code}) +# target_link_libraries(cy_code ...) +# +#============================================================================= +# Copyright 2011 Kitware, Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +#============================================================================= + +# Configuration options. +set(CYTHON_ANNOTATE OFF + CACHE BOOL "Create an annotated .html file when compiling *.pyx.") + +set(CYTHON_FLAGS "" CACHE STRING + "Extra flags to the cython compiler.") +mark_as_advanced(CYTHON_ANNOTATE CYTHON_FLAGS) +string(REGEX REPLACE " " ";" CYTHON_FLAGS_LIST "${CYTHON_FLAGS}") + +find_package(PythonLibs REQUIRED) + +set(CYTHON_CXX_EXTENSION "cxx") +set(CYTHON_C_EXTENSION "c") + +get_property(languages GLOBAL PROPERTY ENABLED_LANGUAGES) + +function(add_cython_target _name) + set(options EMBED_MAIN C CXX PY2 PY3) + set(options1 OUTPUT_VAR) + cmake_parse_arguments(_args "${options}" "${options1}" "" ${ARGN}) + + list(GET _args_UNPARSED_ARGUMENTS 0 _arg0) + + # if provided, use _arg0 as the input file path + if(_arg0) + set(_source_file ${_arg0}) + + # otherwise, must determine source file from name, or vice versa + else() + get_filename_component(_name_ext "${_name}" EXT) + + # if extension provided, _name is the source file + if(_name_ext) + set(_source_file ${_name}) + get_filename_component(_name "${_source_file}" NAME_WE) + + # otherwise, assume the source file is ${_name}.pyx + else() + set(_source_file ${_name}.pyx) + endif() + endif() + + set(_embed_main FALSE) + + if("${PYTHONLIBS_VERSION_STRING}" MATCHES "^2.") + set(_input_syntax "PY2") + else() + set(_input_syntax "PY3") + endif() + + 
if(_args_EMBED_MAIN) + set(_embed_main TRUE) + endif() + + if(_args_C) + set(_output_syntax "C") + endif() + + if(_args_CXX) + set(_output_syntax "CXX") + endif() + + if(_args_PY2) + set(_input_syntax "PY2") + endif() + + if(_args_PY3) + set(_input_syntax "PY3") + endif() + + set(embed_arg "") + if(_embed_main) + set(embed_arg "--embed") + endif() + + set(cxx_arg "") + set(extension "c") + if(_output_syntax STREQUAL "CXX") + set(cxx_arg "--cplus") + set(extension "cxx") + endif() + + set(py_version_arg "") + if(_input_syntax STREQUAL "PY2") + set(py_version_arg "-2") + elseif(_input_syntax STREQUAL "PY3") + set(py_version_arg "-3") + endif() + + set(generated_file "${CMAKE_CURRENT_BINARY_DIR}/${_name}.${extension}") + set_source_files_properties(${generated_file} PROPERTIES GENERATED TRUE) + + set(_output_var ${_name}) + if(_args_OUTPUT_VAR) + set(_output_var ${_args_OUTPUT_VAR}) + endif() + set(${_output_var} ${generated_file} PARENT_SCOPE) + + file(RELATIVE_PATH generated_file_relative + ${CMAKE_BINARY_DIR} ${generated_file}) + + set(comment "Generating ${_output_syntax} source ${generated_file_relative}") + set(cython_include_directories "") + set(pxd_dependencies "") + set(c_header_dependencies "") + + # Get the include directories. + get_source_file_property(pyx_location ${_source_file} LOCATION) + get_filename_component(pyx_path ${pyx_location} PATH) + get_directory_property(cmake_include_directories + DIRECTORY ${pyx_path} + INCLUDE_DIRECTORIES) + list(APPEND cython_include_directories ${cmake_include_directories}) + + # Determine dependencies. + # Add the pxd file with the same basename as the given pyx file. 
+ get_filename_component(pyx_file_basename ${_source_file} NAME_WE) + unset(corresponding_pxd_file CACHE) + find_file(corresponding_pxd_file ${pyx_file_basename}.pxd + PATHS "${pyx_path}" ${cmake_include_directories} + NO_DEFAULT_PATH) + if(corresponding_pxd_file) + list(APPEND pxd_dependencies "${corresponding_pxd_file}") + endif() + + # pxd files to check for additional dependencies + set(pxds_to_check "${_source_file}" "${pxd_dependencies}") + set(pxds_checked "") + set(number_pxds_to_check 1) + while(number_pxds_to_check GREATER 0) + foreach(pxd ${pxds_to_check}) + list(APPEND pxds_checked "${pxd}") + list(REMOVE_ITEM pxds_to_check "${pxd}") + + # look for C headers + file(STRINGS "${pxd}" extern_from_statements + REGEX "cdef[ ]+extern[ ]+from.*$") + foreach(statement ${extern_from_statements}) + # Had trouble getting the quote in the regex + string(REGEX REPLACE + "cdef[ ]+extern[ ]+from[ ]+[\"]([^\"]+)[\"].*" "\\1" + header "${statement}") + unset(header_location CACHE) + find_file(header_location ${header} PATHS ${cmake_include_directories}) + if(header_location) + list(FIND c_header_dependencies "${header_location}" header_idx) + if(${header_idx} LESS 0) + list(APPEND c_header_dependencies "${header_location}") + endif() + endif() + endforeach() + + # check for pxd dependencies + # Look for cimport statements. + set(module_dependencies "") + file(STRINGS "${pxd}" cimport_statements REGEX cimport) + foreach(statement ${cimport_statements}) + if(${statement} MATCHES from) + string(REGEX REPLACE + "from[ ]+([^ ]+).*" "\\1" + module "${statement}") + else() + string(REGEX REPLACE + "cimport[ ]+([^ ]+).*" "\\1" + module "${statement}") + endif() + list(APPEND module_dependencies ${module}) + endforeach() + + # check for pxi dependencies + # Look for include statements. 
+ set(include_dependencies "") + file(STRINGS "${pxd}" include_statements REGEX include) + foreach(statement ${include_statements}) + string(REGEX REPLACE + "include[ ]+[\"]([^\"]+)[\"].*" "\\1" + module "${statement}") + list(APPEND include_dependencies ${module}) + endforeach() + + list(REMOVE_DUPLICATES module_dependencies) + list(REMOVE_DUPLICATES include_dependencies) + + # Add modules to the files to check, if appropriate. + foreach(module ${module_dependencies}) + unset(pxd_location CACHE) + find_file(pxd_location ${module}.pxd + PATHS "${pyx_path}" ${cmake_include_directories} + NO_DEFAULT_PATH) + if(pxd_location) + list(FIND pxds_checked ${pxd_location} pxd_idx) + if(${pxd_idx} LESS 0) + list(FIND pxds_to_check ${pxd_location} pxd_idx) + if(${pxd_idx} LESS 0) + list(APPEND pxds_to_check ${pxd_location}) + list(APPEND pxd_dependencies ${pxd_location}) + endif() # if it is not already going to be checked + endif() # if it has not already been checked + endif() # if pxd file can be found + endforeach() # for each module dependency discovered + + # Add includes to the files to check, if appropriate. + foreach(_include ${include_dependencies}) + unset(pxi_location CACHE) + find_file(pxi_location ${_include} + PATHS "${pyx_path}" ${cmake_include_directories} + NO_DEFAULT_PATH) + if(pxi_location) + list(FIND pxds_checked ${pxi_location} pxd_idx) + if(${pxd_idx} LESS 0) + list(FIND pxds_to_check ${pxi_location} pxd_idx) + if(${pxd_idx} LESS 0) + list(APPEND pxds_to_check ${pxi_location}) + list(APPEND pxd_dependencies ${pxi_location}) + endif() # if it is not already going to be checked + endif() # if it has not already been checked + endif() # if include file can be found + endforeach() # for each include dependency discovered + endforeach() # for each include file to check + + list(LENGTH pxds_to_check number_pxds_to_check) + endwhile() + + # Set additional flags. 
+ set(annotate_arg "") + if(CYTHON_ANNOTATE) + set(annotate_arg "--annotate") + endif() + + set(no_docstrings_arg "") + set(embed_signature_arg "") + if(CMAKE_BUILD_TYPE STREQUAL "MinSizeRel") + set(no_docstrings_arg "--no-docstrings") + else() + set(embed_signature_arg "-Xembedsignature=True") + endif() + + set(cython_debug_arg "") + set(embed_pos_arg "") + set(line_directives_arg "") + if(CMAKE_BUILD_TYPE STREQUAL "Debug" OR + CMAKE_BUILD_TYPE STREQUAL "RelWithDebInfo") + set(cython_debug_arg "--gdb") + set(embed_pos_arg "--embed-positions") + set(line_directives_arg "--line-directives") + endif() + + # Include directory arguments. + list(REMOVE_DUPLICATES cython_include_directories) + set(include_directory_arg "") + foreach(_include_dir ${cython_include_directories}) + set(include_directory_arg + ${include_directory_arg} "--include-dir" "${_include_dir}") + endforeach() + + list(REMOVE_DUPLICATES pxd_dependencies) + list(REMOVE_DUPLICATES c_header_dependencies) + + # Add the command to run the compiler. + add_custom_command(OUTPUT ${generated_file} + COMMAND ${CYTHON_EXECUTABLE} + ARGS ${cxx_arg} ${include_directory_arg} ${py_version_arg} + ${embed_arg} ${annotate_arg} ${no_docstrings_arg} + ${cython_debug_arg} ${embed_pos_arg} ${embed_signature_arg} + ${line_directives_arg} ${CYTHON_FLAGS_LIST} ${pyx_location} + --output-file ${generated_file} + DEPENDS ${_source_file} + ${pxd_dependencies} + IMPLICIT_DEPENDS ${_output_syntax} + ${c_header_dependencies} + COMMENT ${comment}) + + # NOTE(opadron): I thought about making a proper target, but after trying it + # out, I decided that it would be far too convenient to use the same name as + # the target for the extension module (e.g.: for single-file modules): + # + # ... + # add_cython_target(_module.pyx) + # add_library(_module ${_module}) + # ... + # + # The above example would not be possible since the "_module" target name + # would already be taken by the cython target. 
Since I can't think of a + # reason why someone would need the custom target instead of just using the + # generated file directly, I decided to leave this commented out. + # + # add_custom_target(${_name} DEPENDS ${generated_file}) + + # Remove their visibility to the user. + set(corresponding_pxd_file "" CACHE INTERNAL "") + set(header_location "" CACHE INTERNAL "") + set(pxd_location "" CACHE INTERNAL "") +endfunction() diff --git a/python/scikit-build-cmake/targetLinkLibrariesWithDynamicLookup.cmake b/python/scikit-build-cmake/targetLinkLibrariesWithDynamicLookup.cmake new file mode 100644 index 000000000..020fc404a --- /dev/null +++ b/python/scikit-build-cmake/targetLinkLibrariesWithDynamicLookup.cmake @@ -0,0 +1,581 @@ +#.rst: +# +# Public Functions +# ^^^^^^^^^^^^^^^^ +# +# The following functions are defined: +# +# .. cmake:command:: target_link_libraries_with_dynamic_lookup +# +# :: +# +# target_link_libraries_with_dynamic_lookup( []) +# +# +# Useful to "weakly" link a loadable module. For example, it should be used +# when compiling a loadable module when the symbols should be resolve from +# the run-time environment where the module is loaded, and not a specific +# system library. +# +# Like proper linking, except that the given ```` are not necessarily +# linked. Instead, the ```` is produced in a manner that allows for +# symbols unresolved within it to be resolved at runtime, presumably by the +# given ````. If such a target can be produced, the provided +# ```` are not actually linked. +# +# It links a library to a target such that the symbols are resolved at +# run-time not link-time. +# +# The linker is checked to see if it supports undefined +# symbols when linking a shared library. If it does then the library +# is not linked when specified with this function. +# +# On platforms that do not support weak-linking, this function works just +# like ``target_link_libraries``. +# +# .. note:: +# +# For OSX it uses ``undefined dynamic_lookup``. 
This is similar to using +# ``-shared`` on Linux where undefined symbols are ignored. +# +# For more details, see `blog `_ +# from Tim D. Smith. +# +# +# .. cmake:command:: check_dynamic_lookup +# +# Check if the linker requires a command line flag to allow leaving symbols +# unresolved when producing a target of type ```` that is +# weakly-linked against a dependency of type ````. +# +# ```` +# can be one of "STATIC", "SHARED", "MODULE", or "EXE". +# +# ```` +# can be one of "STATIC", "SHARED", or "MODULE". +# +# Long signature: +# +# :: +# +# check_dynamic_lookup( +# +# +# []) +# +# +# Short signature: +# +# :: +# +# check_dynamic_lookup() # set to "MODULE" +# # set to "SHARED" +# +# +# The result is cached between invocations and recomputed only when the value +# of CMake's linker flag list changes; ``CMAKE_STATIC_LINKER_FLAGS`` if +# ```` is "STATIC", and ``CMAKE_SHARED_LINKER_FLAGS`` otherwise. +# +# +# Defined variables: +# +# ```` +# Whether the current C toolchain supports weak-linking for target binaries of +# type ```` that are weakly-linked against a dependency target of +# type ````. +# +# ```` +# List of flags to add to the linker command to produce a working target +# binary of type ```` that is weakly-linked against a dependency +# target of type ````. +# +# ``HAS_DYNAMIC_LOOKUP__`` +# Cached, global alias for ```` +# +# ``DYNAMIC_LOOKUP_FLAGS__`` +# Cached, global alias for ```` +# +# +# Private Functions +# ^^^^^^^^^^^^^^^^^ +# +# The following private functions are defined: +# +# .. warning:: These functions are not part of the scikit-build API. They +# exist purely as an implementation detail and may change from version +# to version without notice, or even be removed. +# +# We mean it. +# +# +# .. cmake:command:: _get_target_type +# +# :: +# +# _get_target_type( ) +# +# +# Shorthand for querying an abbreviated version of the target type +# of the given ````. 
+# +# ```` is set to: +# +# - "STATIC" for a STATIC_LIBRARY, +# - "SHARED" for a SHARED_LIBRARY, +# - "MODULE" for a MODULE_LIBRARY, +# - and "EXE" for an EXECUTABLE. +# +# Defined variables: +# +# ```` +# The abbreviated version of the ````'s type. +# +# +# .. cmake:command:: _test_weak_link_project +# +# :: +# +# _test_weak_link_project( +# +# +# ) +# +# +# Attempt to compile and run a test project where a target of type +# ```` is weakly-linked against a dependency of type ````: +# +# - ```` can be one of "STATIC", "SHARED", "MODULE", or "EXE". +# - ```` can be one of "STATIC", "SHARED", or "MODULE". +# +# Defined variables: +# +# ```` +# Whether the current C toolchain can produce a working target binary of type +# ```` that is weakly-linked against a dependency target of type +# ````. +# +# ```` +# List of flags to add to the linker command to produce a working target +# binary of type ```` that is weakly-linked against a dependency +# target of type ````. +# + +function(_get_target_type result_var target) + set(target_type "SHARED_LIBRARY") + if(TARGET ${target}) + get_property(target_type TARGET ${target} PROPERTY TYPE) + endif() + + set(result "STATIC") + + if(target_type STREQUAL "STATIC_LIBRARY") + set(result "STATIC") + endif() + + if(target_type STREQUAL "SHARED_LIBRARY") + set(result "SHARED") + endif() + + if(target_type STREQUAL "MODULE_LIBRARY") + set(result "MODULE") + endif() + + if(target_type STREQUAL "EXECUTABLE") + set(result "EXE") + endif() + + set(${result_var} ${result} PARENT_SCOPE) +endfunction() + + +function(_test_weak_link_project + target_type + lib_type + can_weak_link_var + project_name) + + set(gnu_ld_ignore "-Wl,--unresolved-symbols=ignore-all") + set(osx_dynamic_lookup "-undefined dynamic_lookup") + set(no_flag "") + + foreach(link_flag_spec gnu_ld_ignore osx_dynamic_lookup no_flag) + set(link_flag "${${link_flag_spec}}") + + set(test_project_dir "${PROJECT_BINARY_DIR}/CMakeTmp") + set(test_project_dir 
"${test_project_dir}/${project_name}") + set(test_project_dir "${test_project_dir}/${link_flag_spec}") + set(test_project_dir "${test_project_dir}/${target_type}") + set(test_project_dir "${test_project_dir}/${lib_type}") + + set(test_project_src_dir "${test_project_dir}/src") + set(test_project_bin_dir "${test_project_dir}/build") + + file(MAKE_DIRECTORY ${test_project_src_dir}) + file(MAKE_DIRECTORY ${test_project_bin_dir}) + + set(mod_type "STATIC") + set(link_mod_lib TRUE) + set(link_exe_lib TRUE) + set(link_exe_mod FALSE) + + if("${target_type}" STREQUAL "EXE") + set(link_exe_lib FALSE) + set(link_exe_mod TRUE) + else() + set(mod_type "${target_type}") + endif() + + if("${mod_type}" STREQUAL "MODULE") + set(link_mod_lib FALSE) + endif() + + + file(WRITE "${test_project_src_dir}/CMakeLists.txt" " + cmake_minimum_required(VERSION ${CMAKE_VERSION}) + project(${project_name} C) + + include_directories(${test_project_src_dir}) + + add_library(number ${lib_type} number.c) + add_library(counter ${mod_type} counter.c) + ") + + if("${mod_type}" STREQUAL "MODULE") + file(APPEND "${test_project_src_dir}/CMakeLists.txt" " + set_target_properties(counter PROPERTIES PREFIX \"\") + ") + endif() + + if(link_mod_lib) + file(APPEND "${test_project_src_dir}/CMakeLists.txt" " + target_link_libraries(counter number) + ") + elseif(NOT link_flag STREQUAL "") + file(APPEND "${test_project_src_dir}/CMakeLists.txt" " + set_target_properties(counter PROPERTIES LINK_FLAGS \"${link_flag}\") + ") + endif() + + file(APPEND "${test_project_src_dir}/CMakeLists.txt" " + add_executable(main main.c) + ") + + if(link_exe_lib) + file(APPEND "${test_project_src_dir}/CMakeLists.txt" " + target_link_libraries(main number) + ") + elseif(NOT link_flag STREQUAL "") + file(APPEND "${test_project_src_dir}/CMakeLists.txt" " + target_link_libraries(main \"${link_flag}\") + ") + endif() + + if(link_exe_mod) + file(APPEND "${test_project_src_dir}/CMakeLists.txt" " + target_link_libraries(main counter) + ") + 
else() + file(APPEND "${test_project_src_dir}/CMakeLists.txt" " + target_link_libraries(main \"${CMAKE_DL_LIBS}\") + ") + endif() + + file(WRITE "${test_project_src_dir}/number.c" " + #include + + static int _number; + void set_number(int number) { _number = number; } + int get_number() { return _number; } + ") + + file(WRITE "${test_project_src_dir}/number.h" " + #ifndef _NUMBER_H + #define _NUMBER_H + extern void set_number(int); + extern int get_number(void); + #endif + ") + + file(WRITE "${test_project_src_dir}/counter.c" " + #include + int count() { + int result = get_number(); + set_number(result + 1); + return result; + } + ") + + file(WRITE "${test_project_src_dir}/counter.h" " + #ifndef _COUNTER_H + #define _COUNTER_H + extern int count(void); + #endif + ") + + file(WRITE "${test_project_src_dir}/main.c" " + #include + #include + #include + ") + + if(NOT link_exe_mod) + file(APPEND "${test_project_src_dir}/main.c" " + #include + ") + endif() + + file(APPEND "${test_project_src_dir}/main.c" " + int my_count() { + int result = get_number(); + set_number(result + 1); + return result; + } + + int main(int argc, char **argv) { + int result; + ") + + if(NOT link_exe_mod) + file(APPEND "${test_project_src_dir}/main.c" " + void *counter_module; + int (*count)(void); + + counter_module = dlopen(\"./counter.so\", RTLD_LAZY | RTLD_GLOBAL); + if(!counter_module) goto error; + + count = dlsym(counter_module, \"count\"); + if(!count) goto error; + ") + endif() + + file(APPEND "${test_project_src_dir}/main.c" " + result = count() != 0 ? EXIT_FAILURE : + my_count() != 1 ? EXIT_FAILURE : + my_count() != 2 ? EXIT_FAILURE : + count() != 3 ? EXIT_FAILURE : + count() != 4 ? EXIT_FAILURE : + count() != 5 ? EXIT_FAILURE : + my_count() != 6 ? 
EXIT_FAILURE : EXIT_SUCCESS; + ") + + if(NOT link_exe_mod) + file(APPEND "${test_project_src_dir}/main.c" " + goto done; + error: + fprintf(stderr, \"Error occured:\\n %s\\n\", dlerror()); + result = 1; + + done: + if(counter_module) dlclose(counter_module); + ") + endif() + + file(APPEND "${test_project_src_dir}/main.c" " + return result; + } + ") + + set(_rpath_arg) + if(APPLE AND ${CMAKE_VERSION} VERSION_GREATER 2.8.11) + set(_rpath_arg "-DCMAKE_MACOSX_RPATH='${CMAKE_MACOSX_RPATH}'") + endif() + + try_compile(project_compiles + "${test_project_bin_dir}" + "${test_project_src_dir}" + "${project_name}" + CMAKE_FLAGS + "-DCMAKE_SHARED_LINKER_FLAGS='${CMAKE_SHARED_LINKER_FLAGS}'" + "-DCMAKE_ENABLE_EXPORTS=ON" + ${_rpath_arg} + OUTPUT_VARIABLE compile_output) + + set(project_works 1) + set(run_output) + + if(project_compiles) + execute_process(COMMAND ${CMAKE_CROSSCOMPILING_EMULATOR} + "${test_project_bin_dir}/main" + WORKING_DIRECTORY "${test_project_bin_dir}" + RESULT_VARIABLE project_works + OUTPUT_VARIABLE run_output + ERROR_VARIABLE run_output) + endif() + + set(test_description + "Weak Link ${target_type} -> ${lib_type} (${link_flag_spec})") + + if(project_works EQUAL 0) + set(project_works TRUE) + message(STATUS "Performing Test ${test_description} - Success") + else() + set(project_works FALSE) + message(STATUS "Performing Test ${test_description} - Failed") + file(APPEND ${CMAKE_BINARY_DIR}/${CMAKE_FILES_DIRECTORY}/CMakeError.log + "Performing Test ${test_description} failed with the " + "following output:\n" + "BUILD\n-----\n${compile_output}\nRUN\n---\n${run_output}\n") + endif() + + set(${can_weak_link_var} ${project_works} PARENT_SCOPE) + if(project_works) + set(${project_name} ${link_flag} PARENT_SCOPE) + break() + endif() + endforeach() +endfunction() + +function(check_dynamic_lookup) + # Two signatures are supported: + + if(ARGC EQUAL "1") + # + # check_dynamic_lookup() + # + set(target_type "MODULE") + set(lib_type "SHARED") + 
set(has_dynamic_lookup_var "${ARGV0}") + set(link_flags_var "unused") + + elseif(ARGC GREATER "2") + # + # check_dynamic_lookup( + # + # + # []) + # + set(target_type "${ARGV0}") + set(lib_type "${ARGV1}") + set(has_dynamic_lookup_var "${ARGV2}") + if(ARGC EQUAL "3") + set(link_flags_var "unused") + else() + set(link_flags_var "${ARGV3}") + endif() + else() + message(FATAL_ERROR "missing arguments") + endif() + + _check_dynamic_lookup( + ${target_type} + ${lib_type} + ${has_dynamic_lookup_var} + ${link_flags_var} + ) + set(${has_dynamic_lookup_var} ${${has_dynamic_lookup_var}} PARENT_SCOPE) + if(NOT "x${link_flags_var}x" STREQUAL "xunusedx") + set(${link_flags_var} ${${link_flags_var}} PARENT_SCOPE) + endif() +endfunction() + +function(_check_dynamic_lookup + target_type + lib_type + has_dynamic_lookup_var + link_flags_var + ) + + # hash the CMAKE_FLAGS passed and check cache to know if we need to rerun + if("${target_type}" STREQUAL "STATIC") + string(MD5 cmake_flags_hash "${CMAKE_STATIC_LINKER_FLAGS}") + else() + string(MD5 cmake_flags_hash "${CMAKE_SHARED_LINKER_FLAGS}") + endif() + + set(cache_var "HAS_DYNAMIC_LOOKUP_${target_type}_${lib_type}") + set(cache_hash_var "HAS_DYNAMIC_LOOKUP_${target_type}_${lib_type}_hash") + set(result_var "DYNAMIC_LOOKUP_FLAGS_${target_type}_${lib_type}") + + if( NOT DEFINED ${cache_hash_var} + OR NOT "${${cache_hash_var}}" STREQUAL "${cmake_flags_hash}") + unset(${cache_var} CACHE) + endif() + + if(NOT DEFINED ${cache_var}) + set(skip_test FALSE) + + if(CMAKE_CROSSCOMPILING AND NOT CMAKE_CROSSCOMPILING_EMULATOR) + set(skip_test TRUE) + endif() + + if(skip_test) + set(has_dynamic_lookup FALSE) + set(link_flags) + else() + _test_weak_link_project(${target_type} + ${lib_type} + has_dynamic_lookup + link_flags) + endif() + + set(caveat " (when linking ${target_type} against ${lib_type})") + + set(${cache_var} "${has_dynamic_lookup}" + CACHE BOOL + "linker supports dynamic lookup for undefined symbols${caveat}") + 
mark_as_advanced(${cache_var}) + + set(${result_var} "${link_flags}" + CACHE STRING + "linker flags for dynamic lookup${caveat}") + mark_as_advanced(${result_var}) + + set(${cache_hash_var} "${cmake_flags_hash}" + CACHE INTERNAL "hashed flags for ${cache_var} check") + endif() + + set(${has_dynamic_lookup_var} "${${cache_var}}" PARENT_SCOPE) + set(${link_flags_var} "${${result_var}}" PARENT_SCOPE) +endfunction() + +function(target_link_libraries_with_dynamic_lookup target) + _get_target_type(target_type ${target}) + + set(link_props) + set(link_items) + set(link_libs) + + foreach(lib ${ARGN}) + _get_target_type(lib_type ${lib}) + check_dynamic_lookup(${target_type} + ${lib_type} + has_dynamic_lookup + dynamic_lookup_flags) + + if(has_dynamic_lookup) + if(dynamic_lookup_flags) + if("${target_type}" STREQUAL "EXE") + list(APPEND link_items "${dynamic_lookup_flags}") + else() + list(APPEND link_props "${dynamic_lookup_flags}") + endif() + endif() + elseif(${lib} MATCHES "(debug|optimized|general)") + # See gh-255 + else() + list(APPEND link_libs "${lib}") + endif() + endforeach() + + if(link_props) + list(REMOVE_DUPLICATES link_props) + endif() + + if(link_items) + list(REMOVE_DUPLICATES link_items) + endif() + + if(link_libs) + list(REMOVE_DUPLICATES link_libs) + endif() + + if(link_props) + set_target_properties(${target} + PROPERTIES LINK_FLAGS "${link_props}") + endif() + + set(links "${link_items}" "${link_libs}") + if(links) + target_link_libraries(${target} "${links}") + endif() +endfunction() diff --git a/python/test_utils.pyx b/python/test_utils.pyx new file mode 100644 index 000000000..e7ed77e06 --- /dev/null +++ b/python/test_utils.pyx @@ -0,0 +1,402 @@ +import cython +from libc.stdlib cimport malloc, free +cimport libc.stdint as stdint +from cython cimport view +from itertools import islice, repeat, chain + +import zfp +cimport zfp + +import numpy as np +cimport numpy as np + +ctypedef stdint.int32_t int32_t +ctypedef stdint.int64_t int64_t +ctypedef 
stdint.uint32_t uint32_t +ctypedef stdint.uint64_t uint64_t + +cdef extern from "genSmoothRandNums.h": + size_t intPow(size_t base, int exponent); + void generateSmoothRandInts64(size_t minTotalElements, + int numDims, + int amplitudeExp, + int64_t** outputArr, + size_t* outputSideLen, + size_t* outputTotalLen); + void generateSmoothRandInts32(size_t minTotalElements, + int numDims, + int amplitudeExp, + int32_t** outputArr32Ptr, + size_t* outputSideLen, + size_t* outputTotalLen); + void generateSmoothRandFloats(size_t minTotalElements, + int numDims, + float** outputArrPtr, + size_t* outputSideLen, + size_t* outputTotalLen); + void generateSmoothRandDoubles(size_t minTotalElements, + int numDims, + double** outputArrPtr, + size_t* outputSideLen, + size_t* outputTotalLen); + +cdef extern from "stridedOperations.h": + ctypedef enum stride_config: + AS_IS = 0, + PERMUTED = 1, + INTERLEAVED = 2, + REVERSED = 3 + + void reverseArray(void* inputArr, + void* outputArr, + size_t inputArrLen, + zfp.zfp_type zfpType); + void interleaveArray(void* inputArr, + void* outputArr, + size_t inputArrLen, + zfp.zfp_type zfpType); + int permuteSquareArray(void* inputArr, + void* outputArr, + size_t sideLen, + int dims, + zfp.zfp_type zfpType); + void getReversedStrides(int dims, + size_t n[4], + int s[4]); + void getInterleavedStrides(int dims, + size_t n[4], + int s[4]); + void getPermutedStrides(int dims, + size_t n[4], + int s[4]); + +cdef extern from "zfpCompressionParams.h": + int computeFixedPrecisionParam(int param); + size_t computeFixedRateParam(int param); + double computeFixedAccuracyParam(int param); + +cdef extern from "zfpChecksums.h": + uint64_t getChecksumOriginalDataBlock(int dims, + zfp.zfp_type type); + uint64_t getChecksumEncodedBlock(int dims, + zfp.zfp_type type); + uint64_t getChecksumEncodedPartialBlock(int dims, + zfp.zfp_type type); + uint64_t getChecksumDecodedBlock(int dims, + zfp.zfp_type type); + uint64_t getChecksumDecodedPartialBlock(int dims, + 
zfp.zfp_type type); + uint64_t getChecksumOriginalDataArray(int dims, + zfp.zfp_type type); + uint64_t getChecksumCompressedBitstream(int dims, + zfp.zfp_type type, + zfp.zfp_mode mode, + int compressParamNum); + uint64_t getChecksumDecompressedArray(int dims, + zfp.zfp_type type, + zfp.zfp_mode mode, + int compressParamNum); + +cdef extern from "zfpHash.h": + uint64_t hashBitstream(uint64_t* ptrStart, + size_t bufsizeBytes); + uint32_t hashArray32(const uint32_t* arr, + size_t nx, + int sx); + uint32_t hashStridedArray32(const uint32_t* arr, + size_t n[4], + int s[4]); + uint64_t hashArray64(const uint64_t* arr, + size_t nx, + int sx); + uint64_t hashStridedArray64(const uint64_t* arr, + size_t n[4], + int s[4]); + +# enums +stride_as_is = AS_IS +stride_permuted = PERMUTED +stride_interleaved = INTERLEAVED +stride_reversed = REVERSED + +# functions +cdef validate_num_dimensions(int dims): + if dims > 4 or dims < 1: + raise ValueError("Unsupported number of dimensions: {}".format(dims)) + +cdef validate_ztype(zfp.zfp_type ztype): + if ztype not in [ + zfp.type_float, + zfp.type_double, + zfp.type_int32, + zfp.type_int64 + ]: + raise ValueError("Unsupported ztype: {}".format(ztype)) + +cdef validate_mode(zfp.zfp_mode mode): + if mode not in [ + zfp.mode_fixed_rate, + zfp.mode_fixed_precision, + zfp.mode_fixed_accuracy, + ]: + raise ValueError("Unsupported mode: {}".format(mode)) + +cdef validate_compress_param(int comp_param): + if comp_param not in range(3): # i.e., [0, 1, 2] + raise ValueError( + "Unsupported compression parameter number: {}".format(comp_param) + ) + +cpdef getRandNumpyArray( + int numDims, + zfp.zfp_type ztype, +): + validate_num_dimensions(numDims) + validate_ztype(ztype) + + cdef size_t minTotalElements = 0 + cdef int amplitudeExp = 0 + + if ztype in [zfp.type_float, zfp.type_double]: + minTotalElements = 1000000 + elif ztype in [zfp.type_int32, zfp.type_int64]: + minTotalElements = 4096 + + cdef int64_t* outputArrInt64 = NULL + cdef int32_t* 
outputArrInt32 = NULL + cdef float* outputArrFloat = NULL + cdef double* outputArrDouble = NULL + cdef size_t outputSideLen = 0 + cdef size_t outputTotalLen = 0 + cdef view.array viewArr = None + + if ztype == zfp.type_int64: + amplitudeExp = 64 - 2 + generateSmoothRandInts64(minTotalElements, + numDims, + amplitudeExp, + &outputArrInt64, + &outputSideLen, + &outputTotalLen) + if numDims == 1: + viewArr = outputArrInt64 + elif numDims == 2: + viewArr = outputArrInt64 + elif numDims == 3: + viewArr = outputArrInt64 + elif numDims == 4: + viewArr = outputArrInt64 + elif ztype == zfp.type_int32: + amplitudeExp = 32 - 2 + generateSmoothRandInts32(minTotalElements, + numDims, + amplitudeExp, + &outputArrInt32, + &outputSideLen, + &outputTotalLen) + if numDims == 1: + viewArr = outputArrInt32 + elif numDims == 2: + viewArr = outputArrInt32 + elif numDims == 3: + viewArr = outputArrInt32 + elif numDims == 4: + viewArr = outputArrInt32 + elif ztype == zfp.type_float: + generateSmoothRandFloats(minTotalElements, + numDims, + &outputArrFloat, + &outputSideLen, + &outputTotalLen) + if numDims == 1: + viewArr = outputArrFloat + elif numDims == 2: + viewArr = outputArrFloat + elif numDims == 3: + viewArr = outputArrFloat + elif numDims == 4: + viewArr = outputArrFloat + elif ztype == zfp.type_double: + generateSmoothRandDoubles(minTotalElements, + numDims, + &outputArrDouble, + &outputSideLen, + &outputTotalLen) + if numDims == 1: + viewArr = outputArrDouble + elif numDims == 2: + viewArr = outputArrDouble + elif numDims == 3: + viewArr = outputArrDouble + elif numDims == 4: + viewArr = outputArrDouble + else: + raise ValueError("Unknown zfp_type: {}".format(ztype)) + + return np.asarray(viewArr) + +cpdef uint64_t getChecksumOrigArray( + int dims, + zfp.zfp_type ztype +): + validate_num_dimensions(dims) + validate_ztype(ztype) + + return getChecksumOriginalDataArray(dims, ztype) + +cpdef uint64_t getChecksumCompArray( + int dims, + zfp.zfp_type ztype, + zfp.zfp_mode mode, + int 
compressParamNum +): + validate_num_dimensions(dims) + validate_ztype(ztype) + validate_mode(mode) + validate_compress_param(compressParamNum) + + return getChecksumCompressedBitstream(dims, ztype, mode, compressParamNum) + +cpdef uint64_t getChecksumDecompArray( + int dims, + zfp.zfp_type ztype, + zfp.zfp_mode mode, + int compressParamNum +): + validate_num_dimensions(dims) + validate_ztype(ztype) + validate_mode(mode) + validate_compress_param(compressParamNum) + + return getChecksumDecompressedArray(dims, ztype, mode, compressParamNum) + +cpdef computeParameterValue(zfp.zfp_mode mode, int param): + validate_mode(mode) + validate_compress_param(param) + + if mode == zfp.mode_fixed_accuracy: + return computeFixedAccuracyParam(param) + elif mode == zfp.mode_fixed_precision: + return computeFixedPrecisionParam(param) + elif mode == zfp.mode_fixed_rate: + return computeFixedRateParam(param) + +cpdef hashStridedArray( + bytes inarray, + zfp.zfp_type ztype, + shape, + strides, +): + cdef char* array = inarray + cdef size_t[4] padded_shape = zfp.gen_padded_int_list(shape) + cdef int[4] padded_strides = zfp.gen_padded_int_list(strides) + + if ztype == zfp.type_int32 or ztype == zfp.type_float: + return hashStridedArray32(array, padded_shape, padded_strides) + elif ztype == zfp.type_int64 or ztype == zfp.type_double: + return hashStridedArray64(array, padded_shape, padded_strides) + +cpdef hashNumpyArray( + np.ndarray nparray, + stride_config stride_conf = AS_IS, +): + dtype = nparray.dtype + if dtype not in [np.int32, np.float32, np.int64, np.float64]: + raise ValueError("Unsupported numpy type: {}".format(dtype)) + if stride_conf not in [AS_IS, PERMUTED, INTERLEAVED, REVERSED]: + raise ValueError("Unsupported stride config: {}".format(stride_conf)) + + size = int(nparray.size) + cdef int[4] strides + cdef size_t[4] shape + if stride_conf in [AS_IS, INTERLEAVED]: + stride_width = 1 if stride_conf is AS_IS else 2 + if dtype == np.int32 or dtype == np.float32: + return 
hashArray32(nparray.data, size, stride_width) + elif dtype == np.int64 or dtype == np.float64: + return hashArray64(nparray.data, size, stride_width) + elif stride_conf in [REVERSED, PERMUTED]: + strides = zfp.gen_padded_int_list( + [x for x in nparray.strides[:nparray.ndim]] + ) + shape = zfp.gen_padded_int_list( + [x for x in nparray.shape[:nparray.ndim]] + ) + if dtype == np.int32 or dtype == np.float32: + return hashStridedArray32(nparray.data, shape, strides) + elif dtype == np.int64 or dtype == np.float64: + return hashStridedArray64(nparray.data, shape, strides) + + +cpdef hashCompressedArray( + bytes array, +): + cdef const char* c_array = array + return hashBitstream( c_array, len(array)) + + +cpdef generateStridedRandomNumpyArray( + stride_config stride, + np.ndarray randomArray, +): + cdef int ndim = randomArray.ndim + shape = [int(x) for x in randomArray.shape[:ndim]] + dtype = randomArray.dtype + cdef zfp.zfp_type ztype = zfp.dtype_to_ztype(dtype) + cdef int[4] strides = [0, 0, 0, 0] + cdef size_t[4] dims = zfp.gen_padded_int_list(shape) + cdef size_t inputLen = len(randomArray) + cdef void* output_array_ptr = NULL + cdef np.ndarray output_array = None + cdef view.array output_array_view = None + + if stride == AS_IS: + # return an unmodified copy + return randomArray.copy(order='K') + elif stride == PERMUTED: + if ndim == 1: + raise ValueError("Permutation not supported on 1D arrays") + output_array = np.empty_like(randomArray, order='K') + getPermutedStrides(ndim, dims, strides) + strides = [int(x) * (randomArray.itemsize) for x in strides] + ret = permuteSquareArray( + randomArray.data, + output_array.data, + dims[0], + ndim, + ztype + ) + if ret != 0: + raise RuntimeError("Error permuting square array") + + return np.lib.stride_tricks.as_strided( + output_array, + shape=[x for x in dims[:ndim]], + strides=reversed([x for x in strides[:ndim]]), + ) + + elif stride == INTERLEAVED: + num_elements = np.prod(shape) + new_shape = [x for x in dims if x > 
0] + new_shape[-1] *= 2 + dims = tuple(zfp.gen_padded_int_list(new_shape, pad=0, length=4)) + + output_array = np.empty( + new_shape, + dtype=dtype + ) + interleaveArray( + randomArray.data, + output_array.data, + num_elements, + ztype + ) + getInterleavedStrides(ndim, dims, strides) + strides = [int(x) * (randomArray.itemsize) for x in strides] + return np.lib.stride_tricks.as_strided( + output_array, + shape=shape, + strides=reversed([x for x in strides[:ndim]]), + ) + else: + raise ValueError("Unsupported_config: {|}".format(stride)) diff --git a/python/zfp.pxd b/python/zfp.pxd new file mode 100644 index 000000000..f812aed6d --- /dev/null +++ b/python/zfp.pxd @@ -0,0 +1,71 @@ +import cython +cimport libc.stdint as stdint + +cdef extern from "bitstream.h": + cdef struct bitstream: + pass + bitstream* stream_open(void* data, size_t); + void stream_close(bitstream* stream); + +cdef extern from "zfp.h": + # enums + ctypedef enum zfp_type: + zfp_type_none = 0, + zfp_type_int32 = 1, + zfp_type_int64 = 2, + zfp_type_float = 3, + zfp_type_double = 4 + + ctypedef enum zfp_mode: + zfp_mode_null = 0, + zfp_mode_expert = 1, + zfp_mode_fixed_rate = 2, + zfp_mode_fixed_precision = 3, + zfp_mode_fixed_accuracy = 4 + + # structs + ctypedef struct zfp_field: + zfp_type _type "type" + cython.uint nx, ny, nz, nw + int sx, sy, sz, sw + void* data + ctypedef struct zfp_stream: + pass + + # include #define's + cython.uint ZFP_HEADER_MAGIC + cython.uint ZFP_HEADER_META + cython.uint ZFP_HEADER_MODE + cython.uint ZFP_HEADER_FULL + + # function definitions + zfp_stream* zfp_stream_open(bitstream* stream); + void zfp_stream_close(zfp_stream* stream); + size_t zfp_stream_maximum_size(const zfp_stream* stream, const zfp_field* field); + void zfp_stream_set_bit_stream(zfp_stream* stream, bitstream* bs); + cython.uint zfp_stream_set_precision(zfp_stream* stream, cython.uint precision); + double zfp_stream_set_accuracy(zfp_stream* stream, double tolerance); + double 
zfp_stream_set_rate(zfp_stream* stream, double rate, zfp_type type, cython.uint dims, int wra); + void zfp_stream_set_reversible(zfp_stream* stream); + stdint.uint64_t zfp_stream_mode(const zfp_stream* zfp); + zfp_mode zfp_stream_set_mode(zfp_stream* stream, stdint.uint64_t mode); + zfp_field* zfp_field_alloc(); + zfp_field* zfp_field_1d(void* pointer, zfp_type, cython.uint nx); + zfp_field* zfp_field_2d(void* pointer, zfp_type, cython.uint nx, cython.uint ny); + zfp_field* zfp_field_3d(void* pointer, zfp_type, cython.uint nx, cython.uint ny, cython.uint nz); + zfp_field* zfp_field_4d(void* pointer, zfp_type, cython.uint nx, cython.uint ny, cython.uint nz, cython.uint nw); + void zfp_field_set_stride_1d(zfp_field* field, int sx); + void zfp_field_set_stride_2d(zfp_field* field, int sx, int sy); + void zfp_field_set_stride_3d(zfp_field* field, int sx, int sy, int sz); + void zfp_field_set_stride_4d(zfp_field* field, int sx, int sy, int sz, int sw); + int zfp_field_stride(const zfp_field* field, int* stride) + void zfp_field_free(zfp_field* field); + zfp_type zfp_field_set_type(zfp_field* field, zfp_type type); + size_t zfp_compress(zfp_stream* stream, const zfp_field* field) nogil; + size_t zfp_decompress(zfp_stream* stream, zfp_field* field) nogil; + size_t zfp_write_header(zfp_stream* stream, const zfp_field* field, cython.uint mask); + size_t zfp_read_header(zfp_stream* stream, zfp_field* field, cython.uint mask); + void zfp_stream_rewind(zfp_stream* stream); + void zfp_field_set_pointer(zfp_field* field, void* pointer) nogil; + +cdef gen_padded_int_list(orig_array, pad=*, length=*) diff --git a/python/zfp.pyx b/python/zfp.pyx new file mode 100644 index 000000000..d0e4f3305 --- /dev/null +++ b/python/zfp.pyx @@ -0,0 +1,355 @@ +import sys +import operator +import functools +import cython +from libc.stdlib cimport malloc, free +from cython cimport view +from cpython cimport array +import array + +import itertools +if sys.version_info[0] == 2: + from itertools 
import izip_longest as zip_longest +elif sys.version_info[0] == 3: + from itertools import zip_longest + +cimport zfp + +import numpy as np +cimport numpy as np + +# export #define's +HEADER_MAGIC = ZFP_HEADER_MAGIC +HEADER_META = ZFP_HEADER_META +HEADER_MODE = ZFP_HEADER_MODE +HEADER_FULL = ZFP_HEADER_FULL + +# export enums +type_none = zfp_type_none +type_int32 = zfp_type_int32 +type_int64 = zfp_type_int64 +type_float = zfp_type_float +type_double = zfp_type_double +mode_null = zfp_mode_null +mode_expert = zfp_mode_expert +mode_fixed_rate = zfp_mode_fixed_rate +mode_fixed_precision = zfp_mode_fixed_precision +mode_fixed_accuracy = zfp_mode_fixed_accuracy + + +cpdef dtype_to_ztype(dtype): + if dtype == np.int32: + return zfp_type_int32 + elif dtype == np.int64: + return zfp_type_int64 + elif dtype == np.float32: + return zfp_type_float + elif dtype == np.float64: + return zfp_type_double + else: + raise TypeError("Unknown dtype: {}".format(dtype)) + +cpdef dtype_to_format(dtype): + # format characters detailed here: + # https://docs.python.org/2/library/array.html#module-array + if dtype == np.int32: + return 'i' # signed int + elif dtype == np.int64: + return 'l' # signed long + elif dtype == np.float32: + return 'f' # float + elif dtype == np.float64: + return 'd' # double + else: + raise TypeError("Unknown dtype: {}".format(dtype)) + +zfp_to_dtype_map = { + zfp_type_int32: np.int32, + zfp_type_int64: np.int64, + zfp_type_float: np.float32, + zfp_type_double: np.float64, +} +cpdef ztype_to_dtype(zfp_type ztype): + try: + return zfp_to_dtype_map[ztype] + except KeyError: + raise ValueError("Unsupported zfp_type {}".format(ztype)) + +cdef zfp_field* _init_field(np.ndarray arr): + shape = arr.shape + cdef int ndim = arr.ndim + cdef zfp_type ztype = dtype_to_ztype(arr.dtype) + cdef zfp_field* field + cdef void* pointer = arr.data + + strides = [int(x) / arr.itemsize for x in arr.strides[:ndim]] + + if ndim == 1: + field = zfp_field_1d(pointer, ztype, shape[0]) + 
zfp_field_set_stride_1d(field, strides[0]) + elif ndim == 2: + field = zfp_field_2d(pointer, ztype, shape[1], shape[0]) + zfp_field_set_stride_2d(field, strides[1], strides[0]) + elif ndim == 3: + field = zfp_field_3d(pointer, ztype, shape[2], shape[1], shape[0]) + zfp_field_set_stride_3d(field, strides[2], strides[1], strides[0]) + elif ndim == 4: + field = zfp_field_4d(pointer, ztype, shape[3], shape[2], shape[1], shape[0]) + zfp_field_set_stride_4d(field, strides[3], strides[2], strides[1], strides[0]) + else: + raise RuntimeError("Greater than 4 dimensions not supported") + + return field + +cdef gen_padded_int_list(orig_array, pad=0, length=4): + return [int(x) for x in + itertools.islice( + itertools.chain( + orig_array, + itertools.repeat(pad) + ), + length + ) + ] + +@cython.final +cdef class Memory: + cdef void* data + def __cinit__(self, size_t size): + self.data = malloc(size) + if self.data == NULL: + raise MemoryError() + cdef void* __enter__(self): + return self.data + def __exit__(self, exc_type, exc_value, exc_tb): + free(self.data) + +cpdef bytes compress_numpy( + np.ndarray arr, + double tolerance = -1, + double rate = -1, + int precision = -1, + write_header=True +): + # Input validation + if arr is None: + raise TypeError("Input array cannot be None") + num_params_set = sum([1 for x in [tolerance, rate, precision] if x >= 0]) + if num_params_set > 1: + raise ValueError("Only one of tolerance, rate, or precision can be set") + + # Setup zfp structs to begin compression + cdef zfp_field* field = _init_field(arr) + cdef zfp_stream* stream = zfp_stream_open(NULL) + + cdef zfp_type ztype = zfp_type_none; + cdef int ndim = arr.ndim; + _set_compression_mode(stream, ztype, ndim, tolerance, rate, precision) + + # Allocate space based on the maximum size potentially required by zfp to + # store the compressed array + cdef bytes compress_str = None + cdef size_t maxsize = zfp_stream_maximum_size(stream, field) + try: + with Memory(maxsize) as data: + 
bstream = stream_open(data, maxsize) + zfp_stream_set_bit_stream(stream, bstream) + zfp_stream_rewind(stream) + # write the full header so we can reconstruct the numpy array on + # decompression + if write_header and zfp_write_header(stream, field, HEADER_FULL) == 0: + raise RuntimeError("Failed to write header to stream") + with nogil: + compressed_size = zfp_compress(stream, field) + if compressed_size == 0: + raise RuntimeError("Failed to write to stream") + # copy the compressed data into a perfectly sized bytes object + compress_str = (data)[:compressed_size] + finally: + zfp_field_free(field) + zfp_stream_close(stream) + stream_close(bstream) + + return compress_str + +cdef view.array _decompress_with_view( + zfp_field* field, + zfp_stream* stream, +): + cdef zfp_type ztype = field[0]._type + dtype = ztype_to_dtype(ztype) + format_type = dtype_to_format(dtype) + + shape = (field[0].nw, field[0].nz, field[0].ny, field[0].nx) + shape = tuple([x for x in shape if x > 0]) + + cdef view.array decomp_arr = view.array( + shape, + itemsize=np.dtype(dtype).itemsize, + format=format_type, + allocate_buffer=True + ) + cdef void* pointer = decomp_arr.data + with nogil: + zfp_field_set_pointer(field, pointer) + ret = zfp_decompress(stream, field) + if ret == 0: + raise RuntimeError("error during zfp decompression") + return decomp_arr + +cdef _decompress_with_user_array( + zfp_field* field, + zfp_stream* stream, + void* out, +): + with nogil: + zfp_field_set_pointer(field, out) + ret = zfp_decompress(stream, field) + if ret == 0: + raise RuntimeError("error during zfp decompression") + +cdef _set_compression_mode( + zfp_stream *stream, + zfp_type ztype, + int ndim, + double tolerance = -1, + double rate = -1, + int precision = -1, +): + if tolerance >= 0: + zfp_stream_set_accuracy(stream, tolerance) + elif rate >= 0: + zfp_stream_set_rate(stream, rate, ztype, ndim, 0) + elif precision >= 0: + zfp_stream_set_precision(stream, precision) + else: + 
zfp_stream_set_reversible(stream) + +cdef _validate_4d_list(in_list, list_name): + # Validate that the input list is either a valid list for strides or shape + # Specifically, check it is a list and the length is > 0 and <= 4 + # Throws a TypeError or ValueError if invalid + try: + if len(in_list) > 4: + raise ValueError( + "User-provided {} has too many dimensions " + "(up to 4 supported)" + ) + elif len(in_list) <= 0: + raise ValueError( + "User-provided {} needs at least one dimension" + ) + except TypeError: + raise TypeError( + "User-provided {} is not an iterable" + ) + +cpdef np.ndarray _decompress( + bytes compressed_data, + zfp_type ztype, + shape, + out=None, + double tolerance = -1, + double rate = -1, + int precision = -1, +): + + if compressed_data is None: + raise TypeError("compressed_data cannot be None") + if compressed_data is out: + raise ValueError("Cannot decompress in-place") + _validate_4d_list(shape, "shape") + + cdef char* comp_data_pointer = compressed_data + cdef zfp_field* field = zfp_field_alloc() + cdef bitstream* bstream = stream_open( + comp_data_pointer, + len(compressed_data) + ) + cdef zfp_stream* stream = zfp_stream_open(bstream) + cdef np.ndarray output + + try: + zfp_stream_rewind(stream) + zshape = gen_padded_int_list(reversed(shape), pad=0, length=4) + # set the shape, type, and compression mode + # strides are set further down + field[0].nx, field[0].ny, field[0].nz, field[0].nw = zshape + zfp_field_set_type(field, ztype) + ndim = sum([1 for x in zshape if x > 0]) + _set_compression_mode(stream, ztype, ndim, tolerance, rate, precision) + + # pad the shape with zeros to reach len == 4 + # strides = gen_padded_int_list(reversed(strides), pad=0, length=4) + # field[0].sx, field[0].sy, field[0].sz, field[0].sw = strides + + if out is None: + output = np.asarray(_decompress_with_view(field, stream)) + else: + dtype = zfp.ztype_to_dtype(ztype) + if isinstance(out, np.ndarray): + output = out + + # check that numpy and 
user-provided types match + if out.dtype != dtype: + raise ValueError( + "Out ndarray has dtype {} but decompression is using " + "{}. Use out=ndarray.data to avoid this check.".format( + out.dtype, + dtype + ) + ) + + # check that numpy and user-provided shape match + numpy_shape = out.shape + user_shape = [x for x in shape if x > 0] + if not all( + [x == y for x, y in + zip_longest(numpy_shape, user_shape) + ] + ): + raise ValueError( + "Out ndarray has shape {} but decompression is using " + "{}. Use out=ndarray.data to avoid this check.".format( + numpy_shape, + user_shape + ) + ) + else: + output = np.frombuffer(out, dtype=dtype) + output = output.reshape(shape) + + _decompress_with_user_array(field, stream, output.data) + + finally: + zfp_field_free(field) + zfp_stream_close(stream) + stream_close(bstream) + + return output + +cpdef np.ndarray decompress_numpy( + bytes compressed_data, +): + if compressed_data is None: + raise TypeError("compressed_data cannot be None") + + cdef char* comp_data_pointer = compressed_data + cdef zfp_field* field = zfp_field_alloc() + cdef bitstream* bstream = stream_open( + comp_data_pointer, + len(compressed_data) + ) + cdef zfp_stream* stream = zfp_stream_open(bstream) + cdef np.ndarray output + + try: + if zfp_read_header(stream, field, HEADER_FULL) == 0: + raise ValueError("Failed to read required zfp header") + output = np.asarray(_decompress_with_view(field, stream)) + finally: + zfp_field_free(field) + zfp_stream_close(stream) + stream_close(bstream) + + return output diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 3d5611da2..f39f775bb 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -128,3 +128,7 @@ if(ZFP_BUILD_TESTING_LARGE) endforeach() endforeach() endif() + +if(BUILD_PYTHON) + add_subdirectory(python) +endif() diff --git a/tests/python/CMakeLists.txt b/tests/python/CMakeLists.txt new file mode 100644 index 000000000..163a95d16 --- /dev/null +++ b/tests/python/CMakeLists.txt @@ -0,0 
+1,6 @@ +add_test(NAME test_numpy + COMMAND ${PYTHON_EXECUTABLE} test_numpy.py + WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}) + +set_tests_properties(test_numpy PROPERTIES + ENVIRONMENT PYTHONPATH=${PYLIB_BUILD_DIR}:$ENV{PYTHONPATH}) diff --git a/tests/python/test_numpy.py b/tests/python/test_numpy.py new file mode 100644 index 000000000..4443e76ba --- /dev/null +++ b/tests/python/test_numpy.py @@ -0,0 +1,192 @@ +#!/usr/bin/env python + +import unittest + +import zfp +import test_utils +import numpy as np +try: + from packaging.version import parse as version_parse +except ImportError: + version_parse = None + + +class TestNumpy(unittest.TestCase): + def lossless_round_trip(self, orig_array): + compressed_array = zfp.compress_numpy(orig_array, write_header=True) + decompressed_array = zfp.decompress_numpy(compressed_array) + self.assertIsNone(np.testing.assert_array_equal(decompressed_array, orig_array)) + + def test_different_dimensions(self): + for dimensions in range(1, 5): + shape = [5] * dimensions + c_array = np.random.rand(*shape) + self.lossless_round_trip(c_array) + + shape = range(2, 2 + dimensions) + c_array = np.random.rand(*shape) + self.lossless_round_trip(c_array) + + def test_different_dtypes(self): + shape = (5, 5) + num_elements = shape[0] * shape[1] + + for dtype in [np.float32, np.float64]: + elements = np.random.random_sample(num_elements) + elements = elements.astype(dtype, casting="same_kind") + array = np.reshape(elements, newshape=shape) + self.lossless_round_trip(array) + + if (version_parse is not None and + (version_parse(np.__version__) >= version_parse("1.11.0")) + ): + for dtype in [np.int32, np.int64]: + array = np.random.randint(2**30, size=shape, dtype=dtype) + self.lossless_round_trip(array) + else: + array = np.random.randint(2**30, size=shape) + self.lossless_round_trip(array) + + def test_advanced_decompression_checksum(self): + ndims = 2 + ztype = zfp.type_float + random_array = test_utils.getRandNumpyArray(ndims, ztype) + 
mode = zfp.mode_fixed_accuracy + compress_param_num = 1 + compression_kwargs = { + "tolerance": test_utils.computeParameterValue( + mode, + compress_param_num + ), + } + compressed_array = zfp.compress_numpy( + random_array, + write_header=False, + **compression_kwargs + ) + + # Decompression using the "advanced" interface which enforces no header, + # and the user must provide all the metadata + decompressed_array = np.empty_like(random_array) + zfp._decompress( + compressed_array, + ztype, + random_array.shape, + out=decompressed_array, + **compression_kwargs + ) + decompressed_checksum = test_utils.getChecksumDecompArray( + ndims, + ztype, + mode, + compress_param_num + ) + actual_checksum = test_utils.hashNumpyArray( + decompressed_array + ) + self.assertEqual(decompressed_checksum, actual_checksum) + + def test_advanced_decompression_nonsquare(self): + for dimensions in range(1, 5): + shape = range(2, 2 + dimensions) + random_array = np.random.rand(*shape) + + decompressed_array = np.empty_like(random_array) + compressed_array = zfp.compress_numpy( + random_array, + write_header=False, + ) + zfp._decompress( + compressed_array, + zfp.dtype_to_ztype(random_array.dtype), + random_array.shape, + out= decompressed_array, + ) + self.assertIsNone(np.testing.assert_array_equal(decompressed_array, random_array)) + + def test_utils(self): + for ndims in range(1, 5): + for ztype, ztype_str in [ + (zfp.type_float, "float"), + (zfp.type_double, "double"), + (zfp.type_int32, "int32"), + (zfp.type_int64, "int64"), + ]: + orig_random_array = test_utils.getRandNumpyArray(ndims, ztype) + orig_checksum = test_utils.getChecksumOrigArray(ndims, ztype) + actual_checksum = test_utils.hashNumpyArray(orig_random_array) + self.assertEqual(orig_checksum, actual_checksum) + + for stride_str, stride_config in [ + ("as_is", test_utils.stride_as_is), + ("permuted", test_utils.stride_permuted), + ("interleaved", test_utils.stride_interleaved), + #("reversed", test_utils.stride_reversed), + 
]: + # permuting a 1D array is not supported + if stride_config == test_utils.stride_permuted and ndims == 1: + continue + random_array = test_utils.generateStridedRandomNumpyArray( + stride_config, + orig_random_array + ) + self.assertTrue(np.equal(orig_random_array, random_array).all()) + + for compress_param_num in range(3): + modes = [(zfp.mode_fixed_accuracy, "tolerance"), + (zfp.mode_fixed_precision, "precision"), + (zfp.mode_fixed_rate, "rate")] + if ztype in [zfp.type_int32, zfp.type_int64]: + modes = [modes[-1]] # only fixed-rate is supported for integers + for mode, mode_str in modes: + # Compression + compression_kwargs = { + mode_str: test_utils.computeParameterValue( + mode, + compress_param_num + ), + } + + compressed_array = zfp.compress_numpy( + random_array, + write_header=False, + **compression_kwargs + ) + compressed_checksum = test_utils.getChecksumCompArray( + ndims, + ztype, + mode, + compress_param_num + ) + actual_checksum = test_utils.hashCompressedArray( + compressed_array + ) + self.assertEqual(compressed_checksum, actual_checksum) + + # Decompression + decompressed_checksum = test_utils.getChecksumDecompArray( + ndims, + ztype, + mode, + compress_param_num + ) + + # Decompression using the "public" interface + # requires a header, so re-compress with the header + # included in the stream + compressed_array = zfp.compress_numpy( + random_array, + write_header=True, + **compression_kwargs + ) + decompressed_array = zfp.decompress_numpy( + compressed_array, + ) + actual_checksum = test_utils.hashNumpyArray( + decompressed_array + ) + self.assertEqual(decompressed_checksum, actual_checksum) + + +if __name__ == "__main__": + unittest.main(verbosity=2) diff --git a/travis.sh b/travis.sh index 09a563069..f71aa9153 100755 --- a/travis.sh +++ b/travis.sh @@ -1,21 +1,27 @@ #!/usr/bin/env sh set -e +# pass additional args in $1 (starting with whitespace character) +run_all () { + run_all_cmd="ctest -V -C Debug -DC_STANDARD=${C_STANDARD:-99} 
-DCXX_STANDARD=${CXX_STANDARD:-98} -S \"$TRAVIS_BUILD_DIR/cmake/travis.cmake\"" + eval "${run_all_cmd}$1" +} + mkdir build cd build if [ -n "${COVERAGE}" ]; then # build (linux) - ctest -V -C "Debug" -DC_STANDARD=${C_STANDARD:-99} -DCXX_STANDARD=${CXX_STANDARD:-98} -DBUILD_CFP=ON -DBUILD_ZFORP=ON -DBUILD_OPENMP=ON -DBUILD_CUDA=OFF -DWITH_COVERAGE=ON -S $TRAVIS_BUILD_DIR/cmake/travis.cmake + run_all " -DBUILD_CFP=ON -DBUILD_PYTHON=ON -DBUILD_ZFORP=ON -DZFP_WITH_ALIGNED_ALLOC=1 -DBUILD_OPENMP=ON -DBUILD_CUDA=OFF -DWITH_COVERAGE=ON" else - # build/test without OpenMP, with CFP (and custom namespace), with Fortran (linux only) + # build/test without OpenMP, with CFP (and custom namespace), with zfPy, with Fortran (linux only) if [[ "$OSTYPE" == "darwin"* ]]; then BUILD_ZFORP=OFF else BUILD_ZFORP=ON fi - ctest -V -C "Debug" -DC_STANDARD=${C_STANDARD:-99} -DCXX_STANDARD=${CXX_STANDARD:-98} -DBUILD_CFP=ON -DCFP_NAMESPACE=cfp2 -DZFP_WITH_ALIGNED_ALLOC=1 -DBUILD_ZFORP=${BUILD_ZFORP} -DBUILD_OPENMP=OFF -DBUILD_CUDA=OFF -S $TRAVIS_BUILD_DIR/cmake/travis.cmake + run_all " -DBUILD_CFP=ON -DCFP_NAMESPACE=cfp2 -DBUILD_PYTHON=ON -DBUILD_ZFORP=${BUILD_ZFORP} -DZFP_WITH_ALIGNED_ALLOC=1 -DBUILD_OPENMP=OFF -DBUILD_CUDA=OFF" rm -rf ./* ; @@ -24,6 +30,6 @@ else rm -rf ./* ; # build/test with OpenMP - ctest -V -C "Debug" -DC_STANDARD=${C_STANDARD:-99} -DCXX_STANDARD=${CXX_STANDARD:-98} -DBUILD_OPENMP=ON -S $TRAVIS_BUILD_DIR/cmake/travis.cmake + run_all " -DBUILD_OPENMP=ON" fi fi