diff --git a/.github/workflows/flake.yml b/.github/workflows/flake.yml
index 0f5fe3e71e..bd02f98013 100644
--- a/.github/workflows/flake.yml
+++ b/.github/workflows/flake.yml
@@ -21,4 +21,4 @@ jobs:
     - name: Test
       run: |
         pip install flake8
-        flake8 tools modules/cnmultifit/pyext modules/EMageFit/pyext modules/parallel/pyext modules/test/pyext modules/mmcif/pyext modules/statistics/pyext modules/pepdock/pyext modules/saxs_merge/bin/saxs_merge
+        flake8 tools modules/cnmultifit/pyext modules/EMageFit/pyext modules/parallel/pyext modules/test/pyext modules/mmcif/pyext modules/statistics/pyext modules/pepdock/pyext modules/saxs_merge/bin/saxs_merge modules/isd/pyext modules/multifit/pyext modules/spatiotemporal/pyext
diff --git a/.gitmodules b/.gitmodules
index c23fbf7e50..26bbff11c8 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -18,3 +18,7 @@
 	path = modules/sampcon
 	url = https://github.com/salilab/imp-sampcon.git
 	branch = imp_module
+[submodule "modules/nestor"]
+	path = modules/nestor
+	url = https://github.com/isblab/nestor.git
+	branch = imp-integration
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 9339eba0b9..1ecc6ebc3b 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -57,6 +57,11 @@ if(POLICY CMP0056)
 cmake_policy(SET CMP0056 NEW)
 endif(POLICY CMP0056)
 
+# Don't complain about our use of FindCUDA for now
+if(POLICY CMP0146)
+cmake_policy(SET CMP0146 OLD)
+endif(POLICY CMP0146)
+
 include(${CMAKE_SOURCE_DIR}/cmake_modules/IMPExecuteProcess.cmake)
 include(${CMAKE_SOURCE_DIR}/cmake_modules/IMPAddTests.cmake)
 include(${CMAKE_SOURCE_DIR}/cmake_modules/CheckCompiles.cmake)
@@ -488,6 +493,9 @@ file(WRITE ${CMAKE_BINARY_DIR}/IMPConfig.cmake
        "set(IMP_SWIG_DIR \"${CMAKE_BINARY_DIR}/swig\" )\n"
        "set(RMF_SWIG_DIR \"${CMAKE_SOURCE_DIR}/modules/rmf/dependency/RMF/swig\" )\n"
        "set(IMP_MODULES_DIR \"${CMAKE_SOURCE_DIR}/cmake_modules\" )\n"
+       "set(IMP_CXX_COMPILER \"${CMAKE_CXX_COMPILER}\" )\n"
+       "set(IMP_CXX_FLAGS \"${CMAKE_CXX_FLAGS} ${CMAKE_CXX_FLAGS_RELEASE}\" )\n"
+       "set(IMP_OSX_SYSROOT \"${CMAKE_OSX_SYSROOT}\" )\n"
       "set(RMF_INCLUDE_PATH \"${RMF_INCLUDE_PATH}\" )\n"
       "set(IMP_USE_FILE \"\${IMP_USE_DIR}/UseIMP.cmake\" )\n")
 # Installed locations
@@ -505,6 +513,9 @@ file(WRITE ${CMAKE_BINARY_DIR}/cmake/IMPConfig.cmake
       "set(RMF_SWIG_DIR \"${CMAKE_INSTALL_FULL_SWIGDIR}\" )\n"
       "set(IMP_MODULES_DIR \"${CMAKE_INSTALL_FULL_CMAKEDIR}\" )\n"
       "set(RMF_INCLUDE_PATH \"${CMAKE_INSTALL_FULL_INCLUDEDIR}\" )\n"
+      "set(IMP_CXX_COMPILER \"${CMAKE_CXX_COMPILER}\" )\n"
+      "set(IMP_CXX_FLAGS \"${CMAKE_CXX_FLAGS} ${CMAKE_CXX_FLAGS_RELEASE}\" )\n"
+      "set(IMP_OSX_SYSROOT \"${CMAKE_OSX_SYSROOT}\" )\n"
       "set(IMP_USE_FILE \"\${IMP_USE_DIR}/UseIMP.cmake\" )\n")
 list(REMOVE_DUPLICATES IMP_ALL_DEPENDS_VARS)
 foreach(cmakefile ${CMAKE_BINARY_DIR}/IMPConfig.cmake
diff --git a/ChangeLog.md b/ChangeLog.md
index 082785ffe5..4ec98fc751 100644
--- a/ChangeLog.md
+++ b/ChangeLog.md
@@ -1,6 +1,26 @@
 ChangeLog {#changelog}
 =========
 
+# 2.21.0 - 2024-06-13 # {#changelog_2_21_0}
+- The new IMP::spatiotemporal module can be used to assist in building stepwise
+  spatiotemporal models, such as those we used to compute our model of
+  NPC assembly.
+- The new IMP::nestor module performs nested sampling-based optimization of
+  representation.
+- This is the last version of IMP to support Python 2. Please port your
+  workflows to Python 3.
+- The IMP::atom::ChainPDBSelector constructor that takes a single string has
+  been removed; pass a list of chain IDs instead.
+- The IMP::pmi::get_is_canonical function has been removed; PMI no longer
+  supports the older PMI1-style hierarchies.
+- .deb packages for IMP for Ubuntu LTS, for both amd64 and arm64 architectures,
+  are now provided by an
+  [Ubuntu PPA](https://launchpad.net/~salilab/+archive/ubuntu/ppa)
+- Many runtime checks on the VectorD and Array (e.g. used by ParticleIndexPair)
+  classes have now been moved to compile-time checks, which makes restraint
+  evaluation significantly faster in release builds, without sacrificing
+  correctness.
+
 # 2.20.2 - 2024-04-04 # {#changelog_2_20_2}
 - Add support for building ARM64 .deb packages.
 - Bugfix: fix use of nested classes with latest SWIG.
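To illustrate the ChainPDBSelector change noted in the changelog above, a minimal migration sketch (the list form mirrors the updated test_mmcif.py test later in this diff; the input file name is a placeholder):

```python
import IMP
import IMP.atom

m = IMP.Model()
# 2.20 and earlier also accepted one string of single-character chain
# IDs, e.g. ChainPDBSelector("AB"); from 2.21 each chain ID is a
# separate list element (which also allows multi-character mmCIF IDs).
sel = IMP.atom.ChainPDBSelector(["A", "B"])
mp = IMP.atom.read_pdb("input.pdb", m, sel)  # "input.pdb" is a placeholder
```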
diff --git a/VERSION b/VERSION
index 83ecbf1d7a..db65e2167e 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-2.20.2
+2.21.0
diff --git a/cmake_modules/IMPFindCompilerFlags.cmake b/cmake_modules/IMPFindCompilerFlags.cmake
index 0a6203e5f0..721b312879 100644
--- a/cmake_modules/IMPFindCompilerFlags.cmake
+++ b/cmake_modules/IMPFindCompilerFlags.cmake
@@ -15,10 +15,7 @@ endif()
 if ("${CMAKE_CXX_COMPILER_ID}" MATCHES "GNU")
 execute_process(COMMAND ${CMAKE_CXX_COMPILER} -dumpversion
                 OUTPUT_VARIABLE GCC_VERSION OUTPUT_STRIP_TRAILING_WHITESPACE)
-message(STATUS "GCC version: ${GCC_VERSION}")
-if (GCC_VERSION VERSION_GREATER 4.6 OR GCC_VERSION VERSION_EQUAL 4.6)
-message(STATUS "No conversion issue")
-elseif (GCC_VERSION VERSION_GREATER 4.3 OR GCC_VERSION VERSION_EQUAL 4.3)
+if (GCC_VERSION VERSION_LESS 4.6 AND (GCC_VERSION VERSION_GREATER 4.3 OR GCC_VERSION VERSION_EQUAL 4.3))
 add_definitions("-Wno-conversion")
 endif()
 endif()
diff --git a/cmake_modules/IMPFindPython.cmake b/cmake_modules/IMPFindPython.cmake
index ed8d87c14a..ff6712155a 100644
--- a/cmake_modules/IMPFindPython.cmake
+++ b/cmake_modules/IMPFindPython.cmake
@@ -29,22 +29,23 @@ function(imp_find_python)
   endif()
   set(PYTHON_EXECUTABLE ${python_full_path} CACHE INTERNAL "" FORCE)
   set(PYTHON_TEST_EXECUTABLE ${python_full_path} CACHE STRING "")
-  execute_process(COMMAND ${PYTHON_EXECUTABLE} -c "import sys; print('%d.%d.%d' % sys.version_info[:3])"
-                  WORKING_DIRECTORY ${PROJECT_BINARY_DIR}
-                  OUTPUT_VARIABLE python_full_version
-                  OUTPUT_STRIP_TRAILING_WHITESPACE)
-  string(REGEX REPLACE "^([0-9])+\\.[0-9]+.*" "\\1" major
-         "${python_full_version}")
-  string(REGEX REPLACE "^[0-9]+\\.([0-9]+).*" "\\1" minor
-         "${python_full_version}")
-  string(REGEX REPLACE "^[0-9]+\\.[0-9]+\\.([0-9]+).*" "\\1" patch
-         "${python_full_version}")
-  set(PYTHON_VERSION ${python_full_version} CACHE INTERNAL "" FORCE)
-  set(PYTHON_VERSION_MAJOR ${major} CACHE INTERNAL "" FORCE)
-  set(PYTHON_VERSION_MINOR ${minor} CACHE INTERNAL "" FORCE)
-  set(PYTHON_VERSION_PATCH ${patch} CACHE INTERNAL "" FORCE)
-  message(STATUS "Python binary is " ${python_full_path} " (version " ${python_full_version} ")")
-
+  endif()
+  execute_process(COMMAND ${PYTHON_EXECUTABLE} -c "import sys; print('%d.%d.%d' % sys.version_info[:3])"
+                  WORKING_DIRECTORY ${PROJECT_BINARY_DIR}
+                  OUTPUT_VARIABLE python_full_version
+                  OUTPUT_STRIP_TRAILING_WHITESPACE)
+  string(REGEX REPLACE "^([0-9])+\\.[0-9]+.*" "\\1" major
+         "${python_full_version}")
+  string(REGEX REPLACE "^[0-9]+\\.([0-9]+).*" "\\1" minor
+         "${python_full_version}")
+  string(REGEX REPLACE "^[0-9]+\\.[0-9]+\\.([0-9]+).*" "\\1" patch
+         "${python_full_version}")
+  set(PYTHON_VERSION ${python_full_version} CACHE INTERNAL "" FORCE)
+  set(PYTHON_VERSION_MAJOR ${major} CACHE INTERNAL "" FORCE)
+  set(PYTHON_VERSION_MINOR ${minor} CACHE INTERNAL "" FORCE)
+  set(PYTHON_VERSION_PATCH ${patch} CACHE INTERNAL "" FORCE)
+  message(STATUS "Python binary is " ${PYTHON_EXECUTABLE} " (version " ${python_full_version} ")")
+
 if(NOT DEFINED PYTHON_INCLUDE_DIRS)
   find_package(PythonLibs ${python_full_version} EXACT REQUIRED)
   # Make sure PYTHON_INCLUDE_DIRS is in the cache so it can be
   # used elsewhere
diff --git a/doc/manual/cmake_config.md b/doc/manual/cmake_config.md
index 5ca732e294..f68f9692fc 100644
--- a/doc/manual/cmake_config.md
+++ b/doc/manual/cmake_config.md
@@ -51,6 +51,7 @@ Various aspects of %IMP build behavior can be controlled via variables. These ca
 - `IMP_MAX_CHECKS`: One of `NONE`, `USAGE`, `INTERNAL` to control what check levels will be supported. The default is `USAGE` for release builds and `INTERNAL` for debug builds (setting this to `INTERNAL` will impact performance; `NONE` is not recommended as all sanity checks will be skipped).
 - `IMP_MAX_LOG`: One of `SILENT`, `PROGRESS`, `TERSE`, `VERBOSE` to control what log levels are supported.
 - `IMP_PER_CPP_COMPILATION`: A colon-separated list of modules to build one .cpp at a time, or `ALL` to do this for all modules.
+- `IMP_CUDA`: A colon-separated list of modules to build with CUDA (GPU) support, or `ALL` to do this for all modules. This is experimental and is currently in development. See [here](@ref gpu) for more details.
 - `USE_PYTHON2`: Set to `on` to have CMake build %IMP with Python 2 (by default it will use Python 3 if available).
 - `IMP_USE_SYSTEM_RMF`: Set to `on` to build %IMP using an external (system) copy of the RMF library, instead of that bundled with IMP itself.
 - `IMP_USE_SYSTEM_IHM`: Set to `on` to build %IMP using an external (system) copy of the python-ihm library, instead of that bundled with IMP itself.
diff --git a/doc/manual/gpu.md b/doc/manual/gpu.md
new file mode 100644
index 0000000000..2b4c1da638
--- /dev/null
+++ b/doc/manual/gpu.md
@@ -0,0 +1,13 @@
+GPU support {#gpu}
+===========
+
+%IMP currently has only very rudimentary support for running on a graphics
+processing unit (GPU), although this is currently in development.
+
+%IMP currently only supports NVIDIA GPUs using the CUDA toolkit (although this
+is likely to switch to use [SYCL](https://sycl.tech/) as the ecosystem matures).
+To build %IMP from source code with CUDA support (there are currently no
+prebuilt %IMP binaries that use CUDA), ensure that the `nvcc` compiler
+from NVIDIA's [CUDA toolkit](https://developer.nvidia.com/cuda-downloads)
+is available, and add `-DIMP_CUDA` to your
+[CMake invocation](@ref cmake_config).
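To make the new GPU documentation concrete, a minimal configure-and-build sketch (assuming a source checkout in `../imp` and a working `nvcc`; `IMP_CUDA=ALL` follows the `IMP_CUDA` entry added to cmake_config.md above, which also accepts a colon-separated module list):

```sh
# Out-of-source build with the experimental CUDA support enabled for
# all modules; requires nvcc from the CUDA toolkit on the PATH.
# Directory names are placeholders.
mkdir build && cd build
cmake ../imp -DIMP_CUDA=ALL
make -j4
```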
diff --git a/doc/manual/installation.md b/doc/manual/installation.md
index c4419757aa..0b436fd05b 100644
--- a/doc/manual/installation.md
+++ b/doc/manual/installation.md
@@ -21,9 +21,7 @@ to see if the code is currently stable enough for your purposes.
 To experiment with IMP on [Google Colaboratory](https://colab.research.google.com),
 use the following code snippet:
 \code{.unparsed}
-!echo "deb https://integrativemodeling.org/latest/download $(lsb_release -cs)/" > /etc/apt/sources.list.d/salilab.list
-!wget -O /etc/apt/trusted.gpg.d/salilab.asc https://salilab.org/~ben/pubkey256.asc
-!apt update
+!add-apt-repository -y ppa:salilab/ppa
 !apt install imp
 import sys, os, glob
 sys.path.append(os.path.dirname(glob.glob('/usr/lib/python*/dist-packages/IMP')[0]))
diff --git a/doc/manual/mainpage.dox b/doc/manual/mainpage.dox
index 99d6dee999..e99440a356 100644
--- a/doc/manual/mainpage.dox
+++ b/doc/manual/mainpage.dox
@@ -59,6 +59,8 @@ complex might want to skip ahead to
 
 - [Bug reports](@ref bugs)
 
+- [GPU support](@ref gpu)
+
 - [Applying IMP to a new biological system](@ref biosystem)
 
 - [Developing the IMP source code](@ref developing)
diff --git a/doc/manual/write_examples.md b/doc/manual/write_examples.md
index a4ae660969..1cce7b6a1d 100644
--- a/doc/manual/write_examples.md
+++ b/doc/manual/write_examples.md
@@ -17,7 +17,8 @@ for it to finish.) The build system passes each example the
 `--run_quick_test` argument, so this can be used to run a faster subset (e.g.
 fewer iterations of an optimization). If you are trying to demonstrate the
 application of your method to a real biological system, you should write a
-[biological system](https://integrativemodeling.org/systems/) instead of an
+[biological system](https://integrativemodeling.org/systems/) or perhaps a
+[tutorial](https://integrativemodeling.org/tutorials/) instead of an
 example. The example should have enough comments that the reasoning behind
 each line of code is clear to someone who roughly understands how %IMP in
 general works.
diff --git a/modules/algebra/include/VectorBaseD.h b/modules/algebra/include/VectorBaseD.h
index b18608cbe5..42901a71c4 100644
--- a/modules/algebra/include/VectorBaseD.h
+++ b/modules/algebra/include/VectorBaseD.h
@@ -24,19 +24,22 @@
 #include
 #include
 #include
-#include
 
 #if IMP_HAS_CHECKS >= IMP_INTERNAL
 #define IMP_ALGEBRA_VECTOR_CHECK check_vector()
+#define IMP_ALGEBRA_VECTOR_CHECK_OTHER(o) o.check_vector()
 #else
 #define IMP_ALGEBRA_VECTOR_CHECK
+#define IMP_ALGEBRA_VECTOR_CHECK_OTHER(o)
 #endif
+/* Should only need to check for "compatible" (same dimension) vectors
+   when using variable-dimension vectors; otherwise, it is checked at
+   compile time */
 #if IMP_HAS_CHECKS >= IMP_USAGE
 #define IMP_ALGEBRA_VECTOR_CHECK_INDEX(i) check_index(i)
 #define IMP_ALGEBRA_VECTOR_CHECK_COMPATIBLE(o) \
-  check_compatible_vector(o);                  \
-  o.check_vector()
+  if (D == -1) { check_compatible_vector(o); }
 #else
 #define IMP_ALGEBRA_VECTOR_CHECK_INDEX(i)
 #define IMP_ALGEBRA_VECTOR_CHECK_COMPATIBLE(o)
 #endif
@@ -61,8 +64,8 @@ class VectorBaseD : public GeometricPrimitiveD {
   }
   void check_vector() const {
-    IMP_USAGE_CHECK(!data_.get_is_null(),
-                    "Attempt to use uninitialized vector.");
+    IMP_INTERNAL_CHECK(!data_.get_is_null(),
+                       "Attempt to use uninitialized vector.");
   }
   template
   void check_compatible_vector(const VectorBaseD &o) const {
@@ -129,9 +132,10 @@ class VectorBaseD : public GeometricPrimitiveD {
   //! Default constructor
   VectorBaseD() {}
 
-  double get_scalar_product(const VectorBaseD &o) const {
-    IMP_ALGEBRA_VECTOR_CHECK_COMPATIBLE(o);
+  double get_scalar_product(const VectorBaseD &o) const {
     IMP_ALGEBRA_VECTOR_CHECK;
+    IMP_ALGEBRA_VECTOR_CHECK_OTHER(o);
+    IMP_ALGEBRA_VECTOR_CHECK_COMPATIBLE(o);
     double ret = 0;
     for (unsigned int i = 0; i < get_dimension(); ++i) {
       ret += operator[](i) * o.operator[](i);
@@ -153,14 +157,30 @@ class VectorBaseD : public GeometricPrimitiveD {
 
   double get_magnitude() const { return std::sqrt(get_squared_magnitude()); }
 
+  //! Return the distance between this and another vector
+  /** This is essentially identical to (v1 - v2).get_magnitude() but
+      may be slightly more efficient as it avoids creating a temporary
+      vector object. */
+  double get_distance(const VectorBaseD &o) const {
+    IMP_ALGEBRA_VECTOR_CHECK_OTHER(o);
+    IMP_ALGEBRA_VECTOR_CHECK_COMPATIBLE(o);
+    const double *data = get_data(), *odata = o.get_data();
+    double ret = 0;
+    for (unsigned int i = 0; i < get_dimension(); ++i) {
+      ret += (odata[i] - data[i]) * (odata[i] - data[i]);
+    }
+    return std::sqrt(ret);
+  }
+
 #ifndef IMP_DOXYGEN
   double operator*(const VectorBaseD &o) const { return get_scalar_product(o); }
 
   VectorBaseD &operator+=(const VectorBaseD &o) {
-    IMP_ALGEBRA_VECTOR_CHECK_COMPATIBLE(o);
     IMP_ALGEBRA_VECTOR_CHECK;
+    IMP_ALGEBRA_VECTOR_CHECK_OTHER(o);
+    IMP_ALGEBRA_VECTOR_CHECK_COMPATIBLE(o);
     for (unsigned int i = 0; i < get_dimension(); ++i) {
       operator[](i) += o[i];
     }
@@ -168,8 +188,9 @@ class VectorBaseD : public GeometricPrimitiveD {
   }
 
   VectorBaseD &operator-=(const VectorBaseD &o) {
-    IMP_ALGEBRA_VECTOR_CHECK_COMPATIBLE(o);
     IMP_ALGEBRA_VECTOR_CHECK;
+    IMP_ALGEBRA_VECTOR_CHECK_OTHER(o);
+    IMP_ALGEBRA_VECTOR_CHECK_COMPATIBLE(o);
     for (unsigned int i = 0; i < get_dimension(); ++i) {
       operator[](i) -= o[i];
     }
diff --git a/modules/algebra/include/VectorD.h b/modules/algebra/include/VectorD.h
index 4952f2c28d..e4712e47ca 100644
--- a/modules/algebra/include/VectorD.h
+++ b/modules/algebra/include/VectorD.h
@@ -25,23 +25,6 @@
 #include
 #include
 #include
-#include
-
-#if IMP_HAS_CHECKS >= IMP_INTERNAL
-#define IMP_ALGEBRA_VECTOR_CHECK check_vector()
-#else
-#define IMP_ALGEBRA_VECTOR_CHECK
-#endif
-
-#if IMP_HAS_CHECKS >= IMP_USAGE
-#define IMP_ALGEBRA_VECTOR_CHECK_INDEX(i) check_index(i)
-#define IMP_ALGEBRA_VECTOR_CHECK_COMPATIBLE(o) \
-  check_compatible_vector(o);                  \
-  o.check_vector()
-#else
-#define IMP_ALGEBRA_VECTOR_CHECK_INDEX(i)
-#define IMP_ALGEBRA_VECTOR_CHECK_COMPATIBLE(o)
-#endif
 
 IMPALGEBRA_BEGIN_NAMESPACE
 //! A Cartesian vector in D-dimensions.
diff --git a/modules/algebra/include/algebra_macros.h b/modules/algebra/include/algebra_macros.h
index d284e394cc..eeb6d3f766 100644
--- a/modules/algebra/include/algebra_macros.h
+++ b/modules/algebra/include/algebra_macros.h
@@ -2,7 +2,7 @@
  * \file IMP/algebra/algebra_macros.h
  * \brief Various helper macros.
  *
- * Copyright 2007-2022 IMP Inventors. All rights reserved.
+ * Copyright 2007-2024 IMP Inventors. All rights reserved.
  *
  */
@@ -34,7 +34,8 @@
   template                                                  \
   VectorD(const VectorD &o)                                 \
       : P(o) {                                              \
-    BOOST_STATIC_ASSERT(OD == D || OD == -1 || D == -1);    \
+    static_assert(OD == D || OD == -1 || D == -1,           \
+                  "VectorD size mismatch");                 \
   }                                                         \
   IMP_ALGEBRA_VECTOR_SWIG_METHODS(D);                       \
   /** The distance between b and e must be equal to D.      \
diff --git a/modules/algebra/include/internal/internal_vector_generators.h b/modules/algebra/include/internal/internal_vector_generators.h
index fea00dafea..f4e302384b 100644
--- a/modules/algebra/include/internal/internal_vector_generators.h
+++ b/modules/algebra/include/internal/internal_vector_generators.h
@@ -1,7 +1,7 @@
 /**
  * \file cgal_predicates.h
  * \brief predicates implemented using CGAL
- * Copyright 2007-2022 IMP Inventors. All rights reserved.
+ * Copyright 2007-2024 IMP Inventors. All rights reserved.
  */
 
 #ifndef IMPALGEBRA_INTERNAL_VECTOR_GENERATORS_H
@@ -18,8 +18,8 @@
 #include
 #endif
 #include
-#include <boost/uniform_real.hpp>
-#include <boost/uniform_int.hpp>
+#include <boost/random/uniform_real_distribution.hpp>
+#include <boost/random/uniform_int_distribution.hpp>
 #include
 #include
 
@@ -28,7 +28,8 @@
 template
 inline VectorD get_random_vector_in(const BoundingBoxD &bb) {
   VectorD ret = bb.get_corner(0);  // some appropriate vector
   for (unsigned int i = 0; i < bb.get_dimension(); ++i) {
-    ::boost::uniform_real<> rand(bb.get_corner(0)[i], bb.get_corner(1)[i]);
+    ::boost::random::uniform_real_distribution<> rand(bb.get_corner(0)[i],
+                                                      bb.get_corner(1)[i]);
     ret[i] = rand(random_number_generator);
   }
   return ret;
 }
@@ -61,7 +62,7 @@ inline VectorD<-1> get_random_vector_on(const SphereD<-1> &s) {
 }
 
 inline VectorD<2> get_random_vector_on(const SphereD<2> &s) {
-  ::boost::uniform_real<> rand(0, 2 * PI);
+  ::boost::random::uniform_real_distribution<> rand(0, 2 * PI);
   double angle = rand(random_number_generator);
   VectorD<2> ret(s.get_radius() * sin(angle), s.get_radius() * cos(angle));
   return ret + s.get_center();
@@ -70,7 +71,7 @@ inline VectorD<2> get_random_vector_on(const SphereD<2> &s) {
 //! returns a random vector on a sphere of radius 1
 //! with implementation optimized for the 3D unit vector case
 inline VectorD<3> get_random_vector_on_unit_sphere() {
-  // ::boost::uniform_real<> rand(-1, 1);
+  // ::boost::random::uniform_real_distribution<> rand(-1, 1);
   do {
     // double x1 = rand(random_number_generator);
     // double x2 = rand(random_number_generator);
@@ -117,12 +118,12 @@ inline VectorD<3> get_random_vector_on(const SphereD<3> &s) {
   Floats up(s.get_center().get_dimension());
   for (unsigned int i=s.get_dimension()-1; i>0; --i) {
     double r= std::sqrt(cur_radius2);
-    ::boost::uniform_real<> rand(-r, r);
+    ::boost::random::uniform_real_distribution<> rand(-r, r);
     up[i]= rand(random_number_generator);
     // radius of circle
     cur_radius2= cur_radius2-get_squared(up[i]);
   }
-  ::boost::uniform_int<> rand(0, 1);
+  ::boost::random::uniform_int_distribution<> rand(0, 1);
   double x= std::sqrt(cur_radius2);
   if (rand(random_number_generator)) {
     x=-x;
@@ -153,7 +154,7 @@ inline VectorD<3> get_random_vector_on(const SphereD<3> &s) {
 template
 inline Vector > native_uniform_cover_unit_sphere(
     unsigned int d, unsigned int n, bool ALL) {
-  BOOST_STATIC_ASSERT(D != 3);
+  static_assert(D != 3, "VectorD wrong size");
   Vector > ret(n);
   for (unsigned int i = 0; i < std::min(d, n / (ALL ? 2 : 1)); ++i) {
     VectorD v = get_basis_vector_kd(d, i);
@@ -293,7 +294,8 @@ struct RandomVectorOnBB {
       /*for (unsigned int i=0; i< D*2; ++i) {
         std::cout << areas[i] << " ";
         }*/
-      ::boost::uniform_real<> rand(0, areas[2 * bb.get_dimension() - 1]);
+      ::boost::random::uniform_real_distribution<> rand(
+          0, areas[2 * bb.get_dimension() - 1]);
       double a = rand(random_number_generator);
       // std::cout << ": " << a << std::endl;
       unsigned int side;
@@ -335,7 +337,7 @@ struct RandomVectorOnBB {
 template <>
 struct RandomVectorOnBB<1> {
   static VectorD<1> get(BoundingBoxD<1> bb) {
-    ::boost::uniform_int<> rand(0, 1);
+    ::boost::random::uniform_int_distribution<> rand(0, 1);
     return bb.get_corner(rand(random_number_generator));
   }
 };
diff --git a/modules/algebra/pyext/include/IMP_algebra.types.i b/modules/algebra/pyext/include/IMP_algebra.types.i
index d8fe27c840..7998fc517f 100644
--- a/modules/algebra/pyext/include/IMP_algebra.types.i
+++ b/modules/algebra/pyext/include/IMP_algebra.types.i
@@ -366,8 +366,9 @@ struct ConvertEigenMatrix {
 #if IMP_KERNEL_HAS_NUMPY
     // We are called for both float and double. Map to equivalent NumPy
     // types by checking the size of the type (a little ugly)
-    BOOST_STATIC_ASSERT(sizeof(typename M::Scalar) == sizeof(double) ||
-                        sizeof(typename M::Scalar) == sizeof(float));
+    static_assert(sizeof(typename M::Scalar) == sizeof(double) ||
+                  sizeof(typename M::Scalar) == sizeof(float),
+                  "NumPy type size mismatch");
     if (numpy_import_retval == 0) {
       npy_intp dims[2];
       dims[0] = t.rows();
@@ -442,8 +443,9 @@ struct ConvertEigenVector {
 #if IMP_KERNEL_HAS_NUMPY
     // We are called for both float and double. Map to equivalent NumPy
     // types by checking the size of the type (a little ugly)
-    BOOST_STATIC_ASSERT(sizeof(typename M::Scalar) == sizeof(double) ||
-                        sizeof(typename M::Scalar) == sizeof(float));
+    static_assert(sizeof(typename M::Scalar) == sizeof(double) ||
+                  sizeof(typename M::Scalar) == sizeof(float),
+                  "NumPy type size mismatch");
     if (numpy_import_retval == 0) {
       npy_intp dims[2];
       dims[0] = t.rows();
diff --git a/modules/algebra/src/Transformation3D.cpp b/modules/algebra/src/Transformation3D.cpp
index 0bcdb47e83..85c9a79860 100644
--- a/modules/algebra/src/Transformation3D.cpp
+++ b/modules/algebra/src/Transformation3D.cpp
@@ -74,7 +74,8 @@ Transformation3D get_random_local_transformation(Vector3D origin,
       algebra::Sphere3D(algebra::Vector3D(0, 0, 0), max_translation));
   algebra::Vector3D axis =
       algebra::get_random_vector_on(algebra::Sphere3D(origin, 1.));
-  ::boost::uniform_real<> rand(-max_angle_in_rad, max_angle_in_rad);
+  ::boost::random::uniform_real_distribution<> rand(-max_angle_in_rad,
+                                                    max_angle_in_rad);
   Float angle = rand(random_number_generator);
   algebra::Rotation3D r = algebra::get_rotation_about_axis(axis, angle);
   return algebra::Transformation3D(r, translation);
diff --git a/modules/algebra/src/vector_generators.cpp b/modules/algebra/src/vector_generators.cpp
index c55034cc45..fa6e4dde35 100644
--- a/modules/algebra/src/vector_generators.cpp
+++ b/modules/algebra/src/vector_generators.cpp
@@ -21,7 +21,7 @@ IMPALGEBRA_BEGIN_NAMESPACE
    \relatesalso SphereD
 */
 VectorD<2> get_random_vector_in(const SphereD<2> &s) {
-  ::boost::uniform_real<> rand(0, 1);
+  ::boost::random::uniform_real_distribution<> rand(0, 1);
   double angle = 2 * PI * rand(random_number_generator);
   // sample the radius uniformly with respect to the circle area PI * R^2
   double R2 = std::pow(s.get_radius(), 2);
@@ -35,7 +35,7 @@
    \relatesalso Cylinder3D
 */
 Vector3D get_random_vector_in(const Cylinder3D &c) {
-  ::boost::uniform_real<> rand(0, 1);
+  ::boost::random::uniform_real_distribution<> rand(0, 1);
   // relative height and radius are between 0 (0%) and 1 (100%):
   double relative_h = rand(random_number_generator);
   // sqrt[rand(0,1)] is uniform with respect to the circle area PI*r^2
diff --git a/modules/algebra/src/vector_search.cpp b/modules/algebra/src/vector_search.cpp
index b9e042fd59..1940b3477d 100644
--- a/modules/algebra/src/vector_search.cpp
+++ b/modules/algebra/src/vector_search.cpp
@@ -77,8 +77,7 @@ void DynamicNearestNeighbor3D::set_coordinates(int id, Vector3D nc) {
     Ints &it = grid_[ind];
     IMP_INTERNAL_CHECK(std::find(it.begin(), it.end(), id) != it.end(),
                        "Item not found in list");
-    std::remove(it.begin(), it.end(), id);
-    it.pop_back();
+    it.erase(std::remove(it.begin(), it.end(), id), it.end());
     IMP_INTERNAL_CHECK(std::find(it.begin(), it.end(), id) == it.end(),
                        "Item found in list");
     set_coordinates_internal(id, nc);
diff --git a/modules/algebra/test/expensive_test_vectord_mismatch.py b/modules/algebra/test/expensive_test_vectord_mismatch.py
new file mode 100644
index 0000000000..a55939cf60
--- /dev/null
+++ b/modules/algebra/test/expensive_test_vectord_mismatch.py
@@ -0,0 +1,18 @@
+import IMP.test
+
+
+class Tests(IMP.test.TestCase):
+
+    def test_vectord_mismatch(self):
+        """Test combination of VectorD of different dimensions"""
+        # Should be a compile-time error to combine VectorD of different sizes
+        self.assertCompileFails(
+            headers=['IMP/algebra/VectorD.h'],
+            body="""
+IMP::algebra::Vector3D v1(1,2,3);
+IMP::algebra::Vector4D v3(7,8,9,10);
+v1 -= v3;""")
+
+
+if __name__ == '__main__':
+    IMP.test.main()
diff --git a/modules/algebra/test/test_vector3d.py b/modules/algebra/test/test_vector3d.py
index 85eba60279..c58ab0c152 100644
--- a/modules/algebra/test/test_vector3d.py
+++ b/modules/algebra/test/test_vector3d.py
@@ -5,6 +5,7 @@
 import math
 import sys
 import pickle
+import operator
 
 
 class Tests(IMP.test.TestCase):
@@ -20,7 +21,7 @@ def test_magnitude(self):
     def test_uninit(self):
        """Check use of uninitialized Vector3D"""
        v = IMP.algebra.Vector3D()
-       self.assertRaises(IMP.UsageException, v.__getitem__, 0)
+       self.assertRaises(IMP.InternalException, v.__getitem__, 0)
 
     def test_from_floats(self):
        """Check Vector3D from floats"""
@@ -93,6 +94,35 @@ def test_len(self):
         v1 = IMP.algebra.Vector3D(1.0, 2.0, 3.0)
         self.assertEqual(len(v1), 3)
 
+    def test_dimension_mismatch(self):
+        """Check failure to combine with other dimension vectors"""
+        v3 = IMP.algebra.Vector3D(1.0, 2.0, 3.0)
+        v4 = IMP.algebra.Vector4D(1.0, 2.0, 3.0, 4.0)
+        k3 = IMP.algebra.VectorKD(v3)
+        k4 = IMP.algebra.VectorKD(v4)
+        # Should not be able to add 3D vector to 4D (or KD with K==4) vector
+        self.assertRaises(TypeError, operator.add, v3, v4)
+        self.assertRaises(TypeError, operator.add, v3, k4)
+        self.assertRaises(TypeError, operator.add, v4, v3)
+        self.assertRaises(TypeError, operator.add, v4, k3)
+        # Should not be able to subtract 3D vector from
+        # 4D (or KD with K==4) vector
+        self.assertRaises(TypeError, operator.sub, v3, v4)
+        self.assertRaises(TypeError, operator.sub, v3, k4)
+        self.assertRaises(TypeError, operator.sub, v4, v3)
+        self.assertRaises(TypeError, operator.sub, v4, k3)
+        # Should not be able to get scalar product 3D vector with 4D
+        self.assertRaises(TypeError, v3.get_scalar_product, v4)
+        self.assertRaises(TypeError, v3.get_scalar_product, k4)
+        self.assertRaises(TypeError, v4.get_scalar_product, v3)
+        self.assertRaises(TypeError, v4.get_scalar_product, k3)
+        # 3D vector with KD (with K==3) is OK, but scalar product is
+        # not currently supported
+        _ = v3 + k3
+        _ = v3 - k3
+        self.assertRaises(TypeError, v3.get_scalar_product, k3)
+        self.assertRaises(TypeError, k3.get_scalar_product, v3)
+
     def test_scalar_product(self):
         """Check Vector3D scalar product"""
         v1 = IMP.algebra.Vector3D(1.0, 2.0, 3.0)
@@ -140,6 +170,19 @@ def test_difference(self):
         self.assertAlmostEqual((v1 - expected_diff).get_magnitude(),
                                0, delta=.1)
 
+    def test_get_distance(self):
+        """Check Vector3D.get_distance()"""
+        v1 = IMP.algebra.Vector3D(1.0, 2.0, 3.0)
+        v2 = IMP.algebra.Vector3D(10.0, 1.0, 2.0)
+        dist = v1.get_distance(v2)
+        self.assertAlmostEqual(dist, 9.11, delta=0.01)
+        dist2 = (v1 - v2).get_magnitude()
+        self.assertAlmostEqual(dist, dist2, delta=0.01)
+        # Should not be able to get distance between different
+        # dimension vectors
+        v4 = IMP.algebra.Vector4D(10.0, 1.0, 2.0, 0.0)
+        self.assertRaises(TypeError, v1.get_distance, v4)
+
     def test_show(self):
         """Check vector 3D show"""
         v = IMP.algebra.Vector3D(1, 2, 3)
diff --git a/modules/atom/include/CenterOfMass.h b/modules/atom/include/CenterOfMass.h
index 81e5d3378b..a5278acde7 100644
--- a/modules/atom/include/CenterOfMass.h
+++ b/modules/atom/include/CenterOfMass.h
@@ -102,7 +102,7 @@ class IMPATOMEXPORT CenterOfMass : public IMP::Decorator {
 
-  IMP_NO_DOXYGEN(typedef boost::false_type DecoratorHasTraits);
+  IMP_NO_DOXYGEN(typedef std::false_type DecoratorHasTraits);
 };
diff --git a/modules/atom/include/Hierarchy.h b/modules/atom/include/Hierarchy.h
index e1ecba6b9a..658e97fd6d 100644
--- a/modules/atom/include/Hierarchy.h
+++ b/modules/atom/include/Hierarchy.h
@@ -194,7 +194,7 @@ class IMPATOMEXPORT Hierarchy : public core::Hierarchy {
  public:
 #ifndef IMP_DOXYGEN
-  typedef boost::false_type DecoratorHasTraits;
+  typedef std::false_type DecoratorHasTraits;
 
   //! Setup the particle as a hierarchy; add the passed particles as children.
   static Hierarchy setup_particle(Particle *p,
diff --git a/modules/atom/include/internal/soap_bond_separation.h b/modules/atom/include/internal/soap_bond_separation.h
index 671fcc2d83..c528637e0d 100644
--- a/modules/atom/include/internal/soap_bond_separation.h
+++ b/modules/atom/include/internal/soap_bond_separation.h
@@ -366,8 +366,8 @@ class SoapBondSeparation {
   // Determine the number of bonds between the two atoms (or -1 if unconnected)
   int get_bond_separation(Model *m, const ParticleIndexPair &p) const {
-    Atom a1(m, p[0]);
-    Atom a2(m, p[1]);
+    Atom a1(m, std::get<0>(p));
+    Atom a2(m, std::get<1>(p));
     Residue r1 = get_residue(a1);
     Residue r2 = get_residue(a2);
     if (a1 == a2) {
diff --git a/modules/atom/include/internal/soap_chain_separation.h b/modules/atom/include/internal/soap_chain_separation.h
index 8d6391a14a..fdb71ce0c1 100644
--- a/modules/atom/include/internal/soap_chain_separation.h
+++ b/modules/atom/include/internal/soap_chain_separation.h
@@ -37,8 +37,8 @@ class SoapChainSeparation {
     if (!enabled()) {
       return true;
     }
-    Atom a1(m, p[0]);
-    Atom a2(m, p[1]);
+    Atom a1(m, std::get<0>(p));
+    Atom a2(m, std::get<1>(p));
     Residue r1 = get_residue(a1);
     Residue r2 = get_residue(a2);
     Hierarchy p1 = r1.get_parent();
diff --git a/modules/atom/include/pdb.h b/modules/atom/include/pdb.h
index 88a46873fc..193a9826af 100644
--- a/modules/atom/include/pdb.h
+++ b/modules/atom/include/pdb.h
@@ -275,23 +275,6 @@ class ChainPDBSelector : public NonAlternativePDBSelector {
     std::sort(chains_.begin(), chains_.end());
   }
 
-#ifndef IMP_DOXYGEN
-  //! The chain id can be any character in chains
-  /** \note This limits the selection to single-character chain IDs
-      (mmCIF files support multiple-character chain names) */
-  IMPATOM_DEPRECATED_METHOD_DECL(2.20)
-  ChainPDBSelector(const std::string &chains,
-                   std::string name = "ChainPDBSelector%1%")
-      : NonAlternativePDBSelector(name) {
-    IMPATOM_DEPRECATED_METHOD_DEF(
-        2.20, "Pass a list of chain ID strings instead");
-    for (size_t i = 0; i < chains.length(); ++i) {
-      chains_.push_back(std::string(1, chains[i]));
-    }
-    std::sort(chains_.begin(), chains_.end());
-  }
-#endif
-
 private:
   Strings chains_;
 };
diff --git a/modules/atom/src/BondedPairFilter.cpp b/modules/atom/src/BondedPairFilter.cpp
index 7a62b820b8..effc437e96 100644
--- a/modules/atom/src/BondedPairFilter.cpp
+++ b/modules/atom/src/BondedPairFilter.cpp
@@ -14,11 +14,12 @@ BondedPairFilter::BondedPairFilter() : PairPredicate("BondedPairFilter%1%") {}
 
 int BondedPairFilter::get_value_index(
     Model *m, const ParticleIndexPair &pip) const {
-  if (!Bonded::get_is_setup(m, pip[0]) || !Bonded::get_is_setup(m, pip[1])) {
+  if (!Bonded::get_is_setup(m, std::get<0>(pip))
+      || !Bonded::get_is_setup(m, std::get<1>(pip))) {
     return false;
   }
-  Bonded ba(m, pip[0]);
-  Bonded bb(m, pip[1]);
+  Bonded ba(m, std::get<0>(pip));
+  Bonded bb(m, std::get<1>(pip));
   Bond bd = get_bond(ba, bb);
   return bd != Bond();
 }
diff --git a/modules/atom/src/CoulombPairScore.cpp b/modules/atom/src/CoulombPairScore.cpp
index 18ff703986..3b6f045b3b 100644
--- a/modules/atom/src/CoulombPairScore.cpp
+++ b/modules/atom/src/CoulombPairScore.cpp
@@ -28,8 +28,8 @@ void CoulombPairScore::calculate_multiplication_factor() {
 double CoulombPairScore::evaluate_index(Model *m,
                                         const ParticleIndexPair &p,
                                         DerivativeAccumulator *da) const {
-  Charged c0(m, p[0]);
-  Charged c1(m, p[1]);
+  Charged c0(m, std::get<0>(p));
+  Charged c1(m, std::get<1>(p));
   algebra::Vector3D delta = c0.get_coordinates() - c1.get_coordinates();
   double dist = delta.get_magnitude();
   double score =
diff --git a/modules/atom/src/HelixRestraint.cpp b/modules/atom/src/HelixRestraint.cpp
index bf625282cd..d9ce7e50b6 100644
--- a/modules/atom/src/HelixRestraint.cpp
+++ b/modules/atom/src/HelixRestraint.cpp
@@ -86,8 +86,8 @@ ModelObjectsTemp HelixRestraint::do_get_inputs() const {
   }
   for (ParticleIndexPairs::const_iterator tb = bonds_ON_.begin();
        tb != bonds_ON_.end(); ++tb) {
-    ps.push_back(m->get_particle((*tb)[0]));
-    ps.push_back(m->get_particle((*tb)[1]));
+    ps.push_back(m->get_particle(std::get<0>(*tb)));
+    ps.push_back(m->get_particle(std::get<1>(*tb)));
   }
 
   return ps;
diff --git a/modules/atom/src/Hierarchy.cpp b/modules/atom/src/Hierarchy.cpp
index 16d6c85882..5622721edc 100644
--- a/modules/atom/src/Hierarchy.cpp
+++ b/modules/atom/src/Hierarchy.cpp
@@ -27,8 +27,6 @@
 #include
 
-#include
-
 #include
 
 IMPATOM_BEGIN_NAMESPACE
diff --git a/modules/atom/src/LennardJonesPairScore.cpp b/modules/atom/src/LennardJonesPairScore.cpp
index d0f6568926..8c3ba1643e 100644
--- a/modules/atom/src/LennardJonesPairScore.cpp
+++ b/modules/atom/src/LennardJonesPairScore.cpp
@@ -14,8 +14,8 @@ IMPATOM_BEGIN_NAMESPACE
 Float LennardJonesPairScore::evaluate_index(Model *m,
                                             const ParticleIndexPair &p,
                                             DerivativeAccumulator *da) const {
-  LennardJones lj0(m, p[0]);
-  LennardJones lj1(m, p[1]);
+  LennardJones lj0(m, std::get<0>(p));
+  LennardJones lj1(m, std::get<1>(p));
   algebra::Vector3D delta = lj0.get_coordinates() - lj1.get_coordinates();
   double distsqr = delta.get_squared_magnitude();
diff --git a/modules/atom/src/SameResiduePairFilter.cpp b/modules/atom/src/SameResiduePairFilter.cpp
index 9af96cfc3f..7aedf89763 100644
--- a/modules/atom/src/SameResiduePairFilter.cpp
+++ b/modules/atom/src/SameResiduePairFilter.cpp
@@ -16,7 +16,8 @@ SameResiduePairFilter::SameResiduePairFilter() {}
 int SameResiduePairFilter::get_value_index(
     Model *m, const ParticleIndexPair &p) const {
-  return Hierarchy(m, p[0]).get_parent() == Hierarchy(m, p[1]).get_parent();
+  return Hierarchy(m, std::get<0>(p)).get_parent()
+         == Hierarchy(m, std::get<1>(p)).get_parent();
 }
 
 ModelObjectsTemp SameResiduePairFilter::do_get_inputs(
diff --git a/modules/atom/src/StereochemistryPairFilter.cpp b/modules/atom/src/StereochemistryPairFilter.cpp
index a948377223..6821bcbd14 100644
--- a/modules/atom/src/StereochemistryPairFilter.cpp
+++ b/modules/atom/src/StereochemistryPairFilter.cpp
@@ -17,8 +17,9 @@ StereochemistryPairFilter::StereochemistryPairFilter() {}
 int StereochemistryPairFilter::get_value_index(
     Model *m, const ParticleIndexPair &pp) const {
-  return excluded_map_.find(internal::ExcludedPair(m->get_particle(pp[0]),
-                                                   m->get_particle(pp[1]))) !=
+  return excluded_map_.find(
+             internal::ExcludedPair(m->get_particle(std::get<0>(pp)),
+                                    m->get_particle(std::get<1>(pp)))) !=
          excluded_map_.end();
 }
diff --git a/modules/atom/src/distance.cpp b/modules/atom/src/distance.cpp
index 1d5f3a12fb..9ff4a445c7 100644
--- a/modules/atom/src/distance.cpp
+++ b/modules/atom/src/distance.cpp
@@ -62,7 +62,7 @@ double get_pairwise_rmsd_score(const core::XYZs& ref1, const core::XYZs& ref2,
   }*/
   algebra::Transformation3D t =
       algebra::get_transformation_aligning_first_to_second(mdl1, ref1);
-  Float rmsd_score = get_rmsd_transforming_first(t, ref2, mdl2);
+  Float rmsd_score = get_rmsd_transforming_first(t, mdl2, ref2);
   return rmsd_score;
 }
diff --git a/modules/atom/test/test_distance.py b/modules/atom/test/test_distance.py
index 48f14bde29..fdb785c813 100644
--- a/modules/atom/test/test_distance.py
+++ b/modules/atom/test/test_distance.py
@@ -29,6 +29,41 @@ def test_placement_score(self):
         self.assertAlmostEqual(da[0], d, 2)
         self.assertAlmostEqual(da[1], a, 2)
 
+    def test_pairwise_rmsd_score(self):
+        """Test pairwise RMSD score"""
+        m = IMP.Model()
+        # Get reference orientation ref1, ref2
+        mpref = atom.read_pdb(self.get_input_file_name("mini.pdb"),
+                              m, atom.NonWaterPDBSelector())
+        leaves = atom.get_leaves(mpref)
+        ref1 = core.XYZs(leaves[:30])
+        ref2 = core.XYZs(leaves[30:])
+        # Transform the complex
+        mpmdl = atom.read_pdb(self.get_input_file_name("mini.pdb"),
+                              m, atom.NonWaterPDBSelector())
+        leaves = atom.get_leaves(mpmdl)
+        mdl1 = core.XYZs(leaves[:30])
+        mdl2 = core.XYZs(leaves[30:])
+        t = IMP.algebra.Transformation3D(
+            IMP.algebra.get_random_rotation_3d(),
+            IMP.algebra.get_random_vector_in(
+                IMP.algebra.get_unit_bounding_box_3d()))
+        for d in mdl1 + mdl2:
+            core.transform(d, t)
+        # Both components were transformed together, so RMSD ~= 0
+        self.assertAlmostEqual(
+            IMP.atom.get_pairwise_rmsd_score(ref1, ref2, mdl1, mdl2),
+            0.0, delta=1e-6)
+        # Transform second component
+        t2 = IMP.algebra.Transformation3D(
+            IMP.algebra.get_identity_rotation_3d(),
+            IMP.algebra.Vector3D(5.0, 0.0, 0.0))
+        for d in mdl2:
+            core.transform(d, t2)
+        self.assertAlmostEqual(
+            IMP.atom.get_pairwise_rmsd_score(ref1, ref2, mdl1, mdl2),
+            5.0, delta=1e-6)
+
     def test_drms(self):
         """ Test drms measure """
         m = IMP.Model()
diff --git a/modules/atom/test/test_mmcif.py b/modules/atom/test/test_mmcif.py
index 17acfb64a2..db579b45e6 100644
--- a/modules/atom/test/test_mmcif.py
+++ b/modules/atom/test/test_mmcif.py
@@ -385,15 +385,6 @@ def test_chain_selector_multi_char(self):
         """Check reading single chain with multi-char ID from an mmCIF file"""
         m = IMP.Model()
-        # Try deprecated method, will select chains "Z" and "K"
-        with IMP.allow_deprecated():
-            s = IMP.atom.ChainPDBSelector("ZK")
-        mp = IMP.atom.read_mmcif(
-            self.get_input_file_name('chaintest.cif'), m, s)
-        chains = [IMP.atom.Chain(x)
-                  for x in IMP.atom.get_by_type(mp, IMP.atom.CHAIN_TYPE)]
-        self.assertEqual([c.get_id() for c in chains], ['K'])
-
         mp = IMP.atom.read_mmcif(self.get_input_file_name('chaintest.cif'),
                                  m, IMP.atom.ChainPDBSelector(["Z", "K"]))
         chains = [IMP.atom.Chain(x)
diff --git a/modules/cgal/include/internal/knn.h b/modules/cgal/include/internal/knn.h
index 4fe5520d34..eddd2cd174 100644
--- a/modules/cgal/include/internal/knn.h
+++ b/modules/cgal/include/internal/knn.h
@@ -11,7 +11,6 @@
 #include
 #include
 #include
-#include
 #include
 
 IMPCGAL_BEGIN_INTERNAL_NAMESPACE
diff --git a/modules/cgal/src/internal/knn.cpp b/modules/cgal/src/internal/knn.cpp
index 67a66b1912..de4f4cd576 100644
--- a/modules/cgal/src/internal/knn.cpp
+++ b/modules/cgal/src/internal/knn.cpp
@@ -9,7 +9,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
diff --git a/modules/cnmultifit/pyext/src/rmsd.py b/modules/cnmultifit/pyext/src/rmsd.py
index 24c1d2f377..ac9c34b6cc 100755
--- a/modules/cnmultifit/pyext/src/rmsd.py
+++ b/modules/cnmultifit/pyext/src/rmsd.py
@@ -7,9 +7,7 @@
 
 def parse_args():
-    desc = """%prog [options]
-
-
+    desc = """
 This program calculates the RMSD between modeled cyclic symmetric complexes
 and the reference structure. The RMSD and cross correlation of each complex
 is written into a file called rmsd.output.
diff --git a/modules/container/benchmark/benchmark_close_pairs_finders.cpp b/modules/container/benchmark/benchmark_close_pairs_finders.cpp
index 1945f6978f..c4c8628123 100644
--- a/modules/container/benchmark/benchmark_close_pairs_finders.cpp
+++ b/modules/container/benchmark/benchmark_close_pairs_finders.cpp
@@ -24,7 +24,7 @@ void test_one(std::string name, ClosePairsFinder *cpf, unsigned int n,
   IMP_NEW(Model, m, ());
   ParticlesTemp ps = create_xyzr_particles(m, n, rmin);
   ParticleIndexes psi = IMP::internal::get_index(ps);
-  ::boost::uniform_real<> rand(rmin, rmax);
+  ::boost::random::uniform_real_distribution<> rand(rmin, rmax);
   for (unsigned int i = 0; i < ps.size(); ++i) {
     XYZ(ps[i])
         .set_coordinates(get_random_vector_in(BoundingBox3D(minc, maxc)));
@@ -45,7 +45,7 @@ void test_one(std::string name, ClosePairsFinder *cpf, unsigned int n,
   ParticlesTemp ps1 = create_xyzr_particles(m, n, rmin);
   ParticleIndexes ps0i = IMP::internal::get_index(ps0);
   ParticleIndexes ps1i = IMP::internal::get_index(ps1);
-  ::boost::uniform_real<> rand(rmin, rmax);
+  ::boost::random::uniform_real_distribution<> rand(rmin, rmax);
   for (unsigned int i = 0; i < ps0.size(); ++i) {
     XYZ(ps0[i])
         .set_coordinates(get_random_vector_in(BoundingBox3D(minc, maxc)));
diff --git a/modules/container/benchmark/benchmark_random_collisions.cpp b/modules/container/benchmark/benchmark_random_collisions.cpp
index e263444109..90c8daeac5 100644
--- a/modules/container/benchmark/benchmark_random_collisions.cpp
+++ b/modules/container/benchmark/benchmark_random_collisions.cpp
@@ -58,7 +58,7 @@ void test_one(std::string name, ClosePairsFinder *cpf, unsigned int n,
   IMP_NEW(Model, m, ());
   ParticlesTemp ps = create_xyzr_particles(m, n, rmin);
   ParticleIndexes pis = IMP::internal::get_index(ps);
-  ::boost::uniform_real<> rand(rmin, rmax);
+  ::boost::random::uniform_real_distribution<> rand(rmin, rmax);
   for (unsigned int i = 0; i < ps.size(); ++i) {
     XYZR(ps[i]).set_radius(rand(random_number_generator));
   }
diff --git a/modules/container/include/ConsecutivePairContainer.h b/modules/container/include/ConsecutivePairContainer.h
index b3abae4ecd..76fd77a7f6 100644
--- a/modules/container/include/ConsecutivePairContainer.h
+++ b/modules/container/include/ConsecutivePairContainer.h
@@ -37,10 +37,10 @@ class IMPCONTAINEREXPORT ConsecutivePairContainer : public PairContainer {
   void init();
 
   bool get_contains(const ParticleIndexPair &p) const {
-    if (!get_model()->get_has_attribute(key_, p[0])) return false;
-    int ia = get_model()->get_attribute(key_, p[0]);
-    if (!get_model()->get_has_attribute(key_, p[1])) return false;
-    int ib = get_model()->get_attribute(key_, p[1]);
+    if (!get_model()->get_has_attribute(key_, std::get<0>(p))) return false;
+    int ia = get_model()->get_attribute(key_, std::get<0>(p));
+    if (!get_model()->get_has_attribute(key_, std::get<1>(p))) return false;
+    int ib = get_model()->get_attribute(key_, std::get<1>(p));
     return std::abs(ia - ib) == 1;
   }
@@ -117,16 +117,17 @@ class IMPCONTAINEREXPORT ExclusiveConsecutivePairContainer
       const ParticleIndexPair &pp) {
     ObjectKey ok =
         ExclusiveConsecutivePairContainer::get_exclusive_object_key();
-    bool has_eok_0 = m->get_has_attribute(ok, pp[0]);
-    bool has_eok_1= m->get_has_attribute(ok, pp[1]);
+    bool has_eok_0 = m->get_has_attribute(ok, std::get<0>(pp));
+    bool has_eok_1= m->get_has_attribute(ok, std::get<1>(pp));
     if ( !has_eok_0 || !has_eok_1 )
       return false;
-    if (m->get_attribute(ok, pp[0]) != m->get_attribute(ok, pp[1])) {
+    if (m->get_attribute(ok, std::get<0>(pp))
+        != m->get_attribute(ok, std::get<1>(pp))) {
       return false;
     }
     IntKey k = ExclusiveConsecutivePairContainer::get_exclusive_key();
-    int ia = m->get_attribute(k, pp[0]);
-    int ib = m->get_attribute(k, pp[1]);
+    int ia = m->get_attribute(k, std::get<0>(pp));
+    int ib = m->get_attribute(k, std::get<1>(pp));
     return std::abs(ia - ib) == 1;
   }
   void init();
diff --git a/modules/container/src/internal/ClosePairContainer.cpp b/modules/container/src/internal/ClosePairContainer.cpp
index 34240034be..29fd73ef6d 100644
--- a/modules/container/src/internal/ClosePairContainer.cpp
+++ b/modules/container/src/internal/ClosePairContainer.cpp
@@ -140,15 +140,16 @@ void ClosePairContainer::check_list(bool check_slack) const {
     core::internal::filter_close_pairs(this, found);
     IMP_LOG_TERSE("In check found " << found << std::endl);
     for (unsigned int i = 0; i < found.size(); ++i) {
-      ParticleIndexPair pi(found[i][0], found[i][1]);
-      ParticleIndexPair pii(found[i][1], found[i][0]);
+      ParticleIndexPair pi(std::get<0>(found[i]), std::get<1>(found[i]));
+      ParticleIndexPair pii(std::get<1>(found[i]), std::get<0>(found[i]));
       IMP_INTERNAL_CHECK(
           existings.find(pi) != existings.end() ||
               existings.find(pii) != existings.end(),
           "Pair " << pi << " not found in close pairs list"
                   << " at distance "
-                  << core::get_distance(core::XYZR(get_model(), found[i][0]),
-                                        core::XYZR(get_model(), found[i][1])));
+                  << core::get_distance(
+                         core::XYZR(get_model(), std::get<0>(found[i])),
+                         core::XYZR(get_model(), std::get<1>(found[i]))));
     }
   }
 }
diff --git a/modules/container/test/test_non_rigid.cpp b/modules/container/test/test_non_rigid.cpp
index f8bf7eadc4..d949cf5213 100644
--- a/modules/container/test/test_non_rigid.cpp
+++ b/modules/container/test/test_non_rigid.cpp
@@ -122,8 +122,8 @@ void check_close_pairs(IMP::Model *m,
     for (unsigned int j = 0; j < i; ++j) {
       IMP::ParticleIndexPair pp(ps[i], ps[j]);
      IMP::ParticleIndexPair ppi(ps[j], ps[i]);
-      IMP::core::XYZR d0(m, pp[0]);
-      IMP::core::XYZR d1(m, pp[1]);
+      IMP::core::XYZR d0(m, std::get<0>(pp));
+      IMP::core::XYZR d1(m, std::get<1>(pp));
       if (IMP::core::RigidMember::get_is_setup(m, ps[i]) &&
           IMP::core::RigidMember::get_is_setup(m, ps[j]) &&
          IMP::core::RigidMember(m, ps[i]).get_rigid_body() ==
diff --git a/modules/core/dependency/python-ihm.cmake b/modules/core/dependency/python-ihm.cmake
index 047e5e9845..e92126d02e 100644
--- a/modules/core/dependency/python-ihm.cmake
+++ b/modules/core/dependency/python-ihm.cmake
@@ -18,39 +18,47 @@ execute_process(COMMAND ${CMAKE_COMMAND} -E rm -rf
 
 else(IMP_USE_SYSTEM_IHM)
 
-execute_process(COMMAND ${CMAKE_COMMAND} -E make_directory
-                ${CMAKE_BINARY_DIR}/lib/ihm
-                RESULT_VARIABLE setup)
-if(NOT ${setup} EQUAL 0)
-  message(FATAL_ERROR "Failed making ${CMAKE_BINARY_DIR}/lib/ihm directory")
-endif()
-
-FILE(GLOB ihmpys
-     "${CMAKE_SOURCE_DIR}/modules/core/dependency/python-ihm/ihm/*.py")
-if (WIN32)
-  foreach(ihmpy ${ihmpys})
-    get_filename_component(ihmpyname ${ihmpy} NAME)
-    execute_process(COMMAND ${CMAKE_COMMAND} -E copy
-                    ${ihmpy} ${CMAKE_BINARY_DIR}/lib/ihm/${ihmpyname}
-                    RESULT_VARIABLE setup)
-    if(NOT ${setup} EQUAL 0)
-      message(FATAL_ERROR "Failed copying ${ihmpy}")
-    endif()
-  endforeach()
-else()
-  foreach(ihmpy ${ihmpys})
-    get_filename_component(ihmpyname ${ihmpy} NAME)
-    execute_process(COMMAND ${CMAKE_COMMAND} -E create_symlink
-                    ${ihmpy} ${CMAKE_BINARY_DIR}/lib/ihm/${ihmpyname}
-                    RESULT_VARIABLE setup)
-    if(NOT ${setup} EQUAL 0)
-      message(FATAL_ERROR "Failed symlinking ${ihmpy}")
-    endif()
-  endforeach()
-endif()
+function(link_python_ihm_pys srcdir bindir)
+  set(srcdir ${ARGV0})
+  set(bindir ${ARGV1})
+  execute_process(COMMAND ${CMAKE_COMMAND} -E make_directory
+                  ${CMAKE_BINARY_DIR}/${bindir}
+                  RESULT_VARIABLE setup)
+  if(NOT ${setup} EQUAL 0)
+    message(FATAL_ERROR "Failed making ${CMAKE_BINARY_DIR}/${bindir} directory")
+  endif()
+
+  FILE(GLOB ihmpys
+       "${CMAKE_SOURCE_DIR}/modules/core/dependency/python-ihm/${srcdir}/*.py")
+  if (WIN32)
+    foreach(ihmpy ${ihmpys})
+      get_filename_component(ihmpyname ${ihmpy} NAME)
+      execute_process(COMMAND ${CMAKE_COMMAND} -E copy
+                      ${ihmpy} ${CMAKE_BINARY_DIR}/${bindir}/${ihmpyname}
+                      RESULT_VARIABLE setup)
+      if(NOT ${setup} EQUAL 0)
+        message(FATAL_ERROR "Failed copying ${ihmpy}")
+      endif()
+    endforeach()
+  else()
+    foreach(ihmpy ${ihmpys})
+      get_filename_component(ihmpyname ${ihmpy} NAME)
+      execute_process(COMMAND ${CMAKE_COMMAND} -E create_symlink
+                      ${ihmpy} ${CMAKE_BINARY_DIR}/${bindir}/${ihmpyname}
+                      RESULT_VARIABLE setup)
+      if(NOT ${setup} EQUAL 0)
+        message(FATAL_ERROR "Failed symlinking ${ihmpy}")
+      endif()
+    endforeach()
+  endif()
+endfunction(link_python_ihm_pys)
+
+link_python_ihm_pys("ihm" "lib/ihm")
+link_python_ihm_pys("ihm/util" "lib/ihm/util")
 
 # Install Python modules
 install_deref(${CMAKE_BINARY_DIR}/lib/ihm * ${CMAKE_INSTALL_PYTHONDIR}/ihm)
+install_deref(${CMAKE_BINARY_DIR}/lib/ihm/util * ${CMAKE_INSTALL_PYTHONDIR}/ihm/util)
 
 # Build C extension
diff --git a/modules/core/dependency/python-ihm/.github/workflows/codeql-analysis.yml b/modules/core/dependency/python-ihm/.github/workflows/codeql-analysis.yml
index e245e2d3da..82140be134 100644
--- a/modules/core/dependency/python-ihm/.github/workflows/codeql-analysis.yml
+++ b/modules/core/dependency/python-ihm/.github/workflows/codeql-analysis.yml
@@ -39,11 +39,11 @@ jobs:
     steps:
     - name: Checkout repository
-      uses: actions/checkout@v2
+      uses: actions/checkout@v4
 
     # Initializes the CodeQL tools for scanning.
     - name: Initialize CodeQL
-      uses: github/codeql-action/init@v1
+      uses: github/codeql-action/init@v2
      with:
        languages: ${{ matrix.language }}
        # If you wish to specify custom queries, you can do so here or in a config file.
@@ -55,4 +55,4 @@ jobs:
        python setup.py build_ext --inplace -t build
 
     - name: Perform CodeQL Analysis
-      uses: github/codeql-action/analyze@v1
+      uses: github/codeql-action/analyze@v2
diff --git a/modules/core/dependency/python-ihm/.github/workflows/linter.yml b/modules/core/dependency/python-ihm/.github/workflows/linter.yml
index d9ba1c3fdd..aebfd1a8de 100644
--- a/modules/core/dependency/python-ihm/.github/workflows/linter.yml
+++ b/modules/core/dependency/python-ihm/.github/workflows/linter.yml
@@ -13,7 +13,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout Code
-        uses: actions/checkout@v2
+        uses: actions/checkout@v4
       - name: Lint Code Base
        uses: docker://github/super-linter:v2.1.0
        env:
diff --git a/modules/core/dependency/python-ihm/ChangeLog.rst b/modules/core/dependency/python-ihm/ChangeLog.rst
index a3d905934e..6c26532603 100644
--- a/modules/core/dependency/python-ihm/ChangeLog.rst
+++ b/modules/core/dependency/python-ihm/ChangeLog.rst
@@ -1,3 +1,33 @@
+1.1 - 2024-05-09
+================
+ - :class:`ihm.System` now allows for one or more official database IDs to
+   be associated with the system using the new :class:`ihm.Database` class.
+   This maps to the mmCIF ``_database_2`` category (#135).
+ - :class:`ihm.location.FileLocation` now allows for an optional file format
+   to be specified (#139).
+ - The ``util/make-mmcif.py`` script is now included in the installed package,
+   so can be run if desired with ``python3 -m ihm.util.make_mmcif`` (#134).
+ - Bugfix: allow for file sizes in input mmCIF files to be floating point
+   values (#138).
+ - Bugfix: the 'Other' content type is now handled correctly when reading
+   information about external files from an mmCIF file (#139).
+
+1.0 - 2024-02-13
+================
+ - Support for multi-state schemes (such as kinetic rates and relaxation
+   times for conversions between states) was added;
+   see :mod:`ihm.multi_state_scheme`.
+ - Residue numbering in non-polymer, water, and branched entities should
+   now be better handled, no longer requiring the various scheme tables
+   to precede ``atom_site``. If you subclass :class:`ihm.model.Model`, atoms
+   may need to be renumbered; see :meth:`ihm.model.Model.add_atom` (#130).
+ - Original author-provided numbering can now be provided for waters,
+   using the ``orig_auth_seq_id_map`` argument to :class:`ihm.WaterAsymUnit`.
+ - The make-mmcif.py utility script now has basic functionality for
+   combining multiple input files into one, relabeling chain IDs if necessary.
+ - An :class:`ihm.Entity` containing just a single sugar is now classified
+   as a nonpolymer, not branched.
+
 0.43 - 2023-12-08
 =================
 - Branched and polymeric empty entities are now distinguished
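A minimal sketch of the new Database support described in the 1.1 changelog entry above (constructor arguments follow the ihm/__init__.py changes later in this diff; the id/code values are placeholders):

```python
import sys
import ihm
import ihm.dumper

# Associate an official database entry with the system (new in 1.1);
# the id/code values below are placeholders.
system = ihm.System(title='example system',
                    databases=[ihm.Database(id='PDB', code='1abc')])
# Writes the _database_2 category along with the rest of the mmCIF file
ihm.dumper.write(sys.stdout, [system])
```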
diff --git a/modules/core/dependency/python-ihm/LICENSE b/modules/core/dependency/python-ihm/LICENSE
index 7a52608174..317b1f479f 100644
--- a/modules/core/dependency/python-ihm/LICENSE
+++ b/modules/core/dependency/python-ihm/LICENSE
@@ -1,6 +1,6 @@
 MIT License
 
-Copyright (c) 2018-2023 IHM Working Group
+Copyright (c) 2018-2024 IHM Working Group
 
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
diff --git a/modules/core/dependency/python-ihm/MANIFEST.in b/modules/core/dependency/python-ihm/MANIFEST.in
index b9081bb5ca..4ec906d30d 100644
--- a/modules/core/dependency/python-ihm/MANIFEST.in
+++ b/modules/core/dependency/python-ihm/MANIFEST.in
@@ -4,4 +4,4 @@ include examples/*
 include util/make-mmcif.py
 include src/ihm_format.h
 include src/ihm_format.i
-include src/ihm_format_wrap_0.43.c
+include src/ihm_format_wrap_1.1.c
diff --git a/modules/core/dependency/python-ihm/README.md b/modules/core/dependency/python-ihm/README.md
index f3be699e7b..e0c8485ddd 100644
--- a/modules/core/dependency/python-ihm/README.md
+++ b/modules/core/dependency/python-ihm/README.md
@@ -6,9 +6,9 @@
 [![Windows Build Status](https://ci.appveyor.com/api/projects/status/5o28oe477ii8ur4h?svg=true)](https://ci.appveyor.com/project/benmwebb/python-ihm)
 [![codecov](https://codecov.io/gh/ihmwg/python-ihm/branch/main/graph/badge.svg)](https://codecov.io/gh/ihmwg/python-ihm)
 
-This is a Python package to assist in handling [mmCIF](http://mmcif.wwpdb.org/)
+This is a Python package to assist in handling [mmCIF](https://mmcif.wwpdb.org/)
 and [BinaryCIF](https://github.com/dsehnal/BinaryCIF) files compliant with the
-[integrative/hybrid modeling (IHM)](http://mmcif.wwpdb.org/dictionaries/mmcif_ihm.dic/Index/)
+[integrative/hybrid modeling (IHM)](https://mmcif.wwpdb.org/dictionaries/mmcif_ihm_ext.dic/Index/)
 extension. It works with Python 2.7 or Python 3.
 
 To handle non-integrative theoretical models (for example, homology models),
@@ -42,6 +42,14 @@ On a Fedora or RedHat Enterprise Linux box, install with
 dnf copr enable salilab/salilab; dnf install python3-ihm
 ```
 
+On an Ubuntu LTS box, install from
+[our PPA](https://launchpad.net/~salilab/+archive/ubuntu/ppa) with
+
+```
+apt install software-properties-common; add-apt-repository ppa:salilab/ppa;
+apt install python3-ihm
+```
+
 Alternatively, install with pip:
 
 ```
diff --git a/modules/core/dependency/python-ihm/docs/conf.py b/modules/core/dependency/python-ihm/docs/conf.py
index 866dd4f104..e15de8a007 100644
--- a/modules/core/dependency/python-ihm/docs/conf.py
+++ b/modules/core/dependency/python-ihm/docs/conf.py
@@ -156,3 +156,6 @@
      author, 'Python-IHM', 'One line description of project.',
      'Miscellaneous'),
 ]
+
+# Warn about broken links to classes, etc.
+nitpicky = True
diff --git a/modules/core/dependency/python-ihm/docs/design.rst b/modules/core/dependency/python-ihm/docs/design.rst
index c3062c08f4..9080405c13 100644
--- a/modules/core/dependency/python-ihm/docs/design.rst
+++ b/modules/core/dependency/python-ihm/docs/design.rst
@@ -27,7 +27,7 @@ Types rather than enums
 Where the underlying IHM mmCIF dictionary uses an enumeration, generally
 this corresponds to separate sibling classes in this package.
 For example, two datasets which differ only in their ``data_type``
-`in the dictionary `_
+`in the dictionary `_
 (such as a electron microscopy density map and small angle scattering data)
 are represented with two classes in this package:
 :class:`ihm.dataset.EMDensityDataset` and :class:`ihm.dataset.SASDataset`.
@@ -42,7 +42,7 @@
 This is naturally represented in Python as a hierarchy of classes, with
 members pointing to other objects as appropriate. IDs are not used to
 look up other objects, and are only used internally to populate the tables.
 For example, to group multiple models together, the dictionary assigns all of
-the models the same `model_group_id `_
+the models the same `model_group id `_
 while in the Python package the :class:`ihm.model.Model` objects are placed
 into a :class:`ihm.model.ModelGroup` object, which acts like a simple
 Python list.
@@ -71,7 +71,7 @@ mmCIF backend
 =============
 
 The classes in this package roughly correspond to
-`categories `_
+`categories `_
 in the underlying IHM mmCIF dictionary. This allows for simple output of
 mmCIF formatted files, but also allows for the potential future support for
 other file formats that support the dictionary or a subset of it, such
diff --git a/modules/core/dependency/python-ihm/docs/introduction.rst b/modules/core/dependency/python-ihm/docs/introduction.rst
index 7e4d83493d..7da9b2c50b 100644
--- a/modules/core/dependency/python-ihm/docs/introduction.rst
+++ b/modules/core/dependency/python-ihm/docs/introduction.rst
@@ -20,11 +20,11 @@ application with a set of Python objects. This includes
 
 Once created, this set of Python objects can be written to an mmCIF file
 that is compliant with the
-`IHM extension `_
-to the `PDBx/mmCIF dictionary `_,
+`IHMCIF extension `_
+to the `PDBx/mmCIF dictionary `_,
 suitable for deposition in the
 `PDB-Dev repository `_. The files are best viewed
-in a viewer that supports IHM mmCIF, such as
+in a viewer that supports IHMCIF, such as
 `UCSF ChimeraX `_, although they may be partially
 viewable in regular PDBx mmCIF viewers (likely only the atomic
 coordinates will be visible).
diff --git a/modules/core/dependency/python-ihm/docs/main.rst b/modules/core/dependency/python-ihm/docs/main.rst
index 800f7c40e1..99dab30146 100644
--- a/modules/core/dependency/python-ihm/docs/main.rst
+++ b/modules/core/dependency/python-ihm/docs/main.rst
@@ -12,6 +12,9 @@ The :mod:`ihm` Python module
 .. autoclass:: System
    :members:
 
+.. autoclass:: Database
+   :members:
+
 .. autoclass:: Software
    :members:
diff --git a/modules/core/dependency/python-ihm/docs/usage.rst b/modules/core/dependency/python-ihm/docs/usage.rst
index fc76b2213b..1778891d27 100644
--- a/modules/core/dependency/python-ihm/docs/usage.rst
+++ b/modules/core/dependency/python-ihm/docs/usage.rst
@@ -118,7 +118,11 @@ of the data used in modeling:
 
 - *Internal numbering*. Residues are always numbered sequentially starting at
   1 in an :class:`~ihm.Entity`. All references to residues or residue ranges in
-  the library use this numbering.
+  the library use this numbering. For polymers, this internal numbering matches
+  the ``seq_id`` used in the mmCIF dictionary, while for branched entities,
+  this matches ``num`` in the dictionary. (For other types of entities
+  (non-polymers, waters) ``seq_id`` is not used in mmCIF,
+  but the residues are still numbered sequentially from 1 in this library.)
 
 - *Author-provided numbering*. If a different numbering scheme is used by the
   authors, for example to correspond to the numbering of the original sequence
   that is modeled, this can be given as an author-provided numbering for
diff --git a/modules/core/dependency/python-ihm/ihm/__init__.py b/modules/core/dependency/python-ihm/ihm/__init__.py
index 16bc1dc549..6ac0b4ba1a 100644
--- a/modules/core/dependency/python-ihm/ihm/__init__.py
+++ b/modules/core/dependency/python-ihm/ihm/__init__.py
@@ -20,7 +20,7 @@
 import json
 from . import util
 
-__version__ = '0.43'
+__version__ = '1.1'
 
 
 class __UnknownValue(object):
@@ -74,14 +74,22 @@ class System(object):
 
        :param str id: Unique identifier for this system in the mmCIF file.
        :param str model_details: Detailed description of the system, like an
              abstract.
+       :param databases: If this system is part of one or more official
+              databases (e.g. PDB, PDB-Dev, SwissModel), details of
+              the database identifiers.
+       :type databases: sequence of :class:`Database`
    """
 
    structure_determination_methodology = "integrative"
 
-    def __init__(self, title=None, id='model', model_details=None):
+    def __init__(self, title=None, id='model', model_details=None,
+                 databases=[]):
        self.id = id
        self.title = title
        self.model_details = model_details
+        self.databases = []
+        self.databases.extend(databases)
+        self._database_status = {}
 
        #: List of plain text comments. These will be added to the top of
        #: the mmCIF file.
@@ -93,7 +101,7 @@ def __init__(self, title=None, id='model', model_details=None):
        #: List of all authors of this system, as a list of strings (last name
        #: followed by initials, e.g. "Smith, A.J."). When writing out a file,
        #: if this list is empty, the set of all citation authors (see
-        #: :attr:`Citation.authors`) is used instead.
+        #: :class:`Citation`) is used instead.
        self.authors = []
 
        #: List of all grants that supported this work. See :class:`Grant`.
@@ -663,6 +671,24 @@ def _check_after_write(self):
                    "can be grouped." % g)
 
 
+class Database(object):
+    """Information about a System that is part of an official database.
+
+       If a :class:`System` is part of one or more official databases
+       (e.g. PDB, PDB-Dev, SwissModel), this class contains details of the
+       database identifiers. It should be passed to the :class:`System`
+       constructor.
+
+       :param str id: Abbreviated name of the database (e.g. PDB).
+       :param str code: Identifier from the database (e.g. 1abc).
+       :param str doi: Digital Object Identifier of the database entry.
+       :param str accession: Extended accession code of the database entry.
+    """
+    def __init__(self, id, code, doi=None, accession=None):
+        self.id, self.code = id, code
+        self.doi, self.accession = doi, accession
+
+
 class Software(object):
    """Software used as part of the modeling protocol.
@@ -681,7 +707,7 @@ class Software(object):
       passed to
       :class:`ihm.startmodel.StartingModel`,
       :class:`ihm.protocol.Step`,
       :class:`ihm.analysis.Step`, or
-       :class:`ihm.restraint.PredictedContactResstraint` objects.
+       :class:`ihm.restraint.PredictedContactRestraint` objects.
    """
    def __init__(self, name, classification, description, location,
                 type='program', version=None, citation=None):
@@ -902,7 +928,7 @@ def __str__(self):
    def __get_weight(self):
        # Calculate weight from formula
-        if self.formula is None:
+        if self.formula in (None, unknown):
            return
        spl = self.formula.split()
        # Remove formal charge if present
@@ -1250,7 +1276,7 @@ def __init__(self, seq_id, entity=None, asym=None):
        self.seq_id = seq_id
 
    def atom(self, atom_id):
-        """Get a :class:`Atom` in this residue with the given name."""
+        """Get a :class:`~ihm.Atom` in this residue with the given name."""
        return Atom(residue=self, id=atom_id)
 
    def _get_auth_seq_id(self):
@@ -1380,11 +1406,11 @@ def get_chem_comp(s):
 
        #: String descriptors of branched chemical structure.
        #: These generally only make sense for oligosaccharide entities,
-        #: and should be a list of :class:`BranchDescriptor` objects.
+        #: and should be a list of :class:`~ihm.BranchDescriptor` objects.
        self.branch_descriptors = []
 
        #: Any links between components in a branched entity.
-        #: This is a list of :class:`BranchLink` objects.
+        #: This is a list of :class:`~ihm.BranchLink` objects.
        self.branch_links = []
 
    def __str__(self):
@@ -1402,7 +1428,7 @@ def is_polymeric(self):
    def is_branched(self):
        """Return True iff this entity is branched (generally
           an oligosaccharide)"""
-        return ((len(self.sequence) > 0
+        return ((len(self.sequence) > 1
                 and isinstance(self.sequence[0], SaccharideChemComp))
                or (len(self.sequence) == 0 and self._hint_branched))
@@ -1491,7 +1517,7 @@ class AsymUnit(object):
       was modeled.
 
       Note that this class should not be used to describe crystal waters;
-       for that, see :class:`WaterAsymUnit`.
+       for that, see :class:`ihm.WaterAsymUnit`.
 
       :param entity: The unique sequence of this asymmetric unit.
       :type entity: :class:`Entity`
@@ -1523,12 +1549,13 @@ class AsymUnit(object):
              numbering. This differs from `auth_seq_id_map` as the original
              numbering need not follow any defined scheme, while
              `auth_seq_id_map` must follow certain PDB-defined rules. This
-              can either be a mapping type (dict, list, tuple) in which case
+              can be any mapping type (dict, list, tuple) in which case
              ``orig_auth_seq_id = orig_auth_seq_id_map[seq_id]``. If the
              mapping is None (the default), or a given `seq_id` cannot be
              found in the mapping, ``orig_auth_seq_id = auth_seq_id``.
              This mapping is only used in the various `scheme` tables, such
              as ``pdbx_poly_seq_scheme``.
+              See :attr:`System.asym_units`.
 
    """
@@ -1545,6 +1572,11 @@ def __init__(self, entity, details=None, auth_seq_id_map=0, id=None,
        self.id = id
        self._strand_id = strand_id
 
+        #: For branched entities read from files, mapping from provisional
+        #: to final internal numbering (`seq_id`), or None if no mapping is
+        #: necessary. See :meth:`ihm.model.Model.add_atom`.
+ self.num_map = None + def _get_auth_seq_id_ins_code(self, seq_id): if isinstance(self.auth_seq_id_map, int): return seq_id + self.auth_seq_id_map, None @@ -1604,13 +1636,14 @@ class WaterAsymUnit(AsymUnit): """ def __init__(self, entity, number, details=None, auth_seq_id_map=0, - id=None, strand_id=None): + id=None, strand_id=None, orig_auth_seq_id_map=None): if entity.type != 'water': raise TypeError( "WaterAsymUnit can only be used for water entities") super(WaterAsymUnit, self).__init__( entity, details=details, auth_seq_id_map=auth_seq_id_map, - id=id, strand_id=strand_id) + id=id, strand_id=strand_id, + orig_auth_seq_id_map=orig_auth_seq_id_map) self.number = number self._water_sequence = [entity.sequence[0]] * number diff --git a/modules/core/dependency/python-ihm/ihm/analysis.py b/modules/core/dependency/python-ihm/ihm/analysis.py index 2ec151ea83..e1fa71bd0e 100644 --- a/modules/core/dependency/python-ihm/ihm/analysis.py +++ b/modules/core/dependency/python-ihm/ihm/analysis.py @@ -8,9 +8,10 @@ class Step(object): """A single step in an :class:`Analysis`. - Normally one of the base classes is used; see :class:`FilterStep`, - :class:`ClusterStep`, :class:`RescoreStep`, :class:`ValidationStep`, - :class:`OtherStep`, and :class:`EmptyStep`. + Normally one of the more specific derived classes is used; + see :class:`FilterStep`, :class:`ClusterStep`, :class:`RescoreStep`, + :class:`ValidationStep`, and :class:`EmptyStep`, although this base + class can be used for a generic 'other' step. :param str feature: feature energy/score;RMSD;dRMSD;other :param int num_models_begin: The number of models at the beginning diff --git a/modules/core/dependency/python-ihm/ihm/dumper.py b/modules/core/dependency/python-ihm/ihm/dumper.py index a3f297c122..dc3d9f2918 100644 --- a/modules/core/dependency/python-ihm/ihm/dumper.py +++ b/modules/core/dependency/python-ihm/ihm/dumper.py @@ -89,13 +89,13 @@ def dump(self, system, writer): class _AuditConformDumper(Dumper): URL = ("https://raw.githubusercontent.com/" + - "ihmwg/IHM-dictionary/%s/ihm-extension.dic") + "ihmwg/IHMCIF/%s/dist/mmcif_ihm.dic") def dump(self, system, writer): with writer.category("_audit_conform") as lp: # Update to match the version of the IHM dictionary we support: - lp.write(dict_name="ihm-extension.dic", dict_version="1.24", - dict_location=self.URL % "9be59e1") + lp.write(dict_name="mmcif_ihm.dic", dict_version="1.25", + dict_location=self.URL % "460a278") class _StructDumper(Dumper): @@ -219,6 +219,24 @@ def dump(self, system, writer): grant_number=grant.grant_number, ordinal=n + 1) +class _DatabaseDumper(Dumper): + def dump(self, system, writer): + with writer.loop("_database_2", + ["database_id", "database_code", + "pdbx_database_accession", "pdbx_DOI"]) as lp: + for d in system.databases: + lp.write(database_id=d.id, database_code=d.code, + pdbx_DOI=d.doi, + pdbx_database_accession=d.accession) + + +class _DatabaseStatusDumper(Dumper): + def dump(self, system, writer): + with writer.category("_pdbx_database_status") as lp: + # Pass through all data items from a Python dict + lp.write(**system._database_status) + + class _ChemCompDumper(Dumper): def dump(self, system, writer): comps = frozenset(comp for e in system.entities for comp in e.sequence) @@ -745,7 +763,7 @@ class _BranchSchemeDumper(Dumper): def dump(self, system, writer): with writer.loop("_pdbx_branch_scheme", ["asym_id", "entity_id", "mon_id", "num", - "pdb_seq_num", "auth_seq_num", + "pdb_seq_num", "pdb_ins_code", "auth_seq_num", "auth_mon_id", "pdb_mon_id", 
"pdb_asym_id"]) as lp: for asym in system.asym_units: entity = asym.entity @@ -758,7 +776,7 @@ def dump(self, system, writer): lp.write(asym_id=asym._id, pdb_asym_id=asym.strand_id, entity_id=entity._id, num=num + 1, - pdb_seq_num=pdb_seq_num, + pdb_seq_num=pdb_seq_num, pdb_ins_code=ins, auth_seq_num=auth_seq_num, mon_id=comp.id, auth_mon_id=comp.id, pdb_mon_id=comp.id) @@ -979,7 +997,7 @@ def dump_repos(self, writer): def dump_refs(self, writer): with writer.loop("_ihm_external_files", ["id", "reference_id", "file_path", "content_type", - "file_size_bytes", "details"]) as lp: + "file_format", "file_size_bytes", "details"]) as lp: for r in self._ref_by_id: repo = r.repo or self._local_files if r.path is None: @@ -988,6 +1006,7 @@ def dump_refs(self, writer): file_path = self._posix_path(repo._get_full_path(r.path)) lp.write(id=r._id, reference_id=repo._id, file_path=file_path, content_type=r.content_type, + file_format=r.file_format, file_size_bytes=r.file_size, details=r.details) # On Windows systems, convert native paths to POSIX-like (/-separated) @@ -3576,8 +3595,8 @@ class IHMVariant(Variant): """Used to select typical PDBx/IHM file output. See :func:`write`.""" _dumpers = [ _EntryDumper, # must be first - _CollectionDumper, - _StructDumper, _CommentDumper, _AuditConformDumper, _CitationDumper, + _CollectionDumper, _StructDumper, _CommentDumper, _AuditConformDumper, + _DatabaseDumper, _DatabaseStatusDumper, _CitationDumper, _SoftwareDumper, _AuditAuthorDumper, _GrantDumper, _ChemCompDumper, _ChemDescriptorDumper, _EntityDumper, _EntitySrcGenDumper, _EntitySrcNatDumper, _EntitySrcSynDumper, _StructRefDumper, diff --git a/modules/core/dependency/python-ihm/ihm/flr.py b/modules/core/dependency/python-ihm/ihm/flr.py index caa5923e8b..136fa79589 100644 --- a/modules/core/dependency/python-ihm/ihm/flr.py +++ b/modules/core/dependency/python-ihm/ihm/flr.py @@ -203,7 +203,7 @@ class EntityAssembly(object): """The assembly of the entities that are in the system. :param entity: The entity to add. - :type entity: :class:`Entity` + :type entity: :class:`ihm.Entity` :param num_copies: The number of copies for the entity in the assembly. """ @@ -382,7 +382,7 @@ class FRETAnalysis(object): :type lifetime_fit_model: :class:`LifetimeFitModel` :param ref_measurement_group: The group of reference measurements in case of lifetime-based analyses. - :type ref_measurement_group: :class:`LifetimeRefMeasurementGroup` + :type ref_measurement_group: :class:`RefMeasurementGroup` :param str method_name: The method used for the analysis. :param float chi_square_reduced: The chi-square reduced as a quality measure for the fit. diff --git a/modules/core/dependency/python-ihm/ihm/format.py b/modules/core/dependency/python-ihm/ihm/format.py index 0955a4e450..80655ca7d2 100644 --- a/modules/core/dependency/python-ihm/ihm/format.py +++ b/modules/core/dependency/python-ihm/ihm/format.py @@ -12,6 +12,7 @@ import sys import textwrap import operator +import ihm # getargspec is deprecated in Python 3, but getfullargspec has a very # similar interface try: @@ -189,7 +190,7 @@ def loop(self, category, keys): def write_comment(self, comment): """Write a simple comment to the CIF file. The comment will be wrapped if necessary for readability. 
- See :meth:`set_line_wrap`.""" + See :meth:`_set_line_wrap`.""" if self._line_wrap: for line in textwrap.wrap(comment, 78): self.fh.write('# ' + line + '\n') @@ -251,20 +252,34 @@ class _ValueToken(_Token): class _OmittedValueToken(_ValueToken): """A value that is deliberately omitted (the '.' string in mmCIF)""" - pass + def as_mmcif(self): + return "." class _UnknownValueToken(_ValueToken): """A value that is unknown (the '?' string in mmCIF)""" - pass + def as_mmcif(self): + return "?" class _TextValueToken(_ValueToken): """The value of a variable in mmCIF as a piece of text""" - __slots__ = ['txt'] + __slots__ = ['txt', 'quote'] - def __init__(self, txt): + def __init__(self, txt, quote): self.txt = txt + self.quote = quote + + def as_mmcif(self): + if '\n' in self.txt or self.quote == ';': + suffix = ";\n" if self.txt.endswith('\n') else "\n;\n" + return ";" + self.txt + suffix + elif self.quote == "'": + return "'" + self.txt + "'" + elif self.quote == '"' or ' ' in self.txt: + return '"' + self.txt + '"' + else: + return self.txt class _VariableToken(_Token): @@ -281,14 +296,65 @@ def __init__(self, val, linenum): "(%s) on line %d" % (val, linenum)) +class _PreservingVariableToken(_VariableToken): + """A variable name that preserves the original case of the keyword""" + + __slots__ = ['category', 'keyword', 'orig_keyword'] + + def __init__(self, val, linenum): + super(_PreservingVariableToken, self).__init__(val, linenum) + _, _, self.orig_keyword = val.partition('.') + + def as_mmcif(self): + if self.orig_keyword and self.orig_keyword.lower() == self.keyword: + return self.category + '.' + self.orig_keyword + else: + return self.category + '.' + self.keyword + + +class _CommentToken(_Token): + """A comment in mmCIF without the leading '#'""" + __slots__ = ['txt'] + + def __init__(self, txt): + self.txt = txt + + def as_mmcif(self): + return "#" + self.txt + + +class _WhitespaceToken(_Token): + """Space between other mmCIF tokens""" + __slots__ = ['txt'] + + def __init__(self, txt): + self.txt = txt + + def as_mmcif(self): + return self.txt + + +class _EndOfLineToken(_Token): + """End of a line in an mmCIF file""" + def as_mmcif(self): + return "\n" + + class _DataToken(_Token): """A data_* keyword in mmCIF, denoting a new data block""" - pass + __slots__ = ['txt'] + + def __init__(self, txt): + self.txt = txt + + def as_mmcif(self): + return 'data_' + self.txt class _LoopToken(_Token): """A loop_ keyword in mmCIF, denoting the start of a loop construct""" - pass + def as_mmcif(self): + return "loop_" class _SaveToken(_Token): @@ -314,48 +380,8 @@ def python_to_cif(field): for x in getargspec(h.__call__)[0][1:]] -class CifReader(_Reader): - """Class to read an mmCIF file and extract some or all of its data. - - Use :meth:`read_file` to actually read the file. - - :param file fh: Open handle to the mmCIF file - :param dict category_handler: A dict to handle data - extracted from the file. Keys are category names - (e.g. "_entry") and values are objects that have a `__call__` - method and `not_in_file`, `omitted`, and `unknown` attributes. - The names of the arguments to this `__call__` method - are mmCIF keywords that are extracted from the file (for the - keywords tr_vector[N] and rot_matrix[N][M] simply omit the [ - and ] characters, since these are not valid for Python - identifiers). 
The object will be called with the data from - the file as a set of strings, or `not_in_file`, `omitted` or - `unkonwn` for any keyword that is not present in the file, - the mmCIF omitted value (.), or mmCIF unknown value (?) - respectively. (mmCIF keywords are case insensitive, so this - class always treats them as lowercase regardless of the - file contents.) - :param unknown_category_handler: A callable (or `None`) that is called - for each category in the file that isn't handled; it is given - two arguments: the name of the category, and the line in the - file at which the category was encountered (if known, otherwise - None). - :param unknown_keyword_handler: A callable (or `None`) that is called - for each keyword in the file that isn't handled (within a - category that is handled); it is given three arguments: - the names of the category and keyword, and the line in the - file at which the keyword was encountered (if known, - otherwise None). - """ - def __init__(self, fh, category_handler, unknown_category_handler=None, - unknown_keyword_handler=None): - if _format is not None: - c_file = _format.ihm_file_new_from_python(fh) - self._c_format = _format.ihm_reader_new(c_file) - self.category_handler = category_handler - self.unknown_category_handler = unknown_category_handler - self.unknown_keyword_handler = unknown_keyword_handler - self._category_data = {} +class _CifTokenizer(object): + def __init__(self, fh): self.fh = fh self._tokens = [] self._token_index = 0 @@ -374,10 +400,6 @@ def _read_line(self): else: return line - def __del__(self): - if hasattr(self, '_c_format'): - _format.ihm_reader_free(self._c_format) - def _read_multiline_token(self, first_line, ignore_multiline): """Read a semicolon-delimited (multiline) token""" lines = [first_line[1:]] # Skip initial semicolon @@ -392,7 +414,7 @@ def _read_multiline_token(self, first_line, ignore_multiline): elif nextline.startswith(';'): # Strip last newline lines[-1] = lines[-1].rstrip('\r\n') - self._tokens = [_TextValueToken("".join(lines))] + self._tokens = [_TextValueToken("".join(lines), ';')] return elif not ignore_multiline: lines.append(nextline) @@ -414,15 +436,19 @@ def _handle_quoted_token(self, line, strlen, start_pos, quote_type): elif end == strlen - 1 or line[end + 1] in _WHITESPACE: # A quoted string is always a literal string, even if it is # "?" or ".", not an unknown/omitted value - self._tokens.append(_TextValueToken(line[start_pos + 1:end])) + self._tokens.append(_TextValueToken(line[start_pos + 1:end], + quote)) return end + 1 # Step past the closing quote + def _skip_initial_whitespace(self, line, strlen, start_pos): + while start_pos < strlen and line[start_pos] in _WHITESPACE: + start_pos += 1 + return start_pos + def _extract_line_token(self, line, strlen, start_pos): """Extract the next token from the given line starting at start_pos, populating self._tokens. 
The new start_pos is returned.""" - # Skip initial whitespace - while start_pos < strlen and line[start_pos] in _WHITESPACE: - start_pos += 1 + start_pos = self._skip_initial_whitespace(line, strlen, start_pos) if start_pos >= strlen: return strlen if line[start_pos] == '"': @@ -431,6 +457,7 @@ def _extract_line_token(self, line, strlen, start_pos): return self._handle_quoted_token(line, strlen, start_pos, "Single") elif line[start_pos] == "#": # Comment - discard the rest of the line + self._handle_comment(line, start_pos) return strlen else: # Find end of token (whitespace or end of line) @@ -441,11 +468,11 @@ def _extract_line_token(self, line, strlen, start_pos): if val == 'loop_': tok = _LoopToken() elif val.startswith('data_'): - tok = _DataToken() + tok = _DataToken(val[5:]) elif val.startswith('save_'): tok = _SaveToken() elif val.startswith('_'): - tok = _VariableToken(val, self._linenum) + tok = self._handle_variable_token(val, self._linenum) elif val == '.': tok = _OmittedValueToken() elif val == '?': @@ -454,14 +481,22 @@ def _extract_line_token(self, line, strlen, start_pos): # Note that we do no special processing for other reserved # words (global_, save_, stop_). But the probability of # them occurring where we expect a value is pretty small. - tok = _TextValueToken(val) # don't alter case of values + tok = _TextValueToken(val, None) # don't alter case of values self._tokens.append(tok) return end_pos + def _handle_variable_token(self, val, linenum): + return _VariableToken(val, linenum) + + def _handle_comment(self, line, start_pos): + """Potentially handle a comment that spans line[start_pos:].""" + pass + def _tokenize(self, line): """Break up a line into tokens, populating self._tokens""" self._tokens = [] if line.startswith('#'): + self._handle_comment(line, 0) return # Skip comment lines start_pos = 0 strlen = len(line) @@ -494,6 +529,321 @@ def _get_token(self, ignore_multiline=False): self._token_index += 1 return self._tokens[self._token_index - 1] + +class _PreservingCifTokenizer(_CifTokenizer): + """A tokenizer subclass which preserves comments, case and whitespace""" + + def _tokenize(self, line): + _CifTokenizer._tokenize(self, line) + self._tokens.append(_EndOfLineToken()) + + def _handle_comment(self, line, start_pos): + self._tokens.append(_CommentToken(line[start_pos + 1:])) + + def _handle_variable_token(self, val, linenum): + return _PreservingVariableToken(val, linenum) + + def _skip_initial_whitespace(self, line, strlen, start_pos): + end_pos = start_pos + while end_pos < strlen and line[end_pos] in _WHITESPACE: + end_pos += 1 + if end_pos > start_pos: + self._tokens.append(_WhitespaceToken(line[start_pos:end_pos])) + return end_pos + + +class _CategoryTokenGroup(object): + """A group of tokens which set a single data item""" + def __init__(self, vartoken, valtoken): + self.vartoken, self.valtoken = vartoken, valtoken + + def __str__(self): + return ("<_CategoryTokenGroup(%s, %s)>" + % (self.vartoken.as_mmcif(), self.valtoken.token.as_mmcif())) + + def as_mmcif(self): + return self.vartoken.as_mmcif() + self.valtoken.as_mmcif() + "\n" + + def __set_value(self, val): + self.valtoken.value = val + + category = property(lambda self: self.vartoken.category) + keyword = property(lambda self: self.vartoken.keyword) + value = property(lambda self: self.valtoken.value, __set_value) + + +class _LoopHeaderTokenGroup(object): + """A group of tokens that form the start of a loop_ construct""" + def __init__(self, looptoken, category, keywords, end_spacers): + 
self._loop, self.category = looptoken, category + self.keywords = keywords + self.end_spacers = end_spacers + + def keyword_index(self, keyword): + """Get the zero-based index of the given keyword, or ValueError""" + return [k.token.keyword for k in self.keywords].index(keyword) + + def __str__(self): + return ("<_LoopHeaderTokenGroup(%s, %s)>" + % (self.category, + str([k.token.keyword for k in self.keywords]))) + + def as_mmcif(self): + all_tokens = [self._loop] + self.keywords + self.end_spacers + return "".join(x.as_mmcif() for x in all_tokens) + + +class _LoopRowTokenGroup(object): + """A group of tokens that represent one row in a loop_ construct""" + def __init__(self, items): + self.items = items + + def as_mmcif(self): + return "".join(x.as_mmcif() for x in self.items) + + +class _SpacedToken(object): + """A token with zero or more leading whitespace or newline tokens""" + def __init__(self, spacers, token): + self.spacers, self.token = spacers, token + + def as_mmcif(self): + return ("".join(x.as_mmcif() for x in self.spacers) + + self.token.as_mmcif()) + + def __get_value(self): + if isinstance(self.token, _OmittedValueToken): + return None + elif isinstance(self.token, _UnknownValueToken): + return ihm.unknown + else: + return self.token.txt + + def __set_value(self, val): + if val is None: + self.token = _OmittedValueToken() + elif val is ihm.unknown: + self.token = _UnknownValueToken() + elif isinstance(self.token, _TextValueToken): + self.token.txt = val + else: + self.token = _TextValueToken(val, quote=None) + + value = property(__get_value, __set_value) + + +class _ChangeValueFilter(object): + def __init__(self, target, old, new): + ts = target.split('.') + if len(ts) == 1 or not ts[0]: + self.category = None + else: + self.category = ts[0] + self.keyword = ts[-1] + self.old, self.new = old, new + + def filter_category(self, tok): + if ((self.category is None or tok.category == self.category) + and tok.keyword == self.keyword and tok.value == self.old): + tok.value = self.new + return tok + + def get_loop_filter(self, tok): + if self.category is None or tok.category == self.category: + try: + keyword_index = tok.keyword_index(self.keyword) + except ValueError: + return + + def loop_filter(t): + if t.items[keyword_index].value == self.old: + t.items[keyword_index].value = self.new + return t + return loop_filter + + +class _PreservingCifReader(_PreservingCifTokenizer): + """Read an mmCIF file and break it into tokens""" + def __init__(self, fh): + super(_PreservingCifReader, self).__init__(fh) + + def read_file(self, filters=None): + """Read the file and yield tokens and/or token groups""" + if filters is None: + return self._read_file_internal() + else: + return self._read_file_with_filters(filters) + + def _read_file_with_filters(self, filters): + loop_filters = None + for tok in self._read_file_internal(): + if isinstance(tok, _CategoryTokenGroup): + tok = self._filter_category(tok, filters) + elif isinstance(tok, ihm.format._LoopHeaderTokenGroup): + loop_filters = [f.get_loop_filter(tok) for f in filters] + loop_filters = [f for f in loop_filters if f is not None] + elif (isinstance(tok, ihm.format._LoopRowTokenGroup) + and loop_filters): + tok = self._filter_loop(tok, loop_filters) + if tok is not None: + yield tok + + def _filter_category(self, tok, filters): + for f in filters: + tok = f.filter_category(tok) + if tok is None: + return + return tok + + def _filter_loop(self, tok, filters): + for f in filters: + tok = f(tok) + if tok is None: + return + return tok + + 
def _read_file_internal(self): + while True: + token = self._get_token() + if token is None: + break + if isinstance(token, _VariableToken): + yield self._read_value(token) + elif isinstance(token, _LoopToken): + for tok in self._read_loop(token): + yield tok + # Did we hit the end of the file? + if self._token_index < 0: + break + else: + yield token + + def _get_spaced_token(self): + """Get the next token plus any number of leading space/EOL tokens""" + spacers = [] + while True: + token = self._get_token() + if isinstance(token, (_EndOfLineToken, _WhitespaceToken)): + spacers.append(token) + else: + return _SpacedToken(spacers, token) + + def _read_value(self, vartoken): + """Read a line that sets a single value, e.g. "_entry.id 1YTI""" + spval = self._get_spaced_token() + if not isinstance(spval.token, _ValueToken): + raise CifParserError( + "No valid value found for %s.%s on line %d" + % (vartoken.category, vartoken.keyword, self._linenum)) + eoltok = self._get_token() + if not isinstance(eoltok, _EndOfLineToken): + raise CifParserError( + "No end of line after %s.%s on line %d" + % (vartoken.category, vartoken.keyword, self._linenum)) + return _CategoryTokenGroup(vartoken, spval) + + def _read_loop(self, looptoken): + """Handle a loop_ construct""" + header = self._read_loop_header(looptoken) + yield header + for line in self._read_loop_data(header.keywords): + yield line + + def _read_loop_header(self, looptoken): + """Read the set of keywords for a loop_ construct""" + category = None + keywords = [] + while True: + spt = self._get_spaced_token() + if isinstance(spt.token, _VariableToken): + if category is None: + category = spt.token.category + elif category != spt.token.category: + raise CifParserError( + "mmCIF files cannot contain multiple " + "categories within a single loop at line %d" + % self._linenum) + keywords.append(spt) + elif isinstance(spt.token, _ValueToken): + # OK, end of keywords; proceed on to values + self._unget_token() + return _LoopHeaderTokenGroup(looptoken, category, keywords, + spt.spacers) + else: + raise CifParserError("Was expecting a keyword or value for " + "loop at line %d" % self._linenum) + + def _read_loop_data(self, keywords): + """Read the data for a loop_ construct""" + while True: + items = [] + for i, keyword in enumerate(keywords): + spt = self._get_spaced_token() + if isinstance(spt.token, _ValueToken): + items.append(spt) + elif i == 0: + # OK, end of the loop + for s in spt.spacers: + yield s + if spt.token is not None: + self._unget_token() + return + else: + raise CifParserError( + "Wrong number of data values in loop " + "(should be an exact multiple of the number " + "of keys) at line %d" % self._linenum) + yield _LoopRowTokenGroup(items) + + +class CifReader(_Reader, _CifTokenizer): + """Class to read an mmCIF file and extract some or all of its data. + + Use :meth:`read_file` to actually read the file. + + :param file fh: Open handle to the mmCIF file + :param dict category_handler: A dict to handle data + extracted from the file. Keys are category names + (e.g. "_entry") and values are objects that have a `__call__` + method and `not_in_file`, `omitted`, and `unknown` attributes. + The names of the arguments to this `__call__` method + are mmCIF keywords that are extracted from the file (for the + keywords tr_vector[N] and rot_matrix[N][M] simply omit the [ + and ] characters, since these are not valid for Python + identifiers). 
The object will be called with the data from + the file as a set of strings, or `not_in_file`, `omitted` or + `unkonwn` for any keyword that is not present in the file, + the mmCIF omitted value (.), or mmCIF unknown value (?) + respectively. (mmCIF keywords are case insensitive, so this + class always treats them as lowercase regardless of the + file contents.) + :param unknown_category_handler: A callable (or `None`) that is called + for each category in the file that isn't handled; it is given + two arguments: the name of the category, and the line in the + file at which the category was encountered (if known, otherwise + None). + :param unknown_keyword_handler: A callable (or `None`) that is called + for each keyword in the file that isn't handled (within a + category that is handled); it is given three arguments: + the names of the category and keyword, and the line in the + file at which the keyword was encountered (if known, + otherwise None). + """ + def __init__(self, fh, category_handler, unknown_category_handler=None, + unknown_keyword_handler=None): + if _format is not None: + c_file = _format.ihm_file_new_from_python(fh) + self._c_format = _format.ihm_reader_new(c_file) + self.category_handler = category_handler + self.unknown_category_handler = unknown_category_handler + self.unknown_keyword_handler = unknown_keyword_handler + self._category_data = {} + _CifTokenizer.__init__(self, fh) + + def __del__(self): + if hasattr(self, '_c_format'): + _format.ihm_reader_free(self._c_format) + def _read_value(self, vartoken): """Read a line that sets a single value, e.g. "_entry.id 1YTI""" # Only read the value if we're interested in this category and key diff --git a/modules/core/dependency/python-ihm/ihm/location.py b/modules/core/dependency/python-ihm/ihm/location.py index e0f3caaae4..4a46eae611 100644 --- a/modules/core/dependency/python-ihm/ihm/location.py +++ b/modules/core/dependency/python-ihm/ihm/location.py @@ -19,8 +19,8 @@ class Location(object): experimental dataset may be found; - an :class:`~ihm.model.Ensemble` to point to coordinates for an entire ensemble, for example as a DCD file; - - a :class:`LocalizationDensity` to point to an external localization - density, for example in MRC format; + - a :class:`ihm.model.LocalizationDensity` to point to an external + localization density, for example in MRC format; - :data:`ihm.System.locations` to point to other files relating to the modeling in general, such as a modeling control script (:class:`WorkflowFileLocation`) or a command script for a @@ -191,15 +191,16 @@ class FileLocation(Location): containing the file, or `None` if it is stored on the local disk :type repo: :class:`Repository` :param str details: optional description of the file + :param str file_format: optional file type (e.g. 
TXT, PNG, FASTA) """ _eq_keys = Location._eq_keys + ['repo', 'path', 'content_type'] - content_type = None + content_type = 'Other' - def __init__(self, path, repo=None, details=None): + def __init__(self, path, repo=None, details=None, file_format=None): super(FileLocation, self).__init__(details) - self.repo = repo + self.repo, self.file_format = repo, file_format if repo: self.path = path # Cannot determine file size if non-local diff --git a/modules/core/dependency/python-ihm/ihm/model.py b/modules/core/dependency/python-ihm/ihm/model.py index f9f1fe0f34..8e48938fca 100644 --- a/modules/core/dependency/python-ihm/ihm/model.py +++ b/modules/core/dependency/python-ihm/ihm/model.py @@ -135,6 +135,12 @@ def add_atom(self, atom): """Add to the model's set of :class:`Atom` objects. See :meth:`get_spheres` for more details. + + Note that for branched entities, the `seq_id` of the new atom + is provisional. It should be mapped to the correct ID once the + input file is completely read, using :attr:`ihm.AsymUnit.num_map`. + This is done automatically by ihm.reader when using the default + implementation. """ self._atoms.append(atom) diff --git a/modules/core/dependency/python-ihm/ihm/multi_state_scheme.py b/modules/core/dependency/python-ihm/ihm/multi_state_scheme.py index db8d6d1145..0b0b5148ce 100644 --- a/modules/core/dependency/python-ihm/ihm/multi_state_scheme.py +++ b/modules/core/dependency/python-ihm/ihm/multi_state_scheme.py @@ -17,11 +17,10 @@ class MultiStateScheme(object): :param str details: Details on the scheme. :param connectivities: A list of connectivities that belong to the scheme. - :type connectivities: List of - :class:`ìhm.multi_state_scheme.Connectivity` + :type connectivities: List of :class:`Connectivity` :param relaxation_times: A list of relaxation times not assigned to specific connectivities, but to the scheme - :type relaxation_times: List of :class:`ihm.RelaxationTime` + :type relaxation_times: List of :class:`RelaxationTime` """ def __init__(self, name, details=None, connectivities=None, relaxation_times=None): @@ -149,11 +148,11 @@ class Connectivity(object): :type begin_state: :class:`ihm.model.State` :param end_state: The end state of the connectivity. Can be None in case of states that are not connected to others. - :type end_state: :class:`ìhm.model.State` + :type end_state: :class:`ihm.model.State` :param details: Details to the connectivity. :param dataset_group: The DatasetGroup that was used to obtain information on the connectivity. - :type dataset_group: :class:`ìhm.dataset.DatasetGroup` + :type dataset_group: :class:`ihm.dataset.DatasetGroup` :param kinetic_rate: A kinetic rate assigned to the connectivity. :type kinetic_rate: :class:`KineticRate` :param relaxation_time: A relaxation time assigned to the connectivity. 
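Before the reader.py changes, a brief aside on the new public API introduced above. This is a minimal, hedged sketch of the new `ihm.Database` class, the `databases` argument to `ihm.System`, and the `file_format` argument to `ihm.location.FileLocation`; the database code, DOI, repository, and file name below are placeholders, not real entries.

```
import ihm
import ihm.location

# Declare that this system is part of an official database
# (placeholder identifiers)
pdb = ihm.Database(id='PDB', code='1abc', doi='10.2210/pdb1abc/pdb')
system = ihm.System(title='Example system', databases=[pdb])

# FileLocation now accepts an optional file_format (e.g. TXT, PNG, FASTA);
# a DOI-based repository is used here so no local file needs to exist
repo = ihm.location.Repository(doi='10.5281/zenodo.1234567')
script = ihm.location.FileLocation('run_modeling.py', repo=repo,
                                   file_format='TXT',
                                   details='Modeling script')
system.locations.append(script)
```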
diff --git a/modules/core/dependency/python-ihm/ihm/reader.py b/modules/core/dependency/python-ihm/ihm/reader.py index 86eb9b9f74..3de04a8e0e 100644 --- a/modules/core/dependency/python-ihm/ihm/reader.py +++ b/modules/core/dependency/python-ihm/ihm/reader.py @@ -163,7 +163,7 @@ def _make_new_object(self, newcls=None): class RangeIDMapper(object): """Utility class to handle mapping from mmCIF IDs to - :class:`ihm.AsymUnitRange` or :class:`EntityRange` objects.""" + :class:`ihm.AsymUnitRange` or :class:`ihm.EntityRange` objects.""" def __init__(self): self._id_map = {} @@ -451,7 +451,7 @@ def __init__(self, model_class, starting_model_class): self.assemblies = IDMapper(self.system.orphan_assemblies, ihm.Assembly) #: Mapping from ID to :class:`ihm.AsymUnitRange` - #: or :class:`EntityRange` objects + #: or :class:`ihm.EntityRange` objects self.ranges = RangeIDMapper() #: Mapping from ID to :class:`ihm.location.Repository` objects @@ -634,7 +634,7 @@ def __init__(self, model_class, starting_model_class): ihm.multi_state_scheme.KineticRate) #: Mapping from ID to - #: :class:`ihm.multi_state_schene.RelaxationTime` objects + #: :class:`ihm.multi_state_scheme.RelaxationTime` objects self.relaxation_times = IDMapper( None, ihm.multi_state_scheme.RelaxationTime, @@ -801,7 +801,7 @@ def __init__(self, model_class, starting_model_class): self.flr_data, ihm.flr.FPSMPPModeling, *(None,) * 3) #: Mapping from ID to - #: :class:`ihm.flr.KineticRateFRETAnalysisConnection` objects + #: :class:`ihm.flr.KineticRateFretAnalysisConnection` objects self.flr_kinetic_rate_fret_analysis_connection = _FLRIDMapper( '_collection_flr_kinetic_rate_fret_analysis_connection', 'kinetic_rate_fret_analysis_connections', @@ -810,7 +810,7 @@ def __init__(self, model_class, starting_model_class): *(None,) * 3) #: Mapping from ID to - #: :class:`ihm.flr.KineticRateFRETAnalysisConnection` objects + #: :class:`ihm.flr.RelaxationTimeFretAnalysisConnection` objects self.flr_relaxation_time_fret_analysis_connection = _FLRIDMapper( '_collection_flr_relaxation_time_fret_analysis_connection', 'relaxation_time_fret_analysis_connections', @@ -1025,6 +1025,53 @@ def __call__(self, citation_id, name): s.authors.append(name) +class _DatabaseHandler(Handler): + category = '_database_2' + + def __call__(self, database_code, database_id, pdbx_doi, + pdbx_database_accession): + d = ihm.Database(id=database_id, code=database_code, + doi=pdbx_doi, accession=pdbx_database_accession) + self.system.databases.append(d) + + +class _DatabaseStatusHandler(Handler): + category = '_pdbx_database_status' + + # placeholder; the reader will otherwise only return strings or None + not_in_file = 0 + _keys = ['entry_id', 'sg_entry', 'author_approval_type', + 'author_release_status_code', 'date_author_approval', + 'date_author_release_request', 'date_begin_deposition', + 'date_begin_processing', 'date_begin_release_preparation', + 'date_chemical_shifts', 'date_coordinates', + 'date_deposition_form', 'date_end_processing', + 'date_hold_chemical_shifts', 'date_hold_coordinates', + 'date_hold_nmr_constraints', 'date_hold_struct_fact', + 'date_manuscript', 'date_nmr_constraints', 'date_of_pdb_release', + 'date_of_cs_release', 'date_of_mr_release', 'date_of_sf_release', + 'date_struct_fact', 'date_submitted', + 'dep_release_code_chemical_shifts', + 'dep_release_code_coordinates', + 'dep_release_code_nmr_constraints', 'dep_release_code_sequence', + 'dep_release_code_struct_fact', 'deposit_site', + 'hold_for_publication', 'methods_development_category', + 
'name_depositor', 'pdb_date_of_author_approval', + 'pdb_format_compatible', 'process_site', 'rcsb_annotator', + 'recvd_author_approval', 'recvd_chemical_shifts', + 'recvd_coordinates', 'recvd_deposit_form', + 'recvd_initial_deposition_date', 'recvd_internal_approval', + 'recvd_manuscript', 'recvd_nmr_constraints', 'recvd_struct_fact', + 'status_code', 'status_code_cs', 'status_code_mr', + 'status_code_sf'] + + def __call__(self, *args): + # Just pass through all data items present in the file, as a dict + self.system._database_status = dict( + (k, v) for (k, v) in zip(self._keys, args) + if v != self.not_in_file) + + class _ChemCompHandler(Handler): category = '_chem_comp' @@ -1404,14 +1451,20 @@ def __init__(self, *args): and x[1] is not ihm.location.FileLocation) def __call__(self, content_type, id, reference_id, details, file_path, - file_size_bytes): + file_format, file_size_bytes): typ = None if content_type is None else content_type.lower() f = self.sysr.external_files.get_by_id( id, self.type_map.get(typ, ihm.location.FileLocation)) f.repo = self.sysr.repos.get_by_id(reference_id) - f.file_size = self.get_int(file_size_bytes) + # IHMCIF dictionary defines file size as a float, although only int + # values make sense, so allow for either ints or floats here + try: + f.file_size = self.get_int(file_size_bytes) + except ValueError: + f.file_size = self.get_float(file_size_bytes) self.copy_if_present( - f, locals(), keys=['details'], mapkeys={'file_path': 'path'}) + f, locals(), keys=['details', 'file_format'], + mapkeys={'file_path': 'path'}) # Handle DOI that is itself a file if file_path is None: f.path = '.' @@ -2043,24 +2096,28 @@ class _AtomSiteHandler(Handler): def __init__(self, *args): super(_AtomSiteHandler, self).__init__(*args) self._missing_sequence = collections.defaultdict(dict) + # Mapping from asym+auth_seq_id to internal ID self._seq_id_map = {} def _get_seq_id_from_auth(self, auth_seq_id, pdbx_pdb_ins_code, asym): """Get an internal seq_id for something not a polymer (nonpolymer, water, branched), given author-provided info""" if asym._id not in self._seq_id_map: - m = {} - # Make reverse mapping from author-provided info to seq_id - if isinstance(asym.auth_seq_id_map, dict): - for key, val in asym.auth_seq_id_map.items(): - m[val] = key - self._seq_id_map[asym._id] = m + self._seq_id_map[asym._id] = {} m = self._seq_id_map[asym._id] # Treat ? and . 
missing insertion codes equivalently if pdbx_pdb_ins_code is ihm.unknown: pdbx_pdb_ins_code = None - # If no match, use the author-provided numbering as-is - return m.get((auth_seq_id, pdbx_pdb_ins_code), auth_seq_id) + auth = (auth_seq_id, pdbx_pdb_ins_code) + if auth not in m: + # Assign a new ID starting from 1 + seq_id = len(m) + 1 + m[auth] = seq_id + # Add this info to the seq_id -> auth_seq_id mapping too + if asym.auth_seq_id_map == 0: + asym.auth_seq_id_map = {} + asym.auth_seq_id_map[seq_id] = (auth_seq_id, pdbx_pdb_ins_code) + return m[auth] def __call__(self, pdbx_pdb_model_num, label_asym_id, b_iso_or_equiv, label_seq_id, label_atom_id, type_symbol, cartn_x, cartn_y, @@ -2081,7 +2138,7 @@ def __call__(self, pdbx_pdb_model_num, label_asym_id, b_iso_or_equiv, asym = self.sysr.asym_units.get_by_id(label_asym_id) auth_seq_id = self.get_int_or_string(auth_seq_id) if seq_id is None: - # Fill in our internal seq_id if possible + # Fill in our internal seq_id using author-provided info our_seq_id = self._get_seq_id_from_auth( auth_seq_id, pdbx_pdb_ins_code, asym) else: @@ -2497,18 +2554,13 @@ def finalize(self): def _get_auth_seq_id_offset(self, asym): """Get the offset from seq_id to auth_seq_id. Return None if no consistent offset exists.""" - # Do nothing if the entity is not polymeric or branched - if asym.entity is None or (not asym.entity.is_polymeric() - and not asym.entity.is_branched()): + # Do nothing if the entity is not polymeric + if asym.entity is None or not asym.entity.is_polymeric(): return # Do nothing if no map exists if asym.auth_seq_id_map == 0: return - if asym.entity.is_branched(): - # Hack, as branched entities don't technically have seq_ids - rng = (1, len(asym.entity.sequence)) - else: - rng = asym.seq_id_range + rng = asym.seq_id_range offset = None for seq_id in range(rng[0], rng[1] + 1): # If a residue isn't in the map, it has an effective offset of 0, @@ -2535,6 +2587,10 @@ def _get_auth_seq_id_offset(self, asym): class _NonPolySchemeHandler(Handler): category = '_pdbx_nonpoly_scheme' + def __init__(self, *args): + super(_NonPolySchemeHandler, self).__init__(*args) + self._scheme = {} + def __call__(self, asym_id, entity_id, pdb_seq_num, mon_id, pdb_ins_code, pdb_strand_id, ndb_seq_num, auth_seq_num): entity = self.sysr.entities.get_by_id(entity_id) @@ -2550,71 +2606,137 @@ def __call__(self, asym_id, entity_id, pdb_seq_num, mon_id, pdb_ins_code, mon_id, name=entity.description) entity.sequence.append(s) asym = self.sysr.asym_units.get_by_id(asym_id) - if entity.type == 'water' and not isinstance(asym, ihm.WaterAsymUnit): - # Replace AsymUnit with WaterAsymUnit if necessary - asym.__class__ = ihm.WaterAsymUnit - asym._water_sequence = [entity.sequence[0]] - asym.number = 1 if pdb_strand_id not in (None, ihm.unknown, asym_id): asym._strand_id = pdb_strand_id pdb_seq_num = self.get_int_or_string(pdb_seq_num) auth_seq_num = self.get_int_or_string(auth_seq_num) - if entity.type == 'water': - # For waters, assume ndb_seq_num counts starting from 1, - # so use as our internal seq_id. 
Make sure the WaterAsymUnit - # is long enough to handle all ids - seq_id = self.get_int(ndb_seq_num) - if seq_id is None: - # If no ndb_seq_num, we cannot map - return - # Don't bother adding a 1->1 mapping - if (pdb_seq_num != seq_id - or pdb_ins_code not in (None, ihm.unknown)): - asym.number = max(asym.number, seq_id) - asym._water_sequence = [entity.sequence[0]] * asym.number - if asym.auth_seq_id_map == 0: - asym.auth_seq_id_map = {} - asym.auth_seq_id_map[seq_id] = (pdb_seq_num, pdb_ins_code) - # Note any residues that have different pdb_seq_num & auth_seq_num - if (auth_seq_num is not None and pdb_seq_num is not None - and auth_seq_num != pdb_seq_num): - if asym.orig_auth_seq_id_map is None: - asym.orig_auth_seq_id_map = {} - asym.orig_auth_seq_id_map[seq_id] = auth_seq_num - else: - # For nonpolymers, assume a single ChemComp with seq_id=1, - # but don't bother adding a 1->1 mapping - if pdb_seq_num != 1 or pdb_ins_code not in (None, ihm.unknown): - asym.auth_seq_id_map = {1: (pdb_seq_num, pdb_ins_code)} - # Note any residues that have different pdb_seq_num & auth_seq_num - if (auth_seq_num is not None and pdb_seq_num is not None - and auth_seq_num != pdb_seq_num): - asym.orig_auth_seq_id_map = {1: auth_seq_num} + ndb_seq_num = self.get_int(ndb_seq_num) + # Make mapping from author-provided numbering (*pdb_seq_num*, not + # auth_seq_num) to original and NDB numbering. We will use this at + # finalize time to map internal ID ("seq_id") to auth, orig_auth, + # and NDB numbering. + if asym_id not in self._scheme: + self._scheme[asym_id] = [] + self._scheme[asym_id].append((pdb_seq_num, pdb_ins_code, + auth_seq_num, ndb_seq_num)) + + def finalize(self): + for asym in self.system.asym_units: + entity = asym.entity + if entity is None or entity.is_polymeric() or entity.is_branched(): + continue + self._finalize_asym(asym) + + def _finalize_asym(self, asym): + # Add mapping info from scheme tables (to that already extracted + # from atom_site); if a mismatch we use atom_site info + scheme = self._scheme.get(asym._id) + if scheme: + if not asym.auth_seq_id_map: + asym.auth_seq_id_map = {} + if not asym.orig_auth_seq_id_map: + asym.orig_auth_seq_id_map = {} + # Make reverse mapping from author-provided info to internal ID + auth_map = {} + for key, val in asym.auth_seq_id_map.items(): + auth_map[val] = key + for pdb_seq_num, pdb_ins_code, auth_seq_num, ndb_seq_num in scheme: + auth = (pdb_seq_num, pdb_ins_code) + seq_id = auth_map.get(auth) + if seq_id is None: + seq_id = len(asym.auth_seq_id_map) + 1 + asym.auth_seq_id_map[seq_id] = auth + if pdb_seq_num != auth_seq_num: + asym.orig_auth_seq_id_map[seq_id] = auth_seq_num + if not asym.orig_auth_seq_id_map: + asym.orig_auth_seq_id_map = None + if asym.entity.type == 'water': + # Replace AsymUnit with WaterAsymUnit if necessary + if not isinstance(asym, ihm.WaterAsymUnit): + asym.__class__ = ihm.WaterAsymUnit + asym.number = len(asym.auth_seq_id_map) + asym._water_sequence = [asym.entity.sequence[0]] * asym.number + # todo: add mapping from seq_id to ndb numbering? 
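To make the reworked scheme handling above concrete, here is a minimal sketch of inspecting the numbering maps that the new finalize code populates for non-polymers and waters; the input file name is hypothetical and is assumed to contain a single data block.

```
import ihm.reader

with open('ligand.cif') as fh:
    system, = ihm.reader.read(fh)

for asym in system.asym_units:
    if asym.entity is not None and not asym.entity.is_polymeric():
        # internal seq_id -> (pdb_seq_num, pdb_ins_code), taken from the
        # scheme table where present, otherwise from atom_site
        print(asym.auth_seq_id_map)
        # internal seq_id -> auth_seq_num where the two numberings
        # differ; None if they never do
        print(asym.orig_auth_seq_id_map)
```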
class _BranchSchemeHandler(Handler): category = '_pdbx_branch_scheme' - def __call__(self, asym_id, num, pdb_seq_num, auth_seq_num, pdb_asym_id): + def __init__(self, *args): + super(_BranchSchemeHandler, self).__init__(*args) + self._scheme = {} + + def __call__(self, asym_id, num, pdb_seq_num, auth_seq_num, pdb_asym_id, + pdb_ins_code): asym = self.sysr.asym_units.get_by_id(asym_id) if pdb_asym_id not in (None, ihm.unknown, asym_id): asym._strand_id = pdb_asym_id pdb_seq_num = self.get_int_or_string(pdb_seq_num) auth_seq_num = self.get_int_or_string(auth_seq_num) num = self.get_int(num) - # Note any residues that have different num and auth_seq_id - # These will be finalized by _PolySeqSchemeHandler - if num is not None and pdb_seq_num is not None \ - and num != pdb_seq_num: - if asym.auth_seq_id_map == 0: - asym.auth_seq_id_map = {} - asym.auth_seq_id_map[num] = pdb_seq_num, None - # Note any residues that have different pdb_seq_num and auth_seq_num - if (num is not None and auth_seq_num is not None - and pdb_seq_num is not None and auth_seq_num != pdb_seq_num): - if asym.orig_auth_seq_id_map is None: - asym.orig_auth_seq_id_map = {} - asym.orig_auth_seq_id_map[num] = auth_seq_num + # Make mapping from author-provided numbering (*pdb_seq_num*, not + # auth_seq_num) to original and "num" numbering. We will use this at + # finalize time to map internal ID ("seq_id") to auth, orig_auth, + # and "num" numbering. + if asym_id not in self._scheme: + self._scheme[asym_id] = [] + self._scheme[asym_id].append((pdb_seq_num, pdb_ins_code, + auth_seq_num, num)) + + def finalize(self): + need_map_num = False + for asym in self.system.asym_units: + entity = asym.entity + if entity is None or not entity.is_branched(): + continue + self._finalize_asym(asym) + if asym.num_map: + need_map_num = True + if need_map_num: + self._reassign_seq_ids() + + def _reassign_seq_ids(self): + """Change provisional seq_ids so that they match + _pdbx_branch_scheme.num""" + for m in self.sysr.models.get_all(): + for atom in m._atoms: + if atom.asym_unit.num_map: + atom.seq_id = atom.asym_unit.num_map[atom.seq_id] + + def _finalize_asym(self, asym): + # Populate auth_seq_id mapping from scheme tables, and correct + # any incorrect seq_ids assigned in atom_site to use num + scheme = self._scheme.get(asym._id, []) + # Make reverse mapping from atom_site author-provided info + # to internal ID + auth_map = {} + if asym.auth_seq_id_map: + for key, val in asym.auth_seq_id_map.items(): + auth_map[val] = key + asym.auth_seq_id_map = {} + asym.orig_auth_seq_id_map = {} + asym.num_map = {} + for pdb_seq_num, pdb_ins_code, auth_seq_num, num in scheme: + asym.auth_seq_id_map[num] = (pdb_seq_num, pdb_ins_code) + if pdb_seq_num != auth_seq_num: + asym.orig_auth_seq_id_map[num] = auth_seq_num + as_seq_id = auth_map.get((pdb_seq_num, pdb_ins_code)) + if as_seq_id is not None: + if as_seq_id != num: + asym.num_map[as_seq_id] = num + del auth_map[(pdb_seq_num, pdb_ins_code)] + if not asym.orig_auth_seq_id_map: + asym.orig_auth_seq_id_map = None + if not asym.num_map: + asym.num_map = None + # If any residues from atom_site are left, we can't assign a num + # for them, so raise an error + if auth_map: + raise ValueError( + "For branched asym %s, the following author-provided " + "residue numbers (atom_site.auth_seq_id) are not present in " + "the pdbx_branch_scheme table: %s" + % (asym._id, ", ".join(repr(x[0]) for x in auth_map.keys()))) class _EntityBranchListHandler(Handler): @@ -3622,6 +3744,7 @@ class IHMVariant(Variant): _handlers = 
[ _CollectionHandler, _StructHandler, _SoftwareHandler, _CitationHandler, + _DatabaseHandler, _DatabaseStatusHandler, _AuditAuthorHandler, _GrantHandler, _CitationAuthorHandler, _ChemCompHandler, _ChemDescriptorHandler, _EntityHandler, _EntitySrcNatHandler, _EntitySrcGenHandler, _EntitySrcSynHandler, diff --git a/modules/core/dependency/python-ihm/ihm/restraint.py b/modules/core/dependency/python-ihm/ihm/restraint.py index a439f92f5f..8f5118a3cc 100644 --- a/modules/core/dependency/python-ihm/ihm/restraint.py +++ b/modules/core/dependency/python-ihm/ihm/restraint.py @@ -587,14 +587,14 @@ def _all_entities_or_asyms(self): class ResidueFeature(Feature): """Selection of one or more residues from the system. - Residues can be selected from both :class:`AsymUnit` and - :class:`Entity` (the latter implies that it selects residues + Residues can be selected from both :class:`ihm.AsymUnit` and + :class:`ihm.Entity` (the latter implies that it selects residues in all instances of that entity). Individual residues can - also be selected by passing :class:`Residue` objects. + also be selected by passing :class:`ihm.Residue` objects. - :param sequence ranges: A list of :class:`AsymUnitRange`, - :class:`AsymUnit`, :class:`EntityRange`, :class:`Residue`, - and/or :class:`Entity` objects. + :param sequence ranges: A list of :class:`ihm.AsymUnitRange`, + :class:`ihm.AsymUnit`, :class:`ihm.EntityRange`, + :class:`ihm.Residue`, and/or :class:`ihm.Entity` objects. :param str details: Additional text describing this feature. """ @@ -628,8 +628,8 @@ def _get_entity(x): class AtomFeature(Feature): """Selection of one or more atoms from the system. Atoms can be selected from polymers or non-polymers (but not both). - Atoms can also be selected from both :class:`AsymUnit` and - :class:`Entity` (the latter implies that it selects atoms + Atoms can also be selected from both :class:`ihm.AsymUnit` and + :class:`ihm.Entity` (the latter implies that it selects atoms in all instances of that entity). For selecting an entire polymer or residue(s), see :class:`ResidueFeature`. For selecting an entire non-polymer, @@ -659,12 +659,12 @@ class NonPolyFeature(Feature): """Selection of one or more non-polymers from the system. To select individual atoms from a non-polymer, see :class:`AtomFeature`. - Features can include both :class:`AsymUnit` and - :class:`Entity` (the latter implies that it selects non-polymers + Features can include both :class:`ihm.AsymUnit` and + :class:`ihm.Entity` (the latter implies that it selects non-polymers in all instances of that entity). - :param sequence objs: A list of :class:`AsymUnit` and/or - :class:`Entity` objects. + :param sequence objs: A list of :class:`ihm.AsymUnit` and/or + :class:`ihm.Entity` objects. :param str details: Additional text describing this feature. """ diff --git a/modules/core/dependency/python-ihm/ihm/source.py b/modules/core/dependency/python-ihm/ihm/source.py index ab3f3af3e9..8a2c96153a 100644 --- a/modules/core/dependency/python-ihm/ihm/source.py +++ b/modules/core/dependency/python-ihm/ihm/source.py @@ -29,7 +29,7 @@ def __init__(self, ncbi_taxonomy_id=None, scientific_name=None, class Manipulated(Source): """An entity isolated from a genetically manipulated source. - See :class:`Entity`. + See :class:`ihm.Entity`. :param gene: Details about the gene source. :type gene: :class:`Details` @@ -43,12 +43,12 @@ def __init__(self, gene=None, host=None): class Natural(Source, Details): - """An entity isolated from a natural source. See :class:`Entity`. 
+ """An entity isolated from a natural source. See :class:`ihm.Entity`. See :class:`Details` for a description of the parameters.""" src_method = 'nat' class Synthetic(Source, Details): - """An entity obtained synthetically. See :class:`Entity`. + """An entity obtained synthetically. See :class:`ihm.Entity`. See :class:`Details` for a description of the parameters.""" src_method = 'syn' diff --git a/modules/core/dependency/python-ihm/ihm/util.py b/modules/core/dependency/python-ihm/ihm/util/__init__.py similarity index 100% rename from modules/core/dependency/python-ihm/ihm/util.py rename to modules/core/dependency/python-ihm/ihm/util/__init__.py diff --git a/modules/core/dependency/python-ihm/ihm/util/make_mmcif.py b/modules/core/dependency/python-ihm/ihm/util/make_mmcif.py new file mode 100644 index 0000000000..5bb9082ac7 --- /dev/null +++ b/modules/core/dependency/python-ihm/ihm/util/make_mmcif.py @@ -0,0 +1,212 @@ +#!/usr/bin/env python3 + +""" +Add minimal IHM-related tables to an mmCIF file. + +Given any mmCIF file as input, this script will add any missing +IHM-related tables and write out a new file that is minimally compliant +with the IHM dictionary. + +This is done by simply reading in the original file with python-ihm and +then writing it out again, so + a) any data in the input file that is not understood by python-ihm + will be lost on output; and + b) input files that aren't compliant with the PDBx dictionary, or that + contain syntax errors or other problems, may crash or otherwise confuse + python-ihm. + +The --add option can also be used to combine multiple input mmCIF files into +one. This is typically used when the mmCIF files contain models with +differing composition. Only model (coordinate) information is combined, not +other IHM information such as starting models or restraints. +""" + + +import ihm.reader +import ihm.dumper +import ihm.model +import ihm.protocol +import os +import argparse + + +def add_ihm_info(s): + if not s.title: + s.title = 'Auto-generated system' + + # Simple default assembly containing all chains + default_assembly = ihm.Assembly(s.asym_units, name='Modeled assembly') + + # Simple default atomic representation for everything + default_representation = ihm.representation.Representation( + [ihm.representation.AtomicSegment(asym, rigid=False) + for asym in s.asym_units]) + + # Simple default modeling protocol + default_protocol = ihm.protocol.Protocol(name='modeling') + + for state_group in s.state_groups: + for state in state_group: + for model_group in state: + for model in model_group: + if not model.assembly: + model.assembly = default_assembly + if not model.representation: + model.representation = default_representation + if not model.protocol: + model.protocol = default_protocol + return s + + +def add_ihm_info_one_system(fname): + """Read mmCIF file `fname`, which must contain a single System, and + return it with any missing IHM data added.""" + with open(fname) as fh: + systems = ihm.reader.read(fh) + if len(systems) != 1: + raise ValueError("mmCIF file %s must contain exactly 1 data block " + "(%d found)" % (fname, len(systems))) + return add_ihm_info(systems[0]) + + +def combine(s, other_s): + """Add models from the System `other_s` into the System `s`. + After running this function, `s` will contain all Models from both + systems. The models are added to new StateGroup(s) in `s`. 
+ Note that this function also modifies `other_s` in place, so that + System should no longer be used after calling this function.""" + # First map all Entity and AsymUnit objects in `other_s` to equivalent + # objects in `s` + entity_map = combine_entities(s, other_s) + asym_map = combine_asyms(s, other_s, entity_map) + # Now handle the Models themselves + combine_atoms(s, other_s, asym_map) + + +def combine_entities(s, other_s): + """Add `other_s` entities into `s`. Returns a dict that maps Entities + in `other_s` to equivalent objects in `s`.""" + entity_map = {} + sequences = dict((e.sequence, e) for e in s.entities) + for e in other_s.entities: + if e.sequence in sequences: + # If the `other_s` Entity already exists in `s`, map to it + entity_map[e] = sequences[e.sequence] + else: + # Otherwise, add the `other_s` Entity to `s` + s.entities.append(e) + entity_map[e] = e + return entity_map + + +def combine_asyms(s, other_s, entity_map): + """Add `other_s` asyms into `s`. Returns a dict that maps AsymUnits + in `other_s` to equivalent objects in `s`.""" + asym_map = {} + # Collect author-provided information for existing asyms. For polymers, + # we use the author-provided chain ID; for non-polymers, we also use + # the author-provided residue number of the first (only) residue + poly_asyms = dict(((a.entity, a.strand_id), a) + for a in s.asym_units if a.entity.is_polymeric()) + nonpoly_asyms = dict(((a.entity, a.strand_id, a.auth_seq_id_map[1]), a) + for a in s.asym_units + if a.entity.type == 'non-polymer') + + def map_asym(asym, orig_asym): + if orig_asym: + # If an equivalent asym already exists, use it (and its asym_id) + asym_map[asym] = orig_asym + else: + # Otherwise, add a new asym + asym_map[asym] = asym + asym.id = None # Assign new ID + s.asym_units.append(asym) + + for asym in other_s.asym_units: + # Point to Entity in `s`, not `other_s` + asym.entity = entity_map[asym.entity] + # For polymers and non-polymers, if an asym in `other_s` has the + # same author-provided information and entity_id as an asym in `s`, + # reuse the asym_id + if asym.entity.is_polymeric(): + map_asym(asym, poly_asyms.get((asym.entity, asym.strand_id))) + elif asym.entity.type == 'non-polymer': + map_asym(asym, nonpoly_asyms.get((asym.entity, asym.strand_id, + asym.auth_seq_id_map[1]))) + else: + # For waters and branched entities, always assign a new asym_id + asym_map[asym] = asym + asym.id = None # Assign new ID + s.asym_units.append(asym) + return asym_map + + +def combine_atoms(s, other_s, asym_map): + """Add `other_s` atoms into `s`""" + seen_asmb = set() + seen_rep = set() + for state_group in other_s.state_groups: + for state in state_group: + for model_group in state: + for model in model_group: + # Assembly, Representation and Atom and Sphere objects + # all reference `other_s` asyms. We must map these to + # asyms in `s`. 
+ asmb = model.assembly + if id(asmb) not in seen_asmb: + seen_asmb.add(id(asmb)) + # todo: also handle AsymUnitRange + asmb[:] = [asym_map[asym] for asym in asmb] + rep = model.representation + if id(rep) not in seen_rep: + seen_rep.add(id(rep)) + for seg in rep: + seg.asym_unit = asym_map[seg.asym_unit] + for atom in model._atoms: + atom.asym_unit = asym_map[atom.asym_unit] + for sphere in model._spheres: + sphere.asym_unit = asym_map[sphere.asym_unit] + + # Add all models as new state groups + s.state_groups.extend(other_s.state_groups) + + +def get_args(): + p = argparse.ArgumentParser( + description="Add minimal IHM-related tables to an mmCIF file.") + p.add_argument("input", metavar="input.cif", help="input mmCIF file name") + p.add_argument("output", metavar="output.cif", + help="output mmCIF file name", + default="output.cif", nargs="?") + p.add_argument("--add", "-a", action='append', metavar="add.cif", + help="also add model information from the named mmCIF " + "file to the output file") + return p.parse_args() + + +def main(): + args = get_args() + + if (os.path.exists(args.input) and os.path.exists(args.output) + and os.path.samefile(args.input, args.output)): + raise ValueError("Input and output are the same file") + + if args.add: + s = add_ihm_info_one_system(args.input) + for other in args.add: + other_s = add_ihm_info_one_system(other) + combine(s, other_s) + with open(args.output, 'w') as fhout: + ihm.dumper.write( + fhout, [s], + variant=ihm.dumper.IgnoreVariant(['_audit_conform'])) + else: + with open(args.input) as fh: + with open(args.output, 'w') as fhout: + ihm.dumper.write( + fhout, [add_ihm_info(s) for s in ihm.reader.read(fh)], + variant=ihm.dumper.IgnoreVariant(['_audit_conform'])) + + +if __name__ == '__main__': + main() diff --git a/modules/core/dependency/python-ihm/make-release.sh b/modules/core/dependency/python-ihm/make-release.sh index 28f634a71f..9a0ed7fd19 100755 --- a/modules/core/dependency/python-ihm/make-release.sh +++ b/modules/core/dependency/python-ihm/make-release.sh @@ -4,7 +4,8 @@ # - Update AuditConformDumper to match latest IHM dictionary if necessary # - Run util/validate-outputs.py to make sure all example outputs validate # (cd util; PYTHONPATH=.. python3 ./validate-outputs.py) -# - Update ChangeLog.rst and util/python-ihm.spec with the release number +# - Update ChangeLog.rst, util/debian/changelog, and util/python-ihm.spec +# with the release number and date # - Update release number in ihm/__init__.py, MANIFEST.in, and setup.py # - Commit, tag, and push # - Make release on GitHub @@ -23,4 +24,4 @@ python3 setup.py sdist rm -f "src/ihm_format_wrap_${VERSION}.c" echo "Now use 'twine upload dist/ihm-${VERSION}.tar.gz' to publish the release on PyPi." -echo "Then, update the conda-forge, COPR, and Homebrew packages to match." +echo "Then, update the conda-forge, COPR, PPA, and Homebrew packages to match." 
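With make-mmcif.py relocated into the package as ihm.util.make_mmcif (the new file above), the same logic is importable from Python as well as runnable as a script. Below is a minimal sketch of the --add merge path using only functions defined in the new module; the input and output file names are hypothetical:

```python
# Minimal sketch of driving ihm.util.make_mmcif from Python rather than
# the command line; file names here are hypothetical.
import ihm.dumper
import ihm.util.make_mmcif as make_mmcif

# Each input must contain exactly one data block (one System)
s = make_mmcif.add_ihm_info_one_system('base.cif')
other = make_mmcif.add_ihm_info_one_system('extra_models.cif')

# Merge models from `other` into `s`; note that `other` is modified
# in place and should not be used afterwards
make_mmcif.combine(s, other)

with open('combined.cif', 'w') as fh:
    # Drop any stale _audit_conform table, as the script's main() does
    ihm.dumper.write(fh, [s],
                     variant=ihm.dumper.IgnoreVariant(['_audit_conform']))
```

The command-line equivalent is `make_mmcif.py base.cif combined.cif --add extra_models.cif`, as exercised by the new tests later in this patch.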
diff --git a/modules/core/dependency/python-ihm/setup.py b/modules/core/dependency/python-ihm/setup.py index 473459fd84..d54a6555a9 100755 --- a/modules/core/dependency/python-ihm/setup.py +++ b/modules/core/dependency/python-ihm/setup.py @@ -7,7 +7,7 @@ import sys import os -VERSION = "0.43" +VERSION = "1.1" copy_args = sys.argv[1:] @@ -52,7 +52,7 @@ author_email='ben@salilab.org', url='https://github.com/ihmwg/python-ihm', ext_modules=mod, - packages=['ihm'], + packages=['ihm', 'ihm.util'], install_requires=['msgpack'], classifiers=[ "Programming Language :: Python :: 2.7", diff --git a/modules/core/dependency/python-ihm/test/input/mini_add.cif b/modules/core/dependency/python-ihm/test/input/mini_add.cif new file mode 100644 index 0000000000..3f2743baec --- /dev/null +++ b/modules/core/dependency/python-ihm/test/input/mini_add.cif @@ -0,0 +1,55 @@ +data_model +# +_exptl.method 'model, MODELLER Version 9.24 2020/08/21 11:54:31' +# +_modeller.version 9.24 +# +loop_ +_struct_asym.id +_struct_asym.entity_id +_struct_asym.details +A 1 ? +B 2 ? +# +loop_ +_entity_poly_seq.entity_id +_entity_poly_seq.num +_entity_poly_seq.mon_id +1 1 VAL +1 2 GLY +1 3 GLN +1 4 GLN +1 5 TYR +1 6 SER +1 7 SER +2 1 PRO +2 2 GLU +# +loop_ +_atom_site.group_PDB +_atom_site.type_symbol +_atom_site.label_atom_id +_atom_site.label_alt_id +_atom_site.label_comp_id +_atom_site.label_asym_id +_atom_site.auth_asym_id +_atom_site.label_seq_id +_atom_site.auth_seq_id +_atom_site.pdbx_PDB_ins_code +_atom_site.Cartn_x +_atom_site.Cartn_y +_atom_site.Cartn_z +_atom_site.occupancy +_atom_site.B_iso_or_equiv +_atom_site.label_entity_id +_atom_site.id +_atom_site.pdbx_PDB_model_num +ATOM C CA . VAL A A 1 2 ? 114.370 27.980 -26.088 1.000 143.490 1 2 1 +ATOM C CA . GLY A A 2 3 ? 111.506 26.368 -28.075 1.000 137.530 1 9 1 +ATOM C CA . GLN A A 3 4 ? 113.468 23.113 -28.639 1.000 128.420 1 13 1 +ATOM C CA . GLN A A 4 5 ? 113.808 21.534 -32.168 1.000 117.620 1 22 1 +ATOM C CA . TYR A A 5 6 ? 116.743 22.770 -34.259 1.000 103.700 1 31 1 +ATOM C CA . SER A A 6 7 ? 116.626 25.161 -37.229 1.000 93.490 1 43 1 +ATOM C CA . SER A A 7 8 ? 119.165 25.590 -40.036 1.000 87.320 1 49 1 +ATOM C CA . PRO B B 1 3 ? 70.427 58.819 51.717 1.000 152.390 2 55 1 +ATOM C CA . GLU B B 2 4 ? 68.584 58.274 48.425 1.000 152.090 2 63 1 diff --git a/modules/core/dependency/python-ihm/test/input/mini_nonpoly.cif b/modules/core/dependency/python-ihm/test/input/mini_nonpoly.cif new file mode 100644 index 0000000000..ae3d6db2d3 --- /dev/null +++ b/modules/core/dependency/python-ihm/test/input/mini_nonpoly.cif @@ -0,0 +1,82 @@ +data_model +_entry.id model +_struct.entry_id model +_struct.pdbx_model_details . +_struct.pdbx_structure_determination_methodology integrative +_struct.title . +_audit_conform.dict_location https://raw.githubusercontent.com/ihmwg/IHM-dictionary/9be59e1/ihm-extension.dic +_audit_conform.dict_name ihm-extension.dic +_audit_conform.dict_version 1.24 +# +loop_ +_chem_comp.id +_chem_comp.type +_chem_comp.name +_chem_comp.formula +_chem_comp.formula_weight +HEM non-polymer 'PROTOPORPHYRIN IX CONTAINING FE' 'C34 H32 Fe N4 O4' 616.499 +# +# +loop_ +_entity.id +_entity.type +_entity.src_method +_entity.pdbx_description +_entity.formula_weight +_entity.pdbx_number_of_molecules +_entity.details +1 non-polymer man Heme 616.499 2 . 
+# +# +loop_ +_pdbx_entity_nonpoly.entity_id +_pdbx_entity_nonpoly.name +_pdbx_entity_nonpoly.comp_id +1 Heme HEM +# +# +loop_ +_struct_asym.id +_struct_asym.entity_id +_struct_asym.details +A 1 'First heme' +B 1 'Second heme' +# +# +loop_ +_pdbx_nonpoly_scheme.asym_id +_pdbx_nonpoly_scheme.entity_id +_pdbx_nonpoly_scheme.mon_id +_pdbx_nonpoly_scheme.ndb_seq_num +_pdbx_nonpoly_scheme.pdb_seq_num +_pdbx_nonpoly_scheme.auth_seq_num +_pdbx_nonpoly_scheme.auth_mon_id +_pdbx_nonpoly_scheme.pdb_strand_id +_pdbx_nonpoly_scheme.pdb_ins_code +A 1 HEM 1 100 100 HEM A . +B 1 HEM 1 200 200 HEM B . +# +# +loop_ +_atom_site.group_PDB +_atom_site.id +_atom_site.type_symbol +_atom_site.label_atom_id +_atom_site.label_alt_id +_atom_site.label_comp_id +_atom_site.label_seq_id +_atom_site.auth_seq_id +_atom_site.pdbx_PDB_ins_code +_atom_site.label_asym_id +_atom_site.Cartn_x +_atom_site.Cartn_y +_atom_site.Cartn_z +_atom_site.occupancy +_atom_site.label_entity_id +_atom_site.auth_asym_id +_atom_site.auth_comp_id +_atom_site.B_iso_or_equiv +_atom_site.pdbx_PDB_model_num +_atom_site.ihm_model_id +HETATM 1 FE FE . HEM . 100 ? A 0 0 0 . 1 A HEM . 1 1 +HETATM 2 FE FE . HEM . 200 ? B 10.000 10.000 10.000 . 1 B HEM . 1 1 diff --git a/modules/core/dependency/python-ihm/test/input/mini_nonpoly_add.cif b/modules/core/dependency/python-ihm/test/input/mini_nonpoly_add.cif new file mode 100644 index 0000000000..c61f3a60d4 --- /dev/null +++ b/modules/core/dependency/python-ihm/test/input/mini_nonpoly_add.cif @@ -0,0 +1,82 @@ +data_model +_entry.id model +_struct.entry_id model +_struct.pdbx_model_details . +_struct.pdbx_structure_determination_methodology integrative +_struct.title . +_audit_conform.dict_location https://raw.githubusercontent.com/ihmwg/IHM-dictionary/9be59e1/ihm-extension.dic +_audit_conform.dict_name ihm-extension.dic +_audit_conform.dict_version 1.24 +# +loop_ +_chem_comp.id +_chem_comp.type +_chem_comp.name +_chem_comp.formula +_chem_comp.formula_weight +HEM non-polymer 'PROTOPORPHYRIN IX CONTAINING FE' 'C34 H32 Fe N4 O4' 616.499 +# +# +loop_ +_entity.id +_entity.type +_entity.src_method +_entity.pdbx_description +_entity.formula_weight +_entity.pdbx_number_of_molecules +_entity.details +1 non-polymer man Heme 616.499 2 . +# +# +loop_ +_pdbx_entity_nonpoly.entity_id +_pdbx_entity_nonpoly.name +_pdbx_entity_nonpoly.comp_id +1 Heme HEM +# +# +loop_ +_struct_asym.id +_struct_asym.entity_id +_struct_asym.details +A 1 'First heme' +B 1 'Second heme' +# +# +loop_ +_pdbx_nonpoly_scheme.asym_id +_pdbx_nonpoly_scheme.entity_id +_pdbx_nonpoly_scheme.mon_id +_pdbx_nonpoly_scheme.ndb_seq_num +_pdbx_nonpoly_scheme.pdb_seq_num +_pdbx_nonpoly_scheme.auth_seq_num +_pdbx_nonpoly_scheme.auth_mon_id +_pdbx_nonpoly_scheme.pdb_strand_id +_pdbx_nonpoly_scheme.pdb_ins_code +A 1 HEM 1 100 100 HEM A . +B 1 HEM 1 800 800 HEM B . +# +# +loop_ +_atom_site.group_PDB +_atom_site.id +_atom_site.type_symbol +_atom_site.label_atom_id +_atom_site.label_alt_id +_atom_site.label_comp_id +_atom_site.label_seq_id +_atom_site.auth_seq_id +_atom_site.pdbx_PDB_ins_code +_atom_site.label_asym_id +_atom_site.Cartn_x +_atom_site.Cartn_y +_atom_site.Cartn_z +_atom_site.occupancy +_atom_site.label_entity_id +_atom_site.auth_asym_id +_atom_site.auth_comp_id +_atom_site.B_iso_or_equiv +_atom_site.pdbx_PDB_model_num +_atom_site.ihm_model_id +HETATM 1 FE FE . HEM . 100 ? A 0 0 0 . 1 A HEM . 1 1 +HETATM 2 FE FE . HEM . 800 ? B 10.000 10.000 10.000 . 1 B HEM . 
1 1 diff --git a/modules/core/dependency/python-ihm/test/test_dumper.py b/modules/core/dependency/python-ihm/test/test_dumper.py index a49574f7fc..54208f7bf8 100644 --- a/modules/core/dependency/python-ihm/test/test_dumper.py +++ b/modules/core/dependency/python-ihm/test/test_dumper.py @@ -114,7 +114,7 @@ def test_audit_conform_dumper(self): lines = sorted(out.split('\n')) self.assertEqual(lines[1].split()[0], "_audit_conform.dict_location") self.assertEqual(lines[2].rstrip('\r\n'), - "_audit_conform.dict_name ihm-extension.dic") + "_audit_conform.dict_name mmcif_ihm.dic") self.assertEqual(lines[3].split()[0], "_audit_conform.dict_version") def test_struct_dumper(self): @@ -426,9 +426,10 @@ def test_entity_duplicates(self): def test_entity_duplicate_branched(self): """Test EntityDumper with duplicate branched entities""" system = ihm.System() - sacc = ihm.SaccharideChemComp('NAG') - system.entities.append(ihm.Entity([sacc])) - system.entities.append(ihm.Entity([sacc])) + sacc1 = ihm.SaccharideChemComp('NAG') + sacc2 = ihm.SaccharideChemComp('FUC') + system.entities.append(ihm.Entity([sacc1, sacc2])) + system.entities.append(ihm.Entity([sacc1, sacc2])) dumper = ihm.dumper._EntityDumper() dumper.finalize(system) # Assign IDs out = _get_dumper_output(dumper, system) @@ -863,7 +864,8 @@ def test_entity_nonpoly_dumper(self): e2 = ihm.Entity([ihm.NonPolymerChemComp('HEM')], description='heme') e3 = ihm.Entity([ihm.WaterChemComp()]) # Branched entity - e4 = ihm.Entity([ihm.SaccharideChemComp('NAG')]) + e4 = ihm.Entity([ihm.SaccharideChemComp('NAG'), + ihm.SaccharideChemComp('FUC')]) system.entities.extend((e1, e2, e3, e4)) ed = ihm.dumper._EntityDumper() @@ -1197,7 +1199,8 @@ def test_external_reference_dumper(self): 'bar')) repo3 = ihm.location.Repository(doi="10.5281/zenodo.58025", url='foo.spd') - loc = ihm.location.InputFileLocation(repo=repo1, path='bar') + loc = ihm.location.InputFileLocation(repo=repo1, path='bar', + file_format='TXT') system.locations.append(loc) # Duplicates should be ignored loc = ihm.location.InputFileLocation(repo=repo1, path='bar') @@ -1259,14 +1262,15 @@ def test_external_reference_dumper(self): _ihm_external_files.reference_id _ihm_external_files.file_path _ihm_external_files.content_type +_ihm_external_files.file_format _ihm_external_files.file_size_bytes _ihm_external_files.details -1 1 bar 'Input data or restraints' . . -2 1 baz 'Input data or restraints' . . -3 2 foo/bar/baz 'Modeling or post-processing output' . . -4 3 foo.spd 'Input data or restraints' . 'EM micrographs' -5 3 . 'Input data or restraints' . 'EM micrographs' -6 4 %s 'Modeling workflow or script' 4 . +1 1 bar 'Input data or restraints' TXT . . +2 1 baz 'Input data or restraints' . . . +3 2 foo/bar/baz 'Modeling or post-processing output' . . . +4 3 foo.spd 'Input data or restraints' . . 'EM micrographs' +5 3 . 'Input data or restraints' . . 'EM micrographs' +6 4 %s 'Modeling workflow or script' . 4 . # """ % bar.replace(os.sep, '/')) @@ -5064,7 +5068,8 @@ def test_entity_branch_list_dumper(self): """Test EntityBranchListDumper""" system = ihm.System() system.entities.append(ihm.Entity( - [ihm.SaccharideChemComp('NAG')])) + [ihm.SaccharideChemComp('NAG'), + ihm.SaccharideChemComp('FUC')])) # Non-branched entity system.entities.append(ihm.Entity('ACGT')) ed = ihm.dumper._EntityDumper() @@ -5078,6 +5083,7 @@ def test_entity_branch_list_dumper(self): _pdbx_entity_branch_list.comp_id _pdbx_entity_branch_list.hetero 1 1 NAG . +1 2 FUC . 
# """) @@ -5085,7 +5091,8 @@ def test_entity_branch_dumper(self): """Test EntityBranchDumper""" system = ihm.System() system.entities.append(ihm.Entity( - [ihm.SaccharideChemComp('NAG')])) + [ihm.SaccharideChemComp('NAG'), + ihm.SaccharideChemComp('FUC')])) # Non-branched entity system.entities.append(ihm.Entity('ACGT')) ed = ihm.dumper._EntityDumper() @@ -5103,14 +5110,21 @@ def test_entity_branch_dumper(self): def test_branch_scheme_dumper(self): """Test BranchSchemeDumper""" system = ihm.System() - e1 = ihm.Entity([ihm.SaccharideChemComp('NAG')]) - e2 = ihm.Entity([ihm.SaccharideChemComp('FUC')]) + e1 = ihm.Entity([ihm.SaccharideChemComp('NAG'), + ihm.SaccharideChemComp('FUC')]) + e2 = ihm.Entity([ihm.SaccharideChemComp('FUC'), + ihm.SaccharideChemComp('BGC')]) + e3 = ihm.Entity([ihm.SaccharideChemComp('NAG'), + ihm.SaccharideChemComp('BGC')]) # Non-branched entity - e3 = ihm.Entity('ACT') - system.entities.extend((e1, e2, e3)) + e4 = ihm.Entity('ACT') + system.entities.extend((e1, e2, e3, e4)) system.asym_units.append(ihm.AsymUnit(e1, 'foo')) system.asym_units.append(ihm.AsymUnit(e2, 'bar', auth_seq_id_map=5)) - system.asym_units.append(ihm.AsymUnit(e3, 'baz')) + system.asym_units.append(ihm.AsymUnit( + e3, 'bar', auth_seq_id_map={1: 6, 2: (7, 'A')}, + orig_auth_seq_id_map={1: 100})) + system.asym_units.append(ihm.AsymUnit(e4, 'baz')) ihm.dumper._EntityDumper().finalize(system) ihm.dumper._StructAsymDumper().finalize(system) dumper = ihm.dumper._BranchSchemeDumper() @@ -5122,12 +5136,17 @@ def test_branch_scheme_dumper(self): _pdbx_branch_scheme.mon_id _pdbx_branch_scheme.num _pdbx_branch_scheme.pdb_seq_num +_pdbx_branch_scheme.pdb_ins_code _pdbx_branch_scheme.auth_seq_num _pdbx_branch_scheme.auth_mon_id _pdbx_branch_scheme.pdb_mon_id _pdbx_branch_scheme.pdb_asym_id -A 1 NAG 1 1 1 NAG NAG A -B 2 FUC 1 6 6 FUC FUC B +A 1 NAG 1 1 . 1 NAG NAG A +A 1 FUC 2 2 . 2 FUC FUC A +B 2 FUC 1 6 . 6 FUC FUC B +B 2 BGC 2 7 . 7 BGC BGC B +C 3 NAG 1 6 . 100 NAG NAG C +C 3 BGC 2 7 A 7 BGC BGC C # """) @@ -5191,6 +5210,50 @@ def test_branch_link_dumper(self): # """) + def test_database_dumper(self): + """Test DatabaseDumper""" + system = ihm.System() + dumper = ihm.dumper._DatabaseDumper() + out = _get_dumper_output(dumper, system) + self.assertEqual(out, '') + + system = ihm.System( + databases=[ihm.Database(id='foo', code='bar'), + ihm.Database(id='baz', code='1abc', accession='1abcxyz', + doi='1.2.3.4')]) + dumper = ihm.dumper._DatabaseDumper() + out = _get_dumper_output(dumper, system) + self.assertEqual(out, """# +loop_ +_database_2.database_id +_database_2.database_code +_database_2.pdbx_database_accession +_database_2.pdbx_DOI +foo bar . . +baz 1abc 1abcxyz 1.2.3.4 +# +""") + + def test_database_status_dumper(self): + """Test DatabaseStatusDumper""" + system = ihm.System() + system._database_status = { + 'status_code': 'REL', 'entry_id': '5FD1', + 'recvd_initial_deposition_date': '1993-06-29', + 'deposit_site': ihm.unknown, 'process_site': 'BNL', + 'sg_entry': None} + dumper = ihm.dumper._DatabaseStatusDumper() + out = _get_dumper_output(dumper, system) + # sort to remove dict order + self.assertEqual("\n".join(sorted(out.split('\n'))), + """ +_pdbx_database_status.deposit_site ? +_pdbx_database_status.entry_id 5FD1 +_pdbx_database_status.process_site BNL +_pdbx_database_status.recvd_initial_deposition_date 1993-06-29 +_pdbx_database_status.sg_entry . 
+_pdbx_database_status.status_code REL""") + if __name__ == '__main__': unittest.main() diff --git a/modules/core/dependency/python-ihm/test/test_examples.py b/modules/core/dependency/python-ihm/test/test_examples.py index c0a78e94d2..945e8b9983 100644 --- a/modules/core/dependency/python-ihm/test/test_examples.py +++ b/modules/core/dependency/python-ihm/test/test_examples.py @@ -55,7 +55,7 @@ def test_locations_example(self): # can read it with open(out) as fh: contents = fh.readlines() - self.assertEqual(len(contents), 70) + self.assertEqual(len(contents), 71) with open(out) as fh: s, = ihm.reader.read(fh) os.unlink(out) diff --git a/modules/core/dependency/python-ihm/test/test_format.py b/modules/core/dependency/python-ihm/test/test_format.py index 94a2783b99..170432aadd 100644 --- a/modules/core/dependency/python-ihm/test/test_format.py +++ b/modules/core/dependency/python-ihm/test/test_format.py @@ -800,6 +800,258 @@ def __call__(self): _format.ihm_reader_free(reader) fh.close() + def test_preserving_tokenizer_get_token(self): + """Test _PreservingCifTokenizer._get_token()""" + cif = """ +# Full line comment +_cat1.Foo baz # End of line comment +""" + t = ihm.format._PreservingCifTokenizer(StringIO(cif)) + tokens = [t._get_token() for _ in range(11)] + self.assertIsInstance(tokens[0], ihm.format._EndOfLineToken) + self.assertIsInstance(tokens[1], ihm.format._CommentToken) + self.assertEqual(tokens[1].txt, ' Full line comment') + self.assertIsInstance(tokens[2], ihm.format._EndOfLineToken) + self.assertIsInstance(tokens[3], ihm.format._PreservingVariableToken) + self.assertEqual(tokens[3].category, '_cat1') + self.assertEqual(tokens[3].keyword, 'foo') + self.assertEqual(tokens[3].orig_keyword, 'Foo') + self.assertIsInstance(tokens[4], ihm.format._WhitespaceToken) + self.assertEqual(tokens[4].txt, ' ') + self.assertIsInstance(tokens[5], ihm.format._TextValueToken) + self.assertEqual(tokens[5].txt, 'baz') + self.assertIsInstance(tokens[6], ihm.format._WhitespaceToken) + self.assertEqual(tokens[6].txt, ' ') + self.assertIsInstance(tokens[7], ihm.format._CommentToken) + self.assertEqual(tokens[7].txt, ' End of line comment') + self.assertIsInstance(tokens[8], ihm.format._EndOfLineToken) + self.assertIsNone(tokens[9]) + self.assertIsNone(tokens[10]) + + # Make sure we can reconstruct the original mmCIF from the tokens + new_cif = "".join(x.as_mmcif() for x in tokens[:-2]) + self.assertEqual(new_cif, cif) + + def test_preserving_tokenizer_reconstruct(self): + """Make sure _PreservingCifTokenizer can reconstruct original mmCIF""" + cif = """ +data_foo_bar +# +_cat1.foo ? +# +_cat2.BaR . +# +loop_ +foo.bar +foo.baz +foo.single +foo.double +foo.multi +x . 'single' "double" +;multi +; +""" + t = ihm.format._PreservingCifTokenizer(StringIO(cif)) + tokens = [] + while True: + tok = t._get_token() + if tok is None: + break + else: + tokens.append(tok) + new_cif = "".join(x.as_mmcif() for x in tokens) + self.assertEqual(new_cif, cif) + + def test_preserving_variable_token(self): + """Test _PreservingVariableToken class""" + t = ihm.format._PreservingVariableToken("foo.BAR", 1) + self.assertEqual(t.keyword, 'bar') + self.assertEqual(t.orig_keyword, 'BAR') + self.assertEqual(t.as_mmcif(), 'foo.BAR') + t.keyword = 'baz' + self.assertEqual(t.as_mmcif(), 'foo.baz') + + def test_preserving_cif_reader(self): + """Test _PreservingCifReader class""" + cif = """ +data_foo_bar +# +_cat1.foo ? 
+# +loop_ +_foo.bar +_foo.baz +a b c d +x y +""" + r = ihm.format._PreservingCifReader(StringIO(cif)) + tokens = list(r.read_file()) + self.assertIsInstance(tokens[5], ihm.format._CategoryTokenGroup) + self.assertIsInstance(tokens[8], ihm.format._LoopHeaderTokenGroup) + self.assertIsInstance(tokens[9], ihm.format._LoopRowTokenGroup) + self.assertIsInstance(tokens[10], ihm.format._LoopRowTokenGroup) + self.assertIsInstance(tokens[11], ihm.format._LoopRowTokenGroup) + new_cif = "".join(x.as_mmcif() for x in tokens) + self.assertEqual(new_cif, cif) + + def test_preserving_cif_reader_filter(self): + """Test _PreservingCifReader class with filters""" + cif = """ +data_foo_bar +# +_cat1.bar old +# +loop_ +_foo.bar +_foo.baz +a b c d +x y +""" + r = ihm.format._PreservingCifReader(StringIO(cif)) + filters = [ihm.format._ChangeValueFilter(".bar", old='old', new='new'), + ihm.format._ChangeValueFilter(".bar", old='a', new='newa'), + ihm.format._ChangeValueFilter(".foo", old='old', new='new')] + tokens = list(r.read_file(filters)) + new_cif = "".join(x.as_mmcif() for x in tokens) + self.assertEqual(new_cif, """ +data_foo_bar +# +_cat1.bar new +# +loop_ +_foo.bar +_foo.baz +newa b c d +x y +""") + + def test_category_token_group(self): + """Test CategoryTokenGroup class""" + var = ihm.format._PreservingVariableToken("_foo.bar", 1) + space = ihm.format._WhitespaceToken(" ") + val = ihm.format._TextValueToken("baz", quote=None) + tg = ihm.format._CategoryTokenGroup( + var, ihm.format._SpacedToken([space], val)) + self.assertEqual(str(tg), "<_CategoryTokenGroup(_foo.bar, baz)>") + self.assertEqual(tg.as_mmcif(), '_foo.bar baz\n') + self.assertEqual(tg.category, "_foo") + self.assertEqual(tg.keyword, "bar") + self.assertEqual(tg.value, "baz") + tg.value = None + self.assertIsNone(tg.value) + + def test_spaced_token(self): + """Test SpacedToken class""" + space = ihm.format._WhitespaceToken(" ") + val = ihm.format._TextValueToken("baz", quote=None) + sp = ihm.format._SpacedToken([space], val) + self.assertEqual(sp.as_mmcif(), " baz") + self.assertEqual(sp.value, 'baz') + sp.value = None + self.assertIsNone(sp.value) + self.assertEqual(sp.as_mmcif(), ' .') + sp.value = ihm.unknown + self.assertIs(sp.value, ihm.unknown) + self.assertEqual(sp.as_mmcif(), ' ?') + sp.value = "test value" + self.assertEqual(sp.as_mmcif(), ' "test value"') + + def test_loop_header_token_group(self): + """Test LoopHeaderTokenGroup class""" + cif = """ +loop_ +_foo.bar +_foo.baz +x y +""" + r = ihm.format._PreservingCifReader(StringIO(cif)) + token = list(r.read_file())[1] + self.assertIsInstance(token, ihm.format._LoopHeaderTokenGroup) + self.assertEqual(str(token), + "<_LoopHeaderTokenGroup(_foo, ['bar', 'baz'])>") + self.assertEqual(token.keyword_index("bar"), 0) + self.assertEqual(token.keyword_index("baz"), 1) + self.assertRaises(ValueError, token.keyword_index, "foo") + + def test_change_value_filter_init(self): + """Test ChangeValueFilter constructor""" + f = ihm.format._ChangeValueFilter("_citation.id", old='1', new='2') + self.assertEqual(f.category, '_citation') + self.assertEqual(f.keyword, 'id') + f = ihm.format._ChangeValueFilter(".bar", old='1', new='2') + self.assertIsNone(f.category) + self.assertEqual(f.keyword, 'bar') + f = ihm.format._ChangeValueFilter("bar", old='1', new='2') + self.assertIsNone(f.category) + self.assertEqual(f.keyword, 'bar') + + def test_change_value_filter_category(self): + """Test ChangeValueFilter.filter_category""" + var = ihm.format._PreservingVariableToken("_foo.bar", 1) + space = 
ihm.format._WhitespaceToken(" ") + val = ihm.format._TextValueToken("baz", quote=None) + tg = ihm.format._CategoryTokenGroup( + var, ihm.format._SpacedToken([space], val)) + # Value does not match + f = ihm.format._ChangeValueFilter("_foo.bar", old='old', new='new') + new_tg = f.filter_category(tg) + self.assertEqual(new_tg.value, 'baz') + + # Keyword does not match + f = ihm.format._ChangeValueFilter("_foo.foo", old='baz', new='new') + new_tg = f.filter_category(tg) + self.assertEqual(new_tg.value, 'baz') + + # Category does not match + f = ihm.format._ChangeValueFilter("_bar.bar", old='baz', new='new') + new_tg = f.filter_category(tg) + self.assertEqual(new_tg.value, 'baz') + + # Category matches exactly + f = ihm.format._ChangeValueFilter("_foo.bar", old='baz', new='new') + new_tg = f.filter_category(tg) + self.assertEqual(new_tg.value, 'new') + + # All-category match + f = ihm.format._ChangeValueFilter(".bar", old='new', new='new2') + new_tg = f.filter_category(tg) + self.assertEqual(new_tg.value, 'new2') + + def test_change_value_filter_loop(self): + """Test ChangeValueFilter.get_loop_filter""" + cif = """ +loop_ +_foo.bar +_foo.baz +x y +""" + r = ihm.format._PreservingCifReader(StringIO(cif)) + tokens = list(r.read_file()) + header = tokens[1] + row = tokens[2] + # Keyword does not match + f = ihm.format._ChangeValueFilter("_foo.foo", old='x', new='new') + self.assertIsNone(f.get_loop_filter(header)) + + # Category does not match + f = ihm.format._ChangeValueFilter("_bar.bar", old='x', new='new') + self.assertIsNone(f.get_loop_filter(header)) + + # Value does not match + f = ihm.format._ChangeValueFilter("_foo.bar", old='notx', new='new') + lf = f.get_loop_filter(header) + self.assertEqual(lf(row).as_mmcif(), "x y") + + # Category matches exactly + f = ihm.format._ChangeValueFilter("_foo.bar", old='x', new='new') + lf = f.get_loop_filter(header) + self.assertEqual(lf(row).as_mmcif(), "new y") + + # All-category match + f = ihm.format._ChangeValueFilter(".bar", old='new', new='new2') + lf = f.get_loop_filter(header) + self.assertEqual(lf(row).as_mmcif(), "new2 y") + if __name__ == '__main__': unittest.main() diff --git a/modules/core/dependency/python-ihm/test/test_location.py b/modules/core/dependency/python-ihm/test/test_location.py index 5be9b89a0e..989a99c437 100644 --- a/modules/core/dependency/python-ihm/test/test_location.py +++ b/modules/core/dependency/python-ihm/test/test_location.py @@ -183,6 +183,7 @@ def test_file_location_repo(self): self.assertIsNone(loc.file_size) self.assertEqual(str(loc), "") + self.assertEqual(str(r), "") # locations should only compare equal if path and repo both match loc2 = ihm.location.InputFileLocation('foo/bar', repo=r) self.assertEqual(loc, loc2) diff --git a/modules/core/dependency/python-ihm/test/test_main.py b/modules/core/dependency/python-ihm/test/test_main.py index 0eaf8775a9..14bdb2bcba 100644 --- a/modules/core/dependency/python-ihm/test/test_main.py +++ b/modules/core/dependency/python-ihm/test/test_main.py @@ -23,6 +23,7 @@ def test_system(self): s = ihm.System(title='test system') self.assertEqual(s.title, 'test system') self.assertEqual(s.id, 'model') + self.assertEqual(s.databases, []) def test_chem_comp(self): """Test ChemComp class""" @@ -39,6 +40,9 @@ def test_chem_comp(self): self.assertEqual(cc1, cc2) self.assertEqual(hash(cc1), hash(cc2)) self.assertNotEqual(cc1, cc3) + cc4 = ihm.ChemComp(id='GLY', code='G', code_canonical='G', + formula=ihm.unknown) + self.assertIsNone(cc4.formula_weight) def test_chem_comp_id_5(self): 
"""Test new-style 5-character CCD IDs in ChemComp""" @@ -219,19 +223,21 @@ def test_entity(self): e2 = ihm.Entity('AHCD', description='bar') e3 = ihm.Entity('AHCDE', description='foo') heme = ihm.Entity([ihm.NonPolymerChemComp('HEM')]) - sugar = ihm.Entity([ihm.SaccharideChemComp('NAG')]) + sugar = ihm.Entity([ihm.SaccharideChemComp('NAG'), + ihm.SaccharideChemComp('FUC')]) self.assertEqual(e1, e2) self.assertNotEqual(e1, e3) self.assertEqual(e1.seq_id_range, (1, 4)) self.assertEqual(e3.seq_id_range, (1, 5)) - sugar2 = ihm.Entity([ihm.SaccharideChemComp('NAG')]) + sugar2 = ihm.Entity([ihm.SaccharideChemComp('NAG'), + ihm.SaccharideChemComp('FUC')]) # Branched entities never compare equal unless they are the same object self.assertEqual(sugar, sugar) self.assertNotEqual(sugar, sugar2) # seq_id does not exist for nonpolymers self.assertEqual(heme.seq_id_range, (None, None)) # We do have an internal seq_id_range for branched entities - self.assertEqual(sugar.seq_id_range, (1, 1)) + self.assertEqual(sugar.seq_id_range, (1, 2)) def test_entity_weight(self): """Test Entity.formula_weight""" @@ -246,7 +252,8 @@ def test_entity_type(self): protein = ihm.Entity('AHCD') heme = ihm.Entity([ihm.NonPolymerChemComp('HEM')]) water = ihm.Entity([ihm.WaterChemComp()]) - sugar = ihm.Entity([ihm.SaccharideChemComp('NAG')]) + sugar = ihm.Entity([ihm.SaccharideChemComp('NAG'), + ihm.SaccharideChemComp('FUC')]) self.assertEqual(protein.type, 'polymer') self.assertTrue(protein.is_polymeric()) self.assertFalse(protein.is_branched()) @@ -260,6 +267,12 @@ def test_entity_type(self): self.assertFalse(sugar.is_polymeric()) self.assertTrue(sugar.is_branched()) + # A single sugar should be classified non-polymer + single_sugar = ihm.Entity([ihm.SaccharideChemComp('NAG')]) + self.assertEqual(single_sugar.type, 'non-polymer') + self.assertFalse(single_sugar.is_polymeric()) + self.assertFalse(single_sugar.is_branched()) + # A single amino acid should be classified non-polymer single_aa = ihm.Entity('A') self.assertEqual(single_aa.type, 'non-polymer') @@ -415,7 +428,7 @@ def test_water_asym(self): self.assertEqual(a.number_of_molecules, 3) self.assertRaises(TypeError, ihm.AsymUnit, water) - self.assertRaises(TypeError, ihm.WaterAsymUnit, e) + self.assertRaises(TypeError, ihm.WaterAsymUnit, e, number=3) def test_asym_unit_residue(self): """Test Residue derived from an AsymUnit""" @@ -475,7 +488,8 @@ def test_asym_range(self): """Test AsymUnitRange class""" e = ihm.Entity('AHCDAH') heme = ihm.Entity([ihm.NonPolymerChemComp('HEM')]) - sugar = ihm.Entity([ihm.SaccharideChemComp('NAG')]) + sugar = ihm.Entity([ihm.SaccharideChemComp('NAG'), + ihm.SaccharideChemComp('FUC')]) a = ihm.AsymUnit(e, "testdetail") aheme = ihm.AsymUnit(heme) asugar = ihm.AsymUnit(sugar) @@ -484,7 +498,7 @@ def test_asym_range(self): # seq_id is not defined for nonpolymers self.assertEqual(aheme.seq_id_range, (None, None)) # We use seq_id internally for branched entities - self.assertEqual(asugar.seq_id_range, (1, 1)) + self.assertEqual(asugar.seq_id_range, (1, 2)) r = a(3, 4) self.assertEqual(r.seq_id_range, (3, 4)) self.assertEqual(r._id, 42) @@ -550,6 +564,26 @@ def test_orig_auth_seq_id_dict(self): self.assertEqual(a._get_pdb_auth_seq_id_ins_code(2), (4, 4, 'A')) self.assertEqual(a._get_pdb_auth_seq_id_ins_code(3), (3, 3, None)) + def test_water_orig_auth_seq_id_none(self): + """Test default water orig_auth_seq_id_map (None)""" + water = ihm.Entity([ihm.WaterChemComp()]) + a = ihm.WaterAsymUnit(water, number=3, + auth_seq_id_map={1: 0, 2: (4, 'A')}) 
+ self.assertIsNone(a.orig_auth_seq_id_map) + self.assertEqual(a._get_pdb_auth_seq_id_ins_code(1), (0, 0, None)) + self.assertEqual(a._get_pdb_auth_seq_id_ins_code(2), (4, 4, 'A')) + self.assertEqual(a._get_pdb_auth_seq_id_ins_code(3), (3, 3, None)) + + def test_water_orig_auth_seq_id_dict(self): + """Test water orig_auth_seq_id_map as dict""" + water = ihm.Entity([ihm.WaterChemComp()]) + a = ihm.WaterAsymUnit(water, number=3, + auth_seq_id_map={1: 0, 2: (4, 'A')}, + orig_auth_seq_id_map={1: 5}) + self.assertEqual(a._get_pdb_auth_seq_id_ins_code(1), (0, 5, None)) + self.assertEqual(a._get_pdb_auth_seq_id_ins_code(2), (4, 4, 'A')) + self.assertEqual(a._get_pdb_auth_seq_id_ins_code(3), (3, 3, None)) + def test_assembly(self): """Test Assembly class""" e1 = ihm.Entity('AHCD') diff --git a/modules/core/dependency/python-ihm/test/test_make_mmcif.py b/modules/core/dependency/python-ihm/test/test_make_mmcif.py index d3206b071e..88e811e487 100644 --- a/modules/core/dependency/python-ihm/test/test_make_mmcif.py +++ b/modules/core/dependency/python-ihm/test/test_make_mmcif.py @@ -7,15 +7,16 @@ TOPDIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..')) utils.set_search_paths(TOPDIR) import ihm.reader +import ihm.util.make_mmcif # Script should also be importable -MAKE_MMCIF = os.path.join(TOPDIR, 'util', 'make-mmcif.py') +MAKE_MMCIF = os.path.join(TOPDIR, 'ihm', 'util', 'make_mmcif.py') class Tests(unittest.TestCase): - @unittest.skipIf(sys.version_info[0] < 3, "make-mmcif.py needs Python 3") + @unittest.skipIf(sys.version_info[0] < 3, "make_mmcif.py needs Python 3") def test_simple(self): - """Simple test of make-mmcif utility script""" + """Simple test of make_mmcif utility script""" incif = utils.get_input_file_name(TOPDIR, 'struct_only.cif') subprocess.check_call([sys.executable, MAKE_MMCIF, incif]) with open('output.cif') as fh: @@ -25,9 +26,9 @@ def test_simple(self): 'of transcription regulation by Gdown1') os.unlink('output.cif') - @unittest.skipIf(sys.version_info[0] < 3, "make-mmcif.py needs Python 3") + @unittest.skipIf(sys.version_info[0] < 3, "make_mmcif.py needs Python 3") def test_non_default_output(self): - """Simple test of make-mmcif with non-default output name""" + """Simple test of make_mmcif with non-default output name""" incif = utils.get_input_file_name(TOPDIR, 'struct_only.cif') subprocess.check_call([sys.executable, MAKE_MMCIF, incif, 'non-default-output.cif']) @@ -38,9 +39,9 @@ def test_non_default_output(self): 'of transcription regulation by Gdown1') os.unlink('non-default-output.cif') - @unittest.skipIf(sys.version_info[0] < 3, "make-mmcif.py needs Python 3") + @unittest.skipIf(sys.version_info[0] < 3, "make_mmcif.py needs Python 3") def test_no_title(self): - """Check that make-mmcif adds missing title""" + """Check that make_mmcif adds missing title""" incif = utils.get_input_file_name(TOPDIR, 'no_title.cif') subprocess.check_call([sys.executable, MAKE_MMCIF, incif]) with open('output.cif') as fh: @@ -48,22 +49,22 @@ def test_no_title(self): self.assertEqual(s.title, 'Auto-generated system') os.unlink('output.cif') - @unittest.skipIf(sys.version_info[0] < 3, "make-mmcif.py needs Python 3") + @unittest.skipIf(sys.version_info[0] < 3, "make_mmcif.py needs Python 3") def test_bad_usage(self): - """Bad usage of make-mmcif utility script""" + """Bad usage of make_mmcif utility script""" ret = subprocess.call([sys.executable, MAKE_MMCIF]) - self.assertEqual(ret, 1) + self.assertEqual(ret, 2) - @unittest.skipIf(sys.version_info[0] < 3, "make-mmcif.py needs 
Python 3") + @unittest.skipIf(sys.version_info[0] < 3, "make_mmcif.py needs Python 3") def test_same_file(self): - """Check that make-mmcif fails if input and output are the same""" + """Check that make_mmcif fails if input and output are the same""" incif = utils.get_input_file_name(TOPDIR, 'struct_only.cif') ret = subprocess.call([sys.executable, MAKE_MMCIF, incif, incif]) self.assertEqual(ret, 1) - @unittest.skipIf(sys.version_info[0] < 3, "make-mmcif.py needs Python 3") + @unittest.skipIf(sys.version_info[0] < 3, "make_mmcif.py needs Python 3") def test_mini(self): - """Check that make-mmcif works given only basic atom info""" + """Check that make_mmcif works given only basic atom info""" incif = utils.get_input_file_name(TOPDIR, 'mini.cif') subprocess.check_call([sys.executable, MAKE_MMCIF, incif]) with open('output.cif') as fh: @@ -82,9 +83,9 @@ def test_mini(self): self.assertEqual(s.title, 'Auto-generated system') os.unlink('output.cif') - @unittest.skipIf(sys.version_info[0] < 3, "make-mmcif.py needs Python 3") + @unittest.skipIf(sys.version_info[0] < 3, "make_mmcif.py needs Python 3") def test_pass_through(self): - """Check that make-mmcif passes through already-compliant files""" + """Check that make_mmcif passes through already-compliant files""" incif = utils.get_input_file_name(TOPDIR, 'docking.cif') subprocess.check_call([sys.executable, MAKE_MMCIF, incif]) with open('output.cif') as fh: @@ -104,6 +105,111 @@ def test_pass_through(self): self.assertEqual(s.title, 'Output from simple-docking example') os.unlink('output.cif') + @unittest.skipIf(sys.version_info[0] < 3, "make_mmcif.py needs Python 3") + def test_add_polymers(self): + """Check that make_mmcif combines polymer information""" + # mini.cif contains two chains A, B + incif = utils.get_input_file_name(TOPDIR, 'mini.cif') + # mini_add.cif also contains A, B; A is the same sequence as mini.cif + # but B is different (so should be renamed C when we add) + addcif = utils.get_input_file_name(TOPDIR, 'mini_add.cif') + subprocess.check_call([sys.executable, MAKE_MMCIF, incif, + '--add', addcif]) + with open('output.cif') as fh: + s, = ihm.reader.read(fh) + self.assertEqual(len(s.entities), 3) + self.assertEqual(len(s.asym_units), 3) + self.assertEqual(len(s.state_groups), 2) + # Model from mini.cif + self.assertEqual(len(s.state_groups[0]), 1) + self.assertEqual(len(s.state_groups[0][0]), 1) + self.assertEqual(len(s.state_groups[0][0][0]), 1) + m = s.state_groups[0][0][0][0] + self.assertEqual(m.protocol.name, 'modeling') + self.assertEqual(m.assembly.name, 'Modeled assembly') + chain_a, chain_b, = m.representation + self.assertIs(chain_a.asym_unit.asym, s.asym_units[0]) + self.assertIs(chain_b.asym_unit.asym, s.asym_units[1]) + for chain in chain_a, chain_b: + self.assertIsInstance(chain, ihm.representation.AtomicSegment) + self.assertFalse(chain.rigid) + # Model from mini_add.cif + self.assertEqual(len(s.state_groups[1]), 1) + self.assertEqual(len(s.state_groups[1][0]), 1) + self.assertEqual(len(s.state_groups[1][0][0]), 1) + m = s.state_groups[1][0][0][0] + self.assertEqual(m.protocol.name, 'modeling') + self.assertEqual(m.assembly.name, 'Modeled assembly') + chain_a, chain_c, = m.representation + self.assertIs(chain_a.asym_unit.asym, s.asym_units[0]) + self.assertIs(chain_c.asym_unit.asym, s.asym_units[2]) + for chain in chain_a, chain_c: + self.assertIsInstance(chain, ihm.representation.AtomicSegment) + self.assertFalse(chain.rigid) + self.assertEqual(s.title, 'Auto-generated system') + os.unlink('output.cif') + + 
@unittest.skipIf(sys.version_info[0] < 3, "make_mmcif.py needs Python 3") + def test_add_non_polymers(self): + """Check that make_mmcif combines non-polymer information""" + # mini_nonpoly.cif contains two hemes A, B + incif = utils.get_input_file_name(TOPDIR, 'mini_nonpoly.cif') + # mini_nonpoly_add.cif also contains A, B; A has the same author + # provided residue number as mini_nonpoly.cif but B is different + # (so should be renamed C when we add) + addcif = utils.get_input_file_name(TOPDIR, 'mini_nonpoly_add.cif') + subprocess.check_call([sys.executable, MAKE_MMCIF, incif, + '--add', addcif]) + with open('output.cif') as fh: + s, = ihm.reader.read(fh) + self.assertEqual(len(s.entities), 1) + self.assertEqual(len(s.asym_units), 3) + self.assertEqual(len(s.state_groups), 2) + # Model from mini_nonpoly.cif + self.assertEqual(len(s.state_groups[0]), 1) + self.assertEqual(len(s.state_groups[0][0]), 1) + self.assertEqual(len(s.state_groups[0][0][0]), 1) + m = s.state_groups[0][0][0][0] + self.assertEqual(m.protocol.name, 'modeling') + self.assertEqual(m.assembly.name, 'Modeled assembly') + chain_a, chain_b, = m.representation + self.assertIs(chain_a.asym_unit, s.asym_units[0]) + self.assertIs(chain_b.asym_unit, s.asym_units[1]) + for chain in chain_a, chain_b: + self.assertIsInstance(chain, ihm.representation.AtomicSegment) + self.assertFalse(chain.rigid) + # Model from mini_nonpoly_add.cif + self.assertEqual(len(s.state_groups[1]), 1) + self.assertEqual(len(s.state_groups[1][0]), 1) + self.assertEqual(len(s.state_groups[1][0][0]), 1) + m = s.state_groups[1][0][0][0] + self.assertEqual(m.protocol.name, 'modeling') + self.assertEqual(m.assembly.name, 'Modeled assembly') + chain_a, chain_c, = m.representation + self.assertIs(chain_a.asym_unit, s.asym_units[0]) + self.assertIs(chain_c.asym_unit, s.asym_units[2]) + for chain in chain_a, chain_c: + self.assertIsInstance(chain, ihm.representation.AtomicSegment) + self.assertFalse(chain.rigid) + self.assertEqual(s.title, 'Auto-generated system') + os.unlink('output.cif') + + @unittest.skipIf(sys.version_info[0] < 3, "make_mmcif.py needs Python 3") + def test_add_multi_data(self): + """make_mmcif should fail to add system with multiple data blocks""" + incif = utils.get_input_file_name(TOPDIR, 'mini.cif') + addcif = utils.get_input_file_name(TOPDIR, 'mini_add.cif') + with open(addcif) as fh: + addcif_contents = fh.read() + addcif_multi = 'addcif_multi.cif' + with open(addcif_multi, 'w') as fh: + fh.write(addcif_contents) + fh.write(addcif_contents.replace('data_model', 'data_model2')) + ret = subprocess.call([sys.executable, MAKE_MMCIF, incif, + '--add', addcif_multi]) + self.assertEqual(ret, 1) + os.unlink(addcif_multi) + if __name__ == '__main__': unittest.main() diff --git a/modules/core/dependency/python-ihm/test/test_multi_state_scheme.py b/modules/core/dependency/python-ihm/test/test_multi_state_scheme.py index dc3077cc90..76b1dbade0 100644 --- a/modules/core/dependency/python-ihm/test/test_multi_state_scheme.py +++ b/modules/core/dependency/python-ihm/test/test_multi_state_scheme.py @@ -386,7 +386,7 @@ def test_equilibriumconstant_init(self): 'equilibrium constant is determined from kinetic rates, kAB/kBA') def test_equilibrium_constant_eq(self): - """Test equality of EquilibriumConstant objetcs""" + """Test equality of EquilibriumConstant objects""" e_ref1 = ihm.multi_state_scheme.EquilibriumConstant( value='1.0', unit='a') @@ -550,7 +550,7 @@ def test_relaxationtime_init(self): unit='wrong_unit') def test_relaxationtime_eq(self): - """Test 
equality of RelaxationTime objetcs""" + """Test equality of RelaxationTime objects""" r_ref = ihm.multi_state_scheme.RelaxationTime( value=1.0, unit='milliseconds', diff --git a/modules/core/dependency/python-ihm/test/test_reader.py b/modules/core/dependency/python-ihm/test/test_reader.py index f8dbf604d4..3655dcddea 100644 --- a/modules/core/dependency/python-ihm/test/test_reader.py +++ b/modules/core/dependency/python-ihm/test/test_reader.py @@ -914,12 +914,14 @@ def test_external_file_handler(self): _ihm_external_files.reference_id _ihm_external_files.file_path _ihm_external_files.content_type +_ihm_external_files.file_format _ihm_external_files.file_size_bytes _ihm_external_files.details -1 1 scripts/test.py 'Modeling workflow or script' 180 'Test script' -2 2 foo/bar.txt 'Input data or restraints' . 'Test text' -3 3 . 'Modeling or post-processing output' . 'Ensemble structures' -4 3 . . . . +1 1 scripts/test.py 'Modeling workflow or script' TXT 180 'Test script' +2 2 foo/bar.txt 'Input data or restraints' TXT 42.0 'Test text' +3 3 . 'Modeling or post-processing output' . . 'Ensemble structures' +4 3 . . . . . +5 3 foo.txt Other . . 'Other file' """ # Order of the categories shouldn't matter cif1 = ext_ref_cat + ext_file_cat @@ -927,11 +929,13 @@ def test_external_file_handler(self): for cif in cif1, cif2: for fh in cif_file_handles(cif): s, = ihm.reader.read(fh) - l1, l2, l3, l4 = s.locations + l1, l2, l3, l4, l5 = s.locations self.assertEqual(l1.path, 'scripts/test.py') self.assertEqual(l1.details, 'Test script') self.assertEqual(l1.repo.doi, '10.5281/zenodo.1218053') + self.assertIsInstance(l1.file_size, int) self.assertEqual(l1.file_size, 180) + self.assertEqual(l1.file_format, 'TXT') self.assertEqual(l1.repo.details, 'test repo') self.assertEqual(l1.__class__, ihm.location.WorkflowFileLocation) @@ -939,22 +943,29 @@ def test_external_file_handler(self): self.assertEqual(l2.path, 'foo/bar.txt') self.assertEqual(l2.details, 'Test text') self.assertIsNone(l2.repo) - self.assertIsNone(l2.file_size) + self.assertIsInstance(l2.file_size, float) + self.assertAlmostEqual(l2.file_size, 42.0, delta=0.01) + self.assertEqual(l2.file_format, 'TXT') self.assertEqual(l2.__class__, ihm.location.InputFileLocation) self.assertEqual(l3.path, '.') self.assertEqual(l3.details, 'Ensemble structures') self.assertIsNone(l3.file_size) + self.assertIsNone(l3.file_format) self.assertEqual(l3.repo.doi, '10.5281/zenodo.1218058') self.assertEqual(l3.__class__, ihm.location.OutputFileLocation) self.assertEqual(l4.path, '.') self.assertIsNone(l4.file_size) + self.assertIsNone(l4.file_format) self.assertIsNone(l4.details) self.assertEqual(l4.repo.doi, '10.5281/zenodo.1218058') # Type is unspecified self.assertEqual(l4.__class__, ihm.location.FileLocation) + self.assertEqual(l5.content_type, 'Other') + self.assertEqual(l5.__class__, ihm.location.FileLocation) + def test_dataset_list_handler(self): """Test DatasetListHandler""" cif = """ @@ -1993,7 +2004,7 @@ def test_atom_site_handler(self): self.assertIsNone(a1.occupancy) self.assertEqual(a2.asym_unit._id, 'B') - self.assertIsNone(a2.seq_id) + self.assertEqual(a2.seq_id, 1) self.assertEqual(a2.atom_id, 'CA') self.assertEqual(a2.type_symbol, 'C') self.assertEqual(a2.het, True) @@ -2123,6 +2134,12 @@ def test_atom_site_handler_water(self): _entity.id _entity.type 1 water +loop_ +_struct_asym.id +_struct_asym.entity_id +_struct_asym.details +A 1 Water +B 1 Water # loop_ _pdbx_nonpoly_scheme.asym_id @@ -2134,7 +2151,7 @@ def test_atom_site_handler_water(self): 
_pdbx_nonpoly_scheme.auth_mon_id _pdbx_nonpoly_scheme.pdb_strand_id _pdbx_nonpoly_scheme.pdb_ins_code -A 1 HOH 1 6 6 HOH A . +A 1 HOH 1 50 500 HOH A . # loop_ _atom_site.group_PDB @@ -2156,17 +2173,93 @@ def test_atom_site_handler_water(self): _atom_site.B_iso_or_equiv _atom_site.pdbx_PDB_model_num _atom_site.ihm_model_id -HETATM 1 O O . HOH . 6 ? A 10.000 10.000 10.000 . 1 A . 1 1 -HETATM 2 O O . HOH . 7 . A 20.000 20.000 20.000 . 1 A . 1 1 +HETATM 1 O O . HOH . 40 ? A 10.000 10.000 10.000 . 1 A . 1 1 +HETATM 2 O O . HOH . 50 ? A 10.000 10.000 10.000 . 1 A . 1 1 +HETATM 3 O O . HOH . 60 . A 20.000 20.000 20.000 . 1 A . 1 1 +HETATM 4 O O . HOH . 70 . B 20.000 20.000 20.000 . 1 B . 1 1 """) s, = ihm.reader.read(fh) m = s.state_groups[0][0][0][0] - a1, a2 = m._atoms - # First atom is in pdbx_nonpoly_scheme with - # ndb_seq_num=1, pdb_seq_num=6 + a1, a2, a3, b1 = m._atoms + # Should include info from both atom_site and scheme table + self.assertEqual(a1.asym_unit.auth_seq_id_map, + {1: (40, None), 2: (50, None), 3: (60, None)}) + self.assertEqual(a1.asym_unit.orig_auth_seq_id_map, + {2: 500}) + self.assertEqual(b1.asym_unit.auth_seq_id_map, {1: (70, None)}) + self.assertIsNone(b1.asym_unit.orig_auth_seq_id_map) + # seq_id should be assigned based on atom_site self.assertEqual(a1.seq_id, 1) - # Second atom is not in pdbx_nonpoly_scheme, so we keep auth_seq_id - self.assertEqual(a2.seq_id, 7) + self.assertEqual(a2.seq_id, 2) + self.assertEqual(a3.seq_id, 3) + self.assertEqual(b1.seq_id, 1) + + def test_atom_site_handler_branched(self): + """Test AtomSiteHandler reading branched molecules""" + cif = """ +loop_ +_entity.id +_entity.type +1 branched +loop_ +_struct_asym.id +_struct_asym.entity_id +_struct_asym.details +A 1 . +# +loop_ +_pdbx_branch_scheme.asym_id +_pdbx_branch_scheme.entity_id +_pdbx_branch_scheme.mon_id +_pdbx_branch_scheme.num +_pdbx_branch_scheme.pdb_seq_num +_pdbx_branch_scheme.auth_seq_num +_pdbx_branch_scheme.auth_mon_id +_pdbx_branch_scheme.pdb_asym_id +A 1 BGC 1 51 501 BGC A +A 1 BGC 2 52 502 BGC A +A 1 BGC 3 53 503 BGC A +# +loop_ +_atom_site.group_PDB +_atom_site.id +_atom_site.type_symbol +_atom_site.label_atom_id +_atom_site.label_alt_id +_atom_site.label_comp_id +_atom_site.label_seq_id +_atom_site.auth_seq_id +_atom_site.pdbx_PDB_ins_code +_atom_site.label_asym_id +_atom_site.Cartn_x +_atom_site.Cartn_y +_atom_site.Cartn_z +_atom_site.occupancy +_atom_site.label_entity_id +_atom_site.auth_asym_id +_atom_site.B_iso_or_equiv +_atom_site.pdbx_PDB_model_num +_atom_site.ihm_model_id +HETATM 1 C C . BGC . 52 ? A 10.000 10.000 10.000 . 1 A . 1 1 +HETATM 2 C C . BGC . 53 ? A 10.000 10.000 10.000 . 1 A . 1 1 +""" + # Should fail since residue #60 is not in the scheme table + badline = "HETATM 3 C C . BGC . 60 . A 20.00 20.00 20.00 . 1 A . 1 1" + fh = StringIO(cif + badline) + self.assertRaises(ValueError, ihm.reader.read, fh) + + fh = StringIO(cif) + s, = ihm.reader.read(fh) + m = s.state_groups[0][0][0][0] + a1, a2 = m._atoms + # seq_id should match num, i.e. 
start at 2 since residue 51 is missing + self.assertEqual(a1.seq_id, 2) + self.assertEqual(a2.seq_id, 3) + self.assertEqual(a1.asym_unit.auth_seq_id_map, + {1: (51, None), 2: (52, None), 3: (53, None)}) + self.assertEqual(a1.asym_unit.orig_auth_seq_id_map, + {1: 501, 2: 502, 3: 503}) + self.assertEqual(a1.asym_unit.num_map, {1: 2, 2: 3}) def test_derived_distance_restraint_handler(self): """Test DerivedDistanceRestraintHandler""" @@ -2679,18 +2772,18 @@ def test_poly_seq_scheme_handler_str_seq_id(self): _pdbx_poly_seq_scheme.pdb_ins_code A 1 1 6 6 ? . A 1 2 7 12 ? . -A 1 3 8 24 ? X +A 1 3 8 24 ? . A 1 4 9A 48A ? . """) s, = ihm.reader.read(fh) asym, = s.asym_units self.assertIsNone(asym._strand_id) self.assertEqual(asym.auth_seq_id_map, {1: (6, None), 2: (7, None), - 3: (8, 'X'), 4: ('9A', None)}) + 3: (8, None), 4: ('9A', None)}) self.assertEqual([asym.residue(i).auth_seq_id for i in range(1, 5)], [6, 7, 8, '9A']) self.assertIsNone(asym.residue(1).ins_code) - self.assertEqual(asym.residue(3).ins_code, 'X') + self.assertIsNone(asym.residue(3).ins_code) self.assertEqual(asym.orig_auth_seq_id_map, {2: 12, 3: 24, 4: '48A'}) def test_nonpoly_scheme_handler(self): @@ -2733,13 +2826,13 @@ def test_nonpoly_scheme_handler(self): _pdbx_nonpoly_scheme.auth_seq_num _pdbx_nonpoly_scheme.pdb_strand_id _pdbx_nonpoly_scheme.pdb_ins_code -A 1 FOO 1 1 1 . . A 1 BAR 1 101 202 . . B 2 BAR 1 1 1 Q X C 3 HOH . 1 1 . . C 3 HOH 2 2 2 . . C 3 HOH 3 5 10 . . C 3 HOH 4 1 20 . . +C 3 HOH 5 7 7 . . """) s, = ihm.reader.read(fh) e1, e2, e3 = s.entities @@ -2767,10 +2860,9 @@ def test_nonpoly_scheme_handler(self): self.assertEqual(a2._strand_id, 'Q') self.assertIsNone(a2.orig_auth_seq_id_map) - # For waters, the first row should be ignored since ndb_seq_num - # is missing; the second row should also be ignored because it - # is a one-to-one mapping; only the last two rows should be used - self.assertEqual(a3.auth_seq_id_map, {3: (5, None), 4: (1, None)}) + self.assertEqual(a3.auth_seq_id_map, {1: (1, None), 2: (2, None), + 3: (5, None), 4: (1, None), + 5: (7, None)}) self.assertEqual(a3.orig_auth_seq_id_map, {3: 10, 4: 20}) def test_cross_link_list_handler(self): @@ -4941,23 +5033,31 @@ def test_branch_scheme_handler(self): """) s, = ihm.reader.read(fh) asym_a, asym_b, asym_c = s.asym_units - self.assertEqual(asym_a.auth_seq_id_map, 4) + self.assertEqual(asym_a.auth_seq_id_map, + {1: (5, None), 2: (6, None), 3: (7, None), + 4: (8, None)}) self.assertEqual(asym_a._strand_id, '0') self.assertEqual(asym_a.residue(1).auth_seq_id, 5) self.assertIsNone(asym_a.orig_auth_seq_id_map) + self.assertIsNone(asym_a.num_map) - self.assertEqual(asym_b.auth_seq_id_map, 0) + self.assertEqual(asym_b.auth_seq_id_map, + {1: (1, None), 2: (2, None), 3: (3, None), + 4: (4, None)}) self.assertIsNone(asym_b._strand_id) self.assertEqual(asym_b.residue(1).auth_seq_id, 1) self.assertEqual(asym_b.orig_auth_seq_id_map, {1: 11, 2: 12, 3: 13, 4: 14}) + self.assertIsNone(asym_b.num_map) self.assertEqual(asym_c.auth_seq_id_map, {1: (2, None), 2: (4, None), 3: (6, None), 4: (8, None)}) self.assertIsNone(asym_c._strand_id) self.assertEqual(asym_c.residue(1).auth_seq_id, 2) - self.assertIsNone(asym_c.orig_auth_seq_id_map) + self.assertEqual(asym_c.orig_auth_seq_id_map, + {1: None, 2: None, 3: None, 4: None}) + self.assertIsNone(asym_c.num_map) def test_entity_branch_list_handler(self): """Test EntityBranchListHandler""" @@ -5041,6 +5141,48 @@ def test_entity_branch_link_handler(self): self.assertIsNone(lnk2.order) self.assertIsNone(lnk2.details) + def 
test_database_handler(self): + """Test DatabaseHandler""" + fh = StringIO(""" +loop_ +_database_2.database_id +_database_2.database_code +_database_2.pdbx_database_accession +_database_2.pdbx_DOI +foo bar . ? +baz 1abc 1abcxyz 1.2.3.4 +""") + s, = ihm.reader.read(fh) + d1, d2 = s.databases + self.assertEqual(d1.id, 'foo') + self.assertEqual(d1.code, 'bar') + self.assertIsNone(d1.accession) + self.assertIs(d1.doi, ihm.unknown) + self.assertEqual(d2.id, 'baz') + self.assertEqual(d2.code, '1abc') + self.assertEqual(d2.accession, '1abcxyz') + self.assertEqual(d2.doi, '1.2.3.4') + + def test_database_status_handler(self): + """Test DatabaseStatusHandler""" + fh = StringIO(""" +_pdbx_database_status.status_code REL +_pdbx_database_status.entry_id 5FD1 +_pdbx_database_status.recvd_initial_deposition_date 1993-06-29 +_pdbx_database_status.deposit_site ? +_pdbx_database_status.process_site BNL +_pdbx_database_status.SG_entry . +""") + s, = ihm.reader.read(fh) + # Should pass through to a dict + self.assertEqual(s._database_status, + {'status_code': 'REL', + 'entry_id': '5FD1', + 'recvd_initial_deposition_date': '1993-06-29', + 'deposit_site': ihm.unknown, + 'process_site': 'BNL', + 'sg_entry': None}) + if __name__ == '__main__': unittest.main() diff --git a/modules/core/dependency/python-ihm/util/debian/changelog b/modules/core/dependency/python-ihm/util/debian/changelog new file mode 100644 index 0000000000..21f0eaf831 --- /dev/null +++ b/modules/core/dependency/python-ihm/util/debian/changelog @@ -0,0 +1,11 @@ +python-ihm (1.1-1~@CODENAME@) @CODENAME@; urgency=low + + * python-ihm 1.1 release + + -- Ben Webb Thu, 09 May 2024 12:44:51 -0700 + +python-ihm (1.0-1~@CODENAME@) @CODENAME@; urgency=low + + * Initial .deb release + + -- Ben Webb Thu, 07 Mar 2024 00:19:35 +0000 diff --git a/modules/core/dependency/python-ihm/util/debian/control b/modules/core/dependency/python-ihm/util/debian/control new file mode 100644 index 0000000000..82578bb835 --- /dev/null +++ b/modules/core/dependency/python-ihm/util/debian/control @@ -0,0 +1,16 @@ +Source: python-ihm +Priority: optional +Maintainer: Ben Webb +Build-Depends: debhelper-compat (= 13), dh-python, swig, python3-dev +Standards-Version: 4.6.2 +Section: libs +Homepage: https://github.com/ihmwg/python-ihm +Vcs-Browser: https://github.com/ihmwg/python-ihm + +Package: python3-ihm +Section: libs +Architecture: any +Depends: ${shlibs:Depends}, ${misc:Depends}, python3-msgpack +Description: Package for handling IHM mmCIF and BinaryCIF files + This is a Python package to assist in handling mmCIF and BinaryCIF files + compliant with the integrative/hybrid modeling (IHM) extension. 
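The new _database_2 round-trip tested above is driven by ihm.Database objects and the new databases argument to ihm.System. A minimal sketch, assuming the default dumper set includes the new _DatabaseDumper; the identifiers below are illustrative:

```python
# Minimal sketch of the new _database_2 support; identifiers are illustrative.
import ihm
import ihm.dumper

s = ihm.System(title='test system',
               databases=[ihm.Database(id='PDB', code='1abc',
                                       accession='pdb_00001abc',
                                       doi='10.2210/pdb1abc/pdb')])

with open('out.cif', 'w') as fh:
    # Each Database becomes a row in the _database_2 loop on output
    ihm.dumper.write(fh, [s])
```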
diff --git a/modules/core/dependency/python-ihm/util/debian/copyright b/modules/core/dependency/python-ihm/util/debian/copyright new file mode 100644 index 0000000000..915c2202b8 --- /dev/null +++ b/modules/core/dependency/python-ihm/util/debian/copyright @@ -0,0 +1,23 @@ +Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ +Upstream-Name: python-ihm +Source: https://github.com/ihmwg/python-ihm + +Copyright: 2018-2024 IHM Working Group +License: MIT + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + . + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + . + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE. diff --git a/modules/core/dependency/python-ihm/util/debian/make-package.sh b/modules/core/dependency/python-ihm/util/debian/make-package.sh new file mode 100755 index 0000000000..2e859631af --- /dev/null +++ b/modules/core/dependency/python-ihm/util/debian/make-package.sh @@ -0,0 +1,24 @@ +#!/bin/sh +# Build a Debian package from source + +set -e + +VERSION=$(grep __version__ ../../ihm/__init__.py |cut -d\' -f2) +CODENAME=`lsb_release -c -s` + +# Make sure we can find the rest of our input files +TOOL_DIR=`dirname "$0"` +# Get absolute path to top dir +TOP_DIR=`cd "${TOOL_DIR}/../.." && pwd` + +cd ${TOP_DIR} +rm -rf debian +cp -r util/debian/ . +rm debian/make-package.sh +sed -i -e "s/\@CODENAME\@/$CODENAME/g" debian/changelog + +if [ "${CODENAME}" = "focal" ]; then + sed -i -e "s/debhelper-compat (= 13)/debhelper-compat (= 12)/" debian/control +fi + +dpkg-buildpackage -S diff --git a/modules/core/dependency/python-ihm/util/debian/rules b/modules/core/dependency/python-ihm/util/debian/rules new file mode 100755 index 0000000000..7386c89636 --- /dev/null +++ b/modules/core/dependency/python-ihm/util/debian/rules @@ -0,0 +1,5 @@ +#!/usr/bin/make -f +#export DH_VERBOSE=1 +export PYBUILD_NAME=ihm +%: + dh $@ --with python3 --buildsystem=pybuild diff --git a/modules/core/dependency/python-ihm/util/debian/source/format b/modules/core/dependency/python-ihm/util/debian/source/format new file mode 100644 index 0000000000..163aaf8d82 --- /dev/null +++ b/modules/core/dependency/python-ihm/util/debian/source/format @@ -0,0 +1 @@ +3.0 (quilt) diff --git a/modules/core/dependency/python-ihm/util/make-mmcif.py b/modules/core/dependency/python-ihm/util/make-mmcif.py deleted file mode 100644 index 817858fae6..0000000000 --- a/modules/core/dependency/python-ihm/util/make-mmcif.py +++ /dev/null @@ -1,74 +0,0 @@ -#!/usr/bin/env python3 - -""" -Add minimal IHM-related tables to an mmCIF file. 
- -Given any mmCIF file as input, this script will add any missing -IHM-related tables and write out a new file that is minimally compliant -with the IHM dictionary. - -This is done by simply reading in the original file with python-ihm and -then writing it out again, so - a) any data in the input file that is not understood by python-ihm - will be lost on output; and - b) input files that aren't compliant with the PDBx dictionary, or that - contain syntax errors or other problems, may crash or otherwise confuse - python-ihm. -""" - - -import ihm.reader -import ihm.dumper -import ihm.model -import ihm.protocol -import sys -import os - - -def add_ihm_info(s): - if not s.title: - s.title = 'Auto-generated system' - - # Simple default assembly containing all chains - default_assembly = ihm.Assembly(s.asym_units, name='Modeled assembly') - - # Simple default atomic representation for everything - default_representation = ihm.representation.Representation( - [ihm.representation.AtomicSegment(asym, rigid=False) - for asym in s.asym_units]) - - # Simple default modeling protocol - default_protocol = ihm.protocol.Protocol(name='modeling') - - for state_group in s.state_groups: - for state in state_group: - for model_group in state: - for model in model_group: - if not model.assembly: - model.assembly = default_assembly - if not model.representation: - model.representation = default_representation - if not model.protocol: - model.protocol = default_protocol - return s - - -if len(sys.argv) != 2 and len(sys.argv) != 3: - print("Usage: %s input.cif [output.cif]" % sys.argv[0], file=sys.stderr) - sys.exit(1) - -fname = sys.argv[1] -if len(sys.argv) > 2: - out_fname = sys.argv[2] -else: - out_fname = 'output.cif' - -if (os.path.exists(fname) and os.path.exists(out_fname) - and os.path.samefile(fname, out_fname)): - raise ValueError("Input and output are the same file") - -with open(fname) as fh: - with open(out_fname, 'w') as fhout: - ihm.dumper.write( - fhout, [add_ihm_info(s) for s in ihm.reader.read(fh)], - variant=ihm.dumper.IgnoreVariant(['_audit_conform'])) diff --git a/modules/core/dependency/python-ihm/util/python-ihm.spec b/modules/core/dependency/python-ihm/util/python-ihm.spec index c037d64c51..1f96050538 100644 --- a/modules/core/dependency/python-ihm/util/python-ihm.spec +++ b/modules/core/dependency/python-ihm/util/python-ihm.spec @@ -1,7 +1,7 @@ Name: python3-ihm License: MIT Group: Applications/Engineering -Version: 0.43 +Version: 1.1 Release: 1%{?dist} Summary: Package for handling IHM mmCIF and BinaryCIF files Packager: Ben Webb @@ -36,6 +36,12 @@ sed -i -e "s/install_requires=\['msgpack'\]/#/" setup.py %defattr(-,root,root) %changelog +* Thu May 09 2024 Ben Webb 1.1-1 +- Update to latest upstream. + +* Tue Feb 13 2024 Ben Webb 1.0-1 +- Update to latest upstream. + * Fri Dec 08 2023 Ben Webb 0.43-1 - Update to latest upstream. 
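
The util/make-mmcif.py script deleted above implemented one small pattern on top of the python-ihm API: read each System from an input file, attach a default assembly, representation, and modeling protocol to any model that lacks them, and write everything back out. A condensed sketch of that pattern, using only calls visible in the removed script (file names are placeholders):

# Condensed sketch of the pattern from the removed make-mmcif.py: read
# systems with python-ihm, fill in minimal IHM defaults on any model
# missing them, then dump the result. Argument handling is simplified.
import sys

import ihm
import ihm.reader
import ihm.dumper
import ihm.representation
import ihm.protocol


def add_ihm_defaults(s):
    # Default assembly containing all chains
    assembly = ihm.Assembly(s.asym_units, name='Modeled assembly')
    # Default flexible atomic representation for everything
    representation = ihm.representation.Representation(
        [ihm.representation.AtomicSegment(asym, rigid=False)
         for asym in s.asym_units])
    # Default modeling protocol
    protocol = ihm.protocol.Protocol(name='modeling')
    for state_group in s.state_groups:
        for state in state_group:
            for model_group in state:
                for model in model_group:
                    model.assembly = model.assembly or assembly
                    model.representation = (model.representation
                                            or representation)
                    model.protocol = model.protocol or protocol
    return s


with open(sys.argv[1]) as fh, open(sys.argv[2], 'w') as out:
    ihm.dumper.write(out, [add_ihm_defaults(s) for s in ihm.reader.read(fh)])

(The removed script additionally passed variant=ihm.dumper.IgnoreVariant(['_audit_conform']) to ihm.dumper.write and guarded against the input and output being the same file; the sketch omits those details.)
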
diff --git a/modules/core/include/ClosePairsPairScore.h b/modules/core/include/ClosePairsPairScore.h index 18dc3f93b4..0e9915660e 100644 --- a/modules/core/include/ClosePairsPairScore.h +++ b/modules/core/include/ClosePairsPairScore.h @@ -43,8 +43,9 @@ class IMPCOREEXPORT KClosePairsPairScore : public PairScore { ParticlePairsTemp get_close_pairs(const ParticlePair &pp) const { return IMP::internal::get_particle( - pp[0]->get_model(), - get_close_pairs(pp[0]->get_model(), IMP::internal::get_index(pp))); + std::get<0>(pp)->get_model(), + get_close_pairs(std::get<0>(pp)->get_model(), + IMP::internal::get_index(pp))); } Restraints create_current_decomposition( @@ -88,8 +89,9 @@ class IMPCOREEXPORT ClosePairsPairScore : public PairScore { ParticlePairsTemp get_close_pairs(const ParticlePair &pp) const { return IMP::internal::get_particle( - pp[0]->get_model(), - get_close_pairs(pp[0]->get_model(), IMP::internal::get_index(pp))); + std::get<0>(pp)->get_model(), + get_close_pairs(std::get<0>(pp)->get_model(), + IMP::internal::get_index(pp))); } Restraints create_current_decomposition( Model *m, const ParticleIndexPair &vt) const; diff --git a/modules/core/include/DistanceToSingletonScore.h b/modules/core/include/DistanceToSingletonScore.h index e23d08552e..47a98ce0f7 100644 --- a/modules/core/include/DistanceToSingletonScore.h +++ b/modules/core/include/DistanceToSingletonScore.h @@ -28,7 +28,7 @@ IMPCORE_BEGIN_NAMESPACE point as passed to a UnaryFunction. This is useful for anchoring constraining particles within a sphere. - To restrain a set of particles store in SingletonContainer pc in a sphere + To restrain a set of particles stored in SingletonContainer pc in a sphere do the following: \include core/restrain_in_sphere.py */ @@ -96,7 +96,7 @@ IMP_GENERIC_OBJECT(DistanceToSingletonScore, distance_to_singleton_score, point as passed to a UnaryFunction. This is useful for anchoring constraining particles within a sphere. - To restrain a set of particles store in SingletonContainer pc in a sphere + To restrain a set of particles stored in SingletonContainer pc in a sphere do the following: \include core/restrain_in_sphere.py */ diff --git a/modules/core/include/MonteCarlo.h b/modules/core/include/MonteCarlo.h index b8e595d01a..5e88e57fe9 100644 --- a/modules/core/include/MonteCarlo.h +++ b/modules/core/include/MonteCarlo.h @@ -16,7 +16,7 @@ #include #include -#include +#include IMPCORE_BEGIN_NAMESPACE @@ -210,7 +210,7 @@ class IMPCOREEXPORT MonteCarlo : public Optimizer { bool score_moved_; double min_score_; IMP::PointerMember best_; - ::boost::uniform_real<> rand_; + ::boost::random::uniform_real_distribution<> rand_; }; //! 
This variant of Monte Carlo that relaxes after each move diff --git a/modules/core/include/SphereDistancePairScore.h b/modules/core/include/SphereDistancePairScore.h index beaaef68b8..5688fd820c 100644 --- a/modules/core/include/SphereDistancePairScore.h +++ b/modules/core/include/SphereDistancePairScore.h @@ -109,18 +109,20 @@ inline double HarmonicUpperBoundSphereDiameterPairScore::evaluate_index( Model *m, const ParticleIndexPair &p, DerivativeAccumulator *da) const { algebra::Vector3D delta = - m->get_sphere(p[0]).get_center() - m->get_sphere(p[1]).get_center(); + m->get_sphere(std::get<0>(p)).get_center() + - m->get_sphere(std::get<1>(p)).get_center(); static const double MIN_DISTANCE = .00001; double distance = delta.get_magnitude(); - double shifted_distance = distance - x0_ + m->get_sphere(p[0]).get_radius() + - m->get_sphere(p[1]).get_radius(); + double shifted_distance + = distance - x0_ + m->get_sphere(std::get<0>(p)).get_radius() + + m->get_sphere(std::get<1>(p)).get_radius(); if (shifted_distance < 0) return 0; double score = .5 * k_ * square(shifted_distance); if (da && distance > MIN_DISTANCE) { double deriv = k_ * shifted_distance; algebra::Vector3D uv = delta / distance; - m->add_to_coordinate_derivatives(p[0], uv * deriv, *da); - m->add_to_coordinate_derivatives(p[1], -uv * deriv, *da); + m->add_to_coordinate_derivatives(std::get<0>(p), uv * deriv, *da); + m->add_to_coordinate_derivatives(std::get<1>(p), -uv * deriv, *da); } return score; } diff --git a/modules/core/include/internal/close_pairs_helpers.h b/modules/core/include/internal/close_pairs_helpers.h index beb2e42b57..4b8d7d0d9c 100644 --- a/modules/core/include/internal/close_pairs_helpers.h +++ b/modules/core/include/internal/close_pairs_helpers.h @@ -46,8 +46,8 @@ inline void filter_close_pairs(C *c, ParticleIndexPairs &pips) { //! 
canonize pairs index order in pips, such that pi1>=pi0 for each pair (pi0,pi1) inline void fix_order(ParticleIndexPairs &pips) { for (unsigned int i = 0; i < pips.size(); ++i) { - if (pips[i][0] > pips[i][1]) { - pips[i] = ParticleIndexPair(pips[i][1], pips[i][0]); + if (std::get<0>(pips[i]) > std::get<1>(pips[i])) { + pips[i] = ParticleIndexPair(std::get<1>(pips[i]), std::get<0>(pips[i])); } } } @@ -116,8 +116,8 @@ FarParticle {} bool operator()(const ParticleIndexPair &pp) const { - int index0= pp[0].get_index(); - int index1= pp[1].get_index(); + int index0 = std::get<0>(pp).get_index(); + int index1 = std::get<1>(pp).get_index(); return !get_are_close(model_spheres_table_[index0], model_spheres_table_[index1], d_); @@ -148,9 +148,9 @@ struct InList { }*/ } bool operator()(const ParticlePair &pp) const { - if (std::binary_search(ps_.begin(), ps_.end(), pp[0])) + if (std::binary_search(ps_.begin(), ps_.end(), std::get<0>(pp))) return true; - else if (std::binary_search(ps_.begin(), ps_.end(), pp[1])) + else if (std::binary_search(ps_.begin(), ps_.end(), std::get<1>(pp))) return true; return false; // return pp[0]->has_attribute(key_) || pp[1]->has_attribute(key_); diff --git a/modules/core/include/predicates.h b/modules/core/include/predicates.h index 94a266e779..75213a99ba 100644 --- a/modules/core/include/predicates.h +++ b/modules/core/include/predicates.h @@ -68,21 +68,24 @@ class IsCollisionPairPredicate : public PairPredicate { const ParticleIndexPair &pi) const override { Float sr = - m->get_sphere(pi[0]).get_radius() + m->get_sphere(pi[1]).get_radius(); + m->get_sphere(std::get<0>(pi)).get_radius() + + m->get_sphere(std::get<1>(pi)).get_radius(); #if IMP_HAS_CHECKS > 1 - bool check_collisions = (get_distance(XYZR(m, pi[0]), XYZR(m, pi[1])) <= 0); + bool check_collisions = (get_distance(XYZR(m, std::get<0>(pi)), + XYZR(m, std::get<1>(pi))) <= 0); #endif for (unsigned int i = 0; i < 3; ++i) { - double delta = std::abs(m->get_sphere(pi[0]).get_center()[i] - - m->get_sphere(pi[1]).get_center()[i]); + double delta = std::abs(m->get_sphere(std::get<0>(pi)).get_center()[i] - + m->get_sphere(std::get<1>(pi)).get_center()[i]); if (delta >= sr) { IMP_INTERNAL_CHECK(!check_collisions, "Should be a collision"); return 0; } } bool col = - algebra::get_squared_distance(m->get_sphere(pi[0]).get_center(), - m->get_sphere(pi[1]).get_center()) < + algebra::get_squared_distance( + m->get_sphere(std::get<0>(pi)).get_center(), + m->get_sphere(std::get<1>(pi)).get_center()) < algebra::get_squared(sr); IMP_INTERNAL_CHECK(col == check_collisions, "Don't match"); return col ? 
1 : 0; diff --git a/modules/core/src/AngleTripletScore.cpp b/modules/core/src/AngleTripletScore.cpp index 0f850d7ec9..176cefc1b9 100644 --- a/modules/core/src/AngleTripletScore.cpp +++ b/modules/core/src/AngleTripletScore.cpp @@ -22,9 +22,9 @@ Float AngleTripletScore::evaluate_index(Model *m, const ParticleIndexTriplet &pi, DerivativeAccumulator *da) const { IMP_CHECK_OBJECT(f_.get()); - XYZ d0 = XYZ(m, pi[0]); - XYZ d1 = XYZ(m, pi[1]); - XYZ d2 = XYZ(m, pi[2]); + XYZ d0 = XYZ(m, std::get<0>(pi)); + XYZ d1 = XYZ(m, std::get<1>(pi)); + XYZ d2 = XYZ(m, std::get<2>(pi)); Float score; diff --git a/modules/core/src/BallMover.cpp b/modules/core/src/BallMover.cpp index abebb065ca..001085416b 100644 --- a/modules/core/src/BallMover.cpp +++ b/modules/core/src/BallMover.cpp @@ -10,7 +10,6 @@ #include #include #include -#include IMPCORE_BEGIN_NAMESPACE diff --git a/modules/core/src/ChecksScoreState.cpp b/modules/core/src/ChecksScoreState.cpp index 3e7614fe54..b38d509c21 100644 --- a/modules/core/src/ChecksScoreState.cpp +++ b/modules/core/src/ChecksScoreState.cpp @@ -9,7 +9,7 @@ #include #include #include -#include +#include IMPCORE_BEGIN_NAMESPACE @@ -22,7 +22,7 @@ ChecksScoreState::ChecksScoreState(Model *m, double prob) } void ChecksScoreState::do_before_evaluate() { - ::boost::uniform_real<> rand(0, 1); + ::boost::random::uniform_real_distribution<> rand(0, 1); if (rand(random_number_generator) < probability_) { set_check_level(USAGE_AND_INTERNAL); ++num_checked_; diff --git a/modules/core/src/ClosePairsPairScore.cpp b/modules/core/src/ClosePairsPairScore.cpp index e303ae90e1..b65ec49cb9 100644 --- a/modules/core/src/ClosePairsPairScore.cpp +++ b/modules/core/src/ClosePairsPairScore.cpp @@ -71,8 +71,8 @@ ParticleIndexPairs ClosePairsPairScore::get_close_pairs( Model *m, const ParticleIndexPair &p) const { ParticleIndexPairs ppt; Floats dists; - ParticleIndexes ps0 = expand(m->get_particle(p[0]), r_); - ParticleIndexes ps1 = expand(m->get_particle(p[1]), r_); + ParticleIndexes ps0 = expand(m->get_particle(std::get<0>(p)), r_); + ParticleIndexes ps1 = expand(m->get_particle(std::get<1>(p)), r_); fill_close_pairs(cpf_, m, th_, ps0, ps1, ppt); return ppt; } @@ -98,14 +98,16 @@ ParticleIndexPairs KClosePairsPairScore::get_close_pairs( IMP_OBJECT_LOG; // double mr= std::max(max_radius(psa), max_radius(psb)); ParticleIndexPairs ppt; - ParticleIndexes ps0 = expand(m->get_particle(p[0]), r_); - ParticleIndexes ps1 = expand(m->get_particle(p[1]), r_); + ParticleIndexes ps0 = expand(m->get_particle(std::get<0>(p)), r_); + ParticleIndexes ps1 = expand(m->get_particle(std::get<1>(p)), r_); if (ps0.size() + ps1.size() > 50) { Floats dists; double dist = last_distance_; - IMP_USAGE_CHECK(ps0.size() > 0, "Empty set of particles used for " << p[0]); - IMP_USAGE_CHECK(ps1.size() > 0, "Empty set of particles used for " << p[1]); + IMP_USAGE_CHECK(ps0.size() > 0, "Empty set of particles used for " + << std::get<0>(p)); + IMP_USAGE_CHECK(ps1.size() > 0, "Empty set of particles used for " + << std::get<1>(p)); do { IMP_LOG_VERBOSE("Searching for close pairs " << dist << std::endl); fill_close_pairs(cpf_, m, dist, ps0, ps1, ppt); @@ -115,8 +117,8 @@ ParticleIndexPairs KClosePairsPairScore::get_close_pairs( } while (ppt.size() < static_cast(k_)); algebra::internal::MinimalSet ms(k_); for (unsigned int i = 0; i < ppt.size(); ++i) { - double d = algebra::get_distance(m->get_sphere(ppt[i][0]), - m->get_sphere(ppt[i][1])); + double d = algebra::get_distance(m->get_sphere(std::get<0>(ppt[i])), + 
m->get_sphere(std::get<1>(ppt[i]))); // std::cout << "Trying " << d << " " << ppt[i] << std::endl; ms.insert(d, ppt[i]); } @@ -136,7 +138,8 @@ ParticleIndexPairs KClosePairsPairScore::get_close_pairs( IMP_IF_CHECK(USAGE) { if (k_ == 1) { double distance = - get_distance(XYZR(m, retps[0][0]), XYZR(m, retps[0][1])); + get_distance(XYZR(m, std::get<0>(retps[0])), + XYZR(m, std::get<1>(retps[0]))); for (unsigned int i = 0; i < ps0.size(); ++i) { for (unsigned int j = 0; j < ps1.size(); ++j) { double cdistance = get_distance(XYZR(m, ps0[i]), XYZR(m, ps1[j])); diff --git a/modules/core/src/DirectionMover.cpp b/modules/core/src/DirectionMover.cpp index 63a074adfc..8ea276ab6a 100644 --- a/modules/core/src/DirectionMover.cpp +++ b/modules/core/src/DirectionMover.cpp @@ -55,14 +55,14 @@ MonteCarloMoverResult DirectionMover::do_propose() { algebra::Vector3D axis = algebra::get_random_vector_on(algebra::get_unit_sphere_d<3>()); axis -= (axis * last_direction_) * last_direction_; - ::boost::uniform_real<> rand1(-max_angle_, max_angle_); + ::boost::random::uniform_real_distribution<> rand1(-max_angle_, max_angle_); Float angle = rand1(random_number_generator); algebra::Rotation3D rt = algebra::get_rotation_about_axis(axis, angle); d.set_direction(rt.get_rotated(last_direction_)); } if (reflect_prob_ > 0) { - ::boost::uniform_real<> rand2(0, 1); + ::boost::random::uniform_real_distribution<> rand2(0, 1); Float test = rand2(random_number_generator); if (test < reflect_prob_) { d.reflect(); diff --git a/modules/core/src/ExcludedVolumeRestraint.cpp b/modules/core/src/ExcludedVolumeRestraint.cpp index 97c0be8085..335ffa6837 100644 --- a/modules/core/src/ExcludedVolumeRestraint.cpp +++ b/modules/core/src/ExcludedVolumeRestraint.cpp @@ -246,7 +246,8 @@ double ExcludedVolumeRestraint::unprotected_evaluate_if_good( for (unsigned int i = 0; i < cur_list_.size(); ++i) { double c = ssps_->evaluate_index( get_model(), - ParticleIndexPair(cur_list_[i][0], cur_list_[i][1]), da); + ParticleIndexPair(std::get<0>(cur_list_[i]), + std::get<1>(cur_list_[i])), da); cur += c; max -= c; if (max < 0) { diff --git a/modules/core/src/MCCGSampler.cpp b/modules/core/src/MCCGSampler.cpp index cf3d24d060..e9401335ce 100644 --- a/modules/core/src/MCCGSampler.cpp +++ b/modules/core/src/MCCGSampler.cpp @@ -16,7 +16,7 @@ #include #include #include -#include +#include #include #include #include @@ -154,7 +154,7 @@ MonteCarloMoverResult ScoreWeightedIncrementalBallMover::do_propose() { IMP_LOG(SILENT, std::endl); };*/ while (true) { - ::boost::uniform_real<> rand(0, 1); + ::boost::random::uniform_real_distribution<> rand(0, 1); for (unsigned int i = 0; i < weights.size(); ++i) { if (rand(random_number_generator) < weights[i]) { moved_.push_back(ps_[i]); diff --git a/modules/core/src/NeighborsTable.cpp b/modules/core/src/NeighborsTable.cpp index 349fde86ce..8003f74cb0 100644 --- a/modules/core/src/NeighborsTable.cpp +++ b/modules/core/src/NeighborsTable.cpp @@ -21,8 +21,8 @@ void NeighborsTable::do_before_evaluate() { data_.clear(); IMP_CONTAINER_FOREACH(PairContainer, input_, { - data_[_1[0]].push_back(_1[1]); - data_[_1[1]].push_back(_1[0]); + data_[std::get<0>(_1)].push_back(std::get<1>(_1)); + data_[std::get<1>(_1)].push_back(std::get<0>(_1)); }); } diff --git a/modules/core/src/RefinedPairsPairScore.cpp b/modules/core/src/RefinedPairsPairScore.cpp index 3f45b464f8..5d131ad3b8 100644 --- a/modules/core/src/RefinedPairsPairScore.cpp +++ b/modules/core/src/RefinedPairsPairScore.cpp @@ -34,8 +34,8 @@ ParticlesTemp get_set(Particle *a, 
Refiner *r) { Float RefinedPairsPairScore::evaluate_index(Model *m, const ParticleIndexPair &p, DerivativeAccumulator *da) const { - ParticlesTemp ps[2] = {get_set(m->get_particle(p[0]), r_), - get_set(m->get_particle(p[1]), r_)}; + ParticlesTemp ps[2] = {get_set(m->get_particle(std::get<0>(p)), r_), + get_set(m->get_particle(std::get<1>(p)), r_)}; double ret = 0; for (unsigned int i = 0; i < ps[0].size(); ++i) { for (unsigned int j = 0; j < ps[1].size(); ++j) { diff --git a/modules/core/src/RigidBodyAnglePairScore.cpp b/modules/core/src/RigidBodyAnglePairScore.cpp index 910c103c4c..572db36c59 100644 --- a/modules/core/src/RigidBodyAnglePairScore.cpp +++ b/modules/core/src/RigidBodyAnglePairScore.cpp @@ -22,9 +22,9 @@ Float RigidBodyAnglePairScore::evaluate_index(Model *m, IMP_USAGE_CHECK(!da, "Derivatives not implemented"); // check if rigid body - IMP_USAGE_CHECK(RigidBody::get_is_setup(m, pi[0]), + IMP_USAGE_CHECK(RigidBody::get_is_setup(m, std::get<0>(pi)), "Particle is not a rigid body"); - IMP_USAGE_CHECK(RigidBody::get_is_setup(m, pi[1]), + IMP_USAGE_CHECK(RigidBody::get_is_setup(m, std::get<1>(pi)), "Particle is not a rigid body"); // principal axis of inertia is aligned to z axis when creating rigid body @@ -32,8 +32,10 @@ Float RigidBodyAnglePairScore::evaluate_index(Model *m, algebra::Vector3D origin=algebra::Vector3D(0.0,0.0,0.0); // get the two references frames - algebra::ReferenceFrame3D rf0 = RigidBody(m, pi[0]).get_reference_frame(); - algebra::ReferenceFrame3D rf1 = RigidBody(m, pi[1]).get_reference_frame(); + algebra::ReferenceFrame3D rf0 + = RigidBody(m, std::get<0>(pi)).get_reference_frame(); + algebra::ReferenceFrame3D rf1 + = RigidBody(m, std::get<1>(pi)).get_reference_frame(); // rigid body 0 algebra::Vector3D i0 = rf0.get_global_coordinates(inertia); diff --git a/modules/core/src/RigidBodyMover.cpp b/modules/core/src/RigidBodyMover.cpp index 7e24686696..5c85c28528 100644 --- a/modules/core/src/RigidBodyMover.cpp +++ b/modules/core/src/RigidBodyMover.cpp @@ -40,7 +40,7 @@ MonteCarloMoverResult RigidBodyMover::do_propose() { if (max_angle_ > 0) { algebra::Vector3D axis = algebra::get_random_vector_on(algebra::get_unit_sphere_d<3>()); - ::boost::uniform_real<> rand(-max_angle_, max_angle_); + ::boost::random::uniform_real_distribution<> rand(-max_angle_, max_angle_); Float angle = rand(random_number_generator); algebra::Rotation3D r = algebra::get_rotation_about_axis(axis, angle); rc = r * d.get_reference_frame().get_transformation_to().get_rotation(); diff --git a/modules/core/src/RigidBodyTunneler.cpp b/modules/core/src/RigidBodyTunneler.cpp index 52d115b66d..1c41677625 100644 --- a/modules/core/src/RigidBodyTunneler.cpp +++ b/modules/core/src/RigidBodyTunneler.cpp @@ -7,7 +7,7 @@ */ #include #include -#include +#include #include #include #include @@ -92,7 +92,8 @@ MonteCarloMoverResult RigidBodyTunneler::do_propose() { // pick another entry point at random IMP_LOG_TERSE("New iteration, entry_nums.size() == " << entry_nums.size() << std::endl); - ::boost::uniform_int randint(0, entry_nums.size() - 1); + ::boost::random::uniform_int_distribution randint( + 0, entry_nums.size() - 1); unsigned dnum = randint(random_number_generator); distant = entry_nums[dnum]; IMP_LOG_TERSE("distant entry point is number " << distant << std::endl); diff --git a/modules/core/src/SphereDistancePairScore.cpp b/modules/core/src/SphereDistancePairScore.cpp index 731b2e39f1..07c73e5b3a 100644 --- a/modules/core/src/SphereDistancePairScore.cpp +++ 
b/modules/core/src/SphereDistancePairScore.cpp @@ -33,12 +33,12 @@ NormalizedSphereDistancePairScore::NormalizedSphereDistancePairScore( double NormalizedSphereDistancePairScore::evaluate_index( Model *m, const ParticleIndexPair &pip, DerivativeAccumulator *da) const { - Float ra = m->get_attribute(radius_, pip[0]); - Float rb = m->get_attribute(radius_, pip[1]); + Float ra = m->get_attribute(radius_, std::get<0>(pip)); + Float rb = m->get_attribute(radius_, std::get<1>(pip)); Float mr = std::min(ra, rb); // lambda is inefficient due to laziness return internal::evaluate_distance_pair_score( - XYZ(m, pip[0]), XYZ(m, pip[1]), da, f_.get(), + XYZ(m, std::get<0>(pip)), XYZ(m, std::get<1>(pip)), da, f_.get(), boost::lambda::_1 / mr - (ra + rb) / mr); } @@ -54,13 +54,13 @@ WeightedSphereDistancePairScore::WeightedSphereDistancePairScore( double WeightedSphereDistancePairScore::evaluate_index( Model *m, const ParticleIndexPair &p, DerivativeAccumulator *da) const { - Float ra = m->get_attribute(radius_, p[0]); - Float rb = m->get_attribute(radius_, p[1]); - Float wa = m->get_attribute(weight_, p[0]); - Float wb = m->get_attribute(weight_, p[1]); + Float ra = m->get_attribute(radius_, std::get<0>(p)); + Float rb = m->get_attribute(radius_, std::get<1>(p)); + Float wa = m->get_attribute(weight_, std::get<0>(p)); + Float wb = m->get_attribute(weight_, std::get<1>(p)); // lambda is inefficient due to laziness return internal::evaluate_distance_pair_score( - XYZ(m, p[0]), XYZ(m, p[1]), da, f_.get(), + XYZ(m, std::get<0>(p)), XYZ(m, std::get<1>(p)), da, f_.get(), (boost::lambda::_1 - (ra + rb)) * (wa + wb)); } diff --git a/modules/core/src/SurfaceMover.cpp b/modules/core/src/SurfaceMover.cpp index 3844ee7fa0..24c43666f2 100644 --- a/modules/core/src/SurfaceMover.cpp +++ b/modules/core/src/SurfaceMover.cpp @@ -70,14 +70,14 @@ MonteCarloMoverResult SurfaceMover::do_propose() { if (max_angle_ > 0) { algebra::Vector3D axis = algebra::get_random_vector_on(algebra::get_unit_sphere_d<3>()); - ::boost::uniform_real<> rand1(-max_angle_, max_angle_); + ::boost::random::uniform_real_distribution<> rand1(-max_angle_, max_angle_); Float angle = rand1(random_number_generator); algebra::Rotation3D rt = algebra::get_rotation_about_axis(axis, angle); s.set_normal(rt.get_rotated(s.get_normal())); } if (reflect_prob_ > 0) { - ::boost::uniform_real<> rand2(0, 1); + ::boost::random::uniform_real_distribution<> rand2(0, 1); Float test = rand2(random_number_generator); if (test < reflect_prob_) { s.reflect(); diff --git a/modules/core/src/SurfaceSymmetryConstraint.cpp b/modules/core/src/SurfaceSymmetryConstraint.cpp index 61e70629c9..540afed4aa 100644 --- a/modules/core/src/SurfaceSymmetryConstraint.cpp +++ b/modules/core/src/SurfaceSymmetryConstraint.cpp @@ -16,33 +16,33 @@ SurfaceSymmetryConstraint::SurfaceSymmetryConstraint(Model *m, const ParticleIndexPair &rbs) : IMP::Constraint(m, "SurfaceSymmetryConstraint%1%") , spip_(ss), rbpip_(rbs) { - IMP_USAGE_CHECK(Surface::get_is_setup(get_model(), ss[0]) & - Surface::get_is_setup(get_model(), ss[1]), + IMP_USAGE_CHECK(Surface::get_is_setup(get_model(), std::get<0>(ss)) & + Surface::get_is_setup(get_model(), std::get<1>(ss)), "First pair must be Surface particles."); - IMP_USAGE_CHECK(RigidBody::get_is_setup(get_model(), rbs[0]) & - RigidBody::get_is_setup(get_model(), rbs[1]), + IMP_USAGE_CHECK(RigidBody::get_is_setup(get_model(), std::get<0>(rbs)) & + RigidBody::get_is_setup(get_model(), std::get<1>(rbs)), "Second pair must be RigidBody particles."); } 
algebra::Transformation3D SurfaceSymmetryConstraint::get_transformation() const { return algebra::get_transformation_from_first_to_second( - RigidBody(get_model(), rbpip_[1]).get_reference_frame(), - RigidBody(get_model(), rbpip_[0]).get_reference_frame()); + RigidBody(get_model(), std::get<1>(rbpip_)).get_reference_frame(), + RigidBody(get_model(), std::get<0>(rbpip_)).get_reference_frame()); } void SurfaceSymmetryConstraint::do_update_attributes() { algebra::Transformation3D tf = get_transformation(); Surface s, sref; - s = Surface(get_model(), spip_[0]); - sref = Surface(get_model(), spip_[1]); + s = Surface(get_model(), std::get<0>(spip_)); + sref = Surface(get_model(), std::get<1>(spip_)); s.set_normal(tf.get_rotation().get_rotated(sref.get_normal())); s.set_coordinates(tf.get_transformed(sref.get_coordinates())); } void SurfaceSymmetryConstraint::do_update_derivatives(DerivativeAccumulator *da) { algebra::Rotation3D rot = get_transformation().get_inverse().get_rotation(); - Surface s1 = Surface(get_model(), spip_[0]); - Surface s2 = Surface(get_model(), spip_[1]); + Surface s1 = Surface(get_model(), std::get<0>(spip_)); + Surface s2 = Surface(get_model(), std::get<1>(spip_)); s2.add_to_derivatives(rot.get_rotated(s1.get_derivatives()), *da); s2.add_to_normal_derivatives(rot.get_rotated(s1.get_normal_derivatives()), *da); @@ -50,16 +50,16 @@ void SurfaceSymmetryConstraint::do_update_derivatives(DerivativeAccumulator *da) ModelObjectsTemp SurfaceSymmetryConstraint::do_get_inputs() const { ParticlesTemp ps; - ps.push_back(get_model()->get_particle(spip_[1])); - ps.push_back(get_model()->get_particle(rbpip_[0])); - ps.push_back(get_model()->get_particle(rbpip_[1])); + ps.push_back(get_model()->get_particle(std::get<1>(spip_))); + ps.push_back(get_model()->get_particle(std::get<0>(rbpip_))); + ps.push_back(get_model()->get_particle(std::get<1>(rbpip_))); return ps; } ModelObjectsTemp SurfaceSymmetryConstraint::do_get_outputs() const { ParticlesTemp ps; - ps.push_back(get_model()->get_particle(spip_[0])); - ps.push_back(get_model()->get_particle(spip_[1])); + ps.push_back(get_model()->get_particle(std::get<0>(spip_))); + ps.push_back(get_model()->get_particle(std::get<1>(spip_))); return ps; } diff --git a/modules/core/src/TransformedDistancePairScore.cpp b/modules/core/src/TransformedDistancePairScore.cpp index b2385d59b7..818a97a207 100644 --- a/modules/core/src/TransformedDistancePairScore.cpp +++ b/modules/core/src/TransformedDistancePairScore.cpp @@ -46,12 +46,12 @@ struct TransformParticle { double TransformedDistancePairScore::evaluate_index( Model *m, const ParticleIndexPair &pip, DerivativeAccumulator *da) const { - TransformParticle tb(t_, ri_, m, pip[1]); + TransformParticle tb(t_, ri_, m, std::get<1>(pip)); IMP_LOG_VERBOSE("Transformed particle is " << tb.get_coordinate(0) << " " << tb.get_coordinate(1) << " " << tb.get_coordinate(2) << std::endl); Float ret = internal::evaluate_distance_pair_score( - XYZ(m, pip[0]), tb, da, f_.get(), boost::lambda::_1); + XYZ(m, std::get<0>(pip)), tb, da, f_.get(), boost::lambda::_1); return ret; } diff --git a/modules/core/src/TypedPairScore.cpp b/modules/core/src/TypedPairScore.cpp index c1ef163bba..b4357f3056 100644 --- a/modules/core/src/TypedPairScore.cpp +++ b/modules/core/src/TypedPairScore.cpp @@ -12,15 +12,16 @@ IMPCORE_BEGIN_NAMESPACE Float TypedPairScore::evaluate_index(Model *m, const ParticleIndexPair &pip, DerivativeAccumulator *da) const { - ParticlePair p(m->get_particle(pip[0]), m->get_particle(pip[1])); + ParticlePair 
p(m->get_particle(std::get<0>(pip)), + m->get_particle(std::get<1>(pip))); PairScore *ps = get_pair_score(p); if (!ps) { if (!allow_invalid_types_) { IMP_THROW( "Attempt to evaluate TypedPairScore on " "particles with invalid types (" - << p[0]->get_value(typekey_) << ", " << p[1]->get_value(typekey_) - << ")", + << std::get<0>(p)->get_value(typekey_) << ", " + << std::get<1>(p)->get_value(typekey_) << ")", ValueException); } else { return 0.0; @@ -31,14 +32,14 @@ Float TypedPairScore::evaluate_index(Model *m, } PairScore *TypedPairScore::get_pair_score(const ParticlePair &p) const { - if (!p[0]->has_attribute(typekey_)) { - set_particle_type(p[0]); + if (!std::get<0>(p)->has_attribute(typekey_)) { + set_particle_type(std::get<0>(p)); } - if (!p[1]->has_attribute(typekey_)) { - set_particle_type(p[1]); + if (!std::get<1>(p)->has_attribute(typekey_)) { + set_particle_type(std::get<1>(p)); } - Int atype = p[0]->get_value(typekey_); - Int btype = p[1]->get_value(typekey_); + Int atype = std::get<0>(p)->get_value(typekey_); + Int btype = std::get<1>(p)->get_value(typekey_); ScoreMap::const_iterator psit = score_map_.find( std::pair(std::min(atype, btype), std::max(atype, btype))); diff --git a/modules/core/src/direction.cpp b/modules/core/src/direction.cpp index e2112aafc5..0ca00c1ac7 100644 --- a/modules/core/src/direction.cpp +++ b/modules/core/src/direction.cpp @@ -90,7 +90,7 @@ ObjectKey Direction::get_constraint_key() { void DirectionAngle::do_setup_particle(Model *m, ParticleIndex pi, const ParticleIndexPair &ds) { - DirectionAngle::do_setup_particle(m, pi, ds[0], ds[1]); + DirectionAngle::do_setup_particle(m, pi, std::get<0>(ds), std::get<1>(ds)); } void DirectionAngle::do_setup_particle(Model *m, ParticleIndex pi, diff --git a/modules/core/src/symmetry.cpp b/modules/core/src/symmetry.cpp index cb5dab52f9..8f638cb47f 100644 --- a/modules/core/src/symmetry.cpp +++ b/modules/core/src/symmetry.cpp @@ -157,7 +157,7 @@ TransformationSymmetryMover::get_random_rotation_about_point( // First, get a random rotation about the origin algebra::Vector3D axis = algebra::get_random_vector_on(algebra::get_unit_sphere_d<3>()); - ::boost::uniform_real<> rand(-max_angle_, max_angle_); + ::boost::random::uniform_real_distribution<> rand(-max_angle_, max_angle_); Float angle = rand(random_number_generator); algebra::Rotation3D r = algebra::get_rotation_about_axis(axis, angle); diff --git a/modules/display/include/geometry_macros.h b/modules/display/include/geometry_macros.h index 6ccb84d9b8..060143b6c2 100644 --- a/modules/display/include/geometry_macros.h +++ b/modules/display/include/geometry_macros.h @@ -166,8 +166,8 @@ : display::PairGeometry(pp) {} \ display::Geometries get_components() const override { \ display::Geometries ret; \ - Decorator d0(get_particle_pair()[0]); \ - Decorator d1(get_particle_pair()[1]); \ + Decorator d0(std::get<0>(get_particle_pair())); \ + Decorator d1(std::get<1>(get_particle_pair())); \ action; \ return ret; \ } \ @@ -180,8 +180,8 @@ display::Geometries get_components() const override { \ display::Geometries ret; \ for(ParticleIndexPair pip : get_container()->get_contents()) { \ - Decorator d0(get_container()->get_model(), pip[0]); \ - Decorator d1(get_container()->get_model(), pip[1]); \ + Decorator d0(get_container()->get_model(), std::get<0>(pip)); \ + Decorator d1(get_container()->get_model(), std::get<1>(pip)); \ action; \ } \ return ret; \ diff --git a/modules/display/src/particle_geometry.cpp b/modules/display/src/particle_geometry.cpp index 37e95f16ec..1bae5068bc 
100644 --- a/modules/display/src/particle_geometry.cpp +++ b/modules/display/src/particle_geometry.cpp @@ -20,7 +20,8 @@ SingletonsGeometry::SingletonsGeometry(SingletonContainerAdaptor pc, Color c) : Geometry(c, pc->get_name() + " geometry"), sc_(pc) {} PairGeometry::PairGeometry(const ParticlePair &p) - : Geometry(p.get_name() + " geometry"), p0_(p[0]), p1_(p[1]) {} + : Geometry(p.get_name() + " geometry"), + p0_(std::get<0>(p)), p1_(std::get<1>(p)) {} PairsGeometry::PairsGeometry(PairContainer *pc) : Geometry(pc->get_name() + " geometry"), sc_(pc) {} diff --git a/modules/domino/include/assignment_containers.h b/modules/domino/include/assignment_containers.h index ca730e19cd..b8af4ff35d 100644 --- a/modules/domino/include/assignment_containers.h +++ b/modules/domino/include/assignment_containers.h @@ -19,8 +19,8 @@ #include #include -#include -#include +#include +#include #include #include #include @@ -149,8 +149,8 @@ class IMPDOMINOEXPORT SampleAssignmentContainer : public AssignmentContainer { int width_; unsigned int k_; unsigned int i_; - boost::uniform_real select_; - boost::uniform_int<> place_; + boost::random::uniform_real_distribution select_; + boost::random::uniform_int_distribution<> place_; public: SampleAssignmentContainer(unsigned int k, diff --git a/modules/domino/include/internal/inference_utility.h b/modules/domino/include/internal/inference_utility.h index abed00d5a8..1be699e7c0 100644 --- a/modules/domino/include/internal/inference_utility.h +++ b/modules/domino/include/internal/inference_utility.h @@ -37,8 +37,8 @@ class IMPDOMINOEXPORT InferenceStatistics { }; Data get_data(const Subset &s, AssignmentContainer *ss) const; boost::unordered_map subsets_; - mutable boost::uniform_real select_; - mutable boost::uniform_int<> place_; + mutable boost::random::uniform_real_distribution select_; + mutable boost::random::uniform_int_distribution<> place_; const Data &get_data(const Subset &s) const; public: diff --git a/modules/domino/src/interactive.cpp b/modules/domino/src/interactive.cpp index 3a236f8d9c..123a056fe0 100644 --- a/modules/domino/src/interactive.cpp +++ b/modules/domino/src/interactive.cpp @@ -6,7 +6,7 @@ */ #include #include -#include +#include IMPDOMINO_BEGIN_NAMESPACE @@ -52,8 +52,8 @@ void load_merged_assignments_random_order(const Subset &first_subset, Assignments nd1a = second->get_assignments(IntRange(0, second->get_number_of_assignments())); - boost::uniform_int<> dist0(0, nd0a.size() - 1); - boost::uniform_int<> dist1(0, nd1a.size() - 1); + boost::random::uniform_int_distribution<> dist0(0, nd0a.size() - 1); + boost::random::uniform_int_distribution<> dist1(0, nd1a.size() - 1); IMP_PROGRESS_DISPLAY("Merge " << first_subset << " and " << second_subset, maximum_tries); for (unsigned int t = 0; t < maximum_tries; ++t) { diff --git a/modules/domino/src/particle_states.cpp b/modules/domino/src/particle_states.cpp index 3f506bc390..58c81b4fec 100644 --- a/modules/domino/src/particle_states.cpp +++ b/modules/domino/src/particle_states.cpp @@ -11,7 +11,7 @@ #include #include #include -#include +#include IMPDOMINO_BEGIN_NAMESPACE ParticleStates::~ParticleStates() {} @@ -178,7 +178,7 @@ namespace { struct RandomWrapper { int operator()(int i) { IMP_INTERNAL_CHECK(i > 0, "Zero i"); - boost::uniform_int ri(0, i - i); + boost::random::uniform_int_distribution ri(0, i - i); unsigned int ret = ri(random_number_generator); return ret; } diff --git a/modules/domino/src/subset_filters.cpp b/modules/domino/src/subset_filters.cpp index edba97690e..b827f7eb23 100644 --- 
a/modules/domino/src/subset_filters.cpp +++ b/modules/domino/src/subset_filters.cpp @@ -682,7 +682,7 @@ void PairListSubsetFilterTable::set_allowed_states(ParticlePair p, namespace { class ProbabilisticSubsetFilter : public SubsetFilter { double p_; - mutable boost::uniform_real<> r_; + mutable boost::random::uniform_real_distribution<> r_; public: ProbabilisticSubsetFilter(double p) diff --git a/modules/em/bin/resample_density b/modules/em/bin/resample_density index 04958eeef6..2fc67f8201 100755 --- a/modules/em/bin/resample_density +++ b/modules/em/bin/resample_density @@ -5,9 +5,7 @@ import IMP.em def main(): IMP.set_log_level(IMP.SILENT) - usage = """%prog [options] - """ - desc = """Resample a density map.""" + desc = "Resample a density map." p = IMP.ArgumentParser(description=desc) p.add_argument("density", help="input EM density map file name") p.add_argument("resample_level", type=int, help="resample level 2/3/4...") diff --git a/modules/em/bin/view_density_header b/modules/em/bin/view_density_header index 4305833ffb..22f7a97034 100755 --- a/modules/em/bin/view_density_header +++ b/modules/em/bin/view_density_header @@ -5,7 +5,6 @@ import IMP.em def main(): IMP.set_log_level(IMP.SILENT) - usage = "%prog [options] " p = IMP.ArgumentParser() p.add_argument("em_map", help="EM density map file name") diff --git a/modules/em/src/rigid_fitting.cpp b/modules/em/src/rigid_fitting.cpp index 2051ca040d..f5117b83d7 100644 --- a/modules/em/src/rigid_fitting.cpp +++ b/modules/em/src/rigid_fitting.cpp @@ -240,7 +240,8 @@ FittingSolutions local_rigid_fitting_grid_search( for (int i = 0; i < number_of_rotations; i++) { algebra::Vector3D axis = algebra::get_random_vector_on( algebra::Sphere3D(algebra::Vector3D(0.0, 0.0, 0.0), 1.)); - ::boost::uniform_real<> rand(-max_angle_in_radians, max_angle_in_radians); + ::boost::random::uniform_real_distribution<> rand(-max_angle_in_radians, + max_angle_in_radians); Float angle = rand(random_number_generator); algebra::Rotation3D r = algebra::get_rotation_about_axis(axis, angle); rots.push_back(r); diff --git a/modules/em2d/include/JPGImageReaderWriter.h b/modules/em2d/include/JPGImageReaderWriter.h index 8ffc60f7d0..8366aa6c4f 100644 --- a/modules/em2d/include/JPGImageReaderWriter.h +++ b/modules/em2d/include/JPGImageReaderWriter.h @@ -10,7 +10,7 @@ #include #include "IMP/em2d/opencv_interface.h" #include "IMP/em/ImageHeader.h" -#include +#include IMPEM2D_BEGIN_NAMESPACE @@ -74,7 +74,7 @@ class JPGImageReaderWriter : public ImageReaderWriter { "discards image header " << std::endl); // check extension - String ext = boost::filesystem::extension(filename); + String ext = boost::filesystem::path(filename).extension().string(); IMP_LOG_VERBOSE("JPGImageReaderWriter writing to " << filename << std::endl); if (ext != ".jpg" && ext != ".jpeg") { diff --git a/modules/em2d/include/TIFFImageReaderWriter.h b/modules/em2d/include/TIFFImageReaderWriter.h index 05ffc9a778..46e79f8496 100644 --- a/modules/em2d/include/TIFFImageReaderWriter.h +++ b/modules/em2d/include/TIFFImageReaderWriter.h @@ -76,7 +76,7 @@ class TIFFImageReaderWriter : public ImageReaderWriter { "discards image header " << std::endl); // check extension - String ext = boost::filesystem::extension(filename); + String ext = boost::filesystem::path(filename).extension().string(); IMP_LOG_VERBOSE("TIFFImageReaderWriter writing to " << filename << std::endl); if (ext != ".tiff" && ext != ".tif") { diff --git a/modules/em2d/src/RegistrationResult.cpp b/modules/em2d/src/RegistrationResult.cpp index 
54185c1f67..7322f500c1 100644 --- a/modules/em2d/src/RegistrationResult.cpp +++ b/modules/em2d/src/RegistrationResult.cpp @@ -7,7 +7,7 @@ #include "IMP/em2d/RegistrationResult.h" #include "IMP/em2d/internal/rotation_helper.h" #include -#include +#include IMPEM2D_BEGIN_NAMESPACE @@ -60,7 +60,7 @@ void write_registration_results(String filename, void RegistrationResult::set_random_registration(unsigned int index, double maximum_shift) { - ::boost::uniform_real<> rand(0., 1.); + ::boost::random::uniform_real_distribution<> rand(0., 1.); // Random point in the sphere, pick to ensure even distribution double u = rand(random_number_generator); double v = rand(random_number_generator); diff --git a/modules/em2d/src/RelativePositionMover.cpp b/modules/em2d/src/RelativePositionMover.cpp index 276079f58b..6f98f02811 100644 --- a/modules/em2d/src/RelativePositionMover.cpp +++ b/modules/em2d/src/RelativePositionMover.cpp @@ -11,8 +11,8 @@ #include #include #include -#include -#include +#include +#include IMPEM2D_BEGIN_NAMESPACE @@ -36,14 +36,14 @@ void RelativePositionMover::add_internal_transformations( core::MonteCarloMoverResult RelativePositionMover::do_propose() { last_transformation_ = rbA_.get_reference_frame().get_transformation_to(); - ::boost::uniform_real<> zeroone(0., 1.); + ::boost::random::uniform_real_distribution<> zeroone(0., 1.); double p = zeroone(random_number_generator); if (p < probability_of_random_move_) { algebra::Vector3D translation = algebra::get_random_vector_in( algebra::Sphere3D(rbA_.get_coordinates(), max_translation_)); algebra::Vector3D axis = algebra::get_random_vector_on( algebra::Sphere3D(algebra::Vector3D(0.0, 0.0, 0.0), 1.)); - ::boost::uniform_real<> rand(-max_angle_, max_angle_); + ::boost::random::uniform_real_distribution<> rand(-max_angle_, max_angle_); Float angle = rand(random_number_generator); algebra::Rotation3D r = algebra::get_rotation_about_axis(axis, angle); algebra::Rotation3D rc = @@ -54,10 +54,12 @@ core::MonteCarloMoverResult RelativePositionMover::do_propose() { // << rbA_ << " Transformation " << t << std::endl; rbA_.set_reference_frame(algebra::ReferenceFrame3D(t)); } else { - ::boost::uniform_int<> randi(0, reference_rbs_.size() - 1); + ::boost::random::uniform_int_distribution<> randi( + 0, reference_rbs_.size() - 1); unsigned int i = randi(random_number_generator); - ::boost::uniform_int<> randj(0, transformations_map_[i].size() - 1); + ::boost::random::uniform_int_distribution<> randj( + 0, transformations_map_[i].size() - 1); unsigned int j = randj(random_number_generator); algebra::Transformation3D Tint = transformations_map_[i][j]; diff --git a/modules/em2d/src/image_processing.cpp b/modules/em2d/src/image_processing.cpp index 8e4f4d3782..a11a0f778b 100644 --- a/modules/em2d/src/image_processing.cpp +++ b/modules/em2d/src/image_processing.cpp @@ -491,7 +491,7 @@ void add_noise(cv::Mat &v, double op1, double op2, const String &mode, double /*df*/) { IMP_LOG_TERSE("Adding noise: mean " << op1 << " Stddev " << op2 << std::endl); // Distribution types - typedef boost::uniform_real<> unif_distribution; + typedef boost::random::uniform_real_distribution<> unif_distribution; typedef boost::normal_distribution<> norm_distribution; // Variate generators (put generator and distributions together) typedef boost::variate_generator namespace po = boost::program_options; -#include +#include #include using namespace IMP::integrative_docking::internal; @@ -31,9 +31,10 @@ using namespace IMP::integrative_docking::internal; namespace { void 
select_cross_links(const std::vector& cross_links, std::vector& selected_cross_links) { - boost::uniform_real<> uni_dist(0, 1); + boost::random::uniform_real_distribution<> uni_dist(0, 1); boost::variate_generator > uni(IMP::random_number_generator, uni_dist); + boost::random::uniform_real_distribution<> > uni( + IMP::random_number_generator, uni_dist); for (unsigned int i = 0; i < cross_links.size(); i++) { if (cross_links[i].get_actual_distance() <= 15.0) diff --git a/modules/integrative_docking/test/test_soap.py b/modules/integrative_docking/test/test_soap.py index db8a111177..572c910c74 100644 --- a/modules/integrative_docking/test/test_soap.py +++ b/modules/integrative_docking/test/test_soap.py @@ -34,9 +34,8 @@ def test_simple_docking_trans_score(self): self.assertApplicationExitedCleanly(p.returncode, err) # count the number of lines in output file - fin = open('soap_score.res', 'r') - text = fin.read() - fin.close() + with open('soap_score.res', 'r') as fin: + text = fin.read() number_of_lines = text.count('\n') self.assertEqual(number_of_lines, 14) os.unlink('soap_score.res') @@ -44,11 +43,10 @@ def test_simple_docking_trans_score(self): def test_simple_filenames_score(self): """Simple test of filenames SOAP score application""" - text_file = open("filenames.txt", "w") - text_file.write(self.get_input_file_name('static.pdb')) - text_file.write(" ") - text_file.write(self.get_input_file_name('transformed.pdb')) - text_file.close() + with open("filenames.txt", "w") as text_file: + text_file.write(self.get_input_file_name('static.pdb')) + text_file.write(" ") + text_file.write(self.get_input_file_name('transformed.pdb')) p = self.run_application('soap_score', ['filenames.txt']) diff --git a/modules/isd/include/em_utilities.h b/modules/isd/include/em_utilities.h index e74a659e98..da65cfad57 100644 --- a/modules/isd/include/em_utilities.h +++ b/modules/isd/include/em_utilities.h @@ -16,7 +16,7 @@ #include #include #include -#include +#include #include IMPISD_BEGIN_NAMESPACE @@ -28,10 +28,10 @@ inline Float score_gaussian_overlap(Model *m, double determinant; bool invertible; Eigen::Matrix3d inverse = Eigen::Matrix3d::Zero(); - Float mass12 = atom::Mass(m,pp[0]).get_mass() * - atom::Mass(m,pp[1]).get_mass(); - core::Gaussian g1(m,pp[0]); - core::Gaussian g2(m,pp[1]); + Float mass12 = atom::Mass(m, std::get<0>(pp)).get_mass() * + atom::Mass(m, std::get<1>(pp)).get_mass(); + core::Gaussian g1(m, std::get<0>(pp)); + core::Gaussian g2(m, std::get<1>(pp)); Eigen::Matrix3d covar = g1.get_global_covariance() + g2.get_global_covariance(); Eigen::Vector3d v = Eigen::Vector3d(g2.get_coordinates().get_data()) @@ -58,9 +58,10 @@ inline FloatsList sample_points_from_density(const em::DensityMap * dmap_orig, // setup random number generator FloatsList ret; - boost::uniform_real<> uni_dist(0,1); + boost::random::uniform_real_distribution<> uni_dist(0,1); boost::variate_generator< - IMP::RandomNumberGenerator&, boost::uniform_real<> > + IMP::RandomNumberGenerator&, + boost::random::uniform_real_distribution<> > uni(IMP::random_number_generator, uni_dist); for (int i=0;i len(b): a, b = b, a @@ -404,7 +402,8 @@ def find_contributions(self, line): def create_distance_restraint(self, distances, volume, contributions): if distances is None and volume is None: raise ValueError("could not find either volume or " - "distance: %s %s %s" % (distances, volume, contributions)) + "distance: %s %s %s" + % (distances, volume, contributions)) if distances is None: distances = [volume ** (-1. 
/ 6), 0, 0] dist = distances[0] @@ -461,7 +460,7 @@ def read_distances(self, filename, key, naming_system=IUPAC_CONVENTION, if decompose: - d = decompose_restraints(restraints) + d = decompose_restraints(restraints) # noqa: F821 for _type in d.keys(): if not d[_type]: @@ -505,14 +504,16 @@ def read_dihedrals(self, filename, key, naming_system=IUPAC_CONVENTION): if len(new_contribs) > 1: raise ValueError( - 'Inconsistency in data file, multiple contributions detected.') + 'Inconsistency in data file, multiple contributions ' + 'detected.') atoms = self.split_contribution(new_contribs[0]) atoms = [self.extract_atom(x) for x in atoms] name = self.resolve_dihedral_name(atoms) - r = create_dihedral_restraint(seq_number, name, values, atoms) + r = create_dihedral_restraint(seq_number, name, # noqa: F821 + values, atoms) restraints.append(r) seq_number += 1 @@ -550,7 +551,7 @@ def read_rdcs(self, filename, key, naming_system=IUPAC_CONVENTION): contributions += self.build_contributions(atoms) if contributions: - r = create_rdc_restraint( + r = create_rdc_restraint( # noqa: F821 seq_number, distances[0], contributions) @@ -561,6 +562,7 @@ def read_rdcs(self, filename, key, naming_system=IUPAC_CONVENTION): if restraints: return restraints + if __name__ == '__main__': noe = 'noe.tbl' diff --git a/modules/isd/pyext/src/TuneRex.py b/modules/isd/pyext/src/TuneRex.py index 35b4b2e606..a36341e2c8 100755 --- a/modules/isd/pyext/src/TuneRex.py +++ b/modules/isd/pyext/src/TuneRex.py @@ -3,23 +3,18 @@ from __future__ import print_function __doc__ = """ -This module provides a few methods to improve the efficiency of a replica-exchange simulation -by tuning its parameters. +This module provides a few methods to improve the efficiency of a +replica-exchange simulation by tuning its parameters. Author: Yannick Spill """ import sys -#from mpmath import erf, erfinv -#from math import sqrt -#from random import randint -from numpy import * -from numpy.random import randint import rpy2.robjects as robjects kB = 1.3806503 * 6.0221415 / 4184.0 # Boltzmann constant in kcal/mol/K # here for float comparison. 
Floats are equal if their difference +# is smaller than EPSILON EPSILON = 1e-8 - # is smaller than EPSILON debug = False @@ -27,8 +22,8 @@ def prdb(arg): if debug: print(arg) -# R compatibility functions +# R compatibility functions r = robjects.r robjects.globalenv["kB"] = kB _rinverf = r('invErf <- function(x) {qnorm((1 + x) /2) / sqrt(2)}') @@ -57,13 +52,14 @@ def spline(xy, mean, method=None): robjects.globalenv["x"] = robjects.FloatVector(x) robjects.globalenv["y"] = robjects.FloatVector(y) global _rinterp - #_rinterp = r.splinefun(x,y) + # _rinterp = r.splinefun(x,y) if method is None: r('cvsplinenonbounded <- splinefun(x,y)') else: r('cvsplinenonbounded <- splinefun(x,y,method="%s")' % method) _rinterp = r( - 'cvspline <- function(x) { tmp = cvsplinenonbounded(x); if (tmp>0) {tmp} else {%f}}' % + 'cvspline <- function(x) { tmp = cvsplinenonbounded(x); ' + 'if (tmp>0) {tmp} else {%f}}' % mean) def interpolated(x): @@ -79,7 +75,7 @@ def linear_interpolation(xy, mean): robjects.globalenv["x"] = robjects.FloatVector(x) robjects.globalenv["y"] = robjects.FloatVector(y) global _rinterp - #_rinterp = r.splinefun(x,y) + # _rinterp = r.splinefun(x,y) _rinterp = r('cvspline <- approxfun(x,y)') def interpolated(x): @@ -91,10 +87,11 @@ def interpolated(x): # R testing functions def anova(*args): - """perform anova using R and return statistic, p-value, between and within variance""" + """perform anova using R and return statistic, p-value, between and + within variance""" ngroups = len(args) # number of groups # nreps = len(args[0]) #number of repetitions - #group = r.gl(ngroups,nreps) + # group = r.gl(ngroups,nreps) reps = r.rep(0, len(args[0])) weight = robjects.FloatVector(args[0]) for i in range(1, len(args)): @@ -120,8 +117,8 @@ def anova(*args): def kruskal(*args): """perform kruskal-wallis rank test""" ngroups = len(args) - #nreps = len(args[0]) - #group = r.gl(ngroups,nreps) + # nreps = len(args[0]) + # group = r.gl(ngroups,nreps) reps = r.rep(0, len(args[0])) weight = robjects.FloatVector(args[0]) for i in range(1, len(args)): @@ -153,7 +150,8 @@ def binom(obs, target): def bartlett(*args): - """perform bartlett's test on the equality of variances of the observations""" + """perform bartlett's test on the equality of variances of the + observations""" ngroups = len(args) nreps = len(args[0]) group = r.gl(ngroups, nreps) @@ -183,24 +181,27 @@ def fligner(*args): def power_test(ar, power=0.8, alpha=0.05): """perform an anova power test and return - the power of the test with this input data - - the number of points that would be needed to achieve a default power of 0.8 + - the number of points that would be needed to achieve a default + power of 0.8 ar: the output of anova() """ result = r('power.anova.test')(groups=ar['nreps'], n=min(ar['nsteps']), - between=ar['between'], within=ar['within'], sig=alpha) + between=ar['between'], within=ar['within'], + sig=alpha) prdb('the power of this anova was: %.3f' % result[5][0]) result = r('power.anova.test')(groups=ar['nreps'], - between=ar['between'], within=ar['within'], sig=alpha, pow=power) - prdb( - 'To have a power of %.3f, there should be at least %d exchange attempts.' % - (power, result[1][0])) + between=ar['between'], within=ar['within'], + sig=alpha, pow=power) + prdb('To have a power of %.3f, there should be at least %d exchange ' + 'attempts.' 
% (power, result[1][0])) return def minimum_n(ar, alpha=0.05): - """This routine tries to return an estimate of the additional number of exchange trials that - could lead to a positive result of the anova (e.g. the average ARs are not the same). It is - still very crude. It also assumes that a one-way anova was made. + """This routine tries to return an estimate of the additional number of + exchange trials that could lead to a positive result of the anova (e.g. + the average ARs are not the same). It is still very crude. It also assumes + that a one-way anova was made. ar: the output of anova() alpha: type I error """ @@ -208,13 +209,11 @@ def minimum_n(ar, alpha=0.05): nsteps = ar['nsteps'] try: nsteps = min(nsteps) - except: + except: # noqa: E722 pass fstat = ar['fstat'] - return ( - nsteps * - (sqrt(Finv(1 - alpha, nreps - 1, nreps * (nsteps - 1)) / fstat) - 1) - ) + return nsteps * (numpy.sqrt(Finv(1 - alpha, nreps - 1, + nreps * (nsteps - 1)) / fstat) - 1) # Heat capacity class @@ -230,7 +229,6 @@ class CvEstimator: def __init__(self, params, energies=None, indicators=None, method="constant", temps=None, write_cv=False): - kB = 1.3806503 * 6.0221415 / 4184.0 # Boltzmann constant in kcal/mol/K self.__initialized = False self.__cv = [] self.method = method @@ -258,12 +256,12 @@ def __init__(self, params, energies=None, indicators=None, if write_cv: fl = open('cv', 'w') fl.write("".join(["%f %f\n" % (x, self.get(x)) - for x in linspace(params[0] / 2, 2 * params[-1])])) + for x in numpy.linspace(params[0] / 2, 2 * params[-1])])) fl.close() def estimate_cv_interpolate(self, params, indicators): - """interpolate using previous values, by reversing the approximate overlap - function + """interpolate using previous values, by reversing the approximate + overlap function """ if self.__initialized: return @@ -272,7 +270,8 @@ def estimate_cv_interpolate(self, params, indicators): "the length of indicators and params does not match!") if params != tuple(sorted(params)): raise NotImplementedError( - "unable to work on parameters that do not change monotonically") + "unable to work on parameters that do not change " + "monotonically") prdb("storing params and means") self.__params = params @@ -285,7 +284,8 @@ def estimate_cv_interpolate(self, params, indicators): p1 = params[i] p2 = params[i + 1] self.__cv.append( - (self.__pmeans[i], (p1 ** 2 + p2 ** 2) * float(Y2) / (p2 - p1) ** 2)) + (self.__pmeans[i], + (p1 ** 2 + p2 ** 2) * float(Y2) / (p2 - p1) ** 2)) prdb(self.__params) prdb(self.__cv) self.__cvmean = sum([i[1] for i in self.__cv]) / float(len(self.__cv)) @@ -306,23 +306,23 @@ def needs_init(self): if not self.__initialized: raise RuntimeError("Class was not initialized correctly!") - def estimate_cv_mbar(params, energies, temps): + def estimate_cv_mbar(self, params, energies, temps): "use MBAR to get the heat capacity" - raise NotImplementedError(method) - if self.__initialized: - return + raise NotImplementedError("estimate_cv_mbar") def _isinbounds(self, p, params): - """returns True if p is within params, else false. the params list must be sorted ascendingly.""" + """returns True if p is within params, else false. the params list + must be sorted ascendingly.""" if p < params[0] - EPSILON or p > params[-1] + EPSILON: - #prdb("Warning: value %f is outside of bounds, extrapolating." % p) + # prdb("Warning: value %f is outside of bounds, " + # "extrapolating." 
% p) return False else: return True def _interpolate(self, xval, xlist): - """return interpolation of Cv at point xval, and return the average instead - if this value is negative. + """return interpolation of Cv at point xval, and return the average + instead if this value is negative. """ self._isinbounds(xval, xlist) val = self.__cvfun(xval) @@ -332,16 +332,16 @@ def _interpolate(self, xval, xlist): return self.__cvmean def get_interp(self, param): - """returns the point estimate of the first derivative of the energy with - respect to the replica exchange parameter (usually T or q). + """returns the point estimate of the first derivative of the energy + with respect to the replica exchange parameter (usually T or q). This version assumes that the means of cv are given. """ self.needs_init() return self._interpolate(param, self.__pmeans) def get_mbar(self, param): - """returns the point estimate of the first derivative of the energy with - respect to the replica exchange parameter (usually T or q). + """returns the point estimate of the first derivative of the energy + with respect to the replica exchange parameter (usually T or q). This version assumes that the values of cv are given. """ self.needs_init() @@ -368,8 +368,8 @@ def update_good_dumb(newp, oldp, *args, **kwargs): In the "dumb" method, the Cv and targetAR keywords are ignored. Here the newp[1] parameter is modified because prior changes have set newp[0] to a different value than oldp[0]. Thus, we should move newp[1] by - minimizing the effect on the AR since it is supposedly equal to targetAR. In - this simple method, the parameter is just translated. + minimizing the effect on the AR since it is supposedly equal to targetAR. + In this simple method, the parameter is just translated. """ prdb( "newp[0] has moved (%.3f -> %.3f), adjusting the position of newp[1]" % @@ -400,12 +400,13 @@ def update_bad_dumb(newp, oldp, ind, targetAR=0.4, scale=0.1, **kwargs): def update_any_cv_step(newp, oldp, ind, targetAR=0.4, Cv=None, **kwargs): """here we use the average AR formula of two gaussians to get newp[1] as a - function of newp[1], knowing the targetAR and estimating the Cv. If targetAR - is negative, consider that mean(ind) equals the target AR and skip any - calculation in the case that oldp[0] equals newp[0]. + function of newp[1], knowing the targetAR and estimating the Cv. If + targetAR is negative, consider that mean(ind) equals the target AR and + skip any calculation in the case that oldp[0] equals newp[0]. step: suppose the heat capacity is stepwise constant, i.e. use the heat - capacity at position newp[0] as an estimate of the mean of the heat capacity - between newp[0] and newp[1]. This does not require any self-consistent loop. + capacity at position newp[0] as an estimate of the mean of the heat + capacity between newp[0] and newp[1]. This does not require any + self-consistent loop. 
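Restated for reference, the closed-form "step" update that the code below implements, in the same symbols as the code (p_0 = newp[0], C_v the heat-capacity estimate at p_0, k_B as defined above); it is only valid while Y^2 < C_v, which is exactly the condition the code checks before dividing:

\[
  Y = \sqrt{2 k_B}\;\mathrm{erf}^{-1}(1 - \mathrm{targetAR}), \qquad
  \mathrm{newp}_1 = \mathrm{newp}_0\,\frac{C_v + Y\sqrt{2 C_v - Y^2}}{C_v - Y^2},
  \quad Y^2 < C_v .
\]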
""" global kB @@ -415,11 +416,11 @@ def update_any_cv_step(newp, oldp, ind, targetAR=0.4, Cv=None, **kwargs): if targetAR < 0: targetAR = sum(ind) / float(len(ind)) cv = Cv.get(newp[0]) - Y = sqrt(2 * kB) * float(erfinv(1 - targetAR)) + Y = numpy.sqrt(2 * kB) * float(erfinv(1 - targetAR)) if Y ** 2 >= cv: raise ValueError("""targetAR too small for this approximate method, use the full self-consistent method instead.""") - return newp[0] * (cv + Y * sqrt(2 * cv - Y ** 2)) / (cv - Y ** 2) + return newp[0] * (cv + Y * numpy.sqrt(2 * cv - Y ** 2)) / (cv - Y ** 2) def update_any_cv_sc(newp, oldp, ind, targetAR=0.4, Cv=None, @@ -433,21 +434,23 @@ def update_any_cv_sc(newp, oldp, ind, targetAR=0.4, Cv=None, if targetAR < 0: targetAR = sum(ind) / float(len(ind)) cv = Cv.get(newp[0]) - Y = sqrt(2 * kB) * float(erfinv(1 - targetAR)) + Y = numpy.sqrt(2 * kB) * float(erfinv(1 - targetAR)) if Y ** 2 >= cv: raise ValueError("""targetAR too small for this approximate method, use the full self-consistent method instead.""") - targetp = newp[0] * (cv + Y * sqrt(2 * cv - Y ** 2)) / (cv - Y ** 2) + targetp = newp[0] * (cv + Y * numpy.sqrt(2 * cv - Y ** 2)) / (cv - Y ** 2) for i in range(maxiter): cv = Cv.mean(newp[0], targetp) (oldtargetp, targetp) = ( - targetp, newp[0] * (cv + Y * sqrt(2 * cv - Y ** 2)) / (cv - Y ** 2)) + targetp, newp[0] * (cv + Y * numpy.sqrt(2 * cv - Y ** 2)) + / (cv - Y ** 2)) if abs(targetp - oldtargetp) <= tol: break - if isnan(targetp): + if numpy.isnan(targetp): if Y ** 2 >= cv: - raise ValueError("""targetAR too small for this approximate method, use - the full self-consistent method instead.""") + raise ValueError( + "targetAR too small for this approximate method, use the " + "full self-consistent method instead.") else: raise ValueError("""something unexpected happened""") if i == maxiter - 1: @@ -464,17 +467,17 @@ def update_any_cv_scfull(newp, oldp, ind, targetAR=0.4, Cv=None, """self-consistent solver version, on the exact average AR equation""" # create helper functions and overlap function - #_ru21 = r('u21 <- function(t1,t2) { (t2-t1)*(cvspline(t2)-cvspline(t1))/2. 
}') - _ru21 = r( - 'u21 <- function(t1,t2) { integrate(Vectorize(cvspline),t1,t2)$value }') - _rb21 = r('b21 <- function(t1,t2) { 1./(kB*t2) - 1./(kB*t1) }') - _rsigma2 = r('sigma2 <- function(t) {cvspline(t)*kB*t**2}') + _ = r('u21 <- function(t1,t2) { integrate(Vectorize(cvspline),' + 't1,t2)$value }') + _ = r('b21 <- function(t1,t2) { 1./(kB*t2) - 1./(kB*t1) }') + _ = r('sigma2 <- function(t) {cvspline(t)*kB*t**2}') _rovboltz = r('ovboltz <- function(t1,t2) {\ - 1/2*( 1-erf(\ - u21(t1,t2)/sqrt(2*(sigma2(t1)+sigma2(t2))))\ - + exp(b21(t1,t2)*(u21(t1,t2)+b21(t1,t2)*(sigma2(t1)+sigma2(t2))/2))\ - * (1+erf((u21(t1,t2)+b21(t1,t2)*(sigma2(t1)+sigma2(t2)))/(sqrt(2*(sigma2(t1)+sigma2(t2))))))\ - )}') + 1/2*( 1-erf(\ + u21(t1,t2)/sqrt(2*(sigma2(t1)+sigma2(t2))))\ + + exp(b21(t1,t2)*(u21(t1,t2)+b21(t1,t2)*(sigma2(t1)+sigma2(t2))/2))\ + * (1+erf((u21(t1,t2)+b21(t1,t2)*(sigma2(t1)+sigma2(t2)))\ + /(sqrt(2*(sigma2(t1)+sigma2(t2))))))\ + )}') _rrootfn = r( 'rootfn <- function(t2) {ovboltz(%f,t2)-%f}' % (newp[0], targetAR)) @@ -494,12 +497,11 @@ def update_any_cv_scfull(newp, oldp, ind, targetAR=0.4, Cv=None, raise RuntimeError('could not find zero of function!') # find root - _runiroot = r( - 'uniroot(rootfn,c(%f,%f),f.lower = %f, f.upper = %f, tol = %f, maxiter = %d)' % (newp[0], tmp, - 1 - targetAR, -targetAR, tol, maxiter)) - prdb( - "self-consistent solver converged after %s iterations and an estimated precision of %s " % - (_runiroot[2][0], _runiroot[3][0])) + _runiroot = r('uniroot(rootfn,c(%f,%f),f.lower = %f, f.upper = %f, ' + 'tol = %f, maxiter = %d)' + % (newp[0], tmp, 1 - targetAR, -targetAR, tol, maxiter)) + prdb("self-consistent solver converged after %s iterations and an " + "estimated precision of %s " % (_runiroot[2][0], _runiroot[3][0])) prdb( ["root:", _runiroot[0][0], @@ -523,8 +525,8 @@ def are_equal_to_targetAR( targetAR=0.4, alpha=0.05, method="binom"): - """here, all indicators have same average, we want to know if it is equal to - targetAR + """here, all indicators have same average, we want to know if it is + equal to targetAR """ # calculate sample mean deviation of each indicator function from targetAR @@ -534,16 +536,16 @@ def are_equal_to_targetAR( # perform t-test if method == "ttest": - #from statlib.stats import ttest_1samp as ttest - ttest = ttest + # from statlib.stats import ttest_1samp as ttest + our_ttest = ttest elif method == "binom": - ttest = binom + our_ttest = binom else: raise NotImplementedError try: - test, pval = ttest(deviant[1], targetAR) - except: + test, pval = our_ttest(deviant[1], targetAR) + except: # noqa: E722 if abs(targetAR - sum(deviant[1]) / len(deviant[1])) > EPSILON: pval = 0 else: @@ -555,8 +557,9 @@ def are_equal_to_targetAR( def are_stationnary(indicators, alpha=0.05, method="anova"): - """test on the stationarity of the observations (block analysis). Done so by - launching an anova on the difference between the two halves of each observations. + """test on the stationarity of the observations (block analysis). Done + so by launching an anova on the difference between the two halves of + each observations. 
""" if method == "kruskal": @@ -564,7 +567,7 @@ def are_stationnary(indicators, alpha=0.05, method="anova"): else: test = anova - tmp = array(indicators) + tmp = numpy.array(indicators) blocklen = len(indicators[0]) / 2 block = tmp[:, :blocklen] - tmp[:, blocklen:2 * blocklen] if test(*block)['pval'] < alpha: @@ -592,7 +595,7 @@ def are_equal(indicators, targetAR=0.4, alpha=0.05, if varMethod == "bartlett": pval = bartlett(*indicators)[1] elif varMethod == "fligner": - pval = killeen(*indicators)[1] + pval = fligner(*indicators)[1] else: raise NotImplementedError( "variance testing method unknown: %s" % @@ -610,13 +613,6 @@ def are_equal(indicators, targetAR=0.4, alpha=0.05, # p-value < alpha => H0 rejected => result == False tr['result'] = tr['pval'] >= alpha - # if tr['test'] == 'anova': - # try: - # power_test(tr, power=power, alpha=alpha) - # except: - # prdb("power test failed") - # pass - return tr @@ -632,10 +628,10 @@ def find_good_ARs(indicators, targetAR=0.4, alpha=0.05, method="binom"): # perform t-test if method == "ttest": - #from statlib.stats import ttest_1samp as ttest - ttest = ttest + # from statlib.stats import ttest_1samp as ttest + our_ttest = ttest elif method == "binom": - ttest = binom + our_ttest = binom else: raise NotImplementedError @@ -643,12 +639,11 @@ def find_good_ARs(indicators, targetAR=0.4, alpha=0.05, method="binom"): # start from the lowest means and stop when they are ok prdb("starting left") for (i, (mean, pos, ind)) in enumerate(means): - prdb( - "performing t-test on couple %d having average AR %f, position %d" % - (pos, mean, i)) + prdb("performing t-test on couple %d having average AR %f, " + "position %d" % (pos, mean, i)) try: - test, pval = ttest(ind, targetAR) - except: + test, pval = our_ttest(ind, targetAR) + except: # noqa: E722 if abs(targetAR - mean) > EPSILON: pval = 0 else: @@ -664,7 +659,7 @@ def find_good_ARs(indicators, targetAR=0.4, alpha=0.05, method="binom"): for (i, (mean, pos, ind)) in enumerate(reversed(means)): prdb("performing t-test on couple %d having average AR %f, position %d" % (pos, mean, len(means) - 1 - i)) - if ttest(ind, targetAR)[1] < alpha: + if our_ttest(ind, targetAR)[1] < alpha: # means are different isGoodTuple.append((pos, False)) else: @@ -710,7 +705,6 @@ def mean_first_passage_times( """ from numpy import array, zeros - from pprint import pprint, pformat replicanums = array(replicanums_ori)[:, start::subs] N = len(replicanums) tauN = [0] * N @@ -729,7 +723,7 @@ def mean_first_passage_times( return tau0, tauN, None, None, None else: - #prdb('not using average AR') + # prdb('not using average AR') if sys.version_info[0] >= 3: izip = zip else: @@ -747,7 +741,7 @@ def mean_first_passage_times( alreadyN = [False for i in range(N)] timesN = [[] for i in range(N)] - #prdb('looping over replicanums') + # prdb('looping over replicanums') for time, frame in enumerate(izip(*replicanums)): # case of the replica in state 0 if not already0[frame[0]]: @@ -774,8 +768,8 @@ def mean_first_passage_times( storeN[rep, state] = False # store time since this replica left state N timesN[state].append(time - lastN[rep]) - #prdb([replicanums.shape, len(storeN), len(last0)]) - times = [[] for i in range(N)] + # prdb([replicanums.shape, len(storeN), len(last0)]) + # times = [[] for i in range(N)] chose_N = [len(timesN[state]) > len(times0[state]) for state in range(N)] for state in range(N): @@ -838,19 +832,19 @@ def spline_diffusivity(pup, params): robjects.globalenv["hetax"] = robjects.FloatVector(params) robjects.globalenv["pupx"] 
= robjects.FloatVector(params) robjects.globalenv["pupy"] = robjects.FloatVector(pup) - heta = r('heta <- splinefun(hetax,hetay,method="monoH.FC")') - eff = r('eff <- splinefun(pupx,pupy,method="monoH.FC")') + _ = r('heta <- splinefun(hetax,hetay,method="monoH.FC")') + _ = r('eff <- splinefun(pupx,pupy,method="monoH.FC")') diff = r('diff <- function(x) {-1/(heta(x,deriv=1)*eff(x,deriv=1))}') return lambda x: diff(x)[0] # Misc def compute_indicators(replicanums, subs=1, start=0): - """input: replicanums : a list of N lists of size M, where N is the number of - states and M is the length of the simulation. Each element is an integer, - and corresponds to the label of a replica. - output: an indicator function of exchanges (size (N-1)x(M-1)), 1 if exchange and - 0 if not. + """input: replicanums : a list of N lists of size M, where N is the number + of states and M is the length of the simulation. Each element is an + integer, and corresponds to the label of a replica. + output: an indicator function of exchanges (size (N-1)x(M-1)), 1 if + exchange and 0 if not. """ def exchange(n, m): if replicanums[n][m] == replicanums[n + 1][m + 1] \ @@ -861,8 +855,9 @@ def exchange(n, m): indicators = [] for n in range(len(replicanums) - 1): - indicators.append([exchange(n, m) - for m in range(len(replicanums[n]) - 1)][start::subs]) + indicators.append( + [exchange(n, m) + for m in range(len(replicanums[n]) - 1)][start::subs]) return indicators # Main routines @@ -871,7 +866,7 @@ def exchange(n, m): def update_params_nonergodic(pup, params, write_g=False, num=False): from numpy import linspace - #g = spline(zip(pup,params),0,method='monoH.FC') + # g = spline(zip(pup,params),0,method='monoH.FC') g = linear_interpolation(list(zip(pup, params)), 0) if write_g: d = spline_diffusivity(pup, params) @@ -963,11 +958,11 @@ def tune_params_flux(replicanums, params, subs=1, start=0, alpha=0.05, raise NotImplementedError prdb("computing mean first passage times") - tau0, tauN, chose_N, times0, timesN = mean_first_passage_times(replicanums, - subs=subs, start=start, use_avgAR=use_avgAR) + tau0, tauN, chose_N, times0, timesN = mean_first_passage_times( + replicanums, subs=subs, start=start, use_avgAR=use_avgAR) prdb("average round trip time: %.2f (%d+%d events)" % - (tau0[-1] + tauN[0], len(times0[-1]), len(timesN[0]))) + (tau0[-1] + tauN[0], len(times0[-1]), len(timesN[0]))) prdb("checking if the parameterset needs to be improved") N = len(replicanums) if chose_N is None: @@ -990,7 +985,7 @@ def tune_params_flux(replicanums, params, subs=1, start=0, alpha=0.05, prdb("flux is constant, nothing to do!") min_n = minimum_n(anova_result, alpha) prdb('Try to rerun this test with at least %d more samples.' % - ceil(min_n)) + numpy.ceil(min_n)) return (False, min_n) # the flux is not constant so the parameters need improvement. @@ -1005,10 +1000,11 @@ def tune_params_flux(replicanums, params, subs=1, start=0, alpha=0.05, return (True, params) -def tune_params_ar( - indicators, params, targetAR=0.4, alpha=0.05, immobilePoint=1, CvMethod="skip", badMethod="dumb", goodMethod="dumb", - varMethod="skip", testMethod="anova", meanMethod="binom", - energies=None, temps=None, power=0.8, dumb_scale=0.1): +def tune_params_ar(indicators, params, targetAR=0.4, alpha=0.05, + immobilePoint=1, CvMethod="skip", badMethod="dumb", + goodMethod="dumb", varMethod="skip", testMethod="anova", + meanMethod="binom", energies=None, temps=None, power=0.8, + dumb_scale=0.1): """Tune the replica-exchange parameters and return a new set. 
Arguments: @@ -1028,12 +1024,13 @@ def tune_params_ar( "skip", other options: "mbar", "spline", "constant") badMethod -- how to correct the (j+1)th parameter if the acceptance ratio between replicas j and j+1 is off the - target value (default: "dumb", options: "step", "sc", "scfull", "nr") + target value (default: "dumb", options: "step", "sc", + "scfull", "nr") goodMethod -- how to update the value of the (j+1)th parameter in the case of a correctly exchanging couple, but if the jth parameter has been modified (default: "dumb",options: "step", - "sc" self-consistent, "scfull" self-consistent using the exact equation, - "nr" newton-raphson solver for the exact equation) + "sc" self-consistent, "scfull" self-consistent using the exact + equation, "nr" newton-raphson solver for the exact equation) dumb_scale -- (0.0-1.0) in the "dumb" method, scale wrong temperature intervals by this amount. (default: 0.1) testMethod -- how to test for the difference of the means, @@ -1046,7 +1043,8 @@ def tune_params_ar( "bartlett" for Bartlett's test, "skip" to pass. energies -- if CvMethod is set to "mbar", the energies of each state as a function of time are used to estimate the heat capacity. - temps -- the temperatures of the simulations, if estimating with "mbar". + temps -- the temperatures of the simulations, if estimating + with "mbar". Return Value: returns a tuple: (bool, params). bool is True if params have @@ -1058,13 +1056,14 @@ def tune_params_ar( prdb("performing ANOVA") anova_result = are_equal(indicators, targetAR, alpha, method=testMethod, varMethod=varMethod, power=power) - if (anova_result['result'] and - are_equal_to_targetAR(indicators, targetAR, alpha, method=meanMethod)): + if (anova_result['result'] + and are_equal_to_targetAR(indicators, targetAR, alpha, + method=meanMethod)): prdb("all means are equal to target AR, nothing to do!") min_n = minimum_n(anova_result, alpha) prdb( 'Try to rerun this test with at least %d more samples.' 
% - ceil(min_n)) + numpy.ceil(min_n)) return (False, min_n) prdb("some means are different, performing t-tests") @@ -1102,35 +1101,41 @@ def tune_params_ar( # update the current parameter set to match the target AR params = update_params(indicators, params, isGood, targetAR=targetAR, immobilePoint=immobilePoint, Cv=Cv, - badMethod=badMethod, goodMethod=goodMethod, dumb_scale=dumb_scale) + badMethod=badMethod, goodMethod=goodMethod, + dumb_scale=dumb_scale) prdb('Done') return (True, params) + if __name__ == '__main__': - from numpy import * + import numpy replicanums = [] for i in range(1, 8): - replicanums.append(tuple(fromfile('data/replica-indices/%d.rep' % i, - dtype=int, sep='\n'))) + replicanums.append( + tuple(numpy.fromfile('data/replica-indices/%d.rep' % i, + dtype=int, sep='\n'))) prdb("replicanums: %dx%d" % (len(replicanums), len(replicanums[0]))) - params = tuple(fromfile('data/temperatures', sep=' ')) + params = tuple(numpy.fromfile('data/temperatures', sep=' ')) prdb(params) indicators = compute_indicators(replicanums, subs=1, start=0) prdb("indicators: %dx%d" % (len(indicators), len(indicators[0]))) prdb("Exchange rate:") - prdb(array([sum(ind) / float(len(ind)) for ind in indicators])) - array([sum(ind) / float(len(ind)) for ind in - indicators]).tofile('xchgs', sep='\n') - changed, newparams = tune_params(indicators, params, targetAR=0.25, - badMethod="dumb", goodMethod="dumb", CvMethod="skip", testMethod="anova", alpha=0.05) + prdb(numpy.array([sum(ind) / float(len(ind)) for ind in indicators])) + numpy.array([sum(ind) / float(len(ind)) + for ind in indicators]).tofile('xchgs', sep='\n') + changed, newparams = tune_params_ar( + indicators, params, targetAR=0.25, + badMethod="dumb", goodMethod="dumb", CvMethod="skip", + testMethod="anova", alpha=0.05) if not changed: print("Parameter set seems optimal.") else: - if not True in [abs(newparams[i + 1] - newparams[i]) < 1e-3 for i in range(len(newparams) - 1)]: - array(newparams).tofile('data/temperatures', sep=' ') + if True not in [abs(newparams[i + 1] - newparams[i]) < 1e-3 + for i in range(len(newparams) - 1)]: + numpy.array(newparams).tofile('data/temperatures', sep=' ') else: print("PROBLEM IN NEW PARAMETERSET -> not saved") print("params :", params) diff --git a/modules/isd/pyext/src/create_gmm.py b/modules/isd/pyext/src/create_gmm.py index cbe49dfd5d..b3e05780f5 100644 --- a/modules/isd/pyext/src/create_gmm.py +++ b/modules/isd/pyext/src/create_gmm.py @@ -108,7 +108,7 @@ def run(args): print('fitting gmm') if args.force_weight_frac: - force_weight = 1.0/ncenters + force_weight = 1.0 / ncenters else: force_weight = args.force_weight if force_weight != -1: diff --git a/modules/isd/pyext/src/gmm_tools.py b/modules/isd/pyext/src/gmm_tools.py index a091e06fc8..0986d1fdde 100644 --- a/modules/isd/pyext/src/gmm_tools.py +++ b/modules/isd/pyext/src/gmm_tools.py @@ -28,7 +28,7 @@ def decorate_gmm_from_text(in_fn, ps, mdl, transform=None, radius_scale=1.0, with open(in_fn, 'r') as inf: for line in inf: if line[0] != '#': - if ncomp > len(ps)-1: + if ncomp > len(ps) - 1: ps.append(IMP.Particle(mdl, "GMM%d" % next(added_ps))) p = ps[ncomp] fields = line.split('|') @@ -44,10 +44,10 @@ def decorate_gmm_from_text(in_fn, ps, mdl, transform=None, radius_scale=1.0, g = IMP.core.Gaussian(ps[ncomp]) g.set_gaussian(shape) if not IMP.atom.Mass.get_is_setup(p): - IMP.atom.Mass.setup_particle(p, weight*mass_scale) + IMP.atom.Mass.setup_particle(p, weight * mass_scale) else: - IMP.atom.Mass(p).set_mass(weight*mass_scale) - rmax = 
sqrt(max(g.get_variances()))*radius_scale + IMP.atom.Mass(p).set_mass(weight * mass_scale) + rmax = sqrt(max(g.get_variances())) * radius_scale if not IMP.core.XYZR.get_is_setup(ps[ncomp]): IMP.core.XYZR.setup_particle(ps[ncomp], rmax) else: @@ -82,7 +82,7 @@ def gmm2map(to_draw, voxel_size, bounding_box=None, origin=None, fast=False, if type(to_draw[0]) in (IMP.Particle, IMP.atom.Hierarchy, IMP.core.Hierarchy): ps = to_draw - elif type(to_draw[0]) == IMP.core.Gaussian: + elif type(to_draw[0]) is IMP.core.Gaussian: ps = [g.get_particle() for g in to_draw] else: print('ps must be Particles or Gaussians') @@ -94,7 +94,8 @@ def gmm2map(to_draw, voxel_size, bounding_box=None, origin=None, fast=False, s2 = IMP.algebra.Sphere3D(s.get_center(), s.get_radius() * 3) else: g = IMP.core.Gaussian(ps[0]).get_gaussian() - s2 = IMP.algebra.Sphere3D(g.get_center(), max(g.get_variances())*3) + s2 = IMP.algebra.Sphere3D(g.get_center(), + max(g.get_variances()) * 3) bounding_box = IMP.algebra.get_bounding_box(s2) shapes = [] weights = [] @@ -170,7 +171,7 @@ def draw_points(pts, out_fn, colors = ['0 1 0', '0 0 1', '0 1 1'] for nt, t in enumerate(pts[start:]): if use_colors and nt % 2 == 0: - outf.write('.color %s\n' % colors[nt/2]) + outf.write('.color %s\n' % colors[nt / 2]) pt = trans.get_transformed(IMP.algebra.Vector3D(t)) outf.write('.dotat %.2f %.2f %.2f\n' % (pt[0], pt[1], pt[2])) @@ -291,12 +292,12 @@ def fit_gmm_to_points(points, if force_radii != -1.0: print('warning: radii can no longer be forced, but setting ' 'initial values to ', force_radii) - precisions_init = np.array([[1./force_radii]*3 + precisions_init = np.array([[1. / force_radii] * 3 for i in range(n_components)]) if force_weight != -1.0: print('warning: weights can no longer be forced, but setting ' 'initial values to ', force_weight) - weights_init = np.array([force_weight]*n_components) + weights_init = np.array([force_weight] * n_components) gmm = GaussianMixture(n_components=n_components, max_iter=num_iter, @@ -348,7 +349,7 @@ def fit_gmm_to_points(points, else: covar = covar.tolist() center = list(gmm.means_[ng]) - weight = mass_multiplier*gmm.weights_[ng] + weight = mass_multiplier * gmm.weights_[ng] if ng >= len(ps): ps.append(IMP.Particle(mdl)) shape = IMP.algebra.get_gaussian_from_covariance( @@ -390,9 +391,9 @@ def fit_dirichlet_gmm_to_points( print('using dirichlet prior') if new_sklearn: gmm = BayesianGaussianMixture( - weight_concentration_prior_type='dirichlet_process', - n_components=n_components, max_iter=num_iter, - covariance_type=covariance_type) + weight_concentration_prior_type='dirichlet_process', + n_components=n_components, max_iter=num_iter, + covariance_type=covariance_type) else: gmm = DPGMM(n_components=n_components, n_iter=num_iter, covariance_type=covariance_type) @@ -410,7 +411,7 @@ def fit_dirichlet_gmm_to_points( else: covar = covar.tolist() center = list(gmm.means_[ng]) - weight = mass_multiplier*gmm.weights_[ng] + weight = mass_multiplier * gmm.weights_[ng] if ng >= len(ps): ps.append(IMP.Particle(mdl)) shape = IMP.algebra.get_gaussian_from_covariance( diff --git a/modules/isd/pyext/src/shared_functions.py b/modules/isd/pyext/src/shared_functions.py index fcad00393d..b322517da8 100644 --- a/modules/isd/pyext/src/shared_functions.py +++ b/modules/isd/pyext/src/shared_functions.py @@ -377,7 +377,7 @@ def find_atom(self, atom, prot): self.__memoized = {prot: {}} try: return self.__memoized[prot][atom] - except: + except KeyError: pass try: sel = IMP.atom.Selection(hierarchy=prot, diff --git 
a/modules/isd/pyext/src/utils.py b/modules/isd/pyext/src/utils.py index 0efe1aa74f..2b0b88cfec 100644 --- a/modules/isd/pyext/src/utils.py +++ b/modules/isd/pyext/src/utils.py @@ -27,12 +27,6 @@ import os import os.path import socket - - -try: - from queue import Queue # python3 -except ImportError: - from Queue import Queue # python2 from threading import Thread debug = False @@ -236,44 +230,6 @@ def is_full(self): __repr__ = __str__ -class SortedQueue(Queue): - - def sort(self): - - from numpy.oldnumeric import array - from Isd.misc.mathutils import average - - self.queue.sort(lambda a, b: cmp(average(a.time), average(b.time))) - - self.times = array([average(x.time) for x in self.queue]) - - def _put(self, item): - - Queue._put(self, item) - self.sort() - - def _get(self): - - from numpy.oldnumeric import power - from Isd.misc.mathutils import draw_dirichlet, rescale_uniform - - # compute "probabilities" - - p = 1. - rescale_uniform(self.times) - p = power(p, 2.) - - index = draw_dirichlet(p) - - val = self.queue[index] - - self.queue = self.queue[:index] + self.queue[index + 1:] - - if len(self.queue): - self.sort() - - return val - - def load_pdb(filename): import os diff --git a/modules/isd/src/AmbiguousNOERestraint.cpp b/modules/isd/src/AmbiguousNOERestraint.cpp index 4260542585..2506697770 100644 --- a/modules/isd/src/AmbiguousNOERestraint.cpp +++ b/modules/isd/src/AmbiguousNOERestraint.cpp @@ -42,8 +42,8 @@ double AmbiguousNOERestraint::unprotected_evaluate(DerivativeAccumulator *accum) double vol = 0; Floats vols; IMP_CONTAINER_FOREACH(PairContainer, pc_, { - core::XYZ d0(get_model(), _1[0]); - core::XYZ d1(get_model(), _1[1]); + core::XYZ d0(get_model(), std::get<0>(_1)); + core::XYZ d1(get_model(), std::get<1>(_1)); algebra::Vector3D c0 = d0.get_coordinates(); algebra::Vector3D c1 = d1.get_coordinates(); // will raise an error if c0 == c1 @@ -75,8 +75,8 @@ double AmbiguousNOERestraint::unprotected_evaluate(DerivativeAccumulator *accum) /* derivative for coordinates */ double factor = -6 / vol; IMP_CONTAINER_FOREACH(PairContainer, pc_, { - core::XYZ d0(get_model(), _1[0]); - core::XYZ d1(get_model(), _1[1]); + core::XYZ d0(get_model(), std::get<0>(_1)); + core::XYZ d1(get_model(), std::get<1>(_1)); algebra::Vector3D c0 = d0.get_coordinates(); algebra::Vector3D c1 = d1.get_coordinates(); algebra::Vector3D deriv = DFM * factor * (c0 - c1) * vols[_2] / diff --git a/modules/isd/src/AtomicCrossLinkMSRestraint.cpp b/modules/isd/src/AtomicCrossLinkMSRestraint.cpp index 0e45c45269..7857b9151a 100644 --- a/modules/isd/src/AtomicCrossLinkMSRestraint.cpp +++ b/modules/isd/src/AtomicCrossLinkMSRestraint.cpp @@ -57,11 +57,11 @@ Float AtomicCrossLinkMSRestraint::evaluate_for_contributions(Ints c, // loop over the contributions and score things for (Ints::const_iterator nit=c.begin();nit!=c.end();++nit){ int n = *nit; - core::XYZ d0(get_model(),ppis_[n][0]); - core::XYZ d1(get_model(),ppis_[n][1]); + core::XYZ d0(get_model(), std::get<0>(ppis_[n])); + core::XYZ d1(get_model(), std::get<1>(ppis_[n])); - Float s0 = isd::Scale(get_model(),sigmass_[n][0]).get_scale(); - Float s1 = isd::Scale(get_model(),sigmass_[n][1]).get_scale(); + Float s0 = isd::Scale(get_model(), std::get<0>(sigmass_[n])).get_scale(); + Float s1 = isd::Scale(get_model(), std::get<1>(sigmass_[n])).get_scale(); Float sig = std::sqrt(s0*s0+s1*s1); Float sig2 = sig*sig; @@ -103,8 +103,8 @@ Float AtomicCrossLinkMSRestraint::evaluate_for_contributions(Ints c, if (accum){ for (Ints::const_iterator nit=c.begin();nit!=c.end();++nit){ 
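    // Throughout this patch, ParticleIndexPair members are read with
    // std::get<0>(...) / std::get<1>(...) instead of operator[]. Both return
    // the same element; with std::get the index is a template argument, so
    // the bounds check happens at compile time (see the Array.h hunk further
    // below), removing a per-access runtime check from these scoring loops.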
     int n = *nit;
-    core::XYZ d0(get_model(),ppis_[n][0]);
-    core::XYZ d1(get_model(),ppis_[n][1]);
+    core::XYZ d0(get_model(), std::get<0>(ppis_[n]));
+    core::XYZ d1(get_model(), std::get<1>(ppis_[n]));
     algebra::Vector3D v = d1.get_coordinates() - d0.get_coordinates();
     Float dist = v.get_magnitude();
     Float score_deriv = - (2*psi-1) * score_accum/tmp_scores[n] * tmp_derivs[n];
@@ -132,10 +132,10 @@ double AtomicCrossLinkMSRestraint::unprotected_evaluate(DerivativeAccumulator *a
 ModelObjectsTemp AtomicCrossLinkMSRestraint::do_get_inputs() const {
   ParticlesTemp ret;
   for (unsigned int k = 0; k < get_number_of_contributions(); ++k) {
-    ret.push_back(get_model()->get_particle(ppis_[k][0]));
-    ret.push_back(get_model()->get_particle(ppis_[k][1]));
-    ret.push_back(get_model()->get_particle(sigmass_[k][0]));
-    ret.push_back(get_model()->get_particle(sigmass_[k][1]));
+    ret.push_back(get_model()->get_particle(std::get<0>(ppis_[k])));
+    ret.push_back(get_model()->get_particle(std::get<1>(ppis_[k])));
+    ret.push_back(get_model()->get_particle(std::get<0>(sigmass_[k])));
+    ret.push_back(get_model()->get_particle(std::get<1>(sigmass_[k])));
   }
   ret.push_back(get_model()->get_particle(psi_));
   return ret;
diff --git a/modules/isd/src/CrossLinkMSRestraint.cpp b/modules/isd/src/CrossLinkMSRestraint.cpp
index 1c2a9dc198..f33b31408f 100644
--- a/modules/isd/src/CrossLinkMSRestraint.cpp
+++ b/modules/isd/src/CrossLinkMSRestraint.cpp
@@ -52,9 +52,10 @@ double CrossLinkMSRestraint::get_probability() const {
   for (unsigned int k = 0; k < get_number_of_contributions(); ++k) {
     IMP::ParticleIndexPair ppi = ppis_[k];
-    double dist ;
-    if (ppi[0] != ppi[1]) {
-      core::XYZ d0(get_model(), ppi[0]), d1(get_model(), ppi[1]);
+    double dist;
+    if (std::get<0>(ppi) != std::get<1>(ppi)) {
+      core::XYZ d0(get_model(), std::get<0>(ppi));
+      core::XYZ d1(get_model(), std::get<1>(ppi));
       dist = (d0.get_coordinates() - d1.get_coordinates()).get_magnitude();
     } else {
@@ -62,15 +63,17 @@ double CrossLinkMSRestraint::get_probability() const {
       // get the distance as if the residue positions were randomly
       // taken from within the sphere representing the domain
       // Lund O, Protein Eng. 1997 Nov;10(11):1241-8.
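The 36/35 factor applied just below is the standard result for two points drawn independently and uniformly from a ball of radius R, which is what the Lund (1997) approximation uses for a self-crosslink within a spherical domain:

\[
  \langle d \rangle \;=\; \frac{36}{35}\,R
\]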
-      double R=core::XYZR(get_model(), ppi[0]).get_radius();
+      double R=core::XYZR(get_model(), std::get<0>(ppi)).get_radius();
       dist=36.0/35.0*R;
     }
     if (dist<0.0001){dist=0.0001;}
     IMP::ParticleIndexPair sigmas = sigmass_[k];
     IMP::ParticleIndex psii = psis_[k];
     double psi = isd::Scale(get_model(), psii).get_scale();
-    double sigmai = isd::Scale(get_model(), sigmas[0]).get_scale();
-    double sigmaj = isd::Scale(get_model(), sigmas[1]).get_scale();
+    double sigmai = isd::Scale(get_model(),
+                               std::get<0>(sigmas)).get_scale();
+    double sigmaj = isd::Scale(get_model(),
+                               std::get<1>(sigmas)).get_scale();
     double voli = 4.0 / 3.0 * IMP::PI * sigmai * sigmai * sigmai;
     double volj = 4.0 / 3.0 * IMP::PI * sigmaj * sigmaj * sigmaj;
@@ -162,17 +165,17 @@ double CrossLinkMSRestraint::unprotected_evaluate(DerivativeAccumulator *accum)
 ModelObjectsTemp CrossLinkMSRestraint::do_get_inputs() const {
   ParticlesTemp ret;
   for (unsigned int k = 0; k < get_number_of_contributions(); ++k) {
-    if (ppis_[k][0] == ppis_[k][1]) {
-      ret.push_back(get_model()->get_particle(ppis_[k][0]));
+    if (std::get<0>(ppis_[k]) == std::get<1>(ppis_[k])) {
+      ret.push_back(get_model()->get_particle(std::get<0>(ppis_[k])));
     } else {
-      ret.push_back(get_model()->get_particle(ppis_[k][0]));
-      ret.push_back(get_model()->get_particle(ppis_[k][1]));
+      ret.push_back(get_model()->get_particle(std::get<0>(ppis_[k])));
+      ret.push_back(get_model()->get_particle(std::get<1>(ppis_[k])));
     }
-    if ( sigmass_[k][0] == sigmass_[k][1]) {
-      ret.push_back(get_model()->get_particle(sigmass_[k][0]));
+    if (std::get<0>(sigmass_[k]) == std::get<1>(sigmass_[k])) {
+      ret.push_back(get_model()->get_particle(std::get<0>(sigmass_[k])));
     } else {
-      ret.push_back(get_model()->get_particle(sigmass_[k][0]));
-      ret.push_back(get_model()->get_particle(sigmass_[k][1]));
+      ret.push_back(get_model()->get_particle(std::get<0>(sigmass_[k])));
+      ret.push_back(get_model()->get_particle(std::get<1>(sigmass_[k])));
     }
     ret.push_back(get_model()->get_particle(psis_[k]));
   }
@@ -191,8 +194,8 @@ RestraintInfo *CrossLinkMSRestraint::get_static_info() const {
   ParticleIndexes ps;
   for (const auto &sigma : sigmass_) {
-    ps.push_back(sigma[0]);
-    ps.push_back(sigma[1]);
+    ps.push_back(std::get<0>(sigma));
+    ps.push_back(std::get<1>(sigma));
   }
   ri->add_particle_indexes("sigmas", ps);
@@ -200,8 +203,8 @@ RestraintInfo *CrossLinkMSRestraint::get_static_info() const {
   ps.clear();
   for (const auto &ppi : ppis_) {
-    ps.push_back(ppi[0]);
-    ps.push_back(ppi[1]);
+    ps.push_back(std::get<0>(ppi));
+    ps.push_back(std::get<1>(ppi));
   }
   ri->add_particle_indexes("endpoints", ps);
diff --git a/modules/isd/src/GaussianAnchorEMRestraint.cpp b/modules/isd/src/GaussianAnchorEMRestraint.cpp
index 1ee09e846a..7e349160f6 100644
--- a/modules/isd/src/GaussianAnchorEMRestraint.cpp
+++ b/modules/isd/src/GaussianAnchorEMRestraint.cpp
@@ -26,11 +26,11 @@ GaussianAnchorEMRestraint::unprotected_evaluate(DerivativeAccumulator *accum)
   typedef std::map::const_iterator ppiter;
   IMP_CONTAINER_FOREACH(container::CloseBipartitePairContainer,
                         md_container_,{
-    core::XYZ d1(get_model(),_1[0]);
-    core::XYZ d2(get_model(),_1[1]);
+    core::XYZ d1(get_model(), std::get<0>(_1));
+    core::XYZ d2(get_model(), std::get<1>(_1));
     Float dist=core::get_distance(d1,d2);
-    ParticlePair pp(get_model()->get_particle(_1[0]),
-                    get_model()->get_particle(_1[1]));
+    ParticlePair pp(get_model()->get_particle(std::get<0>(_1)),
+                    get_model()->get_particle(std::get<1>(_1)));
     ppiter iter_pre=md_prefactors_.find(pp);
     ppiter iter_prod=md_prods_.find(pp);
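    // A note on the IMP_CONTAINER_FOREACH blocks in these restraints: within
    // the macro body, _1 names the current element (a ParticleIndexPair for
    // pair containers) and _2 its position in the container, which is how
    // these loops index parallel per-contribution arrays such as vols[_2]
    // and alldists[i][_2].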
     md_score+=iter_pre->second*calc_score(dist,iter_prod->second);
diff --git a/modules/isd/src/GaussianEMRestraint.cpp b/modules/isd/src/GaussianEMRestraint.cpp
index 8833758bce..a50bd1aba3 100644
--- a/modules/isd/src/GaussianEMRestraint.cpp
+++ b/modules/isd/src/GaussianEMRestraint.cpp
@@ -173,8 +173,10 @@ double GaussianEMRestraint::unprotected_evaluate(DerivativeAccumulator *accum)
       mm_score = kahan_sum(mm_score,2*score);
       if (accum) { //multiply by 2 because...
-        derivs_mm[_1[0]] = kahan_vector_sum(derivs_mm[_1[0]],-2.0*deriv);
-        derivs_mm[_1[1]] = kahan_vector_sum(derivs_mm[_1[1]],2.0*deriv);
+        derivs_mm[std::get<0>(_1)] = kahan_vector_sum(
+                              derivs_mm[std::get<0>(_1)], -2.0*deriv);
+        derivs_mm[std::get<1>(_1)] = kahan_vector_sum(
+                              derivs_mm[std::get<1>(_1)], 2.0*deriv);
       }
     });
@@ -182,9 +184,10 @@ double GaussianEMRestraint::unprotected_evaluate(DerivativeAccumulator *accum)
                         md_container_,{
       Float score = score_gaussian_overlap(get_model(),_1,&deriv);
       md_score = kahan_sum(md_score,score);
-      if (local_) local_dens.insert(_1[1]);
+      if (local_) local_dens.insert(std::get<1>(_1));
       if (accum) {
-        derivs_md[_1[0]] = kahan_vector_sum(derivs_md[_1[0]],-deriv);
+        derivs_md[std::get<0>(_1)] = kahan_vector_sum(
+                              derivs_md[std::get<0>(_1)], -deriv);
       }
     });
diff --git a/modules/isd/src/LognormalAmbiguousRestraint.cpp b/modules/isd/src/LognormalAmbiguousRestraint.cpp
index 168403953e..476d712a55 100644
--- a/modules/isd/src/LognormalAmbiguousRestraint.cpp
+++ b/modules/isd/src/LognormalAmbiguousRestraint.cpp
@@ -16,7 +16,7 @@
 #include
 #include
 #include
-#include
+#include
 
 IMPISD_BEGIN_NAMESPACE
 
@@ -58,7 +58,7 @@ void LognormalAmbiguousRestraint::draw_k_from_posterior(double kt)
     pi.push_back(xi[k]/sumi);
   }
 
-  boost::uniform_real<> rand(0.0,1.0);
+  boost::random::uniform_real_distribution<> rand(0.0,1.0);
   double a =rand(IMP::random_number_generator);
   sumi=0.0;
diff --git a/modules/isd/src/MarginalHBondRestraint.cpp b/modules/isd/src/MarginalHBondRestraint.cpp
index fcc83bf591..74d1be10a8 100644
--- a/modules/isd/src/MarginalHBondRestraint.cpp
+++ b/modules/isd/src/MarginalHBondRestraint.cpp
@@ -52,8 +52,8 @@ double MarginalHBondRestraint::unprotected_evaluate(
     double mean = 0;
     Vector dists;
     IMP_CONTAINER_FOREACH(PairContainer, contribs_[i], {
-      core::XYZ d0(get_model(), _1[0]);
-      core::XYZ d1(get_model(), _1[1]);
+      core::XYZ d0(get_model(), std::get<0>(_1));
+      core::XYZ d1(get_model(), std::get<1>(_1));
       double dist = (d1.get_coordinates() -
                      d0.get_coordinates()).get_squared_magnitude();
       dist = 1.0 / cube(dist);
@@ -76,8 +76,8 @@ double MarginalHBondRestraint::unprotected_evaluate(
                         pow(meandists[i], 1. / 6) * 6;
     IMP_CONTAINER_FOREACH(PairContainer, contribs_[i], {
       double deriv_pair = pow(alldists[i][_2] / meandists[i], -7. / 6);
-      core::XYZ d0(get_model(), _1[0]);
-      core::XYZ d1(get_model(), _1[1]);
+      core::XYZ d0(get_model(), std::get<0>(_1));
+      core::XYZ d1(get_model(), std::get<1>(_1));
       algebra::Vector3D dev = (d1.get_coordinates() - d0.get_coordinates());
       double dist = dev.get_magnitude();
       algebra::Vector3D deriv = deriv_mean * deriv_pair * dev / dist;
diff --git a/modules/isd/src/MarginalNOERestraint.cpp b/modules/isd/src/MarginalNOERestraint.cpp
index ea3d20d621..904badb891 100644
--- a/modules/isd/src/MarginalNOERestraint.cpp
+++ b/modules/isd/src/MarginalNOERestraint.cpp
@@ -47,8 +47,8 @@ double MarginalNOERestraint::unprotected_evaluate(DerivativeAccumulator *accum)
     double mean = 0;
     Vector dists;
     IMP_CONTAINER_FOREACH(PairContainer, contribs_[i], {
-      core::XYZ d0(get_model(), _1[0]);
-      core::XYZ d1(get_model(), _1[1]);
+      core::XYZ d0(get_model(), std::get<0>(_1));
+      core::XYZ d1(get_model(), std::get<1>(_1));
       double dist = (d1.get_coordinates() -
                      d0.get_coordinates()).get_squared_magnitude();
       dist = 1.0 / cube(dist);
@@ -79,8 +79,8 @@ double MarginalNOERestraint::unprotected_evaluate(DerivativeAccumulator *accum)
   for (int i = 0; i < ncontribs; ++i) {
     double deriv_mean = logterms[i] * 6 * (ncontribs - 1) / SS;
     IMP_CONTAINER_FOREACH(PairContainer, contribs_[i], {
-      core::XYZ d0(get_model(), _1[0]);
-      core::XYZ d1(get_model(), _1[1]);
+      core::XYZ d0(get_model(), std::get<0>(_1));
+      core::XYZ d1(get_model(), std::get<1>(_1));
       double deriv_pair = alldists[i][_2] / meandists[i];
       if (std::abs(deriv_pair) > 1e2) {
         std::cout << "NOE derivative warning : deriv mean " << deriv_mean
diff --git a/modules/isd/src/RepulsiveDistancePairScore.cpp b/modules/isd/src/RepulsiveDistancePairScore.cpp
index 81f46827d3..5e4b6ec087 100644
--- a/modules/isd/src/RepulsiveDistancePairScore.cpp
+++ b/modules/isd/src/RepulsiveDistancePairScore.cpp
@@ -17,7 +17,7 @@ RepulsiveDistancePairScore::RepulsiveDistancePairScore(double d0, double k)
 
 double RepulsiveDistancePairScore::evaluate_index(
     Model *m, const ParticleIndexPair &p, DerivativeAccumulator *da) const {
-  core::XYZR d0(m, p[0]), d1(m, p[1]);
+  core::XYZR d0(m, std::get<0>(p)), d1(m, std::get<1>(p));
   algebra::VectorD<3> delta;
   for (int i = 0; i < 3; ++i) {
     delta[i] = d0.get_coordinate(i) - d1.get_coordinate(i);
diff --git a/modules/isd/src/ResidueProteinProximityRestraint.cpp b/modules/isd/src/ResidueProteinProximityRestraint.cpp
index dea56b1535..1cbdef6048 100644
--- a/modules/isd/src/ResidueProteinProximityRestraint.cpp
+++ b/modules/isd/src/ResidueProteinProximityRestraint.cpp
@@ -89,8 +89,8 @@ double ResidueProteinProximityRestraint::evaluate_for_contributions(Ints c)
     Vector dists_;
     IMP_CONTAINER_FOREACH(PairContainer, contribs_[n], {
       //! Distances
-      core::XYZ d0(get_model(), _1[0]);
-      core::XYZ d1(get_model(), _1[1]);
+      core::XYZ d0(get_model(), std::get<0>(_1));
+      core::XYZ d1(get_model(), std::get<1>(_1));
       double dist = get_distance(d0,d1);
       dists_.push_back(dist);
diff --git a/modules/isd/src/WeightMover.cpp b/modules/isd/src/WeightMover.cpp
index 34a6279a8c..23d7445e41 100644
--- a/modules/isd/src/WeightMover.cpp
+++ b/modules/isd/src/WeightMover.cpp
@@ -9,7 +9,6 @@
 #include
 #include
 #include
-#include
 
 IMPISD_BEGIN_NAMESPACE
 
diff --git a/modules/isd/test/test_MultivariateFNormalSufficient.cpp b/modules/isd/test/test_MultivariateFNormalSufficient.cpp
index f24e155f3c..e88c487c0e 100644
--- a/modules/isd/test/test_MultivariateFNormalSufficient.cpp
+++ b/modules/isd/test/test_MultivariateFNormalSufficient.cpp
@@ -12,7 +12,7 @@
 #include
 #include
 #include
-#include
+#include
 #include
 #include
 
@@ -20,7 +20,7 @@ namespace {
 
 using namespace IMP::isd;
 
-boost::uniform_real<> uniform(0, 1);
+boost::random::uniform_real_distribution<> uniform(0, 1);
 
 #define rand() uniform(IMP::random_number_generator)
 
diff --git a/modules/kernel/include/Array.h b/modules/kernel/include/Array.h
index 67901bdbe0..e1ce056d82 100644
--- a/modules/kernel/include/Array.h
+++ b/modules/kernel/include/Array.h
@@ -2,7 +2,7 @@
  *  \file IMP/Array.h
  *  \brief Classes to handle static sized arrays of things.
  *
- * Copyright 2007-2022 IMP Inventors. All rights reserved.
+ * Copyright 2007-2024 IMP Inventors. All rights reserved.
 *
 */
@@ -26,6 +26,11 @@ IMPKERNEL_BEGIN_NAMESPACE
 /** Only the constructor with the correct number of arguments for the
     dimensionality can be used.
 
+    Elements can be accessed using [] notation or std::get. The latter
+    is more efficient when the index is a constant, since the bounds check
+    can be done at compile time rather than runtime, e.g. x = std::get<0>(array)
+    is more efficient than x = array[0].
+
     \note These are mapped to/from Python tuples, so there is no need to use
     types that are typedefs of this on the Python side.
 
@@ -160,4 +165,19 @@ inline std::size_t hash_value(const Array &t) {
 
 IMPKERNEL_END_NAMESPACE
 
+/* Overload std::get to work on IMP::Array similarly to std::array */
+namespace std {
+  template
+  const Data& get(const IMP::Array &arr) {
+    static_assert(I < D, "array index is within bounds");
+    return *(arr.begin() + I);
+  }
+
+  template
+  Data& get(IMP::Array &arr) {
+    static_assert(I < D, "array index is within bounds");
+    return *(arr.begin() + I);
+  }
+}
+
 #endif /* IMPKERNEL_ARRAY_H */
diff --git a/modules/kernel/include/Decorator.h b/modules/kernel/include/Decorator.h
index f6e688c295..ced696150d 100644
--- a/modules/kernel/include/Decorator.h
+++ b/modules/kernel/include/Decorator.h
@@ -225,7 +225,7 @@ class IMPKERNELEXPORT Decorator : public Value {
   IMP_HASHABLE_INLINE(Decorator, return boost::hash_value(get_particle()););
 
 #if !defined(IMP_DOXYGEN) && !defined(SWIG)
-  typedef boost::false_type DecoratorHasTraits;
+  typedef std::false_type DecoratorHasTraits;
 #endif
 };
 
diff --git a/modules/kernel/include/Index.h b/modules/kernel/include/Index.h
index 99c600e2a8..f4502acd2e 100644
--- a/modules/kernel/include/Index.h
+++ b/modules/kernel/include/Index.h
@@ -24,7 +24,10 @@ IMPKERNEL_BEGIN_NAMESPACE
     around to help avoid bugs caused by mixing them up.
 
     Care has been taken so that it can be replaced by an integer everywhere,
    if needed.
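A minimal sketch of the compile-time-checked access that the Array.h overloads above enable. The model setup here is illustrative (particle names are arbitrary), but Model::add_particle and the two-argument ParticleIndexPair constructor are the existing kernel API:

    #include <IMP/Model.h>
    #include <IMP/base_types.h>

    void example(IMP::Model *m) {
      IMP::ParticleIndexPair pp(m->add_particle("p0"), m->add_particle("p1"));
      IMP::ParticleIndex a = std::get<0>(pp);  // index checked at compile time
      IMP::ParticleIndex b = pp[1];            // equivalent; checked at run time
                                               // in debug builds only
      // std::get<2>(pp) would not compile: the static_assert(I < D) fires
    }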
 */
 template
-class Index : public Value {
+class Index {
+  // We should really derive from Value, but this seems to confuse gcc at least
+  // into padding the struct and wasting memory,
+  // e.g. sizeof(ParticleIndexPair) should be 8 (2*int) but is actually 12.
   int i_;
   friend class cereal::access;
@@ -82,9 +85,10 @@ namespace {
     run-length encoding, as these vectors are often sparse. For objects
     that do not implement operator== (e.g. VectorD, SphereD), a custom
     comparison functor should be provided. */
-template >
-class IndexVector : public Vector {
-  typedef Vector P;
+template ,
+          class Equal = std::equal_to >
+class IndexVector : public Vector {
+  typedef Vector P;
   template
   void write_no_compression(
       Archive &ar, typename P::const_iterator start,
diff --git a/modules/kernel/include/Vector.h b/modules/kernel/include/Vector.h
index 1d26216061..3ff566ac83 100644
--- a/modules/kernel/include/Vector.h
+++ b/modules/kernel/include/Vector.h
@@ -28,6 +28,14 @@
 #include
 #endif
 
+// Use unified memory to back vectors when building with CUDA
+#ifdef IMP_KERNEL_CUDA_LIB
+# include
+# define IMP_VECTOR_ALLOCATOR internal::UnifiedAllocator
+#else
+# define IMP_VECTOR_ALLOCATOR std::allocator
+#endif
+
 IMPKERNEL_BEGIN_NAMESPACE
 
 //! A more \imp-like version of the \c std::vector.
@@ -38,22 +46,22 @@ IMPKERNEL_BEGIN_NAMESPACE
     - implicit conversion when the contents are implicitly convertible
     - bounds checking in debug mode
 */
-template
+template >
 class Vector : public Value
 #if !defined(IMP_DOXYGEN) && !defined(SWIG)
 #if IMP_COMPILER_HAS_DEBUG_VECTOR &&IMP_HAS_CHECKS >= IMP_INTERNAL
       ,
-      public __gnu_debug::vector
+      public __gnu_debug::vector
 #else
       ,
-      public std::vector
+      public std::vector
 #endif
 #endif
       {
 #if IMP_COMPILER_HAS_DEBUG_VECTOR &&IMP_HAS_CHECKS >= IMP_INTERNAL
-  typedef __gnu_debug::vector V;
+  typedef __gnu_debug::vector V;
 #else
-  typedef std::vector V;
+  typedef std::vector V;
 #endif
   friend class cereal::access;
@@ -138,13 +146,14 @@ class Vector : public Value
 };
 
 #if !defined(SWIG) && !defined(IMP_DOXYGEN)
-template
-void swap(Vector &a, Vector &b) {
+template
+void swap(Vector &a, Vector &b) {
   a.swap(b);
 }
 
-template
-inline Vector operator+(Vector ret, const Vector &o) {
+template
+inline Vector operator+(Vector ret,
+                        const Vector &o) {
   ret.insert(ret.end(), o.begin(), o.end());
   return ret;
 }
@@ -152,8 +161,8 @@ inline Vector operator+(Vector ret, const Vector &o) {
 #endif
 
 #if IMP_COMPILER_HAS_DEBUG_VECTOR &&IMP_HAS_CHECKS >= IMP_INTERNAL
-template
-inline std::size_t hash_value(const __gnu_debug::vector &t) {
+template
+inline std::size_t hash_value(const __gnu_debug::vector &t) {
   return boost::hash_range(t.begin(), t.end());
 }
 #endif
@@ -161,8 +170,8 @@ inline std::size_t hash_value(const __gnu_debug::vector &t) {
 IMPKERNEL_END_NAMESPACE
 
 namespace cereal {
-  template
-  struct specialize,
+  template
+  struct specialize,
                     cereal::specialization::member_load_save> {};
 }
 
diff --git a/modules/kernel/include/base_types.h b/modules/kernel/include/base_types.h
index 4d09280569..d2cb9ce1ab 100644
--- a/modules/kernel/include/base_types.h
+++ b/modules/kernel/include/base_types.h
@@ -176,15 +176,16 @@ class ParticleIndexTag {};
     Model::get_particle().
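The padding concern described in the Index.h comment above can be pinned down with compile-time checks. A sketch (the exact sizes are platform-dependent, so treat the numbers as illustrative):

    #include <IMP/base_types.h>

    static_assert(sizeof(IMP::ParticleIndex) == sizeof(int),
                  "Index should be a plain int with no padding");
    static_assert(sizeof(IMP::ParticleIndexPair) == 2 * sizeof(int),
                  "a pair of indexes should pack into 2*sizeof(int) bytes");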
*/ typedef Index ParticleIndex; -typedef Vector ParticleIndexes; +typedef Vector> ParticleIndexes; typedef Array<2, ParticleIndex> ParticleIndexPair; typedef Array<3, ParticleIndex> ParticleIndexTriplet; typedef Array<4, ParticleIndex> ParticleIndexQuad; -IMP_VALUES(ParticleIndexPair, ParticleIndexPairs); -IMP_VALUES(ParticleIndexTriplet, ParticleIndexTriplets); -IMP_VALUES(ParticleIndexQuad, ParticleIndexQuads); +IMP_VALUES_UNIFIED(ParticleIndexPair, ParticleIndexPairs); +IMP_VALUES_UNIFIED(ParticleIndexTriplet, ParticleIndexTriplets); +IMP_VALUES_UNIFIED(ParticleIndexQuad, ParticleIndexQuads); IMPKERNEL_END_NAMESPACE diff --git a/modules/kernel/include/cache.h b/modules/kernel/include/cache.h index fceeab35f3..736417fb02 100644 --- a/modules/kernel/include/cache.h +++ b/modules/kernel/include/cache.h @@ -96,8 +96,8 @@ class SparseSymmetricPairMemoizer { Generator gen_; Checker checker_; - static Key get_0(Entry e) { return e[0]; } - static Key get_1(Entry e) { return e[1]; } + static Key get_0(Entry e) { return std::get<0>(e); } + static Key get_1(Entry e) { return std::get<1>(e); } // The This:: is needed to make certain gcc versions (4.7) happier typedef boost::multi_index::global_fun P0Member; diff --git a/modules/kernel/include/decorator_macros.h b/modules/kernel/include/decorator_macros.h index 4da707103e..8e89cbd1b4 100644 --- a/modules/kernel/include/decorator_macros.h +++ b/modules/kernel/include/decorator_macros.h @@ -12,6 +12,7 @@ #include "particle_index.h" #include "Particle.h" #include "Decorator.h" +#include #include #include #include @@ -73,7 +74,7 @@ comparison macros*/ IMP_NO_DOXYGEN( \ typedef Parent ParentDecorator); \ - IMP_NO_DOXYGEN(typedef boost::true_type DecoratorHasTraits); \ + IMP_NO_DOXYGEN(typedef std::true_type DecoratorHasTraits); \ Name() : Parent() {} \ Name(::IMP::Model *m, ::IMP::ParticleIndex id, \ const TraitsType &tr = default_traits) \ @@ -560,7 +561,7 @@ static bool get_is_setup(Model *m, ParticleIndex pi) { \ return m->get_has_attribute(get_constraint_key(), pi); \ } \ - IMP_NO_DOXYGEN(typedef boost::false_type DecoratorHasTraits); \ + IMP_NO_DOXYGEN(typedef std::false_type DecoratorHasTraits); \ \ private: \ /* hide set methods*/ \ diff --git a/modules/kernel/include/internal/AccumulatorScoreModifier.h b/modules/kernel/include/internal/AccumulatorScoreModifier.h index 954fd38367..4338f35fae 100644 --- a/modules/kernel/include/internal/AccumulatorScoreModifier.h +++ b/modules/kernel/include/internal/AccumulatorScoreModifier.h @@ -20,18 +20,19 @@ #include #include #include -#include +#include #include IMPKERNEL_BEGIN_INTERNAL_NAMESPACE //! 
Get indexes of container contents that involve any of the given particles template inline -typename boost::disable_if, - std::vector >::type +typename std::enable_if::value, + std::vector >::type get_container_indexes( Model *m, - const Vector &contents, + const Vector> &contents, const ParticleIndexes &ps) { boost::dynamic_bitset<> bps(m->get_particles_size()); for (ParticleIndex pi : ps) { @@ -40,7 +41,8 @@ get_container_indexes( std::vector ret; unsigned i = 0; - for (typename Vector::const_iterator + for (typename Vector>::const_iterator cit = contents.begin(); cit != contents.end(); ++cit, ++i) { for (typename Score::IndexArgument::const_iterator pit = cit->begin(); pit != cit->end(); ++pit) { @@ -56,11 +58,12 @@ get_container_indexes( // Specialization for SingletonContainer, where each content entry is // a ParticleIndex, not a fixed-size array of indexes template inline -typename boost::enable_if, - std::vector >::type +typename std::enable_if::value, + std::vector >::type get_container_indexes( Model *m, - const Vector &contents, + const Vector> &contents, const ParticleIndexes &ps) { boost::dynamic_bitset<> bps(m->get_particles_size()); for (ParticleIndex pi : ps) { @@ -69,8 +72,9 @@ get_container_indexes( std::vector ret; unsigned i = 0; - for (Vector::const_iterator cit = contents.begin(); - cit != contents.end(); ++cit, ++i) { + for (typename Vector>::const_iterator + cit = contents.begin(); cit != contents.end(); ++cit, ++i) { if (bps[cit->get_index()]) { ret.push_back(i); } @@ -179,7 +183,8 @@ class AccumulatorScoreModifier : public Score::Modifier { } virtual void apply_indexes( - Model *m, const Vector &a, + Model *m, const Vector> &a, unsigned int lower_bound, unsigned int upper_bound) const override { double score = ss_->evaluate_indexes(m, a, sa_.get_derivative_accumulator(), lower_bound, upper_bound); @@ -189,7 +194,8 @@ class AccumulatorScoreModifier : public Score::Modifier { } virtual void apply_indexes_moved( - Model *m, const Vector &a, + Model *m, const Vector> &a, unsigned int lower_bound, unsigned int upper_bound, const ParticleIndexes &moved_pis, const ParticleIndexes &reset_pis) const override { diff --git a/modules/kernel/include/internal/AttributeTable.h b/modules/kernel/include/internal/AttributeTable.h index ba4d66e95f..f6ebdc32cf 100644 --- a/modules/kernel/include/internal/AttributeTable.h +++ b/modules/kernel/include/internal/AttributeTable.h @@ -77,9 +77,9 @@ struct DefaultTraits { static ContainerDataAccess access_container_data(Container& c) { return c.data(); } }; -template +template > struct ArrayTraits { - typedef IMP::Vector Value; + typedef IMP::Vector Value; typedef IndexVector Container; typedef const Value &PassValue; typedef K Key; @@ -131,7 +131,8 @@ struct ParticleAttributeTableTraits }; struct ParticlesAttributeTableTraits - : public ArrayTraits {}; + : public ArrayTraits> {}; struct ObjectAttributeTableTraits { typedef Object *Value; diff --git a/modules/kernel/include/internal/Flag.h b/modules/kernel/include/internal/Flag.h index 48a9416a92..76ccbdc6af 100644 --- a/modules/kernel/include/internal/Flag.h +++ b/modules/kernel/include/internal/Flag.h @@ -11,12 +11,8 @@ #include #include -#include -#include +#include #include -#include -#include -#include #include #include @@ -47,11 +43,9 @@ class FlagImpl : public boost::noncopyable { }; template -class FlagImpl >, - boost::mpl::not_ > > >:: - type> : public T, - public boost::noncopyable { +class FlagImpl::value && !std::is_fundamental::value>::type> + : public T, public 
boost::noncopyable { public: FlagImpl(boost::program_options::options_description &opt, std::string name, std::string description, T default_value) @@ -64,9 +58,9 @@ class FlagImpl -class FlagImpl, boost::is_enum > >:: - type> : public boost::noncopyable { +class FlagImpl::value || std::is_enum::value>::type> + : public boost::noncopyable { T v_; public: diff --git a/modules/kernel/include/internal/KeyVector.h b/modules/kernel/include/internal/KeyVector.h index 3f6613e4d2..3a0bd66d29 100644 --- a/modules/kernel/include/internal/KeyVector.h +++ b/modules/kernel/include/internal/KeyVector.h @@ -2,7 +2,7 @@ * \file internal/KeyVector.h * \brief A class for storing data indexed by a Key. * - * Copyright 2007-2023 IMP Inventors. All rights reserved. + * Copyright 2007-2024 IMP Inventors. All rights reserved. * */ @@ -24,13 +24,13 @@ IMPKERNEL_BEGIN_INTERNAL_NAMESPACE indexes). Instead, we must include the Key string names in the serialized data. */ -template +template > class KeyVector : public Vector { #if IMP_COMPILER_HAS_DEBUG_VECTOR &&IMP_HAS_CHECKS >= IMP_INTERNAL - typedef __gnu_debug::vector V; + typedef __gnu_debug::vector V; #else - typedef std::vector V; + typedef std::vector V; #endif friend class cereal::access; diff --git a/modules/kernel/include/internal/PointerBase.h b/modules/kernel/include/internal/PointerBase.h index d0b0ff05d2..1933ffdfe0 100644 --- a/modules/kernel/include/internal/PointerBase.h +++ b/modules/kernel/include/internal/PointerBase.h @@ -22,7 +22,6 @@ #if defined(BOOST_NO_CXX11_NULLPTR) || defined(BOOST_NO_NULLPTR) #include -#include #include #include #endif @@ -106,23 +105,21 @@ struct GetPointer { #if(defined(BOOST_NO_CXX11_NULLPTR) || defined(BOOST_NO_NULLPTR)) && \ !defined(nullptr) template -struct GetPointer::value>, - boost::mpl::not_::value> > >::type> { +struct GetPointer::value && !std::is_pointer::value>::type> { static O* get_pointer(const OO& o) { return o; } static const O* get_const_pointer(const OO& o) { return o; } }; template struct GetPointer::value>::type> { + typename std::enable_if::value>::type> { static O* get_pointer(OO* o) { return o; } static const O* get_const_pointer(const OO* o) { return o; } }; template struct GetPointer::value>::type> { + typename std::enable_if::value>::type> { static O* get_pointer(const OO& o) { IMP_INTERNAL_CHECK_VARIABLE(o) IMP_INTERNAL_CHECK(o == 0, "Non-zero pointer constant found."); diff --git a/modules/kernel/include/internal/UnifiedAllocator.h b/modules/kernel/include/internal/UnifiedAllocator.h new file mode 100644 index 0000000000..4c66c79a4d --- /dev/null +++ b/modules/kernel/include/internal/UnifiedAllocator.h @@ -0,0 +1,49 @@ +/** + * \file IMP/kernel/internal/UnifiedAllocator.h + * \brief Allocator that uses CUDA unified memory + * + * Copyright 2007-2024 IMP Inventors. All rights reserved. + * + */ + +#ifndef IMPKERNEL_INTERNAL_UNIFIED_ALLOCATOR_H +#define IMPKERNEL_INTERNAL_UNIFIED_ALLOCATOR_H + +#include + +IMPKERNEL_BEGIN_INTERNAL_NAMESPACE + +//! Use CUDA unified memory to back STL containers +/** Can be used as the second argument to many containers + (e.g. std::vector) to ensure that the memory they use is accessible + from both the CPU and the GPU. 
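A usage sketch for the allocator declared above; the installed header path is assumed, and a CUDA-enabled build is required (in non-CUDA builds, Vector.h falls back to std::allocator via IMP_VECTOR_ALLOCATOR):

    #include <IMP/internal/UnifiedAllocator.h>
    #include <vector>

    // Both host code and device code can dereference this storage, since the
    // allocator obtains CUDA unified memory instead of ordinary heap memory.
    typedef std::vector<double,
                        IMP::internal::UnifiedAllocator<double>> UnifiedVector;

    UnifiedVector scores(1024, 0.0);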
+ */
+template class UnifiedAllocator {
+public:
+  typedef T value_type;
+  typedef T* pointer;
+  typedef std::size_t size_type;
+
+  UnifiedAllocator() noexcept = default;
+
+  T* allocate(std::size_t n, const void* = 0) {
+    return (T*)IMPcuda::kernel::internal::allocate_unified_cuda(n * sizeof(T));
+  }
+
+  void deallocate(T *p, std::size_t) {
+    IMPcuda::kernel::internal::deallocate_unified_cuda(p);
+  }
+};
+
+template
+bool operator==(UnifiedAllocator const &, UnifiedAllocator const &) {
+  return true;
+}
+
+template
+bool operator!=(UnifiedAllocator const &, UnifiedAllocator const &) {
+  return false;
+}
+
+IMPKERNEL_END_INTERNAL_NAMESPACE
+
+#endif /* IMPKERNEL_INTERNAL_UNIFIED_ALLOCATOR_H */
diff --git a/modules/kernel/include/internal/Unit.h b/modules/kernel/include/internal/Unit.h
index 121760aa71..bbb2ee4ccb 100644
--- a/modules/kernel/include/internal/Unit.h
+++ b/modules/kernel/include/internal/Unit.h
@@ -1,7 +1,7 @@
 /**
  *  \file Unit.h   \brief Classes to help with converting between units.
  *
- * Copyright 2007-2022 IMP Inventors. All rights reserved.
+ * Copyright 2007-2024 IMP Inventors. All rights reserved.
 *
 */
@@ -17,7 +17,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
@@ -214,7 +213,8 @@ class Unit {
   template
   Unit(Unit o) : v_(o.v_) {
-    BOOST_STATIC_ASSERT((boost::mpl::equal::type::value));
+    static_assert((boost::mpl::equal::type::value),
+                  "size mismatch");
   }
 
   template
@@ -222,8 +222,9 @@ class Unit {
       : v_(o.v_) {}
 
   operator double() const {
-    BOOST_STATIC_ASSERT((internal::IsNoUnits<
-        0, boost::mpl::size::type::value, Units>::value));
+    static_assert((internal::IsNoUnits<
+        0, boost::mpl::size::type::value, Units>::value),
+        "type mismatch");
     return v_.get_normalized_value();
   }
 
@@ -262,8 +263,9 @@ inline std::ostream &operator<<(std::ostream &out, Unit o) {
 
 template
 struct Divide {
-  BOOST_STATIC_ASSERT(
-      (boost::mpl::equal::type::value));
+  static_assert(
+      (boost::mpl::equal::type::value),
+      "type mismatch");
   typedef typename internal::Divide::type raw_units;
   typedef typename internal::Normalize::type units;
@@ -272,8 +274,9 @@ struct Divide {
 
 template
 struct Multiply {
-  BOOST_STATIC_ASSERT(
-      (boost::mpl::equal::type::value));
+  static_assert(
+      (boost::mpl::equal::type::value),
+      "type mismatch");
   typedef typename internal::Multiply::type raw_units;
   typedef typename internal::Normalize::type units;
@@ -294,10 +297,12 @@ struct Shift {
 
 template
 struct Exchange {
-  BOOST_STATIC_ASSERT(
-      (boost::mpl::equal::type::value));
-  BOOST_STATIC_ASSERT(
-      (boost::mpl::equal::type::value));
+  static_assert(
+      (boost::mpl::equal::type::value),
+      "type mismatch");
+  static_assert(
+      (boost::mpl::equal::type::value),
+      "type mismatch");
   typedef typename internal::Divide::type Div;
   typedef typename internal::Multiply::type Mul;
diff --git a/modules/kernel/include/internal/attribute_tables.h b/modules/kernel/include/internal/attribute_tables.h
index bf86c98a0b..c04faf7b06 100644
--- a/modules/kernel/include/internal/attribute_tables.h
+++ b/modules/kernel/include/internal/attribute_tables.h
@@ -408,12 +408,16 @@ class FloatAttributeTable {
   //  vector spheres_;
   //  vector sphere_derivatives_;
   IndexVector, sphere_equal > spheres_;
   IndexVector, sphere_equal > sphere_derivatives_;
   IndexVector, vector_equal > internal_coordinates_;
   IndexVector, vector_equal > internal_coordinate_derivatives_;
   BasicAttributeTable data_;
diff --git a/modules/kernel/include/internal/base_graph_utility.h b/modules/kernel/include/internal/base_graph_utility.h
index 62419aa3a6..593a4bf89c 100644
diff --git a/modules/kernel/include/internal/base_graph_utility.h b/modules/kernel/include/internal/base_graph_utility.h index 62419aa3a6..593a4bf89c 100644 --- a/modules/kernel/include/internal/base_graph_utility.h +++ b/modules/kernel/include/internal/base_graph_utility.h @@ -17,19 +17,11 @@ #include #include #include -#include -#include -#include #include #include IMPKERNEL_BEGIN_INTERNAL_NAMESPACE namespace OWN { -using boost::enable_if; -using boost::mpl::and_; -using boost::mpl::not_; -using boost::is_convertible; -using boost::is_base_of; using std::is_pointer; template diff --git a/modules/kernel/include/internal/container_helpers.h b/modules/kernel/include/internal/container_helpers.h index 112740817f..28ead12158 100644 --- a/modules/kernel/include/internal/container_helpers.h +++ b/modules/kernel/include/internal/container_helpers.h @@ -50,7 +50,8 @@ inline ParticlesTemp flatten(const Vector< template <unsigned int D> inline ParticleIndexes flatten( - const Vector<Array<D, ParticleIndex> > &in) { + const Vector<Array<D, ParticleIndex>, + IMP_VECTOR_ALLOCATOR<Array<D, ParticleIndex>>> &in) { ParticleIndexes ret(in.size() * D); for (unsigned int i = 0; i < in.size(); ++i) { for (unsigned int j = 0; j < D; ++j) { @@ -118,7 +119,8 @@ inline ParticlesTemp get_particle(Model *m, const ParticleIndexes &ps) { template <unsigned int D> inline Vector<Array<D, Particle *> > get_particle(Model *m, - const Vector<Array<D, ParticleIndex> > &ps) { + const Vector<Array<D, ParticleIndex>, + IMP_VECTOR_ALLOCATOR<Array<D, ParticleIndex>>> &ps) { Vector<Array<D, Particle *> > ret( ps.size()); for (unsigned int i = 0; i < ps.size(); ++i) { @@ -149,9 +151,11 @@ inline ParticleIndexes get_index(const ParticlesTemp &p) { return ret; } template <unsigned int D> -Vector<Array<D, ParticleIndex> > get_index(const Vector< +Vector<Array<D, ParticleIndex>, + IMP_VECTOR_ALLOCATOR<Array<D, ParticleIndex>>> get_index(const Vector< Array<D, Particle *> > &in) { - Vector<Array<D, ParticleIndex> > ret(in.size()); + Vector<Array<D, ParticleIndex>, + IMP_VECTOR_ALLOCATOR<Array<D, ParticleIndex>>> ret(in.size()); for (unsigned int i = 0; i < ret.size(); ++i) { Array<D, ParticleIndex> c; for (unsigned int j = 0; j < D; ++j) { diff --git a/modules/kernel/include/internal/memory_cuda.h b/modules/kernel/include/internal/memory_cuda.h new file mode 100644 index 0000000000..464d4ece79 --- /dev/null +++ b/modules/kernel/include/internal/memory_cuda.h @@ -0,0 +1,25 @@ +/** + * \file IMP/kernel/internal/memory_cuda.h + * \brief CUDA memory allocation functions + * + * Copyright 2007-2024 IMP Inventors. All rights reserved. + * + */ + +#ifndef IMPKERNEL_INTERNAL_MEMORY_CUDA_H +#define IMPKERNEL_INTERNAL_MEMORY_CUDA_H + +#include <cstddef> + +namespace IMPcuda { + namespace kernel { + namespace internal { + + //! Use CUDA API to allocate unified memory + void *allocate_unified_cuda(std::size_t n); + + //! Use CUDA API to free unified memory + void deallocate_unified_cuda(void *p); + } + } +} + +#endif /* IMPKERNEL_INTERNAL_MEMORY_CUDA_H */
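Since allocate_unified_cuda()/deallocate_unified_cuda() form a plain C-style pair, a caller outside the allocator class would have to manage the returned block manually. One hedged sketch (hypothetical helper, not from the patch; assumes a build where these symbols are compiled) wraps them in a smart pointer:

#include <IMP/kernel/internal/memory_cuda.h>
#include <cstddef>
#include <memory>

// Deleter that routes destruction through the CUDA unified-memory free.
struct UnifiedDeleter {
  void operator()(void *p) const {
    IMPcuda::kernel::internal::deallocate_unified_cuda(p);
  }
};

// Exception-safe ownership of n floats of unified memory.
std::unique_ptr<float, UnifiedDeleter> make_unified_floats(std::size_t n) {
  void *p =
      IMPcuda::kernel::internal::allocate_unified_cuda(n * sizeof(float));
  return std::unique_ptr<float, UnifiedDeleter>(static_cast<float *>(p));
}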
diff --git a/modules/kernel/include/internal/random_number_generation_boost.h b/modules/kernel/include/internal/random_number_generation_boost.h index d94e854530..e22e6c6819 100644 --- a/modules/kernel/include/internal/random_number_generation_boost.h +++ b/modules/kernel/include/internal/random_number_generation_boost.h @@ -12,7 +12,7 @@ #include #include #include -#include <boost/random/uniform_real.hpp> +#include <boost/random/uniform_real_distribution.hpp> #include IMPKERNEL_BEGIN_INTERNAL_NAMESPACE @@ -51,7 +51,7 @@ template <class RealType> void get_random_numbers_uniform_boost(RealType* p_random_array, unsigned int n) { - boost::uniform_real<RealType> ud(0.0, 1.0); + boost::random::uniform_real_distribution<RealType> ud(0.0, 1.0); for(unsigned int i=0; i : public ConvertValueBase { template struct Convert< - T, typename enable_if<and_<is_base_of<Decorator, T>, - not_<typename T::DecoratorHasTraits> > >:: - type> : public ConvertValueBase<T> { + T, typename std::enable_if<std::is_base_of<Decorator, T>::value + && !T::DecoratorHasTraits::value>::type> + : public ConvertValueBase<T> { static const int converter = 3; template static T get_cpp_object(PyObject *o, const char *symname, int argnum, @@ -163,8 +163,8 @@ template struct Convert< - T, typename enable_if< - typename T::DecoratorHasTraits>::type> : public ConvertValueBase<T> { + T, typename std::enable_if<T::DecoratorHasTraits::value>::type> + : public ConvertValueBase<T> { static const int converter = 4; template static T get_cpp_object(PyObject *o, const char *symname, int argnum,
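These Convert changes illustrate the second recurring migration in this release: boost::enable_if plus mpl::and_/mpl::not_ combinators become std::enable_if with plain && and !. Schematically (hypothetical Traits/Base names, not from the patch):

#include <type_traits>

struct Base {};

template <class T, class Enable = void>
struct Traits {
  static const int converter = 0;  // primary template: everything else
};

// Selected only when T derives from Base and is not a pointer; the old
// spelling was enable_if<and_<is_base_of<Base, T>, not_<is_pointer<T> > > >.
template <class T>
struct Traits<T, typename std::enable_if<std::is_base_of<Base, T>::value &&
                                         !std::is_pointer<T>::value>::type> {
  static const int converter = 1;
};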
diff --git a/modules/kernel/include/internal/swig_helpers_base.h b/modules/kernel/include/internal/swig_helpers_base.h index 33c6516370..5004415bfc 100644 --- a/modules/kernel/include/internal/swig_helpers_base.h +++ b/modules/kernel/include/internal/swig_helpers_base.h @@ -2,7 +2,7 @@ * \file internal/swig_helpers_base.h * \brief Functions for use in swig wrappers * - * Copyright 2007-2022 IMP Inventors. All rights reserved. + * Copyright 2007-2024 IMP Inventors. All rights reserved. */ #ifndef IMPKERNEL_INTERNAL_SWIG_HELPERS_BASE_H @@ -105,12 +105,7 @@ typedef PyPointer PyOwnerPointer; } \ } -using boost::enable_if; -using boost::mpl::and_; -using boost::mpl::not_; -using boost::is_convertible; // using namespace boost; -// using namespace boost::mpl; template void assign(V*& a, const V& b) { @@ -165,7 +160,7 @@ struct ValueOrObject { template struct ValueOrObject<T, - typename boost::enable_if<boost::is_base_of<Object, T> >::type> { + typename std::enable_if<std::is_base_of<Object, T>::value>::type> { static const T* get(const T* t) { return *t; } typedef T type; typedef T* store_type; @@ -173,7 +168,7 @@ struct ValueOrObject<T*, - typename boost::enable_if<boost::is_base_of<Object, T> >::type> { + typename std::enable_if<std::is_base_of<Object, T>::value>::type> { static const T* get(const T* t) { return *t; } typedef T type; typedef T* store_type; @@ -181,7 +176,7 @@ struct ValueOrObject, - typename boost::enable_if<boost::is_base_of<Object, T> >::type> { + typename std::enable_if<std::is_base_of<Object, T>::value>::type> { static const T* get(const T* t) { return *t; } typedef T type; typedef T* store_type; @@ -189,7 +184,7 @@ struct ValueOrObject, template struct ValueOrObject, - typename boost::enable_if<boost::is_base_of<Object, T> >::type> { + typename std::enable_if<std::is_base_of<Object, T>::value>::type> { static const T* get(const T* t) { return *t; } typedef T type; typedef T* store_type; @@ -197,7 +192,7 @@ struct ValueOrObject, template struct ValueOrObject, - typename boost::enable_if<boost::is_base_of<Object, T> >::type> { + typename std::enable_if<std::is_base_of<Object, T>::value>::type> { static const T* get(const T* t) { return *t; } typedef T type; typedef T* store_type; @@ -213,7 +208,7 @@ inline std::string get_convert_error(const char *err, const char *symname, template struct ConvertAllBase { - BOOST_STATIC_ASSERT(!std::is_pointer<T>::value); + static_assert(!std::is_pointer<T>::value, "is pointer"); template static bool get_is_cpp_object(PyObject* o, SwigData st, SwigData, SwigData) { void* vp; @@ -224,8 +219,8 @@ struct ConvertAllBase { template struct ConvertValueBase : public ConvertAllBase { - BOOST_STATIC_ASSERT(!std::is_pointer<T>::value); - BOOST_STATIC_ASSERT(!(boost::is_base_of<Object, T>::value)); + static_assert(!std::is_pointer<T>::value, "is pointer"); + static_assert(!(std::is_base_of<Object, T>::value), "wrong base class"); template static const T& get_cpp_object(PyObject* o, const char *symname, int argnum, const char *argtype, SwigData st, SwigData, @@ -252,9 +247,9 @@ struct ConvertValueBase : public ConvertAllBase { // T should not be a pointer to the object template struct ConvertObjectBase : public ConvertAllBase { - BOOST_STATIC_ASSERT(!std::is_pointer<T>::value); - BOOST_STATIC_ASSERT((boost::is_base_of<Object, T>::value) || - (boost::is_same<T, Object>::value)); + static_assert(!std::is_pointer<T>::value, "is pointer"); + static_assert((std::is_base_of<Object, T>::value) || - (std::is_same<T, Object>::value), "wrong base class"); template static T* get_cpp_object(PyObject* o, const char *symname, int argnum, const char *argtype, SwigData st, SwigData, @@ -285,7 +280,7 @@ struct ConvertObjectBase : public ConvertAllBase { // T should not be a pointer to the object template struct ConvertRAII : public ConvertAllBase { - BOOST_STATIC_ASSERT(!std::is_pointer<T>::value); + static_assert(!std::is_pointer<T>::value, "is pointer"); template static T* get_cpp_object(PyObject* o, const char *symname, int argnum, const char *argtype, SwigData st, SwigData, @@ -331,8 +326,8 @@ struct Convert : public ConvertValueBase { }; template -struct Convert<T, typename boost::enable_if<boost::is_base_of<Object, T> >::type> : public ConvertObjectBase<T> { +struct Convert<T, typename std::enable_if<std::is_base_of<Object, T>::value>::type> : public ConvertObjectBase<T> { static const int converter = 1; }; @@ -344,8 +339,8 @@ struct Convert : public ConvertObjectBase { }; template -struct Convert<T, typename boost::enable_if<boost::is_base_of<Object, T> >::type> : public ConvertObjectBase<T> { +struct Convert<T, typename std::enable_if<std::is_base_of<Object, T>::value>::type> : public ConvertObjectBase<T> { static const int converter = 1; };
@@ -357,7 +352,7 @@ struct ConvertSequenceHelper { typedef typename ValueOrObject<T>::type V; - BOOST_STATIC_ASSERT(!std::is_pointer<V>::value); + static_assert(!std::is_pointer<V>::value, "is pointer"); template static bool get_is_cpp_object(PyObject* in, SwigData st, SwigData particle_st, SwigData decorator_st) { @@ -666,7 +661,7 @@ struct ConvertSequence : public ConvertVectorBase< SwigData particle_st, SwigData decorator_st) { if (numpy_import_retval == 0 && is_native_numpy_1d_array(o, NPY_INT)) { - BOOST_STATIC_ASSERT(sizeof(ParticleIndex) == sizeof(int)); + static_assert(sizeof(ParticleIndex) == sizeof(int), "size mismatch"); int dim = PyArray_DIM((PyArrayObject*)o, 0); ParticleIndex *data = (ParticleIndex *)PyArray_DATA((PyArrayObject*)o); return ParticleIndexes(data, data+dim); @@ -680,7 +675,7 @@ struct ConvertSequence : public ConvertVectorBase< static PyObject* create_python_object(const ParticleIndexes& t, SwigData st, int OWN) { if (numpy_import_retval == 0) { - BOOST_STATIC_ASSERT(sizeof(ParticleIndex) == sizeof(int)); + static_assert(sizeof(ParticleIndex) == sizeof(int), "size mismatch"); npy_intp dims[2]; dims[0] = t.size(); PyReceivePointer ret(PyArray_SimpleNew(1, dims, NPY_INT)); @@ -705,8 +700,14 @@ static IndexArray create_index_array_cpp(PyObject *o) { IndexArray arr(sz); if (sz > 0) { char *data = (char *)PyArray_DATA(a); - for (npy_intp i = 0; i < sz; ++i) { - memcpy(arr[i].data(), data + i * D * sizeof(int), sizeof(int) * D); + if (sizeof(typename IndexArray::value_type) == sizeof(int) * D) { + /* If no padding, we can just do a single copy since std::vector + is contiguous */ + memcpy(arr[0].data(), data, sizeof(int) * D * sz); + } else { + for (npy_intp i = 0; i < sz; ++i) { + memcpy(arr[i].data(), data + i * D * sizeof(int), sizeof(int) * D); + } } } return arr; @@ -722,8 +723,14 @@ static PyObject* create_index_array_numpy(const IndexArray &t) { if (t.size() > 0) { PyObject *obj = ret; char *data = (char *)PyArray_DATA((PyArrayObject*)obj); - for (size_t i = 0; i < t.size(); ++i) { - memcpy(data + i * D * sizeof(int), t[i].data(), sizeof(int) * D); + if (sizeof(typename IndexArray::value_type) == sizeof(int) * D) { + /* If no padding, we can just do a single copy since std::vector + is contiguous */ + memcpy(data, t[0].data(), sizeof(int) * D * t.size()); + } else { + for (size_t i = 0; i < t.size(); ++i) { + memcpy(data + i * D * sizeof(int), t[i].data(), sizeof(int) * D); + } } } return ret.release();
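The create_index_array hunks replace a per-element copy with a single memcpy whenever the D-int element type carries no padding, in which case the whole vector is one contiguous int buffer. A standalone sketch of the same idea (using std::array for illustration, not the patch's own types):

#include <array>
#include <cstddef>
#include <cstring>
#include <vector>

template <unsigned D>
void pack(const std::vector<std::array<int, D> > &in, int *out) {
  if (in.empty()) return;
  if (sizeof(std::array<int, D>) == sizeof(int) * D) {
    // No padding: the vector's elements form one contiguous int buffer.
    std::memcpy(out, in[0].data(), sizeof(int) * D * in.size());
  } else {
    // Fall back to copying each fixed-size element separately.
    for (std::size_t i = 0; i < in.size(); ++i) {
      std::memcpy(out + i * D, in[i].data(), sizeof(int) * D);
    }
  }
}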
diff --git a/modules/kernel/include/random_utils.h b/modules/kernel/include/random_utils.h index cb332f6700..6560dcc741 100644 --- a/modules/kernel/include/random_utils.h +++ b/modules/kernel/include/random_utils.h @@ -21,7 +21,7 @@ #endif #include -#include <boost/random/uniform_real.hpp> +#include <boost/random/uniform_real_distribution.hpp> // #include // DEBUG //#include // DEBUG @@ -151,7 +151,7 @@ get_random_float_uniform() } return cache[i++]; #else - static boost::uniform_real<float> rand(0.0, 1.0); + static boost::random::uniform_real_distribution<float> rand(0.0, 1.0); return rand(random_number_generator); #endif } @@ -163,7 +163,7 @@ get_random_float_uniform(float min, float max) { #ifdef IMP_KERNEL_CUDA_LIB return get_random_float_uniform()*(max-min)+min; #else - ::boost::uniform_real<float> rand(min, max); + ::boost::random::uniform_real_distribution<float> rand(min, max); return rand(random_number_generator); #endif } @@ -185,7 +185,7 @@ get_random_double_uniform() } return cache[i++]; #else - static boost::uniform_real<double> rand(0.0, 1.0); + static boost::random::uniform_real_distribution<double> rand(0.0, 1.0); return rand(random_number_generator); #endif } @@ -196,7 +196,7 @@ get_random_double_uniform(double min, double max) { #ifdef IMP_KERNEL_CUDA_LIB return get_random_double_uniform()*(max-min)+min; #else - ::boost::uniform_real<double> rand(min, max); + ::boost::random::uniform_real_distribution<double> rand(min, max); return rand(random_number_generator); #endif } diff --git a/modules/kernel/include/value_macros.h b/modules/kernel/include/value_macros.h index 7e6508ec09..bccbb59f38 100644 --- a/modules/kernel/include/value_macros.h +++ b/modules/kernel/include/value_macros.h @@ -23,6 +23,11 @@ #define IMP_VALUES(Name, PluralName) \ /** Pass or store a set of Name. */ typedef IMP::Vector<Name> PluralName +//! Like IMP_VALUES but using CUDA unified memory +#define IMP_VALUES_UNIFIED(Name, PluralName) \ + /** Pass or store a set of Name using CUDA unified memory. */ \ + typedef IMP::Vector<Name, internal::UnifiedAllocator<Name>> PluralName + /** To be used with native types.*/ #define IMP_BUILTIN_VALUES(Name, PluralName) \ IMP_VALUES(Name, PluralName); \ diff --git a/modules/kernel/pyext/include/IMP_kernel.types.i b/modules/kernel/pyext/include/IMP_kernel.types.i index 3922a76588..9f95854181 100644 --- a/modules/kernel/pyext/include/IMP_kernel.types.i +++ b/modules/kernel/pyext/include/IMP_kernel.types.i @@ -98,8 +98,9 @@ so copy the typemaps. */ %apply IMP::ParticleIndex { ::IMP::ParticleIndex }; /*%{ - BOOST_STATIC_ASSERT(Convert::converter ==2); - BOOST_STATIC_ASSERT(Convert::converter ==3); + static_assert(Convert::converter ==2, "wrong converter"); + static_assert(Convert::converter ==3, + "wrong converter"); %}*/ @@ -622,7 +623,7 @@ IMP_SWIG_DECORATOR_BASE(Namespace, Name, PluralName); IMP_SWIG_OBJECT_SERIALIZE_IMPL(Namespace, Name); IMP_SWIG_OBJECT_SERIALIZE_PICKLE(Namespace, Name); %{ - BOOST_STATIC_ASSERT(Convert< Namespace::Name >::converter==3); + static_assert(Convert< Namespace::Name >::converter==3, "wrong converter"); %} %pythoncode %{ _value_types.append(#Name) @@ -637,7 +638,7 @@ _value_types.append(#Name) %define IMP_SWIG_DECORATOR_WITH_TRAITS(Namespace, Name, PluralName) IMP_SWIG_DECORATOR_BASE(Namespace, Name, PluralName); %{ - BOOST_STATIC_ASSERT(Convert< Namespace::Name >::converter==4); + static_assert(Convert< Namespace::Name >::converter==4, "wrong converter"); %} %pythoncode %{ _value_types.append(#Name) @@ -965,7 +966,7 @@ IMP_SWIG_OBJECT_SERIALIZE_PICKLE(Namespace, Name) %define IMP_SWIG_RAII_INSTANCE(Namespace, Name, NiceName) %typemap(in) Namespace::Name* { - BOOST_STATIC_ASSERT($argnum==1); // RAII object Namespace::Name cannot be passed as an argument + static_assert($argnum==1, "wrong number of args"); // RAII object Namespace::Name cannot be passed as an argument try { $1=ConvertRAII::get_cpp_object($input, "$symname", $argnum, "$1_type", $descriptor(Namespace::Name*), $descriptor(IMP::Particle*), $descriptor(IMP::Decorator*)); } catch (const IMP::Exception &e) { diff --git a/modules/kernel/src/RestraintInfo.cpp b/modules/kernel/src/RestraintInfo.cpp index c0e2b50f32..8b150755aa 100644 --- a/modules/kernel/src/RestraintInfo.cpp +++ b/modules/kernel/src/RestraintInfo.cpp @@ -11,6 +11,7 @@ IMPKERNEL_BEGIN_NAMESPACE +#if IMP_HAS_CHECKS >= IMP_USAGE namespace { bool is_string_key_path(std::string name) { return boost::algorithm::ends_with(name, "filename") || @@ -20,6 +21,7 @@ bool is_string_key_path(std::string name) { name == "path"; } } +#endif void RestraintInfo::add_int(std::string key, int value) { int_.push_back(IntData(key, value));
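The random_utils hunks swap the deprecated boost::uniform_real for boost::random::uniform_real_distribution; call sites keep drawing from IMP's shared generator unchanged. A sketch of the new spelling (assuming IMP/random.h, which declares the global generator):

#include <IMP/random.h>
#include <boost/random/uniform_real_distribution.hpp>

// Draw a double in [0, 1) from IMP's global random number generator; only
// the distribution's header and class name differ from the old Boost API.
double draw_unit_interval() {
  static boost::random::uniform_real_distribution<double> rand(0.0, 1.0);
  return rand(IMP::random_number_generator);
}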
diff --git a/modules/kernel/src/internal/directories.cpp b/modules/kernel/src/internal/directories.cpp index e181a91113..23eb28816a 100644 --- a/modules/kernel/src/internal/directories.cpp +++ b/modules/kernel/src/internal/directories.cpp @@ -23,6 +23,7 @@ #endif #include #include +#include #include diff --git a/modules/kernel/src/internal/memory_cuda.cu b/modules/kernel/src/internal/memory_cuda.cu new file mode 100644 index 0000000000..5d9f7b4c8b --- /dev/null +++ b/modules/kernel/src/internal/memory_cuda.cu @@ -0,0 +1,27 @@ +/** + * \file IMP/kernel/src/internal/memory_cuda.cu + * \brief CUDA memory allocation functions + * + * Copyright 2007-2024 IMP Inventors. All rights reserved. + * + */ + + +#ifdef __NVCC__ +#include +#include +#include + +void *IMPcuda::kernel::internal::allocate_unified_cuda(std::size_t n) { + void *p; + IMP_checkCudaErrors(cudaMallocManaged(&p, n)); + return p; +} + +void IMPcuda::kernel::internal::deallocate_unified_cuda(void *p) { + if (p) { + IMP_checkCudaErrors(cudaFree(p)); + } +} + +#endif diff --git a/modules/kernel/src/internal/swig.cpp b/modules/kernel/src/internal/swig.cpp index eb6837113b..40239c199f 100644 --- a/modules/kernel/src/internal/swig.cpp +++ b/modules/kernel/src/internal/swig.cpp @@ -87,10 +87,8 @@ const Particles &_give_particles(Model *m) { const Particles &_pass_particles(const Particles &ps) { return ps; } Particle *_pass_particle(Particle *ps) { return ps; } const ParticlePair &_pass_particle_pair(const ParticlePair &pp) { - for (unsigned int i = 0; i < 2; ++i) { - std::cout << pp[i]->get_name() << " "; - } - std::cout << std::endl; + std::cout << std::get<0>(pp)->get_name() << " "; + std::cout << std::get<1>(pp)->get_name() << std::endl; return pp; } Particles _give_particles_copy(Model *m) { @@ -148,7 +146,8 @@ ParticleIndexes _create_particles_from_pdb(std::string name, Model *m) { Float _LogPairScore::evaluate_index(Model *m, const ParticleIndexPair &ipp, DerivativeAccumulator *) const { - ParticlePair pp(m->get_particle(ipp[0]), m->get_particle(ipp[1])); + ParticlePair pp(m->get_particle(std::get<0>(ipp)), + m->get_particle(std::get<1>(ipp))); if (map_.find(pp) == map_.end()) { map_[pp] = 0; } diff --git a/modules/kernel/test/expensive_test_get_array.py b/modules/kernel/test/expensive_test_get_array.py new file mode 100644 index 0000000000..089c20657a --- /dev/null +++ b/modules/kernel/test/expensive_test_get_array.py @@ -0,0 +1,17 @@ +import IMP.test + + +class Tests(IMP.test.TestCase): + + def test_get_array(self): + """Test std::get""" + # Should be a compile-time error to access an out of range element + self.assertCompileFails( + headers=['IMP/base_types.h'], + body=""" +IMP::ParticleIndexPair pp(IMP::ParticleIndex(1), IMP::ParticleIndex(3)); +IMP::ParticleIndex p = std::get<2>(pp);""") + + +if __name__ == '__main__': + IMP.test.main() diff --git a/modules/kernel/test/test_cache.cpp b/modules/kernel/test/test_cache.cpp index e9b8485adc..9494e45434 100644 --- a/modules/kernel/test/test_cache.cpp +++ b/modules/kernel/test/test_cache.cpp @@ -9,7 +9,7 @@ #include #include #include -#include <boost/random/uniform_int.hpp> +#include <boost/random/uniform_int_distribution.hpp> struct PlusOne { typedef int result_type; typedef int argument_type; @@ -22,7 +22,7 @@ int main(int argc, char *argv[]) { IMP::setup_from_argv(argc, argv, "Test of base caches in C++"); IMP::LRUCache table(PlusOne(), 10); - boost::uniform_int<> ui(0, 20); + boost::random::uniform_int_distribution<> ui(0, 20); for (unsigned int i = 0; i < 10; ++i) { int in = i; int out = table.get(in);
diff --git a/modules/kernel/test/test_pair_memoizer.cpp b/modules/kernel/test/test_pair_memoizer.cpp index bc9f0ebe1d..d6faed8cf0 100644 --- a/modules/kernel/test/test_pair_memoizer.cpp +++ b/modules/kernel/test/test_pair_memoizer.cpp @@ -13,7 +13,7 @@ #include #include #include -#include <boost/random/uniform_int.hpp> +#include <boost/random/uniform_int_distribution.hpp> #include // Skip test on g++ 4.2, since it fails to compile due to a g++ bug @@ -108,7 +108,7 @@ struct SortedPairs { } std::cout << "Returning "; for (unsigned int i = 0; i < ret.size(); ++i) { - std::cout << *ret[i][0] << "-" << *ret[i][1] << " "; + std::cout << *std::get<0>(ret[i]) << "-" << *std::get<1>(ret[i]) << " "; } std::cout << std::endl; return ret; @@ -118,16 +118,21 @@ struct SortedPairs { struct SetEquals { struct LessPair { bool operator()(IMP::Entry a, IMP::Entry b) const { - if (a[0] > a[1]) std::swap(a[0], a[1]); - if (b[0] > b[1]) std::swap(b[0], b[1]); - if (a[0] < b[0]) + if (std::get<0>(a) > std::get<1>(a)) { + std::swap(std::get<0>(a), std::get<1>(a)); + } + if (std::get<0>(b) > std::get<1>(b)) { + std::swap(std::get<0>(b), std::get<1>(b)); + } + if (std::get<0>(a) < std::get<0>(b)) { return true; - else if (a[0] > b[0]) + } else if (std::get<0>(a) > std::get<0>(b)) { return false; - else if (a[1] < b[1]) + } else if (std::get<1>(a) < std::get<1>(b)) { return true; - else + } else { return false; + } } }; bool operator()(SortedPairs::result_type t0) const { @@ -136,11 +141,11 @@ struct SetEquals { std::sort(t1.begin(), t1.end(), LessPair()); std::cout << "Comparing " << t0 << " and " << t1 << "= "; for (unsigned int i = 0; i < t0.size(); ++i) { - std::cout << *t0[i][0] << "-" << *t0[i][1] << " "; + std::cout << *std::get<0>(t0[i]) << "-" << *std::get<1>(t0[i]) << " "; } std::cout << " and "; for (unsigned int i = 0; i < t1.size(); ++i) { - std::cout << *t1[i][0] << "-" << *t1[i][1] << " "; + std::cout << *std::get<0>(t1[i]) << "-" << *std::get<1>(t1[i]) << " "; } std::cout << std::endl; if (t0.size() != t1.size()) return false; @@ -156,12 +161,12 @@ typedef IMP::SparseSymmetricPairMemoizer Table; struct Sum { int value; Sum() : value(0) {} - void operator()(IMP::Entry a) { value += *a[0] + *a[1]; } + void operator()(IMP::Entry a) { value += *std::get<0>(a) + *std::get<1>(a); } }; struct Show { void operator()(IMP::Entry a) { - std::cout << *a[0] << "-" << *a[1] << ", "; + std::cout << *std::get<0>(a) << "-" << *std::get<1>(a) << ", "; } }; @@ -186,8 +191,8 @@ int main(int argc, char *argv[]) { IMP::setup_from_argv(argc, argv, "Test memoizer"); IMP::set_log_level(IMP::VERBOSE); const int n = 5; - boost::uniform_int<> ui(0, n * 2); - boost::uniform_int<> pi(0, n - 1); + boost::random::uniform_int_distribution<> ui(0, n * 2); + boost::random::uniform_int_distribution<> pi(0, n - 1); for (int i = 0; i < n; ++i) { values.push_back(ui(IMP::random_number_generator));
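test_pair_memoizer shows the third theme of this release: elements of fixed-size kernel arrays such as ParticleIndexPair are now read with std::get<I>, which turns out-of-range access into a compile-time error (the new expensive_test_get_array.py checks exactly that). In the style of the LessPair functor above:

#include <IMP/base_types.h>
#include <utility>

// Order a pair in place; std::get<2>(pp) would fail to compile, whereas
// the old pp[2] spelling could only be caught at runtime, if at all.
void canonicalize(IMP::ParticleIndexPair &pp) {
  if (std::get<0>(pp) > std::get<1>(pp)) {
    std::swap(std::get<0>(pp), std::get<1>(pp));
  }
}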
diff --git a/modules/kernel/test/test_typemaps.py b/modules/kernel/test/test_typemaps.py index d4ce668cfe..7bc66468b5 100644 --- a/modules/kernel/test/test_typemaps.py +++ b/modules/kernel/test/test_typemaps.py @@ -4,6 +4,9 @@ import IMP.test import io +if IMP.IMP_KERNEL_HAS_NUMPY: + import numpy + class Tests(IMP.test.TestCase): @@ -105,6 +108,13 @@ def test_index_pairs(self): pso = IMP._pass_particle_index_pairs(ps) self._equal_lists(ps, pso) + @IMP.test.skipUnless(IMP.IMP_KERNEL_HAS_NUMPY, "Needs numpy") + def test_index_pairs_numpy(self): + """Check particle index pairs using NumPy arrays""" + pis = numpy.array([[0,1], [1,2], [2,3], [3,4]], dtype=numpy.int32) + piso = IMP._pass_particle_index_pairs(pis) + self.assertTrue(numpy.array_equal(piso, pis)) + def test_failure(self): """Checking bad list""" self.assertRaises(TypeError, IMP._pass_particle_pairs, [1]) diff --git a/modules/kinematics/include/FibrilSampler.h b/modules/kinematics/include/FibrilSampler.h index e6d6c47b79..0bd13b7c6e 100644 --- a/modules/kinematics/include/FibrilSampler.h +++ b/modules/kinematics/include/FibrilSampler.h @@ -13,7 +13,7 @@ #include "DOFsSampler.h" #include "revolute_joints.h" #include "TransformationJoint.h" -#include <boost/random/uniform_real.hpp> +#include <boost/random/uniform_real_distribution.hpp> #include #include #include @@ -47,7 +47,7 @@ class IMPKINEMATICSEXPORT FibrilSampler : public DOFsSampler { private: // boost::mt19937 rng_; // init random number generator - mutable std::vector<boost::uniform_real<> > u_rand_; + mutable std::vector<boost::random::uniform_real_distribution<> > u_rand_; DihedralAngleRevoluteJoints dihedral_joints_; TransformationJoints trans_joint_; }; diff --git a/modules/kinematics/include/UniformBackboneSampler.h b/modules/kinematics/include/UniformBackboneSampler.h index 5e765f9846..4f50f8748f 100644 --- a/modules/kinematics/include/UniformBackboneSampler.h +++ b/modules/kinematics/include/UniformBackboneSampler.h @@ -12,7 +12,7 @@ #include #include "DOFsSampler.h" #include "revolute_joints.h" -#include <boost/random/uniform_real.hpp> +#include <boost/random/uniform_real_distribution.hpp> IMPKINEMATICS_BEGIN_NAMESPACE @@ -42,7 +42,7 @@ class IMPKINEMATICSEXPORT UniformBackboneSampler : public DOFsSampler { private: // boost::mt19937 rng_; // init random number generator - mutable std::vector<boost::uniform_real<> > u_rand_; + mutable std::vector<boost::random::uniform_real_distribution<> > u_rand_; DihedralAngleRevoluteJoints joints_; }; diff --git a/modules/kinematics/src/FibrilSampler.cpp b/modules/kinematics/src/FibrilSampler.cpp index d4d9043519..d25ff77d5c 100644 --- a/modules/kinematics/src/FibrilSampler.cpp +++ b/modules/kinematics/src/FibrilSampler.cpp @@ -23,8 +23,9 @@ FibrilSampler::FibrilSampler( // TODO: Ensure that first DOF is a Transformation Joint // init random number generators for (unsigned int i = 0; i < get_number_of_dofs(); i++) { - boost::uniform_real<> u_rand_i(get_dof(i)->get_range().first, - get_dof(i)->get_range().second); + boost::random::uniform_real_distribution<> u_rand_i( + get_dof(i)->get_range().first, + get_dof(i)->get_range().second); u_rand_.push_back(u_rand_i); } } diff --git a/modules/kinematics/src/UniformBackboneSampler.cpp b/modules/kinematics/src/UniformBackboneSampler.cpp index 2b85058c63..66ef0b8f27 100644 --- a/modules/kinematics/src/UniformBackboneSampler.cpp +++ b/modules/kinematics/src/UniformBackboneSampler.cpp @@ -21,8 +21,9 @@ UniformBackboneSampler::UniformBackboneSampler( // init random number generators for (unsigned int i = 0; i < get_number_of_dofs(); i++) { - boost::uniform_real<> u_rand_i(get_dof(i)->get_range().first, - get_dof(i)->get_range().second); + boost::random::uniform_real_distribution<> u_rand_i( + get_dof(i)->get_range().first, + get_dof(i)->get_range().second); u_rand_.push_back(u_rand_i); } } diff --git a/modules/kmeans/src/internal/KMrand.cpp b/modules/kmeans/src/internal/KMrand.cpp index b18981c867..d11ae3d430 100644 --- a/modules/kmeans/src/internal/KMrand.cpp +++ b/modules/kmeans/src/internal/KMrand.cpp @@ -25,8 +25,8 @@ #include "IMP/kmeans/internal/KMrand.h" // random generator declarations #include -#include <boost/random/uniform_int.hpp> -#include <boost/random/uniform_real.hpp> +#include <boost/random/uniform_int_distribution.hpp> +#include <boost/random/uniform_real_distribution.hpp> IMPKMEANS_BEGIN_INTERNAL_NAMESPACE @@ -41,7 +41,7 @@ int kmRanInt(int n) { if (n == 0) { return -1; } else { - ::boost::uniform_int<int> randint(0, n - 1); + ::boost::random::uniform_int_distribution<int> randint(0, n - 1); return randint(random_number_generator); } } @@ -51,7 +51,7 @@
//------------------------------------------------------------------------ double kmRanUnif(double lo, double hi) { /* Modified for IMP: use IMP's random number generator instead */ - ::boost::uniform_real randfloat(lo, hi); + ::boost::random::uniform_real_distribution randfloat(lo, hi); return randfloat(random_number_generator); } diff --git a/modules/misc/src/CommonEndpointPairFilter.cpp b/modules/misc/src/CommonEndpointPairFilter.cpp index 07f0a3a555..16b818a987 100644 --- a/modules/misc/src/CommonEndpointPairFilter.cpp +++ b/modules/misc/src/CommonEndpointPairFilter.cpp @@ -15,12 +15,12 @@ CommonEndpointPairFilter::CommonEndpointPairFilter() {} int CommonEndpointPairFilter::get_value_index( Model *m, const ParticleIndexPair &p) const { - if (!IMP::atom::Bond::get_is_setup(m, p[0]) || - !IMP::atom::Bond::get_is_setup(m, p[1])) { + if (!IMP::atom::Bond::get_is_setup(m, std::get<0>(p)) || + !IMP::atom::Bond::get_is_setup(m, std::get<1>(p))) { return false; } else { - IMP::atom::Bond b0(m, p[0]); - IMP::atom::Bond b1(m, p[1]); + IMP::atom::Bond b0(m, std::get<0>(p)); + IMP::atom::Bond b1(m, std::get<1>(p)); return b0.get_bonded(0) == b1.get_bonded(1) || b0.get_bonded(1) == b1.get_bonded(0) || b0.get_bonded(0) == b1.get_bonded(0) || diff --git a/modules/misc/src/LowestRefinedPairScore.cpp b/modules/misc/src/LowestRefinedPairScore.cpp index 40f7a2faf7..13e184bdda 100644 --- a/modules/misc/src/LowestRefinedPairScore.cpp +++ b/modules/misc/src/LowestRefinedPairScore.cpp @@ -51,14 +51,14 @@ std::pair get_lowest(ParticlesTemp ps[2], Float LowestRefinedPairScore::evaluate_index( Model *m, const ParticleIndexPair &pi, DerivativeAccumulator *da) const { - ParticlesTemp ps[2] = {get_set(m->get_particle(pi[0]), r_), - get_set(m->get_particle(pi[1]), r_)}; + ParticlesTemp ps[2] = {get_set(m->get_particle(std::get<0>(pi)), r_), + get_set(m->get_particle(std::get<1>(pi)), r_)}; std::pair r = get_lowest(ps, f_); if (da) { - f_->evaluate_index(m, ParticleIndexPair(r.second[0]->get_index(), - r.second[1]->get_index()), + f_->evaluate_index(m, ParticleIndexPair(std::get<0>(r.second)->get_index(), + std::get<1>(r.second)->get_index()), da); } diff --git a/modules/misc/src/SoftCylinderPairScore.cpp b/modules/misc/src/SoftCylinderPairScore.cpp index 5ba7128c13..d6f79a70b3 100644 --- a/modules/misc/src/SoftCylinderPairScore.cpp +++ b/modules/misc/src/SoftCylinderPairScore.cpp @@ -17,7 +17,8 @@ SoftCylinderPairScore::SoftCylinderPairScore(double k) : k_(k) {} Float SoftCylinderPairScore::evaluate_index( Model *m, const ParticleIndexPair &pip, DerivativeAccumulator *da) const { - atom::Bond b[2] = {atom::Bond(m, pip[0]), atom::Bond(m, pip[1])}; + atom::Bond b[2] = {atom::Bond(m, std::get<0>(pip)), + atom::Bond(m, std::get<1>(pip))}; core::XYZR d[2][2] = { {core::XYZR(b[0].get_bonded(0)), core::XYZR(b[0].get_bonded(1))}, diff --git a/modules/misc/test/test_metric_close_pairs.cpp b/modules/misc/test/test_metric_close_pairs.cpp index 556cb75dd5..a4b8db1c7a 100644 --- a/modules/misc/test/test_metric_close_pairs.cpp +++ b/modules/misc/test/test_metric_close_pairs.cpp @@ -19,25 +19,25 @@ namespace { struct LowerBound { double operator()(IMP::Model *m, const IMP::ParticleIndexPair &pip) const { - return IMP::core::get_distance(IMP::core::XYZR(m, pip[0]), - IMP::core::XYZR(m, pip[1])); + return IMP::core::get_distance(IMP::core::XYZR(m, std::get<0>(pip)), + IMP::core::XYZR(m, std::get<1>(pip))); } }; struct UpperBound { double operator()(IMP::Model *m, const IMP::ParticleIndexPair &pip) const { - return 
IMP::core::get_distance(IMP::core::XYZ(m, pip[0]), - IMP::core::XYZ(m, pip[1])) + - IMP::core::XYZR(m, pip[0]).get_radius() + - IMP::core::XYZR(m, pip[1]).get_radius(); + return IMP::core::get_distance(IMP::core::XYZ(m, std::get<0>(pip)), + IMP::core::XYZ(m, std::get<1>(pip))) + + IMP::core::XYZR(m, std::get<0>(pip)).get_radius() + + IMP::core::XYZR(m, std::get<1>(pip)).get_radius(); } }; void canonicalize(IMP::ParticleIndexPairs &pip) { for (unsigned int i = 0; i < pip.size(); ++i) { - if (pip[i][0] > pip[i][1]) { - pip[i] = IMP::ParticleIndexPair(pip[i][1], pip[i][0]); + if (std::get<0>(pip[i]) > std::get<1>(pip[i])) { + pip[i] = IMP::ParticleIndexPair(std::get<1>(pip[i]), std::get<0>(pip[i])); } } } diff --git a/modules/mmcif/test/test_dumper.py b/modules/mmcif/test/test_dumper.py index 60c03380ab..361cdc3151 100644 --- a/modules/mmcif/test/test_dumper.py +++ b/modules/mmcif/test/test_dumper.py @@ -440,9 +440,10 @@ def test_workflow(self): _ihm_external_files.reference_id _ihm_external_files.file_path _ihm_external_files.content_type +_ihm_external_files.file_format _ihm_external_files.file_size_bytes _ihm_external_files.details -1 1 %s 'Modeling workflow or script' %d +1 1 %s 'Modeling workflow or script' . %d 'Integrative modeling Python script' # """ % (os.path.basename(__file__), os.stat(__file__).st_size)) diff --git a/modules/modeller/dependencies.py b/modules/modeller/dependencies.py index 933d5de8ab..5a915e98e4 100644 --- a/modules/modeller/dependencies.py +++ b/modules/modeller/dependencies.py @@ -1,3 +1,4 @@ required_modules = 'core' required_dependencies = '' optional_dependencies = '' +python_only = True diff --git a/modules/modeller/examples/imp_restraints_in_modeller.py b/modules/modeller/examples/imp_restraints_in_modeller.py index 2817fa43af..b992339b48 100644 --- a/modules/modeller/examples/imp_restraints_in_modeller.py +++ b/modules/modeller/examples/imp_restraints_in_modeller.py @@ -13,11 +13,11 @@ IMP.setup_from_argv(sys.argv, "IMP restraints in Modeller") # Set up Modeller and build a model from the GGCC primary sequence -e = modeller.environ() +e = modeller.Environ() e.edat.dynamic_sphere = False e.libs.topology.read('${LIB}/top_heav.lib') e.libs.parameters.read('${LIB}/par.lib') -modmodel = modeller.model(e) +modmodel = modeller.Model(e) modmodel.build_sequence('GGCC') # Set up IMP and load the Modeller model in as a new Hierarchy @@ -37,5 +37,5 @@ t.append(IMP.modeller.IMPRestraints(atoms, sf)) # Calculate the Modeller energy (score) for the whole protein -sel = modeller.selection(modmodel) +sel = modeller.Selection(modmodel) sel.energy() diff --git a/modules/modeller/examples/load_modeller_model.py b/modules/modeller/examples/load_modeller_model.py index 83067debac..01d6259edd 100644 --- a/modules/modeller/examples/load_modeller_model.py +++ b/modules/modeller/examples/load_modeller_model.py @@ -12,14 +12,14 @@ IMP.setup_from_argv(sys.argv, "Load Modeller model") # Set up Modeller and build a model from the GGCC primary sequence -e = modeller.environ() +e = modeller.Environ() e.edat.dynamic_sphere = True e.libs.topology.read('${LIB}/top_heav.lib') e.libs.parameters.read('${LIB}/par.lib') -modmodel = modeller.model(e) +modmodel = modeller.Model(e) modmodel.build_sequence('GGCC') # Generate Modeller stereochemistry -sel = modeller.selection(modmodel) +sel = modeller.Selection(modmodel) modmodel.restraints.make(sel, restraint_type='STEREO', spline_on_site=False) # Set up IMP and use the ModelLoader class to load the atom coordinates diff --git 
a/modules/modeller/examples/modeller_restraints_in_imp.py b/modules/modeller/examples/modeller_restraints_in_imp.py index 3a265a734c..03f5674ee1 100644 --- a/modules/modeller/examples/modeller_restraints_in_imp.py +++ b/modules/modeller/examples/modeller_restraints_in_imp.py @@ -13,16 +13,16 @@ IMP.setup_from_argv(sys.argv, "Modeller restraints in IMP") # Set up Modeller and build a model from the GGCC primary sequence -e = modeller.environ() +e = modeller.Environ() e.edat.dynamic_sphere = False e.libs.topology.read('${LIB}/top_heav.lib') e.libs.parameters.read('${LIB}/par.lib') -modmodel = modeller.model(e) +modmodel = modeller.Model(e) modmodel.build_sequence('GGCC') # Add a simple Modeller distance restraint between the first and last atoms -feat = modeller.features.distance(modmodel.atoms[0], modmodel.atoms[-1]) -r = modeller.forms.gaussian(feature=feat, mean=10.0, stdev=1.0, +feat = modeller.features.Distance(modmodel.atoms[0], modmodel.atoms[-1]) +r = modeller.forms.Gaussian(feature=feat, mean=10.0, stdev=1.0, group=modeller.physical.xy_distance) modmodel.restraints.add(r) diff --git a/modules/modeller/pyext/IMP_modeller.init.i b/modules/modeller/pyext/src/__init__.py similarity index 99% rename from modules/modeller/pyext/IMP_modeller.init.i rename to modules/modeller/pyext/src/__init__.py index 4229ef79de..8ba28bed6d 100644 --- a/modules/modeller/pyext/IMP_modeller.init.i +++ b/modules/modeller/pyext/src/__init__.py @@ -1,5 +1,3 @@ -%pythoncode %{ - import os import tempfile import shutil @@ -616,5 +614,3 @@ def load_dynamic_restraints(self, pair_filter=None): ps = IMP.atom.CoulombPairScore(sf) ps.set_relative_dielectric(edat.relative_dielectric) yield IMP.container.PairsRestraint(ps, nbl) - -%} diff --git a/modules/modeller/pyext/swig.i-in b/modules/modeller/pyext/swig.i-in deleted file mode 100644 index 224006a2af..0000000000 --- a/modules/modeller/pyext/swig.i-in +++ /dev/null @@ -1,2 +0,0 @@ -/* Include top-level Python interface */ -%include "IMP_modeller.init.i" diff --git a/modules/mpi/include/ReplicaExchange.h b/modules/mpi/include/ReplicaExchange.h index 1556a8adc7..1678cd1071 100644 --- a/modules/mpi/include/ReplicaExchange.h +++ b/modules/mpi/include/ReplicaExchange.h @@ -11,9 +11,20 @@ #include #include #include -#include #include +// We only want the C API, so try to suppress the C++ API +#ifndef MPICH_SKIP_MPICXX +#define MPICH_SKIP_MPICXX +#endif +#ifndef OMPI_SKIP_MPICXX +#define OMPI_SKIP_MPICXX +#endif +#ifndef _MPICC_H +#define _MPICC_H +#endif +#include + IMPMPI_BEGIN_NAMESPACE //! 
A class to implement Hamiltonian Replica Exchange diff --git a/modules/mpi/src/ReplicaExchange.cpp b/modules/mpi/src/ReplicaExchange.cpp index 583dcc095f..3a4849b528 100644 --- a/modules/mpi/src/ReplicaExchange.cpp +++ b/modules/mpi/src/ReplicaExchange.cpp @@ -7,7 +7,19 @@ */ #include #include + +// We only want the C API, so try to suppress the C++ API +#ifndef MPICH_SKIP_MPICXX +#define MPICH_SKIP_MPICXX +#endif +#ifndef OMPI_SKIP_MPICXX +#define OMPI_SKIP_MPICXX +#endif +#ifndef _MPICC_H +#define _MPICC_H +#endif #include "mpi.h" + #include #include #include diff --git a/modules/mpi/src/internal/mpi_helpers.cpp b/modules/mpi/src/internal/mpi_helpers.cpp index bf3a54cd6d..0fd8f7e378 100644 --- a/modules/mpi/src/internal/mpi_helpers.cpp +++ b/modules/mpi/src/internal/mpi_helpers.cpp @@ -6,6 +6,17 @@ */ #include + +// We only want the C API, so try to suppress the C++ API +#ifndef MPICH_SKIP_MPICXX +#define MPICH_SKIP_MPICXX +#endif +#ifndef OMPI_SKIP_MPICXX +#define OMPI_SKIP_MPICXX +#endif +#ifndef _MPICC_H +#define _MPICC_H +#endif #include "mpi.h" #if defined(OMPI_MAJOR_VERSION) && !defined(_MSC_VER) @@ -48,7 +59,7 @@ namespace { void dlopen_libmpi() { #if defined(OMPI_MAJOR_VERSION) && !defined(_MSC_VER) -#if OMPI_MAJOR_VERSION == 3 || OMPI_MAJOR_VERSION == 4 +#if OMPI_MAJOR_VERSION >= 3 ompi_dlopen("40"); #elif OMPI_MAJOR_VERSION == 2 ompi_dlopen("20"); diff --git a/modules/mpi/utility/hello_world.cpp b/modules/mpi/utility/hello_world.cpp index 6692dac550..91be8669a9 100644 --- a/modules/mpi/utility/hello_world.cpp +++ b/modules/mpi/utility/hello_world.cpp @@ -3,6 +3,17 @@ */ #include + +// We only want the C API, so try to suppress the C++ API +#ifndef MPICH_SKIP_MPICXX +#define MPICH_SKIP_MPICXX +#endif +#ifndef OMPI_SKIP_MPICXX +#define OMPI_SKIP_MPICXX +#endif +#ifndef _MPICC_H +#define _MPICC_H +#endif #include static int numprocs; diff --git a/modules/multi_state/bin/multi_foxs.cpp b/modules/multi_state/bin/multi_foxs.cpp index 246828a503..bdadb59b8d 100644 --- a/modules/multi_state/bin/multi_foxs.cpp +++ b/modules/multi_state/bin/multi_foxs.cpp @@ -180,7 +180,8 @@ int main(int argc, char* argv[]) { ("version", "Show version info and exit.") ("number-of-states,s", po::value(&number_of_states)->default_value(10), "maximal ensemble size") - ("bestK,k", po::value(&best_k)->default_value(1000), "bestK") + ("bestK,k", po::value(&best_k)->default_value(1000), + "limit on the number of multi-state models for each state") ("threshold,t", po::value(&chi_percentage_cluster_thr)->default_value(0.3, "0.3"), "chi value percentage threshold for profile similarity") ("chi_threshold,c", po::value(&chi_thr)->default_value(0.0, "0.0"), diff --git a/modules/multifit/pyext/src/add_fit_rmsd.py b/modules/multifit/pyext/src/add_fit_rmsd.py index e5c0d80ddb..f5b4fbd296 100755 --- a/modules/multifit/pyext/src/add_fit_rmsd.py +++ b/modules/multifit/pyext/src/add_fit_rmsd.py @@ -6,8 +6,9 @@ __doc__ = "Add RMSD to reference to each fitting file." + def parse_args(): - desc = """ + desc = """ Given a set of local fits (e.g. generated by fit_fft), the RMSD between each subunit and a reference orientation is calculated and added to each fitting file, in the final "RMSD to reference" column. 
(The original fitting file is @@ -37,16 +38,15 @@ def run(asmb_fn, proteomics_fn, mapping_fn, params_fn, dock_trans): alignment_params = IMP.multifit.AlignmentParams(params_fn) align = IMP.multifit.ProteomicsEMAlignmentAtomic( mapping_data, asmb, alignment_params) - ensmb = IMP.multifit.Ensemble(asmb, mapping_data) print("=========6") # load all proteomics restraints mdl = align.get_model() mhs = align.get_molecules() - gs = [] for i, mh in enumerate(mhs): fits_fn = asmb.get_component_header(i).get_transformations_fn() fits = IMP.multifit.read_fitting_solutions(fits_fn) - print("calculating rmsd for", len(fits), "fits of protein", mh.get_name()) + print("calculating rmsd for", len(fits), "fits of protein", + mh.get_name()) xyz = IMP.core.XYZs(IMP.core.get_leaves(mh)) mh_ref = IMP.atom.read_pdb( asmb.get_component_header(i).get_reference_fn(), @@ -71,5 +71,6 @@ def main(): run(args.assembly_file, args.proteomics_file, args.mapping_file, args.param_file, args.use_dock) + if __name__ == "__main__": main() diff --git a/modules/multifit/pyext/src/align.py b/modules/multifit/pyext/src/align.py index 4f5c806a2d..f74cb78fdf 100755 --- a/modules/multifit/pyext/src/align.py +++ b/modules/multifit/pyext/src/align.py @@ -9,6 +9,7 @@ # analyse the ensemble, first we will do the rmsd stuff + class progressBar: def __init__(self, minValue=0, maxValue=10, totalWidth=12): @@ -57,7 +58,7 @@ def __str__(self): def parse_args(): - desc = """Align proteomics graph with the EM map.""" + desc = """Align proteomics graph with the EM map.""" p = ArgumentParser(description=desc) p.add_argument("-m", "--max", type=int, dest="max", default=999999999, help="maximum number of fits considered") @@ -139,7 +140,7 @@ def run(asmb_fn, proteomics_fn, mapping_fn, params_fn, mapping_data = IMP.multifit.read_protein_anchors_mapping(prot_data, mapping_fn) print("=========4") - em_anchors = mapping_data.get_anchors() + _ = mapping_data.get_anchors() print("=========5") # load all proteomics restraints align = IMP.multifit.ProteomicsEMAlignmentAtomic(mapping_data, asmb, @@ -172,5 +173,6 @@ def main(): run(args.assembly_file, args.proteomics_file, args.mapping_file, args.param_file, args.combinations_file, args.scores_file, args.max) + if __name__ == "__main__": main() diff --git a/modules/multifit/pyext/src/anchors.py b/modules/multifit/pyext/src/anchors.py index 8a8c38f0d1..eeec936e1e 100755 --- a/modules/multifit/pyext/src/anchors.py +++ b/modules/multifit/pyext/src/anchors.py @@ -7,13 +7,15 @@ __doc__ = "Generate anchors for a density map." + def parse_args(): desc = """Generate anchors for a density map.""" p = ArgumentParser(description=desc) p.add_argument("-s", "--size", type=int, dest="size", default=-1, help="number of residues per bead") p.add_argument("assembly_file", help="assembly file name") - p.add_argument("anchor_prefix", help="prefix for output anchors file names") + p.add_argument("anchor_prefix", + help="prefix for output anchors file names") return p.parse_args() @@ -48,5 +50,6 @@ def main(): output + ".pdb", output + ".cmm", "", output + ".txt") + if __name__ == "__main__": main() diff --git a/modules/multifit/pyext/src/cluster.py b/modules/multifit/pyext/src/cluster.py index f18cbc7c15..743082141a 100755 --- a/modules/multifit/pyext/src/cluster.py +++ b/modules/multifit/pyext/src/cluster.py @@ -9,6 +9,7 @@ __doc__ = "Cluster assembly solutions." 
+ def get_uniques(seq): # Not order preserving keys = {} @@ -94,15 +95,17 @@ def __init__(self, asmb_fn, prot_fn, map_fn, align_fn, combs_fn): self.ensmb.set_was_used(True) self.mhs = self.align.get_molecules() for i, mh in enumerate(self.mhs): - self.ensmb.add_component_and_fits(mh, - IMP.multifit.read_fitting_solutions(self.asmb.get_component_header(i).get_transformations_fn())) + trans_fname = \ + self.asmb.get_component_header(i).get_transformations_fn() + self.ensmb.add_component_and_fits( + mh, IMP.multifit.read_fitting_solutions(trans_fname)) # load the density map self.dmap = IMP.em.read_map( self.asmb.get_assembly_header().get_dens_fn()) self.dmap.set_was_used(True) self.dmap.get_header().set_resolution( self.asmb.get_assembly_header().get_resolution()) - threshold = self.asmb.get_assembly_header().get_threshold() + _ = self.asmb.get_assembly_header().get_threshold() self.dmap.update_voxel_size( self.asmb.get_assembly_header().get_spacing()) self.dmap.set_origin(self.asmb.get_assembly_header().get_origin()) @@ -160,16 +163,19 @@ def do_clustering(self, max_comb_ind, max_rmsd): def get_placement_score_from_coordinates( self, model_coords, native_coords): """ - Computes the position error (placement distance) and the orientation error (placement angle) of the coordinates in model_coords respect to the coordinates in native_coords. + Computes the position error (placement distance) and the orientation + error (placement angle) of the coordinates in model_coords with + respect to the coordinates in native_coords. placement distance - translation between the centroids of the - coordinates placement angle - Angle in the axis-angle formulation of the rotation - aligning the two rigid bodies. + coordinates. + placement angle - Angle in the axis-angle formulation of the rotation + aligning the two rigid bodies. 
""" native_centroid = IMP.algebra.get_centroid(native_coords) model_centroid = IMP.algebra.get_centroid(model_coords) translation_vector = native_centroid - model_centroid distance = translation_vector.get_magnitude() - if(len(model_coords) != len(native_coords)): + if (len(model_coords) != len(native_coords)): raise ValueError( "Mismatch in the number of members %d %d " % ( len(model_coords), @@ -197,18 +203,15 @@ def get_cc(self, ps): bb_union = IMP.algebra.BoundingBox3D(bottom, top) ''' - resolution = self.dmap.get_header().get_resolution() - voxel_size = self.dmap.get_spacing() - map_solution = IMP.em.SampledDensityMap(self.dmap.get_header()) map_solution.set_particles(ps) map_solution.resample() map_solution.set_was_used(True) map_solution.calcRMS() - # base the calculation of the cross_correlation coefficient on the threshold - # for the native map, because the threshold for the map of the model changes - # with each model + # base the calculation of the cross_correlation coefficient on the + # threshold for the native map, because the threshold for the map of + # the model changes with each model # map_solution.get_header().show() threshold = 0.01 # threshold AFTER normalization using calcRMS() ccc = IMP.em.get_coarse_cc_coefficient(map_solution, @@ -259,8 +262,6 @@ def analyze_cluster(self, query_cluster_ind, max_comb_ind): angles.append([]) best_sampled_ind = -1 best_scored_ind = -1 - voxel_size = 3 # check with javi - resolution = 20 # check with javi counter = -1 for elem_ind1, cluster_ind1 in enumerate(self.cluster_inds): if cluster_ind1 != query_cluster_ind: @@ -270,7 +271,8 @@ def analyze_cluster(self, query_cluster_ind, max_comb_ind): rmsds.append( IMP.atom.get_rmsd( list(itertools.chain.from_iterable(mhs_native_ca)), - list(itertools.chain.from_iterable(self.coords[elem_ind1])))) + list(itertools.chain.from_iterable( + self.coords[elem_ind1])))) if best_scored_ind == -1: self.ensmb.load_combination(self.combs[elem_ind1]) best_scored_ind = counter @@ -333,7 +335,7 @@ def analyze_cluster(self, query_cluster_ind, max_comb_ind): def usage(): - desc = """ + desc = """ Clustering of assembly solutions. 
This program uses the Python 'fastcluster' module, which can be obtained from @@ -364,8 +366,7 @@ def main(): args.mapping_file, args.param_file, args.combinations_file) - clusters = clust_engine.do_clustering(args.max, args.rmsd) - cluster_representatives = [] + _ = clust_engine.do_clustering(args.max, args.rmsd) print("clustering completed") print("start analysis") clust_engine.do_analysis(args.max) @@ -379,13 +380,21 @@ def main(): info = clust_engine.get_cluster_stats(cluster_ind) repr_combs.append( clust_engine.get_cluster_representative_combination(cluster_ind)) - print("==========Cluster index:", info.cluster_ind, "size:", info.cluster_size) + print("==========Cluster index:", info.cluster_ind, "size:", + info.cluster_size) if info.rmsd_calculated: - print("best sampled in cluster (index,cc,distance,angle,rmsd):", info.best_sampled_ind, info.best_sampled_cc, info.best_sampled_distance, info.best_sampled_angle, info.best_sampled_rmsd) + print("best sampled in cluster (index,cc,distance,angle,rmsd):", + info.best_sampled_ind, info.best_sampled_cc, + info.best_sampled_distance, info.best_sampled_angle, + info.best_sampled_rmsd) if info.rmsd_calculated: - print("cluster representative (index,cc,distance,angle,rmsd):", info.best_scored_ind, info.best_scored_cc, info.best_scored_distance, info.best_scored_angle, info.best_scored_rmsd) + print("cluster representative (index,cc,distance,angle,rmsd):", + info.best_scored_ind, info.best_scored_cc, + info.best_scored_distance, info.best_scored_angle, + info.best_scored_rmsd) else: - print("cluster representative (index,cc):", info.best_scored_ind, info.best_scored_cc) + print("cluster representative (index,cc):", info.best_scored_ind, + info.best_scored_cc) if __name__ == "__main__": diff --git a/modules/multifit/pyext/src/cluster_coarse.py b/modules/multifit/pyext/src/cluster_coarse.py index 5d3c94596f..e228d0e6f8 100755 --- a/modules/multifit/pyext/src/cluster_coarse.py +++ b/modules/multifit/pyext/src/cluster_coarse.py @@ -7,6 +7,7 @@ # analyse the ensemble, first we will do the rmsd stuff + def parse_args(): desc = "A script for clustering an ensemble of solutions" p = ArgumentParser(description=desc) @@ -33,12 +34,14 @@ def run(asmb_fn, proteomics_fn, mapping_fn, align_param_fn, # load all proteomics restraints align = IMP.multifit.ProteomicsEMAlignmentAtomic( mapping_data, asmb_data, alignment_params) - mdl = align.get_model() + _ = align.get_model() mhs = align.get_molecules() ensb = IMP.multifit.Ensemble(asmb_data, mapping_data) for i, mh in enumerate(mhs): - ensb.add_component_and_fits(mh, - IMP.multifit.read_fitting_solutions(asmb_data.get_component_header(i).get_transformations_fn())) + ensb.add_component_and_fits( + mh, + IMP.multifit.read_fitting_solutions( + asmb_data.get_component_header(i).get_transformations_fn())) mol_path_centers = [] # save the molecule centers for each path # iterate over the molecules @@ -82,7 +85,8 @@ def run(asmb_fn, proteomics_fn, mapping_fn, align_param_fn, key=operator.itemgetter(0), reverse=True) cluster_reps = [] - for ind, [cluster_size, cluster_ind, cluster_elems] in enumerate(cluster_stat): + for ind, [cluster_size, cluster_ind, + cluster_elems] in enumerate(cluster_stat): print("cluster index:", ind, "with", cluster_size, "combinations") cluster_reps.append(combs[cluster_elems[0]]) print("============clustering============") @@ -90,6 +94,7 @@ def run(asmb_fn, proteomics_fn, mapping_fn, align_param_fn, print("==================================") IMP.multifit.write_paths(cluster_reps, 
output_comb_fn) + if __name__ == "__main__": args = parse_args() run(args.assembly_file, args.proteomics_file, args.mapping_file, diff --git a/modules/multifit/pyext/src/fit_fft.py b/modules/multifit/pyext/src/fit_fft.py index 63237432f1..1164480c95 100755 --- a/modules/multifit/pyext/src/fit_fft.py +++ b/modules/multifit/pyext/src/fit_fft.py @@ -13,7 +13,7 @@ multiproc_exception = None try: - from multiprocessing import Pool + import multiprocessing # Detect whether we are running Windows Python via Wine. Wine does not # currently support some named pipe functions which the multiprocessing # module needs: http://bugs.winehq.org/show_bug.cgi?id=17273 @@ -23,6 +23,19 @@ multiproc_exception = str(detail) +def _get_context(): + if hasattr(multiprocessing, 'get_context'): + # Use 'forkserver' rather than 'fork' start method if we can; + # 'fork' does not work well with multithreaded processes or CUDA + if 'forkserver' in multiprocessing.get_all_start_methods(): + return multiprocessing.get_context('forkserver') + else: + return multiprocessing.get_context() + else: + # For Python < 3.4, just use the original module + return multiprocessing + + class Fitter(object): def __init__( @@ -65,7 +78,7 @@ def run(self): mdl = IMP.Model() mol2fit = IMP.atom.read_pdb(self.pdb, mdl) mh_xyz = IMP.core.XYZs(IMP.core.get_leaves(mol2fit)) - rb = IMP.atom.create_rigid_body(mol2fit) + _ = IMP.atom.create_rigid_body(mol2fit) ff = IMP.multifit.FFTFitting() ff.set_was_used(True) fits = ff.do_global_fitting(dmap, self.threshold, mol2fit, @@ -161,13 +174,16 @@ def run(asmb_fn, options): if multiproc_exception is None and options.cpus > 1: # No point in spawning more processes than components nproc = min(options.cpus, asmb_input.get_number_of_component_headers()) - p = Pool(processes=nproc) - out = list(p.imap_unordered(do_work, work_units)) + ctx = _get_context() + p = ctx.Pool(processes=nproc) + _ = list(p.imap_unordered(do_work, work_units)) + p.close() def main(): args = parse_args() run(args.assembly_file, args) + if __name__ == "__main__": main() diff --git a/modules/multifit/pyext/src/indexes.py b/modules/multifit/pyext/src/indexes.py index c606277311..37fee7095a 100755 --- a/modules/multifit/pyext/src/indexes.py +++ b/modules/multifit/pyext/src/indexes.py @@ -6,6 +6,7 @@ __doc__ = "Generate indexes of fitting solutions." + def parse_args(): desc = """Generate indexes of fitting solutions.""" p = ArgumentParser(description=desc) @@ -35,8 +36,7 @@ def run(assembly_name, asmb_fn, num_fits, mapping_fn=""): for i in range(min(num_fits, len(fits))): indexes.append([i]) IMP.multifit.write_paths(indexes, index_fn) - mapping_data.write("|protein|" + name + - "|" + index_fn + "|\n") + mapping_data.write("|protein|" + name + "|" + index_fn + "|\n") mapping_data.close() @@ -45,5 +45,6 @@ def main(): run(args.assembly_name, args.assembly_file, args.num_fits, args.indexes_file) + if __name__ == "__main__": main() diff --git a/modules/multifit/pyext/src/merge_tree.py b/modules/multifit/pyext/src/merge_tree.py index 6b63488e3c..d9e28ad5d4 100755 --- a/modules/multifit/pyext/src/merge_tree.py +++ b/modules/multifit/pyext/src/merge_tree.py @@ -6,8 +6,9 @@ __doc__ = "Show the DOMINO merge tree to be used in alignment." 
+ def parse_args(): - desc = """ + desc = """ Show the DOMINO merge tree to be used in the alignment procedure """ p = ArgumentParser(description=desc) @@ -36,7 +37,7 @@ def run(asmb_fn, proteomics_fn, mapping_fn, params_fn): mapping_data = IMP.multifit.read_protein_anchors_mapping(prot_data, mapping_fn) - em_anchors = mapping_data.get_anchors() + _ = mapping_data.get_anchors() # load all proteomics restraints align = IMP.multifit.ProteomicsEMAlignmentAtomic(mapping_data, asmb, @@ -55,5 +56,6 @@ def main(): run(args.assembly_file, args.proteomics_file, args.mapping_file, args.param_file) + if __name__ == "__main__": main() diff --git a/modules/multifit/pyext/src/models.py b/modules/multifit/pyext/src/models.py index 6f2827d626..1164e88891 100755 --- a/modules/multifit/pyext/src/models.py +++ b/modules/multifit/pyext/src/models.py @@ -8,6 +8,7 @@ # analyse the ensemble, first we will do the rmsd stuff + def parse_args(): desc = """Write output models.""" p = ArgumentParser(description=desc) @@ -47,5 +48,6 @@ def main(): run(args.assembly_file, args.proteomics_file, args.mapping_file, args.combinations_file, args.model_prefix, args.max) + if __name__ == "__main__": main() diff --git a/modules/multifit/pyext/src/param.py b/modules/multifit/pyext/src/param.py index 00a7099d60..6e2f2bea01 100755 --- a/modules/multifit/pyext/src/param.py +++ b/modules/multifit/pyext/src/param.py @@ -61,8 +61,9 @@ def get_density_data(name, density_fn, resolution, spacing, threshold, sd = IMP.multifit.SettingsData() sd.set_was_used(True) msg = sd.get_density_header_line() - msg += density_fn + "|" + str(resolution) + "|" + str(spacing) + "|" + str( - threshold) + "|" + str(origin[0]) + "|" + str(origin[1]) + "|" + str(origin[2]) + msg += (density_fn + "|" + str(resolution) + "|" + str(spacing) + "|" + + str(threshold) + "|" + str(origin[0]) + "|" + str(origin[1]) + + "|" + str(origin[2])) msg += "|" + anchor_dir_name + name + "_em_coarse_anchors.txt|" + \ anchor_dir_name + name + "_em_coarse_anchors_FINE.txt|" msg += anchor_dir_name + name + "_em_fine_anchors.txt|" + \ @@ -88,8 +89,8 @@ def get_protein_data( surface_fn = fnn[:-1].split()[1] + ".ms" # TODO - add the number of copies data mh = IMP.atom.read_pdb(fn, mdl) - num_anchors = len(IMP.atom.get_by_type(mh, - IMP.atom.RESIDUE_TYPE)) // coarse_level + num_anchors = len(IMP.atom.get_by_type( + mh, IMP.atom.RESIDUE_TYPE)) // coarse_level msg += name + "|" + fn + "|" + surface_fn + "|" + \ anchor_dir_name + name + "_anchors.txt|" + \ str(num_anchors) + "|" diff --git a/modules/multifit/pyext/src/proteomics.py b/modules/multifit/pyext/src/proteomics.py index 04eff3729a..b260e70e6f 100755 --- a/modules/multifit/pyext/src/proteomics.py +++ b/modules/multifit/pyext/src/proteomics.py @@ -76,5 +76,6 @@ def main(): args = parse_args() run(args.assembly_file, args.anchors_file, args.proteomics_file) + if __name__ == "__main__": main() diff --git a/modules/multifit/pyext/src/reference.py b/modules/multifit/pyext/src/reference.py index 0c001ffaa9..abc5948a9d 100755 --- a/modules/multifit/pyext/src/reference.py +++ b/modules/multifit/pyext/src/reference.py @@ -8,6 +8,7 @@ # analyse the ensemble, first we will do the rmsd stuff + def get_placement_scores_from_coordinates(model_components_coords, native_components_coords): """ @@ -42,7 +43,7 @@ def get_placement_score_from_coordinates(model_coords, native_coords): model_centroid = IMP.algebra.get_centroid(model_coords) translation_vector = native_centroid - model_centroid distance = translation_vector.get_magnitude() - 
if(len(model_coords) != len(native_coords)): + if (len(model_coords) != len(native_coords)): raise ValueError( "Mismatch in the number of members %d %d " % ( len(model_coords), @@ -55,8 +56,8 @@ def get_placement_score_from_coordinates(model_coords, native_coords): def get_rmsd(hierarchy1, hierarchy2): - xyz1 = [IMP.core.XYZ(l) for l in IMP.atom.get_leaves(hierarchy1)] - xyz2 = [IMP.core.XYZ(l) for l in IMP.atom.get_leaves(hierarchy2)] + xyz1 = [IMP.core.XYZ(leaf) for leaf in IMP.atom.get_leaves(hierarchy1)] + xyz2 = [IMP.core.XYZ(leaf) for leaf in IMP.atom.get_leaves(hierarchy2)] return IMP.atom.get_rmsd(xyz1, xyz2) @@ -73,17 +74,17 @@ def get_components_placement_scores(assembly, native_assembly, align=False): placement distances of the children. The second list contains the placement angles """ - model_coords_per_child = [get_coordinates(c) + model_coords_per_child = [get_coordinates(c) # noqa: F821 for c in assembly.get_children()] - native_coords_per_child = [get_coordinates(c) + native_coords_per_child = [get_coordinates(c) # noqa: F821 for c in native_assembly.get_children()] if align: model_coords = [] - nil = [model_coords.extend(x) for x in model_coords_per_child] + _ = [model_coords.extend(x) for x in model_coords_per_child] native_coords = [] - nil = [native_coords.extend(x) for x in native_coords_per_child] - T = alg.get_transformation_aligning_first_to_second(model_coords, - native_coords) + _ = [native_coords.extend(x) for x in native_coords_per_child] + T = IMP.algebra.get_transformation_aligning_first_to_second( + model_coords, native_coords) # get aligned coordinates new_model_coords_per_child = [] for c in model_coords_per_child: @@ -163,5 +164,6 @@ def main(): return run(args.assembly_file, args.proteomics_file, args.mapping_file, args.combinations_file, args.max) + if __name__ == "__main__": main() diff --git a/modules/multifit/pyext/src/refine_fft.py b/modules/multifit/pyext/src/refine_fft.py index a4c470594e..f206321b35 100755 --- a/modules/multifit/pyext/src/refine_fft.py +++ b/modules/multifit/pyext/src/refine_fft.py @@ -6,11 +6,10 @@ import IMP.atom import IMP.em from IMP import ArgumentParser -import os -import sys __doc__ = "Refine fitting subunits into a density map with FFT." 
+ class Fitter(object): def __init__( @@ -66,14 +65,15 @@ def run_local_fitting(self, mol2fit, rb, initial_transformation): # fits = ff.do_local_fitting(dmap, self.threshold, mol2fit, self.angle / 180.0 * math.pi, - self.max_angle / 180.0 * - math.pi, self.max_trans, num_fits_to_report, + self.max_angle / 180.0 * math.pi, + self.max_trans, num_fits_to_report, do_cluster_fits, self.angles_per_voxel, - max_clustering_translation, max_clustering_rotation) + max_clustering_translation, + max_clustering_rotation) fits.set_was_used(True) final_fits = fits.best_fits_ if self.ref_pdb != '': - ref_mh = IMP.atom.read_pdb(self.ref_pdb, mdl) + ref_mh = IMP.atom.read_pdb(self.ref_pdb, mdl) # noqa: F821 ref_mh_xyz = IMP.core.XYZs(IMP.core.get_leaves(ref_mh)) cur_low = [1e4, 0] for i, fit in enumerate(final_fits): @@ -155,7 +155,7 @@ def run( ensmb_ref = IMP.multifit.load_ensemble(asmb_input, mdl2, mapping_data) ensmb_ref.set_was_used(True) - mhs_ref = ensmb_ref.get_molecules() + _ = ensmb_ref.get_molecules() ensmb.load_combination(combs[comb_ind]) @@ -175,17 +175,16 @@ def run( rb_ref = rbs_ref[i] rb = rbs[i] - initial_transformation = IMP.algebra.get_transformation_from_first_to_second( - rb_ref.get_reference_frame(), - rb.get_reference_frame()) + initial_transformation = \ + IMP.algebra.get_transformation_from_first_to_second( + rb_ref.get_reference_frame(), rb.get_reference_frame()) pdb_fn = asmb_input.get_component_header(i).get_filename() - f = Fitter( - em_map, spacing, resolution, origin, asmb_input.get_assembly_header( - ).get_threshold( - ), pdb_fn, fits_fn, options.angle, options.num, options.angle_voxel, - options.max_trans, options.max_angle) + f = Fitter(em_map, spacing, resolution, origin, + asmb_input.get_assembly_header().get_threshold(), pdb_fn, + fits_fn, options.angle, options.num, options.angle_voxel, + options.max_trans, options.max_angle) f.run_local_fitting(mh, rb, initial_transformation) @@ -195,5 +194,6 @@ def main(): args.mapping_file, args.combinations_file, args.combination_index, args) + if __name__ == "__main__": main() diff --git a/modules/multifit/pyext/src/score.py b/modules/multifit/pyext/src/score.py index eb9e41be2f..9b53999b1a 100755 --- a/modules/multifit/pyext/src/score.py +++ b/modules/multifit/pyext/src/score.py @@ -8,6 +8,7 @@ # analyse the ensemble, first we will do the rmsd stuff + def get_color_map(): colors = {} colors["Rpt1"] = [0.78, 0.78, 0.73] @@ -88,7 +89,7 @@ def decompose(dmap, mhs): def score_each_protein(dmap, mhs, sd): norm_factors = decompose(dmap, mhs) scores = [] - mdl = mhs[0].get_model() + # mdl = mhs[0].get_model() for i in range(len(mhs)): leaves = IMP.core.get_leaves(mhs[i]) rb = IMP.core.RigidMember(leaves[0]).get_rigid_body() @@ -116,12 +117,8 @@ def score_each_protein(dmap, mhs, sd): def usage(): - usage = """%prog [options] - - -Score each of a set of combinations. -""" - p = ArgumentParser(usage) + desc = "Score each of a set of combinations." 
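The fix in this hunk is more than cosmetic: argparse's first positional parameter is prog, not usage, so the old call installed the help text as the program name, and "%prog" is an optparse placeholder that argparse does not expand. Assuming IMP.ArgumentParser forwards its arguments to argparse unchanged, the corrected pattern is simply:

from IMP import ArgumentParser

desc = "Score each of a set of combinations."
p = ArgumentParser(description=desc)

Renaming the local from usage to desc also stops it from shadowing the enclosing usage() function.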
+ p = ArgumentParser(description=desc) p.add_argument("-m", "--max", dest="max", type=int, default=999999999, help="maximum number of fits considered") p.add_argument("assembly_file", help="assembly file name") @@ -144,8 +141,8 @@ def run(asmb_fn, proteomics_fn, mapping_fn, params_fn, combs_fn, threshold = asmb.get_assembly_header().get_threshold() combs = IMP.multifit.read_paths(combs_fn) # get rmsd for subunits - colors = get_color_map() - names = list(colors.keys()) + # colors = get_color_map() + # names = list(colors.keys()) print(params_fn) alignment_params = IMP.multifit.AlignmentParams(params_fn) alignment_params.show() @@ -161,7 +158,7 @@ def run(asmb_fn, proteomics_fn, mapping_fn, params_fn, combs_fn, mapping_data = IMP.multifit.read_protein_anchors_mapping( prot_data, mapping_fn) print("=========4") - em_anchors = mapping_data.get_anchors() + _ = mapping_data.get_anchors() print("=========5") ensmb = IMP.multifit.Ensemble(asmb, mapping_data) print("=========6") @@ -173,19 +170,20 @@ def run(asmb_fn, proteomics_fn, mapping_fn, params_fn, combs_fn, mdl = align.get_model() mhs = align.get_molecules() align.add_states_and_filters() - rbs = align.get_rigid_bodies() - print(IMP.core.RigidMember(IMP.core.get_leaves(mhs[0])[0]).get_rigid_body()) + # rbs = align.get_rigid_bodies() + print(IMP.core.RigidMember( + IMP.core.get_leaves(mhs[0])[0]).get_rigid_body()) align.set_density_map(dmap, threshold) - gs = [] for i, mh in enumerate(mhs): - ensmb.add_component_and_fits(mh, - IMP.multifit.read_fitting_solutions(asmb.get_component_header(i).get_transformations_fn())) + ensmb.add_component_and_fits( + mh, IMP.multifit.read_fitting_solutions( + asmb.get_component_header(i).get_transformations_fn())) + ''' try: rgb = colors[mh.get_name()] except: rgb = colors[names[i]] color = IMP.display.Color(rgb[0], rgb[1], rgb[2]) - ''' for p in IMP.core.get_leaves(mh): g= IMP.display.XYZRGeometry(p) g.set_color(color) @@ -277,5 +275,6 @@ def main(): run(args.assembly_file, args.proteomics_file, args.mapping_file, args.param_file, args.combinations_file, args.scores_file, args.max) + if __name__ == "__main__": main() diff --git a/modules/multifit/pyext/src/segment.py b/modules/multifit/pyext/src/segment.py index 1d3e589ffe..85d3c98dc3 100755 --- a/modules/multifit/pyext/src/segment.py +++ b/modules/multifit/pyext/src/segment.py @@ -62,5 +62,6 @@ def main(): args.num_cluster, args.output, args.cmm, args.seg, args.txt) + if __name__ == "__main__": main() diff --git a/modules/multifit/pyext/src/transforms.py b/modules/multifit/pyext/src/transforms.py index da5a14beea..027d74e940 100755 --- a/modules/multifit/pyext/src/transforms.py +++ b/modules/multifit/pyext/src/transforms.py @@ -7,6 +7,7 @@ __doc__ = "Write assembly transformation file in other formats." + class Formatter(object): def __init__(self, fh): @@ -17,10 +18,10 @@ def write_header(self, settings): class ChimeraFormatter(Formatter): - __doc__ = \ -"""Each line in 'chimera' format lists the transformation index, the -cross correlation score, and then the transformation for each component, -as a rotation matrix (row-major order) and a translation in angstroms.""" + """Each line in 'chimera' format lists the transformation index, the + cross correlation score, and then the transformation for each component, + as a rotation matrix (row-major order) and a translation in angstroms. 
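For concreteness, a line for a single-component assembly might look like this (hypothetical values: identity rotation, translation of (12.5, -3.2, 44.0)):

0	0.87	1.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 1.0 12.5 -3.2 44.0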
+ """ def write_line(self, ind, score, transforms): self.fh.write(str(ind) + "\t" + score + "\t") @@ -36,10 +37,9 @@ def write_line(self, ind, score, transforms): class DockRefFormatter(Formatter): - __doc__ = \ -"""Each line in 'dockref' format lists the transformation for each component, -as a set of three Euler angles (in radians about the fixed x, y and z axes) -and a translation in angstroms.""" + """Each line in 'dockref' format lists the transformation for each + component, as a set of three Euler angles (in radians about the + fixed x, y and z axes) and a translation in angstroms.""" def write_header(self, sd): # write the name of the proteins @@ -70,10 +70,10 @@ def parse_args(): p = ArgumentParser(description=desc) p.add_argument("-f", "--format", default='chimera', - choices=list(formatters.keys()), - help="type of output to generate (" - + ", ".join(formatters.keys()) - + "; default: chimera)") + choices=list(formatters.keys()), + help="type of output to generate (" + + ", ".join(formatters.keys()) + + "; default: chimera)") p.add_argument("assembly_file", help="assembly file name") p.add_argument("combinations_file", help="combinations file name") p.add_argument("output_file", help="output file name") @@ -103,5 +103,6 @@ def main(): fmt = formatters[args.format](open(args.output_file, 'w')) run(args.assembly_file, args.combinations_file, fmt) + if __name__ == "__main__": main() diff --git a/modules/nestor b/modules/nestor new file mode 160000 index 0000000000..a83cce0209 --- /dev/null +++ b/modules/nestor @@ -0,0 +1 @@ +Subproject commit a83cce02097a32f3f67c54d897a3635f9a347afc diff --git a/modules/npc/include/ProteinLocalizationRestraint.h b/modules/npc/include/ProteinLocalizationRestraint.h index 1e2e6d711c..9c9f469d7f 100644 --- a/modules/npc/include/ProteinLocalizationRestraint.h +++ b/modules/npc/include/ProteinLocalizationRestraint.h @@ -798,6 +798,187 @@ class IMPNPCEXPORT ProteinProximityRestraint IMP_OBJECT_METHODS(ProteinProximityRestraint);; }; +//! Restrain particles by their x coordinate +/** Each particle's x coordinate is harmonically restrained to lie between + the given lower and upper bounds. + */ +class IMPNPCEXPORT XAxialPositionRestraint : public Restraint +{ + IMP::PointerMember sc_; + double lower_bound_; + double upper_bound_; + double sigma_; + bool consider_radius_; + friend class cereal::access; + template void serialize(Archive &ar) { + ar(cereal::base_class(this), + sc_, lower_bound_, upper_bound_, sigma_, consider_radius_); + } + IMP_OBJECT_SERIALIZE_DECL(XAxialPositionRestraint); + +public: + XAxialPositionRestraint(Model *m, SingletonContainerAdaptor sc, + double lower_bound, double upper_bound, bool consider_radius, double sigma=1); + XAxialPositionRestraint(Model *m, + double lower_bound, double upper_bound, bool consider_radius, double sigma=1); + XAxialPositionRestraint() {} + +#ifndef IMP_DOXYGEN + void add_particle(Particle *p); + void add_particles(const ParticlesTemp &ps); + void set_particles(const ParticlesTemp &ps); +#endif + + double unprotected_evaluate( + IMP::DerivativeAccumulator *accum) const override; + ModelObjectsTemp do_get_inputs() const override; + + //! \return Information for writing to RMF files + RestraintInfo *get_static_info() const override; + + IMP_OBJECT_METHODS(XAxialPositionRestraint);; +}; + +//! Restrain particles by their x coordinate +/** Each particle's x coordinate is harmonically restrained to lie above + the given lower bound. 
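The new X-axial restraints mirror the existing Y/Z ones: each particle pays a one-sided quadratic penalty outside the allowed interval, and the sum is scaled by 1/sigma (with consider_radius set, the particle surface at x +/- r is tested rather than the center). A sketch of the math, matching the unprotected_evaluate implementations later in this patch:

def x_axial_score(xs, lower=None, upper=None, sigma=1.0):
    """One-sided quadratic walls on the x coordinate, summed over
    particles; lower/upper may each be None for the one-sided variants."""
    v = 0.0
    for x in xs:
        if lower is not None and x < lower:
            v += (x - lower) ** 2
        if upper is not None and x > upper:
            v += (x - upper) ** 2
    return v / sigma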
+ */ +class IMPNPCEXPORT XAxialPositionLowerRestraint : public Restraint +{ + IMP::PointerMember sc_; + double lower_bound_; + double sigma_; + bool consider_radius_; + + friend class cereal::access; + template void serialize(Archive &ar) { + ar(cereal::base_class(this), + sc_, lower_bound_, sigma_, consider_radius_); + } + IMP_OBJECT_SERIALIZE_DECL(XAxialPositionLowerRestraint); + +public: + XAxialPositionLowerRestraint(Model *m, SingletonContainerAdaptor sc, + double lower_bound, bool consider_radius, double sigma=1); + XAxialPositionLowerRestraint(Model *m, + double lower_bound, bool consider_radius, double sigma=1); + XAxialPositionLowerRestraint() {} + +#ifndef IMP_DOXYGEN + void add_particle(Particle *p); + void add_particles(const ParticlesTemp &ps); + void set_particles(const ParticlesTemp &ps); +#endif + + double unprotected_evaluate( + IMP::DerivativeAccumulator *accum) const override; + ModelObjectsTemp do_get_inputs() const override; + + //! \return Information for writing to RMF files + RestraintInfo *get_static_info() const override; + + IMP_OBJECT_METHODS(XAxialPositionLowerRestraint);; +}; + +//! Restrain particles by their x coordinate +/** Each particle's x coordinate is harmonically restrained to lie below + the given upper bound. + */ +class IMPNPCEXPORT XAxialPositionUpperRestraint : public Restraint +{ + IMP::PointerMember sc_; + double upper_bound_; + double sigma_; + bool consider_radius_; + + friend class cereal::access; + template void serialize(Archive &ar) { + ar(cereal::base_class(this), + sc_, upper_bound_, sigma_, consider_radius_); + } + IMP_OBJECT_SERIALIZE_DECL(XAxialPositionUpperRestraint); + +public: + XAxialPositionUpperRestraint(Model *m, SingletonContainerAdaptor sc, + double upper_bound, bool consider_radius, double sigma=1); + XAxialPositionUpperRestraint(Model *m, + double upper_bound, bool consider_radius, double sigma=1); + XAxialPositionUpperRestraint() {} + + + + +#ifndef IMP_DOXYGEN + void add_particle(Particle *p); + void add_particles(const ParticlesTemp &ps); + void set_particles(const ParticlesTemp &ps); +#endif + + double unprotected_evaluate( + IMP::DerivativeAccumulator *accum) const override; + ModelObjectsTemp do_get_inputs() const override; + + //! \return Information for writing to RMF files + RestraintInfo *get_static_info() const override; + + IMP_OBJECT_METHODS(XAxialPositionUpperRestraint);; +}; + +//! Restrain particle to a specific position +/** All distances are in Angstrom +While SphereDistanceToSingletonScore creates a score based on a UnaryFunction object, +OverallPositionRestraint assumes a harmonic restraint. 
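Concretely, the penalty is zero while the particle (optionally shrunk by its radius) lies within tolerance of the anchor point, and grows quadratically outside it, scaled by 1/sigma; a sketch matching the implementation later in this patch:

import math

def overall_position_score(p, start, radius=0.0, tolerance=0.0, sigma=1.0):
    # p and start are (x, y, z); radius is used only when
    # consider_radius is set on the restraint
    diff = math.dist(p, start) - radius - tolerance
    return diff * diff / sigma if diff > 0 else 0.0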
+\param[in] x_start x position to restrain to +\param[in] y_start y position to restrain to +\param[in] z_start z position to restrain to +\param[in] tolerance range of distances where restraint=0 +\param[in] consider_radius bool, consider the radius of the particle +\param[in] sigma inverse strength of harmonic potential + */ +class IMPNPCEXPORT OverallPositionRestraint : public Restraint +{ + IMP::PointerMember sc_; + double x_start_; + double y_start_; + double z_start_; + double tolerance_; + double sigma_; + bool consider_radius_; + + friend class cereal::access; + template void serialize(Archive &ar) { + ar(cereal::base_class(this), + sc_, x_start_, y_start_, z_start_, tolerance_, sigma_, consider_radius_); + } + IMP_OBJECT_SERIALIZE_DECL(OverallPositionRestraint); + +public: + + OverallPositionRestraint(Model *m, + SingletonContainerAdaptor sc, + double x_start, double y_start, double z_start, double tolerance, bool consider_radius, double sigma=1); + OverallPositionRestraint(Model *m, + double x_start, double y_start, double z_start, double tolerance, bool consider_radius, double sigma=1); + OverallPositionRestraint() {} + + +#ifndef IMP_DOXYGEN + void add_particle(Particle *p); + void add_particles(const ParticlesTemp &ps); + void set_particles(const ParticlesTemp &ps); +#endif + + double unprotected_evaluate( + IMP::DerivativeAccumulator *accum) const override; + ModelObjectsTemp do_get_inputs() const override; + + //! \return Information for writing to RMF files + RestraintInfo *get_static_info() const override; + + IMP_OBJECT_METHODS(OverallPositionRestraint);; +}; + + IMPNPC_END_NAMESPACE diff --git a/modules/npc/include/SlabWithPore.h b/modules/npc/include/SlabWithPore.h new file mode 100644 index 0000000000..6f8dcbb7a9 --- /dev/null +++ b/modules/npc/include/SlabWithPore.h @@ -0,0 +1,127 @@ +/** + * \file IMP/npc/SlabWithPore.h + * \brief A decorator for a particle that's a slab with a pore. + * + * Copyright 2007-2018 IMP Inventors. All rights reserved. + * + */ + +#ifndef IMPNPC_SLAB_WITH_PORE_H +#define IMPNPC_SLAB_WITH_PORE_H + +#include "npc_config.h" +#include +#include + +IMPNPC_BEGIN_NAMESPACE + +//! A decorator for a particle that represents a slab containing +//! a cylindrical pore +/** \ingroup helper + \ingroup decorators + */ +class IMPNPCEXPORT SlabWithPore +: public IMP::Decorator +{ + /** Decorate a particle that represents a slab (e.g. nuclear + envelope) with specified thickness and a cylindrical pore of + specified radius. Note that the radius is controlled by set_radius() + as any other XYZR particle, but the XYZ coordinates are ignored for now + (assumed to be 0,0,0). + + The slab is parallel to the x,y plane from z=-0.5*thickness to + z=0.5*thickness, and the central axis of the pore lies on the + origin. + + @param m the model + @param pi the particle index + @param thickness slab thickness + @param pore_radius pore radius + + @note the pore radius is initially not optimizable + */ + static void do_setup_particle(IMP::Model* m, + ParticleIndex pi, + double thickness, + double pore_radius); + + + public: + IMP_DECORATOR_METHODS(SlabWithPore, Decorator); + + + /** Decorate a particle that represents a slab (e.g. nuclear + envelope) with specified thickness and a cylindrical pore of + specified pore_radius. + + The slab is parallel to the x,y plane from z=-0.5*thickness to + z=0.5*thickness, and the central axis of the pore lies on the + origin. 
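Once the SWIG declarations added later in this patch are in place, setup from Python should follow the standard IMP decorator pattern; a sketch with illustrative dimensions:

import IMP
import IMP.npc

m = IMP.Model()
pi = m.add_particle("nuclear envelope")
# 300 A thick slab with a 400 A cylindrical pore (illustrative values)
slab = IMP.npc.SlabWithPore.setup_particle(m, pi, 300.0, 400.0)
slab.set_pore_radius_is_optimized(True)  # not optimized by default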
+ + @param m the model + @param pi the particle index + @param thickness slab thickness + @param pore_radius pore radius + + @note the pore radius is initially not optimizable + */ + IMP_DECORATOR_SETUP_2(SlabWithPore, + double, thickness, + double, pore_radius); + + //! Return true if the particle is an instance of SlabWithPore + static bool get_is_setup(Model *m, ParticleIndex pi) { + return m->get_has_attribute(get_pore_radius_key(), pi) && + m->get_has_attribute(get_thickness_key(), pi); + } + + //! sets slab thickness + void set_thickness(double thickness) { + get_particle()->set_value(get_thickness_key(), + thickness); + } + + //! returns whether the particle last entered the transport moiety from its + //!top + Float get_thickness() const { + return get_particle()->get_value(get_thickness_key()); + } + + //! get cylindrical pore radius + Float get_pore_radius() const { + return get_particle()->get_value(get_pore_radius_key()); + } + + //! set cylindrical pore radius + void set_pore_radius(double r) const { + get_particle()->set_value(get_pore_radius_key(), r); + } + + //! add v to the derivative of the cylindrical pore radius, + //! using derivative accumulator d + void add_to_pore_radius_derivative(double v, DerivativeAccumulator &d) { + get_particle()->add_to_derivative(get_pore_radius_key(), v, d); + } + + bool get_pore_radius_is_optimized() const { + return get_particle()->get_is_optimized(get_pore_radius_key()); + } + //! Set whether the pore radius is optimized + void set_pore_radius_is_optimized(bool tf) const { + get_particle()->set_is_optimized(get_pore_radius_key(), tf); + } + + //! Get the key for the pore thickness. + static FloatKey get_thickness_key(); + + //! Get the key for the pore radius. + static FloatKey get_pore_radius_key(); +}; + + + +IMP_DECORATORS(SlabWithPore, SlabsWithPores, IMP::Decorators); + +IMPNPC_END_NAMESPACE + +#endif /* IMPNPC_SLAB_WITH_PORE_H */ diff --git a/modules/npc/include/SlabWithSphericalIndent.h b/modules/npc/include/SlabWithSphericalIndent.h new file mode 100644 index 0000000000..8482df6d07 --- /dev/null +++ b/modules/npc/include/SlabWithSphericalIndent.h @@ -0,0 +1,103 @@ +/** + * \file IMP/npc/SlabWithSphericalIndent.h + * \brief A decorator for a particle representing a slab with a spherical cap indent. + * + * Copyright 2007-2018 IMP Inventors. All rights reserved. + * + */ + +#ifndef IMPNPC_SLAB_WITH_SPHERICAL_INDENT_H +#define IMPNPC_SLAB_WITH_SPHERICAL_INDENT_H + +#include +#include +#include +#include +#include +#include + +IMPNPC_BEGIN_NAMESPACE + +//! A decorator for a particle that represents a slab containing a spherical cap indent. +/** \ingroup helper + \ingroup decorators +*/ +class IMPNPCEXPORT SlabWithSphericalIndent : public Decorator { + /* Decorate a particle that represents a slab with an indent + * that is modeled as a spherical cap. The spherical cap model is + * specified by two parameters, a radius, R, that is the radius of the + * underlying sphere and a height, h, that is the distance along the + * perpendicular from the intersecting plane to the edge of the sphere. + */ + static StringKey get_name_key(); + //! Returns the name of the particle, which is "SlabWithSphericalIndent" by default + /** The create function should take arguments which allow + the initial state of the Decorator to be reasonable (i.e. + make sure there is a non-empty name). + */ + static void do_setup_particle(Model *m, + ParticleIndex pi, + double R, + double h); + + public: + //! 
return true if the particle has a R and h defined + static bool get_is_setup(Model *m, ParticleIndex pi) { + return m->get_has_attribute(get_sphere_radius_key(), pi) && m->get_has_attribute(get_sphere_depth_key(), pi); + } + + // set radius + void set_sphere_radius(double R) { + get_particle()->set_value(get_sphere_radius_key(), R); + } + + // set sphere depth + void set_sphere_depth(double h) { + get_particle()->set_value(get_sphere_depth_key(), h); + } + + //! return the sphere radius + Float get_sphere_radius() const { + return get_particle()->get_value(get_sphere_radius_key()); + } + + //! return the sphere depth + Float get_sphere_depth() const { + return get_particle()->get_value(get_sphere_depth_key()); + } + + //Float get_base_circle_radius() const { + //return get_particle()->get_value(get_base_circle_radius()); + //} + + //! get the decorator key for sphere radius + static FloatKey get_sphere_radius_key(); + + //! get the decorator key for the sphere depth + static FloatKey get_sphere_depth_key(); + + + //! Get the name added to the particle (Ensures decorator_name to be the same as the name_key) + std::string get_decorator_name() const { + return get_particle()->get_value(get_name_key()); + } + + //! Set the name added to the particle + void set_decorator_name(std::string nm) { + // use the usage check macro to check that functions are called properly + IMP_USAGE_CHECK(!nm.empty(), "The name cannot be empty"); + get_particle()->set_value(get_name_key(), nm); + } + + /* Declare the basic constructors and the cast function.*/ + IMP_DECORATOR_METHODS(SlabWithSphericalIndent, Decorator); + IMP_DECORATOR_SETUP_2(SlabWithSphericalIndent, double, R, double, h); + + +}; // end class + +IMP_DECORATORS(SlabWithSphericalIndent, SlabWithSphericalIndents, IMP::Decorators); + +IMPNPC_END_NAMESPACE + +#endif /* IMPNPC_SLAB_WITH_SPHERICAL_INDENT_H */ diff --git a/modules/npc/include/SlabWithSphericalIndentGeometry.h b/modules/npc/include/SlabWithSphericalIndentGeometry.h new file mode 100644 index 0000000000..2941fbf149 --- /dev/null +++ b/modules/npc/include/SlabWithSphericalIndentGeometry.h @@ -0,0 +1,35 @@ +/** + * \file IMP/npc/SlabWithSphericalIndentGeometry.h + * \brief A geometry for displaying a wireframe model of the surface + * with a spherical cap indent. + * + * Copyright 2007-2024 IMP Inventors. All rights reserved. + * + * */ + +#ifndef IMPNPC_SLAB_WITH_SPHERICAL_INDENT_GEOMETRY_H +#define IMPNPC_SLAB_WITH_SPHERICAL_INDENT_GEOMETRY_H + +#include "npc_config.h" +#include +#include + +IMPNPC_BEGIN_NAMESPACE + +//! Geometry for displaying a wireframe model of the surface with a +//! spherical cap indent. +class IMPNPCEXPORT SlabWithSphericalIndentGeometry : public display::Geometry { + double radius_, depth_, length_; + +public: + SlabWithSphericalIndentGeometry(double radius, double depth, double length); + + virtual IMP::display::Geometries get_components() const override; + + IMP_OBJECT_METHODS(SlabWithSphericalIndentGeometry); + +}; + +IMPNPC_END_NAMESPACE + +#endif /* IMPNPC_SLAB_WITH_SPHERICAL_INDENT_GEOMETRY_H */ diff --git a/modules/npc/include/SlabWithSphericalIndentMBMScore.h b/modules/npc/include/SlabWithSphericalIndentMBMScore.h new file mode 100644 index 0000000000..16912192fb --- /dev/null +++ b/modules/npc/include/SlabWithSphericalIndentMBMScore.h @@ -0,0 +1,57 @@ +/** + * \file IMP/npc/SlabWithSphericalIndentMBMScore.h + * \brief A Score on the distance between a particle and the surface of a spherical indent in a plane. + * + * Copyright 2007-2018 IMP Inventors. 
All rights reserved. + */ + +#ifndef IMPNPC_SLAB_WITH_SPHERICAL_INDENT_MBM_SCORE_H +#define IMPNPC_SLAB_WITH_SPHERICAL_INDENT_MBM_SCORE_H + +#include "npc_config.h" +#include "SlabWithSphericalIndent.h" +#include +#include +#include +#include +#include + +IMPNPC_BEGIN_NAMESPACE + +//! Function to apply a harmonic restraint between a SlabWithSphericalIndent object and +//! the membrane binding motifs (MBM) of a protein +/** The source code is as follows: + \include SlabWithSphericalIndentMBMScore.h + \include SlabWithSphericalIndentMBMScore.cpp +*/ +class IMPNPCEXPORT SlabWithSphericalIndentMBMScore : public PairScore { +double x0_, k_; + +//private: +//inline double get_surface_distance(algebra::Vector3D* out_translation) const; + +public: +//! Constructs a horizontal slab with a toroidal pore, +//! centered at the z=0 plane +/** + Constructs a score over a horizontal slab with a spherical indent + + @param k the slab repulsive force constant in kcal/mol/A +*/ +SlabWithSphericalIndentMBMScore(double x0, double k); +virtual double evaluate_index(Model *m, + const ParticleIndexPair &p, + DerivativeAccumulator *da) const override; + +virtual ModelObjectsTemp do_get_inputs( + Model *m, const ParticleIndexes &pis) const override; + +IMP_PAIR_SCORE_METHODS(SlabWithSphericalIndentMBMScore); +IMP_OBJECT_METHODS(SlabWithSphericalIndentMBMScore); +}; + +IMP_OBJECTS(SlabWithSphericalIndentMBMScore, SlabWithSphericalIndentMBMScores); + +IMPNPC_END_NAMESPACE + +#endif /* IMPNPC_SLAB_WITH_SPHERICAL_INDENT_MBM_SCORE_H */ diff --git a/modules/npc/include/SlabWithToroidalPore.h b/modules/npc/include/SlabWithToroidalPore.h new file mode 100644 index 0000000000..e64d40bc94 --- /dev/null +++ b/modules/npc/include/SlabWithToroidalPore.h @@ -0,0 +1,144 @@ +/** + * \file IMP/npc/SlabWithToroidalPore.h + * \brief A decorator for a particle that's a slab with a toroidal pore. + * + * Copyright 2007-2018 IMP Inventors. All rights reserved. + * + */ + +#ifndef IMPNPC_SLAB_WITH_TOROIDAL_PORE_H +#define IMPNPC_SLAB_WITH_TOROIDAL_PORE_H + +#include "npc_config.h" +#include "SlabWithPore.h" +#include +#include + +IMPNPC_BEGIN_NAMESPACE + +//! A decorator for a particle that represents a slab containing +//! a toroidal pore +/** \ingroup helper + \ingroup decorators + */ +class IMPNPCEXPORT SlabWithToroidalPore +: public SlabWithPore +{ + /** Decorate a particle that represents a slab (e.g. nuclear + envelope) with specified thickness and a toroidal pore of + specified major radius and thickness/2.0 minor radius. + Note that the radius is controlled by set_pore_radius() + as any other XYZR particle, but the XYZ coordinates are ignored for now + (assumed to be 0,0,0). + + The slab is parallel to the x,y plain from z=-0.5*thickness to + z=0.5*thickness, and the central axis of the pore lies on the + origin. + + @param m the model + @param pi the particle index + @param thickness slab thickness, also twice the minor_radius + @param major_radius pore major radius + */ + static void do_setup_particle(IMP::Model* m, + ParticleIndex pi, + double thickness, + double major_radius, + double minor_radius_h2v_aspect_ratio=1.0); + + + public: + IMP_DECORATOR_METHODS(SlabWithToroidalPore, SlabWithPore); + + + /** Decorate a particle that represents a slab (e.g. nuclear + envelope) with specified thickness and a toroidal pore of + specified major radius and minor radius of 0.5*thickness, + with equal horizontal and vertical minor radii. 
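From Python, the two setup overloads should look as follows (a sketch; dimensions are illustrative):

import IMP
import IMP.npc

m = IMP.Model()
pi = m.add_particle("toroidal membrane")
# 300 A thick slab, 540 A pore major radius; the minor radius is
# thickness/2 = 150 A
torus = IMP.npc.SlabWithToroidalPore.setup_particle(m, pi, 300.0, 540.0)
# or, with a horizontal minor radius 1.3x the vertical one:
# torus = IMP.npc.SlabWithToroidalPore.setup_particle(m, pi, 300.0, 540.0, 1.3)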
+ + The slab is parallel to the x,y plain from z=-0.5*thickness to + z=0.5*thickness, and the central axis of the pore lies on the + origin. + + @param m the model + @param pi the particle index + @param thickness slab thickness, also twice the minor radius + @param major_radius pore major radius + */ + IMP_DECORATOR_SETUP_2(SlabWithToroidalPore, + double, thickness, + double, major_radius); + + /** Decorate a particle that represents a slab (e.g. nuclear + envelope) with specified thickness and a toroidal pore of + specified major radius and minor radius of 0.5*thickness, + and specified ratio between horizontal and vertical minor + radii. + + The slab is parallel to the x,y plain from z=-0.5*thickness to + z=0.5*thickness, and the central axis of the pore lies on the + origin. + + @param m the model + @param pi the particle index + @param thickness slab thickness, also twice the vertical + minor radius + @param major_radius pore major radius + @param minor_radius_h2v_aspect_ratio + ratio between horizontal and vertical minor radius + */ + IMP_DECORATOR_SETUP_3(SlabWithToroidalPore, + double, thickness, + double, major_radius, + double, minor_radius_h2v_aspect_ratio); + + //! Return true if the particle is an instance of SlabWithToroidalPore + static bool get_is_setup(Model *m, ParticleIndex pi) { + return SlabWithPore::get_is_setup(m, pi) && + m->get_has_attribute(get_minor_radius_h2v_aspect_ratio_key(), pi) && + m->get_has_attribute(get_toroidal_pore_key(), pi); + } + + void set_minor_radius_h2v_aspect_ratio(double aspect_ratio){ + get_particle()->set_value + (get_minor_radius_h2v_aspect_ratio_key(), + aspect_ratio); + } + + double get_minor_radius_h2v_aspect_ratio() const{ + return get_particle()->get_value + (get_minor_radius_h2v_aspect_ratio_key()); + } + + double get_vertical_minor_radius() const{ + return get_thickness()*0.5; + } + + void set_vertical_minor_radius(double rv) { + set_thickness(2.0*rv); + } + + double get_horizontal_minor_radius() const{ + return get_vertical_minor_radius()*get_minor_radius_h2v_aspect_ratio(); + } + + void set_horizontal_minor_radius(double rh) { + set_minor_radius_h2v_aspect_ratio + ( rh/ get_vertical_minor_radius() ); + } + + //! Get the decorator key for the ratio between horizontal and vertical aspect ratio + static FloatKey get_minor_radius_h2v_aspect_ratio_key(); + + //! Get the decorator key indicating a toroidal pore + static IntKey get_toroidal_pore_key(); + +}; + + + +IMP_DECORATORS(SlabWithToroidalPore, SlabsWithToroidalPores, IMP::SlabsWithPores); + +IMPNPC_END_NAMESPACE + +#endif /* IMPNPC_SLAB_WITH_TOROIDAL_PORE_H */ diff --git a/modules/npc/include/SlabWithToroidalPoreGeometry.h b/modules/npc/include/SlabWithToroidalPoreGeometry.h new file mode 100644 index 0000000000..84f7dd1585 --- /dev/null +++ b/modules/npc/include/SlabWithToroidalPoreGeometry.h @@ -0,0 +1,50 @@ +/** + * \file SlabWithToroidalPoreGeometry.h + * \brief A decorator for a particle representing a toroidal pore + * + * Copyright 2007-8 Sali Lab. All rights reserved. + */ + +#ifndef IMPNPC_SLAB_WITH_TOROIDAL_PORE_GEOMETRY_H +#define IMPNPC_SLAB_WITH_TOROIDAL_PORE_GEOMETRY_H + +#include "npc_config.h" +#include +#include + +IMPNPC_BEGIN_NAMESPACE + +//! A decorator for a particle that represents a toroidal pore. 
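(Despite the brief above, this class is a display geometry rather than a decorator.) Once wrapped, it can be handed to any IMP.display writer; a sketch with illustrative dimensions:

import IMP.display
import IMP.npc

# wireframe of a 300 A slab with a 540 A major-radius ring pore,
# drawn over a 2000 x 2000 A patch
g = IMP.npc.SlabWithToroidalPoreWireGeometry(300.0, 540.0, 2000.0)
w = IMP.display.PymolWriter("slab.pym")
w.add_geometry(g)
del w  # writers flush on destruction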
+/** \ingroup helper + \ingroup decorators +*/ +class IMPNPCEXPORT SlabWithToroidalPoreWireGeometry : public display::Geometry { + double rv_; // minor radius in vertical direction (vertical semi-axis) + double rh_; // minor radius in horizontal direction (horizontal semi-axis) + double R_; // major radius + double slab_length_; // length of slab edge + + public: + //! Slab with specified height from top to bottom, slab_length x slab_length area, + //! and an elliptic toroidal pore of specified major radius, + //! slab_height/2.0 vertical semi-axis, specified horizontal semi-axis + SlabWithToroidalPoreWireGeometry(double slab_height, + double major_radius, + double horizontal_semiaxis, + double slab_length); + + //! Slab with specified height from top to bottom, slab_length x slab_length area, + //! and a ring toroidal pore of specified major radius and slab_height/2.0 minor radius + SlabWithToroidalPoreWireGeometry(double slab_height, double major_radius, double slab_length); + + //! returns the set of geometric components that comprise this geometry + //! (for e.g. storing in RMF format) + virtual IMP::display::Geometries get_components() const override; + + IMP_OBJECT_METHODS(SlabWithToroidalPoreWireGeometry); +}; + + +IMPNPC_END_NAMESPACE + +#endif /* IMPNPC_SLAB_WITH_TOROIDAL_PORE_GEOMETRY_H */ diff --git a/modules/npc/include/SlabWithToroidalPoreGoPairScore.h b/modules/npc/include/SlabWithToroidalPoreGoPairScore.h new file mode 100644 index 0000000000..3a6f50a469 --- /dev/null +++ b/modules/npc/include/SlabWithToroidalPoreGoPairScore.h @@ -0,0 +1,57 @@ +/** + * \file IMP/npc/SlabWithToroidalPoreGoPairScore.h + * \brief a go-like score for a slab with a toroidal pore + * + + * Copyright 2007-2018 IMP Inventors. All rights reserved. + * + */ + +#ifndef IMPNPC_SLAB_WITH_TOROIDAL_PORE_GO_PAIR_SCORE_H +#define IMPNPC_SLAB_WITH_TOROIDAL_PORE_GO_PAIR_SCORE_H + +#include "npc_config.h" +#include "SlabWithToroidalPore.h" +#include +#include +#include +#include +#include +#include "IMP/core/XYZR.h" + +IMPNPC_BEGIN_NAMESPACE + +//! apply a harmonic to the distance between a particle and the normal to toroidal membrane +/** The source code is as follows: + * \include SlabWithToroidalPoreGoPairScore.h + * \include SlabWithToroidalPoreGoPairScore.cpp + */ + +// scores a go-like harmonic interaction based on normal distance to closest point +class IMPNPCEXPORT SlabWithToroidalPoreGoPairScore : public PairScore { + double x0_, k_; + +public: +/** + * Constructs a score over a horizontal slab with a toroidal indent. 
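As with the other pair scores in this patch, the score is evaluated on a (slab particle, bead) pair, typically through IMP.core.PairRestraint; a sketch (the particle ordering within the pair is an assumption here, and all values are illustrative):

import IMP
import IMP.algebra
import IMP.core
import IMP.npc

m = IMP.Model()
slab = m.add_particle("membrane")
IMP.npc.SlabWithToroidalPore.setup_particle(m, slab, 300.0, 540.0)
bead = m.add_particle("bead")
IMP.core.XYZR.setup_particle(
    m, bead, IMP.algebra.Sphere3D(IMP.algebra.Vector3D(0, 0, 200), 10))
ps = IMP.npc.SlabWithToroidalPoreGoPairScore(5.0, 1.0)  # x0, k
r = IMP.core.PairRestraint(m, ps, (slab, bead))
print(r.evaluate(False))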
+ + @param k the slab repulsive force constant in kcal/mol/A + */ +SlabWithToroidalPoreGoPairScore(double x0, double k); +virtual double evaluate_index(Model *m, + const ParticleIndexPair &p, + DerivativeAccumulator *da) const override; + +virtual ModelObjectsTemp do_get_inputs(Model *m, + const ParticleIndexes &pis) const override; + +IMP_PAIR_SCORE_METHODS(SlabWithToroidalPoreGoPairScore); +IMP_OBJECT_METHODS(SlabWithToroidalPoreGoPairScore); + +}; + +IMP_OBJECTS(SlabWithToroidalPoreGoPairScore, SlabWithToroidalPoreGoPairScores); + +IMPNPC_END_NAMESPACE + +#endif /* IMPNPC_SLAB_WITH_TOROIDAL_PORE_GO_PAIR_SCORE_H */ \ No newline at end of file diff --git a/modules/npc/include/SlabWithToroidalPoreMBMScore.h b/modules/npc/include/SlabWithToroidalPoreMBMScore.h new file mode 100644 index 0000000000..7b3128692d --- /dev/null +++ b/modules/npc/include/SlabWithToroidalPoreMBMScore.h @@ -0,0 +1,57 @@ +/** + * \file IMP/npc/SlabWithToroidalPoreMBMScore.h + * \brief a go-like score for a slab with a toroidal pore + * + + * Copyright 2007-2018 IMP Inventors. All rights reserved. + * + */ + +#ifndef IMPNPC_SLAB_WITH_TOROIDAL_PORE_MBM_SCORE_H +#define IMPNPC_SLAB_WITH_TOROIDAL_PORE_MBM_SCORE_H + +#include "npc_config.h" +#include "SlabWithToroidalPore.h" +#include +#include +#include +#include +#include +#include "IMP/core/XYZR.h" + +IMPNPC_BEGIN_NAMESPACE + +//! apply harmonic walls to the distance between a particle and the normal to toroidal membrane +/** The source code is as follows: + * \include SlabWithToroidalPoreMBMScore.h + * \include SlabWithToroidalPoreMBMScore.cpp + */ + +// scores a go-like harmonic interaction based on normal distance to closest point +class IMPNPCEXPORT SlabWithToroidalPoreMBMScore : public PairScore { + double x0_upper_, x0_lower_, k_; + +public: +/** + * Constructs a score over a horizontal slab with a toroidal indent. + + @param k the slab repulsive force constant in kcal/mol/A + */ +SlabWithToroidalPoreMBMScore(double x0_upper, double x0_lower, double k); +virtual double evaluate_index(Model *m, + const ParticleIndexPair &p, + DerivativeAccumulator *da) const override; + +virtual ModelObjectsTemp do_get_inputs(Model *m, + const ParticleIndexes &pis) const override; + +IMP_PAIR_SCORE_METHODS(SlabWithToroidalPoreMBMScore); +IMP_OBJECT_METHODS(SlabWithToroidalPoreMBMScore); + +}; + +IMP_OBJECTS(SlabWithToroidalPoreMBMScore, SlabWithToroidalPoreMBMScores); + +IMPNPC_END_NAMESPACE + +#endif /* IMPNPC_SLAB_WITH_TOROIDAL_PORE_MBM_SCORE_H */ diff --git a/modules/npc/include/SphericalIndentSurfaceDepthPairScore.h b/modules/npc/include/SphericalIndentSurfaceDepthPairScore.h new file mode 100644 index 0000000000..3ed268117b --- /dev/null +++ b/modules/npc/include/SphericalIndentSurfaceDepthPairScore.h @@ -0,0 +1,57 @@ +/** + * \file IMP/npc/SphericalIndentSurfaceDepthPairScore.h + * \brief A Score on the distance between a particle and the surface of a spherical indent in a plane. + * + * Copyright 2007-2018 IMP Inventors. All rights reserved. + */ + +#ifndef IMPNPC_SPHERICAL_INDENT_SURFACE_DEPTH_PAIR_SCORE_H +#define IMPNPC_SPHERICAL_INDENT_SURFACE_DEPTH_PAIR_SCORE_H + +#include "npc_config.h" +#include "SlabWithSphericalIndent.h" +#include +#include +#include +#include +#include + +IMPNPC_BEGIN_NAMESPACE + +//! Applies a harmonic excluded volume restraint between a SlabWithSphericalIndent object +//! 
and another Particle +/** The source code is as follows: + \include SphericalIndentSurfaceDepthPairScore.h + \include SphericalIndentSurfaceDepthPairScore.cpp +*/ +class IMPNPCEXPORT SphericalIndentSurfaceDepthPairScore : public PairScore { +double k_; + +//private: +//inline double get_surface_distance(algebra::Vector3D* out_translation) const; + +public: +//! Constructs a horizontal slab with a toroidal pore, +//! centered at the z=0 plane +/** + Constructs a score over a horizontal slab with a spherical indent + + @param k the slab repulsive force constant in kcal/mol/A +*/ +SphericalIndentSurfaceDepthPairScore(double k); +virtual double evaluate_index(Model *m, + const ParticleIndexPair &p, + DerivativeAccumulator *da) const override; + +virtual ModelObjectsTemp do_get_inputs( + Model *m, const ParticleIndexes &pis) const override; + +IMP_PAIR_SCORE_METHODS(SphericalIndentSurfaceDepthPairScore); +IMP_OBJECT_METHODS(SphericalIndentSurfaceDepthPairScore); +}; + +IMP_OBJECTS(SphericalIndentSurfaceDepthPairScore, SphericalIndentSurfaceDepthPairScores); + +IMPNPC_END_NAMESPACE + +#endif /* IMPNPC_SPHERICAL_INDENT_SURFACE_DEPTH_PAIR_SCORE_H */ diff --git a/modules/npc/include/ToroidalPoreSurfaceDepthPairScore.h b/modules/npc/include/ToroidalPoreSurfaceDepthPairScore.h new file mode 100644 index 0000000000..7f1f9b5e60 --- /dev/null +++ b/modules/npc/include/ToroidalPoreSurfaceDepthPairScore.h @@ -0,0 +1,57 @@ +/** + * \file IMP/npc/ToroidalPoreSurfaceDepthPairScore.h + * \brief a go-like score for a slab with a toroidal pore + * + + * Copyright 2007-2018 IMP Inventors. All rights reserved. + * + */ + +#ifndef IMPNPC_TOROIDAL_PORE_SURFACE_DEPTH_PAIR_SCORE_H +#define IMPNPC_TOROIDAL_PORE_SURFACE_DEPTH_PAIR_SCORE_H + +#include "npc_config.h" +#include "SlabWithToroidalPore.h" +#include +#include +#include +#include +#include +#include "IMP/core/XYZR.h" + +IMPNPC_BEGIN_NAMESPACE + +//! apply repulsive force to the distance between a particle and the normal to toroidal membrane +/** The source code is as follows: + * \include ToroidalPoreSurfaceDepthPairScore.h + * \include ToroidalPoreSurfaceDepthPairScore.cpp + */ + +// scores a go-like harmonic interaction based on normal distance to closest point +class IMPNPCEXPORT ToroidalPoreSurfaceDepthPairScore : public PairScore { +double k_; + +public: +/** + * Constructs a score over a horizontal slab with a toroidal indent. 
+ + @param k the slab repulsive force constant in kcal/mol/A + */ +ToroidalPoreSurfaceDepthPairScore(double k); +virtual double evaluate_index(Model *m, + const ParticleIndexPair &p, + DerivativeAccumulator *da) const override; + +virtual ModelObjectsTemp do_get_inputs(Model *m, + const ParticleIndexes &pis) const override; + +IMP_PAIR_SCORE_METHODS(ToroidalPoreSurfaceDepthPairScore); +IMP_OBJECT_METHODS(ToroidalPoreSurfaceDepthPairScore); + +}; + +IMP_OBJECTS(ToroidalPoreSurfaceDepthPairScore, ToroidalPoreSurfaceDepthPairScores); + +IMPNPC_END_NAMESPACE + +#endif /* IMPNPC_TOROIDAL_PORE_SURFACE_DEPTH_PAIR_SCORE_H */ \ No newline at end of file diff --git a/modules/npc/pyext/swig.i-in b/modules/npc/pyext/swig.i-in index e934392a0c..dc960478e8 100644 --- a/modules/npc/pyext/swig.i-in +++ b/modules/npc/pyext/swig.i-in @@ -19,7 +19,31 @@ IMP_SWIG_OBJECT_SERIALIZE( IMP::npc, YAxialPositionLowerRestraint, YAxialPositio IMP_SWIG_OBJECT_SERIALIZE( IMP::npc, YAxialPositionRestraint, YAxialPositionRestraints); IMP_SWIG_OBJECT_SERIALIZE( IMP::npc, YAxialPositionUpperRestraint, YAxialPositionUpperRestraints); IMP_SWIG_OBJECT_SERIALIZE( IMP::npc, ZAxialPositionRestraint, ZAxialPositionRestraints); +IMP_SWIG_OBJECT_SERIALIZE( IMP::npc, XAxialPositionLowerRestraint, XAxialPositionLowerRestraints); +IMP_SWIG_OBJECT_SERIALIZE( IMP::npc, XAxialPositionRestraint, XAxialPositionRestraints); +IMP_SWIG_OBJECT_SERIALIZE( IMP::npc, XAxialPositionUpperRestraint, XAxialPositionUpperRestraints); +IMP_SWIG_OBJECT_SERIALIZE( IMP::npc, OverallPositionRestraint, OverallPositionRestraints); +IMP_SWIG_DECORATOR(IMP::npc, SlabWithSphericalIndent, SlabWithSphericalIndents); +IMP_SWIG_OBJECT(IMP::npc, SlabWithSphericalIndentMBMScore, SlabWithSphericalIndentMBMScores); +IMP_SWIG_OBJECT(IMP::npc, SlabWithSphericalIndentGeometry, SlabWithSphericalIndentGeometries); +IMP_SWIG_OBJECT(IMP::npc, SphericalIndentSurfaceDepthPairScore, SphericalIndentSurfaceDepthPairScores); +IMP_SWIG_OBJECT(IMP::npc, SlabWithToroidalPoreMBMScore, SlabWithToroidalPoreMBMScores); +IMP_SWIG_OBJECT(IMP::npc, SlabWithToroidalPoreGoPairScore, SlabWithToroidalPoreGoPairScores); +IMP_SWIG_OBJECT(IMP::npc, ToroidalPoreSurfaceDepthPairScore, ToroidalPoreSurfaceDepthPairScores); +IMP_SWIG_DECORATOR(IMP::npc, SlabWithPore, SlabWithPores); +IMP_SWIG_DECORATOR(IMP::npc, SlabWithToroidalPore, SlabWithToroidalPores); +IMP_SWIG_OBJECT(IMP::npc, SlabWithToroidalPoreWireGeometry, SlabWithToroidalPoreWireGeometries); %include "IMP/npc/ProteinLocalizationRestraint.h" %include "IMP/npc/CompositeRestraint.h" %include "IMP/npc/MinimumSphereDistancePairScore.h" +%include "IMP/npc/SlabWithSphericalIndent.h" +%include "IMP/npc/SlabWithSphericalIndentMBMScore.h" +%include "IMP/npc/SlabWithSphericalIndentGeometry.h" +%include "IMP/npc/SphericalIndentSurfaceDepthPairScore.h" +%include "IMP/npc/SlabWithToroidalPoreMBMScore.h" +%include "IMP/npc/SlabWithToroidalPoreGoPairScore.h" +%include "IMP/npc/ToroidalPoreSurfaceDepthPairScore.h" +%include "IMP/npc/SlabWithPore.h" +%include "IMP/npc/SlabWithToroidalPore.h" +%include "IMP/npc/SlabWithToroidalPoreGeometry.h" diff --git a/modules/npc/src/CompositeRestraint.cpp b/modules/npc/src/CompositeRestraint.cpp index 651d2870e0..236e57f40a 100644 --- a/modules/npc/src/CompositeRestraint.cpp +++ b/modules/npc/src/CompositeRestraint.cpp @@ -208,8 +208,8 @@ double CompositeRestraint::unprotected_evaluate(DerivativeAccumulator *accum) IMP_LOG_VERBOSE("Minimum subtree is ["); for (const ParticleIndexPair &it : pis) { IMP_UNUSED(it); - 
IMP_LOG_VERBOSE("(" << get_model()->get_particle_name(it[0]) - << ", " << get_model()->get_particle_name(it[1]) + IMP_LOG_VERBOSE("(" << get_model()->get_particle_name(std::get<0>(it)) + << ", " << get_model()->get_particle_name(std::get<1>(it)) << ") "); } IMP_LOG_VERBOSE("]" << std::endl); diff --git a/modules/npc/src/MinimumSphereDistancePairScore.cpp b/modules/npc/src/MinimumSphereDistancePairScore.cpp index 4c91c4d529..754d4b8471 100644 --- a/modules/npc/src/MinimumSphereDistancePairScore.cpp +++ b/modules/npc/src/MinimumSphereDistancePairScore.cpp @@ -16,8 +16,8 @@ IMPNPC_BEGIN_NAMESPACE double MinimumSphereDistancePairScore::evaluate_index(Model *m, const ParticleIndexPair &pi, DerivativeAccumulator *da) const { - core::XYZR d0(m, pi[0]); - core::XYZR d1(m, pi[1]); + core::XYZR d0(m, std::get<0>(pi)); + core::XYZR d1(m, std::get<1>(pi)); algebra::Vector3D c0 = d0.get_coordinates(); algebra::Vector3D c1_orig = d1.get_coordinates(), c1_min = c1_orig; double dist2_min = algebra::get_squared_distance(c0, c1_orig); diff --git a/modules/npc/src/ProteinLocalizationRestraint.cpp b/modules/npc/src/ProteinLocalizationRestraint.cpp index e3e265ec9d..133072113e 100644 --- a/modules/npc/src/ProteinLocalizationRestraint.cpp +++ b/modules/npc/src/ProteinLocalizationRestraint.cpp @@ -1979,6 +1979,422 @@ ModelObjectsTemp ProteinProximityRestraint::do_get_inputs() const { return IMP::get_particles(get_model(), all); } +/*##################################################### +# Restraints setup - Immuno-EM XAxialPositionRestraint - Added by Andrew P. Latham +# Supplementary Table 7. Upper and lower bounds on X-axial restraints of C-terminal bead of nups +# NupType : (min X value, max X value) (in Angstrom) +#####################################################*/ +XAxialPositionRestraint::XAxialPositionRestraint(Model *m, + SingletonContainerAdaptor sc, + double lower_bound, double upper_bound, bool consider_radius, double sigma) + : Restraint(m, "XAxialPositionRestraint %1%") + , lower_bound_(lower_bound) + , upper_bound_(upper_bound) + , sigma_(sigma) + , consider_radius_(consider_radius) +{ + sc_ = sc; +} + +XAxialPositionRestraint::XAxialPositionRestraint(Model *m, + double lower_bound, double upper_bound, bool consider_radius, double sigma) + : Restraint(m, "XAxialPositionRestraint %1%") + , lower_bound_(lower_bound) + , upper_bound_(upper_bound) + , sigma_(sigma) + , consider_radius_(consider_radius) +{ +} + +void XAxialPositionRestraint::set_particles(const ParticlesTemp &ps) { + if (!sc_ && !ps.empty()) { + sc_ = new IMP::internal::StaticListContainer( + ps[0]->get_model(), "X axial list"); + } + get_list(sc_)->set(IMP::internal::get_index(ps)); +} + +void XAxialPositionRestraint::add_particles(const ParticlesTemp &ps) { + if (!sc_ && !ps.empty()) { + sc_ = new IMP::internal::StaticListContainer( + ps[0]->get_model(), "X axial list"); + } + get_list(sc_)->add(IMP::internal::get_index(ps)); +} + +void XAxialPositionRestraint::add_particle(Particle *ps) { + if (!sc_) { + sc_ = new IMP::internal::StaticListContainer( + ps->get_model(), "X axial list"); + } + get_list(sc_)->add(IMP::internal::get_index(ps)); +} + +double +XAxialPositionRestraint::unprotected_evaluate(DerivativeAccumulator *accum) const +{ + IMP_CHECK_OBJECT(sc_.get()); + double v = 0.0; + IMP::ParticlesTemp all_particles = sc_->get(); + for (unsigned int i=0; i < all_particles.size(); ++i ) + { + double r = consider_radius_ ? 
core::XYZR(all_particles[i]).get_radius() : 0.0; + double x = core::XYZR(all_particles[i]).get_coordinate(0); + double x_down = consider_radius_ ? x - r : x; + double x_up = consider_radius_ ? x + r : x; + double x_diff = x_down - lower_bound_; + // check lower_bound_ + if ( x_diff < 0 ) + { + v += x_diff*x_diff; + if ( accum ) + { + algebra::Vector3D dx; + dx[2] = dx[1] = 0.0; + dx[0] = 2.0*x_diff/sigma_; + all_particles[i]->get_model()->add_to_coordinate_derivatives(IMP::internal::get_index(all_particles[i]), dx, *accum); + } + } + // check upper_bound_ + x_diff = x_up - upper_bound_; + if ( x_diff > 0 ) + { + v += x_diff*x_diff; + if ( accum ) + { + algebra::Vector3D dx; + dx[2] = dx[1] = 0.0; + dx[0] = 2.0*x_diff/sigma_; + all_particles[i]->get_model()->add_to_coordinate_derivatives(IMP::internal::get_index(all_particles[i]), dx, *accum); + } + } + } + return v/sigma_; +} + +ModelObjectsTemp XAxialPositionRestraint::do_get_inputs() const { + if ( !sc_ ) + return ModelObjectsTemp(); + ParticleIndexes all = sc_->get_all_possible_indexes(); + return IMP::get_particles(get_model(), all); +} + +RestraintInfo *XAxialPositionRestraint::get_static_info() const { + IMP_NEW(RestraintInfo, ri, ()); + ri->add_string("type", "IMP.npc.XAxialPositionRestraint"); + ri->add_float("lower bound", lower_bound_); + ri->add_float("upper bound", upper_bound_); + ri->add_float("sigma", sigma_); + return ri.release(); +} + +/*##################################################### +# Restraints setup - Immuno-EM XAxialPositionLowerRestraint - Added by Andrew P. Latham +# Supplementary Table 7. Upper and lower bounds on X-axial restraints of C-terminal bead of nups +# NupType : (min X value, max X value) (in Angstrom) +#####################################################*/ +XAxialPositionLowerRestraint::XAxialPositionLowerRestraint(Model *m, + SingletonContainerAdaptor sc, + double lower_bound, bool consider_radius, double sigma) + : Restraint(m, "XAxialPositionLowerRestraint %1%") + , lower_bound_(lower_bound) + , sigma_(sigma) + , consider_radius_(consider_radius) +{ + sc_ = sc; +} + +XAxialPositionLowerRestraint::XAxialPositionLowerRestraint(Model *m, + double lower_bound, bool consider_radius, double sigma) + : Restraint(m, "XAxialPositionLowerRestraint %1%") + , lower_bound_(lower_bound) + , sigma_(sigma) + , consider_radius_(consider_radius) +{ +} + +void XAxialPositionLowerRestraint::set_particles(const ParticlesTemp &ps) { + if (!sc_ && !ps.empty()) { + sc_ = new IMP::internal::StaticListContainer( + ps[0]->get_model(), "X axial list"); + } + get_list(sc_)->set(IMP::internal::get_index(ps)); +} + +void XAxialPositionLowerRestraint::add_particles(const ParticlesTemp &ps) { + if (!sc_ && !ps.empty()) { + sc_ = new IMP::internal::StaticListContainer( + ps[0]->get_model(), "X axial list"); + } + get_list(sc_)->add(IMP::internal::get_index(ps)); +} + +void XAxialPositionLowerRestraint::add_particle(Particle *ps) { + if (!sc_) { + sc_ = new IMP::internal::StaticListContainer( + ps->get_model(), "X axial list"); + } + get_list(sc_)->add(IMP::internal::get_index(ps)); +} + +double +XAxialPositionLowerRestraint::unprotected_evaluate(DerivativeAccumulator *accum) const +{ + IMP_CHECK_OBJECT(sc_.get()); + double v = 0; + IMP::ParticlesTemp all_particles = sc_->get(); + for (unsigned int i = 0; i < all_particles.size(); ++i ) + { + double r = core::XYZR(all_particles[i]).get_radius(); + double x = core::XYZR(all_particles[i]).get_coordinate(0); + double x_down = consider_radius_ ? 
x - r : x; + double x_diff = x_down - lower_bound_; + if ( x_diff < 0 ) + { + v += x_diff*x_diff; + if ( accum ) + { + algebra::Vector3D dx; + dx[2] = dx[1] = 0; + dx[0] = 2*x_diff/sigma_; + all_particles[i]->get_model()->add_to_coordinate_derivatives(IMP::internal::get_index(all_particles[i]), dx, *accum); + } + } + } + return v/sigma_; +} + +ModelObjectsTemp XAxialPositionLowerRestraint::do_get_inputs() const { + if ( !sc_ ) + return ModelObjectsTemp(); + ParticleIndexes all = sc_->get_all_possible_indexes(); + return IMP::get_particles(get_model(), all); +} + +RestraintInfo *XAxialPositionLowerRestraint::get_static_info() const { + IMP_NEW(RestraintInfo, ri, ()); + ri->add_string("type", "IMP.npc.XAxialPositionLowerRestraint"); + ri->add_float("lower bound", lower_bound_); + ri->add_float("sigma", sigma_); + return ri.release(); +} + +/*##################################################### +# Restraints setup - Immuno-EM XAxialPositionUpperRestraint - Added by Andrew P. Latham +# Supplementary Table 7. Upper and lower bounds on X-axial restraints of C-terminal bead of nups +# NupType : (min X value, max X value) (in Angstrom) +#####################################################*/ +XAxialPositionUpperRestraint::XAxialPositionUpperRestraint(Model *m, + SingletonContainerAdaptor sc, + double upper_bound, bool consider_radius, double sigma) + : Restraint(m, "XAxialPositionUpperRestraint %1%") + , upper_bound_(upper_bound) + , sigma_(sigma) + , consider_radius_(consider_radius) +{ + sc_ = sc; +} + +XAxialPositionUpperRestraint::XAxialPositionUpperRestraint(Model *m, + double upper_bound, bool consider_radius, double sigma) + : Restraint(m, "XAxialPositionUpperRestraint %1%") + , upper_bound_(upper_bound) + , sigma_(sigma) + , consider_radius_(consider_radius) +{ +} + +void XAxialPositionUpperRestraint::set_particles(const ParticlesTemp &ps) { + if (!sc_ && !ps.empty()) { + sc_ = new IMP::internal::StaticListContainer( + ps[0]->get_model(), "X axial list"); + } + get_list(sc_)->set(IMP::internal::get_index(ps)); +} + +void XAxialPositionUpperRestraint::add_particles(const ParticlesTemp &ps) { + if (!sc_ && !ps.empty()) { + sc_ = new IMP::internal::StaticListContainer( + ps[0]->get_model(), "X axial list"); + } + get_list(sc_)->add(IMP::internal::get_index(ps)); +} + +void XAxialPositionUpperRestraint::add_particle(Particle *ps) { + if (!sc_) { + sc_ = new IMP::internal::StaticListContainer( + ps->get_model(), "X axial list"); + } + get_list(sc_)->add(IMP::internal::get_index(ps)); +} + +double +XAxialPositionUpperRestraint::unprotected_evaluate(DerivativeAccumulator *accum) const +{ + IMP_CHECK_OBJECT(sc_.get()); + double v = 0; + IMP::ParticlesTemp all_particles = sc_->get(); + for (unsigned int i = 0; i < all_particles.size(); ++i ) + { + double r = consider_radius_ ? 
core::XYZR(all_particles[i]).get_radius() : 0; + double x = core::XYZR(all_particles[i]).get_coordinate(0); + double x_up = x + r; + double x_diff = x_up - upper_bound_; + if ( x_diff > 0 ) + { + v += x_diff*x_diff; + if ( accum ) + { + algebra::Vector3D dx; + dx[2] = dx[1] = 0; + dx[0] = 2*x_diff/sigma_; + all_particles[i]->get_model()->add_to_coordinate_derivatives(IMP::internal::get_index(all_particles[i]), dx, *accum); + } + } + } + return v/sigma_; +} + +ModelObjectsTemp XAxialPositionUpperRestraint::do_get_inputs() const { + if ( !sc_ ) + return ModelObjectsTemp(); + ParticleIndexes all = sc_->get_all_possible_indexes(); + return IMP::get_particles(get_model(), all); +} + +RestraintInfo *XAxialPositionUpperRestraint::get_static_info() const { + IMP_NEW(RestraintInfo, ri, ()); + ri->add_string("type", "IMP.npc.XAxialPositionUpperRestraint"); + ri->add_float("upper bound", upper_bound_); + ri->add_float("sigma", sigma_); + return ri.release(); +} + +/*##################################################### +# Restraints setup - Restrain to a specific position +# Added by Andrew Latham +# x_start, y_start, z_start, tolerance, sigma +# All distances are in Angstrom +# x_start - x position to restrain to +# y_start - y position to restrain to +# z_start - z position to restrain to +# tolerance - range of distances where restraint=0 +# consider_radius - bool, consider the radius of the particle +# sigma - inverse strength of harmonic potential +#####################################################*/ +OverallPositionRestraint::OverallPositionRestraint(Model *m, + SingletonContainerAdaptor sc, + double x_start, double y_start, double z_start, double tolerance, bool consider_radius, double sigma) + : Restraint(m, "OverallPositionRestraint %1%") + , x_start_(x_start) + , y_start_(y_start) + , z_start_(z_start) + , tolerance_(tolerance) + , sigma_(sigma) + , consider_radius_(consider_radius) +{ + sc_ = sc; +} + +OverallPositionRestraint::OverallPositionRestraint(Model *m, + double x_start, double y_start, double z_start, double tolerance, bool consider_radius, double sigma) + : Restraint(m, "OverallPositionRestraint %1%") + , x_start_(x_start) + , y_start_(y_start) + , z_start_(z_start) + , tolerance_(tolerance) + , sigma_(sigma) + , consider_radius_(consider_radius) +{ +} + +void OverallPositionRestraint::set_particles(const ParticlesTemp &ps) { + if (!sc_ && !ps.empty()) { + sc_ = new IMP::internal::StaticListContainer( + ps[0]->get_model(), "Position restraint list"); + } + get_list(sc_)->set(IMP::internal::get_index(ps)); +} + +void OverallPositionRestraint::add_particles(const ParticlesTemp &ps) { + if (!sc_ && !ps.empty()) { + sc_ = new IMP::internal::StaticListContainer( + ps[0]->get_model(), "Position restraint list"); + } + get_list(sc_)->add(IMP::internal::get_index(ps)); +} + +void OverallPositionRestraint::add_particle(Particle *ps) { + if (!sc_) { + sc_ = new IMP::internal::StaticListContainer( + ps->get_model(), "Position restraint list"); + } + get_list(sc_)->add(IMP::internal::get_index(ps)); +} + +double +OverallPositionRestraint::unprotected_evaluate(DerivativeAccumulator *accum) const +{ + IMP_CHECK_OBJECT(sc_.get()); + double v = 0.0; + IMP::ParticlesTemp all_particles = sc_->get(); + for (unsigned int i=0; i < all_particles.size(); ++i ) + { + double r = consider_radius_ ? 
core::XYZR(all_particles[i]).get_radius() : 0.0; + double x = core::XYZR(all_particles[i]).get_coordinate(0); + double y = core::XYZR(all_particles[i]).get_coordinate(1); + double z = core::XYZR(all_particles[i]).get_coordinate(2); + double deltaX = x-x_start_; + double deltaY = y-y_start_; + double deltaZ = z-z_start_; + double radial = std::sqrt(deltaX*deltaX + deltaY*deltaY + deltaZ*deltaZ); + double diff = radial - r - tolerance_; + // if diff <0, within tolerance + if ( diff < 0 ) + { + v += 0; + if ( accum ) + { + algebra::Vector3D dz; + dz[0] = dz[1] = dz[2] = 0.0; + all_particles[i]->get_model()->add_to_coordinate_derivatives(IMP::internal::get_index(all_particles[i]), dz, *accum); + } + } + // if diff <0, not within tolerance + if ( diff > 0 ) + { + v += diff*diff; + if ( accum ) + { + algebra::Vector3D dz; + dz[0] = (2*deltaX*diff) / (radial*sigma_); + dz[1] = (2*deltaY*diff) / (radial*sigma_); + dz[2] = (2*deltaZ*diff) / (radial*sigma_); + all_particles[i]->get_model()->add_to_coordinate_derivatives(IMP::internal::get_index(all_particles[i]), dz, *accum); + } + } + } + return v/sigma_; +} + +ModelObjectsTemp OverallPositionRestraint::do_get_inputs() const { + if ( !sc_ ) + return ModelObjectsTemp(); + ParticleIndexes all = sc_->get_all_possible_indexes(); + return IMP::get_particles(get_model(), all); +} + +RestraintInfo *OverallPositionRestraint::get_static_info() const { + IMP_NEW(RestraintInfo, ri, ()); + ri->add_string("type", "IMP.npc.OverallPositionRestraint"); + ri->add_float("x start", x_start_); + ri->add_float("y start", y_start_); + ri->add_float("z start", z_start_); + ri->add_float("tolerance", tolerance_); + ri->add_float("sigma", sigma_); + return ri.release(); +} + IMP_OBJECT_SERIALIZE_IMPL(IMP::npc::ZAxialPositionRestraint); IMP_OBJECT_SERIALIZE_IMPL(IMP::npc::ZAxialPositionLowerRestraint); IMP_OBJECT_SERIALIZE_IMPL(IMP::npc::ZAxialPositionUpperRestraint); @@ -1991,13 +2407,16 @@ IMP_OBJECT_SERIALIZE_IMPL(IMP::npc::XYRadialPositionUpperRestraint); IMP_OBJECT_SERIALIZE_IMPL(IMP::npc::ProteinContactRestraint); IMP_OBJECT_SERIALIZE_IMPL(IMP::npc::ProteinChainRestraint); IMP_OBJECT_SERIALIZE_IMPL(IMP::npc::MembraneSurfaceLocationRestraint); -IMP_OBJECT_SERIALIZE_IMPL( - IMP::npc::MembraneSurfaceLocationConditionalRestraint); +IMP_OBJECT_SERIALIZE_IMPL(IMP::npc::MembraneSurfaceLocationConditionalRestraint); IMP_OBJECT_SERIALIZE_IMPL(IMP::npc::MembraneExclusionRestraint); IMP_OBJECT_SERIALIZE_IMPL(IMP::npc::PoreSideVolumeLocationRestraint); IMP_OBJECT_SERIALIZE_IMPL(IMP::npc::PerinuclearVolumeLocationRestraint); IMP_OBJECT_SERIALIZE_IMPL(IMP::npc::AssemblySymmetryByDistanceRestraint); IMP_OBJECT_SERIALIZE_IMPL(IMP::npc::AssemblySymmetryByDihedralRestraint); IMP_OBJECT_SERIALIZE_IMPL(IMP::npc::ProteinProximityRestraint); +IMP_OBJECT_SERIALIZE_IMPL(IMP::npc::XAxialPositionRestraint); +IMP_OBJECT_SERIALIZE_IMPL(IMP::npc::XAxialPositionLowerRestraint); +IMP_OBJECT_SERIALIZE_IMPL(IMP::npc::XAxialPositionUpperRestraint); +IMP_OBJECT_SERIALIZE_IMPL(IMP::npc::OverallPositionRestraint); IMPNPC_END_NAMESPACE diff --git a/modules/npc/src/SlabWithPore.cpp b/modules/npc/src/SlabWithPore.cpp new file mode 100644 index 0000000000..c5035af46d --- /dev/null +++ b/modules/npc/src/SlabWithPore.cpp @@ -0,0 +1,40 @@ +/** + * \file SlabWithPore.cpp + * \brief Decorator for slab particle with a cylindrical pore + * + * Copyright 2007-2018 IMP Inventors. All rights reserved. 
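A quick way to convince oneself that the OverallPositionRestraint gradient above (2*delta*diff/(radial*sigma) per coordinate) is consistent with its score is a finite-difference check; a standalone sketch:

import math

def score(p, start, tol, sigma):
    diff = math.dist(p, start) - tol
    return diff * diff / sigma if diff > 0 else 0.0

def grad_x(p, start, tol, sigma):
    radial = math.dist(p, start)
    diff = radial - tol
    if diff <= 0:
        return 0.0
    return 2.0 * (p[0] - start[0]) * diff / (radial * sigma)

p, start, tol, sigma = (10.0, -4.0, 7.0), (0.0, 0.0, 0.0), 2.0, 1.0
h = 1e-6
numeric = (score((p[0] + h, p[1], p[2]), start, tol, sigma)
           - score(p, start, tol, sigma)) / h
print(numeric, grad_x(p, start, tol, sigma))  # should agree to ~1e-5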
diff --git a/modules/npc/src/SlabWithPore.cpp b/modules/npc/src/SlabWithPore.cpp
new file mode 100644
index 0000000000..c5035af46d
--- /dev/null
+++ b/modules/npc/src/SlabWithPore.cpp
@@ -0,0 +1,40 @@
+/**
+ *  \file SlabWithPore.cpp
+ *  \brief Decorator for slab particle with a cylindrical pore
+ *
+ *  Copyright 2007-2024 IMP Inventors. All rights reserved.
+ *
+ */
+
+#include "IMP/npc/SlabWithPore.h"
+
+IMPNPC_BEGIN_NAMESPACE
+
+void
+SlabWithPore::do_setup_particle(IMP::Model* m,
+                                ParticleIndex pi,
+                                double thickness,
+                                double pore_radius)
+{
+  m->add_attribute(get_thickness_key(), pi, thickness, false/*is_optimizable*/);
+  m->add_attribute(get_pore_radius_key(), pi, pore_radius, true/*is_optimizable*/);
+  SlabWithPore(m,pi).set_pore_radius_is_optimized(false); // default
+}
+
+FloatKey SlabWithPore::get_thickness_key() {
+  static FloatKey fk("thickness");
+  return fk;
+}
+
+FloatKey SlabWithPore::get_pore_radius_key() {
+  static FloatKey fk("pore_radius");
+  return fk;
+}
+
+void SlabWithPore::show(std::ostream &out) const {
+  out << "SlabWithPore thickness="
+      << get_thickness()
+      << " ; radius=" << get_pore_radius();
+}
+
+IMPNPC_END_NAMESPACE
diff --git a/modules/npc/src/SlabWithSphericalIndent.cpp b/modules/npc/src/SlabWithSphericalIndent.cpp
new file mode 100644
index 0000000000..f4e7687311
--- /dev/null
+++ b/modules/npc/src/SlabWithSphericalIndent.cpp
@@ -0,0 +1,54 @@
+/**
+ *  \file IMP/npc/SlabWithSphericalIndent.cpp
+ *  \brief Decorator for a surface with a spherical indent.
+ *
+ *  Copyright 2007-2024 IMP Inventors. All rights reserved.
+ *
+ */
+
+#include <IMP/npc/SlabWithSphericalIndent.h>
+#include <cmath>
+
+IMPNPC_BEGIN_NAMESPACE
+
+void
+SlabWithSphericalIndent::do_setup_particle(IMP::Model* m,
+                                           ParticleIndex pi,
+                                           double R,
+                                           double h)
+{
+  // for now set all to non-optimizable
+  m->add_attribute(get_sphere_radius_key(), pi, R, false);
+  m->add_attribute(get_sphere_depth_key(), pi, h, false);
+
+  //double alpha = sqrt();
+
+  //m->add_attribute(get_base_circle_radius_key(), pi, alpha, false);
+}
+
+StringKey SlabWithSphericalIndent::get_name_key() {
+  static StringKey k("SlabWithSphericalIndent");
+  return k;
+}
+
+FloatKey SlabWithSphericalIndent::get_sphere_radius_key() {
+  static FloatKey k("sphere_radius");
+  return k;
+}
+
+FloatKey SlabWithSphericalIndent::get_sphere_depth_key() {
+  static FloatKey k("sphere_depth");
+  return k;
+}
+
+void SlabWithSphericalIndent::show(std::ostream &out) const {
+  out << "SlabWithSphericalIndent"
+      << " radius="
+      << get_sphere_radius()
+      << " depth="
+      << get_sphere_depth();
+}
+
+IMPNPC_END_NAMESPACE
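Both decorators are driven from Python through setup_particle(); a short sketch with the same radius/depth values used in the new unit tests further down:

```python
import IMP
import IMP.npc

m = IMP.Model()
# slab with a spherical indent of radius R = 100 A and depth h = 50 A
indent = IMP.npc.SlabWithSphericalIndent.setup_particle(
    m, IMP.Particle(m), 100.0, 50.0)
print(indent.get_sphere_radius(), indent.get_sphere_depth())  # 100.0 50.0
```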
diff --git a/modules/npc/src/SlabWithSphericalIndentGeometry.cpp b/modules/npc/src/SlabWithSphericalIndentGeometry.cpp
new file mode 100644
index 0000000000..93d6a24232
--- /dev/null
+++ b/modules/npc/src/SlabWithSphericalIndentGeometry.cpp
@@ -0,0 +1,162 @@
+/**
+ *  \file SlabWithSphericalIndentGeometry.cpp
+ *  \brief A geometry for displaying a wireframe model of a surface with a spherical indent.
+ *
+ *  Copyright 2007-2024 IMP Inventors. All rights reserved.
+ *
+ */
+
+#include "IMP/npc/SlabWithSphericalIndentGeometry.h"
+#include <IMP/display/geometry.h>
+#include <IMP/algebra/Segment3D.h>
+#include <IMP/constants.h>
+#include <cmath>
+
+IMPNPC_BEGIN_NAMESPACE
+
+// length is the x-y bounds on displaying the edges of the plane
+SlabWithSphericalIndentGeometry
+::SlabWithSphericalIndentGeometry(double radius, double length, double depth)
+  : Geometry("SlabWithSphericalIndentSurfaceMesh"),
+    radius_(radius), length_(length), depth_(depth)
+{}
+
+display::Geometries SlabWithSphericalIndentGeometry::get_components() const
+{
+  // initialize return geometries
+  display::Geometries ret;
+
+  // put line segments on the outer boundary of the plane
+  double hlen = length_ / 2.0;
+
+  // corners
+  algebra::Vector3D c1(-hlen, hlen, 0.0);
+  algebra::Vector3D c2(hlen, hlen, 0.0);
+  algebra::Vector3D c3(hlen, -hlen, 0.0);
+  algebra::Vector3D c4(-hlen, -hlen, 0.0);
+
+  // top edge
+  ret.push_back(new display::SegmentGeometry(algebra::Segment3D(c1, c2)));
+  // right edge
+  ret.push_back(new display::SegmentGeometry(algebra::Segment3D(c2, c3)));
+  // bottom edge
+  ret.push_back(new display::SegmentGeometry(algebra::Segment3D(c3, c4)));
+  // left edge
+  ret.push_back(new display::SegmentGeometry(algebra::Segment3D(c1, c4)));
+
+  // build the spherical cap as stacked circles of latitude
+  const int ncc = 5;               // number of circles to draw
+  const double dz = depth_ / ncc;  // spacing between circles
+  const int nrseg = 360;           // number of segments per circle
+
+  double z;
+  double x_1, y_1;
+  double x_2, y_2;
+  double alpha;
+
+  for (int i = 0; i < ncc; i++) {
+    z = dz * i;
+
+    // radius of the circle at this z-height, from the spherical-cap formula
+    alpha = sqrt((depth_ - z) * (2.0 * radius_ - (depth_ - z)));
+
+    for (int s = 0; s < nrseg; s++) {
+      x_1 = alpha * cos(2.0 * IMP::PI * s / 360);
+      y_1 = alpha * sin(2.0 * IMP::PI * s / 360);
+      x_2 = alpha * cos(2.0 * IMP::PI * (s+1) / 360);
+      y_2 = alpha * sin(2.0 * IMP::PI * (s+1) / 360);
+      algebra::Segment3D seg(algebra::Vector3D(x_1, y_1, z),
+                             algebra::Vector3D(x_2, y_2, z));
+      ret.push_back(new display::SegmentGeometry(seg));
+    }
+  }
+
+  // build longitudinal arcs and segments extending to the boundary box
+  const int nlseg = 12;  // number of longitudinal arcs to draw
+  double theta, phi;
+  double z_1, z_2;
+  double m;
+
+  phi = asin(sqrt(depth_ * (2.0 * radius_ - depth_)) / radius_);
+  // draw longitudinal arcs
+  for (int i = 0; i < nlseg; i++) {
+    theta = 2.0 * IMP::PI * i / nlseg;
+    for (int s = 0; s < 90; s++) {
+      x_1 = radius_ * sin(phi * s / 90) * cos(theta);
+      y_1 = radius_ * sin(phi * s / 90) * sin(theta);
+      x_2 = radius_ * sin(phi * (s+1) / 90) * cos(theta);
+      y_2 = radius_ * sin(phi * (s+1) / 90) * sin(theta);
+      z_1 = radius_ * cos(phi * s / 90) - (radius_ - depth_);
+      z_2 = radius_ * cos(phi * (s+1) / 90) - (radius_ - depth_);
+
+      algebra::Segment3D seg(algebra::Vector3D(x_1, y_1, z_1),
+                             algebra::Vector3D(x_2, y_2, z_2));
+      ret.push_back(new display::SegmentGeometry(seg));
+    }
+
+    // build segments extending to the boundary
+    x_2 = radius_ * sin(phi) * cos(theta);  // last iteration from inner for
+    y_2 = radius_ * sin(phi) * sin(theta);
+
+    m = y_2 / x_2;
+    // compute intersection of the line with the boundary
+    if (theta <= (IMP::PI / 4.0)) {
+      algebra::Segment3D seg(algebra::Vector3D(x_2, y_2, 0.0),
+                             algebra::Vector3D(hlen, m*hlen, 0.0));
+      ret.push_back(new display::SegmentGeometry(seg));
+    }
+    else if (theta <= (3.0 * IMP::PI / 4.0)) {
+      algebra::Segment3D seg(algebra::Vector3D(x_2, y_2, 0.0),
+                             algebra::Vector3D(hlen / m, hlen, 0.0));
+      ret.push_back(new display::SegmentGeometry(seg));
+    }
+    else if (theta <= (5.0 * IMP::PI / 4.0)) {
+      algebra::Segment3D seg(algebra::Vector3D(x_2, y_2, 0.0),
+                             algebra::Vector3D(-hlen, -m * hlen, 0.0));
+      ret.push_back(new display::SegmentGeometry(seg));
+    }
+    else if (theta <= (7.0 * IMP::PI / 4.0)) {
+      algebra::Segment3D seg(algebra::Vector3D(x_2, y_2, 0.0),
+                             algebra::Vector3D(-hlen / m, -hlen, 0.0));
+      ret.push_back(new display::SegmentGeometry(seg));
+    }
+    else {  // theta <= 2*PI wraps back to the +x boundary
+      algebra::Segment3D seg(algebra::Vector3D(x_2, y_2, 0.0),
+                             algebra::Vector3D(hlen, m*hlen, 0.0));
+      ret.push_back(new display::SegmentGeometry(seg));
+    }
+  }
+
+  return ret;
+}
+
+IMPNPC_END_NAMESPACE
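A sketch of how the wireframe might be viewed from Python, assuming the class is SWIG-wrapped under the same name and following the usual IMP.display writer pattern (the file name and dimensions are illustrative):

```python
import IMP.display
import IMP.npc

# constructor arguments are (radius, length, depth), as in the C++ above
g = IMP.npc.SlabWithSphericalIndentGeometry(100.0, 400.0, 50.0)
w = IMP.display.PymolWriter("indent_surface.pym")
w.add_geometry(g)
del w  # closing the writer flushes the file
```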
diff --git a/modules/npc/src/SlabWithSphericalIndentMBMScore.cpp b/modules/npc/src/SlabWithSphericalIndentMBMScore.cpp
new file mode 100644
index 0000000000..6ab8f3e363
--- /dev/null
+++ b/modules/npc/src/SlabWithSphericalIndentMBMScore.cpp
@@ -0,0 +1,101 @@
+/**
+ *  \file SlabWithSphericalIndentMBMScore.cpp
+ *  \brief A harmonic score on the z distance above a surface with a spherical indent
+ *
+ *  Copyright 2007-2024 IMP Inventors. All rights reserved.
+ */
+
+#include <IMP/npc/SlabWithSphericalIndentMBMScore.h>
+#include <IMP/core/XYZ.h>
+#include <IMP/algebra/Vector3D.h>
+
+IMPNPC_BEGIN_NAMESPACE
+
+SlabWithSphericalIndentMBMScore::SlabWithSphericalIndentMBMScore(double x0, double k)
+  : x0_(x0), k_(k) {}
+
+// return score for a given particle pair
+double SlabWithSphericalIndentMBMScore::evaluate_index(Model *m,
+                                                       const ParticleIndexPair &pip,
+                                                       DerivativeAccumulator *da) const {
+  // check that the first particle is the spherical indent
+  IMP_USAGE_CHECK(SlabWithSphericalIndent::get_is_setup(m, pip[0]),
+                  "First particle passed is not of type SlabWithSphericalIndent");
+
+  // assume both particles have coordinates
+  SlabWithSphericalIndent d1(m, pip[0]);
+  core::XYZ d2(m, pip[1]);
+
+  double x = d2.get_coordinate(0);
+  double y = d2.get_coordinate(1);
+  double z = d2.get_coordinate(2);
+
+  double score = 0.0;
+  double dv;
+
+  // get the indent geometry
+  double R = d1.get_sphere_radius();
+  double h = d1.get_sphere_depth();
+
+  // compute the base circle radius (squared)
+  double a_sqrd = h*(2.0*R-h);
+
+  // compute score for being above the indent
+  if ((x*x+y*y) < a_sqrd) {
+    score = 0.5 * k_ * square(z - (sqrt(R*R - x*x - y*y) + (R-h)));
+    // do derivatives
+    if (da) {
+      dv = -k_ * (z - (sqrt(R*R - x*x - y*y) + (R-h)));
+      algebra::Vector3D udelta = algebra::Vector3D(0.0, 0.0, 1.0);
+      d2.add_to_derivatives(udelta * dv, *da);
+    }
+  }
+  // compute score for being outside the indent
+  else {
+    score = 0.5 * k_ * square(z - x0_);
+    // do derivatives
+    if (da) {
+      dv = -k_ * (z - x0_);
+      algebra::Vector3D udelta = algebra::Vector3D(0.0, 0.0, 1.0);
+      d2.add_to_derivatives(udelta * dv, *da);
+    }
+  }
+
+  return score;
+}
+
+ModelObjectsTemp SlabWithSphericalIndentMBMScore::do_get_inputs(
+    Model *m, const ParticleIndexes &pis) const {
+  return IMP::get_particles(m, pis);
+}
+
+IMPNPC_END_NAMESPACE
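The two branches can be sanity-checked by hand against test_indent_membrane_MBM below (bead at (100, 100, -12); R = 100, h = 50, x0 = 0, k = 4). A standalone transcription of the arithmetic:

```python
from math import sqrt

R, h, x0, k = 100.0, 50.0, 0.0, 4.0
x, y, z = 100.0, 100.0, -12.0

a_sqrd = h * (2.0 * R - h)         # base-circle radius squared = 7500
if x * x + y * y < a_sqrd:         # under the indent: harmonic on the cap
    score = 0.5 * k * (z - (sqrt(R * R - x * x - y * y) + (R - h))) ** 2
else:                              # over the flat slab: harmonic on z = x0
    score = 0.5 * k * (z - x0) ** 2
print(score)  # 288.0, the value asserted by the test
```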
diff --git a/modules/npc/src/SlabWithToroidalPore.cpp b/modules/npc/src/SlabWithToroidalPore.cpp
new file mode 100644
index 0000000000..a1e1c82b90
--- /dev/null
+++ b/modules/npc/src/SlabWithToroidalPore.cpp
@@ -0,0 +1,45 @@
+/**
+ *  \file SlabWithToroidalPore.cpp
+ *  \brief Decorator for slab particle with a toroidal pore
+ *
+ *  Copyright 2007-2024 IMP Inventors. All rights reserved.
+ *
+ */
+
+#include "IMP/npc/SlabWithToroidalPore.h"
+
+IMPNPC_BEGIN_NAMESPACE
+
+void
+SlabWithToroidalPore::do_setup_particle(IMP::Model* m,
+                                        ParticleIndex pi,
+                                        double thickness,
+                                        double major_radius,
+                                        double minor_radius_h2v_aspect_ratio)
+{
+  SlabWithPore::setup_particle(m, pi, thickness, major_radius);
+  m->add_attribute(get_minor_radius_h2v_aspect_ratio_key(),
+                   pi,
+                   minor_radius_h2v_aspect_ratio,
+                   false); // non-optimizable
+  m->add_attribute(get_toroidal_pore_key(), pi, true);
+}
+
+FloatKey SlabWithToroidalPore::get_minor_radius_h2v_aspect_ratio_key(){
+  static FloatKey k("minor_radius_h2v_aspect_ratio");
+  return k;
+}
+
+IntKey SlabWithToroidalPore::get_toroidal_pore_key(){
+  static IntKey k("toroidal_pore");
+  return k;
+}
+
+void SlabWithToroidalPore::show(std::ostream &out) const {
+  out << "SlabWithToroidalPore";
+  SlabWithPore::show(out);
+}
+
+IMPNPC_END_NAMESPACE
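Setup from Python mirrors the other slab decorators; the values here match the toroid used throughout the new unit tests (this overload defaults the minor-radius aspect ratio to 1.0, i.e. a circular cross-section):

```python
import IMP
import IMP.npc

m = IMP.Model()
# slab of thickness 200 A with a toroidal pore of major radius 50 A
membrane = IMP.npc.SlabWithToroidalPore.setup_particle(
    m, IMP.Particle(m), 200.0, 50.0)
print(membrane.get_thickness())                      # 200.0
print(membrane.get_pore_radius())                    # 50.0
print(membrane.get_minor_radius_h2v_aspect_ratio())  # 1.0
```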
diff --git a/modules/npc/src/SlabWithToroidalPoreGeometry.cpp b/modules/npc/src/SlabWithToroidalPoreGeometry.cpp
new file mode 100644
index 0000000000..5fcd539248
--- /dev/null
+++ b/modules/npc/src/SlabWithToroidalPoreGeometry.cpp
@@ -0,0 +1,109 @@
+/**
+ *  \file SlabWithToroidalPoreGeometry.cpp
+ *  \brief A geometry for displaying a wireframe model of a slab with a toroidal pore.
+ *
+ *  Copyright 2007-2024 IMP Inventors. All rights reserved.
+ *
+ */
+
+#include "IMP/npc/SlabWithToroidalPoreGeometry.h"
+#include <IMP/display/geometry.h>
+
+#include <IMP/algebra/Vector3D.h>
+#include <IMP/algebra/Segment3D.h>
+#ifdef IMP_NPC_USE_IMP_CGAL
+#include
+#endif
+#include <IMP/constants.h>
+#include <cmath>
+#include <algorithm>
+
+IMPNPC_BEGIN_NAMESPACE
+
+//! Slab with specified height from top to bottom, slab_length x slab_length area,
+//! and an elliptic toroidal pore of specified major radius,
+//! slab_height/2.0 vertical semi-axis, specified horizontal semi-axis
+SlabWithToroidalPoreWireGeometry
+::SlabWithToroidalPoreWireGeometry
+(double slab_height,
+ double major_radius,
+ double horizontal_semiaxis,
+ double slab_length)
+  : Geometry("SlabWithToroidalPore"),
+    rv_(slab_height/2.0),
+    rh_(horizontal_semiaxis),
+    R_(major_radius),
+    slab_length_(slab_length)
+{}
+
+// circular cross-section: horizontal semi-axis equals the vertical one
+SlabWithToroidalPoreWireGeometry
+::SlabWithToroidalPoreWireGeometry
+(double slab_height,
+ double major_radius,
+ double slab_length)
+  : Geometry("SlabWithToroidalPore"),
+    rv_(slab_height/2.0),
+    rh_(slab_height/2.0),
+    R_(major_radius),
+    slab_length_(slab_length)
+{}
+
+display::Geometries
+SlabWithToroidalPoreWireGeometry
+::get_components() const
+{
+  display::Geometries ret;
+  // Add trimmed toroidal pore:
+  const int n1= 30;
+  const int n2= 10;
+  for (int i= 1; i <= n1; ++i) { // major radius
+    double f= static_cast<double>(i) / n1;
+    double f1= static_cast<double>(i - 1) / n1;
+    double theta= 2 * IMP::PI * f;
+    double theta1= 2 * IMP::PI * f1;
+    algebra::Vector3D v00(R_ * sin(theta),
+                          R_ * cos(theta),
+                          rv_);
+    for (int j= 0; j <= n2; ++j) { // minor radius
+      double g= static_cast<double>(j) / n2;
+      double omega= IMP::PI * g;
+      double dZ= rv_ * cos(omega);
+      double dXY= R_ - rh_ * sin(omega);
+      algebra::Vector3D v10(dXY * sin(theta),
+                            dXY * cos(theta),
+                            dZ);
+      algebra::Vector3D v11(dXY * sin(theta1),
+                            dXY * cos(theta1),
+                            dZ);
+      ret.push_back(new display::SegmentGeometry(algebra::Segment3D(v00, v10))); // arc down the pore wall
+      ret.push_back(new display::SegmentGeometry(algebra::Segment3D(v10, v11))); // edge between theta and theta1
+      v00=v10;
+      if(j==0 || j == n2) {
+        // Ray to slab edge (the small epsilon avoids division by zero):
+        double isin= 1.0 / (sin(theta)+0.0000001);
+        double icos= 1.0 / (cos(theta)+0.0000001);
+        double d= 0.5 * slab_length_ * std::min(std::abs(isin), std::abs(icos));
+        algebra::Vector3D v2(d * sin(theta),
+                             d * cos(theta),
+                             dZ);
+        ret.push_back(new display::SegmentGeometry(algebra::Segment3D(v10, v2)));
+      }
+    } // j
+  } // i
+  // Add top and bottom slab rectangles:
+  for(int sign=-1; sign<=1; sign+=2){
+    algebra::Vector3D vNE(0.5*slab_length_, 0.5*slab_length_, sign*rv_);
+    algebra::Vector3D vNW(-0.5*slab_length_, 0.5*slab_length_, sign*rv_);
+    algebra::Vector3D vSW(-0.5*slab_length_, -0.5*slab_length_, sign*rv_);
+    algebra::Vector3D vSE(0.5*slab_length_, -0.5*slab_length_, sign*rv_);
+    ret.push_back(new display::SegmentGeometry(algebra::Segment3D(vNE, vNW))); // slab face edge
+    ret.push_back(new display::SegmentGeometry(algebra::Segment3D(vNW, vSW))); // slab face edge
+    ret.push_back(new display::SegmentGeometry(algebra::Segment3D(vSW, vSE))); // slab face edge
+    ret.push_back(new display::SegmentGeometry(algebra::Segment3D(vSE, vNE))); // slab face edge
+  }
+  return ret;
+}
+
+IMPNPC_END_NAMESPACE
diff --git a/modules/npc/src/SlabWithToroidalPoreGoPairScore.cpp b/modules/npc/src/SlabWithToroidalPoreGoPairScore.cpp
new file mode 100644
index 0000000000..164f2fca47
--- /dev/null
+++ b/modules/npc/src/SlabWithToroidalPoreGoPairScore.cpp
@@ -0,0 +1,132 @@
+/**
+ *  \file SlabWithToroidalPoreGoPairScore.cpp
+ *  \brief A Go-like harmonic score on the distance between a particle and
+ *         the surface of a slab with a toroidal pore
+ *
+ *  Copyright 2007-2024 IMP Inventors. All rights reserved.
+ */
+
+#include <IMP/npc/SlabWithToroidalPoreGoPairScore.h>
+#include <IMP/core/XYZ.h>
+#include <IMP/algebra/Vector3D.h>
+
+IMPNPC_BEGIN_NAMESPACE
+
+SlabWithToroidalPoreGoPairScore::SlabWithToroidalPoreGoPairScore(double x0, double k)
+  : x0_(x0), k_(k) {}
+
+// return score for a given particle pair
+double SlabWithToroidalPoreGoPairScore::evaluate_index(Model *m,
+                                                       const ParticleIndexPair &pip,
+                                                       DerivativeAccumulator *da) const {
+  // check that the first particle is the toroidal-pore slab
+  IMP_USAGE_CHECK(SlabWithToroidalPore::get_is_setup(m, pip[0]),
+                  "First particle passed is not of type SlabWithToroidalPore");
+
+  // assume both particles have coordinates
+  SlabWithToroidalPore d1(m, pip[0]);
+  core::XYZ d2(m, pip[1]);
+
+  IMP_USAGE_CHECK_FLOAT_EQUAL(d1.get_minor_radius_h2v_aspect_ratio(), 1.0,
+                              "Toroidal Go-like score must be used with an aspect ratio of 1.0.");
+
+  double x = d2.get_coordinate(0);
+  double y = d2.get_coordinate(1);
+  double z = d2.get_coordinate(2);
+
+  // get the pore radius
+  double rpre = d1.get_pore_radius();
+  // get the slab thickness, i.e. the distance between the planes
+  double zpre = d1.get_thickness();
+
+  double score = 0.0;
+  double dv;
+
+  // squared distance from the pore axis in the xy-plane
+  double rxy = x*x + y*y;
+
+  // if the particle is outside the toroidal pore region, compute the
+  // score as a function of z-height relative to the nearer plane
+  if (rxy > rpre*rpre) {
+    double z0;
+    if (z > 0.0) {
+      z0 = 0.5*zpre + x0_;
+    }
+    else {
+      z0 = -0.5*zpre - x0_;
+    }
+
+    // compute score
+    score += 0.5 * k_ * square(z - z0);
+    // compute derivative
+    if (da) {
+      dv = -k_ * (z - z0);
+      algebra::Vector3D udelta = algebra::Vector3D(0.0, 0.0, 1.0);
+      d2.add_to_derivatives(udelta * dv, *da);
+    }
+  }
+  else {
+    // compute vector towards the radial origin of the toroidal region
+    // ASSUMES NON-ELLIPSOIDAL TOROID, i.e. aspect ratio = 1
+    double r = std::sqrt(rxy);
+    // cutoff to avoid numerical overflow
+    if (r < 1e-9) {
+      r = 1e-9;
+    }
+    double rscl = rpre / r;
+    // compute distance between the particle and the major circle of the torus
+    double dist;
+    dist = std::sqrt((x-rscl*x)*(x-rscl*x) + (y-rscl*y)*(y-rscl*y) + z*z);
+
+    double rmin = d1.get_vertical_minor_radius();
+    double d0 = rmin + x0_;
+
+    // compute score
+    score += 0.5 * k_ * square(dist - d0);
+
+    if (da) {
+      dv = -k_ * (dist - d0);
+      algebra::Vector3D udelta = algebra::Vector3D(x-rscl*x, y-rscl*y, z);
+      d2.add_to_derivatives(udelta * dv, *da);
+    }
+  }
+
+  return score;
+}
+
+ModelObjectsTemp SlabWithToroidalPoreGoPairScore::do_get_inputs(
+    Model *m, const ParticleIndexes &pis) const {
+  return IMP::get_particles(m, pis);
+}
+
+IMPNPC_END_NAMESPACE
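In prose: outside the pore, the Go-like well is centered x0 above (or below) the nearer slab face; inside the pore, it is centered x0 beyond the torus surface, measured from the major circle. A plain-Python transcription of that arithmetic, for reference only (not the IMP API; rmin = thickness/2 follows from the circular cross-section enforced by the usage check above):

```python
from math import sqrt

def go_score(x, y, z, pore_radius, thickness, x0, k):
    """Arithmetic mirror of SlabWithToroidalPoreGoPairScore::evaluate_index."""
    rxy = x * x + y * y
    if rxy > pore_radius ** 2:           # outside the pore region
        z0 = 0.5 * thickness + x0 if z > 0 else -0.5 * thickness - x0
        return 0.5 * k * (z - z0) ** 2
    r = max(sqrt(rxy), 1e-9)             # guard against the pore axis
    rscl = pore_radius / r
    # distance from the major circle of the torus
    dist = sqrt((x - rscl * x) ** 2 + (y - rscl * y) ** 2 + z * z)
    rmin = 0.5 * thickness               # vertical minor radius
    return 0.5 * k * (dist - (rmin + x0)) ** 2
```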
diff --git a/modules/npc/src/SlabWithToroidalPoreMBMScore.cpp b/modules/npc/src/SlabWithToroidalPoreMBMScore.cpp
new file mode 100644
index 0000000000..4efa794d04
--- /dev/null
+++ b/modules/npc/src/SlabWithToroidalPoreMBMScore.cpp
@@ -0,0 +1,166 @@
+/**
+ *  \file SlabWithToroidalPoreMBMScore.cpp
+ *  \brief Harmonic walls keeping a particle within a band of distances
+ *         from the surface of a slab with a toroidal pore
+ *
+ *  Copyright 2007-2024 IMP Inventors. All rights reserved.
+ */
+
+#include <IMP/npc/SlabWithToroidalPoreMBMScore.h>
+#include <IMP/core/XYZ.h>
+#include <IMP/algebra/Vector3D.h>
+
+IMPNPC_BEGIN_NAMESPACE
+
+SlabWithToroidalPoreMBMScore::SlabWithToroidalPoreMBMScore(double x0_upper, double x0_lower, double k)
+  : x0_upper_(x0_upper), x0_lower_(x0_lower), k_(k) {}
+
+// return score for a given particle pair
+double SlabWithToroidalPoreMBMScore::evaluate_index(Model *m,
+                                                    const ParticleIndexPair &pip,
+                                                    DerivativeAccumulator *da) const {
+  // check that the first particle is the toroidal-pore slab
+  IMP_USAGE_CHECK(SlabWithToroidalPore::get_is_setup(m, pip[0]),
+                  "First particle passed is not of type SlabWithToroidalPore");
+
+  // assume both particles have coordinates
+  SlabWithToroidalPore d1(m, pip[0]);
+  core::XYZ d2(m, pip[1]);
+
+  IMP_USAGE_CHECK_FLOAT_EQUAL(d1.get_minor_radius_h2v_aspect_ratio(), 1.0,
+                              "Toroidal MBM score must be used with an aspect ratio of 1.0.");
+
+  double x = d2.get_coordinate(0);
+  double y = d2.get_coordinate(1);
+  double z = d2.get_coordinate(2);
+
+  // get the pore radius
+  double rpre = d1.get_pore_radius();
+  // get the slab thickness, i.e. the distance between the planes
+  double zpre = d1.get_thickness();
+
+  double score = 0.0;
+  // derivative prefactor; stays zero while the particle is inside the band
+  double dv = 0.0;
+
+  // squared distance from the pore axis in the xy-plane
+  double rxy = x*x + y*y;
+
+  // if the particle is outside the toroidal pore region, compute the score
+  // as a function of z-height relative to the plane; specifically, apply
+  // harmonic "walls" to enforce that the particle stays between
+  // x0_lower and x0_upper from the nearer face
+  if (rxy > rpre*rpre) {
+    double z0_upper, z0_lower;
+    if (z > 0.0) {
+      z0_upper = 0.5*zpre + x0_upper_;
+      z0_lower = 0.5*zpre + x0_lower_;
+
+      // compute score for the top face of the membrane
+      if (z > z0_upper) {
+        score += 0.5 * k_ * square(z - z0_upper);
+        dv = -k_ * (z - z0_upper);
+      }
+      else if (z < z0_lower) {
+        score += 0.5 * k_ * square(z - z0_lower);
+        dv = -k_ * (z - z0_lower);
+      }
+    }
+    else {
+      z0_upper = -0.5*zpre - x0_upper_;
+      z0_lower = -0.5*zpre - x0_lower_;
+
+      // compute score for the bottom face of the membrane
+      if (z < z0_upper) {
+        score += 0.5 * k_ * square(z - z0_upper);
+        dv = -k_ * (z - z0_upper);
+      }
+      else if (z > z0_lower) {
+        score += 0.5 * k_ * square(z - z0_lower);
+        dv = -k_ * (z - z0_lower);
+      }
+    }
+
+    // add the derivative using the computed prefactor
+    if (da) {
+      algebra::Vector3D udelta = algebra::Vector3D(0.0, 0.0, 1.0);
+      d2.add_to_derivatives(udelta * dv, *da);
+    }
+  }
+  else {
+    // compute vector towards the radial origin of the toroidal region
+    // ASSUMES NON-ELLIPSOIDAL TOROID, i.e. aspect ratio = 1
+    double r = std::sqrt(rxy);
+    // cutoff to avoid numerical overflow
+    if (r < 1e-9) {
+      r = 1e-9;
+    }
+    double rscl = rpre / r;
+    // compute distance between the particle and the major circle of the torus
+    double dist;
+    dist = std::sqrt((x-rscl*x)*(x-rscl*x) + (y-rscl*y)*(y-rscl*y) + z*z);
+
+    double rmin = d1.get_vertical_minor_radius();
+    double d0_upper = rmin + x0_upper_;
+    double d0_lower = rmin + x0_lower_;
+
+    // compute score and derivative
+    if (dist > d0_upper) {
+      score += 0.5 * k_ * square(dist - d0_upper);
+      dv = -k_ * (dist - d0_upper);
+    }
+    else if (dist < d0_lower) {
+      score += 0.5 * k_ * square(dist - d0_lower);
+      dv = -k_ * (dist - d0_lower);
+    }
+
+    if (da) {
+      algebra::Vector3D udelta = algebra::Vector3D(x-rscl*x, y-rscl*y, z);
+      d2.add_to_derivatives(udelta * dv, *da);
+    }
+  }
+
+  return score;
+}
+
+ModelObjectsTemp SlabWithToroidalPoreMBMScore::do_get_inputs(
+    Model *m, const ParticleIndexes &pis) const {
+  return IMP::get_particles(m, pis);
+}
+
+IMPNPC_END_NAMESPACE
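The flat-slab branch can be checked against test_toroidal_membrane_MBM below, which places a bead at (100, 100, -112) against a 200 A slab (pore radius 50) with x0_upper = 10, x0_lower = 0 and k = 4:

```python
k = 4.0
thickness, x0_upper, x0_lower = 200.0, 10.0, 0.0
x, y, z = 100.0, 100.0, -112.0

# x^2 + y^2 = 20000 > pore_radius^2 = 2500, so the flat-slab branch applies;
# z < 0, so both walls are mirrored below the slab
z0_upper = -0.5 * thickness - x0_upper   # -110
z0_lower = -0.5 * thickness - x0_lower   # -100
score = 0.5 * k * (z - z0_upper) ** 2 if z < z0_upper else 0.0
print(score)  # 8.0, the value asserted by the test
```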
diff --git a/modules/npc/src/SphericalIndentSurfaceDepthPairScore.cpp b/modules/npc/src/SphericalIndentSurfaceDepthPairScore.cpp
new file mode 100644
index 0000000000..d1323d0aaa
--- /dev/null
+++ b/modules/npc/src/SphericalIndentSurfaceDepthPairScore.cpp
@@ -0,0 +1,107 @@
+/**
+ *  \file SphericalIndentSurfaceDepthPairScore.cpp
+ *  \brief A harmonic penalty on particles that penetrate the surface of a
+ *         slab with a spherical indent
+ *
+ *  Copyright 2007-2024 IMP Inventors. All rights reserved.
+ */
+
+#include <IMP/npc/SphericalIndentSurfaceDepthPairScore.h>
+#include <IMP/core/XYZ.h>
+#include <IMP/algebra/Vector3D.h>
+
+IMPNPC_BEGIN_NAMESPACE
+
+SphericalIndentSurfaceDepthPairScore::SphericalIndentSurfaceDepthPairScore(double k) : k_(k) {}
+
+// return score for a given particle pair
+double SphericalIndentSurfaceDepthPairScore::evaluate_index(Model *m,
+                                                            const ParticleIndexPair &pip,
+                                                            DerivativeAccumulator *da) const {
+  // check that the first particle is the spherical indent
+  IMP_USAGE_CHECK(SlabWithSphericalIndent::get_is_setup(m, pip[0]),
+                  "First particle passed is not of type SlabWithSphericalIndent");
+
+  // assume both particles have coordinates
+  SlabWithSphericalIndent d1(m, pip[0]);
+  core::XYZ d2(m, pip[1]);
+
+  double x = d2.get_coordinate(0);
+  double y = d2.get_coordinate(1);
+  double z = d2.get_coordinate(2);
+
+  double score = 0.0;
+  double dv;
+  double r_norm;
+
+  // get the indent geometry
+  double R = d1.get_sphere_radius();
+  double h = d1.get_sphere_depth();
+
+  // compute the base circle radius (squared)
+  double a_sqrd = h*(2.0*R-h);
+
+  // compute score for being above the indent
+  if ((x*x+y*y) < a_sqrd) {
+    // if z is below the plane, ignore
+    if (z < 0.0) {
+      score = 0.0;
+    }
+    else {
+      r_norm = sqrt(x*x + y*y + square(z - R + h));
+      if (r_norm > R) {
+        score = 0.5 * k_ * square(r_norm - R);
+        // do derivatives
+        if (da) {
+          dv = -k_ * (r_norm - R);
+          algebra::Vector3D udelta = algebra::Vector3D(x, y, z-R+h); // vector to the sphere center
+          d2.add_to_derivatives(udelta * dv / r_norm, *da);
+        }
+      }
+      else {
+        score = 0.0;
+      }
+    }
+  }
+  // compute score for being outside the indent
+  else {
+    // if the particle is below the xy-plane, no contribution
+    if (z < 0.0) {
+      score = 0.0;
+    }
+    else {
+      score = 0.5 * k_ * z*z;
+      // do derivatives
+      if (da) {
+        dv = -k_ * z;
+        algebra::Vector3D udelta = algebra::Vector3D(0.0, 0.0, 1.0);
+        d2.add_to_derivatives(udelta * dv, *da);
+      }
+    }
+  }
+
+  return score;
+}
+
+ModelObjectsTemp SphericalIndentSurfaceDepthPairScore::do_get_inputs(
+    Model *m, const ParticleIndexes &pis) const {
+  return IMP::get_particles(m, pis);
+}
+
+IMPNPC_END_NAMESPACE
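Again the numbers line up with the unit test (test_indent_membrane_EV: bead at (100, 100, 20), R = 100, h = 50, k = 4):

```python
k = 4.0
R, h = 100.0, 50.0
x, y, z = 100.0, 100.0, 20.0

a_sqrd = h * (2.0 * R - h)   # 7500
# x^2 + y^2 = 20000 > a_sqrd puts the bead outside the indent;
# z > 0 means it penetrates the flat surface, so 0.5*k*z^2 applies
score = 0.5 * k * z * z
print(score)  # 800.0, the value asserted by the test
```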
diff --git a/modules/npc/src/ToroidalPoreSurfaceDepthPairScore.cpp b/modules/npc/src/ToroidalPoreSurfaceDepthPairScore.cpp
new file mode 100644
index 0000000000..1612956e63
--- /dev/null
+++ b/modules/npc/src/ToroidalPoreSurfaceDepthPairScore.cpp
@@ -0,0 +1,119 @@
+/**
+ *  \file ToroidalPoreSurfaceDepthPairScore.cpp
+ *  \brief A harmonic penalty on particles that penetrate the surface of a
+ *         slab with a toroidal pore
+ *
+ *  Copyright 2007-2024 IMP Inventors. All rights reserved.
+ */
+
+#include <IMP/npc/ToroidalPoreSurfaceDepthPairScore.h>
+#include <IMP/core/XYZ.h>
+#include <IMP/algebra/Vector3D.h>
+
+IMPNPC_BEGIN_NAMESPACE
+
+ToroidalPoreSurfaceDepthPairScore::ToroidalPoreSurfaceDepthPairScore(double k) : k_(k) {}
+
+// return score for a given particle pair
+double ToroidalPoreSurfaceDepthPairScore::evaluate_index(Model *m,
+                                                         const ParticleIndexPair &pip,
+                                                         DerivativeAccumulator *da) const {
+  // check that the first particle is the toroidal-pore slab
+  IMP_USAGE_CHECK(SlabWithToroidalPore::get_is_setup(m, pip[0]),
+                  "First particle passed is not of type SlabWithToroidalPore");
+
+  // assume both particles have coordinates
+  SlabWithToroidalPore d1(m, pip[0]);
+  core::XYZ d2(m, pip[1]);
+
+  double x = d2.get_coordinate(0);
+  double y = d2.get_coordinate(1);
+  double z = d2.get_coordinate(2);
+
+  // get the pore radius
+  double rpre = d1.get_pore_radius();
+  // get the slab thickness, i.e. the distance between the planes
+  double zpre = d1.get_thickness();
+
+  double score = 0.0;
+  double dv;
+
+  // squared distance from the pore axis in the xy-plane
+  double rxy = x*x + y*y;
+
+  // if the particle is outside the toroidal pore region, compute the
+  // score just as a function of z-height relative to the plane
+  if (rxy > rpre*rpre) {
+    bool is_above = z > 0.5*zpre;
+    bool is_below = z < -0.5*zpre;
+    // particles outside the slab make no contribution; particles inside
+    // are pushed back out through the nearer face
+    if (!is_above && !is_below) {
+      double z0 = (z >= 0.0) ? 0.5*zpre : -0.5*zpre;
+
+      score += 0.5 * k_ * square(z - z0);
+
+      if (da) {
+        dv = -k_ * (z - z0);
+        algebra::Vector3D udelta = algebra::Vector3D(0.0, 0.0, 1.0);
+        d2.add_to_derivatives(udelta * dv, *da);
+      }
+    }
+  }
+  else {
+    // compute vector towards the radial origin of the toroidal region
+    double r = std::sqrt(rxy);
+    // cutoff to avoid numerical overflow
+    if (r < 1e-9) {
+      r = 1e-9;
+    }
+    double rscl = rpre / r;
+    // compute distance between the particle and the major circle of the torus
+    double dist;
+    dist = std::sqrt((x-rscl*x)*(x-rscl*x) + (y-rscl*y)*(y-rscl*y) + z*z);
+
+    double rmin = d1.get_vertical_minor_radius();
+
+    // particles that penetrate the torus surface (dist < rmin) are penalized
+    if (dist < rmin) {
+      score += 0.5 * k_ * square(dist - rmin);
+
+      if (da) {
+        dv = k_ * (dist - rmin);
+        algebra::Vector3D udelta = algebra::Vector3D(x-rscl*x, y-rscl*y, z);
+        d2.add_to_derivatives(udelta * dv, *da);
+      }
+    }
+  }
+
+  return score;
+}
+
+ModelObjectsTemp ToroidalPoreSurfaceDepthPairScore::do_get_inputs(
+    Model *m, const ParticleIndexes &pis) const {
+  return IMP::get_particles(m, pis);
+}
+
+IMPNPC_END_NAMESPACE
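And the slab branch reproduces test_toroidal_membrane_EV (bead at (0, 80, 50); thickness 200, pore radius 50, k = 4):

```python
k = 4.0
thickness, pore_radius = 200.0, 50.0
x, y, z = 0.0, 80.0, 50.0

# x^2 + y^2 = 6400 > pore_radius^2 = 2500 -> flat-slab branch;
# |z| = 50 < thickness/2 = 100, so the bead is inside the slab and is
# pushed back out through the nearer (upper) face
z0 = 0.5 * thickness if z >= 0.0 else -0.5 * thickness
score = 0.5 * k * (z - z0) ** 2
print(score)  # 5000.0, the value asserted by the test
```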
diff --git a/modules/npc/test/test_membrane_restraints.py b/modules/npc/test/test_membrane_restraints.py
new file mode 100644
index 0000000000..6324ae68c4
--- /dev/null
+++ b/modules/npc/test/test_membrane_restraints.py
@@ -0,0 +1,86 @@
+import IMP
+import IMP.test
+import IMP.algebra
+import IMP.core
+import IMP.npc
+import IMP.container
+
+
+def setup_system_toroid():
+    m = IMP.Model()
+    membrane = IMP.npc.SlabWithToroidalPore.setup_particle(
+        m, IMP.Particle(m), 200.0, 50.0)
+    p = IMP.core.XYZR.setup_particle(
+        IMP.Particle(m),
+        IMP.algebra.Sphere3D(IMP.algebra.Vector3D(0.0, 80.0, 50.0), 0.4))
+    return m, membrane, p
+
+
+def setup_system_indent():
+    m = IMP.Model()
+    membrane = IMP.npc.SlabWithSphericalIndent.setup_particle(
+        m, IMP.Particle(m), 100.0, 50.0)
+    p = IMP.core.XYZR.setup_particle(
+        IMP.Particle(m),
+        IMP.algebra.Sphere3D(IMP.algebra.Vector3D(100.0, 100.0, 20.0), 0.4))
+    return m, membrane, p
+
+
+class Tests(IMP.test.TestCase):
+
+    def test_setup_toroidal_membrane(self):
+        """Test SlabWithToroidalPore"""
+        m, membrane, p = setup_system_toroid()
+        ratio = membrane.get_minor_radius_h2v_aspect_ratio()
+        thickness = membrane.get_thickness()
+        radius = membrane.get_pore_radius()
+        self.assertAlmostEqual(ratio, 1.0, delta=1e-4)
+        self.assertAlmostEqual(thickness, 200.0, delta=1e-4)
+        self.assertAlmostEqual(radius, 50.0, delta=1e-4)
+
+    def test_toroidal_membrane_EV(self):
+        """Test ToroidalPoreSurfaceDepthPairScore"""
+        m, membrane, p = setup_system_toroid()
+        mlsc = IMP.container.ListSingletonContainer(m, [membrane])
+        sdc = IMP.npc.ToroidalPoreSurfaceDepthPairScore(4.0)
+        bpc = IMP.container.AllBipartitePairContainer(mlsc, [p])
+        sdpr = IMP.container.PairsRestraint(sdc, bpc, "membrane ev score")
+        self.assertAlmostEqual(sdpr.unprotected_evaluate(None), 5000.0,
+                               delta=1e-4)
+
+    def test_toroidal_membrane_MBM(self):
+        """Test SlabWithToroidalPoreMBMScore"""
+        m, membrane, p = setup_system_toroid()
+        p.set_coordinates([100.0, 100.0, -112.0])
+        sps = IMP.npc.SlabWithToroidalPoreMBMScore(10.0, 0.0, 4.0)
+        mlsc = IMP.container.ListSingletonContainer(m, [membrane])
+        bpc = IMP.container.AllBipartitePairContainer(mlsc, [p])
+        prs = IMP.container.PairsRestraint(sps, bpc, "membrane mbm restraint")
+        self.assertAlmostEqual(prs.unprotected_evaluate(None), 8.0, delta=1e-4)
+
+    def test_setup_indent_membrane(self):
+        """Test SlabWithSphericalIndent"""
+        m, membrane, p = setup_system_indent()
+        r = membrane.get_sphere_radius()
+        h = membrane.get_sphere_depth()
+        self.assertAlmostEqual(r, 100.0, delta=1e-4)
+        self.assertAlmostEqual(h, 50.0, delta=1e-4)
+
+    def test_indent_membrane_EV(self):
+        """Test SphericalIndentSurfaceDepthPairScore"""
+        m, membrane, p = setup_system_indent()
+        mlsc = IMP.container.ListSingletonContainer(m, [membrane])
+        sdc = IMP.npc.SphericalIndentSurfaceDepthPairScore(4.0)
+        bpc = IMP.container.AllBipartitePairContainer(mlsc, [p])
+        sdpr = IMP.container.PairsRestraint(sdc, bpc,
+                                            "membrane indent ev score")
+        self.assertAlmostEqual(sdpr.unprotected_evaluate(None), 800.0,
+                               delta=1e-4)
+
+    def test_indent_membrane_MBM(self):
+        """Test SlabWithSphericalIndentMBMScore"""
+        m, membrane, p = setup_system_indent()
+        p.set_coordinates([100.0, 100.0, -12.0])
+        sps = IMP.npc.SlabWithSphericalIndentMBMScore(0.0, 4.0)
+        mlsc = IMP.container.ListSingletonContainer(m, [membrane])
+        bpc = IMP.container.AllBipartitePairContainer(mlsc, [p])
+        prs = IMP.container.PairsRestraint(sps, bpc, "membrane mbm restraint")
+        self.assertAlmostEqual(prs.unprotected_evaluate(None), 288.0,
+                               delta=1e-4)
+
+
+if __name__ == '__main__':
+    IMP.test.main()
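The pickled scores in the localization-restraint tests that follow can be reproduced the same way, assuming setup_system() (defined earlier in that test file, not shown in this diff) creates a bead of radius 0.4:

```python
from math import sqrt

r, sigma = 0.4, 0.5

# test_x_axial: bead at x = 0 with bounds [1, 2]; the radius extends the
# bead's lower extent to -0.4, i.e. 1.4 below the lower bound of 1.0
diff = 1.0 - (0.0 - r)
print(diff * diff / sigma)   # 3.92

# test_overall: bead at (0, 1, 2), target (2, 0, 3), tolerance 1
radial = sqrt(2.0 ** 2 + 1.0 ** 2 + 1.0 ** 2)
d = radial - r - 1.0
print(d * d / sigma)         # ~2.203
```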
diff --git a/modules/npc/test/test_protein_localization_restraints.py b/modules/npc/test/test_protein_localization_restraints.py
index fc56e592c4..e2dde5d77b 100644
--- a/modules/npc/test/test_protein_localization_restraints.py
+++ b/modules/npc/test/test_protein_localization_restraints.py
@@ -199,6 +199,57 @@ def test_membrane_surface_location_conditional_restraint(self):
         self.assertEqual(p2ind, p2.get_particle_index())
         self._check_pickle(r, score=1352.0)
 
+    def test_x_axial(self):
+        """Test XAxialPositionRestraint"""
+        m, p = setup_system()
+        p.set_coordinates([0.0, 2.0, 2.0])
+        r = IMP.npc.XAxialPositionRestraint(m, [p], 1.0,
2.0, True, 0.5) + info = _parse_restraint_info(r.get_static_info()) + self.assertAlmostEqual(info['lower bound'], 1.0, delta=1e-4) + self.assertAlmostEqual(info['upper bound'], 2.0, delta=1e-4) + self.assertAlmostEqual(info['sigma'], 0.5, delta=1e-4) + self.assertEqual(info['type'], 'IMP.npc.XAxialPositionRestraint') + self._check_pickle(r, score=3.920) + + + def test_x_axial_lower(self): + """Test XAxialPositionLowerRestraint""" + m, p = setup_system() + p.set_coordinates([0.0,2.0,2.0]) + r = IMP.npc.XAxialPositionLowerRestraint(m, [p], 1.0, True, 0.5) + info = _parse_restraint_info(r.get_static_info()) + self.assertAlmostEqual(info['lower bound'], 1.0, delta=1e-4) + self.assertAlmostEqual(info['sigma'], 0.5, delta=1e-4) + self.assertEqual(info['type'], 'IMP.npc.XAxialPositionLowerRestraint') + self._check_pickle(r, score=3.920) + + + def test_x_axial_upper(self): + """Test XAxialPositionUpperRestraint""" + m, p = setup_system() + p.set_coordinates([0.0,2.0,2.0]) + r = IMP.npc.XAxialPositionUpperRestraint(m, [p], -2.0, True, 0.5) + info = _parse_restraint_info(r.get_static_info()) + self.assertAlmostEqual(info['upper bound'], -2.0, delta=1e-4) + self.assertAlmostEqual(info['sigma'], 0.5, delta=1e-4) + self.assertEqual(info['type'], 'IMP.npc.XAxialPositionUpperRestraint') + self._check_pickle(r, score=11.52) + + + def test_overall(self): + """Test OverallPositionRestraint""" + m, p = setup_system() + p.set_coordinates([0.0,1.0,2.0]) + r = IMP.npc.OverallPositionRestraint(m, [p], 2.0, 0.0, 3.0, 1.0, True, 0.5) + info = _parse_restraint_info(r.get_static_info()) + self.assertAlmostEqual(info['x start'], 2.0, delta=1e-4) + self.assertAlmostEqual(info['y start'], 0.0, delta=1e-4) + self.assertAlmostEqual(info['z start'], 3.0, delta=1e-4) + self.assertAlmostEqual(info['tolerance'], 1.0, delta=1e-4) + self.assertAlmostEqual(info['sigma'], 0.5, delta=1e-4) + self.assertEqual(info['type'], 'IMP.npc.OverallPositionRestraint') + self._check_pickle(r, score=2.203) + if __name__ == '__main__': IMP.test.main() diff --git a/modules/npctransport b/modules/npctransport index 7b440affc1..80432c12db 160000 --- a/modules/npctransport +++ b/modules/npctransport @@ -1 +1 @@ -Subproject commit 7b440affc187c2804bf96d87b375a40754bfe622 +Subproject commit 80432c12db8754e4b80e76c34a08a615193d3a70 diff --git a/modules/pmi/.github/workflows/build.yml b/modules/pmi/.github/workflows/build.yml index 6afcb6d2a7..060759b2ba 100644 --- a/modules/pmi/.github/workflows/build.yml +++ b/modules/pmi/.github/workflows/build.yml @@ -17,7 +17,7 @@ jobs: runs-on: ${{ matrix.os }} steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v4 with: submodules: true - name: Setup conda and dependencies diff --git a/modules/pmi/doc/resolution.md b/modules/pmi/doc/resolution.md index fa030de637..04a3a18244 100644 --- a/modules/pmi/doc/resolution.md +++ b/modules/pmi/doc/resolution.md @@ -18,9 +18,8 @@ a bead for every 10 residues, while resolution 1 uses a bead for each residue. An all-atom representation is assigned a resolution of 0. Many PMI functions take a `resolution` parameter to specify which resolution -to act on. These functions use IMP::pmi::tools::HierarchyDatabase to get -the closest resolution if the exact resolution requested is not available. +to act on. These functions will use the closest resolution if the exact +resolution requested is not available. -@note currently resolution is stored in the particle name. Ultimately it -is intended to use the IMP::atom::Representation decorator for this purpose -instead. 
+@note Internally, the IMP::atom::Representation decorator is used to keep track +of all system representations at the desired resolutions. diff --git a/modules/pmi/include/utilities.h b/modules/pmi/include/utilities.h index d8219a1b05..76dc29b038 100644 --- a/modules/pmi/include/utilities.h +++ b/modules/pmi/include/utilities.h @@ -87,21 +87,6 @@ inline std::string get_molecule_name_and_copy(atom::Hierarchy h){ boost::lexical_cast(atom::get_copy_index(h)); } -IMPPMI_DEPRECATED_METHOD_DECL(2.20) -inline bool get_is_canonical(atom::Hierarchy h){ - IMPPMI_DEPRECATED_FUNCTION_DEF( - 2.20, "Support for PMI1-style hierarchies is no longer present; " - "all hierarchies should now be considered 'canonical'."); - - while (h) { - if (h->get_name()=="System") { - return true; - } - h = get_parent_representation(h); - } - return false; -} - IMPPMI_END_NAMESPACE #endif /* IMPPMI_UTILITIES_H */ diff --git a/modules/pmi/pyext/src/macros.py b/modules/pmi/pyext/src/macros.py index 85d27a2783..c7d4e3845a 100644 --- a/modules/pmi/pyext/src/macros.py +++ b/modules/pmi/pyext/src/macros.py @@ -26,6 +26,8 @@ import warnings import math +import pickle + class _MockMPIValues(object): """Replace samplers.MPI_values when in test mode""" @@ -109,7 +111,10 @@ def __init__(self, model, root_hier, atomistic=False, replica_exchange_object=None, test_mode=False, - score_moved=False): + score_moved=False, + use_nestor=False, + nestor_restraints=None, + nestor_rmf_fname_prefix="nested",): """Constructor. @param model The IMP model @param root_hier Top-level (System)hierarchy @@ -174,6 +179,13 @@ def __init__(self, model, root_hier, @param score_moved If True, attempt to speed up Monte Carlo sampling by caching scoring function terms on particles that didn't move. + @param use_nestor If True, follows the Nested Sampling workflow + of the NestOR module and skips writing stat files and + replica stat files. + @param nestor_restraints A list of restraints for which + likelihoods are to be computed for use by NestOR module. + @param nestor_rmf_fname_prefix Prefix to be used for storing .rmf3 + files generated by NestOR . """ self.model = model self.vars = {} @@ -258,6 +270,9 @@ def __init__(self, model, root_hier, self.vars["geometries"] = None self.test_mode = test_mode self.score_moved = score_moved + self.nest = use_nestor + self.nestor_restraints = nestor_restraints + self.nestor_rmf_fname = nestor_rmf_fname_prefix def add_geometries(self, geometries): if self.vars["geometries"] is None: @@ -276,6 +291,7 @@ def show_info(self): keys.sort() for v in keys: print("------", v.ljust(30), self.vars[v]) + print("Use nestor: ", self.nest) def get_replica_exchange_object(self): return self.replica_exchange_object @@ -375,7 +391,7 @@ def execute_macro(self): rmf_dir = globaldir + self.vars["rmf_dir"] pdb_dir = globaldir + self.vars["best_pdb_dir"] - if not self.test_mode: + if not self.test_mode and not self.nest: if self.vars["do_clean_first"]: pass @@ -410,16 +426,19 @@ def execute_macro(self): if self.rmf_output_objects is not None: self.rmf_output_objects.append(sw) - print("Setting up stat file") output = IMP.pmi.output.Output(atomistic=self.vars["atomistic"]) - low_temp_stat_file = globaldir + \ - self.vars["stat_file_name_suffix"] + "." + str(myindex) + ".out" + + if not self.nest: + print("Setting up stat file") + low_temp_stat_file = globaldir + \ + self.vars["stat_file_name_suffix"] + "." 
+ \ + str(myindex) + ".out" # Ensure model is updated before saving init files if not self.test_mode: self.model.update() - if not self.test_mode: + if not self.test_mode and not self.nest: if self.output_objects is not None: output.init_stat2(low_temp_stat_file, self.output_objects, @@ -427,16 +446,18 @@ def execute_macro(self): else: print("Stat file writing is disabled") - if self.rmf_output_objects is not None: + if self.rmf_output_objects is not None and not self.nest: print("Stat info being written in the rmf file") - print("Setting up replica stat file") - replica_stat_file = globaldir + \ - self.vars["replica_stat_file_suffix"] + "." + str(myindex) + ".out" - if not self.test_mode: - output.init_stat2(replica_stat_file, [rex], extralabels=["score"]) + if not self.test_mode and not self.nest: + print("Setting up replica stat file") + replica_stat_file = globaldir + \ + self.vars["replica_stat_file_suffix"] + "." + \ + str(myindex) + ".out" + if not self.test_mode: + output.init_stat2(replica_stat_file, [rex], + extralabels=["score"]) - if not self.test_mode: print("Setting up best pdb files") if not self.is_multi_state: if self.vars["number_of_best_scoring_models"] > 0: @@ -478,7 +499,7 @@ def execute_macro(self): else: output_hierarchies = [self.root_hier] - if not self.test_mode: + if not self.test_mode and not self.nest: print("Setting up and writing initial rmf coordinate file") init_suffix = globaldir + self.vars["initial_rmf_name_suffix"] output.init_rmf(init_suffix + "." + str(myindex) + ".rmf3", @@ -498,7 +519,7 @@ def execute_macro(self): self._add_provenance(sampler_md, sampler_mc) - if not self.test_mode: + if not self.test_mode and not self.nest: print("Setting up production rmf files") rmfname = rmf_dir + "/" + str(myindex) + ".rmf3" output.init_rmf(rmfname, output_hierarchies, @@ -508,14 +529,25 @@ def execute_macro(self): if self._rmf_restraints: output.add_restraints_to_rmf(rmfname, self._rmf_restraints) + if not self.test_mode and self.nest: + print("Setting up NestOR rmf files") + nestor_rmf_fname = str(self.nestor_rmf_fname) + '_' + \ + str(self.replica_exchange_object.get_my_index()) + '.rmf3' + + output.init_rmf(nestor_rmf_fname, output_hierarchies, + geometries=self.vars["geometries"], + listofobjects=self.rmf_output_objects) + ntimes_at_low_temp = 0 - if myindex == 0: + if myindex == 0 and not self.nest: self.show_info() self.replica_exchange_object.set_was_used(True) nframes = self.vars["number_of_frames"] if self.test_mode: nframes = 1 + + sampled_likelihoods = [] for i in range(nframes): if self.test_mode: score = 0. 
@@ -529,7 +561,8 @@ def execute_macro(self): score = IMP.pmi.tools.get_restraint_set( self.model).evaluate(False) mpivs.set_value("score", score) - output.set_output_entry("score", score) + if not self.nest: + output.set_output_entry("score", score) my_temp_index = int(rex.get_my_temp() * temp_index_factor) @@ -552,7 +585,17 @@ def execute_macro(self): if save_frame: print("--- frame %s score %s " % (str(i), str(score))) - if not self.test_mode: + if self.nest: + if math.isnan(score): + sampled_likelihoods.append(math.nan) + else: + likelihood_for_sample = 1 + for rstrnt in self.nestor_restraints: + likelihood_for_sample *= rstrnt.get_likelihood() + sampled_likelihoods.append(likelihood_for_sample) + output.write_rmf(nestor_rmf_fname) + + if not self.test_mode and not self.nest: if i % self.vars["nframes_write_coordinates"] == 0: print('--- writing coordinates') if self.vars["number_of_best_scoring_models"] > 0: @@ -568,14 +611,23 @@ def execute_macro(self): output.write_stat2(low_temp_stat_file) ntimes_at_low_temp += 1 - if not self.test_mode: + if not self.test_mode and not self.nest: output.write_stat2(replica_stat_file) if self.vars["replica_exchange_swap"]: rex.swap_temp(i, score) + + if self.nest and len(sampled_likelihoods) > 0: + with open("likelihoods_" + + str(self.replica_exchange_object.get_my_index()), + "wb") as lif: + pickle.dump(sampled_likelihoods, lif) + + output.close_rmf(nestor_rmf_fname) + for p, state in IMP.pmi.tools._all_protocol_outputs(self.root_hier): p.add_replica_exchange(state, self) - if not self.test_mode: + if not self.test_mode and not self.nest: print("closing production rmf files") output.close_rmf(rmfname) diff --git a/modules/pmi/pyext/src/restraints/crosslinking.py b/modules/pmi/pyext/src/restraints/crosslinking.py old mode 100644 new mode 100755 index b5791f0888..731909b6b6 --- a/modules/pmi/pyext/src/restraints/crosslinking.py +++ b/modules/pmi/pyext/src/restraints/crosslinking.py @@ -455,6 +455,14 @@ def get_output(self): return output + def get_likelihood(self): + """Get the unweighted likelihood of the restraint""" + likelihood = 1 + for restraint in self.xl_restraints: + likelihood *= restraint.get_probability() + + return likelihood + def get_movers(self): """ Get all need data to construct a mover in IMP.pmi.dof class""" movers = [] diff --git a/modules/pmi/pyext/src/restraints/em.py b/modules/pmi/pyext/src/restraints/em.py old mode 100644 new mode 100755 index 469c33261c..39c7e386a9 --- a/modules/pmi/pyext/src/restraints/em.py +++ b/modules/pmi/pyext/src/restraints/em.py @@ -358,6 +358,11 @@ def get_output(self): self.label] = str(self.sigmaglobal.get_scale()) return output + def get_likelihood(self): + """Get the unweighted likelihood of the restraint""" + likelihood = self.gaussianEM_restraint.get_probability() + return likelihood + def evaluate(self): return self.weight * self.rs.unprotected_evaluate(None) diff --git a/modules/pmi/src/TransformMover.cpp b/modules/pmi/src/TransformMover.cpp index 414b378102..e1409934ec 100644 --- a/modules/pmi/src/TransformMover.cpp +++ b/modules/pmi/src/TransformMover.cpp @@ -2,13 +2,15 @@ * \file TransformMover.cpp * \brief A mover that transforms a rigid body * - * Copyright 2007-2022 IMP Inventors. All rights reserved. + * Copyright 2007-2024 IMP Inventors. All rights reserved. 
* */ #include #include #include #include +#include + IMPPMI_BEGIN_NAMESPACE TransformMover::TransformMover(Model *m, @@ -84,7 +86,7 @@ core::MonteCarloMoverResult TransformMover::do_propose() { axis_=(d2.get_coordinates()-d1.get_coordinates()).get_unit_vector(); } - ::boost::uniform_real<> rand(-max_angle_, max_angle_); + ::boost::random::uniform_real_distribution<> rand(-max_angle_, max_angle_); Float angle = rand(random_number_generator); algebra::Rotation3D r = algebra::get_rotation_about_axis(axis_, angle); algebra::Transformation3D t(r, translation); diff --git a/modules/pmi/tools/dev_tools/.github/workflows/build.yml b/modules/pmi/tools/dev_tools/.github/workflows/build.yml index d719007528..e32144d46a 100644 --- a/modules/pmi/tools/dev_tools/.github/workflows/build.yml +++ b/modules/pmi/tools/dev_tools/.github/workflows/build.yml @@ -9,7 +9,7 @@ jobs: fail-fast: false matrix: os: [ubuntu-latest] - python-version: [2.7, 3.7, 3.8, 3.9, "3.10"] + python-version: ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12"] runs-on: ${{ matrix.os }} steps: @@ -20,8 +20,11 @@ jobs: python-version: ${{ matrix.python-version }} - name: Install dependencies run: | - pip install 'pytest-flake8<1.1' pytest-cov + pip install pytest flake8 pytest-cov - name: Test run: | - py.test --cov=. --cov-branch --cov-report=xml -v --flake8 . + py.test --cov=. --cov-branch --cov-report=xml -v . + flake8 --ignore=E402,W503 --exclude python_tools/reindent.py - uses: codecov/codecov-action@v1 + with: + token: ${{ secrets.CODECOV_TOKEN }} diff --git a/modules/pmi/tools/dev_tools/check_standards.py b/modules/pmi/tools/dev_tools/check_standards.py index ddda6c0ec2..036b3a02ac 100755 --- a/modules/pmi/tools/dev_tools/check_standards.py +++ b/modules/pmi/tools/dev_tools/check_standards.py @@ -18,7 +18,7 @@ print("Pygments (http://pygments.org/) library: please install.") print() -from optparse import OptionParser +from argparse import ArgumentParser def _check_do_not_commit(line, filename, num, errors): @@ -187,15 +187,18 @@ def get_all_files(): def main(): - parser = OptionParser() - options, args = parser.parse_args() + parser = ArgumentParser() + parser.add_argument("patterns", metavar="PATTERN", nargs="*", + help="File patterns to check; if unspecified, " + "check all files") + args = parser.parse_args() errors = [] - if len(args) == 0: + if len(args.patterns) == 0: modfiles = get_all_files() print("usage:", sys.argv[0], "file_patterns") else: - modfiles = args + modfiles = args.patterns for pattern in modfiles: expanded = glob.glob(pattern) # rint pattern, expanded diff --git a/modules/pmi/tools/dev_tools/cleanup_code.py b/modules/pmi/tools/dev_tools/cleanup_code.py index 57098b8fc2..dd42118bee 100755 --- a/modules/pmi/tools/dev_tools/cleanup_code.py +++ b/modules/pmi/tools/dev_tools/cleanup_code.py @@ -4,7 +4,7 @@ files.""" from __future__ import print_function -from optparse import OptionParser +from argparse import ArgumentParser import subprocess import os import sys @@ -24,8 +24,8 @@ from python_tools.reindent import Reindenter -parser = OptionParser(usage="%prog [options] [FILENAME ...]", - description="""Reformat the given C++ and Python files +parser = ArgumentParser( + description="""Reformat the given C++ and Python files (using the clang-format tool if available and reindent.py, respectively). If the --all option is given, reformat all such files under the current directory. @@ -34,46 +34,48 @@ by giving the -a option. 
autopep8 is much more aggressive than reindent.py and will fix other issues, such as use of old-style Python syntax. """) -parser.add_option("-c", "--clang-format", dest="clang_format", - default="auto", metavar="EXE", - help="The clang-format command.") -parser.add_option("-a", dest="use_ap", action="store_true", default=False, - help="Use autopep8 rather than reindent.py for " - "Python files.") -parser.add_option("--all", dest="all_files", action="store_true", - default=False, - help="Reformat all files under current directory") -parser.add_option("--autopep8", dest="autopep8", - default="auto", metavar="EXE", - help="The autopep8 command.") -parser.add_option("-e", "--exclude", dest="exclude", - default="eigen3:config_templates", metavar="DIRS", - help="Colon-separated list of dirnames to ignore.") -parser.add_option("-v", "--verbose", dest="verbose", action="store_true", - default=False, - help="Print extra info.") -(options, args) = parser.parse_args() -if not args and not options.all_files: +parser.add_argument("-c", "--clang-format", dest="clang_format", + default="auto", metavar="EXE", + help="The clang-format command.") +parser.add_argument("-a", dest="use_ap", action="store_true", default=False, + help="Use autopep8 rather than reindent.py for " + "Python files.") +parser.add_argument("--all", dest="all_files", action="store_true", + default=False, + help="Reformat all files under current directory") +parser.add_argument("--autopep8", dest="autopep8", + default="auto", metavar="EXE", + help="The autopep8 command.") +parser.add_argument("-e", "--exclude", dest="exclude", + default="eigen3:config_templates", metavar="DIRS", + help="Colon-separated list of dirnames to ignore.") +parser.add_argument("-v", "--verbose", dest="verbose", action="store_true", + default=False, + help="Print extra info.") +parser.add_argument("files", metavar="FILENAME", nargs="*", + help="C++ and Python files to reformat.") +args = parser.parse_args() +if not args.files and not args.all_files: parser.error("No files selected") # clang-format-3.4", # autopep8 # search for executables -if options.clang_format == "auto": - options.clang_format = None +if args.clang_format == "auto": + args.clang_format = None for name in ["clang-format-3.4", "clang-format"]: if which(name): - options.clang_format = name + args.clang_format = name break -if options.autopep8 == "auto": - options.autopep8 = None +if args.autopep8 == "auto": + args.autopep8 = None for name in ["autopep8"]: if which(name): - options.autopep8 = name + args.autopep8 = name break -exclude = options.exclude.split(":") +exclude = args.exclude.split(":") error = None @@ -137,10 +139,10 @@ def _do_get_files(glb, cur): def _get_files(glb): match = [] - if len(args) == 0: + if len(args.files) == 0: match = _do_get_files(glb, ".") else: - for a in args: + for a in args.files: if os.path.isdir(a): match += _do_get_files(glb, a) elif a.endswith(glb): @@ -163,8 +165,8 @@ def clean_cpp(path): # skip code that isn't ours if "dependency" in path or "/eigen3/" in path: return - if options.clang_format: - contents = _run([options.clang_format, "--style=Google", path]) + if args.clang_format: + contents = _run([args.clang_format, "--style=Google", path]) else: contents = open(path, "r").read() contents = contents.replace("% template", "%template") @@ -172,8 +174,8 @@ def clean_cpp(path): def clean_py(path): - if options.use_ap and options.autopep8: - contents = _run([options.autopep8, "--aggressive", "--aggressive", + if args.use_ap and args.autopep8: + contents = 
_run([args.autopep8, "--aggressive", "--aggressive", path]) else: r = Reindenter(open(path)) @@ -185,25 +187,25 @@ def clean_py(path): def main(): - if options.verbose: - if options.autopep8 is None: + if args.verbose: + if args.autopep8 is None: print("autopep8 not found") else: - print("autopep8 is `%s`" % options.autopep8) - if options.clang_format is None: + print("autopep8 is `%s`" % args.autopep8) + if args.clang_format is None: print("clang-format not found") else: - print("clang-format is `%s`" % options.clang_format) + print("clang-format is `%s`" % args.clang_format) tp = ThreadPool() - if args: - for f in args: + if args.files: + for f in args.files: if f.endswith(".py"): tp.add_task(clean_py, f) elif f.endswith(".h") or f.endswith(".cpp"): tp.add_task(clean_cpp, f) - elif options.all_files: + elif args.all_files: for f in _get_files(".py"): tp.add_task(clean_py, f) for f in _get_files(".h") + _get_files(".cpp"): diff --git a/modules/pmi/tools/dev_tools/git/bootstrap_setup_git.py b/modules/pmi/tools/dev_tools/git/bootstrap_setup_git.py index 1f1a1fa9f1..cd553ebc23 100755 --- a/modules/pmi/tools/dev_tools/git/bootstrap_setup_git.py +++ b/modules/pmi/tools/dev_tools/git/bootstrap_setup_git.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 """Update submodules and then call the main setup_git.py. This should be copied to the main directory of your project and named setup_git.py.""" diff --git a/modules/pmi/tools/dev_tools/git/setup_git.py b/modules/pmi/tools/dev_tools/git/setup_git.py index 0fbbbea078..6821d3aee3 100755 --- a/modules/pmi/tools/dev_tools/git/setup_git.py +++ b/modules/pmi/tools/dev_tools/git/setup_git.py @@ -1,24 +1,24 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 import sys import os.path import subprocess import glob -from optparse import OptionParser +from argparse import ArgumentParser import shutil dev_tools_path = os.path.join("tools", "dev_tools") -opt = OptionParser() -opt.add_option("-g", "--global", - action="store_true", dest="glob", default=False, - help="Set global git settings instead of repo " - "settings [default]") +opt = ArgumentParser() +opt.add_argument("-g", "--global", + action="store_true", dest="glob", default=False, + help="Set global git settings instead of repo " + "settings [default]") -(options, args) = opt.parse_args() +args = opt.parse_args() -if options.glob: +if args.glob: git_config = "git config --global --replace-all" config_contents = "" else: diff --git a/modules/pmi/tools/dev_tools/make_all_header.py b/modules/pmi/tools/dev_tools/make_all_header.py index ec9af48aac..b10deee041 100755 --- a/modules/pmi/tools/dev_tools/make_all_header.py +++ b/modules/pmi/tools/dev_tools/make_all_header.py @@ -6,6 +6,7 @@ import sys import glob +import datetime import os sys.path.append(os.path.split(sys.argv[0])[0]) @@ -27,13 +28,14 @@ def _add_includes(headers, output): includepath = sys.argv[1][sys.argv[1].find("include") + len("include") + 1:] +year = datetime.datetime.now().year output = ["""/** * \\file %s * \\brief Include all non-deprecated headers in %s. * - * Copyright 2007-2022 IMP Inventors. All rights reserved. + * Copyright 2007-%d IMP Inventors. All rights reserved. 
*/ -""" % (includepath, includepath[:-2].replace('/', '.'))] +""" % (includepath, includepath[:-2].replace('/', '.'), year)] guard = includepath.replace( "/", "_").replace("\\", diff --git a/modules/pmi/tools/dev_tools/pytest.ini b/modules/pmi/tools/dev_tools/pytest.ini deleted file mode 100644 index aec0cb16fd..0000000000 --- a/modules/pmi/tools/dev_tools/pytest.ini +++ /dev/null @@ -1,3 +0,0 @@ -[pytest] -flake8-ignore = E402 W503 -addopts = --ignore=python_tools/reindent.py diff --git a/modules/pmi/tools/dev_tools/python_tools/__init__.py b/modules/pmi/tools/dev_tools/python_tools/__init__.py index 17fd602769..aecd175fbb 100644 --- a/modules/pmi/tools/dev_tools/python_tools/__init__.py +++ b/modules/pmi/tools/dev_tools/python_tools/__init__.py @@ -174,8 +174,8 @@ def get_modules(source): def split(string, sep=":"): - return([x.replace("@", ":") - for x in string.replace("\\:", "@").split(sep) if x != ""]) + return ([x.replace("@", ":") + for x in string.replace("\\:", "@").split(sep) if x != ""]) def get_project_info(path): diff --git a/modules/pmi/tools/dev_tools/test/test_header.py b/modules/pmi/tools/dev_tools/test/test_header.py index a00c32bd4a..ef3f7b0647 100644 --- a/modules/pmi/tools/dev_tools/test/test_header.py +++ b/modules/pmi/tools/dev_tools/test/test_header.py @@ -1,6 +1,7 @@ import unittest import subprocess import os +import datetime import utils TOPDIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..')) @@ -26,13 +27,14 @@ def test_header(self): 'include/test/subdir'], cwd=tmpdir) stdout, stderr = p.communicate() self.assertEqual(p.returncode, 0) + year = datetime.datetime.now().year self.assertEqual(utils.read_file(os.path.join(tmpdir, 'include/test.h')), """/** * \\file test.h * \\brief Include all non-deprecated headers in test. * - * Copyright 2007-2022 IMP Inventors. All rights reserved. + * Copyright 2007-%d IMP Inventors. All rights reserved. 
*/ #ifndef TEST_H @@ -43,7 +45,7 @@ def test_header(self): #include #endif #endif /* TEST_H */ -""") +""" % year) if __name__ == '__main__': diff --git a/modules/pmi/tools/setup_ci.sh b/modules/pmi/tools/setup_ci.sh index dac0650a23..36a466bf15 100755 --- a/modules/pmi/tools/setup_ci.sh +++ b/modules/pmi/tools/setup_ci.sh @@ -12,12 +12,15 @@ python_version=$1 if [ ${python_version} = "2.7" ]; then BOOST="" pip="pip<=19.3.1" + # Python.h includes crypt.h, which is no longer provided by default + crypt="libxcrypt" else BOOST="libboost-devel" pip="pip" + crypt="" fi conda config --remove channels defaults # get conda-forge, not main, packages -conda create --yes -q -n python${python_version} -c salilab -c conda-forge python=${python_version} ${pip} scipy matplotlib imp-nightly ${BOOST} gxx_linux-64 eigen cereal swig cmake +conda create --yes -q -n python${python_version} -c salilab -c conda-forge python=${python_version} ${pip} ${crypt} scipy matplotlib imp-nightly ${BOOST} gxx_linux-64 eigen cereal swig cmake eval "$(conda shell.bash hook)" conda activate python${python_version} diff --git a/modules/pmi1 b/modules/pmi1 index d4e053edd9..0edd43de39 160000 --- a/modules/pmi1 +++ b/modules/pmi1 @@ -1 +1 @@ -Subproject commit d4e053edd94cf9bb73b524210d65de872ea31f59 +Subproject commit 0edd43de397e51492bf7062611cc2356f837a97c diff --git a/modules/rmf/dependency/RMF/.github/workflows/build.yml b/modules/rmf/dependency/RMF/.github/workflows/build.yml index dd2432347d..2f03b07489 100644 --- a/modules/rmf/dependency/RMF/.github/workflows/build.yml +++ b/modules/rmf/dependency/RMF/.github/workflows/build.yml @@ -36,9 +36,9 @@ jobs: runs-on: ${{ matrix.os }} steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v4 - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v2 + uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} - name: Install dependencies (Linux) diff --git a/modules/rmf/dependency/RMF/CMakeLists.txt b/modules/rmf/dependency/RMF/CMakeLists.txt index d11890ba99..ac9707a323 100644 --- a/modules/rmf/dependency/RMF/CMakeLists.txt +++ b/modules/rmf/dependency/RMF/CMakeLists.txt @@ -54,7 +54,7 @@ find_package(Boost COMPONENTS system filesystem thread program_options iostreams REQUIRED) include(FindHDF5) -find_package(HDF5 1.8) +find_package(HDF5) if("${HDF5_INCLUDE_DIRS}" MATCHES .*NOTFOUND) set(RMF_DEPRECATED_BACKENDS 0 CACHE BOOL "Whether to build deprecated backends.") @@ -140,7 +140,7 @@ set(LIBRARY_OUTPUT_PATH ${CMAKE_BINARY_DIR}/lib) # Version information set (RMF_VERSION_MAJOR 1) set (RMF_VERSION_MINOR 6) -set (RMF_VERSION_MICRO 0) +set (RMF_VERSION_MICRO 1) math (EXPR RMF_VERSION "${RMF_VERSION_MAJOR} * 100000 + ${RMF_VERSION_MINOR} * 100 + ${RMF_VERSION_MICRO}") set(RMF_SOVERSION "${RMF_VERSION_MAJOR}.${RMF_VERSION_MINOR}" CACHE INTERNAL "" FORCE) diff --git a/modules/rmf/dependency/RMF/ChangeLog.md b/modules/rmf/dependency/RMF/ChangeLog.md index 46fda4e6ca..aeb3f0634e 100644 --- a/modules/rmf/dependency/RMF/ChangeLog.md +++ b/modules/rmf/dependency/RMF/ChangeLog.md @@ -1,6 +1,14 @@ Change Log {#changelog} ========== +# 1.6.1 - 2024-05-13 # {#changelog_1_6_1} +- .deb packages for RMF for Ubuntu are now provided via the + [Ubuntu PPA](https://launchpad.net/~salilab/+archive/ubuntu/ppa) mechanism. +- `rmf_cat` now checks that all of the files it has been asked to concatenate + have the same structure and static frame, and exits with an error if they + don't. A new `--force` option is provided to override this check. 
+- Bugfix: fixes to build with Boost 1.85 + # 1.6.0 - 2023-12-14 # {#changelog_1_6_0} - RPM packages for RMF for RedHat Linux (and clones such as Alma or Rocky) and for Fedora are now provided by the diff --git a/modules/rmf/dependency/RMF/benchmark/benchmark_rmf.cpp b/modules/rmf/dependency/RMF/benchmark/benchmark_rmf.cpp index 4dd8f90981..3800e6e85b 100644 --- a/modules/rmf/dependency/RMF/benchmark/benchmark_rmf.cpp +++ b/modules/rmf/dependency/RMF/benchmark/benchmark_rmf.cpp @@ -6,7 +6,11 @@ */ #include +#include #include +#if BOOST_VERSION >= 107200 +#include +#endif #include #include #include diff --git a/modules/rmf/dependency/RMF/bin/rmf_cat.cpp b/modules/rmf/dependency/RMF/bin/rmf_cat.cpp index 9ec94438b9..553f88f681 100644 --- a/modules/rmf/dependency/RMF/bin/rmf_cat.cpp +++ b/modules/rmf/dependency/RMF/bin/rmf_cat.cpp @@ -21,19 +21,42 @@ std::string output; } int main(int argc, char** argv) { try { + options.add_options()("force,f", + "Combine files even if they have different " + "structure or static frames."); positional_options.add_options()( "input-files,i", boost::program_options::value >(&inputs), "input rmf file"); positional_names.emplace_back("input_1.rmf input_2.rmf ... output.rmf"); positional_options_description.add("input-files", -1); - process_options(argc, argv); + boost::program_options::variables_map vm(process_options(argc, argv)); + if (inputs.size() < 3) { print_help_and_exit(argv); } output = inputs.back(); inputs.pop_back(); + bool force = vm.count("force"); + if (!force && inputs.size() > 1) { + RMF::FileConstHandle rh1 = RMF::open_rmf_file_read_only(inputs[0]); + for (unsigned int i = 1; i < inputs.size(); ++i) { + RMF::FileConstHandle rh2 = RMF::open_rmf_file_read_only(inputs[i]); + if (!RMF::get_equal_structure(rh1, rh2, true)) { + std::cerr << inputs[0] << " and " << inputs[i] + << " have different structure, cannot concatenate " + << "without --force" << std::endl; + exit(1); + } + if (!RMF::get_equal_static_values(rh1, rh2)) { + std::cerr << inputs[0] << " and " << inputs[i] + << " have different static frames, cannot concatenate " + << "without --force" << std::endl; + exit(1); + } + } + } RMF::FileHandle orh = RMF::create_rmf_file(output); orh.set_producer("rmf_cat"); for (unsigned int i = 0; i < inputs.size(); ++i) { diff --git a/modules/rmf/dependency/RMF/doc/Executables.md b/modules/rmf/dependency/RMF/doc/Executables.md index 5b1bb0658d..5924c29a5c 100644 --- a/modules/rmf/dependency/RMF/doc/Executables.md +++ b/modules/rmf/dependency/RMF/doc/Executables.md @@ -52,6 +52,7 @@ and how much they are used. Combine two or more rmf files. Usage: ./bin/rmf_cat input_1.rmf input_2.rmf ... output.rmf + -f [ --force ] Combine files even if they have different structure or static frame. -h [ --help ] Get help on command line arguments. -v [ --verbose ] Produce more output. --hdf5-errors Show hdf5 errors. 
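To make the new check concrete, here is a minimal Python sketch of driving `rmf_cat` and falling back to `--force`, mirroring the subprocess pattern of the `test_rmf_cat.py` tests added later in this diff. The file names and the helper function are hypothetical; `rmf_cat` is assumed to be on the PATH.

```python
# Hypothetical driver: concatenate RMF files, retrying with --force only
# when rmf_cat rejects them for differing structure or static frames.
import subprocess

def cat_rmfs(inputs, output, force=False):
    cmd = ['rmf_cat'] + list(inputs)
    if force:
        cmd.append('--force')
    cmd.append(output)
    return subprocess.run(cmd, capture_output=True, universal_newlines=True)

result = cat_rmfs(['input_1.rmf', 'input_2.rmf'], 'output.rmf')
if result.returncode != 0 and 'have different' in result.stderr:
    # The inputs are inconsistent; override the check only if this is intended.
    result = cat_rmfs(['input_1.rmf', 'input_2.rmf'], 'output.rmf', force=True)
```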
diff --git a/modules/rmf/dependency/RMF/doc/Installation.md b/modules/rmf/dependency/RMF/doc/Installation.md index 45a5d8f81a..a7ac72935f 100644 --- a/modules/rmf/dependency/RMF/doc/Installation.md +++ b/modules/rmf/dependency/RMF/doc/Installation.md @@ -14,10 +14,16 @@ or on a Mac with [Homebrew](https://brew.sh/), install with brew tap salilab/salilab; brew install rmf -or on a Fedora or RedHat Enterprise Linux system, install with +or on a Fedora or RedHat Enterprise Linux system, install via +[COPR](https://copr.fedorainfracloud.org/coprs/salilab/salilab/) with dnf copr enable salilab/salilab; dnf install RMF +or on an Ubuntu LTS system, install via +[PPA](https://launchpad.net/~salilab/+archive/ubuntu/ppa) with + + apt install software-properties-common; add-apt-repository ppa:salilab/ppa; apt install rmf + IMP: Download an IMP binary (which includes RMF) from the [IMP download page](https://integrativemodeling.org/download.html). diff --git a/modules/rmf/dependency/RMF/doc/Main.md b/modules/rmf/dependency/RMF/doc/Main.md index cdb4b4156a..0d75166b81 100644 --- a/modules/rmf/dependency/RMF/doc/Main.md +++ b/modules/rmf/dependency/RMF/doc/Main.md @@ -19,7 +19,7 @@ See Also see the [rmf examples](https://github.com/salilab/rmf_examples) repository for examples of interesting or problematic RMF files. -Copyright 2007-2023 IMP Inventors. +Copyright 2007-2024 IMP Inventors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/modules/rmf/dependency/RMF/include/RMF/Vector.h b/modules/rmf/dependency/RMF/include/RMF/Vector.h index 73b0846ec6..1d230c4486 100644 --- a/modules/rmf/dependency/RMF/include/RMF/Vector.h +++ b/modules/rmf/dependency/RMF/include/RMF/Vector.h @@ -16,7 +16,6 @@ #include #include #include -#include #include RMF_ENABLE_WARNINGS @@ -61,13 +60,13 @@ class Vector RMF_CXX11_DEFAULT_COPY_CONSTRUCTOR(Vector); Vector(float x, float y, float z) { - BOOST_STATIC_ASSERT(D == 3); + static_assert(D == 3, "Constructor only valid for Vector3"); P::operator[](0) = x; P::operator[](1) = y; P::operator[](2) = z; } Vector(float x, float y, float z, float q) { - BOOST_STATIC_ASSERT(D == 4); + static_assert(D == 4, "Constructor only valid for Vector4"); P::operator[](0) = x; P::operator[](1) = y; P::operator[](2) = z; diff --git a/modules/rmf/dependency/RMF/include/RMF/exceptions.h b/modules/rmf/dependency/RMF/include/RMF/exceptions.h index 088e7fe0c8..52428a4594 100644 --- a/modules/rmf/dependency/RMF/include/RMF/exceptions.h +++ b/modules/rmf/dependency/RMF/include/RMF/exceptions.h @@ -37,12 +37,12 @@ class RMFEXPORT Exception : public virtual std::exception, /** Use this instead of the more standard what() to get the message as what() presents issues for memory management - with dynamically generated messages like. */ + with dynamically generated messages. */ RMFEXPORT std::string get_message(const Exception& e); /** Usage exceptions are thrown when the library is misused in some way, e.g., an out of bounds element is requested from a collection. In general - when these are throw, the failed operation should have been cleanly + when these are thrown, the failed operation should have been cleanly aborted without changing the file. 
*/ class RMFEXPORT UsageException : public Exception { diff --git a/modules/rmf/dependency/RMF/include/RMF/internal/swig_helpers.h b/modules/rmf/dependency/RMF/include/RMF/internal/swig_helpers.h index 385d23f185..998f2f11dd 100644 --- a/modules/rmf/dependency/RMF/include/RMF/internal/swig_helpers.h +++ b/modules/rmf/dependency/RMF/include/RMF/internal/swig_helpers.h @@ -118,7 +118,7 @@ struct ValueOrObject { template struct ConvertAllBase { - BOOST_STATIC_ASSERT(!is_pointer::value); + static_assert(!is_pointer::value, "is a pointer"); template static bool get_is_cpp_object(PyObject* o, SwigData st) { void* vp; @@ -129,7 +129,7 @@ struct ConvertAllBase { template struct ConvertValueBase : public ConvertAllBase { - BOOST_STATIC_ASSERT(!is_pointer::value); + static_assert(!is_pointer::value, "is a pointer"); template static const T& get_cpp_object(PyObject* o, SwigData st) { void* vp; @@ -175,7 +175,7 @@ struct Convert : public ConvertValueBase { template struct ConvertSequenceHelper { typedef typename ValueOrObject::type V; - BOOST_STATIC_ASSERT(!is_pointer::value); + static_assert(!is_pointer::value, "is a pointer"); template static bool get_is_cpp_object(PyObject* in, SwigData st) { if (!in || !PySequence_Check(in)) { diff --git a/modules/rmf/dependency/RMF/src/avrocpp/api/AvroParse.hh b/modules/rmf/dependency/RMF/src/avrocpp/api/AvroParse.hh index b44817d65d..35c1e85166 100644 --- a/modules/rmf/dependency/RMF/src/avrocpp/api/AvroParse.hh +++ b/modules/rmf/dependency/RMF/src/avrocpp/api/AvroParse.hh @@ -20,7 +20,6 @@ #define avro_AvroParse_hh__ #include "Config.hh" -#include #include "AvroTraits.hh" #include "ResolvingReader.hh" @@ -48,12 +47,12 @@ void parse(ResolvingReader &p, T &val) { template void parse(Reader &p, T &val, const boost::false_type &) { - BOOST_STATIC_ASSERT(sizeof(T) == 0); + static_assert(sizeof(T) == 0, "val should be empty"); } template void translatingParse(Reader &p, T &val, const boost::false_type &) { - BOOST_STATIC_ASSERT(sizeof(T) == 0); + static_assert(sizeof(T) == 0, "val should be empty"); } // @{ diff --git a/modules/rmf/dependency/RMF/src/avrocpp/api/AvroSerialize.hh b/modules/rmf/dependency/RMF/src/avrocpp/api/AvroSerialize.hh index 106c587d18..2cdfc34c82 100644 --- a/modules/rmf/dependency/RMF/src/avrocpp/api/AvroSerialize.hh +++ b/modules/rmf/dependency/RMF/src/avrocpp/api/AvroSerialize.hh @@ -20,7 +20,6 @@ #define avro_AvroSerialize_hh__ #include "Config.hh" -#include #include "AvroTraits.hh" /// \file @@ -43,7 +42,7 @@ void serialize(Writer &s, const T &val) { template void serialize(Writer &s, const T &val, const boost::false_type &) { - BOOST_STATIC_ASSERT(sizeof(T) == 0); + static_assert(sizeof(T) == 0, "val should be empty"); } /// The remainder of the file includes default implementations for serializable diff --git a/modules/rmf/dependency/RMF/src/avrocpp/api/buffer/BufferReader.hh b/modules/rmf/dependency/RMF/src/avrocpp/api/buffer/BufferReader.hh index 94cbc9fa0d..9a9f6d217c 100644 --- a/modules/rmf/dependency/RMF/src/avrocpp/api/buffer/BufferReader.hh +++ b/modules/rmf/dependency/RMF/src/avrocpp/api/buffer/BufferReader.hh @@ -235,7 +235,7 @@ class AVRO_DECL BufferReader : private boost::noncopyable { /// An uninstantiable function, this is if boost::is_fundamental check fails template bool read(T &val, const boost::false_type &) { - BOOST_STATIC_ASSERT(sizeof(T) == 0); + static_assert(sizeof(T) == 0, "val should be empty"); return false; } diff --git a/modules/rmf/dependency/RMF/src/avrocpp/api/buffer/detail/BufferDetail.hh 
b/modules/rmf/dependency/RMF/src/avrocpp/api/buffer/detail/BufferDetail.hh index e79542b69c..317cafa62f 100644 --- a/modules/rmf/dependency/RMF/src/avrocpp/api/buffer/detail/BufferDetail.hh +++ b/modules/rmf/dependency/RMF/src/avrocpp/api/buffer/detail/BufferDetail.hh @@ -21,7 +21,6 @@ #include #include -#include #include #include #ifdef HAVE_BOOST_ASIO @@ -325,7 +324,7 @@ class BufferImpl : boost::noncopyable { /// and will compile-time assert. template void writeTo(T val, const boost::false_type &) { - BOOST_STATIC_ASSERT(sizeof(T) == 0); + static_assert(sizeof(T) == 0, "val should be empty"); } /// Write a block of data to the buffer, adding new chunks if necessary. diff --git a/modules/rmf/dependency/RMF/src/avrocpp/impl/Resolver.cc b/modules/rmf/dependency/RMF/src/avrocpp/impl/Resolver.cc index 8f445f1841..5f8721dd33 100644 --- a/modules/rmf/dependency/RMF/src/avrocpp/impl/Resolver.cc +++ b/modules/rmf/dependency/RMF/src/avrocpp/impl/Resolver.cc @@ -562,8 +562,8 @@ class ResolverFactory : private boost::noncopyable { &ResolverFactory::constructCompound, &ResolverFactory::constructCompound}; - BOOST_STATIC_ASSERT((sizeof(funcs) / sizeof(BuilderFunc)) == - (AVRO_NUM_TYPES)); + static_assert((sizeof(funcs) / sizeof(BuilderFunc)) == + (AVRO_NUM_TYPES), "function table size mismatch"); BuilderFunc func = funcs[currentWriter->type()]; assert(func); @@ -594,8 +594,8 @@ class ResolverFactory : private boost::noncopyable { &ResolverFactory::constructCompoundSkipper, &ResolverFactory::constructCompoundSkipper}; - BOOST_STATIC_ASSERT((sizeof(funcs) / sizeof(BuilderFunc)) == - (AVRO_NUM_TYPES)); + static_assert((sizeof(funcs) / sizeof(BuilderFunc)) == + (AVRO_NUM_TYPES), "function table size mismatch"); BuilderFunc func = funcs[currentWriter->type()]; assert(func); diff --git a/modules/rmf/dependency/RMF/src/avrocpp/impl/Types.cc b/modules/rmf/dependency/RMF/src/avrocpp/impl/Types.cc index a8cd5bf9ba..90700198dc 100644 --- a/modules/rmf/dependency/RMF/src/avrocpp/impl/Types.cc +++ b/modules/rmf/dependency/RMF/src/avrocpp/impl/Types.cc @@ -18,7 +18,6 @@ #include #include -#include #include "Types.hh" namespace internal_avro { @@ -28,8 +27,8 @@ const std::string typeToString[] = { "string", "bytes", "int", "long", "float", "double", "boolean", "null", "record", "enum", "array", "map", "union", "fixed", "symbolic"}; -BOOST_STATIC_ASSERT((sizeof(typeToString) / sizeof(std::string)) == - (AVRO_NUM_TYPES + 1)); +static_assert((sizeof(typeToString) / sizeof(std::string)) == + (AVRO_NUM_TYPES + 1), "type table size mismatch"); } // namespace strings @@ -37,7 +36,7 @@ BOOST_STATIC_ASSERT((sizeof(typeToString) / sizeof(std::string)) == // each type, // and it would be a problem for this flag if we ever supported more than 32 // types -BOOST_STATIC_ASSERT(AVRO_NUM_TYPES < 32); +static_assert(AVRO_NUM_TYPES < 32, "flags should fit in 32-bit"); const std::string &toString(Type type) { static std::string undefinedType = "Undefined type"; diff --git a/modules/rmf/dependency/RMF/src/avrocpp/impl/Validator.cc b/modules/rmf/dependency/RMF/src/avrocpp/impl/Validator.cc index cc07ca27d6..16562ba6de 100644 --- a/modules/rmf/dependency/RMF/src/avrocpp/impl/Validator.cc +++ b/modules/rmf/dependency/RMF/src/avrocpp/impl/Validator.cc @@ -16,8 +16,6 @@ * limitations under the License. 
*/ -#include - #include "Validator.hh" #include "ValidSchema.hh" #include "NodeImpl.hh" @@ -141,8 +139,8 @@ void Validator::doAdvance() { &Validator::enumAdvance, &Validator::countingAdvance, &Validator::countingAdvance, &Validator::unionAdvance, &Validator::fixedAdvance}; - BOOST_STATIC_ASSERT((sizeof(funcs) / sizeof(AdvanceFunc)) == - (AVRO_NUM_TYPES)); + static_assert((sizeof(funcs) / sizeof(AdvanceFunc)) == + (AVRO_NUM_TYPES), "function table size mismatch"); expectedTypesFlag_ = 0; // loop until we encounter a next expected type, or we've exited all compound @@ -199,7 +197,8 @@ void Validator::setupFlag(Type type) { typeToFlag(AVRO_ENUM), typeToFlag(AVRO_ARRAY), typeToFlag(AVRO_MAP), typeToFlag(AVRO_UNION), typeToFlag(AVRO_FIXED)}; - BOOST_STATIC_ASSERT((sizeof(flags) / sizeof(flag_t)) == (AVRO_NUM_TYPES)); + static_assert((sizeof(flags) / sizeof(flag_t)) == (AVRO_NUM_TYPES), + "flags table size mismatch"); expectedTypesFlag_ = flags[type]; } diff --git a/modules/rmf/dependency/RMF/src/backend/deprecated_avro/MultipleAvroFileReader.cpp b/modules/rmf/dependency/RMF/src/backend/deprecated_avro/MultipleAvroFileReader.cpp index e6155cb452..c7e6375838 100644 --- a/modules/rmf/dependency/RMF/src/backend/deprecated_avro/MultipleAvroFileReader.cpp +++ b/modules/rmf/dependency/RMF/src/backend/deprecated_avro/MultipleAvroFileReader.cpp @@ -6,7 +6,11 @@ * */ +#include #include +#if BOOST_VERSION >= 107200 +#include +#endif #include #include #include diff --git a/modules/rmf/dependency/RMF/test/test_numpy.py b/modules/rmf/dependency/RMF/test/test_numpy.py index e09a1e7e59..3b57706345 100644 --- a/modules/rmf/dependency/RMF/test/test_numpy.py +++ b/modules/rmf/dependency/RMF/test/test_numpy.py @@ -58,6 +58,14 @@ def test_get_global_coordinates_particle_no_coords(self): expected_coord = numpy.array([[1., 2., 3.]]) self.assertLess(numpy.linalg.norm(coord - expected_coord), 1e-4) + @unittest.skipIf(RMF.RMF_HAS_NUMPY, "We have numpy") + def test_numpy_is_missing(self): + """Check that the numpy library is missing""" + # This test is a noop; it is here simply so that not all tests are + # skipped when numpy is missing (Python 3.12 flags all tests being + # skipped as an error) + pass + if __name__ == '__main__': unittest.main() diff --git a/modules/rmf/dependency/RMF/test/test_rmf_cat.py b/modules/rmf/dependency/RMF/test/test_rmf_cat.py new file mode 100644 index 0000000000..95401a4fbd --- /dev/null +++ b/modules/rmf/dependency/RMF/test/test_rmf_cat.py @@ -0,0 +1,108 @@ +import unittest +import os +import RMF +import subprocess + + +def make_input_rmf(fname, add_node=True, static_value=1): + fh = RMF.create_rmf_file(fname) + fh.add_frame("root", RMF.FRAME) + rn = fh.get_root_node() + ch = rn.add_child("ch1", RMF.REPRESENTATION) + if add_node: + ch = rn.add_child("ch2", RMF.REPRESENTATION) + cat = fh.get_category("MyCat") + key = fh.get_key(cat, "MyKey", RMF.int_tag) + fh.get_root_node().set_static_value(key, static_value) + + +class Tests(unittest.TestCase): + + def test_help(self): + """Test rmf_cat --help""" + p = subprocess.Popen(['rmf_cat', '--help'], stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + universal_newlines=True) + out, err = p.communicate() + self.assertEqual(out, "") + self.assertIn("Combine two or more rmf files", err) + self.assertEqual(p.returncode, 1) + + def test_version(self): + """Test rmf_cat --version""" + p = subprocess.Popen(['rmf_cat', '--version'], stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + 
universal_newlines=True) + out, err = p.communicate() + self.assertEqual(err, "") + self.assertIn("RMF version", out) + self.assertEqual(p.returncode, 0) + + def test_cat_mismatch_structure(self): + """Test rmf_cat of structure-mismatched files""" + make_input_rmf("mismatch_struc_1.rmf") + make_input_rmf("mismatch_struc_2.rmf", add_node=False) + p = subprocess.Popen( + ['rmf_cat', "mismatch_struc_1.rmf", "mismatch_struc_2.rmf", + 'mismatch_struc.rmf'], + stdin=subprocess.PIPE, stdout=subprocess.PIPE, + stderr=subprocess.PIPE, universal_newlines=True) + out, err = p.communicate() + self.assertIn("have different structure", err) + self.assertEqual(p.returncode, 1) + os.unlink('mismatch_struc_1.rmf') + os.unlink('mismatch_struc_2.rmf') + + def test_cat_mismatch_static(self): + """Test rmf_cat of static-frame-mismatched files""" + make_input_rmf("mismatch_static_1.rmf", static_value=1) + make_input_rmf("mismatch_static_2.rmf", static_value=2) + p = subprocess.Popen( + ['rmf_cat', "mismatch_static_1.rmf", "mismatch_static_2.rmf", + 'mismatch_static.rmf'], + stdin=subprocess.PIPE, stdout=subprocess.PIPE, + stderr=subprocess.PIPE, universal_newlines=True) + out, err = p.communicate() + self.assertIn("have different static frames", err) + self.assertEqual(p.returncode, 1) + os.unlink('mismatch_static_1.rmf') + os.unlink('mismatch_static_2.rmf') + + def test_cat_mismatch_force(self): + """Test forced rmf_cat of mismatched files""" + make_input_rmf("mismatch_force_1.rmf", static_value=1) + make_input_rmf("mismatch_force_2.rmf", static_value=2) + p = subprocess.Popen( + ['rmf_cat', "mismatch_force_1.rmf", "mismatch_force_2.rmf", + '--force', 'mismatch_force.rmf'], + stdin=subprocess.PIPE, stdout=subprocess.PIPE, + stderr=subprocess.PIPE, universal_newlines=True) + out, err = p.communicate() + self.assertEqual(out, "") + self.assertEqual(err, "") + self.assertEqual(p.returncode, 0) + os.unlink('mismatch_force_1.rmf') + os.unlink('mismatch_force_2.rmf') + os.unlink('mismatch_force.rmf') + + def test_cat_ok(self): + """Test rmf_cat of similar files""" + make_input_rmf("cat_ok_1.rmf") + make_input_rmf("cat_ok_2.rmf") + p = subprocess.Popen( + ['rmf_cat', "cat_ok_1.rmf", "cat_ok_2.rmf", 'cat_ok.rmf'], + stdin=subprocess.PIPE, stdout=subprocess.PIPE, + stderr=subprocess.PIPE, universal_newlines=True) + out, err = p.communicate() + self.assertEqual(out, "") + self.assertEqual(err, "") + self.assertEqual(p.returncode, 0) + os.unlink('cat_ok_1.rmf') + os.unlink('cat_ok_2.rmf') + os.unlink('cat_ok.rmf') + + +if __name__ == '__main__': + unittest.main() diff --git a/modules/rmf/dependency/RMF/tools/RMF.spec b/modules/rmf/dependency/RMF/tools/RMF.spec index de918b50c4..b64846c112 100644 --- a/modules/rmf/dependency/RMF/tools/RMF.spec +++ b/modules/rmf/dependency/RMF/tools/RMF.spec @@ -25,7 +25,7 @@ %endif Name: RMF -Version: 1.6.0 +Version: 1.6.1 Release: 1%{?dist} License: Apache 2.0 Summary: Library to support reading and writing of RMF files @@ -193,6 +193,9 @@ ${RPM_BUILD_ROOT}%{_prefix}/bin/rmf3_dump --version %{_libdir}/libRMF*.so %changelog +* Mon May 13 2024 Ben Webb 1.6.1-1 +- Update for 1.6.1 release. + * Thu Dec 14 2023 Ben Webb 1.6.0-1 - Update for 1.6.0 release. 
diff --git a/modules/rmf/dependency/RMF/tools/debian/changelog b/modules/rmf/dependency/RMF/tools/debian/changelog new file mode 100644 index 0000000000..25ea3634c1 --- /dev/null +++ b/modules/rmf/dependency/RMF/tools/debian/changelog @@ -0,0 +1,18 @@ +rmf (1.6.1-1~@CODENAME@) @CODENAME@; urgency=low + + * RMF 1.6.1 release + + -- IMP Developers Mon, 13 May 2024 15:24:48 -0700 + +rmf (1.6.0-2~@CODENAME@) @CODENAME@; urgency=low + + * Add install tests + * Fix warnings about __pycache__ directory on package removal + + -- IMP Developers Mon, 11 Mar 2024 17:13:55 -0700 + +rmf (1.6.0-1~@CODENAME@) @CODENAME@; urgency=low + + * Initial .deb release + + -- IMP Developers Thu, 07 Mar 2024 14:52:50 -0800 diff --git a/modules/rmf/dependency/RMF/tools/debian/control b/modules/rmf/dependency/RMF/tools/debian/control new file mode 100644 index 0000000000..de221865e8 --- /dev/null +++ b/modules/rmf/dependency/RMF/tools/debian/control @@ -0,0 +1,24 @@ +Source: rmf +Priority: optional +Maintainer: Ben Webb +Build-Depends: debhelper-compat (= 13), cmake, swig, libboost-filesystem-dev, libboost-iostreams-dev, libboost-program-options-dev, libboost-thread-dev, libhdf5-dev, python3-dev, python3-numpy +Standards-Version: 4.6.2 +Section: libs +Homepage: https://integrativemodeling.org/rmf +Vcs-Browser: https://github.com/salilab/rmf/ + +Package: rmf-dev +Section: libdevel +Architecture: any +Depends: rmf (= ${binary:Version}), ${misc:Depends}, cmake, swig, libboost-filesystem-dev, libboost-iostreams-dev, libboost-program-options-dev, libboost-thread-dev, libhdf5-dev, python3-dev, python3-numpy +Description: Library to support reading and writing of RMF files - development files + Headers to compile against RMF. + +Package: rmf +Section: libs +Architecture: any +Depends: ${shlibs:Depends}, ${misc:Depends}, python3-numpy +Description: Library to support reading and writing of RMF files + The library provides support for the RMF file format for storing hierarchical + molecular data (such as atomic or coarse grained representations of proteins), + along with markup, including geometry and score data. diff --git a/modules/rmf/dependency/RMF/tools/debian/copyright b/modules/rmf/dependency/RMF/tools/debian/copyright new file mode 100644 index 0000000000..68325e7a7b --- /dev/null +++ b/modules/rmf/dependency/RMF/tools/debian/copyright @@ -0,0 +1,6 @@ +Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ +Upstream-Name: rmf +Source: https://integrativemodeling.org/rmf/ + +Copyright: 2007-2024 IMP Inventors +License: Apache diff --git a/modules/rmf/dependency/RMF/tools/debian/make-package.sh b/modules/rmf/dependency/RMF/tools/debian/make-package.sh new file mode 100755 index 0000000000..cb846d0cf5 --- /dev/null +++ b/modules/rmf/dependency/RMF/tools/debian/make-package.sh @@ -0,0 +1,23 @@ +#!/bin/sh +# Build a Debian package from source + +set -e + +CODENAME=`lsb_release -c -s` + +# Make sure we can find the rest of our input files +TOOL_DIR=`dirname "$0"` +# Get absolute path to top dir +TOP_DIR=`cd "${TOOL_DIR}/../.." && pwd` + +cd ${TOP_DIR} +rm -rf debian +cp -r tools/debian/ . 
+rm debian/make-package.sh +sed -i -e "s/\@CODENAME\@/$CODENAME/g" debian/changelog + +if [ "${CODENAME}" = "focal" ]; then + sed -i -e "s/debhelper-compat (= 13)/debhelper-compat (= 12)/" debian/control +fi + +dpkg-buildpackage -S diff --git a/modules/rmf/dependency/RMF/tools/debian/rmf-dev.install b/modules/rmf/dependency/RMF/tools/debian/rmf-dev.install new file mode 100644 index 0000000000..03f0beda1f --- /dev/null +++ b/modules/rmf/dependency/RMF/tools/debian/rmf-dev.install @@ -0,0 +1,3 @@ +usr/lib/*/libRMF*.so +usr/include/* +usr/share/RMF/swig diff --git a/modules/rmf/dependency/RMF/tools/debian/rmf.install b/modules/rmf/dependency/RMF/tools/debian/rmf.install new file mode 100644 index 0000000000..6260d155bd --- /dev/null +++ b/modules/rmf/dependency/RMF/tools/debian/rmf.install @@ -0,0 +1,16 @@ +usr/lib/*/libRMF*.so.* +usr/lib/python3*/dist-packages/RMF* +usr/lib/python3*/dist-packages/_RMF* +usr/bin/rmf3_dump +usr/bin/rmf_cat +usr/bin/rmf_frames +usr/bin/rmf_info +usr/bin/rmf_interpolate +usr/bin/rmf_pdb +usr/bin/rmf_show +usr/bin/rmf_signature +usr/bin/rmf_slice +usr/bin/rmf_transform +usr/bin/rmf_update +usr/bin/rmf_validate +usr/bin/rmf_xml diff --git a/modules/rmf/dependency/RMF/tools/debian/rmf.postinst b/modules/rmf/dependency/RMF/tools/debian/rmf.postinst new file mode 100755 index 0000000000..e6b1359397 --- /dev/null +++ b/modules/rmf/dependency/RMF/tools/debian/rmf.postinst @@ -0,0 +1,3 @@ +#!/bin/sh +python3 -m compileall -q /usr/lib/python3.*/dist-packages/RMF.py +python3 -m compileall -q /usr/lib/python3.*/dist-packages/RMF_HDF5.py diff --git a/modules/rmf/dependency/RMF/tools/debian/rmf.prerm b/modules/rmf/dependency/RMF/tools/debian/rmf.prerm new file mode 100755 index 0000000000..b58ebf3b17 --- /dev/null +++ b/modules/rmf/dependency/RMF/tools/debian/rmf.prerm @@ -0,0 +1,4 @@ +#!/bin/sh +rm -f /usr/lib/python3.*/dist-packages/__pycache__/RMF.*.pyc +rm -f /usr/lib/python3.*/dist-packages/__pycache__/RMF_HDF5.*.pyc +rmdir /usr/lib/python3.*/dist-packages/__pycache__ 2>&1 || : diff --git a/modules/rmf/dependency/RMF/tools/debian/rules b/modules/rmf/dependency/RMF/tools/debian/rules new file mode 100755 index 0000000000..6773a32ba3 --- /dev/null +++ b/modules/rmf/dependency/RMF/tools/debian/rules @@ -0,0 +1,45 @@ +#!/usr/bin/make -f +# -*- makefile -*- + +# Uncomment this to turn on verbose mode. +#export DH_VERBOSE=1 + +# Don't use Debian-provided flags for now; they slow down the build +CFLAGS := +CXXFLAGS := +LDFLAGS := + +%: + dh $@ + +override_dh_auto_configure: + mkdir build + cd build && py3_ver=`python3 -c "import sys; print('%d.%d' % sys.version_info[:2])"` \ + && cmake .. 
-DCMAKE_BUILD_TYPE=Release \ + -DCMAKE_INSTALL_PYTHONDIR=/usr/lib/python$${py3_ver}/dist-packages \ + -DCMAKE_INSTALL_PREFIX=/usr -DUSE_PYTHON2=off + +override_dh_auto_build: + $(MAKE) -C build + +override_dh_install: + $(MAKE) -C build DESTDIR=$(CURDIR)/debian/tmp install + # Make sure all Python applications use the system Python in /usr/bin + perl -pi -e 's@^#!.*python.*$$@#!/usr/bin/python3@' debian/tmp/usr/bin/* + dh_install + dh_missing --fail-missing + +execute_after_dh_install: + # Run basic tests on the installation + TOPDIR=`pwd`/debian/tmp \ + && py3_ver=`python3 -c "import sys; print('%d.%d' % sys.version_info[:2])"` \ + && export LD_LIBRARY_PATH="$${TOPDIR}/usr/lib/`uname -m`-linux-gnu" \ + && export PYTHONPATH="$${TOPDIR}/usr/lib/python$${py3_ver}/dist-packages" \ + && "$${TOPDIR}/usr/bin/rmf3_dump" --version \ + && python3 -c "import RMF; assert(hasattr(RMF, '__version__'))" \ + && python3 -c "import RMF; assert(hasattr(RMF, 'get_all_global_coordinates'))" \ + && rm -rf "$${TOPDIR}/usr/lib/python$${py3_ver}/dist-packages/__pycache__" + +override_dh_compress: + # Don't compress example files, since then they won't work! + dh_compress -Xexamples diff --git a/modules/rmf/dependency/RMF/tools/debian/source/format b/modules/rmf/dependency/RMF/tools/debian/source/format new file mode 100644 index 0000000000..163aaf8d82 --- /dev/null +++ b/modules/rmf/dependency/RMF/tools/debian/source/format @@ -0,0 +1 @@ +3.0 (quilt) diff --git a/modules/rmf/dependency/RMF/tools/dev_tools/.github/workflows/build.yml b/modules/rmf/dependency/RMF/tools/dev_tools/.github/workflows/build.yml index bb9873950c..e32144d46a 100644 --- a/modules/rmf/dependency/RMF/tools/dev_tools/.github/workflows/build.yml +++ b/modules/rmf/dependency/RMF/tools/dev_tools/.github/workflows/build.yml @@ -9,7 +9,7 @@ jobs: fail-fast: false matrix: os: [ubuntu-latest] - python-version: ["2.7", "3.7", "3.8", "3.9", "3.10", "3.11"] + python-version: ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12"] runs-on: ${{ matrix.os }} steps: @@ -26,3 +26,5 @@ jobs: py.test --cov=. --cov-branch --cov-report=xml -v . 
flake8 --ignore=E402,W503 --exclude python_tools/reindent.py - uses: codecov/codecov-action@v1 + with: + token: ${{ secrets.CODECOV_TOKEN }} diff --git a/modules/rmf/dependency/RMF/tools/dev_tools/check_standards.py b/modules/rmf/dependency/RMF/tools/dev_tools/check_standards.py index ddda6c0ec2..036b3a02ac 100755 --- a/modules/rmf/dependency/RMF/tools/dev_tools/check_standards.py +++ b/modules/rmf/dependency/RMF/tools/dev_tools/check_standards.py @@ -18,7 +18,7 @@ print("Pygments (http://pygments.org/) library: please install.") print() -from optparse import OptionParser +from argparse import ArgumentParser def _check_do_not_commit(line, filename, num, errors): @@ -187,15 +187,18 @@ def get_all_files(): def main(): - parser = OptionParser() - options, args = parser.parse_args() + parser = ArgumentParser() + parser.add_argument("patterns", metavar="PATTERN", nargs="*", + help="File patterns to check; if unspecified, " + "check all files") + args = parser.parse_args() errors = [] - if len(args) == 0: + if len(args.patterns) == 0: modfiles = get_all_files() print("usage:", sys.argv[0], "file_patterns") else: - modfiles = args + modfiles = args.patterns for pattern in modfiles: expanded = glob.glob(pattern) # rint pattern, expanded diff --git a/modules/rmf/dependency/RMF/tools/dev_tools/cleanup_code.py b/modules/rmf/dependency/RMF/tools/dev_tools/cleanup_code.py index 57098b8fc2..dd42118bee 100755 --- a/modules/rmf/dependency/RMF/tools/dev_tools/cleanup_code.py +++ b/modules/rmf/dependency/RMF/tools/dev_tools/cleanup_code.py @@ -4,7 +4,7 @@ files.""" from __future__ import print_function -from optparse import OptionParser +from argparse import ArgumentParser import subprocess import os import sys @@ -24,8 +24,8 @@ from python_tools.reindent import Reindenter -parser = OptionParser(usage="%prog [options] [FILENAME ...]", - description="""Reformat the given C++ and Python files +parser = ArgumentParser( + description="""Reformat the given C++ and Python files (using the clang-format tool if available and reindent.py, respectively). If the --all option is given, reformat all such files under the current directory. @@ -34,46 +34,48 @@ by giving the -a option. autopep8 is much more aggressive than reindent.py and will fix other issues, such as use of old-style Python syntax. 
""") -parser.add_option("-c", "--clang-format", dest="clang_format", - default="auto", metavar="EXE", - help="The clang-format command.") -parser.add_option("-a", dest="use_ap", action="store_true", default=False, - help="Use autopep8 rather than reindent.py for " - "Python files.") -parser.add_option("--all", dest="all_files", action="store_true", - default=False, - help="Reformat all files under current directory") -parser.add_option("--autopep8", dest="autopep8", - default="auto", metavar="EXE", - help="The autopep8 command.") -parser.add_option("-e", "--exclude", dest="exclude", - default="eigen3:config_templates", metavar="DIRS", - help="Colon-separated list of dirnames to ignore.") -parser.add_option("-v", "--verbose", dest="verbose", action="store_true", - default=False, - help="Print extra info.") -(options, args) = parser.parse_args() -if not args and not options.all_files: +parser.add_argument("-c", "--clang-format", dest="clang_format", + default="auto", metavar="EXE", + help="The clang-format command.") +parser.add_argument("-a", dest="use_ap", action="store_true", default=False, + help="Use autopep8 rather than reindent.py for " + "Python files.") +parser.add_argument("--all", dest="all_files", action="store_true", + default=False, + help="Reformat all files under current directory") +parser.add_argument("--autopep8", dest="autopep8", + default="auto", metavar="EXE", + help="The autopep8 command.") +parser.add_argument("-e", "--exclude", dest="exclude", + default="eigen3:config_templates", metavar="DIRS", + help="Colon-separated list of dirnames to ignore.") +parser.add_argument("-v", "--verbose", dest="verbose", action="store_true", + default=False, + help="Print extra info.") +parser.add_argument("files", metavar="FILENAME", nargs="*", + help="C++ and Python files to reformat.") +args = parser.parse_args() +if not args.files and not args.all_files: parser.error("No files selected") # clang-format-3.4", # autopep8 # search for executables -if options.clang_format == "auto": - options.clang_format = None +if args.clang_format == "auto": + args.clang_format = None for name in ["clang-format-3.4", "clang-format"]: if which(name): - options.clang_format = name + args.clang_format = name break -if options.autopep8 == "auto": - options.autopep8 = None +if args.autopep8 == "auto": + args.autopep8 = None for name in ["autopep8"]: if which(name): - options.autopep8 = name + args.autopep8 = name break -exclude = options.exclude.split(":") +exclude = args.exclude.split(":") error = None @@ -137,10 +139,10 @@ def _do_get_files(glb, cur): def _get_files(glb): match = [] - if len(args) == 0: + if len(args.files) == 0: match = _do_get_files(glb, ".") else: - for a in args: + for a in args.files: if os.path.isdir(a): match += _do_get_files(glb, a) elif a.endswith(glb): @@ -163,8 +165,8 @@ def clean_cpp(path): # skip code that isn't ours if "dependency" in path or "/eigen3/" in path: return - if options.clang_format: - contents = _run([options.clang_format, "--style=Google", path]) + if args.clang_format: + contents = _run([args.clang_format, "--style=Google", path]) else: contents = open(path, "r").read() contents = contents.replace("% template", "%template") @@ -172,8 +174,8 @@ def clean_cpp(path): def clean_py(path): - if options.use_ap and options.autopep8: - contents = _run([options.autopep8, "--aggressive", "--aggressive", + if args.use_ap and args.autopep8: + contents = _run([args.autopep8, "--aggressive", "--aggressive", path]) else: r = Reindenter(open(path)) @@ -185,25 +187,25 @@ 
def clean_py(path): def main(): - if options.verbose: - if options.autopep8 is None: + if args.verbose: + if args.autopep8 is None: print("autopep8 not found") else: - print("autopep8 is `%s`" % options.autopep8) - if options.clang_format is None: + print("autopep8 is `%s`" % args.autopep8) + if args.clang_format is None: print("clang-format not found") else: - print("clang-format is `%s`" % options.clang_format) + print("clang-format is `%s`" % args.clang_format) tp = ThreadPool() - if args: - for f in args: + if args.files: + for f in args.files: if f.endswith(".py"): tp.add_task(clean_py, f) elif f.endswith(".h") or f.endswith(".cpp"): tp.add_task(clean_cpp, f) - elif options.all_files: + elif args.all_files: for f in _get_files(".py"): tp.add_task(clean_py, f) for f in _get_files(".h") + _get_files(".cpp"): diff --git a/modules/rmf/dependency/RMF/tools/dev_tools/git/setup_git.py b/modules/rmf/dependency/RMF/tools/dev_tools/git/setup_git.py index ec9451b739..6821d3aee3 100755 --- a/modules/rmf/dependency/RMF/tools/dev_tools/git/setup_git.py +++ b/modules/rmf/dependency/RMF/tools/dev_tools/git/setup_git.py @@ -4,21 +4,21 @@ import os.path import subprocess import glob -from optparse import OptionParser +from argparse import ArgumentParser import shutil dev_tools_path = os.path.join("tools", "dev_tools") -opt = OptionParser() -opt.add_option("-g", "--global", - action="store_true", dest="glob", default=False, - help="Set global git settings instead of repo " - "settings [default]") +opt = ArgumentParser() +opt.add_argument("-g", "--global", + action="store_true", dest="glob", default=False, + help="Set global git settings instead of repo " + "settings [default]") -(options, args) = opt.parse_args() +args = opt.parse_args() -if options.glob: +if args.glob: git_config = "git config --global --replace-all" config_contents = "" else: diff --git a/modules/rmf/dependency/RMF/tools/new-release.txt b/modules/rmf/dependency/RMF/tools/new-release.txt index 0a137c7282..4601e3778b 100644 --- a/modules/rmf/dependency/RMF/tools/new-release.txt +++ b/modules/rmf/dependency/RMF/tools/new-release.txt @@ -1,6 +1,7 @@ To make a new release: -- Update ChangeLog.md and tools/RMF.spec with release date and features. +- Update ChangeLog.md tools/RMF.spec tools/debian/changelog with release + date and features. - Add version number as RMF_VERSION_(MAJOR,MINOR,MICRO) to CMakeLists.txt. 
 - git push origin develop
 - Make sure all CI passes
@@ -12,3 +13,9 @@ To make a new release:
 - Tag the new release:
   - git tag -s -u E6414C85 x.y.z; git push origin x.y.z
 - Make new release on GitHub
+- Check that Zenodo is updated (should be automatic)
+- Build an SRPM from the spec file tools/RMF.spec and the .tar.gz from
+  the GitHub release page, and upload to COPR
+- Build Debian source packages and upload to PPA
+- Open PR on conda-forge to update the conda package
+- Update the Homebrew package and build bottles
diff --git a/modules/sampcon b/modules/sampcon
index af5ae311b5..5603c11554 160000
--- a/modules/sampcon
+++ b/modules/sampcon
@@ -1 +1 @@
-Subproject commit af5ae311b55af2236f6469a41428feb3afaf339e
+Subproject commit 5603c1155414856093cee025f88466bd221f8083
diff --git a/modules/saxs/src/ChiFreeScore.cpp b/modules/saxs/src/ChiFreeScore.cpp
index 11cf7bb103..1260b8cb27 100644
--- a/modules/saxs/src/ChiFreeScore.cpp
+++ b/modules/saxs/src/ChiFreeScore.cpp
@@ -7,7 +7,7 @@
 #include
 #include
-#include <boost/random/uniform_real.hpp>
+#include <boost/random/uniform_real_distribution.hpp>
 #include

 IMPSAXS_BEGIN_NAMESPACE
@@ -29,9 +29,10 @@ double ChiFreeScore::compute_score(const Profile* exp_profile,
   }
   const_cast<ChiFreeScore*>(this)->last_scale_updated_ = false;
-  boost::uniform_real<> uni_dist(0, 1);
-  boost::variate_generator<RandomNumberGenerator&, boost::uniform_real<> >
-      uni(IMP::random_number_generator, uni_dist);
+  boost::random::uniform_real_distribution<> uni_dist(0, 1);
+  boost::variate_generator<RandomNumberGenerator&,
+      boost::random::uniform_real_distribution<> > uni(
+      IMP::random_number_generator, uni_dist);
   Vector > chis(K_);
   unsigned int bin_size = std::floor((double)exp_profile->size()) / ns_;
diff --git a/modules/saxs/src/RatioVolatilityScore.cpp b/modules/saxs/src/RatioVolatilityScore.cpp
index fa3da59888..2b47fede7a 100644
--- a/modules/saxs/src/RatioVolatilityScore.cpp
+++ b/modules/saxs/src/RatioVolatilityScore.cpp
@@ -7,7 +7,7 @@
 #include
 #include
-#include <boost/random/uniform_real.hpp>
+#include <boost/random/uniform_real_distribution.hpp>
 #include

 IMPSAXS_BEGIN_NAMESPACE
diff --git a/modules/score_functor/include/DistancePairScore.h b/modules/score_functor/include/DistancePairScore.h
index 0bc3770994..5f158864e2 100644
--- a/modules/score_functor/include/DistancePairScore.h
+++ b/modules/score_functor/include/DistancePairScore.h
@@ -2,7 +2,7 @@
  * \file IMP/score_functor/DistancePairScore.h
  * \brief A Score on the distance between a pair of particles.
  *
- * Copyright 2007-2022 IMP Inventors. All rights reserved.
+ * Copyright 2007-2024 IMP Inventors. All rights reserved.
*/ #ifndef IMPSCORE_FUNCTOR_DISTANCE_PAIR_SCORE_H @@ -70,7 +70,8 @@ inline double DistancePairScore::evaluate_index( Model *m, const ParticleIndexPair &p, DerivativeAccumulator *da) const { algebra::Vector3D delta = - m->get_sphere(p[0]).get_center() - m->get_sphere(p[1]).get_center(); + m->get_sphere(std::get<0>(p)).get_center() + - m->get_sphere(std::get<1>(p)).get_center(); double sq = delta.get_squared_magnitude(); if (ds_.get_is_trivially_zero(m, p, sq)) { return 0; @@ -85,8 +86,8 @@ inline double DistancePairScore::evaluate_index( } else { uv = algebra::get_zero_vector_d<3>(); } - m->add_to_coordinate_derivatives(p[0], uv * sp.second, *da); - m->add_to_coordinate_derivatives(p[1], -uv * sp.second, *da); + m->add_to_coordinate_derivatives(std::get<0>(p), uv * sp.second, *da); + m->add_to_coordinate_derivatives(std::get<1>(p), -uv * sp.second, *da); return sp.first; } else { return ds_.get_score(m, p, dist); diff --git a/modules/score_functor/include/DistancePairScoreWithCache.h b/modules/score_functor/include/DistancePairScoreWithCache.h index d0b2e022f4..bba3de33b6 100644 --- a/modules/score_functor/include/DistancePairScoreWithCache.h +++ b/modules/score_functor/include/DistancePairScoreWithCache.h @@ -129,7 +129,8 @@ DistancePairScoreWithCache::evaluate_index_with_cache( Model *m, const ParticleIndexPair &p, DerivativeAccumulator *da) const { algebra::Vector3D delta = - m->get_sphere(p[0]).get_center() - m->get_sphere(p[1]).get_center(); + m->get_sphere(std::get<0>(p)).get_center() + - m->get_sphere(std::get<1>(p)).get_center(); double sq = delta.get_squared_magnitude(); if (ds_.get_is_trivially_zero_with_cache(m, p, sq)) { return 0; @@ -145,8 +146,8 @@ DistancePairScoreWithCache::evaluate_index_with_cache( } else { uv = algebra::get_zero_vector_d<3>(); } - m->add_to_coordinate_derivatives(p[0], uv * sp.second, *da); - m->add_to_coordinate_derivatives(p[1], -uv * sp.second, *da); + m->add_to_coordinate_derivatives(std::get<0>(p), uv * sp.second, *da); + m->add_to_coordinate_derivatives(std::get<1>(p), -uv * sp.second, *da); return sp.first; } else { return ds_.get_score_with_cache(m, p, dist); diff --git a/modules/score_functor/include/OrientedSoap.h b/modules/score_functor/include/OrientedSoap.h index 4fbdecbb73..7c8c2c11d1 100644 --- a/modules/score_functor/include/OrientedSoap.h +++ b/modules/score_functor/include/OrientedSoap.h @@ -86,8 +86,8 @@ class OrientedSoap : public ScoreWithCache { int distbin = potential_.get_index(internal::SoapPotential::DISTANCE, distance); if (distbin >= 0) { - atom::Atom a1(m, pis[0]); - atom::Atom a2(m, pis[1]); + atom::Atom a1(m, std::get<0>(pis)); + atom::Atom a2(m, std::get<1>(pis)); // Find the other atoms (if any) in the two doublet(s) that (a1,a2) // are members of const DList &doublets1 = doublets_.get_for_atom(a1); diff --git a/modules/score_functor/include/PointToSphereDistance.h b/modules/score_functor/include/PointToSphereDistance.h index d78f3e8576..ce1b0b1a2c 100644 --- a/modules/score_functor/include/PointToSphereDistance.h +++ b/modules/score_functor/include/PointToSphereDistance.h @@ -34,23 +34,24 @@ class PointToSphereDistance : public BaseDistanceScore { PointToSphereDistance(BaseDistanceScore base) : P(base) {} double get_score(Model *m, const ParticleIndexPair &pi, double distance) const { - return P::get_score(m, pi, distance - get_radius(m, pi[1])); + return P::get_score(m, pi, distance - get_radius(m, std::get<1>(pi))); } DerivativePair get_score_and_derivative(Model *m, const ParticleIndexPair &pi, double distance) const { 
- return P::get_score_and_derivative(m, pi, distance - get_radius(m, pi[1])); + return P::get_score_and_derivative(m, pi, + distance - get_radius(m, std::get<1>(pi))); } double get_maximum_range(Model *m, const ParticleIndexPair &pi) const { - return P::get_maximum_range(m, pi) + get_radius(m, pi[1]); + return P::get_maximum_range(m, pi) + get_radius(m, std::get<1>(pi)); } bool get_is_trivially_zero(Model *m, const ParticleIndexPair &pi, double squared_distance) const { return squared_distance > algebra::get_squared(P::get_maximum_range(m, pi) + - get_radius(m, pi[1])); + get_radius(m, std::get<1>(pi))); } }; diff --git a/modules/score_functor/include/SphereDistance.h b/modules/score_functor/include/SphereDistance.h index 05e5991637..245e2a423a 100644 --- a/modules/score_functor/include/SphereDistance.h +++ b/modules/score_functor/include/SphereDistance.h @@ -33,8 +33,8 @@ class SphereDistance : public BaseDistanceScore { typedef BaseDistanceScore P; static double get_rsum(Model *m, const ParticleIndexPair &pi) { - return m->get_sphere(pi[0]).get_radius() + - m->get_sphere(pi[1]).get_radius(); + return m->get_sphere(std::get<0>(pi)).get_radius() + + m->get_sphere(std::get<1>(pi)).get_radius(); } public: diff --git a/modules/score_functor/include/Statistical.h b/modules/score_functor/include/Statistical.h index 93e1450a64..964a89ee6d 100644 --- a/modules/score_functor/include/Statistical.h +++ b/modules/score_functor/include/Statistical.h @@ -90,8 +90,8 @@ class Statistical : public Score { if (distance >= threshold_ || distance < 0.001) { return 0; } - int pt = m->get_attribute(key_, pp[0]); - int lt = m->get_attribute(key_, pp[1]); + int pt = m->get_attribute(key_, std::get<0>(pp)); + int lt = m->get_attribute(key_, std::get<1>(pp)); if (pt == -1 || lt == -1) return 0; return table_->get_score(pt, lt, distance); } @@ -101,8 +101,8 @@ class Statistical : public Score { if (distance >= threshold_ || distance < 0.001) { return DerivativePair(0, 0); } - int pt = m->get_attribute(key_, pp[0]); - int lt = m->get_attribute(key_, pp[1]); + int pt = m->get_attribute(key_, std::get<0>(pp)); + int lt = m->get_attribute(key_, std::get<1>(pp)); if (pt == -1 || lt == -1) return DerivativePair(0, 0); return table_->get_score_with_derivative(pt, lt, distance); } diff --git a/modules/score_functor/include/SurfaceDistancePairScore.h b/modules/score_functor/include/SurfaceDistancePairScore.h index fb3e7f9d35..225f669f55 100644 --- a/modules/score_functor/include/SurfaceDistancePairScore.h +++ b/modules/score_functor/include/SurfaceDistancePairScore.h @@ -74,9 +74,10 @@ inline double SurfaceDistancePairScore::evaluate_index( DerivativeAccumulator *da) const { algebra::Vector3D delta; // normal vector from surface to point - double dist = get_distance(m->get_sphere(p[0]).get_center(), - internal::get_direction(m, p[0]), - m->get_sphere(p[1]).get_center(), &delta); + double dist = get_distance( + m->get_sphere(std::get<0>(p)).get_center(), + internal::get_direction(m, std::get<0>(p)), + m->get_sphere(std::get<1>(p)).get_center(), &delta); // Using squared distance for trivial check currently doesn't work for surfaces // if (ds_.get_is_trivially_zero(m, p, dist * dist)) { @@ -85,8 +86,8 @@ inline double SurfaceDistancePairScore::evaluate_index( if (da) { std::pair sp = ds_.get_score_and_derivative(m, p, dist); - m->add_to_coordinate_derivatives(p[0], -delta * sp.second, *da); - m->add_to_coordinate_derivatives(p[1], delta * sp.second, *da); + m->add_to_coordinate_derivatives(std::get<0>(p), -delta * sp.second, 
*da);
+    m->add_to_coordinate_derivatives(std::get<1>(p), delta * sp.second, *da);
     return sp.first;
   } else {
     return ds_.get_score(m, p, dist);
diff --git a/modules/score_functor/include/internal/PMFTable.h b/modules/score_functor/include/internal/PMFTable.h
index 7a6d1ac475..cc758dae8b 100644
--- a/modules/score_functor/include/internal/PMFTable.h
+++ b/modules/score_functor/include/internal/PMFTable.h
@@ -54,8 +54,8 @@ struct PMFTable : public Object {
   }
   const RawOpenCubicSpline &get(int i, int j) const {
     Array<2, int> is;
-    is[0] = i;
-    is[1] = j;
+    std::get<0>(is) = i;
+    std::get<1>(is) = j;
     typename Storage::ExtendedIndex ei(is.begin(), is.end());
     return data_[data_.get_index(ei)];
   }
@@ -137,8 +137,8 @@ struct PMFTable : public Object {
   }
   order(i, j);
   Array<2, int> is;
-  is[0] = i;
-  is[1] = j;
+  std::get<0>(is) = i;
+  std::get<1>(is) = j;
   typename Storage::ExtendedIndex ei(is.begin(), is.end());
   if (!data_.get_has_index(ei)) {
     data_.add_voxel(ei, score_functor::internal::RawOpenCubicSpline(
diff --git a/modules/scratch/CMakeLists.txt b/modules/scratch/CMakeLists.txt
index 012ffdc613..defd37a625 100644
--- a/modules/scratch/CMakeLists.txt
+++ b/modules/scratch/CMakeLists.txt
@@ -1,14 +1,19 @@
 # Are we running cmake from this directory (out of tree build) ?
 if(CMAKE_SOURCE_DIR STREQUAL CMAKE_CURRENT_SOURCE_DIR)
-  cmake_minimum_required(VERSION 2.8.3)
+  cmake_minimum_required(VERSION 2.8.12...3.6.0)
+  project(imp_module)
   if(POLICY CMP0058)
     cmake_policy(SET CMP0058 NEW)
   endif(POLICY CMP0058)
+  if(POLICY CMP0053)
+    cmake_policy(SET CMP0053 NEW)
+  endif(POLICY CMP0053)
+
   set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} ${PROJECT_SOURCE_DIR}/cmake_modules)
-  set(SWIG_EXECUTABLE swig CACHE STRING "Swig program")
+  set(SWIG_EXECUTABLE swig CACHE STRING "SWIG program")
   find_package(IMP REQUIRED)
   include(${IMP_USE_FILE})
@@ -23,7 +28,7 @@ if(CMAKE_SOURCE_DIR STREQUAL CMAKE_CURRENT_SOURCE_DIR)
   include_directories(SYSTEM ${IMP_INCLUDE_DIR})
   include_directories("${CMAKE_BINARY_DIR}/include")
-  imp_build_module(${CMAKE_SOURCE_DIR})
+  imp_build_module(${CMAKE_SOURCE_DIR} scratch)
 else()
   include(ModuleBuild.cmake)
diff --git a/modules/spatiotemporal/.gitignore b/modules/spatiotemporal/.gitignore
new file mode 100644
index 0000000000..42b2633026
--- /dev/null
+++ b/modules/spatiotemporal/.gitignore
@@ -0,0 +1,19 @@
+*.os
+*.o
+*.pyc
+*~
+*.old
+*.orig
+*.gcno
+*.gcda
+test/CMakeLists.txt
+src/CMakeLists.txt
+ModuleBuild.cmake
+CMakeModules/
+benchmark/CMakeLists.txt
+bin/CMakeLists.txt
+dependency/CMakeLists.txt
+examples/CMakeLists.txt
+pyext/CMakeLists.txt
+utility/CMakeLists.txt
+*/Files.cmake
diff --git a/modules/spatiotemporal/.imp_info.py b/modules/spatiotemporal/.imp_info.py
new file mode 100644
index 0000000000..0224fd89fb
--- /dev/null
+++ b/modules/spatiotemporal/.imp_info.py
@@ -0,0 +1,3 @@
+{
+  "name": "IMP.spatiotemporal"
+}
diff --git a/modules/spatiotemporal/README.md b/modules/spatiotemporal/README.md
new file mode 100644
index 0000000000..4e8db2878f
--- /dev/null
+++ b/modules/spatiotemporal/README.md
@@ -0,0 +1,260 @@
+\brief Spatiotemporal scoring in IMP
+
+This flexible code is for analyzing stepwise spatiotemporal models, such as those we used to compute our model of NPC assembly. If you are interested in using this code, please check out our examples. Our code is modular and easy to modify, as described in the spatiotemporal folder.
+
+Dependencies for making spatiotemporal models:
+- numpy
+- os
+- warnings
+- sys
+- itertools
+- pandas
+
+Dependencies for visualizing spatiotemporal models as graphs:
+- matplotlib
+- graphviz (https://graphviz.org)
+
+# Info
+
+_Author(s)_: Andrew Latham and Jeremy Tempkin
+
+_Maintainer_: alatham13
+
+_License_: LGPL. This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version.
+
+_Publications_:
+- S Otsuka, et al., A quantitative map of nuclear pore assembly reveals two distinct mechanisms. Nature 613, 575–581 (2023).
+- A Latham, et al., in preparation. (2024).
+
+# Spatiotemporal scoring in IMP
+Here, we describe the spatiotemporal modeling package. The goal of this package is to read in a variety of input data and convert this data into a weighted graph of states. In the following, we go through each script, along with the functions available.
+
+## create_DAG.py
+
+### create_DAG(state_dict,input_dir='', scorestr='_scores.log', output_dir='',spatio_temporal_rule=False, subcomplexstr='.config',expected_subcomplexes=[],score_comp=False, exp_comp_map={},out_cdf=True, out_labeled_pdf=True, out_pdf=False, npaths=0,draw_dag=True)
+This function streamlines the process of creating a graph by performing all the necessary steps and saving relevant input to files. Features of this function are walked through in example/toy/Simple_spatiotemporal_example.ipynb.
+
+#### Possible inputs:
+
+#### Inputs related to model input / calculation
+state_dict - dictionary that defines the spatiotemporal model. The keys are strings that correspond to each time point in the stepwise temporal process.
+Keys should be ordered according to the steps in the spatiotemporal process. The values are integers that correspond to the number of possible states at that timepoint.
+Scores for each model are expected to be stored in files named {state}_{time}{scorestr},
+where state is an integer from 1 to the number of states at that time point, time is the key in the dictionary, and
+scorestr is a trailing string, which is assumed to be constant for all states (see the example after these inputs).
+
+input_dir - string, directory where the data is stored. Empty string assumes current working directory.
+
+scorestr - string, trailing characters at the end of the file with scores for each stage of the spatiotemporal model (default: '_scores.log').
+
+output_dir - string, directory where the output will be written. Empty string assumes the same directory as the input_dir.
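+For example, a hypothetical two-timepoint model and the score files create_DAG would then look for (the time names are made up; the file naming follows the {state}_{time}{scorestr} convention described above):
+
+```python
+# Hypothetical model: 2 possible states at time '0min', 3 at time '5min'.
+# Keys are ordered time points; values are the number of states per time.
+state_dict = {'0min': 2, '5min': 3}
+
+# With the default scorestr='_scores.log', scores are then read from files
+# named {state}_{time}{scorestr} inside input_dir:
+#   1_0min_scores.log, 2_0min_scores.log,
+#   1_5min_scores.log, 2_5min_scores.log, 3_5min_scores.log
+```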
As such, the naming scheme should be such that the keys of exp_comp_map are substrings of expected_subcomplexes. The values of exp_comp_map should correspond to a csv file for each subcomplex with protein copy numbers. Each csv file should have 3 columns: +1) 'Time' - should correspond to the keys of state_dict, 2) 'mean' - mean copy number from experimental data, and 3) std - standard deviation from experimental data + +#### Inputs related to model output (all optional) +out_cdf - Boolean to determine whether or not to write out the cumulative distribution function (cdf) for the graph (default: True) + +out_labeled_pdf - Boolean to determine whether to output the labeled pdf file, which includes both the pdf and the ordered states visited along each path (default: True). + +labeled_pdf_fn - string, name of the file for the labeled pdf (default: 'labeled_pdf.txt') + +out_pdf - Boolean to determine whether or not to write out the probability distribution function (pdf) for the graph (default: False) + +npaths - int, write out the states along the n most likely paths, based on the pdf (default: 0) + +draw_dag - Boolean to determine whether or not to write out a directed acyclic graph (dag) to a file (default: True) + +## analysis.py + +### temporal_precision(labeled_pdf1_fn,labeled_pdf2_fn,output_fn) +Function that reads in two labeled_pdfs from create_DAG and returns the temporal_precision, defined as the probability overlap between two pathway models. + +labeled_pdf1_fn - labeled_pdf from one independent sampling + +labeled_pdf2_fn - labeled_pdf from another independent sampling + +output_fn - name of output file (default: 'temporal_precision.txt') + +### purity(labeled_pdf_fn,output_fn) +Function that reads in one labeled_pdf from create_DAG and returns the purity, defined as the sum of the squared probability of all trajectories. + +labeled_pdf_fn - labeled_pdf from the total model + +output_fn - name of output file (default: 'purity.txt') + +## graphNode.py + +### graphNode +graphNode is a class that defines how states are connected in a spatiotemporal model. + +#### A graphNode can possess the following properties: +edges - set, which describes connections to other nodes + +scores - list of floats, mean scores based on priors and likelihoods used for modeling + +time - string, describes the time of the node + +index - int, unique numbering for each node + +label - int, numbering for each node that starts at 1 at each time point. This will correspond to the number in the input data + +components - List of proteins/subcomplexes in this particular node. Must be a subset of expected_subcomplexes + +expected_subcomplexes - list of possible proteins/subcomplexes in the overall system + +#### init_graphNode(time, label, scorestr, subcomplexstr, expected_subcomplexes): +Function that initiates a graph node with specific time, label, and expected_subcomplexes. Scores and components are extracted from files named scorestr and subcomplexstr respectively. Returns a single graphNode object. + +### draw_edge(nodeA, nodeB, spatio_temporal_rule) +Draws an edge between graphNode objects nodeA and nodeB. If spatio_temporal_rule, the edge will only be drawn if the components of nodeA are a subset of the components of nodeB. + +## composition_scoring.py + +### calc_likelihood(exp_comp_map,nodes): +Function that adds a score for the compositional likelihood for all states represented as nodes in the graph.
The composition likelihood assumes a Gaussian distribution for copy number of each protein or subcomplex with means and standard deviations derived from experiment. Returns the nodes, with the new weights added. + +nodes - list of graphNode objects, which have already been initiated with static scores + +exp_comp_map - dictionary, which describes protein stoichiometry. The key describes the protein, which should correspond to names within the expected_subcomplexes. Only copy numbers for proteins or subcomplexes included in this dictionary will be scored. For each of these proteins, a csv file should be provided with protein copy number data. The csv file should have 3 columns, 1) "Time", which matches up to the possible times in the graph, 2) "mean", the average protein copy number at that time point from experiment, and 3) "std", the standard deviation of that protein copy number from experiment. + +### composition_likelihood_function(mean, std, prots, node): +Function that calculates the likelihood of an individual node, used by calc_likelihood(). + +mean - dictionary of dictionaries where the first key is the protein, the second key is the time, and the expected mean copy number from experiment is returned. + +std - dictionary of dictionaries where the first key is the protein, the second key is the time, and the expected standard deviation of protein copy number from experiment is returned. + +prots - list of proteins or subcomplexes which will be scored according to this likelihood function + +node - the graphNode object for which the likelihood will be calculated. + +### get_state(subcomplex_components,prot): +Function to calculate how many times a protein appears in a list of proteins + +prot - string, protein or subcomplex we are interested in finding + +subcomplex_components - subcomplexes or components in a given node, which can be accessed by graphNode.get_subcomplex_components() + +## score_graph.py + +### score_graph(nodes,keys): +Function to score a graph based on nodes, which has scores and edges, as well as keys, which is a list of the states visited. Note that all edges must be drawn and scores must be added to nodes before calling this function. + +nodes - list of graphNode objects, which has been initialized with all weights and edges + +keys - list of all ordered states visited along the graph. Paths will be determined in sequential order passed to this function. + +Returns 3 objects: + +all_paths - list of all paths through the graph. Each path is a list of graphNode objects that correspond to the states visited along the path. + +path_prob - list of probabilities for each path, ordered in the same order as all_paths + +path_scores - list of tuples, where the first object is the path (list of graphNode objects for each state along the trajectory), and the second object is the score of the path, which can be used to calculate the probability. + +### get_graph_as_dict(nodes): +Converts a list of graphNode objects (nodes), which have been initiated with scores and edges, into a dictionary representation of a graph (graph). Each node in the graph is a key, which returns edges in the next state. + +### find_all_paths(graph, start, end, path=[]): +Finds all paths between nodes, which already have edges drawn between them. + +graph - dictionary representation of the graph, acquired in get_graph_as_dict() + +start - candidate starting graphNode + +end - candidate ending graphNode + +path - list of graphNodes on the path, which is defined recursively.
+ +Returns all paths that exist between the starting node and ending node + +## write_output.py + +### write_cdf(out_cdf,cdf_fn,graph_prob): +Function to output the cumulative distribution function (cdf) + +out_cdf - bool, writes cdf if true + +cdf_fn - str, filename of cdf + +graph_prob - list of probabilities for each path, (path_prob from score_graph()) + +### write_pdf(out_pdf,pdf_fn,graph_prob): +Function to output the probability distribution function (pdf) + +out_pdf - bool, writes pdf if true + +pdf_fn - str, filename of pdf + +graph_prob - list of probabilities for each path, (path_prob from score_graph()) + +### write_labeled_pdf(out_labeled_pdf,labeled_pdf_fn,graph,graph_prob): +Function to output the labeled probability distribution function (pdf) + +out_labeled_pdf - bool, writes labeled_pdf if true + +labeled_pdf_fn - str, filename of labeled_pdf + +graph - list of graphNode objects visited for each path, (all_paths from score_graph()) + +graph_prob - list of probabilities for each path, (path_prob from score_graph()) + +### write_final_npaths(npaths,npath_fn,graph_scores,graph_prob): +Function to output a file with all states for each of the n most likely paths + +npaths - int, number of paths to output + +npath_fn - str, name of the file for all paths + +graph_scores - list of tuples, where the first object is the path (list of graphNode objects for each state along the trajectory), and the second object is the score of the path, which can be used to calculate the probability. (path_scores from score_graph()) + +graph_prob - list of probabilities for each path, (path_prob from score_graph()) + +### draw_dag(dag_fn, nodes, paths, path_prob, keys, heatmap=True, colormap="Purples", penscale=0.6, arrowsize=1.2, fontname="Helvetica", fontsize="18", height="0.6", width="0.6", draw_label=True) +Function to render the DAG with heatmap information. + +dag_fn - string with the filename path. + +nodes - list of graphNode objects. + +paths - list of lists containing the paths. + +path_prob - list of probabilities for each path, (path_prob from score_graph()) + +keys - states visited in the graph (list of keys to the state_dict) + +heatmap - Boolean to determine whether or not to write the dag with a heatmap based on the probability of each state (default: True) + +colormap - string, colormap used by the dag to represent probability. Chooses from those available in matplotlib +(https://matplotlib.org/stable/users/explain/colors/colormaps.html) (default: "Purples"). + +draw_label - Boolean to determine whether or not to draw state labels on the dag + +fontname - string, font used for the labels on the dag + +fontsize - string, font size used for the labels on the dag + +penscale - float, size of the pen used to draw arrows on the dag + +arrowsize - float, size of arrows connecting states on the dag + +height - string, height of each node on the dag + +width - string, width of each node on the dag + +### draw_dag_in_graphviz(nodes, coloring=None, draw_label=True, fontname="Helvetica", fontsize="18", penscale=0.6, arrowsize=1.2, height="0.6",width="0.6"): +Function used by draw_dag() to render the graph using graphviz. Takes a list of graphNodes (nodes) and initializes the nodes and edges. Coloring is expected to be a list of RGBA strings specifying how to color each node. Expected to be same length as nodes.
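+
+As an illustration of the two analysis functions above (a sketch only; the
+run directories are hypothetical, and each labeled_pdf.txt is assumed to come
+from an independent run of create_DAG):
+
+```python
+from IMP.spatiotemporal import analysis
+
+# probability overlap between two independently sampled pathway models
+analysis.temporal_precision('run1/labeled_pdf.txt', 'run2/labeled_pdf.txt',
+                            output_fn='temporal_precision.txt')
+
+# sum of squared trajectory probabilities of one total model
+analysis.purity('run1/labeled_pdf.txt', output_fn='purity.txt')
+```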
+ + diff --git a/modules/spatiotemporal/dependencies.py b/modules/spatiotemporal/dependencies.py new file mode 100644 index 0000000000..62a82be036 --- /dev/null +++ b/modules/spatiotemporal/dependencies.py @@ -0,0 +1,4 @@ +required_modules = '' +required_dependencies = '' +optional_dependencies = '' +python_only = True diff --git a/modules/spatiotemporal/examples/README.md b/modules/spatiotemporal/examples/README.md new file mode 100644 index 0000000000..7256845c21 --- /dev/null +++ b/modules/spatiotemporal/examples/README.md @@ -0,0 +1,5 @@ +# Spatiotemporal scoring in IMP + +## toy + +A toy model to demonstrate the use of our spatiotemporal code diff --git a/modules/spatiotemporal/examples/toy/README.md b/modules/spatiotemporal/examples/toy/README.md new file mode 100644 index 0000000000..2192e0b1d8 --- /dev/null +++ b/modules/spatiotemporal/examples/toy/README.md @@ -0,0 +1,4 @@ +# Spatiotemporal scoring in IMP +Tutorial for scoring a spatiotemporal model using our spatiotemporal package. We assume that static structural models have already been generated, following existing procedures (https://integrativemodeling.org/tutorials/actin/). + +The Jupyter notebook (Simple_spatiotemporal_example.ipynb) walks through the basics of using the create_DAG function, including the necessary and optional inputs, and how to interpret the resulting model. diff --git a/modules/spatiotemporal/examples/toy/Simple_spatiotemporal_example.ipynb b/modules/spatiotemporal/examples/toy/Simple_spatiotemporal_example.ipynb new file mode 100644 index 0000000000..b182d7332b --- /dev/null +++ b/modules/spatiotemporal/examples/toy/Simple_spatiotemporal_example.ipynb @@ -0,0 +1,299 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Example of spatiotemporal integrative modeling\n", + "\n", + "Here, we walk through an example of spatiotemporal integrative modeling. For a toy model, we assume there are 2 proteins: A and B. We are interested in assembly of the trimer of these proteins.\n", + "\n", + "For simplicity, we will assume that data suggests the assembly proceeds as a monomer->dimer->trimer, and that the data for these states was collected at set time points, namely 0 minutes, 5 minutes, and 10 minutes. Based on these assumptions, our model can be present in 7 possible states across 3 time points (2 states at 0 minutes, 3 states at 5 minutes, and 2 states at 10 minutes), as summarized in the image below:\n", + "\n", + "![title](image/toy_model.pdf)\n", + "\n", + "For each of these combinations of proteins A and B, we would generate integrative models of each complex, following procedures such as those in modeling the Actin complex (https://integrativemodeling.org/tutorials/actin/). We will assume that this process was already done.
The scores of each model were saved as *_scores.log and the stoichiometry for each model is described by *.config.\n", + "\n", + "### Model without temporal scoring\n", + "Using just this data as an input, we can compute our spatiotemporal model with one line of code:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Initializing graph...\n", + "Done.\n", + "Scoring directed acyclic graph...\n", + "Done.\n", + "Writing output...\n", + "Done.\n" + ] + } + ], + "source": [ + "# import relevant modules\n", + "import sys\n", + "import IMP.spatiotemporal as spatiotemporal\n", + "import os\n", + "\n", + "# Always run from main_dir\n", + "main_dir=os.getcwd()\n", + "os.chdir(main_dir)\n", + "\n", + "# Input variables. \n", + "# dict is the state dictionary, which returns the number of states at each time point.\n", + "# Input is the directory where the data is stored. \n", + "# Output is the directory to which the output will be written\n", + "dict={'0min':2,'5min':3,'10min':2}\n", + "input='data'\n", + "output='../output_notemp'\n", + "\n", + "# create DAG\n", + "nodes,graph,graph_prob,graph_scores=spatiotemporal.create_DAG(dict,input_dir=input,output_dir=output)\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Outputs and data analysis\n", + "\n", + "#### Output files\n", + "Note that the folder output_notemp has 4 different files. Each of these files contains different ways of describing or visualizing the graph.\n", + "\n", + "cdf.txt - stores the cumulative distribution function (cdf) over all trajectories, with each row corresponding to the next most likely trajectory being added to the total cdf.\n", + "\n", + "labeled_pdf.txt - stores the probability distribution function over all trajectories. Each row has 2 columns. The first column describes each state along the trajectory, divided by '|'. The next column is the probability of that trajectory.\n", + "\n", + "dag_heatmap/dag_heatmap.eps - visualization of the graph as a heatmap, created by graphviz. By default, each state is represented as a sphere with darker spheres corresponding to higher probability." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Model with temporal scoring\n", + "Closer analysis of the output above shows that not all trajectories are purely associative, meaning that proteins would have to dissociate from the assembling complex along some trajectories. We can filter out these unwanted trajectories using the spatio_temporal_rule keyword:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# go to main_dir\n", + "os.chdir(main_dir)\n", + "\n", + "# Input variables. \n", + "dict={'0min':2,'5min':3,'10min':2}\n", + "input='data'\n", + "output='../output_temp'\n", + "# expected_subcomplexes is a list of all possible subcomplex strings in the model. Should match the configuration files\n", + "subcomplexes=['A1','A2','B1','B2']\n", + "\n", + "nodes,graph,graph_prob,graph_scores=spatiotemporal.create_DAG(dict,input_dir=input,output_dir=output,spatio_temporal_rule=True,expected_subcomplexes=subcomplexes)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Adding stoichiometric data\n", + "Sometimes, experiments such as fluorescence correlation spectroscopy (FCS) can provide time-dependent stoichiometric data.
In such cases, we can utilize this data to inform our spatiotemporal model. By adding the score_comp keyword, we can include a likelihood in our Bayesian scoring function that accounts for protein composition. Note that the precision of our model increased because of the additional data provided:" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Initializing graph...\n", + "Done.\n", + "Calculating composition likelihood...\n", + "Done.\n", + "Scoring directed acyclic graph...\n", + "Done.\n", + "Writing output...\n", + "Done.\n" + ] + } + ], + "source": [ + "# go to main_dir\n", + "os.chdir(main_dir)\n", + "\n", + "# Input variables. \n", + "dict={'0min':2,'5min':3,'10min':2}\n", + "input='data'\n", + "output='../output_stoich'\n", + "subcomplexes=['A1','A2','B1','B2']\n", + "# exp_comp_map is a dictionary that describes protein stoichiometry. The key describes the protein, which should correspond to names within the expected_subcomplexes. For each of these proteins, a csv file should be provided with protein copy number data\n", + "exp_comp={'A':'exp_comp_A.csv','B':'exp_comp_B.csv'}\n", + "\n", + "\n", + "nodes,graph,graph_prob,graph_scores=spatiotemporal.create_DAG(dict,input_dir=input,output_dir=output,spatio_temporal_rule=True,expected_subcomplexes=subcomplexes,score_comp=True,exp_comp_map=exp_comp)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Precision of model is dependent on the precision of input data\n", + "The precision of the final model is dependent on the precision of the data used to construct the model. To demonstrate this point, we assume we can create a data set with more precise stoichiometric data. Note that, now, the model converges onto a single trajectory. Here, we also demonstrate some other output options that may be useful:\n" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Initializing graph...\n", + "Done.\n", + "Calculating composition likelihood...\n", + "Done.\n", + "Scoring directed acyclic graph...\n", + "Done.\n", + "Writing output...\n", + "Done.\n" + ] + } + ], + "source": [ + "# go to main_dir\n", + "os.chdir(main_dir)\n", + "\n", + "# Input variables. \n", + "dict={'0min':2,'5min':3,'10min':2}\n", + "input='data_precise'\n", + "output='../output_stoich_precise'\n", + "subcomplexes=['A1','A2','B1','B2']\n", + "exp_comp={'A':'exp_comp_A_precise.csv','B':'exp_comp_B_precise.csv'}\n", + "\n", + "\n", + "nodes,graph,graph_prob,graph_scores=spatiotemporal.create_DAG(dict,out_pdf=True,npaths=2,input_dir=input,output_dir=output,spatio_temporal_rule=True,expected_subcomplexes=subcomplexes,score_comp=True,exp_comp_map=exp_comp)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Other outputs\n", + "Note the 2 additional keywords, out_pdf and npaths, which caused the creation of new analysis files 'pdf.txt' and 'path1.txt'/'path2.txt' respectively. These files describe:\n", + "\n", + "pdf.txt - stores the probability distribution function (pdf) over all trajectories, with each row corresponding to the next most likely trajectory.\n", + "\n", + "path1.txt / path2.txt - Each line corresponds to a single state visited in the most likely path (path1.txt) or 2nd most likely path (path2.txt). Note that 2 files were written because npaths=2."
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Summary of all options\n", + "The create_DAG function has a variety of options for model input and plotting. The tutorial above highlighted the key functions that are necessary to create spatio-temporal models, but other options are also available. Here, we highlight all options of the create_DAG function:\n", + "\n", + "#### Inputs related to model input / calculation\n", + "state_dict - dictionary that defines the spatiotemporal model. The keys are strings that correspond to each time point in the stepwise temporal process.\n", + "Keys should be ordered according to the steps in the spatiotemporal process. The values are integers that correspond to the number of possible states at that timepoint.\n", + "Scores for each model are expected to be stored as state_timescorestr,\n", + "where state is an integer from 1 to the value in the dictionary, time is the key in the dictionary, and\n", + "scorestr is trailing characters, which are assumed to be constant for all states.\n", + "\n", + "input_dir - string, directory where the data is stored. Empty string assumes current working directory.\n", + "\n", + "scorestr - string, trailing characters at the end of the file with scores for each stage of the spatiotemporal model (default: '_scores.log').\n", + "\n", + "output_dir - string, directory where the output will be written. Empty string assumes the same directory as the input_dir.\n", + "\n", + "#### Inputs related to spatiotemporal scoring (all optional)\n", + "spatio_temporal_rule - Boolean. If true, enforces that all components earlier in the assembly process are present later in the process. (default: False)\n", + "\n", + "subcomplexstr - string, trailing characters after the subcomplex file, which is a list of subcomplexes included in the given label/time (default: '.config')\n", + "\n", + "expected_subcomplexes - list of all possible subcomplex strings in the model (default: []). Should be a list without duplicates of all components in the subcomplex files.\n", + "\n", + "#### Inputs related to composition scores (all optional)\n", + "score_comp - Boolean to determine whether or not to score models based on the protein composition\n", + "\n", + "exp_comp_map - dictionary for determining protein composition score. The keys are the proteins. The code checks if the names of these proteins are within the subcomplex_components for each node. As such, the naming scheme should be such that the keys of exp_comp_map are substrings of expected_subcomplexes. The values of exp_comp_map should correspond to a csv file for each subcomplex with protein copy numbers.
Each csv file should have 3 columns:\n", + "1) 'Time' - should correspond to the keys of state_dict, 2) 'mean' - mean copy number from experimental data, and 3) std - standard deviation from experimental data\n", + "\n", + "#### Inputs related to model output (all optional)\n", + "out_cdf - Boolean to determine whether or not to write out the cumulative distribution function (cdf) for the graph (default: True)\n", + "\n", + "cdf_fn - string, filename for the cdf (default: 'cdf.txt')\n", + "\n", + "out_labeled_pdf - Boolean to determine whether to output the labeled pdf file, which includes both the pdf and the ordered states visited along each path (default: True).\n", + "\n", + "labeled_pdf_fn - string, name of the file for the labeled pdf (default: 'labeled_pdf.txt')\n", + "\n", + "out_pdf - Boolean to determine whether or not to write out the probability distribution function (pdf) for the graph (default: False)\n", + "\n", + "pdf_fn - string, filename for the pdf (default: 'pdf.txt')\n", + "\n", + "npaths - int, write out the states along the n most likely paths, based on the pdf (default: 0)\n", + "\n", + "npath_fn - string, name of the file for each of the n most likely paths. 'n.txt' will be appended to the end of npath_fn (default: 'path')\n", + "\n", + "#### Inputs related to directed acyclic graph (DAG) output (all optional)\n", + "draw_dag - Boolean to determine whether or not to write out a directed acyclic graph (dag) to a file (default: True)\n", + "\n", + "dag_fn - string, filename for the dag image (default: 'dag_heatmap')\n", + "\n", + "dag_heatmap - Boolean to determine whether or not to write the dag with a heatmap based on the probability of each state (default: True)\n", + "\n", + "dag_colormap - string, colormap used by the dag to represent probability. 
Chooses from those available in matplotlib\n", + "(https://matplotlib.org/stable/users/explain/colors/colormaps.html) (default: \"Purples\").\n", + "\n", + "dag_draw_label - Boolean to determine whether or not to draw state labels on the dag\n", + "\n", + "dag_fontname - string, font used for the labels on the dag\n", + "\n", + "dag_fontsize - string, font size used for the labels on the dag\n", + "\n", + "dag_penscale - float, size of the pen used to draw arrows on the dag\n", + "\n", + "dag_arrowsize - float, size of arrows connecting states on the dag\n", + "\n", + "dag_height - string, height of each node on the dag\n", + "\n", + "dag_width - string, width of each node on the dag" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.5" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/modules/spatiotemporal/examples/toy/Simple_spatiotemporal_example.py b/modules/spatiotemporal/examples/toy/Simple_spatiotemporal_example.py new file mode 100644 index 0000000000..11495e3160 --- /dev/null +++ b/modules/spatiotemporal/examples/toy/Simple_spatiotemporal_example.py @@ -0,0 +1,63 @@ +import sys + +# Example of model without temporal scoring ----------------------------------------------------------------- + +if sys.version_info[0] < 3: + print("Sorry, this example needs Python 3 and a recent version of numpy") + sys.exit(0) + + +# import relevant modules +import pandas +import IMP.spatiotemporal as spatiotemporal +import os + +# Input variables. +# dict is the state dictionary, which returns the number of states at each time point. +# Input is the directory where the data is stored. +# Output is the directory to which the output will be written +dict={'0min':2,'5min':3,'10min':2} +input=spatiotemporal.get_example_path('toy/data') +output=spatiotemporal.get_example_path('toy/output_notemp') + +# create DAG +# Set draw_dag to True if you want to write out the DAG to a file. +# This requires that you have the 'graphviz' Python package installed. +draw_dag = False +nodes,graph,graph_prob,graph_scores=spatiotemporal.create_DAG(dict,input_dir=input,output_dir=output,draw_dag=draw_dag) + +# Example of model with temporal scoring ----------------------------------------------------------------- + +# Input variables. +dict={'0min':2,'5min':3,'10min':2} +input=spatiotemporal.get_example_path('toy/data') +output=spatiotemporal.get_example_path('toy/output_temp') +# expected_subcomplexes is a list of all possible subcomplex strings in the model. Should match the configuration files +subcomplexes=['A1','A2','B1','B2'] + +nodes,graph,graph_prob,graph_scores=spatiotemporal.create_DAG(dict,input_dir=input,output_dir=output,spatio_temporal_rule=True,expected_subcomplexes=subcomplexes,draw_dag=draw_dag) + +# Example with stoichiometric data ------------------------------------------------------------------------- + +# Input variables. +dict={'0min':2,'5min':3,'10min':2} +input=spatiotemporal.get_example_path('toy/data') +output=spatiotemporal.get_example_path('toy/output_stoich') +subcomplexes=['A1','A2','B1','B2'] +# exp_comp_map is a dictionary that describes protein stoichiometry.
The key describes the protein, which should correspond to names within the expected_subcomplexes. For each of these proteins, a csv file should be provided with protein copy number data +exp_comp={'A':'exp_comp_A.csv','B':'exp_comp_B.csv'} + + +nodes,graph,graph_prob,graph_scores=spatiotemporal.create_DAG(dict,input_dir=input,output_dir=output,spatio_temporal_rule=True,expected_subcomplexes=subcomplexes,score_comp=True,exp_comp_map=exp_comp,draw_dag=draw_dag) + +# Example of how model precision depends on the input data --------------------------------------------------- + +# Input variables. +dict={'0min':2,'5min':3,'10min':2} +input=spatiotemporal.get_example_path('toy/data_precise') +output=spatiotemporal.get_example_path('toy/output_stoich_precise') +subcomplexes=['A1','A2','B1','B2'] +exp_comp={'A':'exp_comp_A_precise.csv','B':'exp_comp_B_precise.csv'} + + +nodes,graph,graph_prob,graph_scores=spatiotemporal.create_DAG(dict,out_pdf=True,npaths=2,input_dir=input,output_dir=output,spatio_temporal_rule=True,expected_subcomplexes=subcomplexes,score_comp=True,exp_comp_map=exp_comp,draw_dag=draw_dag) diff --git a/modules/spatiotemporal/examples/toy/data/1_0min.config b/modules/spatiotemporal/examples/toy/data/1_0min.config new file mode 100644 index 0000000000..1e6fea0beb --- /dev/null +++ b/modules/spatiotemporal/examples/toy/data/1_0min.config @@ -0,0 +1 @@ +A1 diff --git a/modules/spatiotemporal/examples/toy/data/1_0min_scores.log b/modules/spatiotemporal/examples/toy/data/1_0min_scores.log new file mode 100644 index 0000000000..ba66466c2a --- /dev/null +++ b/modules/spatiotemporal/examples/toy/data/1_0min_scores.log @@ -0,0 +1 @@ +0.0 diff --git a/modules/spatiotemporal/examples/toy/data/1_10min.config b/modules/spatiotemporal/examples/toy/data/1_10min.config new file mode 100644 index 0000000000..995df58283 --- /dev/null +++ b/modules/spatiotemporal/examples/toy/data/1_10min.config @@ -0,0 +1,3 @@ +A1 +A2 +B1 diff --git a/modules/spatiotemporal/examples/toy/data/1_10min_scores.log b/modules/spatiotemporal/examples/toy/data/1_10min_scores.log new file mode 100644 index 0000000000..d3827e75a5 --- /dev/null +++ b/modules/spatiotemporal/examples/toy/data/1_10min_scores.log @@ -0,0 +1 @@ +1.0 diff --git a/modules/spatiotemporal/examples/toy/data/1_5min.config b/modules/spatiotemporal/examples/toy/data/1_5min.config new file mode 100644 index 0000000000..5556ed8b5a --- /dev/null +++ b/modules/spatiotemporal/examples/toy/data/1_5min.config @@ -0,0 +1,2 @@ +A1 +A2 diff --git a/modules/spatiotemporal/examples/toy/data/1_5min_scores.log b/modules/spatiotemporal/examples/toy/data/1_5min_scores.log new file mode 100644 index 0000000000..d3827e75a5 --- /dev/null +++ b/modules/spatiotemporal/examples/toy/data/1_5min_scores.log @@ -0,0 +1 @@ +1.0 diff --git a/modules/spatiotemporal/examples/toy/data/2_0min.config b/modules/spatiotemporal/examples/toy/data/2_0min.config new file mode 100644 index 0000000000..a19a02740a --- /dev/null +++ b/modules/spatiotemporal/examples/toy/data/2_0min.config @@ -0,0 +1 @@ +B1 diff --git a/modules/spatiotemporal/examples/toy/data/2_0min_scores.log b/modules/spatiotemporal/examples/toy/data/2_0min_scores.log new file mode 100644 index 0000000000..d3827e75a5 --- /dev/null +++ b/modules/spatiotemporal/examples/toy/data/2_0min_scores.log @@ -0,0 +1 @@ +1.0 diff --git a/modules/spatiotemporal/examples/toy/data/2_10min.config b/modules/spatiotemporal/examples/toy/data/2_10min.config new file mode 100644 index 0000000000..f35885cbf1 --- /dev/null +++
b/modules/spatiotemporal/examples/toy/data/2_10min.config @@ -0,0 +1,3 @@ +A1 +B1 +B2 diff --git a/modules/spatiotemporal/examples/toy/data/2_10min_scores.log b/modules/spatiotemporal/examples/toy/data/2_10min_scores.log new file mode 100644 index 0000000000..d3827e75a5 --- /dev/null +++ b/modules/spatiotemporal/examples/toy/data/2_10min_scores.log @@ -0,0 +1 @@ +1.0 diff --git a/modules/spatiotemporal/examples/toy/data/2_5min.config b/modules/spatiotemporal/examples/toy/data/2_5min.config new file mode 100644 index 0000000000..7f1002a84d --- /dev/null +++ b/modules/spatiotemporal/examples/toy/data/2_5min.config @@ -0,0 +1,2 @@ +A1 +B1 diff --git a/modules/spatiotemporal/examples/toy/data/2_5min_scores.log b/modules/spatiotemporal/examples/toy/data/2_5min_scores.log new file mode 100644 index 0000000000..ba66466c2a --- /dev/null +++ b/modules/spatiotemporal/examples/toy/data/2_5min_scores.log @@ -0,0 +1 @@ +0.0 diff --git a/modules/spatiotemporal/examples/toy/data/3_5min.config b/modules/spatiotemporal/examples/toy/data/3_5min.config new file mode 100644 index 0000000000..6df30375b1 --- /dev/null +++ b/modules/spatiotemporal/examples/toy/data/3_5min.config @@ -0,0 +1,2 @@ +B1 +B2 diff --git a/modules/spatiotemporal/examples/toy/data/3_5min_scores.log b/modules/spatiotemporal/examples/toy/data/3_5min_scores.log new file mode 100644 index 0000000000..d3827e75a5 --- /dev/null +++ b/modules/spatiotemporal/examples/toy/data/3_5min_scores.log @@ -0,0 +1 @@ +1.0 diff --git a/modules/spatiotemporal/examples/toy/data/exp_comp_A.csv b/modules/spatiotemporal/examples/toy/data/exp_comp_A.csv new file mode 100644 index 0000000000..8ed0e941f4 --- /dev/null +++ b/modules/spatiotemporal/examples/toy/data/exp_comp_A.csv @@ -0,0 +1,4 @@ +Time,mean,std +0min,0.75,1 +5min,0.75,1 +10min,1.75,1 \ No newline at end of file diff --git a/modules/spatiotemporal/examples/toy/data/exp_comp_B.csv b/modules/spatiotemporal/examples/toy/data/exp_comp_B.csv new file mode 100644 index 0000000000..7a564cd647 --- /dev/null +++ b/modules/spatiotemporal/examples/toy/data/exp_comp_B.csv @@ -0,0 +1,4 @@ +Time,mean,std +0min,0.25,1 +5min,1.25,1 +10min,1.25,1 \ No newline at end of file diff --git a/modules/spatiotemporal/examples/toy/data_precise/1_0min.config b/modules/spatiotemporal/examples/toy/data_precise/1_0min.config new file mode 100644 index 0000000000..1e6fea0beb --- /dev/null +++ b/modules/spatiotemporal/examples/toy/data_precise/1_0min.config @@ -0,0 +1 @@ +A1 diff --git a/modules/spatiotemporal/examples/toy/data_precise/1_0min_scores.log b/modules/spatiotemporal/examples/toy/data_precise/1_0min_scores.log new file mode 100644 index 0000000000..ba66466c2a --- /dev/null +++ b/modules/spatiotemporal/examples/toy/data_precise/1_0min_scores.log @@ -0,0 +1 @@ +0.0 diff --git a/modules/spatiotemporal/examples/toy/data_precise/1_10min.config b/modules/spatiotemporal/examples/toy/data_precise/1_10min.config new file mode 100644 index 0000000000..995df58283 --- /dev/null +++ b/modules/spatiotemporal/examples/toy/data_precise/1_10min.config @@ -0,0 +1,3 @@ +A1 +A2 +B1 diff --git a/modules/spatiotemporal/examples/toy/data_precise/1_10min_scores.log b/modules/spatiotemporal/examples/toy/data_precise/1_10min_scores.log new file mode 100644 index 0000000000..d3827e75a5 --- /dev/null +++ b/modules/spatiotemporal/examples/toy/data_precise/1_10min_scores.log @@ -0,0 +1 @@ +1.0 diff --git a/modules/spatiotemporal/examples/toy/data_precise/1_5min.config b/modules/spatiotemporal/examples/toy/data_precise/1_5min.config new file mode 
100644 index 0000000000..5556ed8b5a --- /dev/null +++ b/modules/spatiotemporal/examples/toy/data_precise/1_5min.config @@ -0,0 +1,2 @@ +A1 +A2 diff --git a/modules/spatiotemporal/examples/toy/data_precise/1_5min_scores.log b/modules/spatiotemporal/examples/toy/data_precise/1_5min_scores.log new file mode 100644 index 0000000000..d3827e75a5 --- /dev/null +++ b/modules/spatiotemporal/examples/toy/data_precise/1_5min_scores.log @@ -0,0 +1 @@ +1.0 diff --git a/modules/spatiotemporal/examples/toy/data_precise/2_0min.config b/modules/spatiotemporal/examples/toy/data_precise/2_0min.config new file mode 100644 index 0000000000..a19a02740a --- /dev/null +++ b/modules/spatiotemporal/examples/toy/data_precise/2_0min.config @@ -0,0 +1 @@ +B1 diff --git a/modules/spatiotemporal/examples/toy/data_precise/2_0min_scores.log b/modules/spatiotemporal/examples/toy/data_precise/2_0min_scores.log new file mode 100644 index 0000000000..d3827e75a5 --- /dev/null +++ b/modules/spatiotemporal/examples/toy/data_precise/2_0min_scores.log @@ -0,0 +1 @@ +1.0 diff --git a/modules/spatiotemporal/examples/toy/data_precise/2_10min.config b/modules/spatiotemporal/examples/toy/data_precise/2_10min.config new file mode 100644 index 0000000000..f35885cbf1 --- /dev/null +++ b/modules/spatiotemporal/examples/toy/data_precise/2_10min.config @@ -0,0 +1,3 @@ +A1 +B1 +B2 diff --git a/modules/spatiotemporal/examples/toy/data_precise/2_10min_scores.log b/modules/spatiotemporal/examples/toy/data_precise/2_10min_scores.log new file mode 100644 index 0000000000..d3827e75a5 --- /dev/null +++ b/modules/spatiotemporal/examples/toy/data_precise/2_10min_scores.log @@ -0,0 +1 @@ +1.0 diff --git a/modules/spatiotemporal/examples/toy/data_precise/2_5min.config b/modules/spatiotemporal/examples/toy/data_precise/2_5min.config new file mode 100644 index 0000000000..7f1002a84d --- /dev/null +++ b/modules/spatiotemporal/examples/toy/data_precise/2_5min.config @@ -0,0 +1,2 @@ +A1 +B1 diff --git a/modules/spatiotemporal/examples/toy/data_precise/2_5min_scores.log b/modules/spatiotemporal/examples/toy/data_precise/2_5min_scores.log new file mode 100644 index 0000000000..ba66466c2a --- /dev/null +++ b/modules/spatiotemporal/examples/toy/data_precise/2_5min_scores.log @@ -0,0 +1 @@ +0.0 diff --git a/modules/spatiotemporal/examples/toy/data_precise/3_5min.config b/modules/spatiotemporal/examples/toy/data_precise/3_5min.config new file mode 100644 index 0000000000..6df30375b1 --- /dev/null +++ b/modules/spatiotemporal/examples/toy/data_precise/3_5min.config @@ -0,0 +1,2 @@ +B1 +B2 diff --git a/modules/spatiotemporal/examples/toy/data_precise/3_5min_scores.log b/modules/spatiotemporal/examples/toy/data_precise/3_5min_scores.log new file mode 100644 index 0000000000..d3827e75a5 --- /dev/null +++ b/modules/spatiotemporal/examples/toy/data_precise/3_5min_scores.log @@ -0,0 +1 @@ +1.0 diff --git a/modules/spatiotemporal/examples/toy/data_precise/exp_comp_A_precise.csv b/modules/spatiotemporal/examples/toy/data_precise/exp_comp_A_precise.csv new file mode 100644 index 0000000000..05ce32dd2a --- /dev/null +++ b/modules/spatiotemporal/examples/toy/data_precise/exp_comp_A_precise.csv @@ -0,0 +1,4 @@ +Time,mean,std +0min,1,0.0001 +5min,1,0.0001 +10min,2,0.0001 \ No newline at end of file diff --git a/modules/spatiotemporal/examples/toy/data_precise/exp_comp_B_precise.csv b/modules/spatiotemporal/examples/toy/data_precise/exp_comp_B_precise.csv new file mode 100644 index 0000000000..c8333b1229 --- /dev/null +++ 
b/modules/spatiotemporal/examples/toy/data_precise/exp_comp_B_precise.csv @@ -0,0 +1,4 @@ +Time,mean,std +0min,0,0.0001 +5min,1,0.0001 +10min,1,0.0001 \ No newline at end of file diff --git a/modules/spatiotemporal/examples/toy/image/toy_model.pdf b/modules/spatiotemporal/examples/toy/image/toy_model.pdf new file mode 100644 index 0000000000..5e494ee031 Binary files /dev/null and b/modules/spatiotemporal/examples/toy/image/toy_model.pdf differ diff --git a/modules/spatiotemporal/examples/toy/output_notemp/.gitignore b/modules/spatiotemporal/examples/toy/output_notemp/.gitignore new file mode 100644 index 0000000000..6169662aee --- /dev/null +++ b/modules/spatiotemporal/examples/toy/output_notemp/.gitignore @@ -0,0 +1,2 @@ +# Placeholder to ensure directory is kept by git +!.gitignore diff --git a/modules/spatiotemporal/examples/toy/output_stoich/.gitignore b/modules/spatiotemporal/examples/toy/output_stoich/.gitignore new file mode 100644 index 0000000000..6169662aee --- /dev/null +++ b/modules/spatiotemporal/examples/toy/output_stoich/.gitignore @@ -0,0 +1,2 @@ +# Placeholder to ensure directory is kept by git +!.gitignore diff --git a/modules/spatiotemporal/examples/toy/output_stoich_precise/.gitignore b/modules/spatiotemporal/examples/toy/output_stoich_precise/.gitignore new file mode 100644 index 0000000000..6169662aee --- /dev/null +++ b/modules/spatiotemporal/examples/toy/output_stoich_precise/.gitignore @@ -0,0 +1,2 @@ +# Placeholder to ensure directory is kept by git +!.gitignore diff --git a/modules/spatiotemporal/examples/toy/output_temp/.gitignore b/modules/spatiotemporal/examples/toy/output_temp/.gitignore new file mode 100644 index 0000000000..6169662aee --- /dev/null +++ b/modules/spatiotemporal/examples/toy/output_temp/.gitignore @@ -0,0 +1,2 @@ +# Placeholder to ensure directory is kept by git +!.gitignore diff --git a/modules/spatiotemporal/include/README.md b/modules/spatiotemporal/include/README.md new file mode 100644 index 0000000000..36904bdef2 --- /dev/null +++ b/modules/spatiotemporal/include/README.md @@ -0,0 +1,11 @@ +Place the public header files in this directory. They will be +available to your code (and other modules) with + + #include <IMP/spatiotemporal/header_name.h> + +All headers should include `IMP/spatiotemporal/spatiotemporal_config.h` as their +first include and surround all code with `IMPSPATIOTEMPORAL_BEGIN_NAMESPACE` +and `IMPSPATIOTEMPORAL_END_NAMESPACE` to put it in the IMP::spatiotemporal namespace +and manage compiler warnings. + +Headers should also be exposed to SWIG in the `pyext/swig.i-in` file. diff --git a/modules/spatiotemporal/include/internal/README.md b/modules/spatiotemporal/include/internal/README.md new file mode 100644 index 0000000000..2a75e58c19 --- /dev/null +++ b/modules/spatiotemporal/include/internal/README.md @@ -0,0 +1,9 @@ +Place the private header files in this directory. They will be +available to your code with + + #include <IMP/spatiotemporal/internal/header_name.h> + +All headers should include `IMP/spatiotemporal/spatiotemporal_config.h` as their +first include and surround all code with `IMPSPATIOTEMPORAL_BEGIN_INTERNAL_NAMESPACE` +and `IMPSPATIOTEMPORAL_END_INTERNAL_NAMESPACE` to put it in the +IMP::spatiotemporal::internal namespace and manage compiler warnings.
diff --git a/modules/spatiotemporal/pyext/src/__init__.py b/modules/spatiotemporal/pyext/src/__init__.py new file mode 100644 index 0000000000..80f5dae877 --- /dev/null +++ b/modules/spatiotemporal/pyext/src/__init__.py @@ -0,0 +1 @@ +from .create_DAG import create_DAG # noqa: F401 diff --git a/modules/spatiotemporal/pyext/src/analysis.py b/modules/spatiotemporal/pyext/src/analysis.py new file mode 100644 index 0000000000..4ed5bd2b86 --- /dev/null +++ b/modules/spatiotemporal/pyext/src/analysis.py @@ -0,0 +1,120 @@ +"""@namespace IMP.spatiotemporal.analysis + Functions to analyze spatiotemporal models. +""" +import numpy as np + + +def temporal_precision(labeled_pdf1_fn, labeled_pdf2_fn, + output_fn='temporal_precision.txt'): + """ + Function that reads in two labeled_pdfs from create_DAG and returns the + temporal_precision, defined as the probability overlap between two + pathway models. + + @param labeled_pdf1_fn: string, labeled pdf file name (including the path); + labeled_pdf from one independent sampling + @param labeled_pdf2_fn: string, labeled pdf file name (including the path); + labeled_pdf from another independent sampling + @param output_fn: string, name of output file + (default: 'temporal_precision.txt') + @return temporal precision, written to output_fn + """ + pdf_files = [labeled_pdf1_fn, labeled_pdf2_fn] + dict_list = [] + for pdf_file in pdf_files: + # create a blank dictionary to store the results + prob_dict = {} + # read in labeled pdf file + old = open(pdf_file, 'r') + line = old.readline() + # store the path through various nodes, as well as the probability + # of that path + while line: + line_split = line.split() + # assumes the first string is the trajectory string, the second + # string is the probability + if len(line_split) > 1: + # use # for comments + if line_split[0] == '#': + pass + else: + trj = line_split[0] + prob = float(line_split[1]) + # store in dictionary + prob_dict[trj] = prob + line = old.readline() + old.close() + # append dictionary to dict_list + dict_list.append(prob_dict) + # calculate the temporal precision + key_list = dict_list[0].keys() + key_list2 = dict_list[1].keys() + # raise an error if keys were not found + if len(key_list) == 0 or len(key_list2) == 0: + raise Exception('Error reading labeled_pdf!!! Keys not found') + # precision starts at 1 + precision = 1 + for key in key_list2: + if key in key_list: + # reduce by 1/2 the Manhattan distance between probabilities + precision -= 0.5 * np.abs(dict_list[0][key] - dict_list[1][key]) + else: + # states in key_list2, but not key_list inherently contribute + # to the temporal precision + precision -= 0.5 * np.abs(dict_list[1][key]) + for key in key_list: + if key in key_list2: + pass + # states in key_list, but not key_list2 inherently contribute to + # the temporal precision + else: + precision -= 0.5 * np.abs(dict_list[0][key]) + with open(output_fn, 'w') as new: + new.write('Temporal precision between ' + labeled_pdf1_fn + ' and ' + + labeled_pdf2_fn + ':\n') + new.write(str(precision)) + print('Temporal precision between ' + labeled_pdf1_fn + ' and ' + + labeled_pdf2_fn + ':') + print(precision) + + +def purity(labeled_pdf_fn, output_fn='purity.txt'): + """ + Function that reads in one labeled_pdf from create_DAG and returns the + purity, defined as the sum of the squared probability of all trajectories.
+ + @param labeled_pdf_fn: string, labeled pdf file name (including the path); + labeled_pdf from the total model + @param output_fn: string, name of output file + (default: 'purity.txt') + @return purity, written to output_fn + """ + # create a blank list to store the results + prob_list = [] + # read in labeled pdf file + old = open(labeled_pdf_fn, 'r') + line = old.readline() + # store the path through various nodes, as well as the probability + # of that path + while line: + line_split = line.split() + # assumes the first string is the trajectory string, the second + # string is the probability + if len(line_split) > 1: + # use # for comments + if line_split[0] == '#': + pass + else: + prob = float(line_split[1]) + # store in list + prob_list.append(prob) + line = old.readline() + old.close() + pure = 0 + for prob in prob_list: + pure += prob * prob + with open(output_fn, 'w') as new: + new.write('Purity of ' + labeled_pdf_fn + ':\n') + new.write(str(pure)) + print('Purity of ' + labeled_pdf_fn) + print(str(pure)) diff --git a/modules/spatiotemporal/pyext/src/composition_scoring.py b/modules/spatiotemporal/pyext/src/composition_scoring.py new file mode 100644 index 0000000000..c7340828d6 --- /dev/null +++ b/modules/spatiotemporal/pyext/src/composition_scoring.py @@ -0,0 +1,117 @@ +"""@namespace IMP.spatiotemporal.composition_scoring + Functions for weighting graphNode objects based on stoichiometry data. +""" +import numpy as np +import warnings +import os + + +def get_state(subcomplex_components, prot): + """ + Function to calculate how many times a protein appears in a list of + proteins, which can be accessed from a graphNode object using + node.get_subcomplex_components() + + @param subcomplex_components: subcomplexes or components in a given node, + which can be accessed by graphNode.get_subcomplex_components() + @param prot: string, protein or subcomplex we are interested in finding + @return state, int, number of times the protein or subcomplex appears + in subcomplex_components + """ + state = 0 + for subcomplex in subcomplex_components: + if prot in subcomplex: + state += 1 + return state + + +def composition_likelihood_function(mean, std, prots, node): + """Function that calculates the likelihood of an individual node, used by + calc_likelihood(). + + @param mean: dictionary of dictionaries where the first key is the protein, + the second key is the time, and the expected mean copy number + from experiment is returned. + @param std: dictionary of dictionaries where the first key is the protein, + the second key is the time, and the expected standard deviation + of protein copy number from experiment is returned. + @param prots: list of proteins or subcomplexes which will be scored + according to this likelihood function + @param node: the graphNode object for which the likelihood will be + calculated. + @return w: float, the weight of the graphNode according to the composition + likelihood function. + """ + # get time + t = node.get_time() + w = 0 + for prot in prots: + # x counts the number of proteins of a given type in the node + x = get_state(node.get_subcomplex_components(), prot) + # check std is greater than 0 + if std[prot][t] > 0: + pass + else: + warnings.warn( + 'WARNING!!! Standard deviation of protein ' + prot + + ' is 0 or less at time ' + t + + '.
May lead to illogical results.') + w += (0.5 * ((x - mean[prot][t]) / std[prot][t])**2 + + np.log(std[prot][t] * np.sqrt(2 * np.pi))) + return w + + +def calc_likelihood(exp_comp_map, nodes): + """ + Function that adds a score for the compositional likelihood for all + states represented as nodes in the graph. The composition likelihood + assumes a Gaussian distribution for copy number of each protein or + subcomplex with means and standard deviations derived from experiment. + Returns the nodes, with the new weights added. + + @param exp_comp_map: dictionary, which describes protein stoichiometry. + The key describes the protein, which should correspond to names + within the expected_subcomplexes. Only copy numbers for proteins + or subcomplexes included in this dictionary will be scored. For + each of these proteins, a csv file should be provided with protein + copy number data. The csv file should have 3 columns, + 1) "Time", which matches up to the possible times in the graph, + 2) "mean", the average protein copy number at that time point + from experiment, and 3) "std", the standard deviation of that + protein copy number from experiment. + @param nodes: list of graphNode objects, which have already been + initiated with static scores + @return nodes: edited list of graphNode objects, which now have static + and composition scores + """ + import pandas as pd + # Get list of all proteins + prots = list(exp_comp_map.keys()) + # Data is stored as a dictionary of dictionaries. The first dictionary + # references which protein you are referring to. + # The 2nd dictionary references which time you are referring to. The return + # is the mean or standard deviation of the protein copy number + mean = {} + std = {} + # import csv file as pandas data frame + for prot in prots: + prot_dict_mean = {} + prot_dict_std = {} + if os.path.exists(exp_comp_map[prot]): + exp = pd.read_csv(exp_comp_map[prot]) + else: + raise Exception( + "Error!!! Check exp_comp_map. Unable to find composition " + "file: " + exp_comp_map[prot] + '\nClosing...') + for i in range(len(exp)): + prot_dict_mean[exp['Time'][i]] = exp['mean'][i] + prot_dict_std[exp['Time'][i]] = exp['std'][i] + mean[prot] = prot_dict_mean + std[prot] = prot_dict_std + # loop over all nodes and calculate the likelihood for each node + for node in nodes: + # compute the compositional likelihood of the nodes + weight = composition_likelihood_function(mean, std, prots, node) + # add state weight to node + node.add_score(float(weight)) + return nodes diff --git a/modules/spatiotemporal/pyext/src/create_DAG.py b/modules/spatiotemporal/pyext/src/create_DAG.py new file mode 100644 index 0000000000..ce9be9e5c5 --- /dev/null +++ b/modules/spatiotemporal/pyext/src/create_DAG.py @@ -0,0 +1,248 @@ +"""@namespace IMP.spatiotemporal.create_DAG + Simplified function for creating a spatiotemporal model. +""" +import os +import itertools +import warnings +from IMP.spatiotemporal import graphNode +from IMP.spatiotemporal.score_graph import score_graph +from IMP.spatiotemporal import write_output +from IMP.spatiotemporal import composition_scoring + + +def create_DAG(state_dict, + # optional inputs related to model input / calculation + input_dir='', scorestr='_scores.log', output_dir='', + # optional inputs related to spatiotemporal scoring + # (only allowing associative transitions).
+ spatio_temporal_rule=False, subcomplexstr='.config', + expected_subcomplexes=[], + # optional inputs related to composition scores + score_comp=False, exp_comp_map={}, + # optional inputs related to model output + out_cdf=True, out_labeled_pdf=True, out_pdf=False, npaths=0, + # optional inputs related to DAG output + draw_dag=True): + """ + This function streamlines the process of creating a graph by performing + all the necessary steps and saving relevant input to files. Features of + this function are walked through in + example/toy/Simple_spatiotemporal_example.py + + @param state_dict: dictionary that defines the spatiotemporal model. + The keys are strings that correspond to each time point in the + stepwise temporal process. Keys should be ordered according to the + steps in the spatiotemporal process. The values are integers that + correspond to the number of possible states at that timepoint. + Scores for each model are expected to be stored as + $state_$timescorestr, where state is an integer from 1 to the + value in the dictionary, time is the key in the dictionary, and + scorestr is trailing characters, which are assumed to be constant + for all states. + @param input_dir: string, directory where the data is stored. Empty string + assumes current working directory. + @param scorestr: string, trailing characters at the end of the file with + scores for each stage of the spatiotemporal model + (default: '_scores.log'). + @param output_dir: string, directory where the output will be written. + Empty string assumes the same directory as the input_dir. + @param spatio_temporal_rule: Boolean. If true, enforces that all components + earlier in the assembly process are present later in the process. + (default: False) + @param subcomplexstr: string, trailing characters after the subcomplex + file, which is a list of subcomplexes included in the given + label/time (default: '.config') + @param expected_subcomplexes: list of all possible subcomplex strings + in the model (default: []) Should be a list without duplicates of + all components in the subcomplex files. + @param score_comp: Boolean to determine whether or not to score models + based on the protein composition. + @param exp_comp_map: dictionary for determining protein composition score. + The keys are the proteins. The code checks if the names of these + proteins are within the subcomplex_components for each node. + As such, the naming scheme should be such that the keys of + exp_comp_map are substrings of expected_subcomplexes. The values of + exp_comp_map should correspond to a csv file for each subcomplex + with protein copy numbers. Each csv file should have 3 columns: + 1) 'Time' - should correspond to the keys of state_dict, + 2) 'mean' - mean copy number from experimental data, and + 3) std - standard deviation from experimental data + @param out_cdf: Boolean to determine whether or not to write out the + cumulative distribution function (cdf) for the graph + (default: True). filename: "cdf.txt" + @param out_labeled_pdf: Boolean to determine whether to output the + labeled pdf file, which includes both the pdf and the ordered + states visited along each path (default: True).
+ filename: "labeled_pdf.txt" + @param out_pdf: Boolean to determine whether or not to write out the + probability distribution function (pdf) for the graph + (default: False) filename: "pdf.txt" + @param npaths: int, write out the states along the n most likely paths, + based on the pdf (default: 0) filename: "pathXX.txt", where XX + is the number of the path + @param draw_dag: Boolean to determine whether or not to write out a + directed acyclic graph (dag) to a file (default: True) + filename: "dag_heatmap" + @return nodes: list of graphNode objects, corresponding to the snapshot + models in the spatiotemporal model + @return graph: list of all paths through the graph. Each path is a list + of graphNode objects that correspond to the states visited + along the path. + @return graph_prob: list of probabilities for each path, ordered in the + same order as all_paths + @return graph_scores: list of tuples, where the first object is the + path (list of graphNode objects for each state along the + trajectory), and the second object is the score of the path, + which can be used to calculate the probability. + """ + + # Set manual parameters + # cdf_fn - string, name of the file for the cdf + cdf_fn = 'cdf.txt' + # labeled_pdf_fn - string, name of the file for the labeled pdf + labeled_pdf_fn = 'labeled_pdf.txt' + # pdf_fn - string, name of the file for the pdf + pdf_fn = 'pdf.txt' + # npath_fn - string, name of the file for each pathway + npath_fn = 'path' + + # dag_fn - string, filename for the dag image (default: 'dag_heatmap') + dag_fn = 'dag_heatmap' + # dag_heatmap - Boolean to determine whether or not to write the dag + # with a heatmap based on the probability of each state (default: True) + dag_heatmap = True + # dag_colormap - string, colormap used by the dag to represent probability. + # Chooses from those available in matplotlib + # (https://matplotlib.org/stable/users/explain/colors/colormaps.html) + # (default: "Purples"). 
+ dag_colormap = "Purples" + # dag_draw_label - Boolean to determine whether or not to draw state + # labels on the dag + dag_draw_label = True + # dag_fontname - string, font used for the labels on the dag + dag_fontname = "Helvetica" + # dag_fontsize - string, font size used for the labels on the dag + dag_fontsize = "18" + # dag_penscale - float, size of the pen used to draw arrows on the dag + dag_penscale = 0.6 + # dag_arrowsize - float, size of arrows connecting states on the dag + dag_arrowsize = 1.2 + # dag_height - string, height of each node on the dag + dag_height = "0.6" + # dag_width - string, width of each node on the dag + dag_width = "0.6" + + # Assert that all inputs are the correct variable type + if not isinstance(state_dict, dict): + raise TypeError("state_dict should be of type dict") + if not isinstance(input_dir, str): + raise TypeError("input_dir should be of type str") + if not isinstance(scorestr, str): + raise TypeError("scorestr should be of type str") + if not isinstance(spatio_temporal_rule, bool): + raise TypeError("state_dict should be of type bool") + if not isinstance(subcomplexstr, str): + raise TypeError("subcomplexstr should be of type str") + if not isinstance(expected_subcomplexes, list): + raise TypeError("expected_subcomplexes should be of type list") + if not isinstance(score_comp, bool): + raise TypeError("score_comp should be of type bool") + if not isinstance(exp_comp_map, dict): + raise TypeError("exp_comp_map should be of type dict") + if not isinstance(out_cdf, bool): + raise TypeError("out_cdf should be of type bool") + if not isinstance(out_labeled_pdf, bool): + raise TypeError("out_labeled_pdf should be of type bool") + if not isinstance(out_pdf, bool): + raise TypeError("out_pdf should be of type bool") + if not isinstance(npaths, int): + raise TypeError("npaths should be of type int") + if not isinstance(draw_dag, bool): + raise TypeError("draw_dag should be of type bool") + + # check proteins in the exp_comp_map exist in expected_complexes + for key in exp_comp_map.keys(): + found = 0 + for subcomplex in expected_subcomplexes: + if key in subcomplex: + found = found + 1 + if found == 0: + warnings.warn( + 'WARNING!!! Check exp_comp_map and expected_subcomplexes. ' + 'protein ' + key + ' is not found in expected_subcomplexes. ' + 'This could cause illogical results.') + + # Step 1: Initialize graph with static scores + # list of all nodes + print('Initialing graph...') + nodes = [] + # keys correspond to all timepoints + keys = list(state_dict.keys()) + # Go to input_dir, if it exists + if len(input_dir) > 0: + if os.path.exists(input_dir): + os.chdir(input_dir) + else: + raise Exception( + "Error!!! Does not exist: " + input_dir + '\nClosing...') + + # Loop over all keys and all states + for key in keys: + for i in range(state_dict[key]): + index = i + 1 + node = graphNode.graphNode() + node.init_graphNode(key, str(index), scorestr, subcomplexstr, + expected_subcomplexes) + nodes.append(node) + + # build up candidate edges in graph + tpairs = [(keys[i], keys[i + 1]) for i in range(0, len(keys) - 1)] + for a, b in tpairs: + # get time marginalized nodes + anode = [n for n in nodes if n.get_time() == a] + bnode = [n for n in nodes if n.get_time() == b] + # draw edges between pairs. Include whether or not to include + # spatio_temporal_rule + for na, nb in itertools.product(anode, bnode): + graphNode.draw_edge(na, nb, spatio_temporal_rule) + # set indeces for all nodes. 
diff --git a/modules/spatiotemporal/pyext/src/graphNode.py b/modules/spatiotemporal/pyext/src/graphNode.py
new file mode 100644
index 0000000000..6adfd6fe0c
--- /dev/null
+++ b/modules/spatiotemporal/pyext/src/graphNode.py
@@ -0,0 +1,203 @@
+"""@namespace IMP.spatiotemporal.graphNode
+   Defines the graphNode class. Each node corresponds to a snapshot model.
+   Nodes can be connected to create spatiotemporal models.
+"""
+import numpy as np
+import os
+
+
+class graphNode:
+    """A class to represent a node in a spatiotemporal process.
+
+    Each graphNode contains a list of its component subcomplexes,
+    an array of scores, a time index and a list of pointers to nodes
+    to which edges coming from this node point.
+    """
+
+    def __init__(self):
+        """Initialize a node.
+        """
+
+        self._edges = set()
+        self._scores = []
+        self._time = None
+        self._index = None
+        self._label = None
+        self._components = []
+        self._expected_subcomplexes = []
+
+    def __repr__(self):
+        """String representation of a graphNode
+        """
+        return ("graphNode(" + ",".join([str(self._time), str(self._label)])
+                + ")")
+
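The initializer that follows reads two small text files per node, named by label and time. A sketch of plausible contents for a node labeled '1' at '0min' (the values are made up; the format is what init_graphNode below expects):

```python
import numpy as np

# 1_0min_scores.log: one score per line (e.g. one per simulation run);
# init_graphNode stores their mean as the node's score.
with open('1_0min_scores.log', 'w') as fh:
    fh.write('0.5\n-0.5\n0.0\n')

# 1_0min.config: one subcomplex per line; every name must appear in
# expected_subcomplexes or set_subcomplex_components() raises.
with open('1_0min.config', 'w') as fh:
    fh.write('A1\n')

print(np.loadtxt('1_0min_scores.log').mean())  # 0.0, the node's score
```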
+    def init_graphNode(self, time, label, scorestr, subcomplexstr,
+                       expected_subcomplexes):
+        """Function that initializes a graph node with specific time, label,
+        and expected_subcomplexes. Scores and components are extracted from
+        files named scorestr and subcomplexstr respectively. Returns a single
+        graphNode object.
+
+        @param time: string, time point in the stepwise temporal process
+        @param label: string, a number label for the graphNode
+        @param scorestr: string, trailing characters at the end of the file
+            with scores for each stage of the spatiotemporal model
+        @param subcomplexstr: string, trailing characters after the
+            subcomplex file, which is a list of subcomplexes included in
+            the given label/time
+        @param expected_subcomplexes: list of all possible subcomplex strings
+            in the model
+        """
+
+        # Set values input from the function call
+        self.set_time(time)
+        self.set_label(label)
+        self.set_expected_subcomplexes(expected_subcomplexes)
+
+        # scores is a list of energies, where each entry corresponds to the
+        # score at a given timepoint. It is averaged over all simulations
+        scores_fn = label + '_' + time + scorestr
+        if os.path.exists(scores_fn):
+            scores = np.loadtxt(scores_fn)
+        else:
+            raise Exception("Error!!! Unable to find scores file: "
+                            + scores_fn + '\nClosing...')
+        self.add_score(scores.mean())
+
+        if len(expected_subcomplexes) > 0:
+            with open(label + '_' + time + subcomplexstr, "r") as fh:
+                included_prots = fh.read().splitlines()
+
+            self.set_subcomplex_components(
+                included_prots, label + '_' + time + subcomplexstr)
+
+    # Index indexes over all nodes
+    def set_index(self, index):
+        """Set an index to label the node's identity.
+        """
+        self._index = index
+
+    def get_index(self):
+        """Return the node's index.
+        """
+        return self._index
+
+    # Labels are different states at the same time point
+    def set_label(self, label):
+        """Set a label for the node.
+        """
+        self._label = label
+
+    def get_label(self):
+        """Return the node's label.
+        """
+        return self._label
+
+    def get_weight(self):
+        """Return the weight of the node. A weight refers in this case to
+        the sum of the node's scores list.
+        """
+        return sum(self._scores)
+
+    def get_subcomplex_components(self):
+        """Return a list of subcomplexes in this node's representation.
+        """
+        return self._components
+
+    def set_subcomplex_components(self, scs, fn):
+        """Set the list of subcomplex components.
+
+        Each component should be one of the expected subcomplexes.
+        """
+        for sc in scs:
+            if sc not in self._expected_subcomplexes:
+                raise Exception("Error!!! Did not recognize the subcomplex "
+                                "name " + sc + " from config file: " + fn)
+
+        self._components = scs
+
+    def set_expected_subcomplexes(self, expected_subcomplexes):
+        """Set the list of possible subcomplex components.
+        Should include all possible components across the entire
+        spatiotemporal model.
+        """
+        self._expected_subcomplexes = expected_subcomplexes
+
+    def add_edge(self, edge):
+        """Add a directed edge to the node.
+
+        Expects a graphNode object.
+        """
+
+        if not isinstance(edge, graphNode):
+            raise TypeError("Object " + str(edge) + " is not a graphNode")
+
+        # add if not already present
+        self._edges.add(edge)
+
+    def get_edges(self):
+        """Return the list of edges for this node.
+        """
+        return self._edges
+
+    def set_time(self, time):
+        """Set the time.
+        """
+        self._time = time
+
+    def get_time(self):
+        """Return the time associated with this node.
+        """
+        return self._time
+
+    def set_scores(self, scores):
+        """Set the score data for this node.
+
+        Expects a list of floats which represent the total score array.
+        """
+        self._scores = scores
+
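Taken together, add_score (next) and get_weight mean a node's weight is just the running sum of appended scores; a quick illustration with made-up numbers:

```python
from IMP.spatiotemporal.graphNode import graphNode

node = graphNode()
node.set_time('5min')
node.set_label('2')
node.add_score(1.0)        # e.g. mean snapshot score
node.add_score(0.5)        # e.g. a composition likelihood term
print(node.get_weight())   # 1.5; summed per path by score_graph()
```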
+ """ + # assert we're getting a float + if not isinstance(score, float): + raise TypeError("add_score expects a float but got a " + + str(type(score))) + + self._scores.append(score) + + def get_scores(self): + """Return the scores array. + """ + return self._scores + + +def draw_edge(nodeA, nodeB, spatio_temporal_rule): + """ + Draws an edge between graphNode objects nodeA and nodeB. If + spatio_temporal_rule, node will only be drawn if the components of nodeA + are a subset of the components of nodeB. + If spatio_temporal_rule: determines if an edge should be drawn if Node A + comes immediately before node B in time and if node B contains node A's + components as a subset. + Else: draws an edge between nodeA and nodeB regardless. + + @param nodeA: first graphNode object + @param nodeB: second graphNode object + @param spatio_temporal_rule: Boolean, whether to consider composition + when drawing edges + """ + + # assert both are nodes + assert isinstance(nodeA, graphNode), str(nodeA) + " is not a graphNode." + assert isinstance(nodeB, graphNode), str(nodeB) + " is not a graphNode." + + # check subcomponents are subsets. All nodes in timestep t are also in t+1 + if spatio_temporal_rule: + if frozenset(nodeA.get_subcomplex_components()).issubset( + set(nodeB.get_subcomplex_components())): + nodeA.add_edge(nodeB) + else: + nodeA.add_edge(nodeB) diff --git a/modules/spatiotemporal/pyext/src/score_graph.py b/modules/spatiotemporal/pyext/src/score_graph.py new file mode 100644 index 0000000000..57060aa0f0 --- /dev/null +++ b/modules/spatiotemporal/pyext/src/score_graph.py @@ -0,0 +1,108 @@ +"""@namespace IMP.spatiotemporal.score_graph + Functions to traverse and score the spatiotemporal graphs. +""" + +import numpy as np + + +def get_graph_as_dict(nodes): + """ + converts a list of graphNode objects (nodes), which have been initiated + with scores and edges into a dictionary representation of a graph (graph). + Each node in the graph is a key, which returns edges in the next state. + + @param nodes: list of graphNode objects + @return graph: dictionary where each node is a key and the values are + the edges in the graph for that node + """ + graph = {} + + for node in nodes: + graph[node] = node.get_edges() + + return graph + + +def find_all_paths(graph, start, end, path=[]): + """ + Finds all paths between nodes, which already have edges drawn between them. + + @param graph: dictionary representation of the graph, acquired + in get_graph_as_dict() + @param start: graphNode, candidate for starting the graph + @param end: graphNode, candidate to end the graph + @param path: list of graphNodes on the path, which is defined recursively. + @return paths: list of all paths that exist between the starting node + and ending node + """ + + path = path + [start] + if start == end: + return [path] + if start not in graph.keys(): + return [] + paths = [] + for node in graph[start]: + if node not in path: + newpaths = find_all_paths(graph, node, end, path) + for newpath in newpaths: + paths.append(newpath) + + return paths + + +# Function to score a graph based on nodes, which has scores and edges, +# as well as keys, which is a list of the states visited +def score_graph(nodes, keys): + """ + Function to score a graph based on nodes, which has scores and edges, + as well as keys, which is a list of the states visited. Note that all + edges must be drawn and scores must be added to nodes before calling + this function. 
+
+
+# Function to score a graph based on nodes, which has scores and edges,
+# as well as keys, which is a list of the states visited
+def score_graph(nodes, keys):
+    """
+    Function to score a graph based on nodes, which has scores and edges,
+    as well as keys, which is a list of the states visited. Note that all
+    edges must be drawn and scores must be added to nodes before calling
+    this function.
+
+    @param nodes: list of graphNode objects, which have been initialized with
+        all weights and edges
+    @param keys: list of all ordered states (strings) visited along the
+        graph. Paths will be determined in sequential order passed to this
+        function.
+    @return all_paths: list of all paths through the graph. Each path is a
+        list of graphNode objects that correspond to the states visited
+        along the path.
+    @return path_prob: list of probabilities for each path, ordered in the
+        same order as all_paths
+    @return path_scores: list of tuples, where the first object is the path
+        (list of graphNode objects for each state along the trajectory),
+        and the second object is the score of the path, which can be used
+        to calculate the probability.
+    """
+    # Determine starting state and final state
+    time_start = keys[0]
+    time_end = keys[-1]
+
+    # enumerate all paths by iterating over all possible starting and
+    # ending points
+    starting_nodes = [n for n in nodes if n.get_time() == time_start]
+
+    # get nodes at the final time point
+    ending_nodes = [n for n in nodes if n.get_time() == time_end]
+
+    graph = get_graph_as_dict(nodes)
+
+    all_paths = []
+    for sn in starting_nodes:
+        for en in ending_nodes:
+            all_paths += find_all_paths(graph, sn, en)
+
+    # compute all path scores as a np array.
+    path_scores = [(path, np.array([n.get_weight() for n in path]).sum())
+                   for path in all_paths]
+    s = np.array([p[1] for p in path_scores])
+    s -= s.min()
+    path_prob = np.exp(-s) / np.exp(-s).sum()
+
+    return all_paths, path_prob, path_scores
diff --git a/modules/spatiotemporal/pyext/src/write_output.py b/modules/spatiotemporal/pyext/src/write_output.py
new file mode 100644
index 0000000000..9033f221bf
--- /dev/null
+++ b/modules/spatiotemporal/pyext/src/write_output.py
@@ -0,0 +1,236 @@
+"""@namespace IMP.spatiotemporal.write_output
+   Functions to write spatiotemporal graph information to files.
+""" + +import numpy as np +try: + from graphviz import Digraph +except ImportError: + Digraph = None +try: + try: + from matplotlib import colormaps as cm # matplotlib 3.7+ + except ImportError: + from matplotlib import cm + from matplotlib import colors as clr +except ImportError: + cm = None + clr = None + + +# Text / probability output + +def write_cdf(out_cdf, cdf_fn, graph_prob): + """ + Function to output the cumulative distribution function (cdf) + + @param out_cdf: bool, writes cdf if true + @param cdf_fn: str, filename of cdf + @param graph_prob: list of probabilities for each path, (path_prob + from score_graph()) + """ + if out_cdf: + cdf = np.cumsum(np.flip(np.sort(graph_prob), axis=0)) + np.savetxt(cdf_fn, cdf) + + +def write_pdf(out_pdf, pdf_fn, graph_prob): + """ + Function to output the probability distribution function (pdf) + @param out_pdf: bool, writes pdf if true + @param pdf_fn: str, filename of pdf + @param graph_prob: list of probabilities for each path, (path_prob + from score_graph()) + """ + if out_pdf: + pdf = np.flip(np.sort(graph_prob), axis=0) + np.savetxt(pdf_fn, pdf) + + +def write_labeled_pdf(out_labeled_pdf, labeled_pdf_fn, graph, graph_prob): + """ + Function to output the labeled probability distribution function (pdf) + @param out_labeled_pdf: bool, writes labeled_pdf if true + @param labeled_pdf_fn: str, filename of labeled_pdf + @param graph: list of graphNode objects visited for each path, + (all_paths from score_graph()) + @param graph_prob: list of probabilities for each path, (path_prob + from score_graph()) + """ + if out_labeled_pdf: + # open file + new = open(labeled_pdf_fn, 'w') + new.write('#\tPath\t\tpdf\n') + # loop over all paths in the graph + for i in range(0, len(graph_prob)): + # get index for the ith most likely path + pdf_index = np.flip(np.argsort(graph_prob), axis=0)[i] + path = graph[pdf_index] + # get all labels / time for the ith most likely path + all_labels = '' + for node in path: + all_labels += node.get_label() + '_' + node.get_time() + '|' + # write that path to a new file + new.write(all_labels + '\t' + str(graph_prob[pdf_index]) + '\n') + new.close() + + +def write_final_npaths(npaths, npath_fn, graph_scores, graph_prob): + """ + Function to output a file with all states for each of the n most likely + paths + + @param npaths: int, number of paths to output + @param npath_fn: str, name of the file for all paths + @param graph_scores: list of tuples, where the first object is the path + (list of graphNode objects for each state along the trajectory), + and the second object is the score of the path, which can be used + to calculate the probability. (path_scores from score_graph()) + @param graph_prob: list of probabilities for each path, (path_prob from + score_graph()) + """ + # loop over npaths + for i in range(-1, -1 * npaths - 1, -1): + path = [] + # get index for sorted probability + m = np.argsort(graph_prob)[i] + # go to that index and grab the path + for node in graph_scores[m][0]: + # append times not yet in the path + if node.get_time() not in path: + path.append(node.get_label() + '_' + node.get_time()) + + # save to new file + with open(npath_fn + str(abs(i)) + ".txt", "w") as fh: + for statename in path: + fh.write(statename + "\n") + + +# Rendering DAG +def draw_dag_in_graphviz(nodes, coloring=None, draw_label=True, + fontname="Helvetica", fontsize="18", penscale=0.6, + arrowsize=1.2, height="0.6", width="0.6"): + """Draw a DAG representation in graphviz and return the resulting Digraph. 
+ Takes a list of graphNodes and initializes the nodes and edges. + Coloring is expected to be a list of RGBA strings specifying how to color + each node. Expected to be same length as nodes. + + @param nodes: list of graphNode objects + @param coloring: list of RGBA strings to specify the color of each node. + Expected to be the same length as nodes + @param draw_label: bool, whether or not to draw graph labels + @param fontname: string, name of font for graph labels + @param fontsize: string, size of font for graph labels + @param penscale: float, size of pen + @param arrowsize: float, size of arrows + @param height: string, height of nodes + @param width: string, width of nodes + @return dot: Digraph object to be rendered + """ + + if Digraph is None: + raise Exception( + "graphviz not available, will not be able to draw graph") + else: + # create a dot object for the graph + dot = Digraph(format="eps", engine="dot") + dot.attr(ratio="1.5") + dot.attr(rotate="0") + + for ni, node in enumerate(nodes): + if coloring is not None: + color = coloring[ni] + else: + color = "#ffffff" + + if draw_label: + dot.node(str(node), label=node.get_label(), style="filled", + fillcolor=color, fontname=fontname, fontsize=fontsize, + height=height, width=width) + else: + dot.node(str(node), label=' ', style="filled", + fillcolor=color, fontname=fontname, fontsize=fontsize, + height=height, width=width) + + for ni, node in enumerate(nodes): + edges = node.get_edges() + for edge in edges: + dot.edge(str(node), + str(edge), + arrowsize=str(arrowsize), + color="black", + penwidth=str(penscale)) + + return dot + + +# first set of parameters are required and determine the connectivity +# of the map +def draw_dag(dag_fn, nodes, paths, path_prob, keys, + # 2nd set of parameters are for rendering the heatmap + heatmap=True, colormap="Purples", penscale=0.6, arrowsize=1.2, + fontname="Helvetica", fontsize="18", height="0.6", width="0.6", + draw_label=True): + """ + Function to render the DAG with heatmap information. + @param dag_fn: string, filename path + @param nodes: list of graphNode objects for which the graph will be drawn + @param paths: list of lists containing all paths visited by the graphNode + objects + @param path_prob: list of probabilities for each path, (path_prob from + score_graph()) + @param keys: states visited in the graph (list of keys to the state_dict) + @param heatmap: Boolean to determine whether or not to write the dag with + a heatmap based on the probability of each state (default: True) + @param colormap: string, colormap used by the dag to represent probability. + Chooses from those available in matplotlib + (https://matplotlib.org/stable/users/explain/colors/colormaps.html) + (default: "Purples"). 
+ @param penscale: float, size of the pen used to draw arrows on the dag + @param arrowsize: float, size of arrows connecting states on the dag + @param fontname: string, font used for the labels on the dag + @param fontsize: string, font size used for the labels on the dag + @param height: string, height of each node on the dag + @param width: string, width of each node on the dag + @param draw_label: Boolean to determine whether or not to draw state + labels on the dag + """ + + # determines if heatmap will be overlayed on top of DAG + if heatmap: + + if cm is None or clr is None: + raise Exception( + "matplotlib not available, will not be able to draw graph") + else: + + default_cmap = cm.get_cmap(colormap) + + # make a list of counts for each node to color + coloring = np.zeros(len(nodes), dtype=float) + for path, p in zip(paths, path_prob): + for n in path: + coloring[int(n.get_index())] += 1 * p + + # normalize probability + for t in keys: + b = np.array([t == n.get_time() for n in nodes]) + coloring[b] /= coloring[b].sum() + + # convert probability to colors + cmap_colors = [clr.to_hex(default_cmap(color)) + for color in coloring] + + dot = draw_dag_in_graphviz( + nodes, coloring=cmap_colors, draw_label=draw_label, + fontname=fontname, fontsize=fontsize, penscale=penscale, + arrowsize=arrowsize, height=height, width=width) + dot.render(dag_fn) + + # no heatmap + else: + dot = draw_dag_in_graphviz( + nodes, coloring=None, draw_label=draw_label, fontname=fontname, + fontsize=fontsize, penscale=penscale, arrowsize=arrowsize, + height=height, width=width) + dot.render(dag_fn) diff --git a/modules/spatiotemporal/test/input/data/1_0min.config b/modules/spatiotemporal/test/input/data/1_0min.config new file mode 100644 index 0000000000..1e6fea0beb --- /dev/null +++ b/modules/spatiotemporal/test/input/data/1_0min.config @@ -0,0 +1 @@ +A1 diff --git a/modules/spatiotemporal/test/input/data/1_0min_scores.log b/modules/spatiotemporal/test/input/data/1_0min_scores.log new file mode 100644 index 0000000000..ba66466c2a --- /dev/null +++ b/modules/spatiotemporal/test/input/data/1_0min_scores.log @@ -0,0 +1 @@ +0.0 diff --git a/modules/spatiotemporal/test/input/data/1_10min.config b/modules/spatiotemporal/test/input/data/1_10min.config new file mode 100644 index 0000000000..995df58283 --- /dev/null +++ b/modules/spatiotemporal/test/input/data/1_10min.config @@ -0,0 +1,3 @@ +A1 +A2 +B1 diff --git a/modules/spatiotemporal/test/input/data/1_10min_scores.log b/modules/spatiotemporal/test/input/data/1_10min_scores.log new file mode 100644 index 0000000000..d3827e75a5 --- /dev/null +++ b/modules/spatiotemporal/test/input/data/1_10min_scores.log @@ -0,0 +1 @@ +1.0 diff --git a/modules/spatiotemporal/test/input/data/1_5min.config b/modules/spatiotemporal/test/input/data/1_5min.config new file mode 100644 index 0000000000..5556ed8b5a --- /dev/null +++ b/modules/spatiotemporal/test/input/data/1_5min.config @@ -0,0 +1,2 @@ +A1 +A2 diff --git a/modules/spatiotemporal/test/input/data/1_5min_scores.log b/modules/spatiotemporal/test/input/data/1_5min_scores.log new file mode 100644 index 0000000000..d3827e75a5 --- /dev/null +++ b/modules/spatiotemporal/test/input/data/1_5min_scores.log @@ -0,0 +1 @@ +1.0 diff --git a/modules/spatiotemporal/test/input/data/2_0min.config b/modules/spatiotemporal/test/input/data/2_0min.config new file mode 100644 index 0000000000..a19a02740a --- /dev/null +++ b/modules/spatiotemporal/test/input/data/2_0min.config @@ -0,0 +1 @@ +B1 diff --git 
a/modules/spatiotemporal/test/input/data/2_0min_scores.log b/modules/spatiotemporal/test/input/data/2_0min_scores.log new file mode 100644 index 0000000000..d3827e75a5 --- /dev/null +++ b/modules/spatiotemporal/test/input/data/2_0min_scores.log @@ -0,0 +1 @@ +1.0 diff --git a/modules/spatiotemporal/test/input/data/2_10min.config b/modules/spatiotemporal/test/input/data/2_10min.config new file mode 100644 index 0000000000..f35885cbf1 --- /dev/null +++ b/modules/spatiotemporal/test/input/data/2_10min.config @@ -0,0 +1,3 @@ +A1 +B1 +B2 diff --git a/modules/spatiotemporal/test/input/data/2_10min_scores.log b/modules/spatiotemporal/test/input/data/2_10min_scores.log new file mode 100644 index 0000000000..d3827e75a5 --- /dev/null +++ b/modules/spatiotemporal/test/input/data/2_10min_scores.log @@ -0,0 +1 @@ +1.0 diff --git a/modules/spatiotemporal/test/input/data/2_5min.config b/modules/spatiotemporal/test/input/data/2_5min.config new file mode 100644 index 0000000000..7f1002a84d --- /dev/null +++ b/modules/spatiotemporal/test/input/data/2_5min.config @@ -0,0 +1,2 @@ +A1 +B1 diff --git a/modules/spatiotemporal/test/input/data/2_5min_scores.log b/modules/spatiotemporal/test/input/data/2_5min_scores.log new file mode 100644 index 0000000000..ba66466c2a --- /dev/null +++ b/modules/spatiotemporal/test/input/data/2_5min_scores.log @@ -0,0 +1 @@ +0.0 diff --git a/modules/spatiotemporal/test/input/data/3_5min.config b/modules/spatiotemporal/test/input/data/3_5min.config new file mode 100644 index 0000000000..6df30375b1 --- /dev/null +++ b/modules/spatiotemporal/test/input/data/3_5min.config @@ -0,0 +1,2 @@ +B1 +B2 diff --git a/modules/spatiotemporal/test/input/data/3_5min_scores.log b/modules/spatiotemporal/test/input/data/3_5min_scores.log new file mode 100644 index 0000000000..d3827e75a5 --- /dev/null +++ b/modules/spatiotemporal/test/input/data/3_5min_scores.log @@ -0,0 +1 @@ +1.0 diff --git a/modules/spatiotemporal/test/input/data/exp_comp_A.csv b/modules/spatiotemporal/test/input/data/exp_comp_A.csv new file mode 100644 index 0000000000..8ed0e941f4 --- /dev/null +++ b/modules/spatiotemporal/test/input/data/exp_comp_A.csv @@ -0,0 +1,4 @@ +Time,mean,std +0min,0.75,1 +5min,0.75,1 +10min,1.75,1 \ No newline at end of file diff --git a/modules/spatiotemporal/test/input/data/exp_comp_B.csv b/modules/spatiotemporal/test/input/data/exp_comp_B.csv new file mode 100644 index 0000000000..7a564cd647 --- /dev/null +++ b/modules/spatiotemporal/test/input/data/exp_comp_B.csv @@ -0,0 +1,4 @@ +Time,mean,std +0min,0.25,1 +5min,1.25,1 +10min,1.25,1 \ No newline at end of file diff --git a/modules/spatiotemporal/test/standards_exceptions b/modules/spatiotemporal/test/standards_exceptions new file mode 100644 index 0000000000..2c7bc949f7 --- /dev/null +++ b/modules/spatiotemporal/test/standards_exceptions @@ -0,0 +1,4 @@ +value_object_exceptions=[] +function_name_exceptions=['create_DAG'] +show_exceptions=[] +spelling_exceptions=[] diff --git a/modules/spatiotemporal/test/test_make_graph.py b/modules/spatiotemporal/test/test_make_graph.py new file mode 100644 index 0000000000..44148e33f1 --- /dev/null +++ b/modules/spatiotemporal/test/test_make_graph.py @@ -0,0 +1,192 @@ +try: + import pandas +except ImportError: + pandas = None +import IMP +import IMP.test +import IMP.spatiotemporal as spatiotemporal +import IMP.spatiotemporal.graphNode as graphNode +import shutil +import os +import sys +import itertools +import numpy as np + +def setup_system(): + """ + Function to set up initial variables + """ + # Input variables. 
+    state_dict = {'0min': 2, '5min': 3, '10min': 2}
+    subcomplexes = ['A1', 'A2', 'B1', 'B2']
+    # exp_comp_map is a dictionary that describes protein stoichiometry.
+    # The key describes the protein, which should correspond to names within
+    # the expected_subcomplexes. For each of these proteins, a csv file
+    # should be provided with protein copy number data
+    exp_comp = {'A': 'exp_comp_A.csv', 'B': 'exp_comp_B.csv'}
+    return state_dict, subcomplexes, exp_comp
+
+class Tests(IMP.test.TestCase):
+
+    def test_graph_setup(self):
+        """
+        Test setting up a graph. Tests functionality of graphNode.py
+        """
+        state_dict, expected_subcomplexes, exp_comp_map = setup_system()
+        # set input dir
+        with IMP.test.temporary_directory() as tmpdir:
+            input_dir = os.path.join(tmpdir, 'data')
+            shutil.copytree(self.get_input_file_name('data'), input_dir)
+            temp_key = list(exp_comp_map.keys())
+            for key in temp_key:
+                found = 0
+                for subcomplex in expected_subcomplexes:
+                    if key in subcomplex:
+                        found = found + 1
+                if found == 0:
+                    raise Exception(
+                        'WARNING!!! Check exp_comp_map and '
+                        'expected_subcomplexes. protein ' + key
+                        + ' is not found in expected_subcomplexes. '
+                        'This could cause illogical results.')
+
+            # Step 1: Initialize graph with static scores
+            # list of all nodes
+            nodes = []
+            # keys correspond to all timepoints
+            keys = list(state_dict.keys())
+            if len(input_dir) > 0:
+                if os.path.exists(input_dir):
+                    os.chdir(input_dir)
+                else:
+                    raise Exception("Error!!! Does not exist: " + input_dir
+                                    + '\nClosing...')
+
+            # Loop over all keys and all states
+            for key in keys:
+                for i in range(state_dict[key]):
+                    index = i + 1
+                    node = graphNode.graphNode()
+                    node.init_graphNode(key, str(index), '_scores.log',
+                                        '.config', [])
+                    nodes.append(node)
+
+            # build up candidate edges in graph
+            tpairs = [(keys[i], keys[i + 1]) for i in range(0, len(keys) - 1)]
+            for a, b in tpairs:
+                # get time marginalized nodes
+                anode = [n for n in nodes if n.get_time() == a]
+                bnode = [n for n in nodes if n.get_time() == b]
+                # draw edges between pairs. Include whether or not to
+                # include spatio_temporal_rule
+                for na, nb in itertools.product(anode, bnode):
+                    graphNode.draw_edge(na, nb, False)
+            # set indices for all nodes. These are unique for each node,
+            # unlike labels, which can overlap
+            for ni, node in enumerate(nodes):
+                node.set_index(ni)
+            # check that all nodes are graphNode objects
+            for node in nodes:
+                self.assertIsInstance(node, graphNode.graphNode)
+
+    def test_graph_scoring(self):
+        """
+        Tests scoring the same graph built above. Tests the score_graph
+        function (score_graph.py)
+        """
+        state_dict, expected_subcomplexes, exp_comp_map = setup_system()
+        # set input dir
+        with IMP.test.temporary_directory() as tmpdir:
+            input = os.path.join(tmpdir, 'data')
+            shutil.copytree(self.get_input_file_name('data'), input)
+            nodes, graph, graph_prob, graph_scores = spatiotemporal.create_DAG(
+                state_dict, input_dir=input, out_cdf=False,
+                out_labeled_pdf=False, draw_dag=False)
+            # Check the overall score for the first trajectory
+            for trajectory in graph_scores:
+                if trajectory[0][0].get_label() == '1' \
+                        and trajectory[0][1].get_label() == '1' \
+                        and trajectory[0][2].get_label() == '1':
+                    self.assertAlmostEqual(trajectory[1], 2.0, delta=1e-4)
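The expected value of 2.0 above can be read off the bundled test data: the all-'1' trajectory visits 1_0min, 1_5min and 1_10min, whose score logs (added earlier in this diff) contain 0.0, 1.0 and 1.0, and a path's score is the sum of its node weights:

```python
# scores from modules/spatiotemporal/test/input/data/*_scores.log
node_scores = {'1_0min': 0.0, '1_5min': 1.0, '1_10min': 1.0}
assert abs(sum(node_scores.values()) - 2.0) < 1e-4  # matches the test
```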
+
+    @IMP.test.skipIf(sys.version_info[0] < 3,
+                     "Does not work with ancient numpy in Python 2")
+    def test_temporal_scoring(self):
+        """
+        Tests spatiotemporal rule functionality. Found in the draw_edge
+        function of graphNode.py
+        """
+        state_dict, expected_subcomplexes, exp_comp_map = setup_system()
+        # set input dir
+        with IMP.test.temporary_directory() as tmpdir:
+            input = os.path.join(tmpdir, 'data')
+            shutil.copytree(self.get_input_file_name('data'), input)
+            nodes, graph, graph_prob, graph_scores = spatiotemporal.create_DAG(
+                state_dict, input_dir=input, out_cdf=False,
+                out_labeled_pdf=False, spatio_temporal_rule=True,
+                expected_subcomplexes=expected_subcomplexes, draw_dag=False)
+            # Check the overall score for the first trajectory
+            for trajectory in graph_scores:
+                if trajectory[0][0].get_label() == '1' \
+                        and trajectory[0][1].get_label() == '1' \
+                        and trajectory[0][2].get_label() == '1':
+                    self.assertAlmostEqual(trajectory[1], 2.0, delta=1e-4)
+
+    def test_composition_scoring(self):
+        """
+        Tests composition scoring functionality. Found in
+        composition_scoring.py, calc_likelihood
+        """
+        if pandas is None:
+            self.skipTest(
+                'pandas not available, will not test composition scoring')
+        state_dict, expected_subcomplexes, exp_comp_map = setup_system()
+        # set input dir
+        with IMP.test.temporary_directory() as tmpdir:
+            input = os.path.join(tmpdir, 'data')
+            shutil.copytree(self.get_input_file_name('data'), input)
+            nodes, graph, graph_prob, graph_scores = spatiotemporal.create_DAG(
+                state_dict, input_dir=input, out_cdf=False,
+                out_labeled_pdf=False, score_comp=True,
+                exp_comp_map=exp_comp_map, draw_dag=False,
+                spatio_temporal_rule=False,
+                expected_subcomplexes=expected_subcomplexes)
+            # Check the overall score for the lowest energy trajectory
+            for trajectory in graph_scores:
+                if trajectory[0][0].get_label() == '1' \
+                        and trajectory[0][1].get_label() == '2' \
+                        and trajectory[0][2].get_label() == '1':
+                    self.assertAlmostEqual(trajectory[1], 6.701131199228036,
+                                           delta=1e-4)
+
+    @IMP.test.skipIf(sys.version_info[0] < 3,
+                     "Does not work with ancient numpy in Python 2")
+    def test_writing_output(self):
+        """
+        Tests writing text output. From write_output.py
+        """
+        state_dict, expected_subcomplexes, exp_comp_map = setup_system()
+        # set input dir
+        with IMP.test.temporary_directory() as tmpdir:
+            input = os.path.join(tmpdir, 'data')
+            shutil.copytree(self.get_input_file_name('data'), input)
+            # set output dir
+            output = os.path.join(tmpdir, 'output')
+            spatiotemporal.create_DAG(
+                state_dict, input_dir=input, output_dir=output,
+                out_labeled_pdf=True, out_cdf=True, out_pdf=True,
+                draw_dag=False)
+            # Read in input files
+            cdf = np.loadtxt(output + '/cdf.txt')
+            # the 2nd most likely path in the cdf
+            self.assertAlmostEqual(cdf[1], 0.42117519, delta=1e-4)
+            # the 2nd most likely path in the pdf
+            pdf = np.loadtxt(output + '/pdf.txt')
+            self.assertAlmostEqual(pdf[1], 0.2105876, delta=1e-4)
+            check_label_pdf = open(output + '/labeled_pdf.txt', 'r')
+            # read in the 2nd line
+            line = check_label_pdf.readline()
+            line = check_label_pdf.readline()
+            line_split = line.split()
+            check_label_pdf.close()
+            # 2 possibilities are acceptable: '1_0min|2_5min|2_10min|' or
+            # '1_0min|2_5min|1_10min|'. Check this output in 3 parts:
+            self.assertEqual(line_split[0][0:14], '1_0min|2_5min|')
+            self.assertAlmostEqual(int(line_split[0][14]), 1.5,
+                                   delta=0.5000001)
+            self.assertEqual(line_split[0][15:], '_10min|')
+
+    def test_writing_dag(self):
+        """
+        Tests writing DAG output.
From write_output.py + """ + state_dict, expected_subcomplexes, exp_comp_map = setup_system() + # set input dir + with IMP.test.temporary_directory() as tmpdir: + input = os.path.join(tmpdir, 'data') + shutil.copytree(self.get_input_file_name('data'), input) + # set output dir + output=self.get_tmp_file_name('output') + skip=0 + try: + from graphviz import Digraph + except ImportError: + self.skipTest('graphviz not available, will not test drawing graph') + try: + from matplotlib import cm + from matplotlib import colors as clr + except ImportError: + self.skipTest('matplotlib not available, will not test drawing graph') + nodes, graph, graph_prob, graph_scores = spatiotemporal.create_DAG(state_dict, input_dir=input,output_dir=output, draw_dag=True,out_labeled_pdf=False,out_cdf=False,out_pdf=False) + # scores for the first trajectory match + for trajectory in graph_scores: + if trajectory[0][0].get_label() == '1' and trajectory[0][1].get_label() == '1' and trajectory[0][2].get_label() == '1': + self.assertAlmostEqual(trajectory[1], 2.0, delta=1e-4) + + +if __name__ == '__main__': + IMP.test.main() diff --git a/modules/spb/bin/spb.cpp b/modules/spb/bin/spb.cpp index 4e65f58ecb..78fca129b4 100644 --- a/modules/spb/bin/spb.cpp +++ b/modules/spb/bin/spb.cpp @@ -18,6 +18,17 @@ #include #include #include + +// We only want the C API, so try to suppress the C++ API +#ifndef MPICH_SKIP_MPICXX +#define MPICH_SKIP_MPICXX +#endif +#ifndef OMPI_SKIP_MPICXX +#define OMPI_SKIP_MPICXX +#endif +#ifndef _MPICC_H +#define _MPICC_H +#endif #include "mpi.h" using namespace IMP; diff --git a/modules/spb/bin/spb_density_perbead.cpp b/modules/spb/bin/spb_density_perbead.cpp index 632a3f47c8..169d34ab36 100644 --- a/modules/spb/bin/spb_density_perbead.cpp +++ b/modules/spb/bin/spb_density_perbead.cpp @@ -16,6 +16,17 @@ #include #include #include + +// We only want the C API, so try to suppress the C++ API +#ifndef MPICH_SKIP_MPICXX +#define MPICH_SKIP_MPICXX +#endif +#ifndef OMPI_SKIP_MPICXX +#define OMPI_SKIP_MPICXX +#endif +#ifndef _MPICC_H +#define _MPICC_H +#endif #include "mpi.h" using namespace IMP; diff --git a/modules/spb/src/BoxedMover.cpp b/modules/spb/src/BoxedMover.cpp index e7ba064262..2c4f075aea 100644 --- a/modules/spb/src/BoxedMover.cpp +++ b/modules/spb/src/BoxedMover.cpp @@ -27,7 +27,7 @@ BoxedMover::BoxedMover(Particle *p, Float max_tr, algebra::Vector3Ds centers) core::MonteCarloMoverResult BoxedMover::do_propose() { /*IMP_LOG(VERBOSE,"BoxedMover:: propose move f is : " << f < rand(0,1); + ::boost::random::uniform_real_distribution<> rand(0,1); double fc =rand(random_number_generator); if (fc > f) return ParticlesTemp(); } diff --git a/modules/spb/src/CellMover.cpp b/modules/spb/src/CellMover.cpp index f45575cb01..a065d7a422 100644 --- a/modules/spb/src/CellMover.cpp +++ b/modules/spb/src/CellMover.cpp @@ -71,12 +71,12 @@ algebra::Vector3D CellMover::get_transformed(Float cf, algebra::Vector3D oc) { core::MonteCarloMoverResult CellMover::do_propose() { /*IMP_LOG(VERBOSE,"CellMover::f is : " << f < rand(0,1); + ::boost::random::uniform_real_distribution<> rand(0,1); double fc =rand(IMP::random_number_generator); if (fc > f) return ParticlesTemp(); } */ - boost::uniform_real<> rand(0, 1); + boost::random::uniform_real_distribution<> rand(0, 1); boost::normal_distribution mrng(0, max_translation_); boost::variate_generator > diff --git a/modules/spb/src/PbcBoxedRigidBodyMover.cpp b/modules/spb/src/PbcBoxedRigidBodyMover.cpp index c60947faf6..83918fe8ac 100644 --- 
a/modules/spb/src/PbcBoxedRigidBodyMover.cpp +++ b/modules/spb/src/PbcBoxedRigidBodyMover.cpp @@ -111,7 +111,7 @@ core::MonteCarloMoverResult PbcBoxedRigidBodyMover::do_propose() { // generate rotation around random axis algebra::VectorD<3> axis = algebra::get_random_vector_on( algebra::Sphere3D(algebra::VectorD<3>(0.0, 0.0, 0.0), 1.)); - ::boost::uniform_real<> rand(-max_angle_, max_angle_); + ::boost::random::uniform_real_distribution<> rand(-max_angle_, max_angle_); Float angle = rand(IMP::random_number_generator); algebra::Rotation3D r = algebra::get_rotation_about_axis(axis, angle); diff --git a/modules/spb/src/RigidBodyNewMover.cpp b/modules/spb/src/RigidBodyNewMover.cpp index 151d5bde90..4f92b8cd5f 100644 --- a/modules/spb/src/RigidBodyNewMover.cpp +++ b/modules/spb/src/RigidBodyNewMover.cpp @@ -52,7 +52,7 @@ core::MonteCarloMoverResult RigidBodyNewMover::do_propose() { algebra::VectorD<3> axis = algebra::get_random_vector_on( algebra::Sphere3D(algebra::VectorD<3>(0.0, 0.0, 0.0), 1.)); - ::boost::uniform_real<> rand(-max_angle_, max_angle_); + ::boost::random::uniform_real_distribution<> rand(-max_angle_, max_angle_); Float angle = rand(IMP::random_number_generator); algebra::Rotation3D r = algebra::get_rotation_about_axis(axis, angle); algebra::Rotation3D rc = diff --git a/modules/statistics/include/internal/VQClustering.h b/modules/statistics/include/internal/VQClustering.h index a35b45e8de..0db150024e 100644 --- a/modules/statistics/include/internal/VQClustering.h +++ b/modules/statistics/include/internal/VQClustering.h @@ -23,7 +23,7 @@ class IMPSTATISTICSEXPORT VQClusteringParameters { int number_of_runs_; int number_of_steps_; double ei_, ef_; // parameters for epsilon updates - double li_, lf_; // parameters for lamda updates + double li_, lf_; // parameters for lambda updates double random_offset_; // random offset for point sampling bool eq_clusters_; // should the clusters have equal size void show(std::ostream &out = std::cout) const { @@ -31,7 +31,7 @@ class IMPSTATISTICSEXPORT VQClusteringParameters { out << "Number of runs : " << number_of_runs_ << std::endl; out << "Number of steps: " << number_of_steps_ << std::endl; out << "Epsilon updates: " << ei_ << " " << ef_ << std::endl; - out << "Lamda updates : " << li_ << " " << lf_ << std::endl; + out << "Lambda updates : " << li_ << " " << lf_ << std::endl; out << "Random offset : " << random_offset_ << std::endl; } VQClusteringParameters(int dim, int k) : dim_(dim), k_(k) { init(); } diff --git a/modules/statistics/include/internal/random_generator.h b/modules/statistics/include/internal/random_generator.h index 8577136653..a4da6993da 100644 --- a/modules/statistics/include/internal/random_generator.h +++ b/modules/statistics/include/internal/random_generator.h @@ -20,12 +20,12 @@ IMPSTATISTICS_BEGIN_INTERNAL_NAMESPACE \param[in] n , the range is [0,n-1] */ inline int random_int(int n) { - ::boost::uniform_int<> rand(0, n - 1); + ::boost::random::uniform_int_distribution<> rand(0, n - 1); return rand(random_number_generator); } //! Generate a random number in the range [lo,hi] inline double random_uniform(double lo = 0.0, double hi = 1.0) { - ::boost::uniform_real<> rand(lo, hi); + ::boost::random::uniform_real_distribution<> rand(lo, hi); return rand(random_number_generator); } //! 
Gaussian random number generator diff --git a/modules/symmetry/src/RigidBodyMover.cpp b/modules/symmetry/src/RigidBodyMover.cpp index aa44051630..3cbec5360a 100644 --- a/modules/symmetry/src/RigidBodyMover.cpp +++ b/modules/symmetry/src/RigidBodyMover.cpp @@ -91,7 +91,7 @@ core::MonteCarloMoverResult RigidBodyMover::do_propose() { algebra::VectorD<3> axis = algebra::get_random_vector_on( algebra::Sphere3D(algebra::VectorD<3>(0.0, 0.0, 0.0), 1.)); - ::boost::uniform_real<> rand(-max_ang_, max_ang_); + ::boost::random::uniform_real_distribution<> rand(-max_ang_, max_ang_); Float angle = rand(random_number_generator); algebra::Rotation3D r = algebra::get_rotation_about_axis(axis, angle); diff --git a/modules/test/pyext/src/__init__.py b/modules/test/pyext/src/__init__.py index 017ecc3869..2238773b0d 100644 --- a/modules/test/pyext/src/__init__.py +++ b/modules/test/pyext/src/__init__.py @@ -318,6 +318,56 @@ def assertSequenceAlmostEqual(self, first, second, places=None, msg=None, msg = self._formatMessage(msg, standardMsg) raise self.failureException(msg) + def _read_cmake_cfg(self, cmake_cfg): + """Parse IMPConfig.cmake and extract info on the C++ compiler""" + cxx = flags = sysroot = None + includes = [] + with open(cmake_cfg) as fh: + for line in fh: + if line.startswith('set(IMP_CXX_COMPILER '): + cxx = line.split('"')[1] + elif line.startswith('set(IMP_CXX_FLAGS '): + flags = line.split('"')[1] + elif line.startswith('set(IMP_OSX_SYSROOT '): + sysroot = line.split('"')[1] + elif line.startswith('SET(Boost_INCLUDE_DIR '): + includes.append(line.split('"')[1]) + elif line.startswith('SET(EIGEN3_INCLUDE_DIR '): + includes.append(line.split('"')[1]) + elif line.startswith('SET(cereal_INCLUDE_DIRS '): + includes.append(line.split('"')[1]) + return cxx, flags, includes, sysroot + + def assertCompileFails(self, headers, body): + """Test that the given C++ code fails to compile with a static + assertion.""" + if sys.platform == 'win32': + self.skipTest("No support for Windows yet") + libdir = os.path.dirname(IMP.__file__) + cmake_cfg = os.path.join(libdir, '..', '..', 'IMPConfig.cmake') + if not os.path.exists(cmake_cfg): + self.skipTest("cannot find IMPConfig.cmake") + cxx, flags, includes, sysroot = self._read_cmake_cfg(cmake_cfg) + # On Mac we need to point to the SDK + if sys.platform == 'darwin' and sysroot: + flags = flags + " -isysroot" + sysroot + includes.append(os.path.join(libdir, '..', '..', 'include')) + include = " ".join("-I" + inc for inc in includes) + with temporary_directory() as tmpdir: + fname = os.path.join(tmpdir, 'test.cpp') + with open(fname, 'w') as fh: + for h in headers: + fh.write("#include <%s>\n" % h) + fh.write("\nint main() {\n" + body + "\n return 0;\n}\n") + cmdline = "%s %s %s %s" % (cxx, flags, include, fname) + print(cmdline) + p = subprocess.Popen(cmdline, shell=True, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + universal_newlines=True) + out, err = p.communicate() + self.assertIn('error: static assertion failed', err) + def create_point_particle(self, model, x, y, z): """Make a particle with optimizable x, y and z attributes, and add it to the model.""" @@ -644,6 +694,12 @@ def assertFunctionNames(self, module, exceptions, words): def assertShow(self, modulename, exceptions): """Check that all the classes in modulename have a show method""" all = dir(modulename) + if hasattr(modulename, '_raii_types'): + excludes = frozenset( + modulename._raii_types + modulename._plural_types) + else: + # Python-only modules don't have these two lists + excludes 
= frozenset() not_found = [] for f in all: # Exclude SWIG C global variables object @@ -659,8 +715,7 @@ def assertShow(self, modulename, exceptions): and f not in exceptions\ and not f.endswith("Temp") and not f.endswith("Iterator")\ and not f.endswith("Exception") and\ - f not in modulename._raii_types and \ - f not in modulename._plural_types: + f not in excludes: if not hasattr(getattr(modulename, f), 'show'): not_found.append(f) self.assertEqual( @@ -792,6 +847,11 @@ def stopTestRun(self): protocol = min(pickle.HIGHEST_PROTOCOL, 4) fname = (Path(os.environ['IMP_TEST_DETAIL_DIR']) / Path(sys.argv[0]).name) + # In Wine builds, we may have cd'd to a different drive, e.g. C: + # in which case we will no longer be able to see /tmp. In this + # case, try to disambiguate by adding a drive. + if not fname.exists(): + fname = Path("Z:") / fname with open(str(fname), 'wb') as fh: pickle.dump(self.all_tests, fh, protocol) super(_TestResult, self).stopTestRun() diff --git a/modules/test/test/standards_exceptions b/modules/test/test/standards_exceptions index cd3f8926a5..ed1b99d37c 100644 --- a/modules/test/test/standards_exceptions +++ b/modules/test/test/standards_exceptions @@ -37,6 +37,7 @@ exceptions.extend([ 'ApplicationTestCase.assertNotImplemented', 'ApplicationTestCase.assertXYZDerivativesInTolerance', 'ApplicationTestCase.assertNumPyArrayEqual', + 'ApplicationTestCase.assertCompileFails', 'ApplicationTestCase.check_runnable_python_module', 'ApplicationTestCase.check_unary_function_deriv', 'ApplicationTestCase.check_unary_function_min', @@ -63,6 +64,7 @@ exceptions.extend([ 'TestCase.assertNotImplemented', 'TestCase.assertXYZDerivativesInTolerance', 'TestCase.assertNumPyArrayEqual', + 'TestCase.assertCompileFails', 'TestCase.check_runnable_python_module', 'TestCase.check_unary_function_deriv', 'TestCase.check_unary_function_min', diff --git a/tools/build/cmake_templates/Module.cmake b/tools/build/cmake_templates/Module.cmake index 61b8672e62..fd646afafb 100644 --- a/tools/build/cmake_templates/Module.cmake +++ b/tools/build/cmake_templates/Module.cmake @@ -15,6 +15,7 @@ imp_get_process_exit_code("Setting up module %(name)s" status ${CMAKE_BINARY_DIR --datapath=${IMP_DATAPATH} --defines=${IMP_%(name)s_CONFIG}:%(defines)s${CUDA_DEFINES} --source=${CMAKE_SOURCE_DIR} + --python_version_major=${PYTHON_VERSION_MAJOR} %(bin_names)s) if(${status} EQUAL 0) diff --git a/tools/build/container_templates/core/classname_predicates.h b/tools/build/container_templates/core/classname_predicates.h index cf99af33c5..dd5a4ac6f0 100644 --- a/tools/build/container_templates/core/classname_predicates.h +++ b/tools/build/container_templates/core/classname_predicates.h @@ -154,7 +154,7 @@ class IMPCOREEXPORT AllSameClassnamePredicate : public ClassnamePredicate { //! Return true (1) with a fixed probability. 
class IMPCOREEXPORT CoinFlipClassnamePredicate : public ClassnamePredicate { double p_; - mutable boost::uniform_real rng_; + mutable boost::random::uniform_real_distribution rng_; public: CoinFlipClassnamePredicate(double p, std::string name = diff --git a/tools/build/doxygen_patch_tags.py b/tools/build/doxygen_patch_tags.py index e8bccc1f6a..885fda5bd6 100755 --- a/tools/build/doxygen_patch_tags.py +++ b/tools/build/doxygen_patch_tags.py @@ -1,27 +1,27 @@ #!/usr/bin/env python -from optparse import OptionParser +from argparse import ArgumentParser -parser = OptionParser() -parser.add_option("-m", "--module", dest="module", - help="The module name.") -parser.add_option("-f", "--file", dest="file", - help="The tags file.") +parser = ArgumentParser() +parser.add_argument("-m", "--module", dest="module", + help="The module name.") +parser.add_argument("-f", "--file", dest="file", + help="The tags file.") def main(): - (options, args) = parser.parse_args() - with open(options.file, "r") as fh: + args = parser.parse_args() + with open(args.file, "r") as fh: input = fh.read() input = input.replace( "index", "IMP.%s" % - options.module) + args.module) input = input.replace( "", "IMP.%s" % - options.module) - with open(options.file, "w") as fh: + args.module) + with open(args.file, "w") as fh: fh.write(input) diff --git a/tools/build/doxygen_show_warnings.py b/tools/build/doxygen_show_warnings.py index 95867e1c75..19fed2fa3b 100755 --- a/tools/build/doxygen_show_warnings.py +++ b/tools/build/doxygen_show_warnings.py @@ -1,11 +1,11 @@ #!/usr/bin/env python -from optparse import OptionParser +from argparse import ArgumentParser import sys -parser = OptionParser() -parser.add_option("-w", "--warnings", dest="warnings", - help="The warnings file.") +parser = ArgumentParser() +parser.add_argument("-w", "--warnings", dest="warnings", + help="The warnings file.") suppress_strings = ["not generated, too many nodes", "introduction_values", @@ -17,8 +17,8 @@ def main(): - (options, args) = parser.parse_args() - with open(options.warnings, "r") as fh: + args = parser.parse_args() + with open(args.warnings, "r") as fh: input = fh.readlines() found = False for line in input: diff --git a/tools/build/make_module_version.py b/tools/build/make_module_version.py index 7a781880fb..75095bb4bb 100755 --- a/tools/build/make_module_version.py +++ b/tools/build/make_module_version.py @@ -10,24 +10,26 @@ """ import sys -from optparse import OptionParser +from argparse import ArgumentParser import os.path import tools TOPDIR = os.path.abspath(os.path.dirname(__file__)) -parser = OptionParser() -parser.add_option("-n", "--name", - dest="name", help="The name of the module.") -parser.add_option("-s", "--source", - dest="source", help="The root for IMP source.") -parser.add_option("-d", "--datapath", - dest="datapath", default="", help="An extra IMP datapath.") +parser = ArgumentParser() +parser.add_argument("-n", "--name", + dest="name", help="The name of the module.", + required=True) +parser.add_argument("-s", "--source", + dest="source", help="The root for IMP source.", + required=True) +parser.add_argument("-d", "--datapath", + dest="datapath", default="", help="An extra IMP datapath.") -def make_cpp(options): +def make_cpp(args): dir = os.path.join("src") - file = os.path.join(dir, "%s_config.cpp" % options.name) + file = os.path.join(dir, "%s_config.cpp" % args.name) cpp_template = tools.CPPFileGenerator( os.path.join(TOPDIR, "config_templates", "src.cpp")) try: @@ -36,21 +38,21 @@ def make_cpp(options): # exists 
pass data = {} - if options.name == 'kernel': - data["filename"] = "IMP/%s_config.h" % options.name + if args.name == 'kernel': + data["filename"] = "IMP/%s_config.h" % args.name else: - data["filename"] = "IMP/%s/%s_config.h" % (options.name, options.name) - data["cppprefix"] = "IMP%s" % options.name.upper().replace("_", "") - data["name"] = options.name - data["version"] = tools.get_module_version(options.name, options.source) + data["filename"] = "IMP/%s/%s_config.h" % (args.name, args.name) + data["cppprefix"] = "IMP%s" % args.name.upper().replace("_", "") + data["name"] = args.name + data["version"] = tools.get_module_version(args.name, args.source) cpp_template.write(file, data) -def make_version_check(options): +def make_version_check(args): dir = os.path.join("lib", "IMP", - "" if options.name == 'kernel' else options.name) + "" if args.name == 'kernel' else args.name) tools.mkdir(dir, clean=False) - version = tools.get_module_version(options.name, options.source) + version = tools.get_module_version(args.name, args.source) outf = os.path.join(dir, "_version_check.py") template = """def check_version(myversion): def _check_one(name, expected, found): @@ -63,14 +65,23 @@ def _check_one(name, expected, found): version = '%s' _check_one('%s', version, myversion) """ + if args.name == 'kernel': + template += """ + import sys + if sys.version_info[0] < 3: + import warnings + warnings.warn( + "Python 2 support is deprecated in IMP and will be removed in " + "the next major release. Please port your workflows to Python 3.") +""" g = tools.PythonFileGenerator() - g.write(outf, template % (version, options.name)) + g.write(outf, template % (version, args.name)) def main(): - (options, args) = parser.parse_args() - make_cpp(options) - make_version_check(options) + args = parser.parse_args() + make_cpp(args) + make_version_check(args) sys.exit(0) diff --git a/tools/build/make_python_init.py b/tools/build/make_python_init.py index 3de7edf07b..b8d1e4e43c 100755 --- a/tools/build/make_python_init.py +++ b/tools/build/make_python_init.py @@ -5,7 +5,7 @@ import os.path import tools import shutil -from optparse import OptionParser +from argparse import ArgumentParser def build_init(module, source, infname, outfname): @@ -41,22 +41,21 @@ def get_example_path(fname): def main(): - parser = OptionParser() - parser.add_option("--build_dir", help="IMP build directory", default=None) - parser.add_option("-s", "--source", - dest="source", help="Where to find IMP source.") - - options, args = parser.parse_args() - if len(args) != 1: - parser.error("You must specify an IMP module") - module, = args - - mf = tools.ModulesFinder(source_dir=options.source, - external_dir=options.build_dir, - module_name=module) - module = mf[module] + parser = ArgumentParser() + parser.add_argument("--build_dir", help="IMP build directory", + default=None) + parser.add_argument("-s", "--source", + dest="source", help="Where to find IMP source.") + parser.add_argument("module", help="IMP module name") + + args = parser.parse_args() + + mf = tools.ModulesFinder(source_dir=args.source, + external_dir=args.build_dir, + module_name=args.module) + module = mf[args.module] build_init( - module, options.source, + module, args.source, os.path.join(module.path, "pyext", "src", "__init__.py"), os.path.join("lib", "IMP", module.name, '__init__.py')) diff --git a/tools/build/make_swig_wrapper.py b/tools/build/make_swig_wrapper.py index a69c82d963..73fb598637 100755 --- a/tools/build/make_swig_wrapper.py +++ 
b/tools/build/make_swig_wrapper.py @@ -4,21 +4,22 @@ """ import tools -from optparse import OptionParser +from argparse import ArgumentParser import os.path -parser = OptionParser() -parser.add_option("--include", help="Extra header include path", default=None) -parser.add_option("--swig_include", help="Extra SWIG include path", - default=[], action="append") -parser.add_option("-s", "--swig", dest="swig", default="swig", - help="Swig command to use.") -parser.add_option("-m", "--module", - dest="module", help="Module to run.") -parser.add_option("-p", "--swigpath", dest="swigpath", action="append", - default=[], help="Module to run.") -parser.add_option("-i", "--includepath", dest="includepath", action="append", - default=[], help="Module to run.") +parser = ArgumentParser() +parser.add_argument("--include", help="Extra header include path", + default=None) +parser.add_argument("--swig_include", help="Extra SWIG include path", + default=[], action="append") +parser.add_argument("-s", "--swig", dest="swig", default="swig", + help="Swig command to use.") +parser.add_argument("-m", "--module", + dest="module", help="Module to run.") +parser.add_argument("-p", "--swigpath", dest="swigpath", action="append", + default=[], help="Module to run.") +parser.add_argument("-i", "--includepath", dest="includepath", action="append", + default=[], help="Module to run.") def run_swig(outputdir, options): @@ -127,18 +128,18 @@ def patch_file(infile, out, options): def main(): - (options, args) = parser.parse_args() + args = parser.parse_args() outputdir = os.path.abspath( os.path.join( "src", "%s_swig" % - options.module)) + args.module)) tools.mkdir(outputdir, clean=False) - run_swig(outputdir, options) + run_swig(outputdir, args) patch_file(os.path.join(outputdir, "wrap.cpp-in"), - os.path.join(outputdir, "wrap.cpp"), options) + os.path.join(outputdir, "wrap.cpp"), args) patch_file(os.path.join(outputdir, "wrap.h-in"), - os.path.join(outputdir, "wrap.h"), options) + os.path.join(outputdir, "wrap.h"), args) if __name__ == '__main__': diff --git a/tools/build/make_version.py b/tools/build/make_version.py index 383133698d..752b927656 100755 --- a/tools/build/make_version.py +++ b/tools/build/make_version.py @@ -2,25 +2,26 @@ import subprocess import tools -from optparse import OptionParser +from argparse import ArgumentParser import os def main(): - parser = OptionParser() - parser.add_option("--build_dir", help="IMP build directory", default=None) - parser.add_option("--module_name", help="Module name", default=None) - parser.add_option("-s", "--source", dest="source", - help="IMP source directory.") - options, args = parser.parse_args() + parser = ArgumentParser() + parser.add_argument("--build_dir", help="IMP build directory", + default=None) + parser.add_argument("--module_name", help="Module name", default=None) + parser.add_argument("-s", "--source", dest="source", + help="IMP source directory.") + args = parser.parse_args() # Top-level version - make_version(options.source, '.') + make_version(args.source, '.') # Submodule versions - mf = tools.ModulesFinder(source_dir=options.source, - external_dir=options.build_dir, - module_name=options.module_name) + mf = tools.ModulesFinder(source_dir=args.source, + external_dir=args.build_dir, + module_name=args.module_name) all_modules = [x for x in mf.values() if isinstance(x, tools.SourceModule)] for module in all_modules: if os.path.exists(os.path.join(module.path, ".git")): diff --git a/tools/build/setup.py b/tools/build/setup.py index 
50852d2fef..456b62f888 100755 --- a/tools/build/setup.py +++ b/tools/build/setup.py @@ -21,7 +21,7 @@ import os.path import platform import tools -from optparse import OptionParser +from argparse import ArgumentParser # main loops @@ -153,7 +153,7 @@ class TestCppProgram(IMP.test.TestCase): """) -def generate_tests(modules, scons): +def generate_tests(modules): template = """import IMP import IMP.test import %(module)s @@ -222,30 +222,6 @@ def test_show(self): "medium_test_standards.py"), test, show_diff=False) - cpptests = tools.get_glob([os.path.join(module.path, "test", - "test_*.cpp")]) - ecpptests = tools.get_glob( - [os.path.join(module.path, "test", "expensive_test_*.cpp")]) - cppexamples = tools.get_glob([os.path.join(module.path, "examples", - "*.cpp")]) - - if len(cpptests) > 0 and scons: - _make_test_driver( - os.path.join( - targetdir, - "test_cpp_tests.py"), - cpptests) - if len(ecpptests) > 0 and scons: - _make_test_driver( - os.path.join(targetdir, - "expensive_test_cpp_tests.py"), - cpptests) - if len(cppexamples) > 0 and scons: - _make_test_driver( - os.path.join(targetdir, - "cpp_examples_test.py"), - cppexamples) - def clean_pyc(dir): for root, dirnames, filenames in os.walk('.'): @@ -261,37 +237,35 @@ def generate_src_dirs(modules): tools.mkdir(os.path.join("src", module.name), clean=False) -parser = OptionParser() -parser.add_option("--build_dir", help="IMP build directory", default=None) -parser.add_option("--module_name", help="Module name", default=None) -parser.add_option("-s", "--source", dest="source", - help="IMP source directory.") -parser.add_option("-d", "--datapath", dest="datapath", - help="Extra data path for IMP.") -parser.add_option("-m", "--disabled", dest="disabled", - help="Disabled modules.") -parser.add_option("--scons", default=False, action="store_true", - help="Set if we are running scons.") +parser = ArgumentParser() +parser.add_argument("--build_dir", help="IMP build directory", default=None) +parser.add_argument("--module_name", help="Module name", default=None) +parser.add_argument("-s", "--source", dest="source", + help="IMP source directory.") +parser.add_argument("-d", "--datapath", dest="datapath", + help="Extra data path for IMP.") +parser.add_argument("-m", "--disabled", dest="disabled", + help="Disabled modules.") def main(): - (options, args) = parser.parse_args() - mf = tools.ModulesFinder(source_dir=options.source, - external_dir=options.build_dir, - module_name=options.module_name) + args = parser.parse_args() + mf = tools.ModulesFinder(source_dir=args.source, + external_dir=args.build_dir, + module_name=args.module_name) all_modules = [x for x in mf.values() if isinstance(x, tools.SourceModule)] - clean_pyc(options.source) + clean_pyc(args.source) tools.mkdir(os.path.join("build_info")) tools.mkdir(os.path.join("cmake_tests")) tools.rewrite(os.path.join("build_info", "disabled"), - options.disabled.replace(":", "\n")) + args.disabled.replace(":", "\n")) tools.set_sorted_order([m.name for m in mf.get_ordered()]) link_headers(all_modules) link_examples(all_modules) link_swig(all_modules) link_python(all_modules) link_data(all_modules) - generate_tests(all_modules, options.scons) + generate_tests(all_modules) generate_src_dirs(all_modules) diff --git a/tools/build/setup_all.py b/tools/build/setup_all.py index b8382bdb95..c940a2beff 100755 --- a/tools/build/setup_all.py +++ b/tools/build/setup_all.py @@ -3,7 +3,7 @@ import os import os.path import tools -from optparse import OptionParser +from argparse import ArgumentParser # main 
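The mechanical part of the conversion, repeated in every script in this patch, is that optparse's two-element return becomes a single namespace: `(options, args) = parser.parse_args()` turns into `args = parser.parse_args()`, every `options.foo` becomes `args.foo`, and stray positionals are rejected by argparse itself. A minimal sketch of the pattern:

    from argparse import ArgumentParser

    parser = ArgumentParser()
    parser.add_argument("--module_name", default=None)

    # One namespace back; unexpected positionals now raise an error
    # automatically, so manual "wrong number of arguments" checks go away.
    args = parser.parse_args(["--module_name", "kernel"])
    assert args.module_name == "kernel"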
loops @@ -23,18 +23,18 @@ def generate_all_cpp(modules): % os.path.abspath(s) for s in sources) + '\n') -parser = OptionParser() -parser.add_option("--build_dir", help="IMP build directory", default=None) -parser.add_option("--module_name", help="Module name", default=None) -parser.add_option("-s", "--source", dest="source", - help="IMP source directory.") +parser = ArgumentParser() +parser.add_argument("--build_dir", help="IMP build directory", default=None) +parser.add_argument("--module_name", help="Module name", default=None) +parser.add_argument("-s", "--source", dest="source", + help="IMP source directory.") def main(): - (options, args) = parser.parse_args() - mf = tools.ModulesFinder(source_dir=options.source, - external_dir=options.build_dir, - module_name=options.module_name) + args = parser.parse_args() + mf = tools.ModulesFinder(source_dir=args.source, + external_dir=args.build_dir, + module_name=args.module_name) generate_all_cpp([x for x in mf.values() if isinstance(x, tools.SourceModule)]) diff --git a/tools/build/setup_cmake.py b/tools/build/setup_cmake.py index 31659d38a9..5415123eac 100755 --- a/tools/build/setup_cmake.py +++ b/tools/build/setup_cmake.py @@ -10,7 +10,7 @@ import os.path import tools import subprocess -from optparse import OptionParser +from argparse import ArgumentParser TOPDIR = os.path.abspath(os.path.dirname(__file__)) @@ -287,34 +287,34 @@ def setup_module(finder, module, tools_dir, extra_include, extra_swig, return out -parser = OptionParser() -parser.add_option("--include", help="Extra header include path", default=None) -parser.add_option("--swig_include", help="Extra SWIG include path(s)", - default=[], action="append") -parser.add_option("--build_dir", help="IMP build directory", default=None) -parser.add_option("--module_name", help="Module name", default=None) -parser.add_option("--tools_dir", help="IMP tools directory", default=None) -parser.add_option("--required", action="store_true", default=False, - help="Whether to fail the build if a module cannot " - "be configured") +parser = ArgumentParser() +parser.add_argument("--include", help="Extra header include path", + default=None) +parser.add_argument("--swig_include", help="Extra SWIG include path(s)", + default=[], action="append") +parser.add_argument("--build_dir", help="IMP build directory", default=None) +parser.add_argument("--module_name", help="Module name", default=None) +parser.add_argument("--tools_dir", help="IMP tools directory", default=None) +parser.add_argument("--required", action="store_true", default=False, + help="Whether to fail the build if a module cannot " + "be configured") def main(): - (options, args) = parser.parse_args() + args = parser.parse_args() main = [] - mf = tools.ModulesFinder(source_dir='', external_dir=options.build_dir, - module_name=options.module_name) - tools_dir = options.tools_dir \ - if options.tools_dir else '${CMAKE_SOURCE_DIR}/tools' - extra_include = ' "--include=%s"' % options.include \ - if options.include else "" + mf = tools.ModulesFinder(source_dir='', external_dir=args.build_dir, + module_name=args.module_name) + tools_dir = args.tools_dir \ + if args.tools_dir else '${CMAKE_SOURCE_DIR}/tools' + extra_include = ' "--include=%s"' % args.include if args.include else "" extra_swig = ''.join(' "--swig_include=%s"' % s - for s in options.swig_include) \ - if options.swig_include else "" + for s in args.swig_include) \ + if args.swig_include else "" for m in mf.get_ordered(): if isinstance(m, tools.SourceModule): 
main.append(setup_module(mf, m, tools_dir, extra_include, - extra_swig, options.required)) + extra_swig, args.required)) if __name__ == '__main__': diff --git a/tools/build/setup_doxygen.py b/tools/build/setup_doxygen.py index 7ff7cfd662..ce19da3321 100755 --- a/tools/build/setup_doxygen.py +++ b/tools/build/setup_doxygen.py @@ -11,7 +11,7 @@ import os import os.path import tools -from optparse import OptionParser +from argparse import ArgumentParser # link all the dox files and other documentation related files from the # source tree into the build tree @@ -29,15 +29,15 @@ def link_dox(source): match=["*.png", "*.pdf", "*.gif"], clean=False) -parser = OptionParser() -parser.add_option("-s", "--source", dest="source", - help="IMP source directory.") +parser = ArgumentParser() +parser.add_argument("-s", "--source", dest="source", + help="IMP source directory.") def main(): - (options, args) = parser.parse_args() + args = parser.parse_args() - link_dox(options.source) + link_dox(args.source) if __name__ == '__main__': diff --git a/tools/build/setup_doxygen_config.py b/tools/build/setup_doxygen_config.py index 948e14e1c7..007651bebb 100755 --- a/tools/build/setup_doxygen_config.py +++ b/tools/build/setup_doxygen_config.py @@ -12,7 +12,7 @@ import os.path import tools import pickle -from optparse import OptionParser +from argparse import ArgumentParser class DoxConfigFileGenerator(tools.FileGenerator): @@ -141,26 +141,26 @@ def generate_overview_pages(source): g.write(name, "\n".join(contents)) -parser = OptionParser() -parser.add_option("-s", "--source", dest="source", - help="IMP source directory.") +parser = ArgumentParser() +parser.add_argument("-s", "--source", dest="source", + help="IMP source directory.") def main(): - (options, args) = parser.parse_args() + args = parser.parse_args() - generate_overview_pages(options.source) - generate_doxyfile(options.source, + generate_overview_pages(args.source) + generate_doxyfile(args.source, os.path.join("doxygen", "ref.html"), is_html=True, is_xml=False) - generate_doxyfile(options.source, + generate_doxyfile(args.source, os.path.join("doxygen", "ref.xml"), is_html=False, is_xml=True) - generate_doxyfile(options.source, + generate_doxyfile(args.source, os.path.join("doxygen", "manual.html"), is_html=True, is_xml=False, manual=True) - generate_doxyfile(options.source, + generate_doxyfile(args.source, os.path.join("doxygen", "manual.xml"), is_html=False, is_xml=True, manual=True) diff --git a/tools/build/setup_imppy.py b/tools/build/setup_imppy.py index 2bfc44ba33..a6f847ced2 100755 --- a/tools/build/setup_imppy.py +++ b/tools/build/setup_imppy.py @@ -5,7 +5,7 @@ """ import tools -from optparse import OptionParser +from argparse import ArgumentParser import os.path import os import platform @@ -31,38 +31,38 @@ class FileGenerator(object): "@IMP_BIN_DIR@", "", "@PATH@", "", "@PRECOMMAND@", "", "@TMPDIR@"] - def __init__(self, options): - self.options = options + def __init__(self, args): + self.args = args def native_paths(self, paths, also_with_suffix=False): """Convert cmake-provided paths into native paths""" ret = [tools.from_cmake_path(x) for x in paths] - if self.options.suffix and also_with_suffix: + if self.args.suffix and also_with_suffix: ret += [os.path.join(tools.from_cmake_path(x), - self.options.suffix) for x in paths] + self.args.suffix) for x in paths] return ret def get_abs_binary_path(self, reldir): """Get an absolute path to a binary directory""" - if self.options.suffix: - reldir = os.path.join(reldir, self.options.suffix) + if 
self.args.suffix: + reldir = os.path.join(reldir, self.args.suffix) return os.path.abspath(reldir) def get_path(self): modbin = [os.path.abspath(x) for x in tools.get_glob(["module_bin/*"])] - if self.options.suffix: - modbin += [os.path.join(x, self.options.suffix) for x in modbin] + if self.args.suffix: + modbin += [os.path.join(x, self.args.suffix) for x in modbin] return (modbin + [self.get_abs_binary_path("bin")] - + self.native_paths(self.options.path, True)) + + self.native_paths(self.args.path, True)) def write_file(self): - pypathsep = ";" if self.options.python_pathsep == 'w32' else os.pathsep - outfile = self.options.output - pythonpath = self.native_paths(self.options.python_path, True) - ldpath = self.native_paths(self.options.ld_path) - precommand = self.options.precommand + pypathsep = ";" if self.args.python_pathsep == 'w32' else os.pathsep + outfile = self.args.output + pythonpath = self.native_paths(self.args.python_path, True) + ldpath = self.native_paths(self.args.ld_path) + precommand = self.args.precommand path = self.get_path() - externdata = self.native_paths(self.options.external_data) + externdata = self.native_paths(self.args.external_data) libdir = self.get_abs_binary_path("lib") bindir = self.get_abs_binary_path("bin") @@ -101,7 +101,7 @@ def write_file(self): val = lines[line] if val[0] and len(val[1]) > 0: # ick - if self.options.propagate == "no" or not val[3]: + if self.args.propagate == "no" or not val[3]: contents.extend(self.set_variable(val[0], val[1], val[2])) else: @@ -152,38 +152,38 @@ def set_variable_propagate(self, varname, value, export, sep): def get_path(self): # Windows looks for libraries in PATH, not LD_LIBRARY_PATH return FileGenerator.get_path(self) \ - + self.native_paths(self.options.ld_path, True) - - -parser = OptionParser() -parser.add_option("-p", "--python_path", dest="python_path", default=[], - action="append", help="PYTHONPATH.") -parser.add_option("-l", "--ld_path", dest="ld_path", default=[], - action="append", help="LD_LIB_PATH.") -parser.add_option("-c", "--precommand", dest="precommand", default="", - help="Command to run before all executables.") -parser.add_option("-P", "--path", dest="path", default=[], - action="append", help="The PATH.") -parser.add_option("--python_pathsep", default="", - help="The Python path separator style " - "to use ('w32' or empty)") -parser.add_option("-d", "--external_data", dest="external_data", default=[], - action="append", help="External data.") -parser.add_option("-e", "--propagate", dest="propagate", default="no", - help="Whether to pass the relevant environment variables " - "through.") -parser.add_option("-o", "--output", dest="output", default="imppy.sh", - help="Name of the file to produce.") -parser.add_option("--suffix", default="", - help="Subdirectory to suffix to binary directories") + + self.native_paths(self.args.ld_path, True) + + +parser = ArgumentParser() +parser.add_argument("-p", "--python_path", dest="python_path", default=[], + action="append", help="PYTHONPATH.") +parser.add_argument("-l", "--ld_path", dest="ld_path", default=[], + action="append", help="LD_LIB_PATH.") +parser.add_argument("-c", "--precommand", dest="precommand", default="", + help="Command to run before all executables.") +parser.add_argument("-P", "--path", dest="path", default=[], + action="append", help="The PATH.") +parser.add_argument("--python_pathsep", default="", + help="The Python path separator style " + "to use ('w32' or empty)") +parser.add_argument("-d", "--external_data", 
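setup_imppy.py's path helpers duplicate every directory with a suffixed variant whenever a build suffix is configured (for example a per-configuration subdirectory; the suffix value below is illustrative). A simplified sketch that leaves out the cmake path conversion done by the real helper:

    import os

    def native_paths(paths, suffix="", also_with_suffix=False):
        # Simplified: the real method first runs tools.from_cmake_path()
        ret = list(paths)
        if suffix and also_with_suffix:
            ret += [os.path.join(p, suffix) for p in paths]
        return ret

    print(native_paths(["lib", "bin"], "Release", also_with_suffix=True))
    # ['lib', 'bin', 'lib/Release', 'bin/Release']  (POSIX paths shown)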
dest="external_data", default=[], + action="append", help="External data.") +parser.add_argument("-e", "--propagate", dest="propagate", default="no", + help="Whether to pass the relevant environment variables " + "through.") +parser.add_argument("-o", "--output", dest="output", default="imppy.sh", + help="Name of the file to produce.") +parser.add_argument("--suffix", default="", + help="Subdirectory to suffix to binary directories") def main(): - (options, args) = parser.parse_args() - if options.output.endswith('.bat'): - gen = BatchFileGenerator(options) + args = parser.parse_args() + if args.output.endswith('.bat'): + gen = BatchFileGenerator(args) else: - gen = ShellScriptFileGenerator(options) + gen = ShellScriptFileGenerator(args) gen.write_file() diff --git a/tools/build/setup_module.py b/tools/build/setup_module.py index a69ad083c6..8d9bb931ca 100755 --- a/tools/build/setup_module.py +++ b/tools/build/setup_module.py @@ -11,7 +11,7 @@ """ import sys -from optparse import OptionParser +from argparse import ArgumentParser import os.path import tools import glob @@ -20,16 +20,23 @@ TOPDIR = os.path.abspath(os.path.dirname(__file__)) -parser = OptionParser() -parser.add_option("--build_dir", help="IMP build directory", default=None) -parser.add_option("-D", "--defines", dest="defines", default="", - help="Colon separated list of defines.") -parser.add_option("-n", "--name", - dest="name", help="The name of the module.") -parser.add_option("-s", "--source", - dest="source", help="The root for IMP source.") -parser.add_option("-d", "--datapath", - dest="datapath", default="", help="An extra IMP datapath.") +parser = ArgumentParser() +parser.add_argument("--build_dir", help="IMP build directory", default=None) +parser.add_argument("-D", "--defines", dest="defines", default="", + help="Colon separated list of defines.") +parser.add_argument("-n", "--name", + dest="name", help="The name of the module.", + required=True) +parser.add_argument("-s", "--source", + dest="source", help="The root for IMP source.", + required=True) +parser.add_argument("-d", "--datapath", + dest="datapath", default="", help="An extra IMP datapath.") +parser.add_argument("--python_version_major", type=int, + help="The major version of Python (2 or 3) that " + "IMP is configured to use.") +parser.add_argument("apps", metavar="BIN", type=str, nargs="*", + help="Module command line Python tool(s)") def add_list_to_defines(cppdefines, data, sym, val, names): @@ -41,7 +48,7 @@ def add_list_to_defines(cppdefines, data, sym, val, names): % (data["name"].upper(), nn, val)) -def make_header(options, module): +def make_header(args, module): if module.python_only: return if module.name == 'kernel': @@ -92,8 +99,8 @@ def make_header(options, module): else: data["showable"] = "" cppdefines = [] - if options.defines != "": - for define in tools.split(options.defines): + if args.defines != "": + for define in tools.split(args.defines): parts = define.split("=") if len(parts) == 2: cppdefines.append("#define %s %s" % (parts[0], parts[1])) @@ -123,11 +130,11 @@ def __init__(self, template_file, module, modules, finder): self.finder = finder tools.FileGenerator.__init__(self, template_file, '#') - def get_output_file_contents(self, options): + def get_output_file_contents(self, args): template = self.template module = self.module modules = self.modules - template = template.replace("@IMP_SOURCE_PATH@", options.source) + template = template.replace("@IMP_SOURCE_PATH@", args.source) template = template.replace("@VERSION@", "NONE") 
template = template.replace("@NAME@", module.name) template = template.replace("@PROJECT_BRIEF@", @@ -144,7 +151,7 @@ def get_output_file_contents(self, options): template = template.replace("@GENERATE_TAGFILE@", "tags") template = template.replace( "@LAYOUT_FILE@", - "%s/doc/doxygen/module_layout.xml" % options.source) + "%s/doc/doxygen/module_layout.xml" % args.source) template = template.replace("@MAINPAGE@", "README.md") template = template.replace("@INCLUDE_PATH@", "include") template = template.replace("@FILE_PATTERNS@", @@ -174,7 +181,7 @@ def get_output_file_contents(self, options): inputs.append("../generated/IMP_%s.dox" % module.name) template = template.replace( "@INPUT_PATH@", " \\\n ".join(inputs)) - tags = [os.path.join(options.source, 'doc', 'doxygen', + tags = [os.path.join(args.source, 'doc', 'doxygen', 'dummy_module_tags.xml')] for m in modules: tags.append(os.path.join("../", m.name, "tags") @@ -184,19 +191,19 @@ def get_output_file_contents(self, options): if module.name == "example": template = template.replace( "@EXAMPLE_PATH@", - "examples/example %s/modules/example" % options.source) + "examples/example %s/modules/example" % args.source) else: template = template.replace("@EXAMPLE_PATH@", "examples/" + module.name) return template -def make_doxygen(options, module, modules, finder): +def make_doxygen(args, module, modules, finder): file = os.path.join("doxygen", module.name, "Doxyfile") g = ModuleDoxFileGenerator( os.path.join(TOPDIR, "doxygen_templates", "Doxyfile.in"), module, modules, finder) - g.write(file, options) + g.write(file, args) def write_no_ok(module): @@ -226,8 +233,13 @@ def write_ok(module, modules, unfound_modules, dependencies, "\n".join(config)) -def setup_module(module, finder): +def setup_module(module, finder, python_version_major): sys.stdout.write("Configuring module %s ..." % module.name) + if module.python3_only and python_version_major < 3: + print("Module requires Python 3, but IMP is configured with Python %d" + % python_version_major) + write_no_ok(module.name) + return False, [] for d in module.required_dependencies: if not finder.get_dependency_info(d)["ok"]: print("Required dependency %s not found" % d) @@ -294,8 +306,8 @@ def link_py_apps(module): os.chmod(dest_bin, 493) # 493 = 0755, i.e. 
executable -def link_bin(options, module): - path = os.path.join("module_bin", options.name) +def link_bin(args, module): + path = os.path.join("module_bin", args.name) tools.mkdir(path, clean=False) for old in tools.get_glob([os.path.join(path, "*.py")]): os.unlink(old) @@ -303,8 +315,8 @@ def link_bin(options, module): path, clean=False, match=["*.py"]) -def link_benchmark(options, module): - path = os.path.join("benchmark", options.name) +def link_benchmark(args, module): + path = os.path.join("benchmark", args.name) tools.mkdir(path, clean=False) for old in tools.get_glob([os.path.join(path, "*.py")]): os.unlink(old) @@ -409,33 +421,33 @@ def make_overview(module, cmdline_tools): def main(): - options, apps = parser.parse_args() + args = parser.parse_args() with open("build_info/disabled", "r") as fh: disabled = tools.split(fh.read(), "\n") - if options.name in disabled: - print("%s is disabled" % options.name) - write_no_ok(options.name) - tools.rmdir(os.path.join("module_bin", options.name)) - tools.rmdir(os.path.join("benchmark", options.name)) - tools.rmdir(os.path.join("lib", "IMP", options.name)) + if args.name in disabled: + print("%s is disabled" % args.name) + write_no_ok(args.name) + tools.rmdir(os.path.join("module_bin", args.name)) + tools.rmdir(os.path.join("benchmark", args.name)) + tools.rmdir(os.path.join("lib", "IMP", args.name)) sys.exit(1) - mf = tools.ModulesFinder(source_dir=options.source, - external_dir=options.build_dir, - module_name=options.name) - module = mf[options.name] - success, modules = setup_module(module, mf) + mf = tools.ModulesFinder(source_dir=args.source, + external_dir=args.build_dir, + module_name=args.name) + module = mf[args.name] + success, modules = setup_module(module, mf, args.python_version_major) if success: - make_header(options, module) - make_doxygen(options, module, modules, mf) - make_overview(module, apps) - link_bin(options, module) + make_header(args, module) + make_doxygen(args, module, modules, mf) + make_overview(module, args.apps) + link_bin(args, module) link_py_apps(module) - link_benchmark(options, module) + link_benchmark(args, module) sys.exit(0) else: - tools.rmdir(os.path.join("module_bin", options.name)) - tools.rmdir(os.path.join("benchmark", options.name)) - tools.rmdir(os.path.join("lib", "IMP", options.name)) + tools.rmdir(os.path.join("module_bin", args.name)) + tools.rmdir(os.path.join("benchmark", args.name)) + tools.rmdir(os.path.join("lib", "IMP", args.name)) sys.exit(1) diff --git a/tools/build/setup_paths.py b/tools/build/setup_paths.py index be0c782d60..f8ace71aca 100755 --- a/tools/build/setup_paths.py +++ b/tools/build/setup_paths.py @@ -4,7 +4,7 @@ it is installed. 
""" -from optparse import OptionParser +from argparse import ArgumentParser import tools template = """ @@ -16,22 +16,22 @@ IMPKERNEL_END_INTERNAL_NAMESPACE """ -parser = OptionParser() -parser.add_option("-d", "--datapath", dest="datapath", - help="The install data path.") -parser.add_option("-e", "--examplepath", dest="examplepath", - help="The install example path.") -parser.add_option("-o", "--output", dest="output", - help="Where to put the file.") +parser = ArgumentParser() +parser.add_argument("-d", "--datapath", dest="datapath", + help="The install data path.") +parser.add_argument("-e", "--examplepath", dest="examplepath", + help="The install example path.") +parser.add_argument("-o", "--output", dest="output", + help="Where to put the file.") def main(): - (options, args) = parser.parse_args() + args = parser.parse_args() data = {} - data["examplepath"] = options.examplepath - data["datapath"] = options.datapath + data["examplepath"] = args.examplepath + data["datapath"] = args.datapath g = tools.CPPFileGenerator() - g.write(options.output, template % data) + g.write(args.output, template % data) if __name__ == '__main__': diff --git a/tools/build/setup_swig_deps.py b/tools/build/setup_swig_deps.py index 5cbc334a02..d8316a9cd2 100755 --- a/tools/build/setup_swig_deps.py +++ b/tools/build/setup_swig_deps.py @@ -4,28 +4,24 @@ """ import sys -from optparse import OptionParser +from argparse import ArgumentParser import os.path import tools import tools.thread_pool -parser = OptionParser() -parser.add_option("--build_dir", help="IMP build directory", default=None) -parser.add_option("--module_name", help="Module name", default=None) -parser.add_option("--include", help="Extra header include path", default=None) -parser.add_option("-s", "--swig", - dest="swig", default="swig", - help="The name of the swig command.") -parser.add_option("-b", "--build_system", - dest="build_system", help="The build system being used.") +parser = ArgumentParser() +parser.add_argument("--build_dir", help="IMP build directory", default=None) +parser.add_argument("--module_name", help="Module name", default=None) +parser.add_argument("--include", help="Extra header include path", + default=None) +parser.add_argument("-s", "--swig", dest="swig", default="swig", + help="The name of the swig command.") -def _fix(name, bs): +def _fix(name): if os.path.isabs(name): return name - elif bs == "scons": - return "#/build/" + name else: return os.path.join(os.getcwd(), "%s") % name @@ -42,7 +38,7 @@ def get_dep_merged(finder, modules, name, extra_data_path): return ret -def setup_one(finder, module, build_system, swig, extra_data_path, include): +def setup_one(finder, module, swig, extra_data_path, include): includepath = get_dep_merged(finder, [module], "includepath", extra_data_path) swigpath = get_dep_merged(finder, [module], "swigpath", extra_data_path) @@ -69,22 +65,22 @@ def setup_one(finder, module, build_system, swig, extra_data_path, include): continue names.append(x) - final_names = [_fix(x, build_system) for x in names] + final_names = [_fix(x) for x in names] final_list = "\n".join(final_names) tools.rewrite("src/%s_swig.deps" % module.name, final_list) def main(): - (options, args) = parser.parse_args() + args = parser.parse_args() mf = tools.ModulesFinder(configured_dir="build_info", - external_dir=options.build_dir, - module_name=options.module_name) + external_dir=args.build_dir, + module_name=args.module_name) pool = tools.thread_pool.ThreadPool() for m in [x for x in mf.values() if not isinstance(x, 
tools.ExternalModule) and x.ok and not x.python_only]: - pool.add_task(setup_one, mf, m, options.build_system, options.swig, - options.build_dir, options.include) + pool.add_task(setup_one, mf, m, args.swig, + args.build_dir, args.include) err = pool.wait_completion() if err: sys.stderr.write(err + '\n') diff --git a/tools/build/setup_swig_wrappers.py b/tools/build/setup_swig_wrappers.py index 1a41f7c837..55278128ed 100755 --- a/tools/build/setup_swig_wrappers.py +++ b/tools/build/setup_swig_wrappers.py @@ -6,7 +6,7 @@ import os.path import datetime import tools -from optparse import OptionParser +from argparse import ArgumentParser def write_module_cpp(m, contents): @@ -73,8 +73,7 @@ def build_wrapper(module, finder, sorted, target, source): #include #include -#include -#include +#include #include // for serialization/pickle support #include @@ -152,28 +151,28 @@ def build_wrapper(module, finder, sorted, target, source): g.write(target, "\n".join(contents)) -parser = OptionParser() -parser.add_option("--build_dir", help="IMP build directory", default=None) -parser.add_option("-d", "--datapath", - dest="datapath", default="", help="Extra data path.") -parser.add_option("-s", "--source", - dest="source", help="Where to find IMP source.") -parser.add_option("-m", "--module", - dest="module", default="", help="Only run on one module.") +parser = ArgumentParser() +parser.add_argument("--build_dir", help="IMP build directory", default=None) +parser.add_argument("-d", "--datapath", + dest="datapath", default="", help="Extra data path.") +parser.add_argument("-s", "--source", + dest="source", help="Where to find IMP source.") +parser.add_argument("-m", "--module", + dest="module", default="", help="Only run on one module.") def main(): - (options, args) = parser.parse_args() + args = parser.parse_args() sorted_order = tools.get_sorted_order() - if options.module != "": - mf = tools.ModulesFinder(source_dir=options.source, - external_dir=options.build_dir, - module_name=options.module) - module = mf[options.module] + if args.module != "": + mf = tools.ModulesFinder(source_dir=args.source, + external_dir=args.build_dir, + module_name=args.module) + module = mf[args.module] build_wrapper(module, mf, sorted_order, os.path.join("swig", "IMP_" + module.name + ".i"), - options.source) + args.source) if __name__ == '__main__': diff --git a/tools/build/tools/__init__.py b/tools/build/tools/__init__.py index 436b7da8a6..1a1dddd81a 100644 --- a/tools/build/tools/__init__.py +++ b/tools/build/tools/__init__.py @@ -381,7 +381,8 @@ def _read_dep_file(self, attr): return self._info[attr] d = {'required_modules': "", 'optional_modules': "", 'required_dependencies': "", 'optional_dependencies': "", - 'lib_only_required_modules': "", 'python_only': False} + 'lib_only_required_modules': "", 'python_only': False, + 'python3_only': False} with open(self.depends_file) as fh: exec(fh.read(), d) self._info = {"required_modules": @@ -394,7 +395,8 @@ def _read_dep_file(self, attr): split(d['required_dependencies']), "optional_dependencies": split(d['optional_dependencies']), - "python_only": d['python_only']} + "python_only": d['python_only'], + "python3_only": d['python3_only']} return self._info[attr] required_modules = property( @@ -408,6 +410,7 @@ def _read_dep_file(self, attr): optional_dependencies = property( lambda self: self._read_dep_file('optional_dependencies')) python_only = property(lambda self: self._read_dep_file('python_only')) + python3_only = property(lambda self: self._read_dep_file('python3_only')) 
class ModulesFinder(object): diff --git a/tools/coverage/report.py b/tools/coverage/report.py index f699e558dd..0cdc214c23 100755 --- a/tools/coverage/report.py +++ b/tools/coverage/report.py @@ -2,7 +2,7 @@ from __future__ import print_function import coverage -from optparse import OptionParser +from argparse import ArgumentParser import subprocess import os import sys @@ -167,47 +167,45 @@ def _get_components(opt, all_comps, exclude): def parse_args(): - parser = OptionParser(usage="""%prog [options] outdir - -Generate HTML coverage reports for IMP C++/Python code in the given directory. -""") - parser.add_option("--report", type="choice", - choices=['python', 'cpp', 'both'], default='both', - help="Generate reports for Python code ('python'), " - "C++ code ('cpp') or both Python and C++ ('both'). " - "Default '%default'.") - parser.add_option("--modules", metavar='STR', default=None, - help="Report only for the given colon-separated list of " - "IMP modules, e.g. 'base:kernel'. By default, " - "coverage for all modules is reported.") - parser.add_option("--dependencies", metavar='STR', default=None, - help="Report only for the given colon-separated list of " - "IMP dependencies, e.g. 'RMF'. By default, " - "coverage for all supported dependencies " - "(currently only RMF) is reported.") - parser.add_option("--exclude", metavar='PCK', default=None, - help="Don't report coverage for any of the components " - "listed in the given summary file (as generated by " - "build_all.py).") - opts, args = parser.parse_args() - if len(args) != 1: - parser.error("wrong number of arguments") - if opts.exclude: - exclude = pickle.load(open(opts.exclude, 'rb')) + parser = ArgumentParser( + description="Generate HTML coverage reports for IMP C++/Python code " + "in the given directory.") + parser.add_argument("--report", + choices=['python', 'cpp', 'both'], default='both', + help="Generate reports for Python code ('python'), " + "C++ code ('cpp') or both Python and " + "C++ ('both'). Default '%(default)s'.") + parser.add_argument("--modules", metavar='STR', default=None, + help="Report only for the given colon-separated " + "list of IMP modules, e.g. 'base:kernel'. By " + "default, coverage for all modules is reported.") + parser.add_argument("--dependencies", metavar='STR', default=None, + help="Report only for the given colon-separated list " + "of IMP dependencies, e.g. 'RMF'. 
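As the tools/__init__.py hunk above shows, a module's dependencies.py is executed as plain Python against a dict of defaults, so adding the new python3_only flag is just one more default plus one more key; modules that never set it keep False. A sketch of the parsing idiom, with assumed file contents shown in the comment:

    # dependencies.py (module metadata, plain Python), e.g.:
    #     required_modules = 'kernel:atom'
    #     python3_only = True
    defaults = {'required_modules': "", 'optional_modules': "",
                'python_only': False, 'python3_only': False}
    with open("dependencies.py") as fh:
        exec(fh.read(), defaults)
    print(defaults['python3_only'])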
By default, " + "coverage for all supported dependencies " + "(currently only RMF) is reported.") + parser.add_argument("--exclude", metavar='PCK', default=None, + help="Don't report coverage for any of the components " + "listed in the given summary file (as generated " + "by build_all.py).") + parser.add_argument("outdir", help="Directory containing C++/Python code") + args = parser.parse_args() + if args.exclude: + exclude = pickle.load(open(args.exclude, 'rb')) else: exclude = {} - opts.modules = _get_components(opts.modules, tools.get_sorted_order(), + args.modules = _get_components(args.modules, tools.get_sorted_order(), exclude) - opts.dependencies = _get_components(opts.dependencies, ['RMF'], exclude) - return opts, args[0] + args.dependencies = _get_components(args.dependencies, ['RMF'], exclude) + return args def main(): - opts, outdir = parse_args() - if opts.report in ('both', 'python'): - report_python(opts, outdir) - if opts.report in ('both', 'cpp'): - report_cpp(opts, outdir) + args = parse_args() + if args.report in ('both', 'python'): + report_python(args, args.outdir) + if args.report in ('both', 'cpp'): + report_cpp(args, args.outdir) if __name__ == '__main__': diff --git a/tools/debian-ppa/changelog b/tools/debian-ppa/changelog new file mode 100644 index 0000000000..e7e20974c6 --- /dev/null +++ b/tools/debian-ppa/changelog @@ -0,0 +1,37 @@ +imp (2.21.0-1~@CODENAME@) @CODENAME@; urgency=low + + * IMP 2.21.0 release + + -- IMP Developers Thu, 13 Jun 2024 11:19:52 -0700 + +imp (2.20.2-1~@CODENAME@) @CODENAME@; urgency=low + + * IMP 2.20.2 release + + -- IMP Developers Thu, 04 Apr 2024 18:33:05 -0700 + +imp (2.20.1-4~@CODENAME@) @CODENAME@; urgency=low + + * Add missing dependencies in order to test the install + * Recommend python3-protobuf for IMP.npctransport + + -- IMP Developers Tue, 12 Mar 2024 15:09:28 -0700 + +imp (2.20.1-3~@CODENAME@) @CODENAME@; urgency=low + + * Add tests of the installed package + * Fix package removal + + -- IMP Developers Tue, 12 Mar 2024 10:23:47 -0700 + +imp (2.20.1-2~@CODENAME@) @CODENAME@; urgency=low + + * Add protobuf-compiler (protoc) to builddeps + + -- IMP Developers Fri, 08 Mar 2024 18:49:23 -0800 + +imp (2.20.1-1~@CODENAME@) @CODENAME@; urgency=low + + * Initial .deb release + + -- IMP Developers Fri, 08 Mar 2024 10:34:17 -0800 diff --git a/tools/debian-ppa/control b/tools/debian-ppa/control new file mode 100644 index 0000000000..12849b68b6 --- /dev/null +++ b/tools/debian-ppa/control @@ -0,0 +1,53 @@ +Source: imp +Priority: optional +Maintainer: Ben Webb +Build-Depends: debhelper-compat (= 13), cmake, swig, libboost-filesystem-dev, libboost-graph-dev, libboost-iostreams-dev, libboost-program-options-dev, libboost-random-dev, libboost-regex-dev, libboost-thread-dev, libcgal-dev, libhdf5-dev, libfftw3-dev, libopencv-dev, libgsl-dev, coreutils, unzip, wget, python3-dev, symlinks, libann-dev, libeigen3-dev, libcereal-dev, libprotobuf-dev, protobuf-compiler, libopenmpi-dev, rmf-dev (>= 1.6), python3-ihm, scons, lsb-release, python3-protobuf, python3-yaml, python3-mpi4py +Standards-Version: 4.6.2 +Section: libs +Homepage: https://integrativemodeling.org/ +Vcs-Browser: https://github.com/salilab/imp/ + +Package: imp-dev +Section: libdevel +Architecture: any +Depends: imp (= ${binary:Version}), ${misc:Depends}, cmake, swig, libboost-filesystem-dev, libboost-graph-dev, libboost-iostreams-dev, libboost-program-options-dev, libboost-random-dev, libboost-regex-dev, libboost-thread-dev, libcgal-dev, libhdf5-dev, libfftw3-dev, libopencv-dev, 
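coverage/report.py shows the other half of the migration: the lone positional argument is now declared (outdir), so argparse enforces its presence instead of a manual len(args) check, and help strings interpolate defaults with %(default)s rather than optparse's %default. A condensed sketch:

    from argparse import ArgumentParser

    parser = ArgumentParser(description="Generate HTML coverage reports.")
    parser.add_argument("--report", choices=['python', 'cpp', 'both'],
                        default='both',
                        help="What to report. Default '%(default)s'.")
    parser.add_argument("outdir", help="Directory containing C++/Python code")

    args = parser.parse_args(["html"])
    print(args.report, args.outdir)   # both html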
libgsl-dev, python3-dev, libann-dev, libeigen3-dev, libcereal-dev, libprotobuf-dev, protobuf-compiler, rmf-dev (>= 1.6), python3-ihm +Description: The Integrative Modeling Platform - development files + Headers to compile against IMP. + +Package: imp +Section: libs +Architecture: any +Depends: ${shlibs:Depends}, ${misc:Depends}, python3-numpy, python3-protobuf, python3-ihm +Recommends: python3-protobuf, python3-yaml +Description: The Integrative Modeling Platform + IMP's broad goal is to contribute to a comprehensive structural + characterization of biomolecules ranging in size and complexity from small + peptides to large macromolecular assemblies. Detailed structural + characterization of assemblies is generally impossible by any single existing + experimental or computational method. This barrier can be overcome by hybrid + approaches that integrate data from diverse biochemical and biophysical + experiments (eg, x-ray crystallography, NMR spectroscopy, electron microscopy, + immuno-electron microscopy, footprinting, chemical cross-linking, FRET + spectroscopy, small angle X-ray scattering, immunoprecipitation, genetic + interactions, etc...). + . + We formulate the hybrid approach to structure determination as an optimization + problem, the solution of which requires three main components: + * the representation of the assembly, + * the scoring function and + * the optimization method. + . + The ensemble of solutions to the optimization problem embodies the most + accurate structural characterization given the available information. + . + We created IMP, the Integrative Modeling Platform, to make it easier to + implement such an integrative approach to structural and dynamics problems. + IMP is designed to allow mixing and matching of existing modeling components + as well as easy addition of new functionality. + +Package: imp-openmpi +Section: libs +Architecture: any +Depends: imp (= ${binary:Version}), ${shlibs:Depends}, ${misc:Depends}, python3-mpi4py +Description: The Integrative Modeling Platform - support for OpenMPI + IMP MPI module and dependents, for openmpi. diff --git a/tools/debian-ppa/copyright b/tools/debian-ppa/copyright new file mode 100644 index 0000000000..7e734021c2 --- /dev/null +++ b/tools/debian-ppa/copyright @@ -0,0 +1,18 @@ +Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ +Upstream-Name: imp +Source: https://integrativemodeling.org/ + +Copyright: 2007-2024 IMP Inventors +License: LGPL-2+ + This package is free software; you can redistribute it and/or modify + it under the terms of the GNU Lesser General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + . + This package is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + . + You should have received a copy of the GNU Lesser General Public License + along with this program. 
If not, see diff --git a/tools/debian-ppa/imp-dev.install b/tools/debian-ppa/imp-dev.install new file mode 100644 index 0000000000..5dd003c73c --- /dev/null +++ b/tools/debian-ppa/imp-dev.install @@ -0,0 +1,7 @@ +usr/lib/*/libimp*.so +usr/lib/*/cmake/ +usr/include/IMP.h +usr/include/IMP +usr/share/IMP/build_info +usr/share/IMP/swig +usr/share/IMP/tools diff --git a/tools/debian-ppa/imp-openmpi.install b/tools/debian-ppa/imp-openmpi.install new file mode 100644 index 0000000000..01234e13b3 --- /dev/null +++ b/tools/debian-ppa/imp-openmpi.install @@ -0,0 +1,7 @@ +usr/lib/*/libimp_mpi.so.* +usr/lib/*/libimp_spb.so.* +usr/lib/python3*/dist-packages/IMP/mpi +usr/lib/python3*/dist-packages/_IMP_mpi.so +usr/lib/python3*/dist-packages/IMP/spb +usr/lib/python3*/dist-packages/_IMP_spb.so +usr/bin/spb* diff --git a/tools/debian-ppa/imp.install b/tools/debian-ppa/imp.install new file mode 100644 index 0000000000..59fe62a3a0 --- /dev/null +++ b/tools/debian-ppa/imp.install @@ -0,0 +1,9 @@ +usr/lib/*/libimp_kernel.so.* +usr/lib/*/libimp_@NON_MPI_CPP_MODULES@*.so.* +usr/lib/python3*/dist-packages/IMP/_*.py +usr/lib/python3*/dist-packages/IMP/@NON_MPI_MODULES@ +usr/lib/python3*/dist-packages/_IMP_kernel.so +usr/lib/python3*/dist-packages/_IMP_@NON_MPI_CPP_MODULES@.so +usr/bin/@NON_MPI_BINARIES@ +usr/share/doc/imp/* +usr/share/IMP/@DATA_MODULES@ diff --git a/tools/debian-ppa/imp.postinst b/tools/debian-ppa/imp.postinst new file mode 100755 index 0000000000..5b4c254f2b --- /dev/null +++ b/tools/debian-ppa/imp.postinst @@ -0,0 +1,2 @@ +#!/bin/sh +python3 -m compileall -q /usr/lib/python3.*/dist-packages/IMP diff --git a/tools/debian-ppa/imp.prerm b/tools/debian-ppa/imp.prerm new file mode 100755 index 0000000000..c86a0b65c8 --- /dev/null +++ b/tools/debian-ppa/imp.prerm @@ -0,0 +1,2 @@ +#!/bin/sh +find /usr/lib/python3.*/dist-packages/IMP -depth -name __pycache__ -exec rm -rf \{\} \; diff --git a/tools/debian-ppa/make-imp-install.py b/tools/debian-ppa/make-imp-install.py new file mode 100755 index 0000000000..641a1d9aaf --- /dev/null +++ b/tools/debian-ppa/make-imp-install.py @@ -0,0 +1,51 @@ +#!/usr/bin/python3 + +import sys +import os + + +def python_only(m): + depfile = os.path.join(m, 'dependencies.py') + if not os.path.exists(depfile): + return + d = {} + with open(depfile) as fh: + exec(fh.read(), d) + return d.get('python_only') + + +all_modules = sorted(d for d in os.listdir('.') if os.path.isdir(d)) +exclude_modules = frozenset(('mpi', 'spb', 'scratch', 'kernel', 'cnmultifit')) + +non_mpi_modules = [m for m in all_modules if m not in exclude_modules] +non_mpi_cpp_modules = [m for m in non_mpi_modules if not python_only(m)] +data_modules = [m for m in all_modules + if os.path.exists(os.path.join(m, 'data'))] + +non_mpi_binaries = [] +for m in non_mpi_modules: + bindir = os.path.join(m, 'bin') + if os.path.exists(bindir): + for b in os.listdir(bindir): + fname, ext = os.path.splitext(b) + if not fname.startswith('__') and ext in ('', '.cpp', '.py'): + non_mpi_binaries.append(fname) +non_mpi_binaries.remove('imp_example_app') + + +def subst_line(line, template, modules): + for m in modules: + sys.stdout.write(line.replace(template, m)) + + +for line in sys.stdin: + if "@NON_MPI_MODULES@" in line: + subst_line(line, '@NON_MPI_MODULES@', non_mpi_modules) + elif "@NON_MPI_CPP_MODULES@" in line: + subst_line(line, '@NON_MPI_CPP_MODULES@', non_mpi_cpp_modules) + elif "@DATA_MODULES@" in line: + subst_line(line, '@DATA_MODULES@', data_modules) + elif "@NON_MPI_BINARIES@" in line: + subst_line(line, 
'@NON_MPI_BINARIES@', non_mpi_binaries) + else: + sys.stdout.write(line) diff --git a/tools/debian-ppa/make-package.sh b/tools/debian-ppa/make-package.sh new file mode 100755 index 0000000000..a3a3df6064 --- /dev/null +++ b/tools/debian-ppa/make-package.sh @@ -0,0 +1,33 @@ +#!/bin/sh +# Build a Debian package from source + +set -e + +CODENAME=`lsb_release -c -s` + +# Make sure we can find the rest of our input files +TOOL_DIR=`dirname "$0"` +# Get absolute path to top dir +TOP_DIR=`cd "${TOOL_DIR}/../.." && pwd` + +cd ${TOP_DIR} +rm -rf debian +cp -r tools/debian-ppa debian + +# Add all module directories to imp.install +(cd modules && ${TOP_DIR}/tools/debian-ppa/make-imp-install.py < ${TOP_DIR}/tools/debian-ppa/imp.install > ${TOP_DIR}/debian/imp.install) + +rm debian/make-package.sh debian/make-imp-install.py +sed -i -e "s/\@CODENAME\@/$CODENAME/g" debian/changelog + +if [ "${CODENAME}" = "focal" ]; then + sed -i -e "s/debhelper-compat (= 13)/debhelper-compat (= 12)/" debian/control +fi + +# Workaround gcc -frounding-math bug; see +# https://bugzilla.redhat.com/show_bug.cgi?id=2085189 +if [ "${CODENAME}" = "noble" ]; then + perl -pi -e "s/CXXFLAGS :=.*/CXXFLAGS := -std=c++20/" debian/rules +fi + +dpkg-buildpackage -S diff --git a/tools/debian-ppa/patches/imp-isd-bessel.patch b/tools/debian-ppa/patches/imp-isd-bessel.patch new file mode 100644 index 0000000000..e88f14014f --- /dev/null +++ b/tools/debian-ppa/patches/imp-isd-bessel.patch @@ -0,0 +1,26 @@ +diff --git a/tools/nightly-tests/test-install/test.py b/tools/nightly-tests/test-install/test.py +index ad2ac77bc0..eeb708ce57 100644 +--- a/tools/nightly-tests/test-install/test.py ++++ b/tools/nightly-tests/test-install/test.py +@@ -2,6 +2,7 @@ import IMP + import IMP.core + import IMP.example + import IMP.atom ++import IMP.isd + import IMP.container + import unittest + import subprocess +@@ -118,6 +119,13 @@ class IMPInstallTests(unittest.TestCase): + else: + self.assertRaises(NotImplementedError, m1.get_ints_numpy, k) + ++ def test_isd_bessel(self): ++ """Test use of Bessel functions in IMP.isd""" ++ v = IMP.isd.vonMises(0., 0., 40.) 
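make-imp-install.py effectively turns imp.install into a template: any line containing a placeholder such as @NON_MPI_CPP_MODULES@ is emitted once per matching module. For example (the module names here are illustrative):

    import sys

    def subst_line(line, template, modules):
        # One output line per module, placeholder replaced in each copy
        for m in modules:
            sys.stdout.write(line.replace(template, m))

    subst_line("usr/lib/*/libimp_@NON_MPI_CPP_MODULES@*.so.*\n",
               "@NON_MPI_CPP_MODULES@", ["atom", "core", "em"])
    # usr/lib/*/libimp_atom*.so.*
    # usr/lib/*/libimp_core*.so.*
    # usr/lib/*/libimp_em*.so.*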
++ # A gcc bug can cause this to return nan in some cases: ++ # https://bugzilla.redhat.com/show_bug.cgi?id=2085189 ++ self.assertAlmostEqual(v.evaluate(), -0.9, delta=0.1) ++ + + if __name__ == '__main__': + unittest.main() diff --git a/tools/debian-ppa/patches/imp-no-cnmultifit.patch b/tools/debian-ppa/patches/imp-no-cnmultifit.patch new file mode 100644 index 0000000000..79033c5e7c --- /dev/null +++ b/tools/debian-ppa/patches/imp-no-cnmultifit.patch @@ -0,0 +1,34 @@ +diff --git a/tools/nightly-tests/test-install/test.py b/tools/nightly-tests/test-install/test.py +index ad2ac77bc0..79725c03a8 100644 +--- a/tools/nightly-tests/test-install/test.py ++++ b/tools/nightly-tests/test-install/test.py +@@ -70,7 +70,7 @@ class IMPInstallTests(unittest.TestCase): + + def test_applications_installed(self): + """Check install of basic applications""" +- apps = [App('cnmultifit', python=True), App('foxs'), ++ apps = [App('foxs'), + App('ligand_score'), App('multifit', python=True), + App('pdb_check'), App('pdb_rmf'), + App('rmf_cat', can_exit_1=True), +diff --git a/tools/nightly-tests/test-install/test_mock.py b/tools/nightly-tests/test-install/test_mock.py +index 82162c8f18..83f9102f34 100644 +--- a/tools/nightly-tests/test-install/test_mock.py ++++ b/tools/nightly-tests/test-install/test_mock.py +@@ -35,7 +35,6 @@ class IMPMockTests(unittest.TestCase): + _ = IMP.npctransport.Configuration + # Check that most other modules (particularly those with many + # dependencies) are present +- import IMP.cnmultifit + import IMP.domino + import IMP.em + import IMP.gsl +@@ -63,7 +62,7 @@ class IMPMockTests(unittest.TestCase): + 'interface_cross_links', 'em3d_score', + 'em3d_single_score', 'saxs_score', 'interface_rtc', + 'nmr_rtc_score', 'soap_score'] +- apps = ['cluster_profiles', 'cnmultifit', ++ apps = ['cluster_profiles', + 'complex_to_anchor_graph', 'compute_chi', + 'estimate_threshold_from_molecular_mass', 'foxs', + 'ligand_score', 'map2pca', 'mol2pca', diff --git a/tools/debian-ppa/patches/imp-test-paths.patch b/tools/debian-ppa/patches/imp-test-paths.patch new file mode 100644 index 0000000000..2a40f0e1a6 --- /dev/null +++ b/tools/debian-ppa/patches/imp-test-paths.patch @@ -0,0 +1,39 @@ +diff --git a/tools/nightly-tests/test-install/SConstruct b/tools/nightly-tests/test-install/SConstruct +index 87c0110455..dc41d4f688 100644 +--- a/tools/nightly-tests/test-install/SConstruct ++++ b/tools/nightly-tests/test-install/SConstruct +@@ -3,6 +3,10 @@ vars.Add('libpath', + 'Directory/ies where IMP or dependency libraries are installed', None) + vars.Add('cpppath', + 'Directory/ies where IMP or dependency headers are installed', None) ++vars.Add('datapath', ++ 'Directory where IMP data are installed', None) ++vars.Add('examplepath', ++ 'Directory where IMP examples are installed', None) + vars.Add('cxxflags', 'C++ compile flags', '') + vars.Add('linkflags', 'Link flags', '') + vars.Add('mock_config', 'Name of the mock config used to build IMP RPMs. 
' +@@ -29,6 +33,8 @@ if cpppath is not None: + cpppath = cpppath.split(':') + pypath = env.get('pypath', None) + path = env.get('path', None) ++datapath = env.get('datapath', None) ++examplepath = env.get('examplepath', None) + mock = env.get('mock_config', None) + mpi = env.get('mpi_module', None) + cxxflags = env.get('cxxflags', '').split(' ') +@@ -43,9 +49,13 @@ env['ENV']['LIBPATH'] = libpath # AIX + # Set Python search path + env['ENV']['PYTHONPATH'] = '%s:%s' % (pypath, libpath) + +-# Set path ++# Set paths + if path: + env['ENV']['PATH'] = path + ':' + env['ENV']['PATH'] ++if datapath: ++ env['ENV']['IMP_DATA'] = datapath ++if examplepath: ++ env['ENV']['IMP_EXAMPLE_DATA'] = examplepath + + # Test compiling and running a C++ program that links against IMP + testcpp = env.Program('test.cpp', CPPPATH=cpppath, LIBPATH=libpath, diff --git a/tools/debian-ppa/patches/series b/tools/debian-ppa/patches/series new file mode 100644 index 0000000000..81a676d32a --- /dev/null +++ b/tools/debian-ppa/patches/series @@ -0,0 +1,3 @@ +imp-no-cnmultifit.patch +imp-test-paths.patch +imp-isd-bessel.patch diff --git a/tools/debian-ppa/rules b/tools/debian-ppa/rules new file mode 100755 index 0000000000..2f305fa440 --- /dev/null +++ b/tools/debian-ppa/rules @@ -0,0 +1,57 @@ +#!/usr/bin/make -f +# -*- makefile -*- + +# Uncomment this to turn on verbose mode. +#export DH_VERBOSE=1 + +# Don't use Debian-provided flags for now; they slow down the build +CFLAGS := +CXXFLAGS := +LDFLAGS := + +%: + dh $@ + +override_dh_auto_configure: + mkdir build + cd build && py3_ver=`python3 -c "import sys; print('%d.%d' % sys.version_info[:2])"` \ + && cmake .. -DCMAKE_BUILD_TYPE=Release \ + -DCGAL_DIR=/usr/lib/`uname -m`-linux-gnu/cmake/CGAL/ \ + -DCMAKE_INSTALL_PYTHONDIR=/usr/lib/python$${py3_ver}/dist-packages \ + -DCMAKE_INSTALL_PREFIX=/usr \ + -DCMAKE_INSTALL_DOCDIR=/usr/share/doc/imp \ + -DIMP_USE_SYSTEM_RMF=on -DIMP_USE_SYSTEM_IHM=on \ + -DIMP_DISABLED_MODULES=scratch -DUSE_PYTHON2=off + +override_dh_auto_build: + $(MAKE) -C build + +override_dh_install: + $(MAKE) -C build DESTDIR=$(CURDIR)/debian/tmp install + # Make sure all Python applications use the system Python in /usr/bin + perl -pi -e 's@^#!.*python.*$$@#!/usr/bin/python3@' debian/tmp/usr/bin/* + # Don't distribute example application or dependency + rm -rf debian/tmp/usr/bin/imp_example_app \ + debian/tmp/usr/lib/*/libexample* \ + debian/tmp/usr/include/example* + dh_install + dh_missing --fail-missing + +execute_after_dh_install: + # Run basic tests on the installation + TOPDIR=`pwd`/debian/tmp \ + && cd tools/nightly-tests/test-install \ + && py3_ver=`python3 -c "import sys; print('%d.%d' % sys.version_info[:2])"` \ + && scons path="$${TOPDIR}/usr/bin" \ + libpath="$${TOPDIR}/usr/lib/`uname -m`-linux-gnu" \ + cpppath="$${TOPDIR}/usr/include" \ + pypath="$${TOPDIR}/usr/lib/python$${py3_ver}/dist-packages" \ + datapath="$${TOPDIR}/usr/share/IMP/" \ + examplepath="$${TOPDIR}/usr/share/doc/imp/examples/" \ + python=python3 mock_config=ubuntu-`lsb_release -c -s` \ + cxxflags="${CXXFLAGS} -I/usr/include/hdf5/serial/ -I/usr/include/eigen3" \ + && find "$${TOPDIR}" -depth -name __pycache__ -exec rm -rf \{\} \; + +override_dh_compress: + # Don't compress example files, since then they won't work! 
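The debian-ppa/rules file above locates the Python 3 dist-packages directory by asking the interpreter for its major.minor version; its embedded `python3 -c` one-liner amounts to the following (the /usr/lib prefix is the Debian default used in the cmake invocation):

    import sys

    # Same computation as the py3_ver shell variable in debian-ppa/rules
    py3_ver = '%d.%d' % sys.version_info[:2]
    print("/usr/lib/python%s/dist-packages" % py3_ver)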
+ dh_compress -Xexamples diff --git a/tools/debian-ppa/source/format b/tools/debian-ppa/source/format new file mode 100644 index 0000000000..163aaf8d82 --- /dev/null +++ b/tools/debian-ppa/source/format @@ -0,0 +1 @@ +3.0 (quilt) diff --git a/tools/debian/control b/tools/debian/control index da30e5782c..6929753ddd 100644 --- a/tools/debian/control +++ b/tools/debian/control @@ -1,7 +1,7 @@ Source: imp Priority: optional Maintainer: Ben Webb -Build-Depends: debhelper (>= 8.0.0), cmake, swig, libboost-filesystem-dev, libboost-graph-dev, libboost-iostreams-dev, libboost-program-options-dev, libboost-random-dev, libboost-regex-dev, libboost-thread-dev, libcgal-dev, libhdf5-dev, libfftw3-dev, libopencv-dev, libgsl0-dev, python2-dev, coreutils, unzip, wget, python3-dev, symlinks, libann-dev, libeigen3-dev, libcereal-dev, libprotobuf-dev, libopenmpi-dev +Build-Depends: debhelper (>= 8.0.0), cmake, swig, libboost-filesystem-dev, libboost-graph-dev, libboost-iostreams-dev, libboost-program-options-dev, libboost-random-dev, libboost-regex-dev, libboost-thread-dev, libcgal-dev, libhdf5-dev, libfftw3-dev, libopencv-dev, libgsl0-dev, python2-dev, coreutils, unzip, wget, python3-dev, symlinks, libann-dev, libeigen3-dev, libcereal-dev, libprotobuf-dev, libopenmpi-dev, python3-yaml, python3-mpi4py Standards-Version: 3.9.4 Section: libs Homepage: https://integrativemodeling.org/ @@ -18,7 +18,7 @@ Description: The Integrative Modeling Platform Package: imp Section: libs Architecture: any -Depends: ${shlibs:Depends}, ${misc:Depends}, python3-numpy, python3-protobuf +Depends: ${shlibs:Depends}, ${misc:Depends}, python3-numpy, python3-protobuf, python3-yaml Replaces: imp-python3 (<< 2.13.0) Breaks: imp-python3 (<< 2.13.0) Description: The Integrative Modeling Platform @@ -50,7 +50,7 @@ Description: The Integrative Modeling Platform Package: imp-openmpi Section: libs Architecture: any -Depends: imp (= ${binary:Version}), ${shlibs:Depends}, ${misc:Depends} +Depends: imp (= ${binary:Version}), ${shlibs:Depends}, ${misc:Depends}, python3-mpi4py Description: The Integrative Modeling Platform IMP MPI module and dependents, for openmpi. diff --git a/tools/debian/make-package.sh b/tools/debian/make-package.sh index 55302c45a9..be0ab4b8cc 100755 --- a/tools/debian/make-package.sh +++ b/tools/debian/make-package.sh @@ -26,6 +26,14 @@ cp -r tools/debian/ . 
|| exit 1 rm debian/make-package.sh debian/make-imp-install.py || exit 1 perl -pi -e "s/\@VERSION\@/$VERSION/; s/\@DATE\@/$DATE/; s/\@CODENAME\@/$CODENAME/;" debian/changelog || exit 1 + +# Newer distributions don't have Python 2 +if [ "${CODENAME}" = "noble" ]; then + patch -p2 < debian/no-python2.patch || exit 1 + rm debian/no-python2.patch debian/imp-python2.* || exit 1 + perl -pi -e "s/python2-dev, //" debian/control || exit 1 +fi + # Newer distributions don't have Python 2 modules, and renamed libgsl0 if [ "${CODENAME}" = "jammy" ]; then perl -pi -e "s/, python-numpy, python-protobuf//" debian/control || exit 1 @@ -41,6 +49,18 @@ fi if [ "${CODENAME}" = "xenial" -o "${CODENAME}" = "trusty" ]; then perl -pi -e "s/, python3-protobuf//" debian/control || exit 1 fi + +# Workaround gcc -frounding-math bug; see +# https://bugzilla.redhat.com/show_bug.cgi?id=2085189 +if [ "${CODENAME}" = "noble" ]; then + perl -pi -e "s/CXXFLAGS :=.*/CXXFLAGS := -std=c++20/" debian/rules || exit 1 +fi + +# On low memory machines, don't run multiple simultaneous gcc processes +if [ `grep MemTotal /proc/meminfo |cut -b10-25` -lt 7340032 ]; then + perl -pi -e 's/-j4/-j1/g' debian/rules || exit 1 +fi + cd .. || exit 1 if [ "${imp_dir_name}" != "imp" ]; then mv "${imp_dir_name}" imp diff --git a/tools/debian/no-python2.patch b/tools/debian/no-python2.patch new file mode 100644 index 0000000000..b3fbe2d525 --- /dev/null +++ b/tools/debian/no-python2.patch @@ -0,0 +1,60 @@ +diff --git a/tools/debian/control b/tools/debian/control +index da30e5782c..5f47eb6548 100644 +--- a/tools/debian/control ++++ b/tools/debian/control +@@ -53,10 +53,3 @@ Architecture: any + Depends: imp (= ${binary:Version}), ${shlibs:Depends}, ${misc:Depends} + Description: The Integrative Modeling Platform + IMP MPI module and dependents, for openmpi. +- +-Package: imp-python2 +-Section: libs +-Architecture: any +-Depends: imp (= ${binary:Version}), python-numpy, python-protobuf +-Description: The Integrative Modeling Platform +- Wrappers for Python 2 (the base IMP package contains Python 3 wrappers). +diff --git a/tools/debian/rules b/tools/debian/rules +index 72e0d7009c..8a7a440813 100755 +--- a/tools/debian/rules ++++ b/tools/debian/rules +@@ -39,39 +39,12 @@ override_dh_auto_build: + + override_dh_install: + $(MAKE) -C build DESTDIR=$(CURDIR)/debian/tmp install +- # Install Python 2 extension modules +- cd build \ +- && py2_ver=`python2 -c "import sys; print('%d.%d' % sys.version_info[:2])"` \ +- && unamem=`uname -m` \ +- && py2_lib=`echo /usr/lib/$${unamem}-*/libpython2*.so` \ +- && py2_inc=/usr/include/python$${py2_ver} \ +- && cmake .. 
\ +- -DCMAKE_INSTALL_PYTHONDIR=/usr/lib/python2.7/dist-packages \ +- -DSWIG_PYTHON_LIBRARIES=$${py2_lib} \ +- -DPYTHON_INCLUDE_DIRS=$${py2_inc} \ +- -DPYTHON_INCLUDE_PATH=$${py2_inc} -DPYTHON_LIBRARIES=$${py2_lib} \ +- -DUSE_PYTHON2=on \ +- && $(MAKE) -j4 DESTDIR=$(CURDIR)/debian/tmp install + # Bundle libTAU so users don't have to get it separately + cp build/libTAU-1.0.4/lib/debian/libTAU.so.1 debian/tmp/usr/lib/*linux*/ + (cd debian/tmp/usr/lib/*linux*/ && ln -sf libTAU.so.1 libTAU.so) +- # Don't package MPI for Python2 or -dev +- rm -rf debian/tmp/usr/lib/python2*/dist-packages/IMP/mpi +- rm -rf debian/tmp/usr/lib/python2*/dist-packages/IMP/spb +- rm -f debian/tmp/usr/lib/python2*/dist-packages/_IMP_mpi* +- rm -f debian/tmp/usr/lib/python2*/dist-packages/_IMP_spb* ++ # Don't package MPI for -dev + rm -f debian/tmp/usr/lib/*/libimp_mpi*.so + rm -f debian/tmp/usr/lib/*/libimp_spb*.so +- # Replace Python 2 .py files with symlinks to Python 3 files +- # (since they are the same) but not the SWIG-generated __init__.py +- # files (since these contain config information which might be +- # different; e.g. in Ubuntu 22.04 the Python 3 wrappers include numpy +- # support but the Python 2 wrappers do not) +- (cd debian/tmp/usr/lib/python2* \ +- && py3_ver=`python3 -c "import sys; print('%d.%d' % sys.version_info[:2])"` \ +- && find dist-packages -name '*.py' -a ! -name __init__.py \ +- -exec ln -sf $(CURDIR)/debian/tmp/usr/lib/python$${py3_ver}/\{\} \{\} \; \ +- && symlinks -rc .) + # Make sure all Python applications use the system Python in /usr/bin + perl -pi -e 's@^#!.*python.*$$@#!/usr/bin/python3@' debian/tmp/usr/bin/* + # Don't distribute example application or dependency diff --git a/tools/make-module.py b/tools/make-module.py index caef11322f..38cdf06859 100755 --- a/tools/make-module.py +++ b/tools/make-module.py @@ -17,9 +17,13 @@ def fix_string(input, modname): .replace("SCRATCH", modname.upper()) -def copy_dir(source, dest, modname): +def copy_dir(source, dest, modname, top=True): for x in os.listdir(source): - if x == ".svn" or x == 'CMakeLists.txt': + if x == ".svn" or x == 'ModuleBuild.cmake': + continue + # We only want the top-level cmake file (for out of tree modules); + # the rest are auto-generated + if x == "CMakeLists.txt" and not top: continue if x.endswith(".pyc"): continue @@ -29,14 +33,15 @@ def copy_dir(source, dest, modname): if os.path.isdir(xspath): xdpath = os.path.join(dest, x) os.mkdir(xdpath) - copy_dir(xspath, xdpath, modname) + copy_dir(xspath, xdpath, modname, top=False) else: xdpath = os.path.join(dest, fix_string(x, modname)) with open(xspath, 'r') as fh: input = fh.read() if xspath.endswith(".cpp") or xspath.endswith(".h") \ or xspath.endswith(".i-in") or xspath.endswith(".py") \ - or xspath.endswith(".md"): + or xspath.endswith(".md") \ + or xspath.endswith("CMakeLists.txt"): output = fix_string(input, modname) else: output = input @@ -73,16 +78,18 @@ def main(): modname = sys.argv[1] if len(sys.argv) == 3: modpath = sys.argv[2] - else: + elif os.path.exists("modules"): modpath = os.path.join("modules", modname) + else: + modpath = modname if not re.match('[a-zA-Z0-9_]+$', modname): print( "Module names can only contain letters, numbers, and underscores") return if os.path.isdir(modpath): - print("Module already exists") + print("Module already exists in directory " + modpath) return - print("Creating a new module " + modname + " in " + modpath) + print("Creating a new module " + modname + " in directory: " + modpath) os.mkdir(modpath) 
     copy_dir(os.path.join(impdir, "modules", "scratch"), modpath, modname)
     make_readme(modpath)
diff --git a/tools/nightly-tests/build_all.py b/tools/nightly-tests/build_all.py
index a506b1c7df..ae670faa62 100755
--- a/tools/nightly-tests/build_all.py
+++ b/tools/nightly-tests/build_all.py
@@ -402,11 +402,11 @@ def get_comps_to_build(all_comps, exclude):
     return comps


-def build_all(builder, opts):
+def build_all(builder, args):
     all_comps = get_all_components()
-    comps = get_comps_to_build(all_comps, opts.exclude)
+    comps = get_comps_to_build(all_comps, args.exclude)

-    summary_writer = SummaryWriter(opts.summary, opts.all, comps)
+    summary_writer = SummaryWriter(args.summary, args.all, comps)
     summary_writer.write()

     try:
@@ -425,13 +425,13 @@
                 m.build_result = 'circdep'
                 summary_writer.write()
         builder.setup_coverage()
-        if opts.tests == 'fast':
+        if args.tests == 'fast':
            test_all(comps, builder, 'test', summary_writer, expensive=False)
-        elif opts.tests == 'all':
+        elif args.tests == 'all':
            test_all(comps, builder, 'test', summary_writer, expensive=True)
-        if opts.examples:
+        if args.examples:
            test_all(comps, builder, 'example', summary_writer)
-        if opts.benchmarks:
+        if args.benchmarks:
            test_all(comps, builder, 'benchmark', summary_writer)
     except Exception:
         summary_writer.complete(1)
@@ -445,9 +445,8 @@


 def parse_args():
-    from optparse import OptionParser
-    usage = """%prog [options] makecmd
-
+    from argparse import ArgumentParser
+    desc = """
 Build (and optionally test) all components (modules, dependencies)
 using the given makecmd (e.g. "make", "ninja", "make -j8").
@@ -462,63 +461,61 @@
 Exit value is 1 if a build or benchmark failed, or 0 otherwise
 (test or example failures are considered to be non-fatal).
 """
-    parser = OptionParser(usage=usage)
-    parser.add_option("--summary",
-                      default=None,
-                      help="Dump summary info as a Python pickle to the "
-                           "named file. For each component, the time taken to "
-                           "run makecmd is recorded, plus the build result, "
-                           "which is either the return value of makecmd, or "
-                           "'circdep' (the component was not built due to a "
-                           "dependency problem), 'depfail' (not built because "
-                           "a dependency failed to build), 'disabled', "
-                           "or 'running' (the build hasn't finished yet). "
-                           "(If the build hasn't started yet, the key is "
-                           "missing.) The summary info is updated after each "
-                           "component build.")
-    parser.add_option("--all", default=None,
-                      help="Record information on the entire build in the "
-                           "summary pickle (see --summary) with the "
-                           "given key.")
-    parser.add_option("--outdir",
-                      default=None,
-                      help="Direct build output to the given directory; one "
-                           "file for each component is generated in the "
-                           "directory. If not given, output is sent to "
-                           "standard output.")
-    parser.add_option("--run-tests", metavar='TESTS', type='choice',
-                      dest="tests", choices=['none', 'fast', 'all'],
-                      default='none',
-                      help="none: don't run tests (default); fast: run only "
-                           "fast tests; all: run expensive and fast tests")
-    parser.add_option("--run-benchmarks", action="store_true",
-                      dest="benchmarks", default=False,
-                      help="If set, run benchmarks")
-    parser.add_option("--run-examples", action="store_true",
-                      dest="examples", default=False,
-                      help="If set, run examples")
-    parser.add_option("--ctest", default="ctest",
-                      help="Command (and optional arguments) to use to run "
-                           "tests/benchmarks/examples, e.g. \"ctest -j8\", "
-                           "\"ctest28\". Defaults to '%default'.")
-    parser.add_option("--coverage", action="store_true",
-                      dest="coverage", default=False,
-                      help="If set, capture Python coverage information when "
-                           "running tests.")
-    parser.add_option("--exclude",
-                      default=None,
-                      help="Build only those modules *not* mentioned in the "
-                           "named file (which should be the output of a "
-                           "previous run with --summary).")
-    opts, args = parser.parse_args()
-    if len(args) != 1:
-        parser.error("incorrect number of arguments")
-    return opts, args
+    parser = ArgumentParser(description=desc)
+    parser.add_argument(
+        "--summary", default=None,
+        help="Dump summary info as a Python pickle to the named file. For "
+             "each component, the time taken to run makecmd is recorded, "
+             "plus the build result, which is either the return value of "
+             "makecmd, or 'circdep' (the component was not built due to a "
+             "dependency problem), 'depfail' (not built because a dependency "
+             "failed to build), 'disabled', or 'running' (the build hasn't "
+             "finished yet). (If the build hasn't started yet, the key is "
+             "missing.) The summary info is updated after each component "
+             "build.")
+    parser.add_argument("--all", default=None,
+                        help="Record information on the entire build in the "
+                             "summary pickle (see --summary) with the "
+                             "given key.")
+    parser.add_argument("--outdir",
+                        default=None,
+                        help="Direct build output to the given directory; one "
+                             "file for each component is generated in the "
+                             "directory. If not given, output is sent to "
+                             "standard output.")
+    parser.add_argument("--run-tests", metavar='TESTS',
+                        dest="tests", choices=['none', 'fast', 'all'],
+                        default='none',
+                        help="none: don't run tests (default); fast: run only "
+                             "fast tests; all: run expensive and fast tests")
+    parser.add_argument("--run-benchmarks", action="store_true",
+                        dest="benchmarks", default=False,
+                        help="If set, run benchmarks")
+    parser.add_argument("--run-examples", action="store_true",
+                        dest="examples", default=False,
+                        help="If set, run examples")
+    parser.add_argument("--ctest", default="ctest",
+                        help="Command (and optional arguments) to use to run "
+                             "tests/benchmarks/examples, e.g. \"ctest -j8\", "
+                             "\"ctest28\". Defaults to '%(default)s'.")
+    parser.add_argument("--coverage", action="store_true",
+                        dest="coverage", default=False,
+                        help="If set, capture Python coverage information "
+                             "when running tests.")
+    parser.add_argument("--exclude",
+                        default=None,
+                        help="Build only those modules *not* mentioned in the "
+                             "named file (which should be the output of a "
+                             "previous run with --summary).")
+    parser.add_argument("makecmd", help="Command used to 'make' IMP")
+    args = parser.parse_args()
+    return args


 def main():
-    opts, args = parse_args()
-    build_all(Builder(args[0], opts.ctest, opts.outdir, opts.coverage), opts)
+    args = parse_args()
+    build_all(Builder(args.makecmd, args.ctest, args.outdir, args.coverage),
+              args)


 if __name__ == '__main__':
diff --git a/tools/nightly-tests/test-install/SConstruct b/tools/nightly-tests/test-install/SConstruct
index 87c0110455..dc41d4f688 100644
--- a/tools/nightly-tests/test-install/SConstruct
+++ b/tools/nightly-tests/test-install/SConstruct
@@ -3,6 +3,10 @@ vars.Add('libpath',
          'Directory/ies where IMP or dependency libraries are installed', None)
 vars.Add('cpppath',
          'Directory/ies where IMP or dependency headers are installed', None)
+vars.Add('datapath',
+         'Directory where IMP data are installed', None)
+vars.Add('examplepath',
+         'Directory where IMP examples are installed', None)
 vars.Add('cxxflags', 'C++ compile flags', '')
 vars.Add('linkflags', 'Link flags', '')
 vars.Add('mock_config', 'Name of the mock config used to build IMP RPMs. '
@@ -29,6 +33,8 @@ if cpppath is not None:
     cpppath = cpppath.split(':')
 pypath = env.get('pypath', None)
 path = env.get('path', None)
+datapath = env.get('datapath', None)
+examplepath = env.get('examplepath', None)
 mock = env.get('mock_config', None)
 mpi = env.get('mpi_module', None)
 cxxflags = env.get('cxxflags', '').split(' ')
@@ -43,9 +49,13 @@ env['ENV']['LIBPATH'] = libpath  # AIX
 # Set Python search path
 env['ENV']['PYTHONPATH'] = '%s:%s' % (pypath, libpath)
-# Set path
+# Set paths
 if path:
     env['ENV']['PATH'] = path + ':' + env['ENV']['PATH']
+if datapath:
+    env['ENV']['IMP_DATA'] = datapath
+if examplepath:
+    env['ENV']['IMP_EXAMPLE_DATA'] = examplepath

 # Test compiling and running a C++ program that links against IMP
 testcpp = env.Program('test.cpp', CPPPATH=cpppath, LIBPATH=libpath,
diff --git a/tools/nightly-tests/test-install/test.py b/tools/nightly-tests/test-install/test.py
index ad2ac77bc0..eeb708ce57 100644
--- a/tools/nightly-tests/test-install/test.py
+++ b/tools/nightly-tests/test-install/test.py
@@ -2,6 +2,7 @@
 import IMP.core
 import IMP.example
 import IMP.atom
+import IMP.isd
 import IMP.container
 import unittest
 import subprocess
@@ -118,6 +119,13 @@ def test_get_numpy(self):
         else:
             self.assertRaises(NotImplementedError, m1.get_ints_numpy, k)

+    def test_isd_bessel(self):
+        """Test use of Bessel functions in IMP.isd"""
+        v = IMP.isd.vonMises(0., 0., 40.)
+        # A gcc bug can cause this to return nan in some cases:
+        # https://bugzilla.redhat.com/show_bug.cgi?id=2085189
+        self.assertAlmostEqual(v.evaluate(), -0.9, delta=0.1)
+

 if __name__ == '__main__':
     unittest.main()
diff --git a/tools/rpm/IMP-copr.spec.in b/tools/rpm/IMP-copr.spec.in
index 3ba2153564..dedb2e50ae 100644
--- a/tools/rpm/IMP-copr.spec.in
+++ b/tools/rpm/IMP-copr.spec.in
@@ -1,5 +1,5 @@
 # On modern Fedora/RHEL, use Python 3 by default (and provide an IMP-python2
-# subpackage; on RHEL 9 or later, use Python 3 only).
+# subpackage for RHEL 8 or older Fedora).
 # On older systems, the IMP package uses Python 2 only.
 %if 0%{?fedora} > 12 || 0%{?rhel} >= 8
 %define with_python3 1
@@ -13,7 +13,7 @@
 %define default_python python2
 %endif

-%if 0%{?rhel} >= 9
+%if 0%{?rhel} >= 9 || 0%{?fedora} > 37
 %define with_python2 0
 %else
 %define with_python2 1
@@ -215,7 +215,8 @@ This package contains wrappers for Python 2 (the base package already
 includes Python 3 wrappers).
 %endif

-%define MPI_MODULES mpi spb
+%define MPI_MODULES mpi spb nestor
+%define MPI_CPP_MODULES mpi spb

 %if 0%{?with_mpich}
 %package mpich
@@ -225,13 +226,19 @@ Requires: %{name} = %{version}-%{release}
 Requires: mpich
 %if 0%{?fedora} || 0%{?rhel} >= 8
 Requires: python3-mpich
+BuildRequires: python3-mpi4py-mpich
 %endif
 BuildRequires: mpich-devel
+# Needed for nestor module
+%if 0%{?with_python3}
+BuildRequires: python3-pyyaml
+Requires: python3-pyyaml
+%endif

 %description mpich
 This package contains an IMP.mpi module to add MPI support,
 using the mpich library. It also includes all IMP modules that depend
-on IMP.mpi (IMP.spb).
+on IMP.mpi (IMP.spb, IMP.nestor).

 %package mpich-devel
 Group: Applications/Engineering
@@ -251,13 +258,19 @@ Requires: %{name} = %{version}-%{release}
 Requires: openmpi
 %if 0%{?fedora} || 0%{?rhel} >= 8
 Requires: python3-openmpi
+BuildRequires: python3-mpi4py-openmpi
 %endif
 BuildRequires: openmpi-devel
+# Needed for nestor module
+%if 0%{?with_python3}
+BuildRequires: python3-pyyaml
+Requires: python3-pyyaml
+%endif

 %description openmpi
 This package contains an IMP.mpi module to add MPI support,
 using the openmpi library. It also includes all IMP modules that depend
-on IMP.mpi (IMP.spb).
+on IMP.mpi (IMP.spb, IMP.nestor).

 %package openmpi-devel
 Group: Applications/Engineering
@@ -369,7 +382,7 @@ done
 (cd %{buildroot}%{_libdir}/%{default_python}*/site-packages/ && mkdir mpich)
 (cd %{buildroot}%{_libdir} && mkdir -p mpich/bin mpich/lib)
 (cd %{buildroot}%{_bindir} && mv spb* %{buildroot}%{_libdir}/mpich/bin)
-for mod in %{MPI_MODULES}; do
+for mod in %{MPI_CPP_MODULES}; do
   (cd %{buildroot}%{_libdir}/%{default_python}*/site-packages/ \
    && mv _IMP_${mod}.so mpich/)
   (cd %{buildroot}%{_libdir} && mv libimp_${mod}.so.* mpich/lib/)
@@ -451,6 +464,7 @@ export PYTHONPATH=%{buildroot}%{_libdir}/python${py2_ver}/site-packages
 %exclude %{_prefix}/share/doc/%{name}-%{version}/examples/mpi
 %exclude %{_libdir}/%{default_python}*/site-packages/IMP/mpi
 %exclude %{_libdir}/%{default_python}*/site-packages/IMP/spb
+%exclude %{_libdir}/%{default_python}*/site-packages/IMP/nestor
 %endif

 %if 0%{?with_python2} && 0%{?with_python3}
@@ -472,8 +486,11 @@ export PYTHONPATH=%{buildroot}%{_libdir}/python${py2_ver}/site-packages
 %{_prefix}/share/IMP/build_info/IMP_mpi.pck
 %{_prefix}/share/IMP/build_info/IMP.spb
 %{_prefix}/share/IMP/build_info/IMP_spb.pck
+%{_prefix}/share/IMP/build_info/IMP.nestor
+%{_prefix}/share/IMP/build_info/IMP_nestor.pck
 %{_libdir}/%{default_python}*/site-packages/IMP/mpi
 %{_libdir}/%{default_python}*/site-packages/IMP/spb
+%{_libdir}/%{default_python}*/site-packages/IMP/nestor
 %{_libdir}/%{default_python}*/site-packages/mpich/_IMP_mpi.so
 %{_libdir}/%{default_python}*/site-packages/mpich/_IMP_spb.so
 %{_libdir}/mpich/lib/libimp_mpi.so.*
@@ -501,6 +518,8 @@ export PYTHONPATH=%{buildroot}%{_libdir}/python${py2_ver}/site-packages
 %exclude %{_prefix}/share/IMP/build_info/IMP_mpi.pck
 %exclude %{_prefix}/share/IMP/build_info/IMP.spb
 %exclude %{_prefix}/share/IMP/build_info/IMP_spb.pck
+%exclude %{_prefix}/share/IMP/build_info/IMP.nestor
+%exclude %{_prefix}/share/IMP/build_info/IMP_nestor.pck
 %endif
 %{_prefix}/include/IMP
 %{_prefix}/include/IMP.h
@@ -514,6 +533,9 @@ export PYTHONPATH=%{buildroot}%{_libdir}/python${py2_ver}/site-packages
 %endif

 %changelog
+* Thu Jun 13 2024 Ben Webb 2.21.0-1
+- 2.21.0 release.
+
 * Thu Apr 04 2024 Ben Webb 2.20.2-1
 - 2.20.2 release.

diff --git a/tools/rpm/IMP.spec.in b/tools/rpm/IMP.spec.in
index 5e408e8ae1..54f2606003 100644
--- a/tools/rpm/IMP.spec.in
+++ b/tools/rpm/IMP.spec.in
@@ -1,5 +1,5 @@
 # On modern Fedora/RHEL, use Python 3 by default (and provide an IMP-python2
-# subpackage; on RHEL 9 or later, use Python 3 only).
+# subpackage for RHEL 8 or older Fedora).
 # On older systems, the IMP package uses Python 2 only.
 %if 0%{?fedora} > 12 || 0%{?rhel} >= 8
 %define with_python3 1
@@ -11,7 +11,7 @@
 %define default_python python2
 %endif

-%if 0%{?rhel} >= 9
+%if 0%{?rhel} >= 9 || 0%{?fedora} > 37
 %define with_python2 0
 %else
 %define with_python2 1
@@ -207,7 +207,8 @@ This package contains wrappers for Python 2 (the base package already
 includes Python 3 wrappers).
 %endif

-%define MPI_MODULES mpi spb
+%define MPI_MODULES mpi spb nestor
+%define MPI_CPP_MODULES mpi spb

 %if 0%{?with_mpich}
 %package mpich
@@ -217,13 +218,19 @@ Requires: %{name} = %{version}-%{release}
 Requires: mpich
 %if 0%{?fedora} || 0%{?rhel} >= 8
 Requires: python3-mpich
+BuildRequires: python3-mpi4py-mpich
 %endif
 BuildRequires: mpich-devel
+# Needed for nestor module
+%if 0%{?with_python3}
+BuildRequires: python3-pyyaml
+Requires: python3-pyyaml
+%endif

 %description mpich
 This package contains an IMP.mpi module to add MPI support,
 using the mpich library. It also includes all IMP modules that depend
-on IMP.mpi (IMP.spb).
+on IMP.mpi (IMP.spb, IMP.nestor).

 %package mpich-devel
 Group: Applications/Engineering
@@ -243,13 +250,19 @@ Requires: %{name} = %{version}-%{release}
 Requires: openmpi
 %if 0%{?fedora} || 0%{?rhel} >= 8
 Requires: python3-openmpi
+BuildRequires: python3-mpi4py-openmpi
 %endif
 BuildRequires: openmpi-devel
+# Needed for nestor module
+%if 0%{?with_python3}
+BuildRequires: python3-pyyaml
+Requires: python3-pyyaml
+%endif

 %description openmpi
 This package contains an IMP.mpi module to add MPI support,
 using the openmpi library. It also includes all IMP modules that depend
-on IMP.mpi (IMP.spb).
+on IMP.mpi (IMP.spb, IMP.nestor).
 %package openmpi-devel
 Group: Applications/Engineering
@@ -351,9 +364,12 @@ module purge
 cd build
 make DESTDIR=${RPM_BUILD_ROOT} install

+%if 0%{?with_python2}
+py2_ver=`python2 -c "import sys; print('%d.%d' % sys.version_info[:2])"`
+%endif
+
 %if 0%{?with_python2} && 0%{?with_python3}
 # Build Python 2 wrappers
-py2_ver=`python2 -c "import sys; print('%d.%d' % sys.version_info[:2])"`
 py3_ver=`python3 -c "import sys; print('%d.%d' % sys.version_info[:2])"`
 py2_lib=`echo %{_libdir}/libpython2.*.so`
 py2_inc=`echo /usr/include/python2.*`
@@ -393,7 +409,7 @@ done
 (cd ${RPM_BUILD_ROOT}%{_libdir}/%{default_python}*/site-packages/ && mkdir mpich)
 (cd ${RPM_BUILD_ROOT}%{_libdir} && mkdir -p mpich/bin mpich/lib)
 (cd ${RPM_BUILD_ROOT}%{_bindir} && mv spb* ${RPM_BUILD_ROOT}%{_libdir}/mpich/bin)
-for mod in %{MPI_MODULES}; do
+for mod in %{MPI_CPP_MODULES}; do
   (cd ${RPM_BUILD_ROOT}%{_libdir}/%{default_python}*/site-packages/ \
    && mv _IMP_${mod}.so mpich/)
   (cd ${RPM_BUILD_ROOT}%{_libdir} && mv libimp_${mod}.so.* mpich/lib/)
@@ -438,6 +454,12 @@ cp /usr/%{_lib}/libCGAL.so.10 ${RPM_BUILD_ROOT}%{_libdir}/IMP/
 %endif
 %endif

+# Don't include Python 3-only modules in Python 2 package
+%if 0%{?with_python3} == 0
+rm -rf ${RPM_BUILD_ROOT}%{_libdir}/python${py2_ver}/site-packages/IMP/nestor
+rm -rf ${RPM_BUILD_ROOT}%{_prefix}/share/doc/%{name}-%{version}/examples/nestor
+%endif
+
 # Don't distribute example application or dependency
 rm -rf ${RPM_BUILD_ROOT}%{_prefix}/bin/imp_example_app \
        ${RPM_BUILD_ROOT}%{_libdir}/libexample* \
@@ -471,6 +493,7 @@ find ${RPM_BUILD_ROOT}%{_prefix}/share/IMP/tools -name '*.py' -exec perl -pi -e
 %exclude %{_prefix}/share/doc/%{name}-%{version}/examples/mpi
 %exclude %{_libdir}/%{default_python}*/site-packages/IMP/mpi
 %exclude %{_libdir}/%{default_python}*/site-packages/IMP/spb
+%exclude %{_libdir}/%{default_python}*/site-packages/IMP/nestor
 %endif

 %if 0%{?with_python2} && 0%{?with_python3}
@@ -494,6 +517,11 @@ find ${RPM_BUILD_ROOT}%{_prefix}/share/IMP/tools -name '*.py' -exec perl -pi -e
 %{_prefix}/share/IMP/build_info/IMP_mpi.pck
 %{_prefix}/share/IMP/build_info/IMP.spb
 %{_prefix}/share/IMP/build_info/IMP_spb.pck
+%if 0%{?with_python3}
+%{_prefix}/share/IMP/build_info/IMP.nestor
+%{_prefix}/share/IMP/build_info/IMP_nestor.pck
+%{_libdir}/%{default_python}*/site-packages/IMP/nestor
+%endif
 %{_libdir}/%{default_python}*/site-packages/IMP/mpi
 %{_libdir}/%{default_python}*/site-packages/IMP/spb
 %{_libdir}/%{default_python}*/site-packages/mpich/_IMP_mpi.so
@@ -523,6 +551,8 @@ find ${RPM_BUILD_ROOT}%{_prefix}/share/IMP/tools -name '*.py' -exec perl -pi -e
 %exclude %{_prefix}/share/IMP/build_info/IMP_mpi.pck
 %exclude %{_prefix}/share/IMP/build_info/IMP.spb
 %exclude %{_prefix}/share/IMP/build_info/IMP_spb.pck
+%exclude %{_prefix}/share/IMP/build_info/IMP.nestor
+%exclude %{_prefix}/share/IMP/build_info/IMP_nestor.pck
 %endif
 %{_prefix}/include/IMP
 %{_prefix}/include/IMP.h
@@ -539,6 +569,9 @@ find ${RPM_BUILD_ROOT}%{_prefix}/share/IMP/tools -name '*.py' -exec perl -pi -e
 %endif

 %changelog
+* Thu Jun 13 2024 Ben Webb 2.21.0-1
+- 2.21.0 release.
+
 * Thu Apr 04 2024 Ben Webb 2.20.2-1
 - 2.20.2 release.
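The largest hunk above converts tools/nightly-tests/build_all.py from optparse to argparse. The pattern is mechanical: add_option becomes add_argument, optparse's type='choice' becomes a plain choices= list, '%default' in help strings becomes '%(default)s', and a declared positional argument replaces the manual len(args) != 1 check. A minimal self-contained sketch of the same pattern (illustrative only; the trimmed option set here mirrors three of build_all.py's options but is not part of the patch):

    from argparse import ArgumentParser

    def parse_args():
        parser = ArgumentParser(description="Build all IMP components")
        # optparse's type='choice' becomes a plain choices= list
        parser.add_argument("--run-tests", dest="tests",
                            choices=['none', 'fast', 'all'], default='none',
                            help="Which tests to run (defaults to %(default)s)")
        # store_true flags carry over unchanged
        parser.add_argument("--run-benchmarks", action="store_true",
                            dest="benchmarks", default=False,
                            help="If set, run benchmarks")
        # A declared positional replaces the manual len(args) != 1 check;
        # argparse itself reports a missing or extra argument as an error
        parser.add_argument("makecmd", help="Command used to 'make' IMP")
        return parser.parse_args()

    if __name__ == '__main__':
        args = parse_args()
        print(args.makecmd, args.tests, args.benchmarks)

Running this sketch as, say, `python3 sketch.py --run-tests=fast "make -j8"` yields args.makecmd == 'make -j8'; invoking it with no positional argument makes argparse print a usage error on its own, which is why the explicit parser.error() call could be dropped.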
diff --git a/tools/w32/make-package.sh b/tools/w32/make-package.sh
index 9b1886b0ac..311602f236 100755
--- a/tools/w32/make-package.sh
+++ b/tools/w32/make-package.sh
@@ -55,6 +55,7 @@ cp ${TOOLDIR}/pkg-README.txt ${ROOT}/README.txt || exit 1
 # Move pure Python code to Windows location
 mkdir ${ROOT}/python || exit 1
 mkdir ${ROOT}/python/ihm || exit 1
+mkdir ${ROOT}/python/ihm/util || exit 1

 # Drop Python 2
 rm -rf ${ROOT}/pylib/2.7/
@@ -65,8 +66,9 @@ find ${ROOT} -name __pycache__ -exec rm -rf \{\} \; 2>/dev/null
 # Put pure Python files in correct location
 mv ${ROOT}/pylib/3.9/*.py ${ROOT}/pylib/3.9/IMP ${ROOT}/python || exit 1
 mv ${ROOT}/pylib/3.9/ihm/*.py ${ROOT}/python/ihm || exit 1
+mv ${ROOT}/pylib/3.9/ihm/util/*.py ${ROOT}/python/ihm/util || exit 1

-rm -rf ${ROOT}/pylib/*/*.py ${ROOT}/pylib/*/ihm/*.py ${ROOT}/pylib/*/IMP || exit 1
+rm -rf ${ROOT}/pylib/*/*.py ${ROOT}/pylib/*/ihm/*.py ${ROOT}/pylib/*/ihm/util/*.py ${ROOT}/pylib/*/IMP || exit 1

 # Patch IMP/__init__.py, ihm/__init__.py, and RMF.py so they can find Python
 # version-specific extensions and the IMP/RMF DLLs
@@ -96,6 +98,7 @@ for PYVER in ${PYVERS}; do
   echo "pass" > ${ROOT}/python/python${PYVER}/_ihm_pyd/__init__.py || exit 1
   mv ${ROOT}/pylib/${PYVER}/*.pyd ${ROOT}/python/python${PYVER} || exit 1
   mv ${ROOT}/pylib/${PYVER}/ihm/*.pyd ${ROOT}/python/python${PYVER}/_ihm_pyd || exit 1
+  rmdir ${ROOT}/pylib/${PYVER}/ihm/util || exit 1
   rmdir ${ROOT}/pylib/${PYVER}/ihm || exit 1
   rmdir ${ROOT}/pylib/${PYVER} || exit 1
 done
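The low-memory guard added to the debian make-package.sh earlier in this patch parses /proc/meminfo with `grep MemTotal /proc/meminfo | cut -b10-25`; MemTotal is reported in kB, so the 7340032 threshold is 7 * 1024 * 1024 kB, i.e. 7 GiB. A standalone sketch of the same check (assuming a Linux /proc filesystem; the function name and fallback behavior are illustrative, not part of the patch):

    def is_low_memory(threshold_kb=7 * 1024 * 1024):
        """Return True if the machine has less than ~7 GiB of RAM,
        mirroring the MemTotal test in tools/debian/make-package.sh."""
        with open('/proc/meminfo') as fh:
            for line in fh:
                # Line looks like: "MemTotal:       16384000 kB"
                if line.startswith('MemTotal:'):
                    return int(line.split()[1]) < threshold_kb
        # Conservatively assume enough memory if the field is absent
        return False

    if __name__ == '__main__':
        # e.g. drop from "make -j4" to "make -j1" when memory is tight,
        # as the perl substitution on debian/rules does
        print('-j1' if is_low_memory() else '-j4')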