diff --git a/.github/workflows/documentation.yml b/.github/workflows/documentation.yml index a0bf14fffe8..6ee88b1abc1 100644 --- a/.github/workflows/documentation.yml +++ b/.github/workflows/documentation.yml @@ -58,7 +58,7 @@ jobs: - name: Set up Python version uses: actions/setup-python@v4 with: - python-version: '3.7' + python-version: '3.8' - name: Install dependencies env: @@ -123,6 +123,10 @@ jobs: # Rename docs archive: gsutil mv gs://open3d-docs/${{ github.sha }}_ready.tar.gz \ gs://open3d-docs/${{ github.sha }}.tar.gz + # Set holds on new artifacts, release on old + gsutil retention temp release gs://open3d-releases-master/* + gsutil retention temp set gs://open3d-releases-master/python-wheels/*${GITHUB_SHA:0:7}*.whl + gsutil retention temp set gs://open3d-releases-master/devel/*${GITHUB_SHA:0:7}* else echo "All wheels / docs not available yet." fi diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index 516e72df47b..e24f231f2d6 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -56,7 +56,7 @@ jobs: - name: Set up Python version uses: actions/setup-python@v4 with: - python-version: 3.7 + python-version: 3.8 - name: Install dependencies run: | brew install ccache pkg-config @@ -127,16 +127,16 @@ jobs: fail-fast: false # https://github.community/t/how-to-conditionally-include-exclude-items-in-matrix-eg-based-on-branch/16853/6 matrix: - python_version: ['3.7', '3.8', '3.9', '3.10'] + python_version: ['3.8', '3.9', '3.10', '3.11'] is_master: - ${{ github.ref == 'refs/heads/master' }} exclude: - - is_master: false - python_version: '3.7' - is_master: false python_version: '3.8' - is_master: false python_version: '3.9' + - is_master: false + python_version: '3.10' env: BUILD_CUDA_MODULE: OFF @@ -234,16 +234,16 @@ jobs: strategy: fail-fast: false matrix: - python_version: ['3.7', '3.8', '3.9', '3.10'] + python_version: ['3.8', '3.9', '3.10', '3.11'] is_master: - ${{ github.ref == 'refs/heads/master' }} exclude: - - is_master: false - python_version: '3.7' - is_master: false python_version: '3.8' - is_master: false python_version: '3.9' + - is_master: false + python_version: '3.10' env: OPEN3D_ML_ROOT: ${{ github.workspace }}/Open3D-ML @@ -311,6 +311,10 @@ jobs: # Rename docs archive: gsutil mv gs://open3d-docs/${{ github.sha }}_ready.tar.gz \ gs://open3d-docs/${{ github.sha }}.tar.gz + # Set holds on new artifacts, release on old + gsutil retention temp release gs://open3d-releases-master/* + gsutil retention temp set gs://open3d-releases-master/python-wheels/*${GITHUB_SHA:0:7}*.whl + gsutil retention temp set gs://open3d-releases-master/devel/*${GITHUB_SHA:0:7}* else echo "All wheels / docs not available yet. Docs not ready." 
fi diff --git a/.github/workflows/style.yml b/.github/workflows/style.yml index 740ede78b16..f6dbcac232a 100644 --- a/.github/workflows/style.yml +++ b/.github/workflows/style.yml @@ -21,7 +21,7 @@ jobs: - name: Set up Python version uses: actions/setup-python@v4 with: - python-version: '3.7' + python-version: '3.8' - name: Install dependencies run: | pip install -U clang-format~=10.0.0 yapf==0.30.0 nbformat diff --git a/.github/workflows/ubuntu-wheel.yml b/.github/workflows/ubuntu-wheel.yml index bc0dcaf6c0f..218b8052ea8 100644 --- a/.github/workflows/ubuntu-wheel.yml +++ b/.github/workflows/ubuntu-wheel.yml @@ -19,6 +19,9 @@ concurrency: env: GCE_CLI_GHA_VERSION: '416.0.0' # Fixed to avoid dependency on API changes + BUILD_CUDA_MODULE: 'ON' + BUILD_PYTORCH_OPS: 'ON' + BUILD_TENSORFLOW_OPS: 'OFF' # Turn ON when cxx11_abi is same for TF and PyTorch jobs: build-wheel: @@ -27,16 +30,16 @@ jobs: strategy: fail-fast: false matrix: - python_version: ['3.7', '3.8', '3.9', '3.10'] + python_version: ['3.8', '3.9', '3.10', '3.11'] is_master: - ${{ github.ref == 'refs/heads/master' }} exclude: - - is_master: false - python_version: '3.7' - is_master: false python_version: '3.8' - is_master: false python_version: '3.9' + - is_master: false + python_version: '3.10' env: DEVELOPER_BUILD: ${{ github.event.inputs.developer_build || 'ON' }} PYTHON_VERSION: ${{ matrix.python_version }} @@ -53,22 +56,22 @@ jobs: # `docker/docker_build.sh xxx` command to execute locally. - name: Docker build run: | - if [ "${{ env.PYTHON_VERSION }}" = "3.7" ] && [ "${{ env.DEVELOPER_BUILD }}" = "ON" ]; then - docker/docker_build.sh cuda_wheel_py37_dev - elif [ "${{ env.PYTHON_VERSION }}" = "3.8" ] && [ "${{ env.DEVELOPER_BUILD }}" = "ON" ]; then + if [ "${{ env.PYTHON_VERSION }}" = "3.8" ] && [ "${{ env.DEVELOPER_BUILD }}" = "ON" ]; then docker/docker_build.sh cuda_wheel_py38_dev elif [ "${{ env.PYTHON_VERSION }}" = "3.9" ] && [ "${{ env.DEVELOPER_BUILD }}" = "ON" ]; then docker/docker_build.sh cuda_wheel_py39_dev elif [ "${{ env.PYTHON_VERSION }}" = "3.10" ] && [ "${{ env.DEVELOPER_BUILD }}" = "ON" ]; then docker/docker_build.sh cuda_wheel_py310_dev - elif [ "${{ env.PYTHON_VERSION }}" = "3.7" ] && [ "${{ env.DEVELOPER_BUILD }}" = "OFF" ]; then - docker/docker_build.sh cuda_wheel_py37 + elif [ "${{ env.PYTHON_VERSION }}" = "3.11" ] && [ "${{ env.DEVELOPER_BUILD }}" = "ON" ]; then + docker/docker_build.sh cuda_wheel_py311_dev elif [ "${{ env.PYTHON_VERSION }}" = "3.8" ] && [ "${{ env.DEVELOPER_BUILD }}" = "OFF" ]; then docker/docker_build.sh cuda_wheel_py38 elif [ "${{ env.PYTHON_VERSION }}" = "3.9" ] && [ "${{ env.DEVELOPER_BUILD }}" = "OFF" ]; then docker/docker_build.sh cuda_wheel_py39 elif [ "${{ env.PYTHON_VERSION }}" = "3.10" ] && [ "${{ env.DEVELOPER_BUILD }}" = "OFF" ]; then docker/docker_build.sh cuda_wheel_py310 + elif [ "${{ env.PYTHON_VERSION }}" = "3.11" ] && [ "${{ env.DEVELOPER_BUILD }}" = "OFF" ]; then + docker/docker_build.sh cuda_wheel_py311 fi PIP_PKG_NAME="$(basename ${GITHUB_WORKSPACE}/open3d-[0-9]*.whl)" PIP_CPU_PKG_NAME="$(basename ${GITHUB_WORKSPACE}/open3d_cpu*.whl)" @@ -109,21 +112,21 @@ jobs: test-wheel-cpu: name: Test wheel CPU - runs-on: ubuntu-18.04 + runs-on: ubuntu-20.04 needs: [build-wheel] strategy: fail-fast: false matrix: - python_version: ['3.7', '3.8', '3.9', '3.10'] + python_version: ['3.8', '3.9', '3.10', '3.11'] is_master: - ${{ github.ref == 'refs/heads/master' }} exclude: - - is_master: false - python_version: '3.7' - is_master: false python_version: '3.8' - is_master: false 
python_version: '3.9' + - is_master: false + python_version: '3.10' env: OPEN3D_ML_ROOT: ${{ github.workspace }}/Open3D-ML steps: @@ -199,6 +202,10 @@ jobs: # Rename docs archive: gsutil mv gs://open3d-docs/${{ github.sha }}_ready.tar.gz \ gs://open3d-docs/${{ github.sha }}.tar.gz + # Set holds on new artifacts, release on old + gsutil retention temp release gs://open3d-releases-master/* + gsutil retention temp set gs://open3d-releases-master/python-wheels/*${GITHUB_SHA:0:7}*.whl + gsutil retention temp set gs://open3d-releases-master/devel/*${GITHUB_SHA:0:7}* else echo "All wheels / docs not available yet." fi diff --git a/.github/workflows/ubuntu.yml b/.github/workflows/ubuntu.yml index d41d3a2a1ce..e1c4d0b8b44 100644 --- a/.github/workflows/ubuntu.yml +++ b/.github/workflows/ubuntu.yml @@ -35,8 +35,6 @@ jobs: env: BUILD_SHARED_LIBS: ${{ matrix.BUILD_SHARED_LIBS }} BUILD_CUDA_MODULE: OFF - BUILD_TENSORFLOW_OPS: ${{ matrix.MLOPS }} - BUILD_PYTORCH_OPS: ${{ matrix.MLOPS }} DEVELOPER_BUILD: ${{ github.event.inputs.developer_build || 'ON' }} OPEN3D_CPU_RENDERING: true steps: @@ -79,6 +77,13 @@ jobs: name: open3d-devel-linux-x86_64 path: open3d-devel-*.tar.xz if-no-files-found: error + - name: Upload viewer to GitHub artifacts + if: ${{ env.BUILD_SHARED_LIBS == 'OFF' }} + uses: actions/upload-artifact@v3 + with: + name: open3d-viewer-Linux + path: open3d-viewer-*-Linux.deb + if-no-files-found: error - name: GCloud CLI auth if: ${{ github.ref == 'refs/heads/master' }} uses: 'google-github-actions/auth@v1' diff --git a/.github/workflows/vtk_packages.yml b/.github/workflows/vtk_packages.yml index bcde81876f0..30347cc2941 100644 --- a/.github/workflows/vtk_packages.yml +++ b/.github/workflows/vtk_packages.yml @@ -9,6 +9,7 @@ on: jobs: Linux: + # TODO: Convert to docker runs-on: ubuntu-18.04 steps: - name: Checkout source code diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index c414bb7136d..430ebd73c60 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -19,15 +19,11 @@ concurrency: cancel-in-progress: true env: - PIP_VER: "21.1.1" + PIP_VER: "23.2.1" WHEEL_VER: "0.38.4" STOOLS_VER: "67.3.2" - PYTEST_VER: "7.1.2" - PYTEST_RANDOMLY_VER: "3.8.0" - SCIPY_VER: "1.7.3" JEDI_VER: "0.17.2" # https://github.com/ipython/ipython/issues/12740 IDNA_VER: "2.8" # https://github.com/psf/requests/issues/5710 - TENSORBOARD_VER: "2.8" SRC_DIR: "D:\\a\\open3d\\open3d" BUILD_DIR: "C:\\Open3D\\build" NPROC: 2 @@ -101,7 +97,7 @@ jobs: - name: Set up Python version uses: actions/setup-python@v4 with: - python-version: 3.7 + python-version: 3.8 - name: Config # Move build directory to C: https://github.com/actions/virtual-environments/issues/1341 @@ -246,16 +242,16 @@ jobs: fail-fast: false # https://github.community/t/how-to-conditionally-include-exclude-items-in-matrix-eg-based-on-branch/16853/6 matrix: - python_version: ['3.7', '3.8', '3.9', '3.10'] + python_version: ['3.8', '3.9', '3.10', '3.11'] is_master: - ${{ github.ref == 'refs/heads/master' }} exclude: - - is_master: false - python_version: '3.7' - is_master: false python_version: '3.8' - is_master: false python_version: '3.9' + - is_master: false + python_version: '3.10' steps: - name: Checkout source code @@ -351,16 +347,16 @@ jobs: strategy: fail-fast: false matrix: - python_version: ['3.7', '3.8', '3.9', '3.10'] + python_version: ['3.8', '3.9', '3.10', '3.11'] is_master: - ${{ github.ref == 'refs/heads/master' }} exclude: - - is_master: false - python_version: '3.7' - is_master: false 
python_version: '3.8' - is_master: false python_version: '3.9' + - is_master: false + python_version: '3.10' steps: - name: Checkout source code @@ -388,10 +384,7 @@ jobs: python -m pip install --upgrade pip==${{ env.PIP_VER }} ` wheel==${{ env.WHEEL_VER }} ` setuptools==${{ env.STOOLS_VER }} - python -m pip install -U pytest==${{ env.PYTEST_VER }} - python -m pip install -U pytest-randomly==${{ env.PYTEST_RANDOMLY_VER }} - python -m pip install -U scipy==${{ env.SCIPY_VER }} ` - tensorboard==${{ env.TENSORBOARD_VER }} + python -m pip install -U -r python/requirements_test.txt $py_tag=(python -c "import sys; print(f'cp{sys.version_info.major}{sys.version_info.minor}')") if (Test-Path -Path "pip_package") { $PIP_PKG_NAME=(Get-ChildItem pip_package\open3d*-$py_tag-*.whl).Name @@ -420,7 +413,9 @@ jobs: # no need to run on Windows runs-on: ubuntu-latest if: ${{ github.ref == 'refs/heads/master' }} - needs: [build-wheel, windows] + # temp workaround for Windows CUDA Debug CI out of space. Still update docs. + # needs: [build-wheel, windows] + needs: [build-wheel] steps: - name: GCloud CLI auth uses: google-github-actions/auth@v1 @@ -444,6 +439,10 @@ jobs: # Rename docs archive: gsutil mv gs://open3d-docs/${{ github.sha }}_ready.tar.gz \ gs://open3d-docs/${{ github.sha }}.tar.gz + # Set holds on new artifacts, release on old + gsutil retention temp release gs://open3d-releases-master/* + gsutil retention temp set gs://open3d-releases-master/python-wheels/*${GITHUB_SHA:0:7}*.whl + gsutil retention temp set gs://open3d-releases-master/devel/*${GITHUB_SHA:0:7}* else echo "All wheels / docs not available yet." fi diff --git a/.gitignore b/.gitignore index cb1dbd7c1a6..8a041878cc6 100644 --- a/.gitignore +++ b/.gitignore @@ -83,6 +83,7 @@ docs/tutorial/**/*.ply docs/tutorial/**/*.pcd docs/tutorial/**/*.json docs/_out/ +docs/_build/ docs/python_api/ docs/python_example/ docs/conf.py diff --git a/3rdparty/assimp/assimp.cmake b/3rdparty/assimp/assimp.cmake index 445f363dcb2..996a64400f3 100644 --- a/3rdparty/assimp/assimp.cmake +++ b/3rdparty/assimp/assimp.cmake @@ -26,7 +26,6 @@ ExternalProject_Add( -DCMAKE_CXX_FLAGS:STRING=${assimp_cmake_cxx_flags} -DBUILD_SHARED_LIBS=OFF -DCMAKE_INSTALL_PREFIX= - -DASSIMP_NO_EXPORT=ON -DASSIMP_BUILD_ASSIMP_TOOLS=OFF -DASSIMP_BUILD_TESTS=OFF -DASSIMP_INSTALL_PDB=OFF diff --git a/3rdparty/cmake/FindPytorch.cmake b/3rdparty/cmake/FindPytorch.cmake index 202af7c461a..eb2a53e2ec5 100644 --- a/3rdparty/cmake/FindPytorch.cmake +++ b/3rdparty/cmake/FindPytorch.cmake @@ -97,18 +97,43 @@ foreach(lib ${TORCH_LIBRARIES}) endforeach(lib) # Check if the c++11 ABI is compatible on Linux -if(UNIX AND NOT APPLE AND ((Pytorch_CXX11_ABI AND (NOT GLIBCXX_USE_CXX11_ABI)) OR - (NOT Pytorch_CXX11_ABI AND GLIBCXX_USE_CXX11_ABI))) - if(Pytorch_CXX11_ABI) - set(NEEDED_ABI_FLAG "ON") +if(UNIX AND NOT APPLE) + if((Pytorch_CXX11_ABI AND (NOT GLIBCXX_USE_CXX11_ABI)) OR + (NOT Pytorch_CXX11_ABI AND GLIBCXX_USE_CXX11_ABI)) + if(Pytorch_CXX11_ABI) + set(NEEDED_ABI_FLAG "ON") + else() + set(NEEDED_ABI_FLAG "OFF") + endif() + message(FATAL_ERROR "PyTorch and Open3D ABI mismatch: ${Pytorch_CXX11_ABI} != ${GLIBCXX_USE_CXX11_ABI}.\n" + "Please use -DGLIBCXX_USE_CXX11_ABI=${NEEDED_ABI_FLAG} " + "in the cmake config command to change the Open3D ABI.") else() - set(NEEDED_ABI_FLAG "OFF") + message(STATUS "PyTorch matches Open3D ABI: ${Pytorch_CXX11_ABI} == ${GLIBCXX_USE_CXX11_ABI}") endif() - message(FATAL_ERROR "PyTorch and Open3D ABI mismatch: ${Pytorch_CXX11_ABI} != ${GLIBCXX_USE_CXX11_ABI}.\n" - "Please 
use -DGLIBCXX_USE_CXX11_ABI=${NEEDED_ABI_FLAG} " - "in the cmake config command to change the Open3D ABI.") -else() - message(STATUS "PyTorch matches Open3D ABI: ${Pytorch_CXX11_ABI} == ${GLIBCXX_USE_CXX11_ABI}") +endif() + +message(STATUS "Pytorch_VERSION: ${Pytorch_VERSION}, CUDAToolkit_VERSION: ${CUDAToolkit_VERSION}") +if (BUILD_PYTORCH_OPS AND BUILD_CUDA_MODULE AND CUDAToolkit_VERSION + VERSION_GREATER_EQUAL "11.0" AND Pytorch_VERSION VERSION_LESS + "1.9") + message(WARNING + "--------------------------------------------------------------------------------\n" + " \n" + " You are compiling PyTorch ops with CUDA 11 with PyTorch version < 1.9. This \n" + " configuration may have stability issues. See \n" + " https://github.com/isl-org/Open3D/issues/3324 and \n" + " https://github.com/pytorch/pytorch/issues/52663 for more information on this \n" + " problem. \n" + " \n" + " We recommend to compile PyTorch from source with compile flags \n" + " '-Xcompiler -fno-gnu-unique' \n" + " \n" + " or use the PyTorch wheels at \n" + " https://github.com/isl-org/open3d_downloads/releases/tag/torch1.8.2 \n" + " \n" + "--------------------------------------------------------------------------------\n" + ) endif() include(FindPackageHandleStandardArgs) diff --git a/3rdparty/cmake/FindTBB.cmake b/3rdparty/cmake/FindTBB.cmake new file mode 100644 index 00000000000..da885c5fcd4 --- /dev/null +++ b/3rdparty/cmake/FindTBB.cmake @@ -0,0 +1,21 @@ +# Try to use pre-installed config +find_package(TBB CONFIG) +if(TARGET TBB::tbb) + set(TBB_FOUND TRUE) +else() + message(STATUS "Target TBB::tbb not defined, falling back to manual detection") + find_path(TBB_INCLUDE_DIR tbb/tbb.h) + find_library(TBB_LIBRARY tbb) + if(TBB_INCLUDE_DIR AND TBB_LIBRARY) + message(STATUS "TBB found: ${TBB_LIBRARY}") + add_library(TBB::tbb UNKNOWN IMPORTED) + set_target_properties(TBB::tbb PROPERTIES + INTERFACE_INCLUDE_DIRECTORIES "${TBB_INCLUDE_DIR}" + IMPORTED_LOCATION "${TBB_LIBRARY}" + ) + set(TBB_FOUND TRUE) + else() + set(TBB_FOUND FALSE) + endif() +endif() + diff --git a/3rdparty/cmake/FindTensorflow.cmake b/3rdparty/cmake/FindTensorflow.cmake index 815fa497cde..6dc3d3676a2 100644 --- a/3rdparty/cmake/FindTensorflow.cmake +++ b/3rdparty/cmake/FindTensorflow.cmake @@ -43,7 +43,7 @@ if(NOT Tensorflow_FOUND) # Get Tensorflow_FRAMEWORK_LIB find_library( Tensorflow_FRAMEWORK_LIB - NAMES tensorflow_framework libtensorflow_framework.so.2 + NAMES tensorflow_framework tensorflow_framework.2 libtensorflow_framework.so.2 PATHS "${Tensorflow_LIB_DIR}" NO_DEFAULT_PATH ) @@ -59,21 +59,23 @@ if (UNIX AND NOT APPLE) endif() # Check if the c++11 ABI is compatible -if(UNIX AND NOT APPLE AND ((Tensorflow_CXX11_ABI AND (NOT GLIBCXX_USE_CXX11_ABI)) OR - (NOT Tensorflow_CXX11_ABI AND GLIBCXX_USE_CXX11_ABI))) - if(TensorFlow_CXX11_ABI) - set(NEEDED_ABI_FLAG "ON") +if (UNIX AND NOT APPLE) + if(((Tensorflow_CXX11_ABI AND (NOT GLIBCXX_USE_CXX11_ABI)) OR + (NOT Tensorflow_CXX11_ABI AND GLIBCXX_USE_CXX11_ABI))) + if(TensorFlow_CXX11_ABI) + set(NEEDED_ABI_FLAG "ON") + else() + set(NEEDED_ABI_FLAG "OFF") + endif() + message(FATAL_ERROR "TensorFlow and Open3D ABI mismatch: ${Tensorflow_CXX11_ABI} != ${GLIBCXX_USE_CXX11_ABI}.\n" + "Please use -D GLIBCXX_USE_CXX11_ABI=${NEEDED_ABI_FLAG} " + "in the cmake config command to change the Open3D ABI.") else() - set(NEEDED_ABI_FLAG "OFF") + message(STATUS "TensorFlow matches Open3D ABI: ${Tensorflow_CXX11_ABI} == ${GLIBCXX_USE_CXX11_ABI}") endif() - message(FATAL_ERROR "TensorFlow and Open3D ABI mismatch: 
${Tensorflow_CXX11_ABI} != ${GLIBCXX_USE_CXX11_ABI}.\n" - "Please use -D GLIBCXX_USE_CXX11_ABI=${NEEDED_ABI_FLAG} " - "in the cmake config command to change the Open3D ABI.") -else() - message(STATUS "TensorFlow matches Open3D ABI: ${Tensorflow_CXX11_ABI} == ${GLIBCXX_USE_CXX11_ABI}") endif() include(FindPackageHandleStandardArgs) find_package_handle_standard_args( Tensorflow DEFAULT_MSG Tensorflow_INCLUDE_DIR Tensorflow_LIB_DIR - Tensorflow_FRAMEWORK_LIB Tensorflow_DEFINITIONS) + Tensorflow_DEFINITIONS) diff --git a/3rdparty/find_dependencies.cmake b/3rdparty/find_dependencies.cmake index 58b450d6472..169bfbe1a54 100644 --- a/3rdparty/find_dependencies.cmake +++ b/3rdparty/find_dependencies.cmake @@ -269,7 +269,7 @@ endfunction() # If also defines targets, use them instead and pass them via TARGETS option. # function(open3d_find_package_3rdparty_library name) - cmake_parse_arguments(arg "PUBLIC;HEADER;REQUIRED;QUIET" "PACKAGE;PACKAGE_VERSION_VAR" "TARGETS;INCLUDE_DIRS;LIBRARIES" ${ARGN}) + cmake_parse_arguments(arg "PUBLIC;HEADER;REQUIRED;QUIET" "PACKAGE;VERSION;PACKAGE_VERSION_VAR" "TARGETS;INCLUDE_DIRS;LIBRARIES" ${ARGN}) if(arg_UNPARSED_ARGUMENTS) message(STATUS "Unparsed: ${arg_UNPARSED_ARGUMENTS}") message(FATAL_ERROR "Invalid syntax: open3d_find_package_3rdparty_library(${name} ${ARGN})") @@ -281,6 +281,9 @@ function(open3d_find_package_3rdparty_library name) set(arg_PACKAGE_VERSION_VAR "${arg_PACKAGE}_VERSION") endif() set(find_package_args "") + if(arg_VERSION) + list(APPEND find_package_args "${arg_VERSION}") + endif() if(arg_REQUIRED) list(APPEND find_package_args "REQUIRED") endif() @@ -539,11 +542,26 @@ endif() # cutlass if(BUILD_CUDA_MODULE) - include(${Open3D_3RDPARTY_DIR}/cutlass/cutlass.cmake) - open3d_import_3rdparty_library(3rdparty_cutlass - INCLUDE_DIRS ${CUTLASS_INCLUDE_DIRS} - DEPENDS ext_cutlass - ) + if(USE_SYSTEM_CUTLASS) + find_path(3rdparty_cutlass_INCLUDE_DIR NAMES cutlass/cutlass.h) + if(3rdparty_cutlass_INCLUDE_DIR) + add_library(3rdparty_cutlass INTERFACE) + target_include_directories(3rdparty_cutlass INTERFACE ${3rdparty_cutlass_INCLUDE_DIR}) + add_library(Open3D::3rdparty_cutlass ALIAS 3rdparty_cutlass) + if(NOT BUILD_SHARED_LIBS) + install(TARGETS 3rdparty_cutlass EXPORT ${PROJECT_NAME}Targets) + endif() + else() + set(USE_SYSTEM_CUTLASS OFF) + endif() + endif() + if(NOT USE_SYSTEM_CUTLASS) + include(${Open3D_3RDPARTY_DIR}/cutlass/cutlass.cmake) + open3d_import_3rdparty_library(3rdparty_cutlass + INCLUDE_DIRS ${CUTLASS_INCLUDE_DIRS} + DEPENDS ext_cutlass + ) + endif() list(APPEND Open3D_3RDPARTY_PRIVATE_TARGETS_FROM_CUSTOM Open3D::3rdparty_cutlass) endif() @@ -581,6 +599,7 @@ endif() if(USE_SYSTEM_NANOFLANN) open3d_find_package_3rdparty_library(3rdparty_nanoflann PACKAGE nanoflann + VERSION 1.5.0 TARGETS nanoflann::nanoflann ) if(NOT 3rdparty_nanoflann_FOUND) @@ -1109,6 +1128,14 @@ open3d_import_3rdparty_library(3rdparty_poisson ) list(APPEND Open3D_3RDPARTY_PRIVATE_TARGETS_FROM_CUSTOM Open3D::3rdparty_poisson) +# Minizip +if(WITH_MINIZIP) + open3d_pkg_config_3rdparty_library(3rdparty_minizip + SEARCH_ARGS minizip + ) + list(APPEND Open3D_3RDPARTY_PRIVATE_TARGETS_FROM_SYSTEM Open3D::3rdparty_minizip) +endif() + # Googletest if (BUILD_UNIT_TESTS) if(USE_SYSTEM_GOOGLETEST) @@ -1429,9 +1456,15 @@ endif() # msgpack if(USE_SYSTEM_MSGPACK) open3d_find_package_3rdparty_library(3rdparty_msgpack - PACKAGE msgpack - TARGETS msgpackc + PACKAGE msgpack-cxx + TARGETS msgpack-cxx ) + if(NOT 3rdparty_msgpack_FOUND) + open3d_find_package_3rdparty_library(3rdparty_msgpack 
+ PACKAGE msgpack + TARGETS msgpackc + ) + endif() if(NOT 3rdparty_msgpack_FOUND) open3d_pkg_config_3rdparty_library(3rdparty_msgpack SEARCH_ARGS msgpack @@ -1668,7 +1701,12 @@ else() # if(OPEN3D_USE_ONEAPI_PACKAGES) target_link_libraries(3rdparty_blas INTERFACE ${quadmath_lib}) # Suppress Apple compiler warnigns. - target_link_options(3rdparty_blas INTERFACE "-Wl,-no_compact_unwind") + if(NOT ${CMAKE_BUILD_TYPE} STREQUAL "Debug") + message(WARNING "All link warnings have been disabled on Apple Silicon builds " + "due to the large number of spurious warnings that are generated. If you " + "need to see link warnings please build with -DCMAKE_BUILD_TYPE=Debug.") + target_link_options(3rdparty_blas INTERFACE "-Wl,-w") + endif() endif() elseif(UNIX AND NOT APPLE) # On Ubuntu 20.04 x86-64, libgfortran.a is not compiled with `-fPIC`. @@ -1821,25 +1859,47 @@ endif () # Stdgpu if (BUILD_CUDA_MODULE) - include(${Open3D_3RDPARTY_DIR}/stdgpu/stdgpu.cmake) - open3d_import_3rdparty_library(3rdparty_stdgpu - INCLUDE_DIRS ${STDGPU_INCLUDE_DIRS} - LIB_DIR ${STDGPU_LIB_DIR} - LIBRARIES ${STDGPU_LIBRARIES} - DEPENDS ext_stdgpu - ) + if(USE_SYSTEM_STDGPU) + open3d_find_package_3rdparty_library(3rdparty_stdgpu + PACKAGE stdgpu + TARGETS stdgpu::stdgpu + ) + if(NOT 3rdparty_stdgpu_FOUND) + set(USE_SYSTEM_STDGPU OFF) + endif() + endif() + if(NOT USE_SYSTEM_STDGPU) + include(${Open3D_3RDPARTY_DIR}/stdgpu/stdgpu.cmake) + open3d_import_3rdparty_library(3rdparty_stdgpu + INCLUDE_DIRS ${STDGPU_INCLUDE_DIRS} + LIB_DIR ${STDGPU_LIB_DIR} + LIBRARIES ${STDGPU_LIBRARIES} + DEPENDS ext_stdgpu + ) + endif() list(APPEND Open3D_3RDPARTY_PRIVATE_TARGETS_FROM_CUSTOM Open3D::3rdparty_stdgpu) endif () # embree -include(${Open3D_3RDPARTY_DIR}/embree/embree.cmake) -open3d_import_3rdparty_library(3rdparty_embree - HIDDEN - INCLUDE_DIRS ${EMBREE_INCLUDE_DIRS} - LIB_DIR ${EMBREE_LIB_DIR} - LIBRARIES ${EMBREE_LIBRARIES} - DEPENDS ext_embree -) +if(USE_SYSTEM_EMBREE) + open3d_find_package_3rdparty_library(3rdparty_embree + PACKAGE embree + TARGETS embree + ) + if(NOT 3rdparty_embree_FOUND) + set(USE_SYSTEM_EMBREE OFF) + endif() +endif() +if(NOT USE_SYSTEM_EMBREE) + include(${Open3D_3RDPARTY_DIR}/embree/embree.cmake) + open3d_import_3rdparty_library(3rdparty_embree + HIDDEN + INCLUDE_DIRS ${EMBREE_INCLUDE_DIRS} + LIB_DIR ${EMBREE_LIB_DIR} + LIBRARIES ${EMBREE_LIBRARIES} + DEPENDS ext_embree + ) +endif() list(APPEND Open3D_3RDPARTY_PRIVATE_TARGETS_FROM_CUSTOM Open3D::3rdparty_embree) # WebRTC diff --git a/3rdparty/fmt/fmt.cmake b/3rdparty/fmt/fmt.cmake index f53e2f49214..88cd8e2fcef 100644 --- a/3rdparty/fmt/fmt.cmake +++ b/3rdparty/fmt/fmt.cmake @@ -2,9 +2,8 @@ include(ExternalProject) set(FMT_LIB_NAME fmt) -if (MSVC AND MSVC_VERSION VERSION_LESS 1930 OR - CMAKE_CXX_COMPILER_ID MATCHES "IntelLLVM") - # MSVC 17.x required for building fmt >6 +if (MSVC OR CMAKE_CXX_COMPILER_ID MATCHES "IntelLLVM") + # MSVC has errors when building fmt >6, up till 9.1 # SYCL / DPC++ needs fmt ver <=6 or >= 9.2: https://github.com/fmtlib/fmt/issues/3005 set(FMT_VER "6.0.0") set(FMT_SHA256 diff --git a/3rdparty/nanoflann/nanoflann.cmake b/3rdparty/nanoflann/nanoflann.cmake index 91437f9b77c..5afa4f22d03 100644 --- a/3rdparty/nanoflann/nanoflann.cmake +++ b/3rdparty/nanoflann/nanoflann.cmake @@ -3,8 +3,8 @@ include(ExternalProject) ExternalProject_Add( ext_nanoflann PREFIX nanoflann - URL https://github.com/jlblancoc/nanoflann/archive/refs/tags/v1.3.2.tar.gz - URL_HASH SHA256=e100b5fc8d72e9426a80312d852a62c05ddefd23f17cbb22ccd8b458b11d0bea + URL 
https://github.com/jlblancoc/nanoflann/archive/refs/tags/v1.5.0.tar.gz + URL_HASH SHA256=89aecfef1a956ccba7e40f24561846d064f309bc547cc184af7f4426e42f8e65 DOWNLOAD_DIR "${OPEN3D_THIRD_PARTY_DOWNLOAD_DIR}/nanoflann" UPDATE_COMMAND "" CONFIGURE_COMMAND "" diff --git a/3rdparty/possionrecon/possionrecon.cmake b/3rdparty/possionrecon/possionrecon.cmake index 0328804d0ce..ef5f87f940c 100644 --- a/3rdparty/possionrecon/possionrecon.cmake +++ b/3rdparty/possionrecon/possionrecon.cmake @@ -3,8 +3,8 @@ include(ExternalProject) ExternalProject_Add( ext_poisson PREFIX poisson - URL https://github.com/isl-org/Open3D-PoissonRecon/archive/fd273ea8c77a36973d6565a495c9969ccfb12d3b.tar.gz - URL_HASH SHA256=917d98e037982d57a159fa166b259ff3dc90ffffe09c6a562a71b400f6869ddf + URL https://github.com/isl-org/Open3D-PoissonRecon/archive/90f3f064e275b275cff445881ecee5a7c495c9e0.tar.gz + URL_HASH SHA256=1310df0c80ff0616b8fcf9b2fb568aa9b2190d0e071b0ead47dba339c146b1d3 DOWNLOAD_DIR "${OPEN3D_THIRD_PARTY_DOWNLOAD_DIR}/poisson" SOURCE_DIR "poisson/src/ext_poisson/PoissonRecon" # Add extra directory level for POISSON_INCLUDE_DIRS. UPDATE_COMMAND "" diff --git a/3rdparty/pybind11/pybind11.cmake b/3rdparty/pybind11/pybind11.cmake index 5ed710aa58b..9a9c1b6df77 100644 --- a/3rdparty/pybind11/pybind11.cmake +++ b/3rdparty/pybind11/pybind11.cmake @@ -3,8 +3,8 @@ include(FetchContent) FetchContent_Declare( ext_pybind11 PREFIX pybind11 - URL https://github.com/pybind/pybind11/archive/refs/tags/v2.6.2.tar.gz - URL_HASH SHA256=8ff2fff22df038f5cd02cea8af56622bc67f5b64534f1b83b9f133b8366acff2 + URL https://github.com/pybind/pybind11/archive/refs/tags/v2.11.1.tar.gz + URL_HASH SHA256=d475978da0cdc2d43b73f30910786759d593a9d8ee05b1b6846d1eb16c6d2e0c DOWNLOAD_DIR "${OPEN3D_THIRD_PARTY_DOWNLOAD_DIR}/pybind11" ) diff --git a/3rdparty/vtk/CMakeLists.txt b/3rdparty/vtk/CMakeLists.txt index 7188cb76325..dd74c820c86 100644 --- a/3rdparty/vtk/CMakeLists.txt +++ b/3rdparty/vtk/CMakeLists.txt @@ -30,7 +30,7 @@ if(WIN32) endif() if (APPLE) -set (CMAKE_OSX_DEPLOYMENT_TARGET "10.15" CACHE STRING +set (CMAKE_OSX_DEPLOYMENT_TARGET "12.6" CACHE STRING "Minimum OS X deployment version" FORCE) endif() diff --git a/CHANGELOG.md b/CHANGELOG.md index ba4d922de2d..0ad0b678ef9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,9 +1,12 @@ ## Master +* Fix tensor based TSDF integration example. +* Use GLIBCXX_USE_CXX11_ABI=ON by default * Python 3.9 support. Tensorflow bump 2.4.1 -> 2.5.0. PyTorch bump 1.7.1 -> 1.8.1 (LTS) * Fix undefined names: docstr and VisibleDeprecationWarning (PR #3844) * Corrected documentation for Tensor based PointClound, LineSet, TriangleMesh (PR #4685) * Corrected documentation for KDTree (typo in Notebook) (PR #4744) +* Corrected documentation for visualisation tutorial * Remove `setuptools` and `wheel` from requirements for end users (PR #5020) * Fix various typos (PR #5070) * Exposed more functionality in SLAM and odometry pipelines @@ -12,6 +15,15 @@ * Fix raycasting scene: Allow setting of number of threads that are used for building a raycasting scene * Fix Python bindings for CUDA device synchronization, voxel grid saving (PR #5425) * Support msgpack versions without cmake +* Changed TriangleMesh to store materials in a list so they can be accessed by the material index (PR #5938) +* Support multi-threading in the RayCastingScene function to commit scene (PR #6051). 
+* Fix some bad triangle generation in TriangleMesh::SimplifyQuadricDecimation +* Fix printing of tensor in gpu and add validation check for bounds of axis-aligned bounding box (PR #6444) +* Python 3.11 support. bump pybind11 v2.6.2 -> v2.11.1 +* Check for support of CUDA Memory Pools at runtime (#4679) +* Fix `toString`, `CreateFromPoints` methods and improve docs in `AxisAlignedBoundingBox`. 🐛📝 +* Migrate Open3d documentation to furo theme ✨ (#6470) +* Expose Near Clip + Far Clip parameters to setup_camera in OffscreenRenderer (#6520) * Add Doppler ICP in tensor registration pipeline (PR #5237) ## 0.13 diff --git a/CMakeLists.txt b/CMakeLists.txt index 2659d7d2bab..c3d64e4b95d 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -65,11 +65,7 @@ else() option(STATIC_WINDOWS_RUNTIME "Use static (MT/MTd) Windows runtime" ON ) endif() option(BUILD_SYCL_MODULE "Build SYCL module with Intel oneAPI" OFF) -if(BUILD_SYCL_MODULE) - option(GLIBCXX_USE_CXX11_ABI "Set -D_GLIBCXX_USE_CXX11_ABI=1" ON ) -else() - option(GLIBCXX_USE_CXX11_ABI "Set -D_GLIBCXX_USE_CXX11_ABI=1" OFF) -endif() +option(GLIBCXX_USE_CXX11_ABI "Set -D_GLIBCXX_USE_CXX11_ABI=1" ON ) option(ENABLE_SYCL_UNIFIED_SHARED_MEMORY "Enable SYCL unified shared memory" OFF) if(BUILD_GUI AND (WIN32 OR UNIX AND NOT LINUX_AARCH64 AND NOT APPLE_AARCH64)) option(BUILD_WEBRTC "Build WebRTC visualizer" ON ) @@ -91,7 +87,9 @@ else() endif() option(USE_SYSTEM_ASSIMP "Use system pre-installed assimp" OFF) option(USE_SYSTEM_CURL "Use system pre-installed curl" OFF) +option(USE_SYSTEM_CUTLASS "Use system pre-installed cutlass" OFF) option(USE_SYSTEM_EIGEN3 "Use system pre-installed eigen3" OFF) +option(USE_SYSTEM_EMBREE "Use system pre-installed Embree" OFF) option(USE_SYSTEM_FILAMENT "Use system pre-installed filament" OFF) option(USE_SYSTEM_FMT "Use system pre-installed fmt" OFF) option(USE_SYSTEM_GLEW "Use system pre-installed glew" OFF) @@ -107,6 +105,7 @@ option(USE_SYSTEM_OPENSSL "Use system pre-installed OpenSSL" OFF option(USE_SYSTEM_PNG "Use system pre-installed png" OFF) option(USE_SYSTEM_PYBIND11 "Use system pre-installed pybind11" OFF) option(USE_SYSTEM_QHULLCPP "Use system pre-installed qhullcpp" OFF) +option(USE_SYSTEM_STDGPU "Use system pre-installed stdgpu" OFF) option(USE_SYSTEM_TBB "Use system pre-installed TBB" OFF) option(USE_SYSTEM_TINYGLTF "Use system pre-installed tinygltf" OFF) option(USE_SYSTEM_TINYOBJLOADER "Use system pre-installed tinyobjloader" OFF) @@ -121,6 +120,7 @@ else() endif() option(PREFER_OSX_HOMEBREW "Prefer Homebrew libs over frameworks" ON ) +option(WITH_MINIZIP "Enable MiniZIP" OFF) # Sensor options option(BUILD_LIBREALSENSE "Build support for Intel RealSense camera" OFF) @@ -290,11 +290,14 @@ endif() # Global flag to set CXX standard. # This does not affect 3rd party libraries. -if (BUILD_SYCL_MODULE) +# Tensorflow 2.9+ requires cxx_17, but MSVC 19.29 throws errors with C++17 +# enabled. +if (BUILD_SYCL_MODULE OR BUILD_TENSORFLOW_OPS) set(CMAKE_CXX_STANDARD 17) else() set(CMAKE_CXX_STANDARD 14) endif() +set(CMAKE_CXX_EXTENSIONS OFF) # Improved compatibility # FIXME: Remove this workaround once a fixed Visual Studio 16.10 version is released. 
if (BUILD_CUDA_MODULE @@ -354,11 +357,15 @@ if(UNIX OR CYGWIN) set(Open3D_INSTALL_INCLUDE_DIR "${CMAKE_INSTALL_INCLUDEDIR}") set(Open3D_INSTALL_BIN_DIR "${CMAKE_INSTALL_BINDIR}") set(Open3D_INSTALL_LIB_DIR "${CMAKE_INSTALL_LIBDIR}") + # Put resources in */share/ + set(Open3D_INSTALL_RESOURCE_DIR "${CMAKE_INSTALL_DATADIR}") set(Open3D_INSTALL_CMAKE_DIR "${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME}") else() set(Open3D_INSTALL_INCLUDE_DIR include) set(Open3D_INSTALL_BIN_DIR bin) set(Open3D_INSTALL_LIB_DIR lib) + # Put resources in */bin, with executables / DLLs + set(Open3D_INSTALL_RESOURCE_DIR bin) set(Open3D_INSTALL_CMAKE_DIR CMake) endif() diff --git a/README.md b/README.md index ced7235af9f..09f94e0101b 100644 --- a/README.md +++ b/README.md @@ -24,28 +24,33 @@ data structures and algorithms in both C++ and Python. The backend is highly optimized and is set up for parallelization. We welcome contributions from the open-source community. -[![Ubuntu CI](https://github.com/isl-org/Open3D/workflows/Ubuntu%20CI/badge.svg)](https://github.com/isl-org/Open3D/actions?query=workflow%3A%22Ubuntu+CI%22) -[![macOS CI](https://github.com/isl-org/Open3D/workflows/macOS%20CI/badge.svg)](https://github.com/isl-org/Open3D/actions?query=workflow%3A%22macOS+CI%22) -[![Windows CI](https://github.com/isl-org/Open3D/workflows/Windows%20CI/badge.svg)](https://github.com/isl-org/Open3D/actions?query=workflow%3A%22Windows+CI%22) +[![Ubuntu CI](https://github.com/isl-org/Open3D/actions/workflows/ubuntu.yml/badge.svg)](https://github.com/isl-org/Open3D/actions?query=workflow%3A%22Ubuntu+CI%22) +[![macOS CI](https://github.com/isl-org/Open3D/actions/workflows/macos.yml/badge.svg)](https://github.com/isl-org/Open3D/actions?query=workflow%3A%22macOS+CI%22) +[![Windows CI](https://github.com/isl-org/Open3D/actions/workflows/windows.yml/badge.svg)](https://github.com/isl-org/Open3D/actions?query=workflow%3A%22Windows+CI%22) **Core features of Open3D include:** -* 3D data structures -* 3D data processing algorithms -* Scene reconstruction -* Surface alignment -* 3D visualization -* Physically based rendering (PBR) -* 3D machine learning support with PyTorch and TensorFlow -* GPU acceleration for core 3D operations -* Available in C++ and Python +- 3D data structures +- 3D data processing algorithms +- Scene reconstruction +- Surface alignment +- 3D visualization +- Physically based rendering (PBR) +- 3D machine learning support with PyTorch and TensorFlow +- GPU acceleration for core 3D operations +- Available in C++ and Python + +Here's a brief overview of the different components of Open3D and how they fit +together to enable full end to end pipelines: + +![Open3D_layers](https://github.com/isl-org/Open3D/assets/41028320/e9b8645a-a823-4d78-8310-e85207bbc3e4) For more, please visit the [Open3D documentation](http://www.open3d.org/docs). ## Python quick start Pre-built pip packages support Ubuntu 18.04+, macOS 10.15+ and Windows 10+ -(64-bit) with Python 3.6-3.10. +(64-bit) with Python 3.8-3.11. 
```bash # Install @@ -74,21 +79,21 @@ To compile Open3D from source, refer to Checkout the following links to get started with Open3D C++ API -* Download Open3D binary package: [Release](https://github.com/isl-org/Open3D/releases) or [latest development version](http://www.open3d.org/docs/latest/getting_started.html#c) -* [Compiling Open3D from source](http://www.open3d.org/docs/release/compilation.html) -* [Open3D C++ API](http://www.open3d.org/docs/release/cpp_api.html) +- Download Open3D binary package: [Release](https://github.com/isl-org/Open3D/releases) or [latest development version](http://www.open3d.org/docs/latest/getting_started.html#c) +- [Compiling Open3D from source](http://www.open3d.org/docs/release/compilation.html) +- [Open3D C++ API](http://www.open3d.org/docs/release/cpp_api.html) To use Open3D in your C++ project, checkout the following examples -* [Find Pre-Installed Open3D Package in CMake](https://github.com/isl-org/open3d-cmake-find-package) -* [Use Open3D as a CMake External Project](https://github.com/isl-org/open3d-cmake-external-project) +- [Find Pre-Installed Open3D Package in CMake](https://github.com/isl-org/open3d-cmake-find-package) +- [Use Open3D as a CMake External Project](https://github.com/isl-org/open3d-cmake-external-project) ## Open3D-Viewer app -Open3D-Viewer is a standalone 3D viewer app available on Ubuntu and macOS. -Please stay tuned for Windows. Download Open3D Viewer from the +Open3D-Viewer is a standalone 3D viewer app available on Debian (Ubuntu), macOS +and Windows. Download Open3D Viewer from the [release page](https://github.com/isl-org/Open3D/releases). ## Open3D-ML @@ -102,11 +107,11 @@ top of the Open3D core library and extends it with machine learning tools for ## Communication channels -* [GitHub Issue](https://github.com/isl-org/Open3D/issues): bug reports, - feature requests, etc. -* [Forum](https://github.com/isl-org/Open3D/discussions): discussion on the usage of Open3D. -* [Discord Chat](https://discord.gg/D35BGvn): online chats, discussions, - and collaboration with other users and developers. +- [GitHub Issue](https://github.com/isl-org/Open3D/issues): bug reports, + feature requests, etc. +- [Forum](https://github.com/isl-org/Open3D/discussions): discussion on the usage of Open3D. +- [Discord Chat](https://discord.gg/D35BGvn): online chats, discussions, + and collaboration with other users and developers. ## Citation diff --git a/cmake/Open3DPackaging.cmake b/cmake/Open3DPackaging.cmake index 1178cdd5376..e6ce4a3c15c 100644 --- a/cmake/Open3DPackaging.cmake +++ b/cmake/Open3DPackaging.cmake @@ -1,3 +1,6 @@ +# This is packaging for the Open3D library. 
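As a companion to the C++ project links above, here is a minimal, hedged usage sketch (not part of this patch); the file name and build wiring are illustrative and follow the open3d-cmake-find-package example:

```cpp
// Minimal sketch: load a point cloud and show it with the legacy C++ API.
#include <memory>

#include "open3d/Open3D.h"

int main() {
    auto cloud = std::make_shared<open3d::geometry::PointCloud>();
    // "fragment.ply" is an illustrative path, not an asset shipped by this patch.
    if (!open3d::io::ReadPointCloud("fragment.ply", *cloud)) {
        return 1;
    }
    open3d::visualization::DrawGeometries({cloud});
    return 0;
}
```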
See +# cpp/apps/Open3DViewer/Debian/CMakeLists.txt for packaging the Debian Open3D +# viewer set(CPACK_GENERATOR TXZ) if(WIN32) set(CPACK_GENERATOR ZIP) diff --git a/cmake/Open3DPrintConfigurationSummary.cmake b/cmake/Open3DPrintConfigurationSummary.cmake index 9427d08c0c7..c8e8a891a8b 100644 --- a/cmake/Open3DPrintConfigurationSummary.cmake +++ b/cmake/Open3DPrintConfigurationSummary.cmake @@ -40,27 +40,6 @@ function(open3d_print_configuration_summary) open3d_aligned_print("Build Jupyter Extension" "${BUILD_JUPYTER_EXTENSION}") open3d_aligned_print("Build TensorFlow Ops" "${BUILD_TENSORFLOW_OPS}") open3d_aligned_print("Build PyTorch Ops" "${BUILD_PYTORCH_OPS}") - if (BUILD_PYTORCH_OPS AND BUILD_CUDA_MODULE AND CUDAToolkit_VERSION - VERSION_GREATER_EQUAL "11.0" AND Pytorch_VERSION VERSION_LESS - "1.9") - message(WARNING - "--------------------------------------------------------------------------------\n" - " \n" - " You are compiling PyTorch ops with CUDA 11 with PyTorch version < 1.9. This \n" - " configuration may have stability issues. See \n" - " https://github.com/isl-org/Open3D/issues/3324 and \n" - " https://github.com/pytorch/pytorch/issues/52663 for more information on this \n" - " problem. \n" - " \n" - " We recommend to compile PyTorch from source with compile flags \n" - " '-Xcompiler -fno-gnu-unique' \n" - " \n" - " or use the PyTorch wheels at \n" - " https://github.com/isl-org/open3d_downloads/releases/tag/torch1.8.2 \n" - " \n" - "--------------------------------------------------------------------------------\n" - ) - endif() open3d_aligned_print("Build Benchmarks" "${BUILD_BENCHMARKS}") open3d_aligned_print("Bundle Open3D-ML" "${BUNDLE_OPEN3D_ML}") if(GLIBCXX_USE_CXX11_ABI) diff --git a/cpp/apps/CMakeLists.txt b/cpp/apps/CMakeLists.txt index 6380f272b2d..ac927bd147a 100644 --- a/cpp/apps/CMakeLists.txt +++ b/cpp/apps/CMakeLists.txt @@ -72,8 +72,17 @@ macro(open3d_add_app_gui SRC_DIR APP_NAME TARGET_NAME) RENAME "${APP_NAME}.xml") # Various caches need to be updated for the app to become visible install(CODE "execute_process(COMMAND ${SOURCE_DIR}/postinstall-linux.sh)") + configure_file("${SOURCE_DIR}/Debian/CMakeLists.in.txt" + "${CMAKE_BINARY_DIR}/package-${TARGET_NAME}-deb/CMakeLists.txt" @ONLY) + add_custom_target(package-${TARGET_NAME}-deb + COMMAND cp -a "${CMAKE_BINARY_DIR}/${APP_NAME}" . + COMMAND cp "${SOURCE_DIR}/icon.svg" "${APP_NAME}/${APP_NAME}.svg" + COMMAND cp "${SOURCE_DIR}/${TARGET_NAME}.xml" "${APP_NAME}/" + COMMAND "${CMAKE_COMMAND}" -S . + COMMAND "${CMAKE_COMMAND}" --build . 
-t package + WORKING_DIRECTORY "${CMAKE_BINARY_DIR}/package-${TARGET_NAME}-deb/" + DEPENDS ${TARGET_NAME}) elseif (WIN32) - # Don't create a command window on launch target_sources(${TARGET_NAME} PRIVATE "${SOURCE_DIR}/icon.rc") # add icon # MSVC puts the binary in bin/Open3D/Release/Open3D.exe diff --git a/cpp/apps/OfflineReconstruction/LegacyReconstructionUtil.h b/cpp/apps/OfflineReconstruction/LegacyReconstructionUtil.h index 9b8f495b680..26d30bfe182 100644 --- a/cpp/apps/OfflineReconstruction/LegacyReconstructionUtil.h +++ b/cpp/apps/OfflineReconstruction/LegacyReconstructionUtil.h @@ -1025,7 +1025,7 @@ class ReconstructionPipeline { Eigen::Matrix6d info; const size_t num_scale = voxel_size.size(); for (size_t i = 0; i < num_scale; i++) { - const double max_dis = config_["voxel_szie"].asDouble() * 1.4; + const double max_dis = config_["voxel_size"].asDouble() * 1.4; const auto src_down = src.VoxelDownSample(voxel_size[i]); const auto dst_down = dst.VoxelDownSample(voxel_size[i]); const pipelines::registration::ICPConvergenceCriteria criteria( diff --git a/cpp/apps/Open3DViewer/Debian/CMakeLists.in.txt b/cpp/apps/Open3DViewer/Debian/CMakeLists.in.txt new file mode 100644 index 00000000000..c318c212612 --- /dev/null +++ b/cpp/apps/Open3DViewer/Debian/CMakeLists.in.txt @@ -0,0 +1,36 @@ +# Create Debian package +cmake_minimum_required(VERSION 3.8.0) +project("Open3D-Debian") + +message(STATUS "Building package for Debian") + +# Install assets +install(DIRECTORY "Open3D" + DESTINATION share + USE_SOURCE_PERMISSIONS + PATTERN "Open3D/Open3D.svg" EXCLUDE + PATTERN "Open3D/Open3D.desktop" EXCLUDE + PATTERN "Open3D/Open3DViewer.xml" EXCLUDE + PATTERN "Open3D/Open3D" EXCLUDE + PATTERN "Open3D/CMakeLists.txt" EXCLUDE +) +install(FILES "Open3D/Open3D.desktop" DESTINATION /usr/share/applications) +install(FILES "Open3D/Open3DViewer.xml" DESTINATION /usr/share/mime/packages) +install(FILES "Open3D/Open3D.svg" DESTINATION /usr/share/icons/hicolor/scalable/apps) +install(PROGRAMS "Open3D/Open3D" DESTINATION bin) + +# CPACK parameter +set(CPACK_GENERATOR "DEB") +set(CPACK_PACKAGE_NAME "open3d-viewer") +set(CPACK_PACKAGE_DESCRIPTION_SUMMARY "Open3D Viewer for 3D files") +set(CPACK_PACKAGE_CONTACT "Open3D team <@PROJECT_EMAIL@>") +set(CPACK_DEBIAN_PACKAGE_SECTION "Graphics") +set(CPACK_PACKAGE_VERSION "@OPEN3D_VERSION@") +set(CPACK_DEBIAN_PACKAGE_DEPENDS "libc++1, libgomp1, libpng16-16, libglfw3") +set(CPACK_PACKAGE_HOMEPAGE_URL "@PROJECT_HOMEPAGE_URL@") + +# How to set cpack prefix: https://stackoverflow.com/a/7363073/1255535 +set(CPACK_SET_DESTDIR true) +set(CPACK_INSTALL_PREFIX /usr/local) + +include(CPack) diff --git a/cpp/apps/Open3DViewer/Open3DViewer.desktop.in b/cpp/apps/Open3DViewer/Open3DViewer.desktop.in index 196060fe9f3..3c2e73c1e43 100644 --- a/cpp/apps/Open3DViewer/Open3DViewer.desktop.in +++ b/cpp/apps/Open3DViewer/Open3DViewer.desktop.in @@ -3,7 +3,7 @@ Type=Application Name=Open3D Icon=Open3D Comment=Viewer for triangle meshes and point clouds -Exec=${CMAKE_INSTALL_PREFIX}/bin/Open3D/Open3D %f +Exec=${CMAKE_INSTALL_PREFIX}/bin/Open3D %f Terminal=false Categories=Graphics MimeType=model/stl;model/obj;model/fbx;model/gltf-binary;model/gltf+json;model/x.stl-ascii;model/x.stl-binary;model/x-ply;application/x-off;application/x-xyz;application/x-xyzn;application/x-xyzrgb;application/x-pcd;application/x-pts diff --git a/cpp/apps/Open3DViewer/postinstall-linux.sh b/cpp/apps/Open3DViewer/postinstall-linux.sh index 701af849adf..1310d306809 100755 --- a/cpp/apps/Open3DViewer/postinstall-linux.sh 
+++ b/cpp/apps/Open3DViewer/postinstall-linux.sh @@ -1,4 +1,4 @@ -#/bin/bash +#!/bin/sh if [ $(id -u) = 0 ]; then update-mime-database /usr/share/mime # add new MIME types diff --git a/cpp/benchmarks/t/geometry/PointCloud.cpp b/cpp/benchmarks/t/geometry/PointCloud.cpp index 39cc5f3368a..814587320a6 100644 --- a/cpp/benchmarks/t/geometry/PointCloud.cpp +++ b/cpp/benchmarks/t/geometry/PointCloud.cpp @@ -69,7 +69,7 @@ void LegacyVoxelDownSample(benchmark::State& state, float voxel_size) { void VoxelDownSample(benchmark::State& state, const core::Device& device, float voxel_size, - const core::HashBackendType& backend) { + const std::string& reduction) { t::geometry::PointCloud pcd; // t::io::CreatePointCloudFromFile lacks support of remove_inf_points and // remove_nan_points @@ -77,10 +77,10 @@ void VoxelDownSample(benchmark::State& state, pcd = pcd.To(device); // Warm up. - pcd.VoxelDownSample(voxel_size, backend); + pcd.VoxelDownSample(voxel_size, reduction); for (auto _ : state) { - pcd.VoxelDownSample(voxel_size, backend); + pcd.VoxelDownSample(voxel_size, reduction); core::cuda::Synchronize(device); } } @@ -387,28 +387,34 @@ BENCHMARK_CAPTURE(ToLegacyPointCloud, CUDA, core::Device("CUDA:0")) ->Unit(benchmark::kMillisecond); #endif -#define ENUM_VOXELSIZE(DEVICE, BACKEND) \ - BENCHMARK_CAPTURE(VoxelDownSample, BACKEND##_0_01, DEVICE, 0.01, BACKEND) \ - ->Unit(benchmark::kMillisecond); \ - BENCHMARK_CAPTURE(VoxelDownSample, BACKEND##_0_02, DEVICE, 0.08, BACKEND) \ - ->Unit(benchmark::kMillisecond); \ - BENCHMARK_CAPTURE(VoxelDownSample, BACKEND##_0_04, DEVICE, 0.04, BACKEND) \ - ->Unit(benchmark::kMillisecond); \ - BENCHMARK_CAPTURE(VoxelDownSample, BACKEND##_0_08, DEVICE, 0.08, BACKEND) \ - ->Unit(benchmark::kMillisecond); \ - BENCHMARK_CAPTURE(VoxelDownSample, BACKEND##_0_16, DEVICE, 0.16, BACKEND) \ - ->Unit(benchmark::kMillisecond); \ - BENCHMARK_CAPTURE(VoxelDownSample, BACKEND##_0_32, DEVICE, 0.32, BACKEND) \ +#define ENUM_VOXELSIZE(DEVICE, REDUCTION) \ + BENCHMARK_CAPTURE(VoxelDownSample, REDUCTION##_0_01, DEVICE, 0.01, \ + REDUCTION) \ + ->Unit(benchmark::kMillisecond); \ + BENCHMARK_CAPTURE(VoxelDownSample, REDUCTION##_0_02, DEVICE, 0.08, \ + REDUCTION) \ + ->Unit(benchmark::kMillisecond); \ + BENCHMARK_CAPTURE(VoxelDownSample, REDUCTION##_0_04, DEVICE, 0.04, \ + REDUCTION) \ + ->Unit(benchmark::kMillisecond); \ + BENCHMARK_CAPTURE(VoxelDownSample, REDUCTION##_0_08, DEVICE, 0.08, \ + REDUCTION) \ + ->Unit(benchmark::kMillisecond); \ + BENCHMARK_CAPTURE(VoxelDownSample, REDUCTION##_0_16, DEVICE, 0.16, \ + REDUCTION) \ + ->Unit(benchmark::kMillisecond); \ + BENCHMARK_CAPTURE(VoxelDownSample, REDUCTION##_0_32, DEVICE, 0.32, \ + REDUCTION) \ ->Unit(benchmark::kMillisecond); +const std::string kReductionMean = "mean"; #ifdef BUILD_CUDA_MODULE -#define ENUM_VOXELDOWNSAMPLE_BACKEND() \ - ENUM_VOXELSIZE(core::Device("CPU:0"), core::HashBackendType::TBB) \ - ENUM_VOXELSIZE(core::Device("CUDA:0"), core::HashBackendType::Slab) \ - ENUM_VOXELSIZE(core::Device("CUDA:0"), core::HashBackendType::StdGPU) +#define ENUM_VOXELDOWNSAMPLE_REDUCTION() \ + ENUM_VOXELSIZE(core::Device("CPU:0"), kReductionMean) \ + ENUM_VOXELSIZE(core::Device("CUDA:0"), kReductionMean) #else -#define ENUM_VOXELDOWNSAMPLE_BACKEND() \ - ENUM_VOXELSIZE(core::Device("CPU:0"), core::HashBackendType::TBB) +#define ENUM_VOXELDOWNSAMPLE_REDUCTION() \ + ENUM_VOXELSIZE(core::Device("CPU:0"), kReductionMean) #endif BENCHMARK_CAPTURE(LegacyVoxelDownSample, Legacy_0_01, 0.01) @@ -423,7 +429,7 @@ 
BENCHMARK_CAPTURE(LegacyVoxelDownSample, Legacy_0_16, 0.16) ->Unit(benchmark::kMillisecond); BENCHMARK_CAPTURE(LegacyVoxelDownSample, Legacy_0_32, 0.32) ->Unit(benchmark::kMillisecond); -ENUM_VOXELDOWNSAMPLE_BACKEND() +ENUM_VOXELDOWNSAMPLE_REDUCTION() BENCHMARK_CAPTURE(LegacyUniformDownSample, Legacy_2, 2) ->Unit(benchmark::kMillisecond); diff --git a/cpp/open3d/CMakeLists.txt b/cpp/open3d/CMakeLists.txt index 5b938fe5378..6da3581e509 100644 --- a/cpp/open3d/CMakeLists.txt +++ b/cpp/open3d/CMakeLists.txt @@ -173,6 +173,12 @@ install(FILES "${PROJECT_BINARY_DIR}/Open3DConfigVersion.cmake" DESTINATION "${Open3D_INSTALL_CMAKE_DIR}" COMPONENT dev) +# Install GUI resources +if (BUILD_GUI) + install(DIRECTORY ${GUI_RESOURCE_DIR} + DESTINATION "${Open3D_INSTALL_RESOURCE_DIR}") +endif() + if (BUILD_SHARED_LIBS AND UNIX) file(CONFIGURE OUTPUT Open3D.pc.in CONTENT [=[ @@ -184,7 +190,7 @@ Name: Open3D Description: @PROJECT_DESCRIPTION@ URL: @PROJECT_HOMEPAGE_URL@ Version: @PROJECT_VERSION@ -Cflags: -std=c++@CMAKE_CXX_STANDARD@ -isystem ${includedir} -isystem ${includedir}/open3d/3rdparty -D$, -D> +Cflags: -std=c++@CMAKE_CXX_STANDARD@ -isystem${includedir} -isystem${includedir}/open3d/3rdparty -D$, -D> Libs: -L${libdir} -Wl,-rpath,${libdir} -lOpen3D]=] @ONLY NEWLINE_STYLE LF) file(GENERATE OUTPUT Open3D.pc INPUT "${CMAKE_CURRENT_BINARY_DIR}/Open3D.pc.in" TARGET "Open3D::Open3D") diff --git a/cpp/open3d/core/AdvancedIndexing.cpp b/cpp/open3d/core/AdvancedIndexing.cpp index f2bbf306c3f..86ce53148ba 100644 --- a/cpp/open3d/core/AdvancedIndexing.cpp +++ b/cpp/open3d/core/AdvancedIndexing.cpp @@ -206,7 +206,7 @@ void AdvancedIndexPreprocessor::RunPreprocess() { // If the indexed_shape_ contains a dimension of size 0 but the // replacement shape does not, the index is out of bounds. This is because // there is no valid number to index an empty tensor. - // Normally, out of bounds is detected in the advanded indexing kernel. We + // Normally, out of bounds is detected in the advanced indexing kernel. We // detected here for more helpful error message. auto contains_zero = [](const SizeVector& vals) -> bool { return std::any_of(vals.begin(), vals.end(), diff --git a/cpp/open3d/core/AdvancedIndexing.h b/cpp/open3d/core/AdvancedIndexing.h index b01d922a61d..17a8253085a 100644 --- a/cpp/open3d/core/AdvancedIndexing.h +++ b/cpp/open3d/core/AdvancedIndexing.h @@ -49,7 +49,7 @@ class AdvancedIndexPreprocessor { const Tensor& tensor, const std::vector& index_tensors); /// Expand all tensors to the broadcasted shape, 0-dim tensors are ignored. - /// Thorws exception if the common broadcasted shape does not exist. + /// Throws exception if the common broadcasted shape does not exist. 
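The benchmark change above replaces the hash-backend argument of the tensor `VoxelDownSample` with a reduction string ("mean" is the only reduction exercised in this patch). A minimal sketch of calling it with that argument (the input path is illustrative):

```cpp
#include "open3d/t/geometry/PointCloud.h"
#include "open3d/t/io/PointCloudIO.h"

open3d::t::geometry::PointCloud DownsampleExample() {
    open3d::t::geometry::PointCloud pcd;
    open3d::t::io::ReadPointCloud("fragment.ply", pcd);  // illustrative path
    // Same call shape as the benchmark: voxel size plus reduction string.
    return pcd.VoxelDownSample(/*voxel_size=*/0.05, /*reduction=*/"mean");
}
```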
static std::pair, SizeVector> ExpandToCommonShapeExceptZeroDim(const std::vector& index_tensors); @@ -127,7 +127,7 @@ class AdvancedIndexer { if (indexed_shape.size() != indexed_strides.size()) { utility::LogError( "Internal error: indexed_shape's ndim {} does not equal to " - "indexd_strides' ndim {}", + "indexed_strides' ndim {}", indexed_shape.size(), indexed_strides.size()); } num_indices_ = indexed_shape.size(); diff --git a/cpp/open3d/core/CMakeLists.txt b/cpp/open3d/core/CMakeLists.txt index 43f0802f4d0..d42e645da39 100644 --- a/cpp/open3d/core/CMakeLists.txt +++ b/cpp/open3d/core/CMakeLists.txt @@ -47,6 +47,8 @@ target_sources(core PRIVATE kernel/BinaryEWCPU.cpp kernel/IndexGetSet.cpp kernel/IndexGetSetCPU.cpp + kernel/IndexReduction.cpp + kernel/IndexReductionCPU.cpp kernel/Kernel.cpp kernel/NonZero.cpp kernel/NonZeroCPU.cpp @@ -90,6 +92,7 @@ if (BUILD_CUDA_MODULE) kernel/ArangeCUDA.cu kernel/BinaryEWCUDA.cu kernel/IndexGetSetCUDA.cu + kernel/IndexReductionCUDA.cu kernel/NonZeroCUDA.cu kernel/ReductionCUDA.cu kernel/UnaryEWCUDA.cu diff --git a/cpp/open3d/core/CUDAUtils.cpp b/cpp/open3d/core/CUDAUtils.cpp index 630cbf38938..1da331035e7 100644 --- a/cpp/open3d/core/CUDAUtils.cpp +++ b/cpp/open3d/core/CUDAUtils.cpp @@ -108,6 +108,27 @@ void AssertCUDADeviceAvailable(const Device& device) { } } +bool SupportsMemoryPools(const Device& device) { +#if defined(BUILD_CUDA_MODULE) && (CUDART_VERSION >= 11020) + if (device.IsCUDA()) { + int driverVersion = 0; + int deviceSupportsMemoryPools = 0; + OPEN3D_CUDA_CHECK(cudaDriverGetVersion(&driverVersion)); + if (driverVersion >= + 11020) { // avoid invalid value error in cudaDeviceGetAttribute + OPEN3D_CUDA_CHECK(cudaDeviceGetAttribute( + &deviceSupportsMemoryPools, cudaDevAttrMemoryPoolsSupported, + device.GetID())); + } + return !!deviceSupportsMemoryPools; + } else { + return false; + } +#else + return false; +#endif +} + #ifdef BUILD_CUDA_MODULE int GetDevice() { int device; diff --git a/cpp/open3d/core/CUDAUtils.h b/cpp/open3d/core/CUDAUtils.h index 2996cd0987c..15f87be040d 100644 --- a/cpp/open3d/core/CUDAUtils.h +++ b/cpp/open3d/core/CUDAUtils.h @@ -255,6 +255,13 @@ void AssertCUDADeviceAvailable(int device_id); /// \param device The device to be checked. void AssertCUDADeviceAvailable(const Device& device); +/// Checks if the CUDA device support Memory Pools +/// used by the Stream Ordered Memory Allocator, +/// see +/// https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART__MEMORY__POOLS.html +/// \param device The device to be checked. +bool SupportsMemoryPools(const Device& device); + #ifdef BUILD_CUDA_MODULE int GetDevice(); diff --git a/cpp/open3d/core/Dispatch.h b/cpp/open3d/core/Dispatch.h index 466ccf86beb..fe658c8cf15 100644 --- a/cpp/open3d/core/Dispatch.h +++ b/cpp/open3d/core/Dispatch.h @@ -113,3 +113,34 @@ open3d::utility::LogError("Unsupported data type."); \ } \ }() + +#define DISPATCH_INT_DTYPE_PREFIX_TO_TEMPLATE(DTYPE, PREFIX, ...) 
\ + [&] { \ + if (DTYPE == open3d::core::Int8) { \ + using scalar_##PREFIX##_t = int8_t; \ + return __VA_ARGS__(); \ + } else if (DTYPE == open3d::core::Int16) { \ + using scalar_##PREFIX##_t = int16_t; \ + return __VA_ARGS__(); \ + } else if (DTYPE == open3d::core::Int32) { \ + using scalar_##PREFIX##_t = int32_t; \ + return __VA_ARGS__(); \ + } else if (DTYPE == open3d::core::Int64) { \ + using scalar_##PREFIX##_t = int64_t; \ + return __VA_ARGS__(); \ + } else if (DTYPE == open3d::core::UInt8) { \ + using scalar_##PREFIX##_t = uint8_t; \ + return __VA_ARGS__(); \ + } else if (DTYPE == open3d::core::UInt16) { \ + using scalar_##PREFIX##_t = uint16_t; \ + return __VA_ARGS__(); \ + } else if (DTYPE == open3d::core::UInt32) { \ + using scalar_##PREFIX##_t = uint32_t; \ + return __VA_ARGS__(); \ + } else if (DTYPE == open3d::core::UInt64) { \ + using scalar_##PREFIX##_t = uint64_t; \ + return __VA_ARGS__(); \ + } else { \ + open3d::utility::LogError("Unsupported data type."); \ + } \ + }() diff --git a/cpp/open3d/core/MemoryManagerCUDA.cpp b/cpp/open3d/core/MemoryManagerCUDA.cpp index 3cc2f4730bc..835ea550cfe 100644 --- a/cpp/open3d/core/MemoryManagerCUDA.cpp +++ b/cpp/open3d/core/MemoryManagerCUDA.cpp @@ -20,8 +20,12 @@ void* MemoryManagerCUDA::Malloc(size_t byte_size, const Device& device) { void* ptr; if (device.IsCUDA()) { #if CUDART_VERSION >= 11020 - OPEN3D_CUDA_CHECK(cudaMallocAsync(static_cast(&ptr), byte_size, - cuda::GetStream())); + if (cuda::SupportsMemoryPools(device)) { + OPEN3D_CUDA_CHECK(cudaMallocAsync(static_cast(&ptr), + byte_size, cuda::GetStream())); + } else { + OPEN3D_CUDA_CHECK(cudaMalloc(static_cast(&ptr), byte_size)); + } #else OPEN3D_CUDA_CHECK(cudaMalloc(static_cast(&ptr), byte_size)); #endif @@ -38,7 +42,11 @@ void MemoryManagerCUDA::Free(void* ptr, const Device& device) { if (device.IsCUDA()) { if (ptr && IsCUDAPointer(ptr, device)) { #if CUDART_VERSION >= 11020 - OPEN3D_CUDA_CHECK(cudaFreeAsync(ptr, cuda::GetStream())); + if (cuda::SupportsMemoryPools(device)) { + OPEN3D_CUDA_CHECK(cudaFreeAsync(ptr, cuda::GetStream())); + } else { + OPEN3D_CUDA_CHECK(cudaFree(ptr)); + } #else OPEN3D_CUDA_CHECK(cudaFree(ptr)); #endif diff --git a/cpp/open3d/core/Tensor.cpp b/cpp/open3d/core/Tensor.cpp index 69e04acfb94..d405c501993 100644 --- a/cpp/open3d/core/Tensor.cpp +++ b/cpp/open3d/core/Tensor.cpp @@ -22,6 +22,7 @@ #include "open3d/core/TensorFunction.h" #include "open3d/core/TensorKey.h" #include "open3d/core/kernel/Arange.h" +#include "open3d/core/kernel/IndexReduction.h" #include "open3d/core/kernel/Kernel.h" #include "open3d/core/linalg/Det.h" #include "open3d/core/linalg/Inverse.h" @@ -749,7 +750,7 @@ std::string Tensor::ToString(bool with_suffix, std::ostringstream rc; if (IsCUDA() || !IsContiguous()) { Tensor host_contiguous_tensor = Contiguous().To(Device("CPU:0")); - rc << host_contiguous_tensor.ToString(false, ""); + rc << host_contiguous_tensor.ToString(with_suffix, indent); } else { if (shape_.NumElements() == 0) { rc << indent; @@ -955,6 +956,43 @@ void Tensor::IndexSet(const std::vector& index_tensors, aip.GetIndexedShape(), aip.GetIndexedStrides()); } +void Tensor::IndexAdd_(int64_t dim, const Tensor& index, const Tensor& src) { + if (index.NumDims() != 1) { + utility::LogError("IndexAdd_ only supports 1D index tensors."); + } + + // Dim check. 
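+    // dim must satisfy 0 <= dim < NumDims(); negative dims are not wrapped.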
+ if (dim < 0) { + utility::LogError("IndexAdd_ only supports sum at non-negative dim."); + } + if (NumDims() <= dim) { + utility::LogError("Sum dim {} exceeds tensor dim {}.", dim, NumDims()); + } + + // shape check + if (src.NumDims() != NumDims()) { + utility::LogError( + "IndexAdd_ only supports src tensor with same dimension as " + "this tensor."); + } + for (int64_t d = 0; d < NumDims(); ++d) { + if (d != dim && src.GetShape(d) != GetShape(d)) { + utility::LogError( + "IndexAdd_ only supports src tensor with same shape as " + "this " + "tensor except dim {}.", + dim); + } + } + + // Type check. + AssertTensorDtype(index, core::Int64); + AssertTensorDtype(*this, src.GetDtype()); + + // Apply kernel. + kernel::IndexAdd_(dim, index, src, *this); +} + Tensor Tensor::Permute(const SizeVector& dims) const { // Check dimension size if (static_cast(dims.size()) != NumDims()) { diff --git a/cpp/open3d/core/Tensor.h b/cpp/open3d/core/Tensor.h index 8b4d0280077..38422055a30 100644 --- a/cpp/open3d/core/Tensor.h +++ b/cpp/open3d/core/Tensor.h @@ -575,6 +575,16 @@ class Tensor : public IsDevice { void IndexSet(const std::vector& index_tensors, const Tensor& src_tensor); + /// \brief Advanced in-place reduction by index. + /// + /// See + /// https://pytorch.org/docs/stable/generated/torch.Tensor.index_add_.html + /// + /// self[index[i]] = operator(self[index[i]], src[i]). + /// + /// Note: Only support 1D index and src tensors now. + void IndexAdd_(int64_t dim, const Tensor& index, const Tensor& src); + /// \brief Permute (dimension shuffle) the Tensor, returns a view. /// /// \param dims The desired ordering of dimensions. diff --git a/cpp/open3d/core/kernel/IndexReduction.cpp b/cpp/open3d/core/kernel/IndexReduction.cpp new file mode 100644 index 00000000000..128da56e370 --- /dev/null +++ b/cpp/open3d/core/kernel/IndexReduction.cpp @@ -0,0 +1,49 @@ +// ---------------------------------------------------------------------------- +// - Open3D: www.open3d.org - +// ---------------------------------------------------------------------------- +// Copyright (c) 2018-2023 www.open3d.org +// SPDX-License-Identifier: MIT +// ---------------------------------------------------------------------------- + +#include "open3d/core/kernel/IndexReduction.h" + +#include "open3d/utility/Logging.h" + +namespace open3d { +namespace core { +namespace kernel { + +void IndexAdd_(int64_t dim, + const Tensor& index, + const Tensor& src, + Tensor& dst) { + // Permute the reduction dimension to the first. 
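+    // e.g. dim = 1 on a {N, C, H} src yields permute = {1, 0, 2}, so the
+    // reduced dimension becomes the outermost one.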
+ SizeVector permute = {}; + for (int64_t d = 0; d <= dim; ++d) { + if (d == 0) { + permute.push_back(dim); + } else { + permute.push_back(d - 1); + } + } + for (int64_t d = dim + 1; d < src.NumDims(); ++d) { + permute.push_back(d); + } + + auto src_permute = src.Permute(permute); + auto dst_permute = dst.Permute(permute); + + if (dst.IsCPU()) { + IndexAddCPU_(dim, index, src_permute, dst_permute); + } else if (dst.IsCUDA()) { +#ifdef BUILD_CUDA_MODULE + IndexAddCUDA_(dim, index, src_permute, dst_permute); +#endif + } else { + utility::LogError("IndexAdd_: Unimplemented device"); + } +} + +} // namespace kernel +} // namespace core +} // namespace open3d diff --git a/cpp/open3d/core/kernel/IndexReduction.h b/cpp/open3d/core/kernel/IndexReduction.h new file mode 100644 index 00000000000..7035b53210b --- /dev/null +++ b/cpp/open3d/core/kernel/IndexReduction.h @@ -0,0 +1,36 @@ +// ---------------------------------------------------------------------------- +// - Open3D: www.open3d.org - +// ---------------------------------------------------------------------------- +// Copyright (c) 2018-2023 www.open3d.org +// SPDX-License-Identifier: MIT +// ---------------------------------------------------------------------------- + +#pragma once + +#include "open3d/core/Tensor.h" +#include "open3d/utility/Logging.h" + +namespace open3d { +namespace core { +namespace kernel { + +void IndexAdd_(int64_t dim, + const Tensor& index, + const Tensor& src, + Tensor& dst); + +void IndexAddCPU_(int64_t dim, + const Tensor& index, + const Tensor& src, + Tensor& dst); + +#ifdef BUILD_CUDA_MODULE +void IndexAddCUDA_(int64_t dim, + const Tensor& index, + const Tensor& src, + Tensor& dst); +#endif + +} // namespace kernel +} // namespace core +} // namespace open3d diff --git a/cpp/open3d/core/kernel/IndexReductionCPU.cpp b/cpp/open3d/core/kernel/IndexReductionCPU.cpp new file mode 100644 index 00000000000..4458832ec96 --- /dev/null +++ b/cpp/open3d/core/kernel/IndexReductionCPU.cpp @@ -0,0 +1,79 @@ +// ---------------------------------------------------------------------------- +// - Open3D: www.open3d.org - +// ---------------------------------------------------------------------------- +// Copyright (c) 2018-2023 www.open3d.org +// SPDX-License-Identifier: MIT +// ---------------------------------------------------------------------------- + +#include "open3d/core/Dispatch.h" +#include "open3d/core/Indexer.h" +#include "open3d/core/Tensor.h" +#include "open3d/utility/Logging.h" + +namespace open3d { +namespace core { +namespace kernel { + +template +void LaunchIndexReductionKernel(int64_t dim, + const Device& device, + const Tensor& index, + const Tensor& src, + Tensor& dst, + const func_t& element_kernel) { + // index: [N,], src: [N, D], dst: [M, D] + // In Indexer, output shape defines the actual master strides. + // However, in IndexAdd_, input dominates the iterations. + // So put dst (output) at indexer's input, and src (input) at output. + Indexer indexer({dst}, src, DtypePolicy::NONE); + + // Index is simply a 1D contiguous tensor, with a different stride + // behavior to src. So use raw pointer for simplicity. 
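Inside the CPU launcher, the Indexer is built with dst in the input slot and src in the output slot because src drives the iteration: each workload index splits into a source row (the reduction index) and an offset within that row (the broadcasting index), and the destination offset becomes index[reduction_idx] * broadcasting_elems + broadcasting_idx. A small numeric check of that mapping, with made-up shapes:

#include <cstdint>
#include <cstdio>

int main() {
    // src: [N=3, D=2] contiguous, dst: [M=4, D=2], index = {3, 0, 3}.
    const int64_t D = 2;
    const int64_t index[] = {3, 0, 3};
    for (int64_t workload_idx = 0; workload_idx < 3 * D; ++workload_idx) {
        const int64_t reduction_idx = workload_idx / D;     // source row
        const int64_t broadcasting_idx = workload_idx % D;  // offset in the row
        const int64_t dst_idx = index[reduction_idx] * D + broadcasting_idx;
        std::printf("src element %lld -> dst element %lld\n",
                    (long long)workload_idx, (long long)dst_idx);
    }
    return 0;
}

Destination elements 6 and 7 (row 3) are hit twice here because index value 3 appears twice.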
+ auto index_ptr = index.GetDataPtr(); + + int64_t broadcasting_elems = 1; + for (int64_t d = 1; d < src.NumDims(); ++d) { + broadcasting_elems *= src.GetShape(d); + } + auto element_func = [=](int64_t workload_idx) { + int reduction_idx = workload_idx / broadcasting_elems; + int broadcasting_idx = workload_idx % broadcasting_elems; + + const int64_t idx = index_ptr[reduction_idx]; + int64_t dst_idx = idx * broadcasting_elems + broadcasting_idx; + + void* src_ptr = indexer.GetOutputPtr(0, workload_idx); + void* dst_ptr = indexer.GetInputPtr(0, dst_idx); + // Note input and output is switched here to adapt to the indexer + element_kernel(src_ptr, dst_ptr); + }; + + // TODO: check in detail + // No OpenMP could be faster, otherwise there would be thousands of atomics. + for (int64_t d = 0; d < indexer.NumWorkloads(); ++d) { + element_func(d); + } +} + +template +static OPEN3D_HOST_DEVICE void CPUSumKernel(const void* src, void* dst) { + scalar_t* dst_s_ptr = static_cast(dst); + const scalar_t* src_s_ptr = static_cast(src); + *dst_s_ptr += *src_s_ptr; +} + +void IndexAddCPU_(int64_t dim, + const Tensor& index, + const Tensor& src, + Tensor& dst) { + DISPATCH_FLOAT_DTYPE_TO_TEMPLATE(src.GetDtype(), [&]() { + LaunchIndexReductionKernel(dim, src.GetDevice(), index, src, dst, + [](const void* src, void* dst) { + CPUSumKernel(src, dst); + }); + }); +} + +} // namespace kernel +} // namespace core +} // namespace open3d diff --git a/cpp/open3d/core/kernel/IndexReductionCUDA.cu b/cpp/open3d/core/kernel/IndexReductionCUDA.cu new file mode 100644 index 00000000000..24c913a46af --- /dev/null +++ b/cpp/open3d/core/kernel/IndexReductionCUDA.cu @@ -0,0 +1,82 @@ +// ---------------------------------------------------------------------------- +// - Open3D: www.open3d.org - +// ---------------------------------------------------------------------------- +// Copyright (c) 2018-2023 www.open3d.org +// SPDX-License-Identifier: MIT +// ---------------------------------------------------------------------------- +#include + +#include "open3d/core/CUDAUtils.h" +#include "open3d/core/Dispatch.h" +#include "open3d/core/Indexer.h" +#include "open3d/core/ParallelFor.h" +#include "open3d/core/Tensor.h" +#include "open3d/t/geometry/kernel/GeometryMacros.h" + +namespace open3d { +namespace core { +namespace kernel { + +template +void LaunchIndexReductionKernel(int64_t dim, + const Device& device, + const Tensor& index, + const Tensor& src, + Tensor& dst, + const func_t& element_kernel) { + OPEN3D_ASSERT_HOST_DEVICE_LAMBDA(func_t); + + // index: [N,], src: [N, D], dst: [M, D] + // In Indexer, output shape defines the actual master strides. + // However, in IndexAdd_, input dominates the iterations. + // So put dst (output) at indexer's input, and src (input) at output. + Indexer indexer({dst}, src, DtypePolicy::NONE); + + // Index is simply a 1D contiguous tensor, with a different stride + // behavior to src. So use raw pointer for simplicity. 
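Because the index tensor may contain duplicates, several source rows can accumulate into the same destination row. That is why the CPU path runs the element function serially (per the comment above, OpenMP would trade one loop for thousands of atomics), while the CUDA path below performs each update with atomicAdd. A plain serial reference of the sum reduction for comparison; names and shapes are illustrative, not part of this change:

#include <cstdint>
#include <vector>

// dst[index[i]][d] += src[i][d], with row-major [rows, D] storage.
void IndexAddReference(const std::vector<int64_t>& index,
                       const std::vector<float>& src,  // [N, D]
                       std::vector<float>& dst,        // [M, D]
                       int64_t D) {
    for (std::size_t i = 0; i < index.size(); ++i) {
        for (int64_t d = 0; d < D; ++d) {
            dst[index[i] * D + d] += src[i * D + d];
        }
    }
}

int main() {
    std::vector<int64_t> index = {3, 0, 3};
    std::vector<float> src(3 * 2, 1.0f), dst(4 * 2, 0.0f);
    IndexAddReference(index, src, dst, /*D=*/2);
    // dst row 3 is now {2, 2}: two source rows were summed into it.
    return 0;
}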
+ auto index_ptr = index.GetDataPtr(); + + int64_t broadcasting_elems = 1; + for (int64_t d = 1; d < src.NumDims(); ++d) { + broadcasting_elems *= src.GetShape(d); + } + auto element_func = [=] OPEN3D_HOST_DEVICE(int64_t workload_idx) { + int reduction_idx = workload_idx / broadcasting_elems; + int broadcasting_idx = workload_idx % broadcasting_elems; + + const int64_t idx = index_ptr[reduction_idx]; + int64_t dst_idx = idx * broadcasting_elems + broadcasting_idx; + + void* src_ptr = indexer.GetOutputPtr(0, workload_idx); + void* dst_ptr = indexer.GetInputPtr(0, dst_idx); + // Note input and output is switched here to adapt to the indexer. + element_kernel(src_ptr, dst_ptr); + }; + + ParallelFor(device, indexer.NumWorkloads(), element_func); + OPEN3D_GET_LAST_CUDA_ERROR("LaunchIndexReductionKernel failed."); +} + +template +static OPEN3D_HOST_DEVICE void CUDASumKernel(const void* src, void* dst) { + scalar_t* dst_s_ptr = static_cast(dst); + const scalar_t* src_s_ptr = static_cast(src); + atomicAdd(dst_s_ptr, *src_s_ptr); +} + +void IndexAddCUDA_(int64_t dim, + const Tensor& index, + const Tensor& src, + Tensor& dst) { + DISPATCH_FLOAT_DTYPE_TO_TEMPLATE(src.GetDtype(), [&]() { + LaunchIndexReductionKernel( + dim, src.GetDevice(), index, src, dst, + [] OPEN3D_HOST_DEVICE(const void* src, void* dst) { + CUDASumKernel(src, dst); + }); + }); +} + +} // namespace kernel +} // namespace core +} // namespace open3d diff --git a/cpp/open3d/core/nns/NanoFlannImpl.h b/cpp/open3d/core/nns/NanoFlannImpl.h index f090611818e..027d6d0c4ee 100644 --- a/cpp/open3d/core/nns/NanoFlannImpl.h +++ b/cpp/open3d/core/nns/NanoFlannImpl.h @@ -233,7 +233,7 @@ void _RadiusSearchCPU(NanoFlannIndexHolderBase *holder, std::vector> neighbors_distances(num_queries); std::vector neighbors_count(num_queries, 0); - nanoflann::SearchParams params; + nanoflann::SearchParameters params; params.sorted = sort; auto holder_ = @@ -241,7 +241,7 @@ void _RadiusSearchCPU(NanoFlannIndexHolderBase *holder, tbb::parallel_for( tbb::blocked_range(0, num_queries), [&](const tbb::blocked_range &r) { - std::vector> search_result; + std::vector> search_result; for (size_t i = r.begin(); i != r.end(); ++i) { T radius = radii[i]; if (METRIC == L2) { @@ -346,7 +346,7 @@ void _HybridSearchCPU(NanoFlannIndexHolderBase *holder, output_allocator.AllocDistances(&distances_ptr, num_indices); output_allocator.AllocCounts(&counts_ptr, num_queries); - nanoflann::SearchParams params; + nanoflann::SearchParameters params; params.sorted = true; auto holder_ = @@ -354,7 +354,7 @@ void _HybridSearchCPU(NanoFlannIndexHolderBase *holder, tbb::parallel_for( tbb::blocked_range(0, num_queries), [&](const tbb::blocked_range &r) { - std::vector> ret_matches; + std::vector> ret_matches; for (size_t i = r.begin(); i != r.end(); ++i) { size_t num_results = holder_->index_->radiusSearch( &queries[i * dimension], radius_squared, diff --git a/cpp/open3d/data/Dataset.h b/cpp/open3d/data/Dataset.h index 66abe494fb4..35c448d11e0 100644 --- a/cpp/open3d/data/Dataset.h +++ b/cpp/open3d/data/Dataset.h @@ -758,26 +758,26 @@ class PaintedPlasterTexture : public DownloadDataset { /// cloud, rgb sequence, clean depth sequence, noisy depth sequence, oni /// sequence, and ground-truth camera trajectory. /// -/// RedwoodIndoorLivingRoom1 -/// ├── colors -/// │ ├── 00000.jpg -/// │ ├── 00001.jpg -/// │ ├── ... -/// │ └── 02869.jpg -/// ├── depth -/// │ ├── 00000.png -/// │ ├── 00001.png -/// │ ├── ... 
-/// │ └── 02869.png -/// ├── depth_noisy -/// │ ├── 00000.png -/// │ ├── 00001.png -/// │ ├── ... -/// │ └── 02869.png -/// ├── dist-model.txt -/// ├── livingroom1.oni -/// ├── livingroom1-traj.txt -/// └── livingroom.ply +/// RedwoodIndoorLivingRoom1 +/// ├── colors +/// │ ├── 00000.jpg +/// │ ├── 00001.jpg +/// │ ├── ... +/// │ └── 02869.jpg +/// ├── depth +/// │ ├── 00000.png +/// │ ├── 00001.png +/// │ ├── ... +/// │ └── 02869.png +/// ├── depth_noisy +/// │ ├── 00000.png +/// │ ├── 00001.png +/// │ ├── ... +/// │ └── 02869.png +/// ├── dist-model.txt +/// ├── livingroom1.oni +/// ├── livingroom1-traj.txt +/// └── livingroom.ply class RedwoodIndoorLivingRoom1 : public DownloadDataset { public: RedwoodIndoorLivingRoom1(const std::string& data_root = ""); @@ -810,30 +810,30 @@ class RedwoodIndoorLivingRoom1 : public DownloadDataset { }; /// \class RedwoodIndoorLivingRoom2 (Augmented ICL-NUIM Dataset) -/// \brief Data class for `RedwoodIndoorLivingRoom1`, containing dense point +/// \brief Data class for `RedwoodIndoorLivingRoom2`, containing dense point /// cloud, rgb sequence, clean depth sequence, noisy depth sequence, oni /// sequence, and ground-truth camera trajectory. /// -/// RedwoodIndoorLivingRoom2 -/// ├── colors -/// │ ├── 00000.jpg -/// │ ├── 00001.jpg -/// │ ├── ... -/// │ └── 02349.jpg -/// ├── depth -/// │ ├── 00000.png -/// │ ├── 00001.png -/// │ ├── ... -/// │ └── 02349.png -/// ├── depth_noisy -/// │ ├── 00000.png -/// │ ├── 00001.png -/// │ ├── ... -/// │ └── 02349.png -/// ├── dist-model.txt -/// ├── livingroom2.oni -/// ├── livingroom2-traj.txt -/// └── livingroom.ply +/// RedwoodIndoorLivingRoom2 +/// ├── colors +/// │ ├── 00000.jpg +/// │ ├── 00001.jpg +/// │ ├── ... +/// │ └── 02349.jpg +/// ├── depth +/// │ ├── 00000.png +/// │ ├── 00001.png +/// │ ├── ... +/// │ └── 02349.png +/// ├── depth_noisy +/// │ ├── 00000.png +/// │ ├── 00001.png +/// │ ├── ... +/// │ └── 02349.png +/// ├── dist-model.txt +/// ├── livingroom2.oni +/// ├── livingroom2-traj.txt +/// └── livingroom.ply class RedwoodIndoorLivingRoom2 : public DownloadDataset { public: RedwoodIndoorLivingRoom2(const std::string& data_root = ""); @@ -866,30 +866,30 @@ class RedwoodIndoorLivingRoom2 : public DownloadDataset { }; /// \class RedwoodIndoorOffice1 (Augmented ICL-NUIM Dataset) -/// \brief Data class for `RedwoodIndoorLivingRoom1`, containing dense point +/// \brief Data class for `RedwoodIndoorOffice1`, containing dense point /// cloud, rgb sequence, clean depth sequence, noisy depth sequence, oni /// sequence, and ground-truth camera trajectory. /// -/// RedwoodIndoorOffice1 -/// ├── colors -/// │ ├── 00000.jpg -/// │ ├── 00001.jpg -/// │ ├── ... -/// │ └── 02689.jpg -/// ├── depth -/// │ ├── 00000.png -/// │ ├── 00001.png -/// │ ├── ... -/// │ └── 02689.png -/// ├── depth_noisy -/// │ ├── 00000.png -/// │ ├── 00001.png -/// │ ├── ... -/// │ └── 02689.png -/// ├── dist-model.txt -/// ├── office1.oni -/// ├── office1-traj.txt -/// └── office.ply +/// RedwoodIndoorOffice1 +/// ├── colors +/// │ ├── 00000.jpg +/// │ ├── 00001.jpg +/// │ ├── ... +/// │ └── 02689.jpg +/// ├── depth +/// │ ├── 00000.png +/// │ ├── 00001.png +/// │ ├── ... +/// │ └── 02689.png +/// ├── depth_noisy +/// │ ├── 00000.png +/// │ ├── 00001.png +/// │ ├── ... 
+/// │ └── 02689.png +/// ├── dist-model.txt +/// ├── office1.oni +/// ├── office1-traj.txt +/// └── office.ply class RedwoodIndoorOffice1 : public DownloadDataset { public: RedwoodIndoorOffice1(const std::string& data_root = ""); @@ -922,30 +922,30 @@ class RedwoodIndoorOffice1 : public DownloadDataset { }; /// \class RedwoodIndoorOffice2 (Augmented ICL-NUIM Dataset) -/// \brief Data class for `RedwoodIndoorLivingRoom1`, containing dense point +/// \brief Data class for `RedwoodIndoorOffice2`, containing dense point /// cloud, rgb sequence, clean depth sequence, noisy depth sequence, oni /// sequence, and ground-truth camera trajectory. /// -/// RedwoodIndoorOffice2 -/// ├── colors -/// │ ├── 00000.jpg -/// │ ├── 00001.jpg -/// │ ├── ... -/// │ └── 02537.jpg -/// ├── depth -/// │ ├── 00000.png -/// │ ├── 00001.png -/// │ ├── ... -/// │ └── 02537.png -/// ├── depth_noisy -/// │ ├── 00000.png -/// │ ├── 00001.png -/// │ ├── ... -/// │ └── 02537.png -/// ├── dist-model.txt -/// ├── office2.oni -/// ├── office2-traj.txt -/// └── office.ply +/// RedwoodIndoorOffice2 +/// ├── colors +/// │ ├── 00000.jpg +/// │ ├── 00001.jpg +/// │ ├── ... +/// │ └── 02537.jpg +/// ├── depth +/// │ ├── 00000.png +/// │ ├── 00001.png +/// │ ├── ... +/// │ └── 02537.png +/// ├── depth_noisy +/// │ ├── 00000.png +/// │ ├── 00001.png +/// │ ├── ... +/// │ └── 02537.png +/// ├── dist-model.txt +/// ├── office2.oni +/// ├── office2-traj.txt +/// └── office.ply class RedwoodIndoorOffice2 : public DownloadDataset { public: RedwoodIndoorOffice2(const std::string& data_root = ""); diff --git a/cpp/open3d/geometry/BoundingVolume.cpp b/cpp/open3d/geometry/BoundingVolume.cpp index 1bac823f524..89b0ca70e5d 100644 --- a/cpp/open3d/geometry/BoundingVolume.cpp +++ b/cpp/open3d/geometry/BoundingVolume.cpp @@ -256,6 +256,22 @@ OrientedBoundingBox AxisAlignedBoundingBox::GetMinimalOrientedBoundingBox( return OrientedBoundingBox::CreateFromAxisAlignedBoundingBox(*this); } +AxisAlignedBoundingBox::AxisAlignedBoundingBox(const Eigen::Vector3d& min_bound, + const Eigen::Vector3d& max_bound) + : Geometry3D(Geometry::GeometryType::AxisAlignedBoundingBox), + min_bound_(min_bound), + max_bound_(max_bound), + color_(1, 1, 1) { + if ((max_bound_.array() < min_bound_.array()).any()) { + open3d::utility::LogWarning( + "max_bound {} of bounding box is smaller than min_bound {} in " + "one or more axes. 
Fix input values to remove this warning.", + max_bound_, min_bound_); + max_bound_ = max_bound.cwiseMax(min_bound); + min_bound_ = max_bound.cwiseMin(min_bound); + } +} + AxisAlignedBoundingBox& AxisAlignedBoundingBox::Transform( const Eigen::Matrix4d& transformation) { utility::LogError( @@ -287,7 +303,7 @@ AxisAlignedBoundingBox& AxisAlignedBoundingBox::Scale( AxisAlignedBoundingBox& AxisAlignedBoundingBox::Rotate( const Eigen::Matrix3d& rotation, const Eigen::Vector3d& center) { utility::LogError( - "A rotation of a AxisAlignedBoundingBox would not be axis aligned " + "A rotation of an AxisAlignedBoundingBox would not be axis-aligned " "anymore, convert it to an OrientedBoundingBox first"); return *this; } @@ -314,6 +330,9 @@ AxisAlignedBoundingBox AxisAlignedBoundingBox::CreateFromPoints( const std::vector& points) { AxisAlignedBoundingBox box; if (points.empty()) { + utility::LogWarning( + "The number of points is 0 when creating axis-aligned bounding " + "box."); box.min_bound_ = Eigen::Vector3d(0.0, 0.0, 0.0); box.max_bound_ = Eigen::Vector3d(0.0, 0.0, 0.0); } else { diff --git a/cpp/open3d/geometry/BoundingVolume.h b/cpp/open3d/geometry/BoundingVolume.h index 11586a2132d..b4b310f3ca0 100644 --- a/cpp/open3d/geometry/BoundingVolume.h +++ b/cpp/open3d/geometry/BoundingVolume.h @@ -21,7 +21,7 @@ class AxisAlignedBoundingBox; /// \brief A bounding box oriented along an arbitrary frame of reference. /// /// The oriented bounding box is defined by its center position, rotation -/// maxtrix and extent. +/// matrix and extent. class OrientedBoundingBox : public Geometry3D { public: /// \brief Default constructor. @@ -151,7 +151,8 @@ class OrientedBoundingBox : public Geometry3D { /// \class AxisAlignedBoundingBox /// -/// \brief A bounding box that is aligned along the coordinate axes. +/// \brief A bounding box that is aligned along the coordinate axes and defined +/// by the min_bound and max_bound. /// /// The AxisAlignedBoundingBox uses the coordinate axes for bounding box /// generation. This means that the bounding box is oriented along the @@ -171,11 +172,7 @@ class AxisAlignedBoundingBox : public Geometry3D { /// \param min_bound Lower bounds of the bounding box for all axes. /// \param max_bound Upper bounds of the bounding box for all axes. AxisAlignedBoundingBox(const Eigen::Vector3d& min_bound, - const Eigen::Vector3d& max_bound) - : Geometry3D(Geometry::GeometryType::AxisAlignedBoundingBox), - min_bound_(min_bound), - max_bound_(max_bound), - color_(1, 1, 1) {} + const Eigen::Vector3d& max_bound); ~AxisAlignedBoundingBox() override {} public: @@ -231,14 +228,20 @@ class AxisAlignedBoundingBox : public Geometry3D { /// extents. double GetMaxExtent() const { return (max_bound_ - min_bound_).maxCoeff(); } + /// Calculates the percentage position of the given x-coordinate within + /// the x-axis range of this AxisAlignedBoundingBox. double GetXPercentage(double x) const { return (x - min_bound_(0)) / (max_bound_(0) - min_bound_(0)); } + /// Calculates the percentage position of the given y-coordinate within + /// the y-axis range of this AxisAlignedBoundingBox. double GetYPercentage(double y) const { return (y - min_bound_(1)) / (max_bound_(1) - min_bound_(1)); } + /// Calculates the percentage position of the given z-coordinate within + /// the z-axis range of this AxisAlignedBoundingBox. 
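The AxisAlignedBoundingBox constructor moved into the .cpp file now sanitizes inverted bounds: if any component of max_bound is smaller than the matching component of min_bound, it logs a warning and swaps the offending components so the box stays valid. A small sketch of that behaviour together with the newly documented percentage helpers; the numbers are illustrative only:

#include "open3d/geometry/BoundingVolume.h"

int main() {
    using open3d::geometry::AxisAlignedBoundingBox;
    // The y bounds are given in the wrong order; the constructor warns and swaps them.
    AxisAlignedBoundingBox box(Eigen::Vector3d(0.0, 5.0, 0.0),
                               Eigen::Vector3d(1.0, 2.0, 1.0));
    // box.min_bound_ == (0, 2, 0), box.max_bound_ == (1, 5, 1).
    double p = box.GetYPercentage(3.5);  // 0.5: midway through the corrected y range
    (void)p;
    return 0;
}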
double GetZPercentage(double z) const { return (z - min_bound_(2)) / (max_bound_(2) - min_bound_(2)); } diff --git a/cpp/open3d/geometry/EstimateNormals.cpp b/cpp/open3d/geometry/EstimateNormals.cpp index ac084234c25..5bc9abc0ba3 100644 --- a/cpp/open3d/geometry/EstimateNormals.cpp +++ b/cpp/open3d/geometry/EstimateNormals.cpp @@ -359,7 +359,10 @@ void PointCloud::OrientNormalsTowardsCameraLocation( } } -void PointCloud::OrientNormalsConsistentTangentPlane(size_t k) { +void PointCloud::OrientNormalsConsistentTangentPlane( + size_t k, + const double lambda /* = 0.0*/, + const double cos_alpha_tol /* = 1.0*/) { if (!HasNormals()) { utility::LogError( "No normals in the PointCloud. Call EstimateNormals() first."); @@ -380,8 +383,20 @@ void PointCloud::OrientNormalsConsistentTangentPlane(size_t k) { v1 = pt_map[v1]; size_t edge = EdgeIndex(v0, v1); if (graph_edges.count(edge) == 0) { - double dist = (points_[v0] - points_[v1]).squaredNorm(); - delaunay_graph.push_back(WeightedEdge(v0, v1, dist)); + const auto diff = points_[v0] - points_[v1]; + // penalization on normal-plane distance + double dist = diff.squaredNorm(); + double penalization = lambda * std::abs(diff.dot(normals_[v0])); + + // if cos_alpha_tol < 1 some edges will be excluded. In particular + // the ones connecting points that form an angle below a certain + // threshold (defined by the cosine) + double cos_alpha = + std::abs(diff.dot(normals_[v0])) / std::sqrt(dist); + if (cos_alpha > cos_alpha_tol) + dist = std::numeric_limits::infinity(); + + delaunay_graph.push_back(WeightedEdge(v0, v1, dist + penalization)); graph_edges.insert(edge); } }; @@ -403,12 +418,50 @@ void PointCloud::OrientNormalsConsistentTangentPlane(size_t k) { edge.weight_ = NormalWeight(edge.v0_, edge.v1_); } + // The function below takes v0 and its neighbors as inputs. + // The function returns the quartiles of the distances between the neighbors + // and a plane defined by the normal vector of v0 and the point v0. + auto compute_q1q3 = + [&](size_t v0, + std::vector neighbors) -> std::array { + std::vector dist_plane; + + for (size_t vidx1 = 0; vidx1 < neighbors.size(); ++vidx1) { + size_t v1 = size_t(neighbors[vidx1]); + const auto diff = points_[v0] - points_[v1]; + double dist = std::abs(diff.dot(normals_[v0])); + dist_plane.push_back(dist); + } + std::vector dist_plane_ord = dist_plane; + std::sort(dist_plane_ord.begin(), dist_plane_ord.end()); + // calculate quartiles + int q1_idx = static_cast(dist_plane_ord.size() * 0.25); + double q1 = dist_plane_ord[q1_idx]; + + int q3_idx = static_cast(dist_plane_ord.size() * 0.75); + double q3 = dist_plane_ord[q3_idx]; + + std::array q1q3; + q1q3[0] = q1; + q1q3[1] = q3; + + return q1q3; + }; + // Add k nearest neighbors to Riemannian graph KDTreeFlann kdtree(*this); for (size_t v0 = 0; v0 < points_.size(); ++v0) { std::vector neighbors; std::vector dists2; + kdtree.SearchKNN(points_[v0], int(k), neighbors, dists2); + + const double DEFAULT_VALUE = std::numeric_limits::quiet_NaN(); + std::array q1q3 = + lambda == 0 + ? 
std::array{DEFAULT_VALUE, DEFAULT_VALUE} + : compute_q1q3(v0, neighbors); + for (size_t vidx1 = 0; vidx1 < neighbors.size(); ++vidx1) { size_t v1 = size_t(neighbors[vidx1]); if (v0 == v1) { @@ -416,6 +469,25 @@ void PointCloud::OrientNormalsConsistentTangentPlane(size_t k) { } size_t edge = EdgeIndex(v0, v1); if (graph_edges.count(edge) == 0) { + const auto diff = points_[v0] - points_[v1]; + double normal_dist = std::abs(diff.dot(normals_[v0])); + + double dist = diff.squaredNorm(); + + // if cos_alpha_tol < 1 some edges will be excluded. In + // particular the ones connecting points that form an angle + // below a certain threshold (defined by the cosine) + double cos_alpha = + std::abs(diff.dot(normals_[v0])) / std::sqrt(dist); + if (cos_alpha > cos_alpha_tol) continue; + + // if we are in a penalizing framework do not consider outliers + // in terms of distance from the plane (if any) + if (lambda != 0) { + double iqr = q1q3[1] - q1q3[0]; + if (normal_dist > q1q3[1] + 1.5 * iqr) continue; + } + double weight = NormalWeight(v0, v1); mst.push_back(WeightedEdge(v0, v1, weight)); graph_edges.insert(edge); @@ -436,13 +508,14 @@ void PointCloud::OrientNormalsConsistentTangentPlane(size_t k) { } // find start node for tree traversal - // init with node that maximizes z - double max_z = std::numeric_limits::lowest(); + // init with node that minimizes z + + double min_z = std::numeric_limits::max(); size_t v0 = 0; for (size_t vidx = 0; vidx < points_.size(); ++vidx) { const Eigen::Vector3d &v = points_[vidx]; - if (v(2) > max_z) { - max_z = v(2); + if (v(2) < min_z) { + min_z = v(2); v0 = vidx; } } @@ -457,7 +530,7 @@ void PointCloud::OrientNormalsConsistentTangentPlane(size_t k) { n1 *= -1; } }; - TestAndOrientNormal(Eigen::Vector3d(0, 0, 1), normals_[v0]); + TestAndOrientNormal(Eigen::Vector3d(0, 0, -1), normals_[v0]); while (!traversal_queue.empty()) { v0 = traversal_queue.front(); traversal_queue.pop(); diff --git a/cpp/open3d/geometry/ISSKeypoints.cpp b/cpp/open3d/geometry/ISSKeypoints.cpp index 39f8602a737..f47abae1ad2 100644 --- a/cpp/open3d/geometry/ISSKeypoints.cpp +++ b/cpp/open3d/geometry/ISSKeypoints.cpp @@ -11,8 +11,10 @@ #include #include +#include #include #include +#include #include #include @@ -29,27 +31,26 @@ namespace { bool IsLocalMaxima(int query_idx, const std::vector& indices, const std::vector& third_eigen_values) { - for (const auto& idx : indices) { - if (third_eigen_values[query_idx] < third_eigen_values[idx]) { - return false; - } - } - return true; + return std::none_of( + indices.begin(), indices.end(), + [&third_eigen_values, value = third_eigen_values[query_idx]]( + const int idx) { return value < third_eigen_values[idx]; }); } double ComputeModelResolution(const std::vector& points, const geometry::KDTreeFlann& kdtree) { std::vector indices(2); std::vector distances(2); - double resolution = 0.0; - - for (const auto& point : points) { - if (kdtree.SearchKNN(point, 2, indices, distances) != 0) { - resolution += std::sqrt(distances[1]); - } - } - resolution /= points.size(); - return resolution; + const double resolution = std::accumulate( + points.begin(), points.end(), 0., + [&](double state, const Eigen::Vector3d& point) { + if (kdtree.SearchKNN(point, 2, indices, distances) >= 2) { + state += std::sqrt(distances[1]); + } + return state; + }); + + return resolution / static_cast(points.size()); } } // namespace @@ -118,6 +119,7 @@ std::shared_ptr ComputeISSKeypoints( if (nb_neighbors >= min_neighbors && IsLocalMaxima(i, nn_indices, third_eigen_values)) { 
+#pragma omp critical kp_indices.emplace_back(i); } } diff --git a/cpp/open3d/geometry/KDTreeFlann.cpp b/cpp/open3d/geometry/KDTreeFlann.cpp index 4324e9cbe36..d93175a8d43 100644 --- a/cpp/open3d/geometry/KDTreeFlann.cpp +++ b/cpp/open3d/geometry/KDTreeFlann.cpp @@ -104,7 +104,7 @@ int KDTreeFlann::SearchKNN(const T &query, indices.resize(knn); distance2.resize(knn); std::vector indices_eigen(knn); - int k = nanoflann_index_->index->knnSearch( + int k = nanoflann_index_->index_->knnSearch( query.data(), knn, indices_eigen.data(), distance2.data()); indices.resize(k); distance2.resize(k); @@ -125,10 +125,10 @@ int KDTreeFlann::SearchRadius(const T &query, size_t(query.rows()) != dimension_) { return -1; } - std::vector> indices_dists; - int k = nanoflann_index_->index->radiusSearch( + std::vector> indices_dists; + int k = nanoflann_index_->index_->radiusSearch( query.data(), radius * radius, indices_dists, - nanoflann::SearchParams(-1, 0.0)); + nanoflann::SearchParameters(0.0)); indices.resize(k); distance2.resize(k); for (int i = 0; i < k; ++i) { @@ -154,7 +154,7 @@ int KDTreeFlann::SearchHybrid(const T &query, } distance2.resize(max_nn); std::vector indices_eigen(max_nn); - int k = nanoflann_index_->index->knnSearch( + int k = nanoflann_index_->index_->knnSearch( query.data(), max_nn, indices_eigen.data(), distance2.data()); k = std::distance(distance2.begin(), std::lower_bound(distance2.begin(), distance2.begin() + k, @@ -178,7 +178,7 @@ bool KDTreeFlann::SetRawData(const Eigen::Map &data) { data_interface_.reset(new Eigen::Map(data)); nanoflann_index_.reset( new KDTree_t(dimension_, std::cref(*data_interface_), 15)); - nanoflann_index_->index->buildIndex(); + nanoflann_index_->index_->buildIndex(); return true; } diff --git a/cpp/open3d/geometry/PointCloud.cpp b/cpp/open3d/geometry/PointCloud.cpp index 3e55bc3acd6..b0b6e0130ae 100644 --- a/cpp/open3d/geometry/PointCloud.cpp +++ b/cpp/open3d/geometry/PointCloud.cpp @@ -541,23 +541,25 @@ std::shared_ptr PointCloud::FarthestPointDownSample( return SelectByIndex(selected_indices); } -std::shared_ptr PointCloud::Crop( - const AxisAlignedBoundingBox &bbox) const { +std::shared_ptr PointCloud::Crop(const AxisAlignedBoundingBox &bbox, + bool invert) const { if (bbox.IsEmpty()) { utility::LogError( "AxisAlignedBoundingBox either has zeros size, or has wrong " "bounds."); } - return SelectByIndex(bbox.GetPointIndicesWithinBoundingBox(points_)); + return SelectByIndex(bbox.GetPointIndicesWithinBoundingBox(points_), + invert); } -std::shared_ptr PointCloud::Crop( - const OrientedBoundingBox &bbox) const { +std::shared_ptr PointCloud::Crop(const OrientedBoundingBox &bbox, + bool invert) const { if (bbox.IsEmpty()) { utility::LogError( "AxisAlignedBoundingBox either has zeros size, or has wrong " "bounds."); } - return SelectByIndex(bbox.GetPointIndicesWithinBoundingBox(points_)); + return SelectByIndex(bbox.GetPointIndicesWithinBoundingBox(points_), + invert); } std::tuple, std::vector> diff --git a/cpp/open3d/geometry/PointCloud.h b/cpp/open3d/geometry/PointCloud.h index b55840106b6..fec0d3033d1 100644 --- a/cpp/open3d/geometry/PointCloud.h +++ b/cpp/open3d/geometry/PointCloud.h @@ -185,7 +185,9 @@ class PointCloud : public Geometry3D { /// clipped. /// /// \param bbox AxisAlignedBoundingBox to crop points. - std::shared_ptr Crop(const AxisAlignedBoundingBox &bbox) const; + /// \param invert Optional boolean to invert cropping. 
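Both PointCloud::Crop overloads gain an invert flag, so the complement of a box can be selected without assembling index lists by hand. A minimal sketch with made-up points:

#include <vector>

#include "open3d/geometry/BoundingVolume.h"
#include "open3d/geometry/PointCloud.h"

int main() {
    using namespace open3d::geometry;
    std::vector<Eigen::Vector3d> pts{Eigen::Vector3d(0.5, 0.5, 0.5),
                                     Eigen::Vector3d(2.0, 2.0, 2.0)};
    PointCloud pcd(pts);
    AxisAlignedBoundingBox bbox(Eigen::Vector3d(0, 0, 0),
                                Eigen::Vector3d(1, 1, 1));
    auto inside = pcd.Crop(bbox);                    // keeps (0.5, 0.5, 0.5)
    auto outside = pcd.Crop(bbox, /*invert=*/true);  // keeps (2.0, 2.0, 2.0)
    return 0;
}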
+ std::shared_ptr Crop(const AxisAlignedBoundingBox &bbox, + bool invert = false) const; /// \brief Function to crop pointcloud into output pointcloud /// @@ -193,7 +195,9 @@ class PointCloud : public Geometry3D { /// clipped. /// /// \param bbox OrientedBoundingBox to crop points. - std::shared_ptr Crop(const OrientedBoundingBox &bbox) const; + /// \param invert Optional boolean to invert cropping. + std::shared_ptr Crop(const OrientedBoundingBox &bbox, + bool invert = false) const; /// \brief Function to remove points that have less than \p nb_points in a /// sphere of a given radius. @@ -248,10 +252,18 @@ class PointCloud : public Geometry3D { /// \brief Function to consistently orient estimated normals based on /// consistent tangent planes as described in Hoppe et al., "Surface /// Reconstruction from Unorganized Points", 1992. + /// Further details on parameters are described in + /// Piazza, Valentini, Varetti, "Mesh Reconstruction from Point Cloud", + /// 2023. /// /// \param k k nearest neighbour for graph reconstruction for normal /// propagation. - void OrientNormalsConsistentTangentPlane(size_t k); + /// \param lambda penalty constant on the distance of a point from the + /// tangent plane \param cos_alpha_tol treshold that defines the amplitude + /// of the cone spanned by the reference normal + void OrientNormalsConsistentTangentPlane(size_t k, + const double lambda = 0.0, + const double cos_alpha_tol = 1.0); /// \brief Function to compute the point to point distances between point /// clouds. diff --git a/cpp/open3d/geometry/PointCloudSegmentation.cpp b/cpp/open3d/geometry/PointCloudSegmentation.cpp index 8e4f65f626b..2d571507934 100644 --- a/cpp/open3d/geometry/PointCloudSegmentation.cpp +++ b/cpp/open3d/geometry/PointCloudSegmentation.cpp @@ -29,6 +29,7 @@ class RandomSampler { explicit RandomSampler(const size_t total_size) : total_size_(total_size) {} std::vector operator()(size_t sample_size) { + std::lock_guard lock(mutex_); std::vector samples; samples.reserve(sample_size); @@ -48,6 +49,7 @@ class RandomSampler { private: size_t total_size_; + std::mutex mutex_; }; /// \class RANSACResult @@ -64,23 +66,24 @@ class RANSACResult { }; // Calculates the number of inliers given a list of points and a plane model, -// and the total distance between the inliers and the plane. These numbers are -// then used to evaluate how well the plane model fits the given points. +// and the total squared point-to-plane distance. +// These numbers are then used to evaluate how well the plane model fits the +// given points. 
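The rewritten EvaluateRANSACBasedOnDistance accumulates squared point-to-plane distances and reports sqrt(error / inlier_num), the actual root-mean-square distance; the previous code summed raw distances and divided by sqrt(inlier_num). A quick numeric comparison with made-up inlier distances:

#include <cmath>
#include <cstdio>
#include <vector>

int main() {
    const std::vector<double> d = {1.0, 3.0};  // example inlier distances
    double sum = 0.0, sum_sq = 0.0;
    for (double x : d) {
        sum += x;
        sum_sq += x * x;
    }
    const double n = static_cast<double>(d.size());
    std::printf("previous value: %.3f\n", sum / std::sqrt(n));     // 2.828
    std::printf("true RMSE:      %.3f\n", std::sqrt(sum_sq / n));  // 2.236
    return 0;
}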
RANSACResult EvaluateRANSACBasedOnDistance( const std::vector &points, const Eigen::Vector4d plane_model, std::vector &inliers, - double distance_threshold, - double error) { + double distance_threshold) { RANSACResult result; + double error = 0; for (size_t idx = 0; idx < points.size(); ++idx) { Eigen::Vector4d point(points[idx](0), points[idx](1), points[idx](2), 1); double distance = std::abs(plane_model.dot(point)); if (distance < distance_threshold) { - error += distance; + error += distance * distance; inliers.emplace_back(idx); } } @@ -91,7 +94,7 @@ RANSACResult EvaluateRANSACBasedOnDistance( result.inlier_rmse_ = 0; } else { result.fitness_ = (double)inlier_num / (double)points.size(); - result.inlier_rmse_ = error / std::sqrt((double)inlier_num); + result.inlier_rmse_ = std::sqrt(error / (double)inlier_num); } return result; } @@ -202,10 +205,9 @@ std::tuple> PointCloud::SegmentPlane( continue; } - double error = 0; inliers.clear(); auto this_result = EvaluateRANSACBasedOnDistance( - points_, plane_model, inliers, distance_threshold, error); + points_, plane_model, inliers, distance_threshold); #pragma omp critical { if (this_result.fitness_ > result.fitness_ || diff --git a/cpp/open3d/geometry/RGBDImageFactory.cpp b/cpp/open3d/geometry/RGBDImageFactory.cpp index 1a2b5f0ff09..1f2819ebe3a 100644 --- a/cpp/open3d/geometry/RGBDImageFactory.cpp +++ b/cpp/open3d/geometry/RGBDImageFactory.cpp @@ -18,7 +18,9 @@ std::shared_ptr RGBDImage::CreateFromColorAndDepth( bool convert_rgb_to_intensity /* = true*/) { std::shared_ptr rgbd_image = std::make_shared(); if (color.height_ != depth.height_ || color.width_ != depth.width_) { - utility::LogError("Unsupported image format."); + utility::LogError( + "RGB image size ({} {}) and depth image size ({} {}) mismatch.", + color.height_, color.width_, depth.height_, depth.width_); } rgbd_image->depth_ = *depth.ConvertDepthToFloatImage(depth_scale, depth_trunc); @@ -55,7 +57,9 @@ std::shared_ptr RGBDImage::CreateFromSUNFormat( bool convert_rgb_to_intensity /* = true*/) { std::shared_ptr rgbd_image = std::make_shared(); if (color.height_ != depth.height_ || color.width_ != depth.width_) { - utility::LogError("Unsupported image format."); + utility::LogError( + "RGB image size ({} {}) and depth image size ({} {}) mismatch.", + color.height_, color.width_, depth.height_, depth.width_); } for (int v = 0; v < depth.height_; v++) { for (int u = 0; u < depth.width_; u++) { @@ -75,7 +79,9 @@ std::shared_ptr RGBDImage::CreateFromNYUFormat( bool convert_rgb_to_intensity /* = true*/) { std::shared_ptr rgbd_image = std::make_shared(); if (color.height_ != depth.height_ || color.width_ != depth.width_) { - utility::LogError("Unsupported image format."); + utility::LogError( + "RGB image size ({} {}) and depth image size ({} {}) mismatch.", + color.height_, color.width_, depth.height_, depth.width_); } for (int v = 0; v < depth.height_; v++) { for (int u = 0; u < depth.width_; u++) { diff --git a/cpp/open3d/geometry/TriangleMesh.cpp b/cpp/open3d/geometry/TriangleMesh.cpp index 75651d159a0..48faa3b6a19 100644 --- a/cpp/open3d/geometry/TriangleMesh.cpp +++ b/cpp/open3d/geometry/TriangleMesh.cpp @@ -1598,13 +1598,10 @@ void TriangleMesh::RemoveVerticesByMask(const std::vector &vertex_mask) { std::shared_ptr TriangleMesh::SelectByIndex( const std::vector &indices, bool cleanup) const { - if (HasTriangleUvs()) { - utility::LogWarning( - "[SelectByIndex] This mesh contains triangle uvs that are " - "not handled in this function"); - } auto output = std::make_shared(); + 
bool has_triangle_material_ids = HasTriangleMaterialIds(); bool has_triangle_normals = HasTriangleNormals(); + bool has_triangle_uvs = HasTriangleUvs(); bool has_vertex_normals = HasVertexNormals(); bool has_vertex_colors = HasVertexColors(); @@ -1636,9 +1633,18 @@ std::shared_ptr TriangleMesh::SelectByIndex( if (nvidx0 >= 0 && nvidx1 >= 0 && nvidx2 >= 0) { output->triangles_.push_back( Eigen::Vector3i(nvidx0, nvidx1, nvidx2)); + if (has_triangle_material_ids) { + output->triangle_material_ids_.push_back( + triangle_material_ids_[tidx]); + } if (has_triangle_normals) { output->triangle_normals_.push_back(triangle_normals_[tidx]); } + if (has_triangle_uvs) { + output->triangle_uvs_.push_back(triangle_uvs_[tidx * 3 + 0]); + output->triangle_uvs_.push_back(triangle_uvs_[tidx * 3 + 1]); + output->triangle_uvs_.push_back(triangle_uvs_[tidx * 3 + 2]); + } } } diff --git a/cpp/open3d/geometry/TriangleMesh.h b/cpp/open3d/geometry/TriangleMesh.h index 505c255ac26..8448cccbcc0 100644 --- a/cpp/open3d/geometry/TriangleMesh.h +++ b/cpp/open3d/geometry/TriangleMesh.h @@ -853,7 +853,7 @@ class TriangleMesh : public MeshBase { std::unordered_map additionalMaps; }; - std::unordered_map materials_; + std::vector> materials_; /// List of material ids. std::vector triangle_material_ids_; diff --git a/cpp/open3d/geometry/TriangleMeshSimplification.cpp b/cpp/open3d/geometry/TriangleMeshSimplification.cpp index 6603fd20c5e..90a2a166ce7 100644 --- a/cpp/open3d/geometry/TriangleMeshSimplification.cpp +++ b/cpp/open3d/geometry/TriangleMeshSimplification.cpp @@ -311,51 +311,55 @@ std::shared_ptr TriangleMesh::SimplifyQuadricDecimation( AddPerpPlaneQuadric(tria(2), tria(0), tria(1), area); } + // Clear the unused vectors to save some memory + triangle_areas.clear(); + triangle_planes.clear(); + edge_triangle_count.clear(); + // Get valid edges and compute cost - // Note: We could also select all vertex pairs as edges with dist < eps - std::unordered_map> - vbars; - std::unordered_map> - costs; auto CostEdgeComp = [](const CostEdge& a, const CostEdge& b) { return std::get<0>(a) > std::get<0>(b); }; std::priority_queue, decltype(CostEdgeComp)> queue(CostEdgeComp); - auto AddEdge = [&](int vidx0, int vidx1, bool update) { - int min = std::min(vidx0, vidx1); - int max = std::max(vidx0, vidx1); - Eigen::Vector2i edge(min, max); - if (update || vbars.count(edge) == 0) { - const Quadric& Q0 = Qs[min]; - const Quadric& Q1 = Qs[max]; - Quadric Qbar = Q0 + Q1; - double cost; - Eigen::Vector3d vbar; - if (Qbar.IsInvertible()) { - vbar = Qbar.Minimum(); - cost = Qbar.Eval(vbar); + auto compute_cost_vbar = [&](Eigen::Vector2i e) { + const Quadric& Q0 = Qs[e(0)]; + const Quadric& Q1 = Qs[e(1)]; + const Quadric Qbar = Q0 + Q1; + double cost; + Eigen::Vector3d vbar; + if (Qbar.IsInvertible()) { + vbar = Qbar.Minimum(); + cost = Qbar.Eval(vbar); + } else { + const Eigen::Vector3d& v0 = mesh->vertices_[e(0)]; + const Eigen::Vector3d& v1 = mesh->vertices_[e(1)]; + const Eigen::Vector3d vmid = (v0 + v1) / 2; + const double cost0 = Qbar.Eval(v0); + const double cost1 = Qbar.Eval(v1); + const double costmid = Qbar.Eval(vmid); + cost = std::min(cost0, std::min(cost1, costmid)); + if (cost == costmid) { + vbar = vmid; + } else if (cost == cost0) { + vbar = v0; } else { - const Eigen::Vector3d& v0 = mesh->vertices_[vidx0]; - const Eigen::Vector3d& v1 = mesh->vertices_[vidx1]; - Eigen::Vector3d vmid = (v0 + v1) / 2; - double cost0 = Qbar.Eval(v0); - double cost1 = Qbar.Eval(v1); - double costmid = Qbar.Eval(vmid); - cost = std::min(cost0, 
std::min(cost1, costmid)); - if (cost == costmid) { - vbar = vmid; - } else if (cost == cost0) { - vbar = v0; - } else { - vbar = v1; - } + vbar = v1; } - vbars[edge] = vbar; - costs[edge] = cost; + } + return std::make_pair(cost, vbar); + }; + + std::unordered_set> + added_edges; + auto AddEdge = [&](int vidx0, int vidx1, bool update) { + const int min = std::min(vidx0, vidx1); + const int max = std::max(vidx0, vidx1); + const Eigen::Vector2i edge(min, max); + if (update || added_edges.count(edge) == 0) { + const auto cost = compute_cost_vbar(edge).first; + added_edges.insert(edge); queue.push(CostEdge(cost, min, max)); } }; @@ -366,6 +370,7 @@ std::shared_ptr TriangleMesh::SimplifyQuadricDecimation( AddEdge(triangle(1), triangle(2), false); AddEdge(triangle(2), triangle(0), false); } + added_edges.clear(); // perform incremental edge collapse bool has_vert_normal = HasVertexNormals(); @@ -384,50 +389,71 @@ std::shared_ptr TriangleMesh::SimplifyQuadricDecimation( // test if the edge has been updated (reinserted into queue) Eigen::Vector2i edge(vidx0, vidx1); + const auto cost_vbar = compute_cost_vbar(edge); + const Eigen::Vector3d vbar = cost_vbar.second; bool valid = !vertices_deleted[vidx0] && !vertices_deleted[vidx1] && - cost == costs[edge]; + cost == cost_vbar.first; if (!valid) { continue; } - // avoid flip of triangle normal - bool flipped = false; - for (int tidx : vert_to_triangles[vidx1]) { - if (triangles_deleted[tidx]) { - continue; - } - - const Eigen::Vector3i& tria = mesh->triangles_[tidx]; - bool has_vidx0 = - vidx0 == tria(0) || vidx0 == tria(1) || vidx0 == tria(2); - bool has_vidx1 = - vidx1 == tria(0) || vidx1 == tria(1) || vidx1 == tria(2); - if (has_vidx0 && has_vidx1) { - continue; - } + // avoid flip of triangle normal and creation of degenerate triangles + bool creates_invalid_triangle = false; + const double degenerate_ratio_threshold = 0.001; + std::unordered_map edges{}; + for (int vidx : {vidx1, vidx0}) { + for (int tidx : vert_to_triangles[vidx]) { + if (triangles_deleted[tidx]) { + continue; + } - Eigen::Vector3d vert0 = mesh->vertices_[tria(0)]; - Eigen::Vector3d vert1 = mesh->vertices_[tria(1)]; - Eigen::Vector3d vert2 = mesh->vertices_[tria(2)]; - Eigen::Vector3d norm_before = (vert1 - vert0).cross(vert2 - vert0); - norm_before /= norm_before.norm(); + const Eigen::Vector3i& tria = mesh->triangles_[tidx]; + const bool has_vidx0 = vidx0 == tria(0) || vidx0 == tria(1) || + vidx0 == tria(2); + const bool has_vidx1 = vidx1 == tria(0) || vidx1 == tria(1) || + vidx1 == tria(2); + if (has_vidx0 && has_vidx1) { + continue; + } - if (vidx1 == tria(0)) { - vert0 = vbars[edge]; - } else if (vidx1 == tria(1)) { - vert1 = vbars[edge]; - } else if (vidx1 == tria(2)) { - vert2 = vbars[edge]; - } + Eigen::Vector3d verts[3] = {mesh->vertices_[tria(0)], + mesh->vertices_[tria(1)], + mesh->vertices_[tria(2)]}; + Eigen::Vector3d norm_before = + (verts[1] - verts[0]).cross(verts[2] - verts[0]); + const double area_before = 0.5 * norm_before.norm(); + norm_before /= norm_before.norm(); + + for (auto i = 0; i < 3; ++i) { + if (tria(i) == vidx) { + verts[i] = vbar; + continue; + } + auto& vert_count = edges[tria(i)]; + creates_invalid_triangle |= vert_count >= 2; + vert_count += 1; + } - Eigen::Vector3d norm_after = (vert1 - vert0).cross(vert2 - vert0); - norm_after /= norm_after.norm(); - if (norm_before.dot(norm_after) < 0) { - flipped = true; - break; + Eigen::Vector3d norm_after = + (verts[1] - verts[0]).cross(verts[2] - verts[0]); + const double area_after = 0.5 * 
norm_after.norm(); + norm_after /= norm_after.norm(); + // Disallow flipping the triangle normal + creates_invalid_triangle |= norm_before.dot(norm_after) < 0; + // Disallow creating very small triangles (possibly degenerate) + creates_invalid_triangle |= + area_after < degenerate_ratio_threshold * area_before; + + if (creates_invalid_triangle) { + // Goto is the only way to jump out of two loops without + // multiple redundant if()'s. Yes, it can lead to spagetti + // code if abused but we're doing a very short jump here + goto end_flip_loop; + } } } - if (flipped) { + end_flip_loop: + if (creates_invalid_triangle) { continue; } @@ -460,7 +486,7 @@ std::shared_ptr TriangleMesh::SimplifyQuadricDecimation( } // update vertex vidx0 to vbar - mesh->vertices_[vidx0] = vbars[edge]; + mesh->vertices_[vidx0] = vbar; Qs[vidx0] += Qs[vidx1]; if (has_vert_normal) { mesh->vertex_normals_[vidx0] = 0.5 * (mesh->vertex_normals_[vidx0] + diff --git a/cpp/open3d/geometry/VoxelGrid.cpp b/cpp/open3d/geometry/VoxelGrid.cpp index 4a6dac2d1a7..dafe6b52c01 100644 --- a/cpp/open3d/geometry/VoxelGrid.cpp +++ b/cpp/open3d/geometry/VoxelGrid.cpp @@ -197,6 +197,8 @@ void VoxelGrid::AddVoxel(const Voxel &voxel) { voxels_[voxel.grid_index_] = voxel; } +void VoxelGrid::RemoveVoxel(const Eigen::Vector3i &idx) { voxels_.erase(idx); } + std::vector VoxelGrid::CheckIfIncluded( const std::vector &queries) { std::vector output; diff --git a/cpp/open3d/geometry/VoxelGrid.h b/cpp/open3d/geometry/VoxelGrid.h index d12d664c5f9..30a228206e0 100644 --- a/cpp/open3d/geometry/VoxelGrid.h +++ b/cpp/open3d/geometry/VoxelGrid.h @@ -123,6 +123,9 @@ class VoxelGrid : public Geometry3D { /// Add a voxel with specified grid index and color. void AddVoxel(const Voxel &voxel); + /// Remove a voxel with specified grid index. + void RemoveVoxel(const Eigen::Vector3i &idx); + /// Return a vector of 3D coordinates that define the indexed voxel cube. std::vector GetVoxelBoundingPoints( const Eigen::Vector3i &index) const; @@ -242,7 +245,7 @@ class VoxelGrid : public Geometry3D { public: /// Size of the voxel. double voxel_size_ = 0.0; - /// Coorindate of the origin point. + /// Coordinate of the origin point. Eigen::Vector3d origin_ = Eigen::Vector3d::Zero(); /// Voxels contained in voxel grid std::unordered_map VoxelGrid::CreateFromTriangleMeshWithinBounds( box_center, box_half_size, v0, v1, v2)) { Eigen::Vector3i grid_index(widx, hidx, didx); output->AddVoxel(geometry::Voxel(grid_index)); - break; + // Don't `break` here, since a triangle can span + // across multiple voxels. } } } diff --git a/cpp/open3d/io/file_format/FileASSIMP.cpp b/cpp/open3d/io/file_format/FileASSIMP.cpp index dec40259566..e78d8be7718 100644 --- a/cpp/open3d/io/file_format/FileASSIMP.cpp +++ b/cpp/open3d/io/file_format/FileASSIMP.cpp @@ -71,6 +71,7 @@ void LoadTextures(const std::string& filename, if (mat->GetTextureCount(type) > 0) { aiString path; mat->GetTexture(type, 0, &path); + // If the texture is an embedded texture, use `GetEmbeddedTexture`. if (auto texture = scene->GetEmbeddedTexture(path.C_Str())) { if (texture->CheckFormat("png")) { @@ -91,13 +92,10 @@ void LoadTextures(const std::string& filename, if (image->HasData()) { img = image; } - } - - else { + } else { utility::LogWarning( "This format of image is not supported."); } - } // Else, build the path to it. 
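VoxelGrid gains a RemoveVoxel counterpart to AddVoxel, and the voxelization loop in CreateFromTriangleMeshWithinBounds no longer stops at the first intersection, since one triangle can span several voxels. A short sketch of the new removal call; the values are illustrative:

#include "open3d/geometry/VoxelGrid.h"

int main() {
    open3d::geometry::VoxelGrid grid;
    grid.voxel_size_ = 0.05;
    grid.AddVoxel(open3d::geometry::Voxel(Eigen::Vector3i(1, 2, 3)));
    grid.RemoveVoxel(Eigen::Vector3i(1, 2, 3));  // erases the voxel at that grid index
    return 0;
}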
else { @@ -122,7 +120,12 @@ void LoadTextures(const std::string& filename, } }; - texture_loader(aiTextureType_DIFFUSE, maps.albedo); + // Prefer BASE_COLOR texture as assimp now uses it for PBR workflows + if (mat->GetTextureCount(aiTextureType_BASE_COLOR) > 0) { + texture_loader(aiTextureType_BASE_COLOR, maps.albedo); + } else { + texture_loader(aiTextureType_DIFFUSE, maps.albedo); + } texture_loader(aiTextureType_NORMALS, maps.normal); // Assimp may place ambient occlusion texture in AMBIENT_OCCLUSION if // format has AO support. Prefer that texture if it is preset. Otherwise, @@ -237,12 +240,13 @@ bool ReadTriangleMeshUsingASSIMP( } // Now load the materials + mesh.materials_.resize(scene->mNumMaterials); for (size_t i = 0; i < scene->mNumMaterials; ++i) { auto* mat = scene->mMaterials[i]; - // create material structure to match this name - auto& mesh_material = - mesh.materials_[std::string(mat->GetName().C_Str())]; + // Set the material structure to match this name + auto& mesh_material = mesh.materials_[i].second; + mesh.materials_[i].first = mat->GetName().C_Str(); using MaterialParameter = geometry::TriangleMesh::Material::MaterialParameter; @@ -277,9 +281,9 @@ bool ReadTriangleMeshUsingASSIMP( // For legacy visualization support if (mesh_material.albedo) { - mesh.textures_.push_back(*mesh_material.albedo->FlipVertical()); + mesh.textures_.emplace_back(*mesh_material.albedo->FlipVertical()); } else { - mesh.textures_.push_back(geometry::Image()); + mesh.textures_.emplace_back(); } } @@ -414,7 +418,8 @@ bool ReadModelUsingAssimp(const std::string& filename, mat->Get(AI_MATKEY_SHEEN, o3d_mat.base_reflectance); mat->Get(AI_MATKEY_CLEARCOAT_THICKNESS, o3d_mat.base_clearcoat); - mat->Get(AI_MATKEY_CLEARCOAT_ROUGHNESS, + mat->Get(AI_MATKEY_CLEARCOAT_FACTOR, o3d_mat.base_clearcoat); + mat->Get(AI_MATKEY_CLEARCOAT_ROUGHNESS_FACTOR, o3d_mat.base_clearcoat_roughness); mat->Get(AI_MATKEY_ANISOTROPY, o3d_mat.base_anisotropy); aiString alpha_mode; diff --git a/cpp/open3d/io/file_format/FileOBJ.cpp b/cpp/open3d/io/file_format/FileOBJ.cpp index 3123d8ad05a..a06b30a97fb 100644 --- a/cpp/open3d/io/file_format/FileOBJ.cpp +++ b/cpp/open3d/io/file_format/FileOBJ.cpp @@ -140,8 +140,11 @@ bool ReadTriangleMeshFromOBJ(const std::string& filename, using MaterialParameter = geometry::TriangleMesh::Material::MaterialParameter; - for (auto& material : materials) { - auto& meshMaterial = mesh.materials_[material.name]; + mesh.materials_.resize(materials.size()); + for (std::size_t i = 0; i < materials.size(); ++i) { + auto& material = materials[i]; + mesh.materials_[i].first = material.name; + auto& meshMaterial = mesh.materials_[i].second; meshMaterial.baseColor = MaterialParameter::CreateRGB( material.diffuse[0], material.diffuse[1], material.diffuse[2]); diff --git a/cpp/open3d/ml/impl/continuous_conv/ContinuousConvBackpropFilter.cuh b/cpp/open3d/ml/impl/continuous_conv/ContinuousConvBackpropFilter.cuh index dc9b905f4d4..c40ae43874e 100644 --- a/cpp/open3d/ml/impl/continuous_conv/ContinuousConvBackpropFilter.cuh +++ b/cpp/open3d/ml/impl/continuous_conv/ContinuousConvBackpropFilter.cuh @@ -45,7 +45,7 @@ namespace impl { /// \param texture_alignment The texture alignment in bytes. This is used /// for allocating segments within the temporary memory. 
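TriangleMesh::materials_ changes from a map keyed by material name to an ordered std::vector of (name, Material) pairs, which both the ASSIMP and OBJ readers now fill by index. Code that previously relied on operator[] of the map needs an explicit search instead; a helper along these lines could be used (FindMaterial is an illustrative name, not part of this change):

#include <algorithm>
#include <string>

#include "open3d/geometry/TriangleMesh.h"

const open3d::geometry::TriangleMesh::Material* FindMaterial(
        const open3d::geometry::TriangleMesh& mesh, const std::string& name) {
    auto it = std::find_if(mesh.materials_.begin(), mesh.materials_.end(),
                           [&name](const auto& kv) { return kv.first == name; });
    return it == mesh.materials_.end() ? nullptr : &it->second;
}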
/// -/// \param filter_backrop Output array for the computed filter gradient +/// \param filter_backprop Output array for the computed filter gradient /// with shape [depth,height,width, inp channels, out channels] /// /// \param filter_dims The sizes of the filter dimensions. The size of diff --git a/cpp/open3d/ml/tensorflow/CMakeLists.txt b/cpp/open3d/ml/tensorflow/CMakeLists.txt index 756e76b07b5..cecd4152c49 100644 --- a/cpp/open3d/ml/tensorflow/CMakeLists.txt +++ b/cpp/open3d/ml/tensorflow/CMakeLists.txt @@ -136,6 +136,9 @@ set_target_properties(open3d_tf_ops PROPERTIES set_target_properties(open3d_tf_ops PROPERTIES PREFIX "") set_target_properties(open3d_tf_ops PROPERTIES DEBUG_POSTFIX "_debug") +# _GLIBCXX_USER_CXX11_ABI is set separately +list(REMOVE_ITEM Tensorflow_DEFINITIONS "_GLIBCXX_USE_CXX11_ABI=0" + "_GLIBCXX_USE_CXX11_ABI=1") target_compile_definitions(open3d_tf_ops PRIVATE "${Tensorflow_DEFINITIONS}") if (BUILD_CUDA_MODULE) diff --git a/cpp/open3d/ml/tensorflow/continuous_conv/ContinuousConvBackpropFilterOps.cpp b/cpp/open3d/ml/tensorflow/continuous_conv/ContinuousConvBackpropFilterOps.cpp index a5c9bd1ffc5..06a4df2b463 100644 --- a/cpp/open3d/ml/tensorflow/continuous_conv/ContinuousConvBackpropFilterOps.cpp +++ b/cpp/open3d/ml/tensorflow/continuous_conv/ContinuousConvBackpropFilterOps.cpp @@ -120,8 +120,8 @@ REGISTER_OP("Open3DContinuousConvBackpropFilter") if (c->ValueKnown(c->Dim(filters_shape, i))) { int64_t n = c->Value(c->Dim(filters_shape, i)); if (n < 1) - return Status(error::INVALID_ARGUMENT, - "Each filter dimension must be >= 1"); + return absl::InvalidArgumentError( + "Each filter dimension must be >= 1"); } } @@ -147,7 +147,7 @@ REGISTER_OP("Open3DContinuousConvBackpropFilter") } c->set_output(0, filters_shape); - return Status::OK(); + return Status(); }) .Doc(R"doc( Computes the backprop for the filter of the ContinuousConv diff --git a/cpp/open3d/ml/tensorflow/continuous_conv/ContinuousConvOps.cpp b/cpp/open3d/ml/tensorflow/continuous_conv/ContinuousConvOps.cpp index 549bcbc2e5e..a86f9dc7396 100644 --- a/cpp/open3d/ml/tensorflow/continuous_conv/ContinuousConvOps.cpp +++ b/cpp/open3d/ml/tensorflow/continuous_conv/ContinuousConvOps.cpp @@ -116,8 +116,8 @@ REGISTER_OP("Open3DContinuousConv") if (c->ValueKnown(c->Dim(filters_shape, i))) { int64_t n = c->Value(c->Dim(filters_shape, i)); if (n < 1) - return Status(error::INVALID_ARGUMENT, - "Each filter dimension must be >= 1"); + return absl::InvalidArgumentError( + "Each filter dimension must be >= 1"); } } @@ -139,7 +139,7 @@ REGISTER_OP("Open3DContinuousConv") c->MakeShape({output_first_dim, output_channel_dim}); c->set_output(0, output_shape); - return Status::OK(); + return Status(); }) .Doc(R"doc( Continuous convolution of two pointclouds. 
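The TensorFlow op registrations are updated for newer TensorFlow, where tensorflow::Status is based on absl::Status: the shape-inference lambdas now signal success with a default-constructed Status instead of Status::OK(), and report bad inputs via absl::InvalidArgumentError rather than Status(error::INVALID_ARGUMENT, ...). The pattern, sketched on a hypothetical op (the op name and shapes are illustrative, not part of this change):

#include "absl/status/status.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/shape_inference.h"

using tensorflow::Status;

REGISTER_OP("Open3DExampleOp")
        .Input("values: float")
        .Output("out: float")
        .SetShapeFn([](tensorflow::shape_inference::InferenceContext* c) {
            if (c->num_inputs() != 1) {
                return absl::InvalidArgumentError("Expected exactly one input.");
            }
            c->set_output(0, c->input(0));
            return Status();  // success; Status::OK() is gone with the absl-based Status
        });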
diff --git a/cpp/open3d/ml/tensorflow/continuous_conv/ContinuousConvTransposeBackpropFilterOps.cpp b/cpp/open3d/ml/tensorflow/continuous_conv/ContinuousConvTransposeBackpropFilterOps.cpp index fe7dd41ca88..613224b562c 100644 --- a/cpp/open3d/ml/tensorflow/continuous_conv/ContinuousConvTransposeBackpropFilterOps.cpp +++ b/cpp/open3d/ml/tensorflow/continuous_conv/ContinuousConvTransposeBackpropFilterOps.cpp @@ -139,8 +139,8 @@ REGISTER_OP("Open3DContinuousConvTransposeBackpropFilter") if (c->ValueKnown(c->Dim(filters_shape, i))) { int64_t n = c->Value(c->Dim(filters_shape, i)); if (n < 1) - return Status(error::INVALID_ARGUMENT, - "Each filter dimension must be >= 1"); + return absl::InvalidArgumentError( + "Each filter dimension must be >= 1"); } } @@ -166,7 +166,7 @@ REGISTER_OP("Open3DContinuousConvTransposeBackpropFilter") } c->set_output(0, filters_shape); - return Status::OK(); + return Status(); }) .Doc(R"doc( Computes the backrop for the filter of the ContinuousConvTranspose diff --git a/cpp/open3d/ml/tensorflow/continuous_conv/ContinuousConvTransposeOps.cpp b/cpp/open3d/ml/tensorflow/continuous_conv/ContinuousConvTransposeOps.cpp index f91f2d86acf..b04c5b62afa 100644 --- a/cpp/open3d/ml/tensorflow/continuous_conv/ContinuousConvTransposeOps.cpp +++ b/cpp/open3d/ml/tensorflow/continuous_conv/ContinuousConvTransposeOps.cpp @@ -145,8 +145,8 @@ REGISTER_OP("Open3DContinuousConvTranspose") if (c->ValueKnown(c->Dim(filters_shape, i))) { int64_t n = c->Value(c->Dim(filters_shape, i)); if (n < 1) - return Status(error::INVALID_ARGUMENT, - "Each filter dimension must be >= 1"); + return absl::InvalidArgumentError( + "Each filter dimension must be >= 1"); } } @@ -168,7 +168,7 @@ REGISTER_OP("Open3DContinuousConvTranspose") c->MakeShape({output_first_dim, output_channel_dim}); c->set_output(0, output_shape); - return Status::OK(); + return Status(); }) .Doc(R"doc( Continuous tranpose convolution of two pointclouds. diff --git a/cpp/open3d/ml/tensorflow/misc/BuildSpatialHashTableOps.cpp b/cpp/open3d/ml/tensorflow/misc/BuildSpatialHashTableOps.cpp index 4d271d50830..ff5289e95ac 100644 --- a/cpp/open3d/ml/tensorflow/misc/BuildSpatialHashTableOps.cpp +++ b/cpp/open3d/ml/tensorflow/misc/BuildSpatialHashTableOps.cpp @@ -52,7 +52,7 @@ REGISTER_OP("Open3DBuildSpatialHashTable") hash_table_splits_shape = MakeShapeHandle(c, batch_size + 1); c->set_output(2, hash_table_splits_shape); - return Status::OK(); + return Status(); }) .Doc(R"doc( Creates a spatial hash table meant as input for fixed_radius_search @@ -78,7 +78,7 @@ The following example shows how **build_spatial_hash_table** and radius = 1.0 - # build the spatial hash table for fixex_radius_search + # build the spatial hash table for fixed_radius_search table = ml3d.ops.build_spatial_hash_table(points, radius, points_row_splits=torch.LongTensor([0,5]), diff --git a/cpp/open3d/ml/tensorflow/misc/FixedRadiusSearchOps.cpp b/cpp/open3d/ml/tensorflow/misc/FixedRadiusSearchOps.cpp index a74ab8c752b..eeb2f02cffb 100644 --- a/cpp/open3d/ml/tensorflow/misc/FixedRadiusSearchOps.cpp +++ b/cpp/open3d/ml/tensorflow/misc/FixedRadiusSearchOps.cpp @@ -79,7 +79,7 @@ REGISTER_OP("Open3DFixedRadiusSearch") neighbors_distance_shape = c->MakeShape({0}); c->set_output(2, neighbors_distance_shape); - return Status::OK(); + return Status(); }) .Doc(R"doc( Computes the indices of all neighbors within a radius. 
diff --git a/cpp/open3d/ml/tensorflow/misc/InvertNeighborsListOps.cpp b/cpp/open3d/ml/tensorflow/misc/InvertNeighborsListOps.cpp index 5fb0a7f6457..cc2c935bbb8 100644 --- a/cpp/open3d/ml/tensorflow/misc/InvertNeighborsListOps.cpp +++ b/cpp/open3d/ml/tensorflow/misc/InvertNeighborsListOps.cpp @@ -58,7 +58,7 @@ REGISTER_OP("Open3DInvertNeighborsList") // the attributes will have the same shape c->set_output(2, inp_neighbors_attributes); - return Status::OK(); + return Status(); }) .Doc(R"doc( Inverts a neighbors list made of neighbors_index and neighbors_row_splits. diff --git a/cpp/open3d/ml/tensorflow/misc/KnnSearchOps.cpp b/cpp/open3d/ml/tensorflow/misc/KnnSearchOps.cpp index e68bcd53c50..112d022e28c 100644 --- a/cpp/open3d/ml/tensorflow/misc/KnnSearchOps.cpp +++ b/cpp/open3d/ml/tensorflow/misc/KnnSearchOps.cpp @@ -67,7 +67,7 @@ REGISTER_OP("Open3DKnnSearch") neighbors_distance_shape = c->MakeShape({0}); c->set_output(2, neighbors_distance_shape); - return Status::OK(); + return Status(); }) .Doc(R"doc( Computes the indices of k nearest neighbors. diff --git a/cpp/open3d/ml/tensorflow/misc/NmsOps.cpp b/cpp/open3d/ml/tensorflow/misc/NmsOps.cpp index 0b2ded2325f..b8341190ea1 100644 --- a/cpp/open3d/ml/tensorflow/misc/NmsOps.cpp +++ b/cpp/open3d/ml/tensorflow/misc/NmsOps.cpp @@ -34,7 +34,7 @@ REGISTER_OP("Open3DNms") keep_indices = c->MakeShape({c->UnknownDim()}); c->set_output(0, keep_indices); - return Status::OK(); + return Status(); }) .Doc(R"doc( Performs non-maximum suppression of bounding boxes. diff --git a/cpp/open3d/ml/tensorflow/misc/RadiusSearchOps.cpp b/cpp/open3d/ml/tensorflow/misc/RadiusSearchOps.cpp index 94d2e0d0097..3dd99b9e460 100644 --- a/cpp/open3d/ml/tensorflow/misc/RadiusSearchOps.cpp +++ b/cpp/open3d/ml/tensorflow/misc/RadiusSearchOps.cpp @@ -69,7 +69,7 @@ REGISTER_OP("Open3DRadiusSearch") neighbors_distance_shape = c->MakeShape({0}); c->set_output(2, neighbors_distance_shape); - return Status::OK(); + return Status(); }) .Doc(R"doc( Computes the indices and distances of all neighbours within a radius. diff --git a/cpp/open3d/ml/tensorflow/misc/ReduceSubarraysSumOps.cpp b/cpp/open3d/ml/tensorflow/misc/ReduceSubarraysSumOps.cpp index 14be37412c0..5226079e783 100644 --- a/cpp/open3d/ml/tensorflow/misc/ReduceSubarraysSumOps.cpp +++ b/cpp/open3d/ml/tensorflow/misc/ReduceSubarraysSumOps.cpp @@ -33,7 +33,7 @@ REGISTER_OP("Open3DReduceSubarraysSum") sums_shape = c->MakeShape({sums_size}); c->set_output(0, sums_shape); - return Status::OK(); + return Status(); }) .Doc(R"doc( Computes the sum for each subarray in a flat vector of arrays. diff --git a/cpp/open3d/ml/tensorflow/misc/VoxelPoolingOps.cpp b/cpp/open3d/ml/tensorflow/misc/VoxelPoolingOps.cpp index 64e88539db0..ec163ea415b 100644 --- a/cpp/open3d/ml/tensorflow/misc/VoxelPoolingOps.cpp +++ b/cpp/open3d/ml/tensorflow/misc/VoxelPoolingOps.cpp @@ -53,7 +53,7 @@ REGISTER_OP("Open3DVoxelPooling") c->WithValue(c->Dim(positions_shape, -1), 3, &d)); } - return Status::OK(); + return Status(); }) .Doc(R"doc( Spatial pooling for point clouds by combining points that fall into the same voxel bin. @@ -202,7 +202,7 @@ REGISTER_OP("Open3DVoxelPoolingGrad") c->Dim(pooled_positions_shape, -1), 3, &d)); } - return Status::OK(); + return Status(); }) .Doc(R"doc( Gradient for features in VoxelPooling. For internal use only. 
diff --git a/cpp/open3d/ml/tensorflow/misc/VoxelizeOps.cpp b/cpp/open3d/ml/tensorflow/misc/VoxelizeOps.cpp index e9eb3c19f63..7c345edfd3f 100644 --- a/cpp/open3d/ml/tensorflow/misc/VoxelizeOps.cpp +++ b/cpp/open3d/ml/tensorflow/misc/VoxelizeOps.cpp @@ -58,7 +58,7 @@ REGISTER_OP("Open3DVoxelize") voxel_batch_splits = c->MakeShape({c->UnknownDim()}); c->set_output(3, voxel_batch_splits); - return Status::OK(); + return Status(); }) .Doc(R"doc( Voxelization for point clouds. diff --git a/cpp/open3d/ml/tensorflow/pointnet/BallQueryOps.cpp b/cpp/open3d/ml/tensorflow/pointnet/BallQueryOps.cpp index 2b0c863e279..07d7a03d34f 100644 --- a/cpp/open3d/ml/tensorflow/pointnet/BallQueryOps.cpp +++ b/cpp/open3d/ml/tensorflow/pointnet/BallQueryOps.cpp @@ -32,6 +32,6 @@ REGISTER_OP("Open3DBallQuery") ::tensorflow::shape_inference::ShapeHandle output = c->MakeShape({c->Dim(dims1, 0), c->Dim(dims1, 1), nsample}); c->set_output(0, output); - return Status::OK(); + return Status(); }) .Doc(R"doc( TODO )doc"); diff --git a/cpp/open3d/ml/tensorflow/pointnet/InterpolateOps.cpp b/cpp/open3d/ml/tensorflow/pointnet/InterpolateOps.cpp index 18363a9e265..f16e9020609 100644 --- a/cpp/open3d/ml/tensorflow/pointnet/InterpolateOps.cpp +++ b/cpp/open3d/ml/tensorflow/pointnet/InterpolateOps.cpp @@ -30,7 +30,7 @@ REGISTER_OP("Open3DThreeNN") c->MakeShape({c->Dim(dims1, 0), c->Dim(dims1, 1), 3}); c->set_output(0, output); c->set_output(1, output); - return Status::OK(); + return Status(); }) .Doc(R"doc( TODO )doc"); @@ -50,7 +50,7 @@ REGISTER_OP("Open3DThreeInterpolate") ::tensorflow::shape_inference::ShapeHandle output = c->MakeShape( {c->Dim(dims1, 0), c->Dim(dims1, 1), c->Dim(dims2, 1)}); c->set_output(0, output); - return Status::OK(); + return Status(); }) .Doc(R"doc( TODO )doc"); @@ -70,6 +70,6 @@ REGISTER_OP("Open3DThreeInterpolateGrad") c->MakeShape({c->Dim(dims1, 0), c->Dim(dims1, 1), M}); c->set_output(0, output); c->set_output(1, output); - return Status::OK(); + return Status(); }) .Doc(R"doc( TODO )doc"); diff --git a/cpp/open3d/ml/tensorflow/pointnet/RoiPoolOps.cpp b/cpp/open3d/ml/tensorflow/pointnet/RoiPoolOps.cpp index 10b72a5b4b2..8a72b3b0027 100644 --- a/cpp/open3d/ml/tensorflow/pointnet/RoiPoolOps.cpp +++ b/cpp/open3d/ml/tensorflow/pointnet/RoiPoolOps.cpp @@ -42,6 +42,6 @@ REGISTER_OP("Open3DRoiPool") ::tensorflow::shape_inference::ShapeHandle output2 = c->MakeShape({c->Dim(dims1, 0), c->Dim(dims1, 1)}); c->set_output(1, output2); - return Status::OK(); + return Status(); }) .Doc(R"doc( TODO )doc"); diff --git a/cpp/open3d/ml/tensorflow/pointnet/SamplingOps.cpp b/cpp/open3d/ml/tensorflow/pointnet/SamplingOps.cpp index 1c1cad2f3e2..b33a636d3f1 100644 --- a/cpp/open3d/ml/tensorflow/pointnet/SamplingOps.cpp +++ b/cpp/open3d/ml/tensorflow/pointnet/SamplingOps.cpp @@ -30,6 +30,6 @@ REGISTER_OP("Open3DFurthestPointSampling") ::tensorflow::shape_inference::ShapeHandle output = c->MakeShape({c->Dim(dims1, 0), npoint}); c->set_output(0, output); - return Status::OK(); + return Status(); }) .Doc(R"doc( TODO )doc"); diff --git a/cpp/open3d/ml/tensorflow/pvcnn/TrilinearDevoxelizeOps.cpp b/cpp/open3d/ml/tensorflow/pvcnn/TrilinearDevoxelizeOps.cpp index edf77e60268..7097f5f3287 100644 --- a/cpp/open3d/ml/tensorflow/pvcnn/TrilinearDevoxelizeOps.cpp +++ b/cpp/open3d/ml/tensorflow/pvcnn/TrilinearDevoxelizeOps.cpp @@ -51,7 +51,7 @@ REGISTER_OP("Open3DTrilinearDevoxelize") c->set_output(1, out2); c->set_output(2, out2); - return Status::OK(); + return Status(); }) .Doc(R"doc( Trilinear Devoxelize. 
@@ -167,7 +167,7 @@ REGISTER_OP("Open3DTrilinearDevoxelizeGrad") c->set_output(0, out); - return Status::OK(); + return Status(); }) .Doc(R"doc( Gradient function for Trilinear Devoxelize op. diff --git a/cpp/open3d/ml/tensorflow/sparse_conv/SparseConvBackpropFilterOps.cpp b/cpp/open3d/ml/tensorflow/sparse_conv/SparseConvBackpropFilterOps.cpp index 70916caf16e..5cc83701568 100644 --- a/cpp/open3d/ml/tensorflow/sparse_conv/SparseConvBackpropFilterOps.cpp +++ b/cpp/open3d/ml/tensorflow/sparse_conv/SparseConvBackpropFilterOps.cpp @@ -66,7 +66,7 @@ REGISTER_OP("Open3DSparseConvBackpropFilter") CHECK_SHAPE_HANDLE(c, out_features_gradient, num_out, out_channels); c->set_output(0, filters); - return Status::OK(); + return Status(); }) .Doc(R"doc( Computes the backprop for the filter of the SparseConv diff --git a/cpp/open3d/ml/tensorflow/sparse_conv/SparseConvOps.cpp b/cpp/open3d/ml/tensorflow/sparse_conv/SparseConvOps.cpp index 2dd55fd3551..264a443ef25 100644 --- a/cpp/open3d/ml/tensorflow/sparse_conv/SparseConvOps.cpp +++ b/cpp/open3d/ml/tensorflow/sparse_conv/SparseConvOps.cpp @@ -66,7 +66,7 @@ REGISTER_OP("Open3DSparseConv") ShapeHandle out_features_shape = MakeShapeHandle(c, num_out, out_channels); c->set_output(0, out_features_shape); - return Status::OK(); + return Status(); }) .Doc(R"doc( General sparse convolution. diff --git a/cpp/open3d/ml/tensorflow/sparse_conv/SparseConvTransposeBackpropFilterOps.cpp b/cpp/open3d/ml/tensorflow/sparse_conv/SparseConvTransposeBackpropFilterOps.cpp index a2482d19eab..af9741ad39e 100644 --- a/cpp/open3d/ml/tensorflow/sparse_conv/SparseConvTransposeBackpropFilterOps.cpp +++ b/cpp/open3d/ml/tensorflow/sparse_conv/SparseConvTransposeBackpropFilterOps.cpp @@ -71,7 +71,7 @@ REGISTER_OP("Open3DSparseConvTransposeBackpropFilter") CHECK_SHAPE_HANDLE(c, out_features_gradient, num_out, out_channels); c->set_output(0, filters); - return Status::OK(); + return Status(); }) .Doc(R"doc( Computes the backrop for the filter of the SparseConvTranspose diff --git a/cpp/open3d/ml/tensorflow/sparse_conv/SparseConvTransposeOps.cpp b/cpp/open3d/ml/tensorflow/sparse_conv/SparseConvTransposeOps.cpp index 2b5035521b2..a1daf49a16c 100644 --- a/cpp/open3d/ml/tensorflow/sparse_conv/SparseConvTransposeOps.cpp +++ b/cpp/open3d/ml/tensorflow/sparse_conv/SparseConvTransposeOps.cpp @@ -75,7 +75,7 @@ REGISTER_OP("Open3DSparseConvTranspose") ShapeHandle out_features_shape = MakeShapeHandle(c, num_out, out_channels); c->set_output(0, out_features_shape); - return Status::OK(); + return Status(); }) .Doc(R"doc( Sparse tranpose convolution of two pointclouds. 
diff --git a/cpp/open3d/ml/tensorflow/tf_subsampling/tf_batch_subsampling.cpp b/cpp/open3d/ml/tensorflow/tf_subsampling/tf_batch_subsampling.cpp index 3425a436146..74472a667f4 100644 --- a/cpp/open3d/ml/tensorflow/tf_subsampling/tf_batch_subsampling.cpp +++ b/cpp/open3d/ml/tensorflow/tf_subsampling/tf_batch_subsampling.cpp @@ -47,7 +47,7 @@ REGISTER_OP("Open3DBatchGridSubsampling") TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 2, &input0_shape)); c->set_output(0, input0_shape); c->set_output(1, c->input(1)); - return Status::OK(); + return Status(); }); class BatchGridSubsamplingOp : public OpKernel { diff --git a/cpp/open3d/ml/tensorflow/tf_subsampling/tf_subsampling.cpp b/cpp/open3d/ml/tensorflow/tf_subsampling/tf_subsampling.cpp index 31b2d51571c..fd7162966f8 100644 --- a/cpp/open3d/ml/tensorflow/tf_subsampling/tf_subsampling.cpp +++ b/cpp/open3d/ml/tensorflow/tf_subsampling/tf_subsampling.cpp @@ -44,7 +44,7 @@ REGISTER_OP("Open3DGridSubsampling") ::tensorflow::shape_inference::ShapeHandle input; TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 2, &input)); c->set_output(0, input); - return Status::OK(); + return Status(); }); class GridSubsamplingOp : public OpKernel { diff --git a/cpp/open3d/pipelines/registration/Feature.cpp b/cpp/open3d/pipelines/registration/Feature.cpp index 531844a4ce1..2815159dcc4 100644 --- a/cpp/open3d/pipelines/registration/Feature.cpp +++ b/cpp/open3d/pipelines/registration/Feature.cpp @@ -139,6 +139,69 @@ std::shared_ptr ComputeFPFHFeature( return feature; } +CorrespondenceSet CorrespondencesFromFeatures(const Feature &source_features, + const Feature &target_features, + bool mutual_filter, + float mutual_consistent_ratio) { + const int num_searches = mutual_filter ? 2 : 1; + + // Access by reference, since Eigen Matrix could be copied + std::array, 2> features{ + std::reference_wrapper(source_features), + std::reference_wrapper(target_features)}; + std::array num_pts{int(source_features.data_.cols()), + int(target_features.data_.cols())}; + std::vector corres(num_searches); + + const int kMaxThreads = utility::EstimateMaxThreads(); + const int kOuterThreads = std::min(kMaxThreads, num_searches); + const int kInnerThreads = std::max(kMaxThreads / num_searches, 1); + (void)kOuterThreads; // Avoids compiler warning if OpenMP is disabled + (void)kInnerThreads; +#pragma omp parallel for num_threads(kOuterThreads) + for (int k = 0; k < num_searches; ++k) { + geometry::KDTreeFlann kdtree(features[1 - k]); + + int num_pts_k = num_pts[k]; + corres[k] = CorrespondenceSet(num_pts_k); +#pragma omp parallel for num_threads(kInnerThreads) + for (int i = 0; i < num_pts_k; i++) { + std::vector corres_tmp(1); + std::vector dist_tmp(1); + + kdtree.SearchKNN(Eigen::VectorXd(features[k].get().data_.col(i)), 1, + corres_tmp, dist_tmp); + int j = corres_tmp[0]; + corres[k][i] = Eigen::Vector2i(i, j); + } + } + + // corres[0]: corres_ij, corres[1]: corres_ji + if (!mutual_filter) return corres[0]; + + // should not use parallel for due to emplace back + CorrespondenceSet corres_mutual; + int num_src_pts = num_pts[0]; + for (int i = 0; i < num_src_pts; ++i) { + int j = corres[0][i](1); + if (corres[1][j](1) == i) { + corres_mutual.emplace_back(i, j); + } + } + + // Empirically mutual correspondence set should not be too small + if (int(corres_mutual.size()) >= + int(mutual_consistent_ratio * num_src_pts)) { + utility::LogDebug("{:d} correspondences remain after mutual filter", + corres_mutual.size()); + return corres_mutual; + } + utility::LogWarning( + "Too few correspondences ({:d}) 
after mutual filter, fall back to " + "original correspondences.", + corres_mutual.size()); + return corres[0]; +} } // namespace registration } // namespace pipelines } // namespace open3d diff --git a/cpp/open3d/pipelines/registration/Feature.h b/cpp/open3d/pipelines/registration/Feature.h index 540e682c257..e102883b160 100644 --- a/cpp/open3d/pipelines/registration/Feature.h +++ b/cpp/open3d/pipelines/registration/Feature.h @@ -22,6 +22,8 @@ class PointCloud; namespace pipelines { namespace registration { +typedef std::vector CorrespondenceSet; + /// \class Feature /// /// \brief Class to store featrues for registration. @@ -54,6 +56,28 @@ std::shared_ptr ComputeFPFHFeature( const geometry::KDTreeSearchParam &search_param = geometry::KDTreeSearchParamKNN()); +/// \brief Function to find correspondences via 1-nearest neighbor feature +/// matching. Target is used to construct a nearest neighbor search +/// object, in order to query source. +/// \param source_features (D, N) feature +/// \param target_features (D, M) feature +/// \param mutual_filter Boolean flag, only return correspondences (i, j) s.t. +/// source_features[i] and target_features[j] are mutually the nearest neighbor. +/// \param mutual_consistency_ratio Float threshold to decide whether the number +/// of correspondences is sufficient. Only used when mutual_filter is set to +/// True. +/// \return A CorrespondenceSet. When mutual_filter is disabled: the first +/// column is arange(0, N) of source, and the second column is the corresponding +/// index of target. When mutual_filter is enabled, return the filtering subset +/// of the aforementioned correspondence set where source[i] and target[j] are +/// mutually the nearest neighbor. If the subset size is smaller than +/// mutual_consistency_ratio * N, return the unfiltered set. 
+CorrespondenceSet CorrespondencesFromFeatures( + const Feature &source_features, + const Feature &target_features, + bool mutual_filter = false, + float mutual_consistency_ratio = 0.1); + } // namespace registration } // namespace pipelines } // namespace open3d diff --git a/cpp/open3d/pipelines/registration/Registration.cpp b/cpp/open3d/pipelines/registration/Registration.cpp index 2a9aa9c142b..8527d620bf0 100644 --- a/cpp/open3d/pipelines/registration/Registration.cpp +++ b/cpp/open3d/pipelines/registration/Registration.cpp @@ -280,8 +280,8 @@ RegistrationResult RegistrationRANSACBasedOnCorrespondence( RegistrationResult RegistrationRANSACBasedOnFeatureMatching( const geometry::PointCloud &source, const geometry::PointCloud &target, - const Feature &source_feature, - const Feature &target_feature, + const Feature &source_features, + const Feature &target_features, bool mutual_filter, double max_correspondence_distance, const TransformationEstimation @@ -295,62 +295,11 @@ RegistrationResult RegistrationRANSACBasedOnFeatureMatching( return RegistrationResult(); } - int num_src_pts = int(source.points_.size()); - int num_tgt_pts = int(target.points_.size()); - - geometry::KDTreeFlann kdtree_target(target_feature); - pipelines::registration::CorrespondenceSet corres_ij(num_src_pts); - -#pragma omp parallel for num_threads(utility::EstimateMaxThreads()) - for (int i = 0; i < num_src_pts; i++) { - std::vector corres_tmp(1); - std::vector dist_tmp(1); - - kdtree_target.SearchKNN(Eigen::VectorXd(source_feature.data_.col(i)), 1, - corres_tmp, dist_tmp); - int j = corres_tmp[0]; - corres_ij[i] = Eigen::Vector2i(i, j); - } - - // Do reverse check if mutual_filter is enabled - if (mutual_filter) { - geometry::KDTreeFlann kdtree_source(source_feature); - pipelines::registration::CorrespondenceSet corres_ji(num_tgt_pts); - -#pragma omp parallel for num_threads(utility::EstimateMaxThreads()) - for (int j = 0; j < num_tgt_pts; ++j) { - std::vector corres_tmp(1); - std::vector dist_tmp(1); - kdtree_source.SearchKNN( - Eigen::VectorXd(target_feature.data_.col(j)), 1, corres_tmp, - dist_tmp); - int i = corres_tmp[0]; - corres_ji[j] = Eigen::Vector2i(i, j); - } - - pipelines::registration::CorrespondenceSet corres_mutual; - for (int i = 0; i < num_src_pts; ++i) { - int j = corres_ij[i](1); - if (corres_ji[j](0) == i) { - corres_mutual.emplace_back(i, j); - } - } - - // Empirically mutual correspondence set should not be too small - if (int(corres_mutual.size()) >= ransac_n * 3) { - utility::LogDebug("{:d} correspondences remain after mutual filter", - corres_mutual.size()); - return RegistrationRANSACBasedOnCorrespondence( - source, target, corres_mutual, max_correspondence_distance, - estimation, ransac_n, checkers, criteria); - } - utility::LogDebug( - "Too few correspondences after mutual filter, fall back to " - "original correspondences."); - } + CorrespondenceSet corres = CorrespondencesFromFeatures( + source_features, target_features, mutual_filter); return RegistrationRANSACBasedOnCorrespondence( - source, target, corres_ij, max_correspondence_distance, estimation, + source, target, corres, max_correspondence_distance, estimation, ransac_n, checkers, criteria); } diff --git a/cpp/open3d/pipelines/registration/Registration.h b/cpp/open3d/pipelines/registration/Registration.h index 454f2b3d807..276ef6b64e2 100644 --- a/cpp/open3d/pipelines/registration/Registration.h +++ b/cpp/open3d/pipelines/registration/Registration.h @@ -185,8 +185,8 @@ RegistrationResult RegistrationRANSACBasedOnCorrespondence( 
/// /// \param source The source point cloud. /// \param target The target point cloud. -/// \param source_feature Source point cloud feature. -/// \param target_feature Target point cloud feature. +/// \param source_features Source point cloud feature. +/// \param target_features Target point cloud feature. /// \param mutual_filter Enables mutual filter such that the correspondence of /// the source point's correspondence is itself. /// \param max_correspondence_distance Maximum correspondence points-pair @@ -197,8 +197,8 @@ RegistrationResult RegistrationRANSACBasedOnCorrespondence( RegistrationResult RegistrationRANSACBasedOnFeatureMatching( const geometry::PointCloud &source, const geometry::PointCloud &target, - const Feature &source_feature, - const Feature &target_feature, + const Feature &source_features, + const Feature &target_features, bool mutual_filter, double max_correspondence_distance, const TransformationEstimation &estimation = diff --git a/cpp/open3d/t/geometry/BoundingVolume.cpp b/cpp/open3d/t/geometry/BoundingVolume.cpp index 8e437bd43b5..6a221502efe 100644 --- a/cpp/open3d/t/geometry/BoundingVolume.cpp +++ b/cpp/open3d/t/geometry/BoundingVolume.cpp @@ -42,9 +42,12 @@ AxisAlignedBoundingBox::AxisAlignedBoundingBox(const core::Tensor &min_bound, // Check if the bounding box is valid. if (Volume() < 0) { - utility::LogError( - "Invalid axis-aligned bounding box. Please make sure all " - "the elements in max bound are larger than min bound."); + utility::LogWarning( + "max_bound {} of bounding box is smaller than min_bound {} in " + "one or more axes. Fix input values to remove this warning.", + max_bound_.ToString(false), min_bound_.ToString(false)); + max_bound_ = open3d::core::Maximum(min_bound, max_bound); + min_bound_ = open3d::core::Minimum(min_bound, max_bound); } } @@ -80,7 +83,7 @@ void AxisAlignedBoundingBox::SetMinBound(const core::Tensor &min_bound) { if (Volume() < 0) { utility::LogWarning( "Invalid axis-aligned bounding box. 
Please make sure all " - "the elements in min bound are smaller than min bound."); + "the elements in min bound are smaller than max bound."); min_bound_ = tmp; } } @@ -110,8 +113,8 @@ void AxisAlignedBoundingBox::SetColor(const core::Tensor &color) { if (color.Max({0}).To(core::Float64).Item() > 1.0 || color.Min({0}).To(core::Float64).Item() < 0.0) { utility::LogError( - "The color must be in the range [0, 1], but for range [{}, " - "{}].", + "The color must be in the range [0, 1], but found in range " + "[{}, {}].", color.Min({0}).To(core::Float64).Item(), color.Max({0}).To(core::Float64).Item()); } @@ -220,7 +223,9 @@ core::Tensor AxisAlignedBoundingBox::GetPointIndicesWithinBoundingBox( } std::string AxisAlignedBoundingBox::ToString() const { - return fmt::format("AxisAlignedBoundingBox[{}, {}]", GetDtype().ToString(), + return fmt::format("AxisAlignedBoundingBox[{} - {}, {}, {}]", + GetMinBound().ToString(false), + GetMaxBound().ToString(false), GetDtype().ToString(), GetDevice().ToString()); } @@ -228,8 +233,10 @@ AxisAlignedBoundingBox AxisAlignedBoundingBox::CreateFromPoints( const core::Tensor &points) { core::AssertTensorShape(points, {utility::nullopt, 3}); core::AssertTensorDtypes(points, {core::Float32, core::Float64}); - if (points.GetLength() <= 3) { - utility::LogWarning("The points number is less than 3."); + if (points.GetLength() <= 0) { + utility::LogWarning( + "The number of points is 0 when creating axis-aligned bounding " + "box."); return AxisAlignedBoundingBox(points.GetDevice()); } else { const core::Tensor min_bound = points.Min({0}); @@ -318,7 +325,7 @@ OrientedBoundingBox::OrientedBoundingBox(const core::Tensor ¢er, utility::LogError( "Invalid oriented bounding box. Please make sure the values of " "extent are all positive and the rotation matrix is " - "othogonal."); + "orthogonal."); } } @@ -385,8 +392,8 @@ void OrientedBoundingBox::SetColor(const core::Tensor &color) { if (color.Max({0}).To(core::Float64).Item() > 1.0 || color.Min({0}).To(core::Float64).Item() < 0.0) { utility::LogError( - "The color must be in the range [0, 1], but for range [{}, " - "{}].", + "The color must be in the range [0, 1], but found in range " + "[{}, {}].", color.Min({0}).To(core::Float64).Item(), color.Max({0}).To(core::Float64).Item()); } @@ -405,7 +412,7 @@ core::Tensor OrientedBoundingBox::GetMaxBound() const { core::Tensor OrientedBoundingBox::GetBoxPoints() const { const t::geometry::AxisAlignedBoundingBox aabb(GetExtent() * -0.5, GetExtent() * 0.5); - return aabb.GetBoxPoints().Matmul(GetRotation()).Add(GetCenter()); + return aabb.GetBoxPoints().Matmul(GetRotation().T()).Add(GetCenter()); } OrientedBoundingBox &OrientedBoundingBox::Translate( diff --git a/cpp/open3d/t/geometry/BoundingVolume.h b/cpp/open3d/t/geometry/BoundingVolume.h index 1eec772e898..b11cae01777 100644 --- a/cpp/open3d/t/geometry/BoundingVolume.h +++ b/cpp/open3d/t/geometry/BoundingVolume.h @@ -93,12 +93,12 @@ class AxisAlignedBoundingBox : public Geometry, public DrawableGeometry { /// \param min_bound Tensor with {3,} shape, and type float32 or float64. void SetMinBound(const core::Tensor &min_bound); - /// \brief Set the max boundof the box. + /// \brief Set the max bound of the box. /// If the data type of the given tensor differs from the data type of the /// box, an exception will be thrown. /// /// If the max bound makes the box invalid, it will not be set to the box. - /// \param min_bound Tensor with {3,} shape, and type float32 or float64. 
+ /// \param max_bound Tensor with {3,} shape, and type float32 or float64. void SetMaxBound(const core::Tensor &max_bound); /// \brief Set the color of the box. @@ -146,7 +146,7 @@ class AxisAlignedBoundingBox : public Geometry, public DrawableGeometry { const utility::optional ¢er = utility::nullopt); /// \brief Add operation for axis-aligned bounding box. - /// The device of ohter box must be the same as the device of the current + /// The device of other box must be the same as the device of the current /// box. AxisAlignedBoundingBox &operator+=(const AxisAlignedBoundingBox &other); @@ -156,16 +156,22 @@ class AxisAlignedBoundingBox : public Geometry, public DrawableGeometry { /// Returns the half extent of the bounding box. core::Tensor GetHalfExtent() const { return GetExtent() / 2; } - /// Returns the maximum extent, i.e. the maximum of X, Y and Z axis' + /// \brief Returns the maximum extent, i.e. the maximum of X, Y and Z axis' /// extents. double GetMaxExtent() const { return GetExtent().Max({0}).To(core::Float64).Item(); } + /// Calculates the percentage position of the given x-coordinate within + /// the x-axis range of this AxisAlignedBoundingBox. double GetXPercentage(double x) const; + /// Calculates the percentage position of the given y-coordinate within + /// the y-axis range of this AxisAlignedBoundingBox. double GetYPercentage(double y) const; + /// Calculates the percentage position of the given z-coordinate within + /// the z-axis range of this AxisAlignedBoundingBox. double GetZPercentage(double z) const; /// Returns the volume of the bounding box. @@ -173,8 +179,9 @@ class AxisAlignedBoundingBox : public Geometry, public DrawableGeometry { return GetExtent().Prod({0}).To(core::Float64).Item(); } - /// Returns the eight points that define the bounding box. The Return tensor - /// has shape {8, 3} and data type same as the box. + /// \brief Returns the eight points that define the bounding box. + /// + /// The Return tensor has shape {8, 3} and data type same as the box. core::Tensor GetBoxPoints() const; /// \brief Indices to points that are within the bounding box. @@ -206,16 +213,21 @@ class AxisAlignedBoundingBox : public Geometry, public DrawableGeometry { /// Creates the axis-aligned box that encloses the set of points. /// \param points A list of points with data type of float32 or float64 (N x - /// 3 tensor, where N must be larger than 3). + /// 3 tensor). /// \return AxisAlignedBoundingBox with same data type and device as input /// points. static AxisAlignedBoundingBox CreateFromPoints(const core::Tensor &points); protected: + /// The device to use for the bounding box. The default is CPU:0. core::Device device_ = core::Device("CPU:0"); + /// The data type of the bounding box. core::Dtype dtype_ = core::Float32; + /// The lower x, y, z bounds of the bounding box. core::Tensor min_bound_; + /// The upper x, y, z bounds of the bounding box. core::Tensor max_bound_; + /// The color of the bounding box in RGB. The default is white. core::Tensor color_; }; @@ -223,7 +235,7 @@ class AxisAlignedBoundingBox : public Geometry, public DrawableGeometry { /// \brief A bounding box oriented along an arbitrary frame of reference. /// /// - (center, rotation, extent): The oriented bounding box is defined by its -/// center position, rotation maxtrix and extent. +/// center position, rotation matrix and extent. 
/// - Usage /// - OrientedBoundingBox::GetCenter() /// - OrientedBoundingBox::SetCenter(const core::Tensor ¢er) @@ -373,8 +385,9 @@ class OrientedBoundingBox : public Geometry, public DrawableGeometry { return GetExtent().Prod({0}).To(core::Float64).Item(); } - /// Returns the eight points that define the bounding box. The Return tensor - /// has shape {8, 3} and data type same as the box. + /// \brief Returns the eight points that define the bounding box. + /// + /// The Return tensor has shape {8, 3} and data type same as the box. /// /// \verbatim /// ------- x diff --git a/cpp/open3d/t/geometry/PointCloud.cpp b/cpp/open3d/t/geometry/PointCloud.cpp index 6ec721ffa4a..f4c5b47f068 100644 --- a/cpp/open3d/t/geometry/PointCloud.cpp +++ b/cpp/open3d/t/geometry/PointCloud.cpp @@ -275,30 +275,66 @@ PointCloud PointCloud::SelectByIndex( return pcd; } -PointCloud PointCloud::VoxelDownSample( - double voxel_size, const core::HashBackendType &backend) const { +PointCloud PointCloud::VoxelDownSample(double voxel_size, + const std::string &reduction) const { if (voxel_size <= 0) { utility::LogError("voxel_size must be positive."); } - core::Tensor points_voxeld = GetPointPositions() / voxel_size; - core::Tensor points_voxeli = points_voxeld.Floor().To(core::Int64); - - core::HashSet points_voxeli_hashset(points_voxeli.GetLength(), core::Int64, - {3}, device_, backend); - - core::Tensor buf_indices, masks; - points_voxeli_hashset.Insert(points_voxeli, buf_indices, masks); + if (reduction != "mean") { + utility::LogError("Reduction can only be 'mean' for VoxelDownSample."); + } - PointCloud pcd_down(GetPointPositions().GetDevice()); + // Discretize voxels. + core::Tensor voxeld = GetPointPositions() / voxel_size; + core::Tensor voxeli = voxeld.Floor().To(core::Int64); + + // Map discrete voxels to indices. + core::HashSet voxeli_hashset(voxeli.GetLength(), core::Int64, {3}, device_); + + // Index map: (0, original_points) -> (0, unique_points). + core::Tensor index_map_point2voxel, masks; + voxeli_hashset.Insert(voxeli, index_map_point2voxel, masks); + + // Insert and find are two different passes. + // In the insertion pass, -1/false is returned for already existing + // downsampled corresponding points. + // In the find pass, actual indices are returned corresponding downsampled + // points. + voxeli_hashset.Find(voxeli, index_map_point2voxel, masks); + index_map_point2voxel = index_map_point2voxel.To(core::Int64); + + int64_t num_points = voxeli.GetLength(); + int64_t num_voxels = voxeli_hashset.Size(); + + // Count the number of points in each voxel. + auto voxel_num_points = + core::Tensor::Zeros({num_voxels}, core::Float32, device_); + voxel_num_points.IndexAdd_( + /*dim*/ 0, index_map_point2voxel, + core::Tensor::Ones({num_points}, core::Float32, device_)); + + // Create a new point cloud. + PointCloud pcd_down(device_); for (auto &kv : point_attr_) { - if (kv.first == "positions") { - pcd_down.SetPointAttr(kv.first, - points_voxeli.IndexGet({masks}).To( - GetPointPositions().GetDtype()) * - voxel_size); + auto point_attr = kv.second; + + std::string attr_string = kv.first; + auto attr_dtype = point_attr.GetDtype(); + + // Use float to avoid unsupported tensor types. 
+ core::SizeVector attr_shape = point_attr.GetShape(); + attr_shape[0] = num_voxels; + auto voxel_attr = + core::Tensor::Zeros(attr_shape, core::Float32, device_); + if (reduction == "mean") { + voxel_attr.IndexAdd_(0, index_map_point2voxel, + point_attr.To(core::Float32)); + voxel_attr /= voxel_num_points.View({-1, 1}); + voxel_attr = voxel_attr.To(attr_dtype); } else { - pcd_down.SetPointAttr(kv.first, kv.second.IndexGet({masks})); + utility::LogError("Unsupported reduction type {}.", reduction); } + pcd_down.SetPointAttr(attr_string, voxel_attr); } return pcd_down; @@ -695,12 +731,15 @@ void PointCloud::OrientNormalsTowardsCameraLocation( } } -void PointCloud::OrientNormalsConsistentTangentPlane(size_t k) { +void PointCloud::OrientNormalsConsistentTangentPlane( + size_t k, + const double lambda /* = 0.0*/, + const double cos_alpha_tol /* = 1.0*/) { PointCloud tpcd(GetPointPositions()); tpcd.SetPointNormals(GetPointNormals()); open3d::geometry::PointCloud lpcd = tpcd.ToLegacy(); - lpcd.OrientNormalsConsistentTangentPlane(k); + lpcd.OrientNormalsConsistentTangentPlane(k, lambda, cos_alpha_tol); SetPointNormals(core::eigen_converter::EigenVector3dVectorToTensor( lpcd.normals_, GetPointPositions().GetDtype(), GetDevice())); diff --git a/cpp/open3d/t/geometry/PointCloud.h b/cpp/open3d/t/geometry/PointCloud.h index 68827fa489c..995e1b5ba50 100644 --- a/cpp/open3d/t/geometry/PointCloud.h +++ b/cpp/open3d/t/geometry/PointCloud.h @@ -329,9 +329,9 @@ class PointCloud : public Geometry, public DrawableGeometry { /// \brief Downsamples a point cloud with a specified voxel size. /// /// \param voxel_size Voxel size. A positive number. + /// \param reduction Reduction type. Currently only support "mean". PointCloud VoxelDownSample(double voxel_size, - const core::HashBackendType &backend = - core::HashBackendType::Default) const; + const std::string &reduction = "mean") const; /// \brief Downsamples a point cloud by selecting every kth index point and /// its attributes. @@ -517,10 +517,18 @@ class PointCloud : public Geometry, public DrawableGeometry { /// \brief Function to consistently orient estimated normals based on /// consistent tangent planes as described in Hoppe et al., "Surface /// Reconstruction from Unorganized Points", 1992. + /// Further details on parameters are described in + /// Piazza, Valentini, Varetti, "Mesh Reconstruction from Point Cloud", + /// 2023. /// /// \param k k nearest neighbour for graph reconstruction for normal /// propagation. - void OrientNormalsConsistentTangentPlane(size_t k); + /// \param lambda penalty constant on the distance of a point from the + /// tangent plane \param cos_alpha_tol treshold that defines the amplitude + /// of the cone spanned by the reference normal + void OrientNormalsConsistentTangentPlane(size_t k, + const double lambda = 0.0, + const double cos_alpha_tol = 1.0); /// \brief Function to compute point color gradients. If radius is provided, /// then HybridSearch is used, otherwise KNN-Search is used. 
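Note: the two PointCloud changes above alter user-facing signatures. VoxelDownSample now takes a reduction string ("mean" is the only supported mode) instead of a hash-backend argument and averages every point attribute per voxel, and OrientNormalsConsistentTangentPlane gains the optional lambda and cos_alpha_tol parameters from Piazza, Valentini, Varetti (2023). A rough usage sketch follows; the file name and parameter values are made up, and EstimateNormals is assumed to be available on the tensor PointCloud as in the existing API.

    #include "open3d/Open3D.h"

    // Sketch only: demonstrates the updated tensor PointCloud calls; the
    // values are illustrative, not recommendations.
    void DownsampleAndOrient() {
        open3d::t::geometry::PointCloud pcd;
        open3d::t::io::ReadPointCloud("example.ply", pcd);  // hypothetical file

        // New signature: attributes (colors, normals, ...) are averaged per
        // voxel; the old HashBackendType argument is gone.
        auto down = pcd.VoxelDownSample(/*voxel_size=*/0.05,
                                        /*reduction=*/"mean");

        // New optional parameters: lambda penalizes the distance of a point
        // from the tangent plane, cos_alpha_tol bounds the propagation cone;
        // the values shown match the declared defaults.
        down.EstimateNormals();
        down.OrientNormalsConsistentTangentPlane(/*k=*/30, /*lambda=*/0.0,
                                                 /*cos_alpha_tol=*/1.0);
    }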
diff --git a/cpp/open3d/t/geometry/RaycastingScene.cpp b/cpp/open3d/t/geometry/RaycastingScene.cpp index ce7982baac6..af7dd3f1480 100644 --- a/cpp/open3d/t/geometry/RaycastingScene.cpp +++ b/cpp/open3d/t/geometry/RaycastingScene.cpp @@ -110,6 +110,75 @@ void CountIntersectionsFunc(const RTCFilterFunctionNArguments* args) { } } +struct ListIntersectionsContext { + RTCIntersectContext context; + std::vector>* + previous_geom_prim_ID_tfar; + unsigned int* ray_ids; + unsigned int* geometry_ids; + unsigned int* primitive_ids; + float* primitive_uvs; + float* t_hit; + Eigen::VectorXi cumsum; + unsigned int* track_intersections; +}; + +void ListIntersectionsFunc(const RTCFilterFunctionNArguments* args) { + int* valid = args->valid; + const ListIntersectionsContext* context = + reinterpret_cast(args->context); + struct RTCRayN* rayN = args->ray; + struct RTCHitN* hitN = args->hit; + const unsigned int N = args->N; + + // Avoid crashing when debug visualizations are used. + if (context == nullptr) return; + + std::vector>* + previous_geom_prim_ID_tfar = context->previous_geom_prim_ID_tfar; + unsigned int* ray_ids = context->ray_ids; + unsigned int* geometry_ids = context->geometry_ids; + unsigned int* primitive_ids = context->primitive_ids; + float* primitive_uvs = context->primitive_uvs; + float* t_hit = context->t_hit; + Eigen::VectorXi cumsum = context->cumsum; + unsigned int* track_intersections = context->track_intersections; + + // Iterate over all rays in ray packet. + for (unsigned int ui = 0; ui < N; ui += 1) { + // Calculate loop and execution mask + unsigned int vi = ui + 0; + if (vi >= N) continue; + + // Ignore inactive rays. + if (valid[vi] != -1) continue; + + // Read ray/hit from ray structure. + RTCRay ray = rtcGetRayFromRayN(rayN, N, ui); + RTCHit hit = rtcGetHitFromHitN(hitN, N, ui); + + unsigned int ray_id = ray.id; + std::tuple gpID(hit.geomID, hit.primID, + ray.tfar); + auto& prev_gpIDtfar = previous_geom_prim_ID_tfar->operator[](ray_id); + if (std::get<0>(prev_gpIDtfar) != hit.geomID || + (std::get<1>(prev_gpIDtfar) != hit.primID && + std::get<2>(prev_gpIDtfar) != ray.tfar)) { + size_t idx = cumsum[ray_id] + track_intersections[ray_id]; + ray_ids[idx] = ray_id; + geometry_ids[idx] = hit.geomID; + primitive_ids[idx] = hit.primID; + primitive_uvs[idx * 2 + 0] = hit.u; + primitive_uvs[idx * 2 + 1] = hit.v; + t_hit[idx] = ray.tfar; + previous_geom_prim_ID_tfar->operator[](ray_id) = gpID; + ++(track_intersections[ray_id]); + } + // Always ignore hit + valid[ui] = 0; + } +} + // Adapted from common/math/closest_point.h inline Vec3fa closestPointTriangle(Vec3fa const& p, Vec3fa const& a, @@ -267,6 +336,19 @@ struct RaycastingScene::Impl { geometry_ptrs_; core::Device tensor_device_; // cpu + bool devprop_join_commit; + + void CommitScene() { + if (!scene_committed_) { + if (devprop_join_commit) { + rtcJoinCommitScene(scene_); + } else { + rtcCommitScene(scene_); + } + scene_committed_ = true; + } + } + template void CastRays(const float* const rays, const size_t num_rays, @@ -276,10 +358,7 @@ struct RaycastingScene::Impl { float* primitive_uvs, float* primitive_normals, const int nthreads) { - if (!scene_committed_) { - rtcCommitScene(scene_); - scene_committed_ = true; - } + CommitScene(); struct RTCIntersectContext context; rtcInitIntersectContext(&context); @@ -365,10 +444,7 @@ struct RaycastingScene::Impl { const float tfar, int8_t* occluded, const int nthreads) { - if (!scene_committed_) { - rtcCommitScene(scene_); - scene_committed_ = true; - } + CommitScene(); struct 
RTCIntersectContext context; rtcInitIntersectContext(&context); @@ -420,10 +496,7 @@ struct RaycastingScene::Impl { const size_t num_rays, int* intersections, const int nthreads) { - if (!scene_committed_) { - rtcCommitScene(scene_); - scene_committed_ = true; - } + CommitScene(); memset(intersections, 0, sizeof(int) * num_rays); @@ -478,6 +551,83 @@ struct RaycastingScene::Impl { } } + void ListIntersections(const float* const rays, + const size_t num_rays, + const size_t num_intersections, + const Eigen::VectorXi& cumsum, + unsigned int* track_intersections, + unsigned int* ray_ids, + unsigned int* geometry_ids, + unsigned int* primitive_ids, + float* primitive_uvs, + float* t_hit, + const int nthreads) { + CommitScene(); + + memset(track_intersections, 0, sizeof(uint32_t) * num_rays); + memset(ray_ids, 0, sizeof(uint32_t) * num_intersections); + memset(geometry_ids, 0, sizeof(uint32_t) * num_intersections); + memset(primitive_ids, 0, sizeof(uint32_t) * num_intersections); + memset(primitive_uvs, 0, sizeof(float) * num_intersections * 2); + memset(t_hit, 0, sizeof(float) * num_intersections); + + std::vector> + previous_geom_prim_ID_tfar( + num_rays, + std::make_tuple(uint32_t(RTC_INVALID_GEOMETRY_ID), + uint32_t(RTC_INVALID_GEOMETRY_ID), + 0.f)); + + ListIntersectionsContext context; + rtcInitIntersectContext(&context.context); + context.context.filter = ListIntersectionsFunc; + context.previous_geom_prim_ID_tfar = &previous_geom_prim_ID_tfar; + context.ray_ids = ray_ids; + context.geometry_ids = geometry_ids; + context.primitive_ids = primitive_ids; + context.primitive_uvs = primitive_uvs; + context.t_hit = t_hit; + context.cumsum = cumsum; + context.track_intersections = track_intersections; + + auto LoopFn = [&](const tbb::blocked_range& range) { + std::vector rayhits(range.size()); + + for (size_t i = range.begin(); i < range.end(); ++i) { + RTCRayHit* rh = &rayhits[i - range.begin()]; + const float* r = &rays[i * 6]; + rh->ray.org_x = r[0]; + rh->ray.org_y = r[1]; + rh->ray.org_z = r[2]; + rh->ray.dir_x = r[3]; + rh->ray.dir_y = r[4]; + rh->ray.dir_z = r[5]; + rh->ray.tnear = 0; + rh->ray.tfar = std::numeric_limits::infinity(); + rh->ray.mask = 0; + rh->ray.flags = 0; + rh->ray.id = i; + rh->hit.geomID = RTC_INVALID_GEOMETRY_ID; + rh->hit.instID[0] = RTC_INVALID_GEOMETRY_ID; + } + rtcIntersect1M(scene_, &context.context, &rayhits[0], range.size(), + sizeof(RTCRayHit)); + }; + + if (nthreads > 0) { + tbb::task_arena arena(nthreads); + arena.execute([&]() { + tbb::parallel_for( + tbb::blocked_range(0, num_rays, BATCH_SIZE), + LoopFn); + }); + } else { + tbb::parallel_for( + tbb::blocked_range(0, num_rays, BATCH_SIZE), + LoopFn); + } + } + void ComputeClosestPoints(const float* const query_points, const size_t num_query_points, float* closest_points, @@ -486,10 +636,7 @@ struct RaycastingScene::Impl { float* primitive_uvs, float* primitive_normals, const int nthreads) { - if (!scene_committed_) { - rtcCommitScene(scene_); - scene_committed_ = true; - } + CommitScene(); auto LoopFn = [&](const tbb::blocked_range& range) { for (size_t i = range.begin(); i < range.end(); ++i) { @@ -552,6 +699,9 @@ RaycastingScene::RaycastingScene(int64_t nthreads) impl_->scene_, RTC_SCENE_FLAG_ROBUST | RTC_SCENE_FLAG_CONTEXT_FILTER_FUNCTION); + impl_->devprop_join_commit = rtcGetDeviceProperty( + impl_->device_, RTC_DEVICE_PROPERTY_JOIN_COMMIT_SUPPORTED); + impl_->scene_committed_ = false; } @@ -687,6 +837,64 @@ core::Tensor RaycastingScene::CountIntersections(const core::Tensor& rays, return 
intersections; } +std::unordered_map +RaycastingScene::ListIntersections(const core::Tensor& rays, + const int nthreads) { + AssertTensorDtypeLastDimDeviceMinNDim(rays, "rays", 6, + impl_->tensor_device_); + + auto shape = rays.GetShape(); + shape.pop_back(); // Remove last dim, we want to use this shape for the + // results. + size_t num_rays = shape.NumElements(); + + // determine total number of intersections + core::Tensor intersections(shape, core::Dtype::FromType()); + core::Tensor track_intersections(shape, core::Dtype::FromType()); + auto data = rays.Contiguous(); + impl_->CountIntersections(data.GetDataPtr(), num_rays, + intersections.GetDataPtr(), nthreads); + + // prepare shape with that number of elements + Eigen::Map intersections_vector( + intersections.GetDataPtr(), num_rays); + size_t num_intersections = intersections_vector.sum(); + + // prepare ray allocations (cumsum) + Eigen::VectorXi cumsum = Eigen::MatrixXi::Zero(num_rays, 1); + std::partial_sum(intersections_vector.begin(), + intersections_vector.end() - 1, cumsum.begin() + 1, + std::plus()); + + // generate results structure + std::unordered_map result; + shape.clear(); + shape.push_back(num_rays + 1); + result["ray_splits"] = core::Tensor(shape, core::UInt32); + uint32_t* ptr = result["ray_splits"].GetDataPtr(); + for (int i = 0; i < cumsum.size(); ++i) { + ptr[i] = cumsum[i]; + } + ptr[num_rays] = num_intersections; + shape[0] = intersections_vector.sum(); + result["ray_ids"] = core::Tensor(shape, core::UInt32); + result["geometry_ids"] = core::Tensor(shape, core::UInt32); + result["primitive_ids"] = core::Tensor(shape, core::UInt32); + result["t_hit"] = core::Tensor(shape, core::Float32); + shape.push_back(2); + result["primitive_uvs"] = core::Tensor(shape, core::Float32); + + impl_->ListIntersections(data.GetDataPtr(), num_rays, + num_intersections, cumsum, + track_intersections.GetDataPtr(), + result["ray_ids"].GetDataPtr(), + result["geometry_ids"].GetDataPtr(), + result["primitive_ids"].GetDataPtr(), + result["primitive_uvs"].GetDataPtr(), + result["t_hit"].GetDataPtr(), nthreads); + return result; +} + std::unordered_map RaycastingScene::ComputeClosestPoints(const core::Tensor& query_points, const int nthreads) { @@ -957,4 +1165,4 @@ uint32_t RaycastingScene::INVALID_ID() { return RTC_INVALID_GEOMETRY_ID; } } // namespace geometry } // namespace t -} // namespace open3d +} // namespace open3d \ No newline at end of file diff --git a/cpp/open3d/t/geometry/RaycastingScene.h b/cpp/open3d/t/geometry/RaycastingScene.h index b22639148f5..f25d994b0b5 100644 --- a/cpp/open3d/t/geometry/RaycastingScene.h +++ b/cpp/open3d/t/geometry/RaycastingScene.h @@ -104,6 +104,33 @@ class RaycastingScene { core::Tensor CountIntersections(const core::Tensor &rays, const int nthreads = 0); + /// \brief Lists the intersections of the rays with the scene + /// \param rays A tensor with >=2 dims, shape {.., 6}, and Dtype Float32 + /// describing the rays; {..} can be any number of dimensions. + /// The last dimension must be 6 and has the format [ox, oy, oz, dx, dy, dz] + /// with [ox,oy,oz] as the origin and [dx,dy,dz] as the direction. It is not + /// necessary to normalize the direction although it should be normalised if + /// t_hit is to be calculated in coordinate units. + /// \param nthreads The number of threads to use. Set to 0 for automatic. + /// \return The returned dictionary contains: /// + /// - \b ray_splits A tensor with ray intersection splits. Can be + /// used to iterate over all intersections for each ray. 
The shape + /// is {num_rays + 1}. + /// - \b ray_ids A tensor with ray IDs. The shape is + /// {num_intersections}. + /// - \b t_hit A tensor with the distance to the hit. The shape is + /// {num_intersections}. + /// - \b geometry_ids A tensor with the geometry IDs. The shape is + /// {num_intersections}. + /// - \b primitive_ids A tensor with the primitive IDs, which + /// corresponds to the triangle index. The shape is + /// {num_intersections}. + /// - \b primitive_uvs A tensor with the barycentric coordinates of + /// the intersection points within the triangles. The shape is + /// {num_intersections, 2}. + std::unordered_map ListIntersections( + const core::Tensor &rays, const int nthreads = 0); + /// \brief Computes the closest points on the surfaces of the scene. /// \param query_points A tensor with >=2 dims, shape {.., 3} and Dtype /// Float32 describing the query points. {..} can be any number of diff --git a/cpp/open3d/t/geometry/TensorMap.cpp b/cpp/open3d/t/geometry/TensorMap.cpp index 3eaefdecb5e..f47bcaf0955 100644 --- a/cpp/open3d/t/geometry/TensorMap.cpp +++ b/cpp/open3d/t/geometry/TensorMap.cpp @@ -55,8 +55,8 @@ void TensorMap::AssertSizeSynchronized() const { for (auto& kv : *this) { if (kv.first != primary_key_ && kv.second.GetLength() != primary_size) { - fmt::format(" > Tensor \"{}\" has size {}.\n", kv.first, - kv.second.GetLength()); + ss << fmt::format(" > Tensor \"{}\" has size {}.\n", + kv.first, kv.second.GetLength()); } } utility::LogError("{}", ss.str()); diff --git a/cpp/open3d/t/geometry/TriangleMesh.cpp b/cpp/open3d/t/geometry/TriangleMesh.cpp index 4134ee6f306..d96e9a55b75 100644 --- a/cpp/open3d/t/geometry/TriangleMesh.cpp +++ b/cpp/open3d/t/geometry/TriangleMesh.cpp @@ -409,7 +409,9 @@ open3d::geometry::TriangleMesh TriangleMesh::ToLegacy() const { // Convert material if the t geometry has a valid one auto &tmat = GetMaterial(); if (tmat.IsValid()) { - auto &legacy_mat = mesh_legacy.materials_["Mat1"]; + mesh_legacy.materials_.emplace_back(); + mesh_legacy.materials_.front().first = "Mat1"; + auto &legacy_mat = mesh_legacy.materials_.front().second; // Convert scalar properties if (tmat.HasBaseColor()) { legacy_mat.baseColor.f4[0] = tmat.GetBaseColor().x(); @@ -993,7 +995,66 @@ int TriangleMesh::PCAPartition(int max_faces) { return num_parititions; } +/// A helper to compute new vertex indices out of vertex mask. +/// \param tris_cpu tensor with triangle indices to update. +/// \param vertex_mask tensor with the mask for vertices. +template +static void UpdateTriangleIndicesByVertexMask(core::Tensor &tris_cpu, + const core::Tensor &vertex_mask) { + int64_t num_verts = vertex_mask.GetLength(); + int64_t num_tris = tris_cpu.GetLength(); + const T *vertex_mask_ptr = vertex_mask.GetDataPtr(); + std::vector prefix_sum(num_verts + 1, 0); + utility::InclusivePrefixSum(vertex_mask_ptr, vertex_mask_ptr + num_verts, + &prefix_sum[1]); + + // update triangle indices + T *vert_idx_ptr = tris_cpu.GetDataPtr(); + for (int64_t i = 0; i < num_tris * 3; ++i) { + vert_idx_ptr[i] = prefix_sum[vert_idx_ptr[i]]; + } +} + +/// A helper to copy mesh attributes. 
+/// \param dst destination mesh +/// \param src source mesh +/// \param vertex_mask vertex mask of the source mesh +/// \param tri_mask triangle mask of the source mesh +static void CopyAttributesByMasks(TriangleMesh &dst, + const TriangleMesh &src, + const core::Tensor &vertex_mask, + const core::Tensor &tri_mask) { + if (src.HasVertexPositions() && dst.HasVertexPositions()) { + for (auto item : src.GetVertexAttr()) { + if (!dst.HasVertexAttr(item.first)) { + dst.SetVertexAttr(item.first, + item.second.IndexGet({vertex_mask})); + } + } + } + + if (src.HasTriangleIndices() && dst.HasTriangleIndices()) { + for (auto item : src.GetTriangleAttr()) { + if (!dst.HasTriangleAttr(item.first)) { + dst.SetTriangleAttr(item.first, + item.second.IndexGet({tri_mask})); + } + } + } +} + TriangleMesh TriangleMesh::SelectFacesByMask(const core::Tensor &mask) const { + if (!HasVertexPositions()) { + utility::LogWarning( + "[SelectFacesByMask] mesh has no vertex positions."); + return {}; + } + if (!HasTriangleIndices()) { + utility::LogWarning( + "[SelectFacesByMask] mesh has no triangle indices."); + return {}; + } + core::AssertTensorShape(mask, {GetTriangleIndices().GetLength()}); core::AssertTensorDtype(mask, core::Bool); GetTriangleAttr().AssertSizeSynchronized(); @@ -1002,62 +1063,155 @@ TriangleMesh TriangleMesh::SelectFacesByMask(const core::Tensor &mask) const { // select triangles core::Tensor tris = GetTriangleIndices().IndexGet({mask}); core::Tensor tris_cpu = tris.To(core::Device()).Contiguous(); - const int64_t num_tris = tris_cpu.GetLength(); // create mask for vertices that are part of the selected faces const int64_t num_verts = GetVertexPositions().GetLength(); - core::Tensor vertex_mask = core::Tensor::Zeros({num_verts}, core::Int32); - std::vector prefix_sum(num_verts + 1, 0); - { - int32_t *vertex_mask_ptr = vertex_mask.GetDataPtr(); - if (tris_cpu.GetDtype() == core::Int32) { - int32_t *vert_idx_ptr = tris_cpu.GetDataPtr(); - for (int64_t i = 0; i < tris_cpu.GetLength() * 3; ++i) { - vertex_mask_ptr[vert_idx_ptr[i]] = 1; - } - } else { - int64_t *vert_idx_ptr = tris_cpu.GetDataPtr(); - for (int64_t i = 0; i < tris_cpu.GetLength() * 3; ++i) { - vertex_mask_ptr[vert_idx_ptr[i]] = 1; - } - } - utility::InclusivePrefixSum( - vertex_mask_ptr, vertex_mask_ptr + num_verts, &prefix_sum[1]); - } - - // update triangle indices - if (tris_cpu.GetDtype() == core::Int32) { - int32_t *vert_idx_ptr = tris_cpu.GetDataPtr(); + // empty tensor to further construct the vertex mask + core::Tensor vertex_mask; + + DISPATCH_INT_DTYPE_PREFIX_TO_TEMPLATE(tris_cpu.GetDtype(), tris, [&]() { + vertex_mask = core::Tensor::Zeros( + {num_verts}, core::Dtype::FromType()); + const int64_t num_tris = tris_cpu.GetLength(); + scalar_tris_t *vertex_mask_ptr = + vertex_mask.GetDataPtr(); + scalar_tris_t *vert_idx_ptr = tris_cpu.GetDataPtr(); + // mask for the vertices, which are used in the triangles for (int64_t i = 0; i < num_tris * 3; ++i) { - int64_t new_idx = prefix_sum[vert_idx_ptr[i]]; - vert_idx_ptr[i] = int32_t(new_idx); + vertex_mask_ptr[vert_idx_ptr[i]] = 1; } - } else { - int64_t *vert_idx_ptr = tris_cpu.GetDataPtr(); - for (int64_t i = 0; i < num_tris * 3; ++i) { - int64_t new_idx = prefix_sum[vert_idx_ptr[i]]; - vert_idx_ptr[i] = new_idx; - } - } + UpdateTriangleIndicesByVertexMask(tris_cpu, vertex_mask); + }); tris = tris_cpu.To(GetDevice()); vertex_mask = vertex_mask.To(GetDevice(), core::Bool); core::Tensor verts = GetVertexPositions().IndexGet({vertex_mask}); TriangleMesh result(verts, tris); - // 
copy attributes - for (auto item : GetVertexAttr()) { - if (!result.HasVertexAttr(item.first)) { - result.SetVertexAttr(item.first, - item.second.IndexGet({vertex_mask})); - } + CopyAttributesByMasks(result, *this, vertex_mask, mask); + + return result; +} + +/// brief Static negative checker for signed integer types +template ::value && + !std::is_same::value && + std::is_signed::value, + T>::type * = nullptr> +static bool IsNegative(T val) { + return val < 0; +} + +/// brief Overloaded static negative checker for unsigned integer types. +/// It unconditionally returns false, but we need it for template functions. +template ::value && + !std::is_same::value && + !std::is_signed::value, + T>::type * = nullptr> +static bool IsNegative(T val) { + return false; +} + +TriangleMesh TriangleMesh::SelectByIndex(const core::Tensor &indices) const { + TriangleMesh result; + core::AssertTensorShape(indices, {indices.GetLength()}); + if (!HasVertexPositions()) { + utility::LogWarning("[SelectByIndex] TriangleMesh has no vertices."); + return result; } - for (auto item : GetTriangleAttr()) { - if (!result.HasTriangleAttr(item.first)) { - result.SetTriangleAttr(item.first, item.second.IndexGet({mask})); - } + GetVertexAttr().AssertSizeSynchronized(); + + // we allow indices of an integral type only + core::Dtype::DtypeCode indices_dtype_code = + indices.GetDtype().GetDtypeCode(); + if (indices_dtype_code != core::Dtype::DtypeCode::Int && + indices_dtype_code != core::Dtype::DtypeCode::UInt) { + utility::LogError( + "[SelectByIndex] indices are not of integral type {}.", + indices.GetDtype().ToString()); + } + core::Tensor indices_cpu = indices.To(core::Device()).Contiguous(); + core::Tensor tris_cpu, tri_mask; + core::Dtype tri_dtype; + if (HasTriangleIndices()) { + GetTriangleAttr().AssertSizeSynchronized(); + tris_cpu = GetTriangleIndices().To(core::Device()).Contiguous(); + // bool mask for triangles. + tri_mask = core::Tensor::Zeros({tris_cpu.GetLength()}, core::Bool); + tri_dtype = tris_cpu.GetDtype(); + } else { + utility::LogWarning("TriangleMesh has no triangle indices."); + tri_dtype = core::Int64; + } + + // int mask to select vertices for the new mesh. We need it as int as we + // will use its values to sum up and get the map of new indices + core::Tensor vertex_mask = + core::Tensor::Zeros({GetVertexPositions().GetLength()}, tri_dtype); + + DISPATCH_INT_DTYPE_PREFIX_TO_TEMPLATE(tri_dtype, tris, [&]() { + DISPATCH_INT_DTYPE_PREFIX_TO_TEMPLATE( + indices_cpu.GetDtype(), indices, [&]() { + const int64_t num_tris = tris_cpu.GetLength(); + const int64_t num_verts = vertex_mask.GetLength(); + + // compute the vertices mask + scalar_tris_t *vertex_mask_ptr = + vertex_mask.GetDataPtr(); + const scalar_indices_t *indices_ptr = + indices.GetDataPtr(); + for (int64_t i = 0; i < indices.GetLength(); ++i) { + if (IsNegative(indices_ptr[i]) || + indices_ptr[i] >= + static_cast(num_verts)) { + utility::LogWarning( + "[SelectByIndex] indices contains index {} " + "out of range. 
" + "It is ignored.", + indices_ptr[i]); + continue; + } + vertex_mask_ptr[indices_ptr[i]] = 1; + } + + if (tri_mask.GetDtype() == core::Undefined) { + // we don't need to compute triangles, if there are none + return; + } + + // Build the triangle mask + scalar_tris_t *tris_cpu_ptr = + tris_cpu.GetDataPtr(); + bool *tri_mask_ptr = tri_mask.GetDataPtr(); + for (int64_t i = 0; i < num_tris; ++i) { + if (vertex_mask_ptr[tris_cpu_ptr[3 * i]] == 1 && + vertex_mask_ptr[tris_cpu_ptr[3 * i + 1]] == 1 && + vertex_mask_ptr[tris_cpu_ptr[3 * i + 2]] == 1) { + tri_mask_ptr[i] = true; + } + } + // select only needed triangles + tris_cpu = tris_cpu.IndexGet({tri_mask}); + // update the triangle indices + UpdateTriangleIndicesByVertexMask( + tris_cpu, vertex_mask); + }); + }); + + // send the vertex mask to original device and apply to vertices + vertex_mask = vertex_mask.To(GetDevice(), core::Bool); + core::Tensor new_vertices = GetVertexPositions().IndexGet({vertex_mask}); + result.SetVertexPositions(new_vertices); + + if (HasTriangleIndices()) { + // select triangles and send the selected ones to the original device + result.SetTriangleIndices(tris_cpu.To(GetDevice())); } + CopyAttributesByMasks(result, *this, vertex_mask, tri_mask); + return result; } diff --git a/cpp/open3d/t/geometry/TriangleMesh.h b/cpp/open3d/t/geometry/TriangleMesh.h index 7828ac16b02..99417f8433c 100644 --- a/cpp/open3d/t/geometry/TriangleMesh.h +++ b/cpp/open3d/t/geometry/TriangleMesh.h @@ -91,7 +91,7 @@ class LineSet; /// default and common attributes. class TriangleMesh : public Geometry, public DrawableGeometry { public: - /// Construct an empty pointcloud on the provided device. + /// Construct an empty trianglemesh on the provided device. /// \param device The device on which to initialize the trianglemesh /// (default: 'CPU:0'). TriangleMesh(const core::Device &device = core::Device("CPU:0")); @@ -927,9 +927,20 @@ class TriangleMesh : public Geometry, public DrawableGeometry { /// Returns a new mesh with the faces selected by a boolean mask. /// \param mask A boolean mask with the shape (N) with N as the number of /// faces in the mesh. - /// \return A new mesh with the selected faces. + /// \return A new mesh with the selected faces. If the original mesh is + /// empty, return an empty mesh. TriangleMesh SelectFacesByMask(const core::Tensor &mask) const; + /// Returns a new mesh with the vertices selected by a vector of indices. + /// If an item from the indices list exceeds the max vertex number of + /// the mesh or has a negative value, it is ignored. + /// \param indices An integer list of indices. Duplicates are + /// allowed, but ignored. Signed and unsigned integral types are allowed. + /// \return A new mesh with the selected vertices and faces built + /// from the selected vertices. If the original mesh is empty, return + /// an empty mesh. 
+ TriangleMesh SelectByIndex(const core::Tensor &indices) const; + protected: core::Device device_ = core::Device("CPU:0"); TensorMap vertex_attr_; diff --git a/cpp/open3d/t/io/ImageIO.h b/cpp/open3d/t/io/ImageIO.h index 211dac41325..6a7edd6e17b 100644 --- a/cpp/open3d/t/io/ImageIO.h +++ b/cpp/open3d/t/io/ImageIO.h @@ -61,6 +61,10 @@ bool WriteImageToPNG(const std::string &filename, const geometry::Image &image, int quality = kOpen3DImageIODefaultQuality); +bool WriteImageToPNGInMemory(std::vector &output_buffer, + const geometry::Image &image, + int quality = kOpen3DImageIODefaultQuality); + bool ReadImageFromJPG(const std::string &filename, geometry::Image &image); bool WriteImageToJPG(const std::string &filename, diff --git a/cpp/open3d/t/io/TriangleMeshIO.cpp b/cpp/open3d/t/io/TriangleMeshIO.cpp index 5aaa90eb705..3f511eb984e 100644 --- a/cpp/open3d/t/io/TriangleMeshIO.cpp +++ b/cpp/open3d/t/io/TriangleMeshIO.cpp @@ -7,8 +7,10 @@ #include "open3d/t/io/TriangleMeshIO.h" +#include #include +#include "open3d/t/io/NumpyIO.h" #include "open3d/utility/FileSystem.h" #include "open3d/utility/Logging.h" @@ -22,6 +24,7 @@ static const std::unordered_map< geometry::TriangleMesh &, const open3d::io::ReadTriangleMeshOptions &)>> file_extension_to_trianglemesh_read_function{ + {"npz", ReadTriangleMeshFromNPZ}, {"stl", ReadTriangleMeshUsingASSIMP}, {"obj", ReadTriangleMeshUsingASSIMP}, {"off", ReadTriangleMeshUsingASSIMP}, @@ -40,7 +43,10 @@ static const std::unordered_map< const bool, const bool, const bool)>> - file_extension_to_trianglemesh_write_function{}; + file_extension_to_trianglemesh_write_function{ + {"npz", WriteTriangleMeshToNPZ}, + {"glb", WriteTriangleMeshUsingASSIMP}, + }; std::shared_ptr CreateMeshFromFile( const std::string &filename, bool print_progress) { @@ -137,6 +143,160 @@ bool WriteTriangleMesh(const std::string &filename, return success; } +bool ReadTriangleMeshFromNPZ( + const std::string &filename, + geometry::TriangleMesh &mesh, + const open3d::io::ReadTriangleMeshOptions ¶ms) { + auto attribute_map = ReadNpz(filename); + + // At a minimum there should be 'vertices' and 'triangles' + if (!(attribute_map.count("vertices") > 0) || + !(attribute_map.count("triangles") > 0)) { + utility::LogWarning( + "Read geometry::TriangleMesh failed: Could not find 'vertices' " + "or 'triangles' attributes in {}", + filename); + return false; + } + + // Fill mesh with attributes + for (auto &attr : attribute_map) { + if (attr.first == "vertices") { + mesh.SetVertexPositions(attr.second); + } else if (attr.first == "triangles") { + mesh.SetTriangleIndices(attr.second); + } else if (attr.first == "vertex_normals") { + mesh.SetVertexNormals(attr.second); + } else if (attr.first == "triangle_normals") { + mesh.SetTriangleNormals(attr.second); + } else if (attr.first == "vertex_colors") { + mesh.SetVertexColors(attr.second); + } else if (attr.first == "triangle_colors") { + mesh.SetTriangleColors(attr.second); + } else if (attr.first == "triangle_texture_uvs") { + mesh.SetTriangleAttr("texture_uvs", attr.second); + } else if (attr.first.find("tex_") != std::string::npos) { + // Get texture map + auto key = attr.first.substr(4); + if (!mesh.GetMaterial().IsValid()) { + mesh.GetMaterial().SetDefaultProperties(); + } + mesh.GetMaterial().SetTextureMap(key, geometry::Image(attr.second)); + // Note: due to quirk of Open3D shader implementation if we have a + // metallic texture we need to set the metallic scalar propert to + // 1.0 + if (key == "metallic") { + 
mesh.GetMaterial().SetScalarProperty("metallic", 1.0); + } + } else if (attr.first.find("vertex_") != std::string::npos) { + // Generic vertex attribute + auto key = attr.first.substr(7); + mesh.SetVertexAttr(key, attr.second); + } else if (attr.first.find("triangle_") != std::string::npos) { + // Generic triangle attribute + auto key = attr.first.substr(9); + mesh.SetTriangleAttr(key, attr.second); + } else if (attr.first == "material_name") { + if (!mesh.GetMaterial().IsValid()) { + mesh.GetMaterial().SetDefaultProperties(); + } + const uint8_t *str_ptr = attr.second.GetDataPtr(); + std::string mat_name(attr.second.GetShape().GetLength(), 'a'); + std::memcpy((void *)mat_name.data(), str_ptr, + attr.second.GetShape().GetLength()); + mesh.GetMaterial().SetMaterialName(mat_name); + } + } + + return true; +} + +bool WriteTriangleMeshToNPZ(const std::string &filename, + const geometry::TriangleMesh &mesh, + const bool write_ascii, + const bool compressed, + const bool write_vertex_normals, + const bool write_vertex_colors, + const bool write_triangle_uvs, + const bool print_progress) { + // Sanity checks... + if (write_ascii) { + utility::LogWarning( + "TriangleMesh can't be saved in ASCII format as .npz"); + return false; + } + if (compressed) { + utility::LogWarning( + "TriangleMesh can't be saved in compressed format as .npz"); + return false; + } + + // Map attribute names to names already used by convention in other software + std::set known_attributes( + {"positions", "normals", "texture_uvs", "indices", "colors"}); + + // Build map of known attributes + std::unordered_map mesh_attributes; + if (mesh.HasVertexPositions()) { + mesh_attributes["vertices"] = mesh.GetVertexPositions(); + } + if (mesh.HasVertexNormals()) { + mesh_attributes["vertex_normals"] = mesh.GetVertexNormals(); + } + if (mesh.HasVertexColors()) { + mesh_attributes["vertex_colors"] = mesh.GetVertexColors(); + } + if (mesh.HasTriangleIndices()) { + mesh_attributes["triangles"] = mesh.GetTriangleIndices(); + } + if (mesh.HasTriangleNormals()) { + mesh_attributes["triangle_normals"] = mesh.GetTriangleNormals(); + } + if (mesh.HasTriangleColors()) { + mesh_attributes["triangle_colors"] = mesh.GetTriangleColors(); + } + if (mesh.HasTriangleAttr("texture_uvs")) { + mesh_attributes["triangle_texture_uvs"] = + mesh.GetTriangleAttr("texture_uvs"); + } + + // Add "generic" attributes + for (auto attr : mesh.GetVertexAttr()) { + if (known_attributes.count(attr.first) > 0) { + continue; + } + std::string key_name("vertex_"); + key_name += attr.first; + mesh_attributes[key_name] = attr.second; + } + for (auto attr : mesh.GetTriangleAttr()) { + if (known_attributes.count(attr.first) > 0) { + continue; + } + std::string key_name("triangle_"); + key_name += attr.first; + mesh_attributes[key_name] = attr.second; + } + + // Output texture maps + if (mesh.GetMaterial().IsValid()) { + auto &mat = mesh.GetMaterial(); + // Get material name in Tensor form + std::vector mat_name_vec( + {mat.GetMaterialName().begin(), mat.GetMaterialName().end()}); + core::Tensor mat_name_tensor(std::move(mat_name_vec)); + mesh_attributes["material_name"] = mat_name_tensor; + for (auto &tex : mat.GetTextureMaps()) { + std::string key = std::string("tex_") + tex.first; + mesh_attributes[key] = tex.second.AsTensor(); + } + } + + WriteNpz(filename, mesh_attributes); + + return true; +} + } // namespace io } // namespace t } // namespace open3d diff --git a/cpp/open3d/t/io/TriangleMeshIO.h b/cpp/open3d/t/io/TriangleMeshIO.h index 3498644fbb7..a4d975d471a 100644
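With ReadTriangleMeshFromNPZ and WriteTriangleMeshToNPZ registered in the read/write tables above, a ".npz" path now round-trips through the generic t::io entry points. A hedged sketch, assuming the usual t::io::WriteTriangleMesh / t::io::ReadTriangleMesh overloads dispatch on the file extension; the file name and the "quality" attribute are made up for illustration:

#include <cstdint>
#include "open3d/Open3D.h"

int main() {
    using namespace open3d;
    // A single CPU triangle with one custom per-vertex attribute.
    t::geometry::TriangleMesh mesh;
    mesh.SetVertexPositions(core::Tensor::Init<float>(
            {{0.f, 0.f, 0.f}, {1.f, 0.f, 0.f}, {0.f, 1.f, 0.f}}));
    mesh.SetTriangleIndices(core::Tensor::Init<int64_t>({{0, 1, 2}}));
    mesh.SetVertexAttr("quality",
                       core::Tensor::Init<float>({0.1f, 0.2f, 0.3f}));

    // ".npz" dispatches to WriteTriangleMeshToNPZ: positions are stored as
    // "vertices", indices as "triangles", the custom attribute as
    // "vertex_quality".
    t::io::WriteTriangleMesh("mesh_roundtrip.npz", mesh);

    // Reading strips the "vertex_"/"triangle_" prefixes again, so the custom
    // attribute comes back as "quality".
    t::geometry::TriangleMesh reloaded;
    t::io::ReadTriangleMesh("mesh_roundtrip.npz", reloaded);
    return 0;
}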
--- a/cpp/open3d/t/io/TriangleMeshIO.h +++ b/cpp/open3d/t/io/TriangleMeshIO.h @@ -54,6 +54,28 @@ bool ReadTriangleMeshUsingASSIMP( geometry::TriangleMesh &mesh, const open3d::io::ReadTriangleMeshOptions ¶ms); +bool ReadTriangleMeshFromNPZ(const std::string &filename, + geometry::TriangleMesh &mesh, + const open3d::io::ReadTriangleMeshOptions ¶ms); + +bool WriteTriangleMeshUsingASSIMP(const std::string &filename, + const geometry::TriangleMesh &mesh, + const bool write_ascii, + const bool compressed, + const bool write_vertex_normals, + const bool write_vertex_colors, + const bool write_triangle_uvs, + const bool print_progress); + +bool WriteTriangleMeshToNPZ(const std::string &filename, + const geometry::TriangleMesh &mesh, + const bool write_ascii, + const bool compressed, + const bool write_vertex_normals, + const bool write_vertex_colors, + const bool write_triangle_uvs, + const bool print_progress); + } // namespace io } // namespace t } // namespace open3d diff --git a/cpp/open3d/t/io/file_format/FileASSIMP.cpp b/cpp/open3d/t/io/file_format/FileASSIMP.cpp index 0d5116cabb8..5eae7a9ffdc 100644 --- a/cpp/open3d/t/io/file_format/FileASSIMP.cpp +++ b/cpp/open3d/t/io/file_format/FileASSIMP.cpp @@ -9,6 +9,7 @@ #include #include +#include #include #include #include @@ -17,6 +18,7 @@ #include "open3d/core/ParallelFor.h" #include "open3d/core/TensorFunction.h" +#include "open3d/t/io/ImageIO.h" #include "open3d/t/io/TriangleMeshIO.h" #include "open3d/utility/FileSystem.h" #include "open3d/utility/Logging.h" @@ -174,6 +176,301 @@ bool ReadTriangleMeshUsingASSIMP( return true; } +static void SetTextureMaterialProperty(aiMaterial* mat, + aiScene* scene, + int texture_idx, + aiTextureType tt, + t::geometry::Image& img) { + // Encode image as PNG + std::vector img_buffer; + WriteImageToPNGInMemory(img_buffer, img, 6); + + // Fill in Assimp's texture class and add to its material + auto tex = scene->mTextures[texture_idx]; + std::string tex_id("*"); + tex_id += std::to_string(texture_idx); + tex->mFilename = tex_id.c_str(); + tex->mHeight = 0; + tex->mWidth = img_buffer.size(); + // NOTE: Assimp takes ownership of the data so we need to copy it + // into a separate buffer that Assimp can take care of delete []-ing + uint8_t* img_data = new uint8_t[img_buffer.size()]; + memcpy(img_data, img_buffer.data(), img_buffer.size()); + tex->pcData = reinterpret_cast(img_data); + strcpy(tex->achFormatHint, "png"); + aiString uri(tex_id); + const int uv_index = 0; + const aiTextureMapMode mode = aiTextureMapMode_Wrap; + mat->AddProperty(&uri, AI_MATKEY_TEXTURE(tt, 0)); + mat->AddProperty(&uv_index, 1, AI_MATKEY_UVWSRC(tt, 0)); + mat->AddProperty(&mode, 1, AI_MATKEY_MAPPINGMODE_U(tt, 0)); + mat->AddProperty(&mode, 1, AI_MATKEY_MAPPINGMODE_V(tt, 0)); +} + +bool WriteTriangleMeshUsingASSIMP(const std::string& filename, + const geometry::TriangleMesh& mesh, + const bool write_ascii, + const bool compressed, + const bool write_vertex_normals, + const bool write_vertex_colors, + const bool write_triangle_uvs, + const bool print_progress) { + // Sanity checks... 
+ if (write_ascii) { + utility::LogWarning( + "TriangleMesh can't be saved in ASCII format as .glb"); + return false; + } + if (compressed) { + utility::LogWarning( + "TriangleMesh can't be saved in compressed format as .glb"); + return false; + } + if (!mesh.HasVertexPositions()) { + utility::LogWarning( + "TriangleMesh has no vertex positions and can't be saved as " + ".glb"); + return false; + } + // Check for unsupported features + if (mesh.HasTriangleNormals()) { + utility::LogWarning( + "Exporting triangle normals is not supported. Please convert " + "to vertex normals or export to a format that supports it."); + } + if (mesh.HasTriangleColors()) { + utility::LogWarning( + "Exporting triangle colors is not supported. Please convert to " + "vertex colors or export to a format that supports it."); + } + + Assimp::Exporter exporter; + auto ai_scene = std::unique_ptr(new aiScene); + + // Fill mesh data... + ai_scene->mNumMeshes = 1; + ai_scene->mMeshes = new aiMesh*[1]; + auto ai_mesh = new aiMesh; + ai_mesh->mName.Set("Object1"); + ai_mesh->mPrimitiveTypes = aiPrimitiveType_TRIANGLE; + // Guaranteed to have both vertex positions and triangle indices + auto vertices = mesh.GetVertexPositions().Contiguous(); + auto indices = + mesh.GetTriangleIndices().To(core::Dtype::UInt32).Contiguous(); + ai_mesh->mNumVertices = vertices.GetShape(0); + ai_mesh->mVertices = new aiVector3D[ai_mesh->mNumVertices]; + memcpy(&ai_mesh->mVertices->x, vertices.GetDataPtr(), + sizeof(float) * ai_mesh->mNumVertices * 3); + ai_mesh->mNumFaces = indices.GetShape(0); + ai_mesh->mFaces = new aiFace[ai_mesh->mNumFaces]; + for (unsigned int i = 0; i < ai_mesh->mNumFaces; ++i) { + ai_mesh->mFaces[i].mNumIndices = 3; + // NOTE: Yes, dynamically allocating 3 ints for each face is inefficient + // but this is what Assimp seems to require as it deletes each mIndices + // on destruction. We could block allocate space for all the faces, + // assign pointers here then zero out the pointers before destruction so + // the delete becomes a no-op, but that seems error prone. Could revisit + // if this becomes an IO bottleneck.
+ ai_mesh->mFaces[i].mIndices = new unsigned int[3]; // triangles + ai_mesh->mFaces[i].mIndices[0] = indices[i][0].Item(); + ai_mesh->mFaces[i].mIndices[1] = indices[i][1].Item(); + ai_mesh->mFaces[i].mIndices[2] = indices[i][2].Item(); + } + + if (write_vertex_normals && mesh.HasVertexNormals()) { + auto normals = mesh.GetVertexNormals().Contiguous(); + auto m_normals = normals.GetShape(0); + ai_mesh->mNormals = new aiVector3D[m_normals]; + memcpy(&ai_mesh->mNormals->x, normals.GetDataPtr(), + sizeof(float) * m_normals * 3); + } + + if (write_vertex_colors && mesh.HasVertexColors()) { + auto colors = mesh.GetVertexColors().Contiguous(); + auto m_colors = colors.GetShape(0); + ai_mesh->mColors[0] = new aiColor4D[m_colors]; + if (colors.GetShape(1) == 4) { + memcpy(&ai_mesh->mColors[0][0].r, colors.GetDataPtr(), + sizeof(float) * m_colors * 4); + } else { // must be 3 components + auto color_ptr = reinterpret_cast(colors.GetDataPtr()); + for (unsigned int i = 0; i < m_colors; ++i) { + ai_mesh->mColors[0][i].r = *color_ptr++; + ai_mesh->mColors[0][i].g = *color_ptr++; + ai_mesh->mColors[0][i].b = *color_ptr++; + ai_mesh->mColors[0][i].a = 1.0f; + } + } + } + + if (write_triangle_uvs && mesh.HasTriangleAttr("texture_uvs")) { + auto triangle_uvs = mesh.GetTriangleAttr("texture_uvs").Contiguous(); + auto vertex_uvs = core::Tensor::Empty({ai_mesh->mNumVertices, 2}, + core::Dtype::Float32); + auto n_uvs = ai_mesh->mNumVertices; + for (int64_t i = 0; i < indices.GetShape(0); i++) { + vertex_uvs[indices[i][0].Item()] = triangle_uvs[i][0]; + vertex_uvs[indices[i][1].Item()] = triangle_uvs[i][1]; + vertex_uvs[indices[i][2].Item()] = triangle_uvs[i][2]; + } + ai_mesh->mTextureCoords[0] = new aiVector3D[n_uvs]; + auto uv_ptr = reinterpret_cast(vertex_uvs.GetDataPtr()); + for (unsigned int i = 0; i < n_uvs; ++i) { + ai_mesh->mTextureCoords[0][i].x = *uv_ptr++; + ai_mesh->mTextureCoords[0][i].y = *uv_ptr++; + } + ai_mesh->mNumUVComponents[0] = 2; + } + ai_scene->mMeshes[0] = ai_mesh; + + // Fill material data... + ai_scene->mNumMaterials = 1; + ai_scene->mMaterials = new aiMaterial*[ai_scene->mNumMaterials]; + auto ai_mat = new aiMaterial; + if (mesh.HasMaterial()) { + ai_mat->GetName().Set("mat1"); + auto shading_mode = aiShadingMode_PBR_BRDF; + ai_mat->AddProperty(&shading_mode, 1, AI_MATKEY_SHADING_MODEL); + + // Set base material properties + // NOTE: not all properties supported by Open3D are supported by Assimp. + // Those properties (reflectivity, anisotropy) are not exported + if (mesh.GetMaterial().HasBaseColor()) { + auto c = mesh.GetMaterial().GetBaseColor(); + auto ac = aiColor4D(c.x(), c.y(), c.z(), c.w()); + ai_mat->AddProperty(&ac, 1, AI_MATKEY_COLOR_DIFFUSE); + ai_mat->AddProperty(&ac, 1, AI_MATKEY_BASE_COLOR); + } + if (mesh.GetMaterial().HasBaseRoughness()) { + auto r = mesh.GetMaterial().GetBaseRoughness(); + ai_mat->AddProperty(&r, 1, AI_MATKEY_ROUGHNESS_FACTOR); + } + if (mesh.GetMaterial().HasBaseMetallic()) { + auto m = mesh.GetMaterial().GetBaseMetallic(); + ai_mat->AddProperty(&m, 1, AI_MATKEY_METALLIC_FACTOR); + } + if (mesh.GetMaterial().HasBaseClearcoat()) { + auto c = mesh.GetMaterial().GetBaseClearcoat(); + ai_mat->AddProperty(&c, 1, AI_MATKEY_CLEARCOAT_FACTOR); + } + if (mesh.GetMaterial().HasBaseClearcoatRoughness()) { + auto r = mesh.GetMaterial().GetBaseClearcoatRoughness(); + ai_mat->AddProperty(&r, 1, AI_MATKEY_CLEARCOAT_ROUGHNESS_FACTOR); + } + + // Count texture maps... + // NOTE: GLTF2 expects a single combined roughness/metal map. 
If the + // model has one we just export it, otherwise if both roughness and + // metal maps are avaialbe we combine them, otherwise if only one or the + // other is available we just export the one map. + int n_textures = 0; + if (mesh.GetMaterial().HasAlbedoMap()) ++n_textures; + if (mesh.GetMaterial().HasNormalMap()) ++n_textures; + if (mesh.GetMaterial().HasAORoughnessMetalMap()) { + ++n_textures; + } else if (mesh.GetMaterial().HasRoughnessMap() && + mesh.GetMaterial().HasMetallicMap()) { + ++n_textures; + } else { + if (mesh.GetMaterial().HasRoughnessMap()) ++n_textures; + if (mesh.GetMaterial().HasMetallicMap()) ++n_textures; + } + if (n_textures > 0) { + ai_scene->mTextures = new aiTexture*[n_textures]; + for (int i = 0; i < n_textures; ++i) { + ai_scene->mTextures[i] = new aiTexture(); + } + ai_scene->mNumTextures = n_textures; + } + + // Now embed the textures that are available... + int current_idx = 0; + if (mesh.GetMaterial().HasAlbedoMap()) { + auto img = mesh.GetMaterial().GetAlbedoMap(); + SetTextureMaterialProperty(ai_mat, ai_scene.get(), current_idx, + aiTextureType_DIFFUSE, img); + SetTextureMaterialProperty(ai_mat, ai_scene.get(), current_idx, + aiTextureType_BASE_COLOR, img); + ++current_idx; + } + if (mesh.GetMaterial().HasAORoughnessMetalMap()) { + auto img = mesh.GetMaterial().GetAORoughnessMetalMap(); + SetTextureMaterialProperty(ai_mat, ai_scene.get(), current_idx, + aiTextureType_UNKNOWN, img); + ++current_idx; + } else if (mesh.GetMaterial().HasRoughnessMap() && + mesh.GetMaterial().HasMetallicMap()) { + auto rough = mesh.GetMaterial().GetRoughnessMap(); + auto metal = mesh.GetMaterial().GetMetallicMap(); + auto rows = rough.GetRows(); + auto cols = rough.GetCols(); + auto rough_metal = + geometry::Image(rows, cols, 4, core::Dtype::UInt8); + rough_metal.AsTensor() = + core::Tensor::Ones(rough_metal.AsTensor().GetShape(), + core::Dtype::UInt8) * + 255; + auto metal_channel = metal.AsTensor().GetItem( + {core::TensorKey::Slice(0, rows + 1, core::None), + core::TensorKey::Slice(0, cols + 1, core::None), + core::TensorKey::Index(0)}); + auto rough_channel = rough.AsTensor().GetItem( + {core::TensorKey::Slice(0, rows + 1, core::None), + core::TensorKey::Slice(0, cols + 1, core::None), + core::TensorKey::Index(0)}); + rough_metal.AsTensor().SetItem( + {core::TensorKey::Slice(0, rows + 1, core::None), + core::TensorKey::Slice(0, cols + 1, core::None), + core::TensorKey::Index(2)}, + metal_channel); + rough_metal.AsTensor().SetItem( + {core::TensorKey::Slice(0, rows + 1, core::None), + core::TensorKey::Slice(0, cols + 1, core::None), + core::TensorKey::Index(1)}, + rough_channel); + SetTextureMaterialProperty(ai_mat, ai_scene.get(), current_idx, + aiTextureType_UNKNOWN, rough_metal); + ++current_idx; + } else { + if (mesh.GetMaterial().HasRoughnessMap()) { + auto img = mesh.GetMaterial().GetRoughnessMap(); + SetTextureMaterialProperty(ai_mat, ai_scene.get(), current_idx, + aiTextureType_UNKNOWN, img); + ++current_idx; + } + if (mesh.GetMaterial().HasMetallicMap()) { + auto img = mesh.GetMaterial().GetMetallicMap(); + SetTextureMaterialProperty(ai_mat, ai_scene.get(), current_idx, + aiTextureType_UNKNOWN, img); + ++current_idx; + } + } + if (mesh.GetMaterial().HasNormalMap()) { + auto img = mesh.GetMaterial().GetNormalMap(); + SetTextureMaterialProperty(ai_mat, ai_scene.get(), current_idx, + aiTextureType_NORMALS, img); + ++current_idx; + } + } + ai_scene->mMaterials[0] = ai_mat; + + auto root_node = new aiNode; + root_node->mName.Set("root"); + root_node->mNumMeshes = 
1; + root_node->mMeshes = new unsigned int[root_node->mNumMeshes]; + root_node->mMeshes[0] = 0; + ai_scene->mRootNode = root_node; + + // Export + if (exporter.Export(ai_scene.get(), "glb2", filename.c_str()) == + AI_FAILURE) { + utility::LogWarning("Got error: {}", exporter.GetErrorString()); + return false; + } + + return true; +} + } // namespace io } // namespace t } // namespace open3d diff --git a/cpp/open3d/t/io/file_format/FilePNG.cpp b/cpp/open3d/t/io/file_format/FilePNG.cpp index 2c4e40a9f48..2497e2a71c8 100644 --- a/cpp/open3d/t/io/file_format/FilePNG.cpp +++ b/cpp/open3d/t/io/file_format/FilePNG.cpp @@ -103,6 +103,50 @@ bool WriteImageToPNG(const std::string &filename, return true; } +bool WriteImageToPNGInMemory(std::vector &buffer, + const t::geometry::Image &image, + int quality) { + if (image.IsEmpty()) { + utility::LogWarning("Write PNG failed: image has no data."); + return false; + } + if (image.GetDtype() != core::UInt8 && image.GetDtype() != core::UInt16) { + utility::LogWarning("Write PNG failed: unsupported image data."); + return false; + } + if (quality == kOpen3DImageIODefaultQuality) // Set default quality + { + quality = 6; + } + if (quality < 0 || quality > 9) { + utility::LogWarning( + "Write PNG failed: quality ({}) must be in the range [0,9]", + quality); + return false; + } + png_image pngimage; + memset(&pngimage, 0, sizeof(pngimage)); + pngimage.version = PNG_IMAGE_VERSION; + SetPNGImageFromImage(image, quality, pngimage); + + // Compute bytes required + size_t mem_bytes = 0; + if (png_image_write_to_memory(&pngimage, nullptr, &mem_bytes, 0, + image.GetDataPtr(), 0, nullptr) == 0) { + utility::LogWarning( + "Could not compute bytes needed for encoding to PNG in " + "memory."); + return false; + } + buffer.resize(mem_bytes); + if (png_image_write_to_memory(&pngimage, &buffer[0], &mem_bytes, 0, + image.GetDataPtr(), 0, nullptr) == 0) { + utility::LogWarning("Unable to encode to PNG in memory."); + return false; + } + return true; +} + } // namespace io } // namespace t } // namespace open3d diff --git a/cpp/open3d/t/pipelines/registration/Feature.cpp b/cpp/open3d/t/pipelines/registration/Feature.cpp index c65132abcf9..d20e1a1b58c 100644 --- a/cpp/open3d/t/pipelines/registration/Feature.cpp +++ b/cpp/open3d/t/pipelines/registration/Feature.cpp @@ -10,6 +10,7 @@ #include "open3d/core/nns/NearestNeighborSearch.h" #include "open3d/t/geometry/PointCloud.h" #include "open3d/t/pipelines/kernel/Feature.h" +#include "open3d/utility/Parallel.h" namespace open3d { namespace t { @@ -90,6 +91,64 @@ core::Tensor ComputeFPFHFeature(const geometry::PointCloud &input, return fpfh; } +core::Tensor CorrespondencesFromFeatures(const core::Tensor &source_features, + const core::Tensor &target_features, + bool mutual_filter, + float mutual_consistent_ratio) { + const int num_searches = mutual_filter ?
2 : 1; + + std::array features{source_features, target_features}; + std::vector corres(num_searches); + + const int kMaxThreads = utility::EstimateMaxThreads(); + const int kOuterThreads = std::min(kMaxThreads, num_searches); + (void)kOuterThreads; // Avoids compiler warning if OpenMP is disabled + + // corres[0]: corres_ij, corres[1]: corres_ji +#pragma omp parallel for num_threads(kOuterThreads) + for (int i = 0; i < num_searches; ++i) { + core::nns::NearestNeighborSearch nns(features[1 - i], + core::Dtype::Int64); + nns.KnnIndex(); + auto result = nns.KnnSearch(features[i], 1); + + corres[i] = result.first.View({-1}); + } + + auto corres_ij = corres[0]; + core::Tensor arange_source = + core::Tensor::Arange(0, source_features.GetLength(), 1, + corres_ij.GetDtype(), corres_ij.GetDevice()); + + // Change view for the appending axis + core::Tensor result_ij = + arange_source.View({-1, 1}).Append(corres_ij.View({-1, 1}), 1); + + if (!mutual_filter) { + return result_ij; + } + + auto corres_ji = corres[1]; + // Mutually consistent + core::Tensor corres_ii = corres_ji.IndexGet({corres_ij}); + core::Tensor identical = corres_ii.Eq(arange_source); + core::Tensor result_mutual = corres_ij.IndexGet({identical}); + if (result_mutual.GetLength() > + mutual_consistent_ratio * arange_source.GetLength()) { + utility::LogDebug("{:d} correspondences remain after mutual filter", + result_mutual.GetLength()); + return arange_source.IndexGet({identical}) + .View({-1, 1}) + .Append(result_mutual.View({-1, 1}), 1); + } + // fall back to full correspondences + utility::LogWarning( + "Too few correspondences ({:d}) after mutual filter, fall back to " + "original correspondences.", + result_mutual.GetLength()); + return result_ij; +} + } // namespace registration } // namespace pipelines } // namespace t diff --git a/cpp/open3d/t/pipelines/registration/Feature.h b/cpp/open3d/t/pipelines/registration/Feature.h index 7beb6fc6e87..59b6ba489d8 100644 --- a/cpp/open3d/t/pipelines/registration/Feature.h +++ b/cpp/open3d/t/pipelines/registration/Feature.h @@ -37,6 +37,26 @@ core::Tensor ComputeFPFHFeature( const utility::optional max_nn = 100, const utility::optional radius = utility::nullopt); +/// \brief Function to find correspondences via 1-nearest neighbor feature +/// matching. Target is used to construct a nearest neighbor search +/// object, in order to query source. +/// \param source_feats (N, D) tensor +/// \param target_feats (M, D) tensor +/// \param mutual_filter Boolean flag, only return correspondences (i, j) s.t. +/// source_features[i] and target_features[j] are mutually the nearest neighbor. +/// \param mutual_consistency_ratio Float threshold to decide whether the number +/// of correspondences is sufficient. Only used when mutual_filter is set to +/// True. +/// \return (K, 2, Int64) tensor. When mutual_filter is disabled: the first +/// column is arange(0, N) of source, and the second column is the corresponding +/// index of target. When mutual_filter is enabled, return the filtering subset +/// of the aforementioned correspondence set where source[i] and target[j] are +/// mutually the nearest neighbor. If the subset size is smaller than +/// mutual_consistency_ratio * N, return the unfiltered set. 
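CorrespondencesFromFeatures slots in between feature computation and a correspondence-based registration step. A hedged usage sketch with placeholder file paths; ComputeFPFHFeature is called with the defaults from the header above, and the declaration of CorrespondencesFromFeatures itself follows below:

#include "open3d/Open3D.h"

int main() {
    using namespace open3d;
    namespace treg = t::pipelines::registration;

    // Placeholder inputs; FPFH needs normals, so estimate them first.
    t::geometry::PointCloud source, target;
    t::io::ReadPointCloud("source.pcd", source);
    t::io::ReadPointCloud("target.pcd", target);
    source.EstimateNormals();
    target.EstimateNormals();

    core::Tensor source_feats = treg::ComputeFPFHFeature(source);
    core::Tensor target_feats = treg::ComputeFPFHFeature(target);

    // (K, 2) Int64 tensor of (source index, target index) pairs. With
    // mutual_filter enabled it may fall back to the unfiltered set, as
    // documented above.
    core::Tensor corres = treg::CorrespondencesFromFeatures(
            source_feats, target_feats, /*mutual_filter=*/true,
            /*mutual_consistency_ratio=*/0.1f);
    utility::LogInfo("Found {} correspondences.", corres.GetLength());
    return 0;
}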
+core::Tensor CorrespondencesFromFeatures(const core::Tensor &source_features, + const core::Tensor &target_features, + bool mutual_filter = false, + float mutual_consistency_ratio = 0.1); } // namespace registration } // namespace pipelines } // namespace t diff --git a/cpp/open3d/utility/FileSystem.cpp b/cpp/open3d/utility/FileSystem.cpp index 5a2ce2748cc..98ff4926f2b 100644 --- a/cpp/open3d/utility/FileSystem.cpp +++ b/cpp/open3d/utility/FileSystem.cpp @@ -32,11 +32,12 @@ #define _SILENCE_EXPERIMENTAL_FILESYSTEM_DEPRECATION_WARNING #endif #ifdef __APPLE__ -// CMAKE_OSX_DEPLOYMENT_TARGET "10.15" or newer -#define _LIBCPP_NO_EXPERIMENTAL_DEPRECATION_WARNING_FILESYSTEM -#endif +#include +namespace fs = std::__fs::filesystem; +#else #include namespace fs = std::experimental::filesystem; +#endif #include "open3d/utility/Logging.h" diff --git a/cpp/open3d/version.txt b/cpp/open3d/version.txt index 14b8ec6ff43..f0ca441ca4e 100644 --- a/cpp/open3d/version.txt +++ b/cpp/open3d/version.txt @@ -1,3 +1,3 @@ OPEN3D_VERSION_MAJOR 0 -OPEN3D_VERSION_MINOR 16 -OPEN3D_VERSION_PATCH 1 +OPEN3D_VERSION_MINOR 17 +OPEN3D_VERSION_PATCH 0 diff --git a/cpp/open3d/visualization/gui/Application.cpp b/cpp/open3d/visualization/gui/Application.cpp index 956999a543f..59290cfaba0 100644 --- a/cpp/open3d/visualization/gui/Application.cpp +++ b/cpp/open3d/visualization/gui/Application.cpp @@ -87,7 +87,10 @@ std::string FindResourcePath(int argc, const char *argv[]) { auto resource_path = path + "/resources"; if (!open3d::utility::filesystem::DirectoryExists(resource_path)) { - return path + "/../resources"; // building with Xcode + resource_path = path + "/../resources"; // building with Xcode + if (!open3d::utility::filesystem::DirectoryExists(resource_path)) { + resource_path = path + "/share/resources"; // GNU + } } return resource_path; } @@ -451,18 +454,21 @@ void Application::AddWindow(std::shared_ptr window) { } void Application::RemoveWindow(Window *window) { + if (impl_->should_quit_) { + return; + } + for (auto it = impl_->windows_.begin(); it != impl_->windows_.end(); ++it) { if (it->get() == window) { - window->Show(false); impl_->windows_to_be_destroyed_.insert(*it); impl_->windows_.erase(it); + if (impl_->windows_.empty()) { + impl_->should_quit_ = true; + } break; } } - - if (impl_->windows_.empty()) { - impl_->should_quit_ = true; - } + window->Show(false); } void Application::Quit() { diff --git a/cpp/open3d/visualization/gui/Combobox.cpp b/cpp/open3d/visualization/gui/Combobox.cpp index 9db8a2f7159..2bba54cc0f5 100644 --- a/cpp/open3d/visualization/gui/Combobox.cpp +++ b/cpp/open3d/visualization/gui/Combobox.cpp @@ -147,8 +147,6 @@ Size Combobox::CalcPreferredSize(const LayoutContext& context, Combobox::DrawResult Combobox::Draw(const DrawContext& context) { bool value_changed = false; - bool was_open = ImGui::IsPopupOpen(impl_->imgui_id_.c_str()); - bool did_open = false; auto& frame = GetFrame(); ImGui::SetCursorScreenPos( @@ -166,10 +164,8 @@ Combobox::DrawResult Combobox::Draw(const DrawContext& context) { DrawImGuiPushEnabledState(); ImGui::PushItemWidth(float(frame.width)); + if (ImGui::BeginCombo(impl_->imgui_id_.c_str(), GetSelectedValue())) { - if (!was_open) { - did_open = true; - } for (size_t i = 0; i < impl_->items_.size(); ++i) { bool isSelected = false; if (ImGui::Selectable(impl_->items_[i].c_str(), &isSelected, 0)) { @@ -185,13 +181,14 @@ Combobox::DrawResult Combobox::Draw(const DrawContext& context) { } ImGui::EndCombo(); } + ImGui::PopItemWidth(); DrawImGuiPopEnabledState(); 
ImGui::PopStyleColor(3); - return ((value_changed || did_open) ? Widget::DrawResult::REDRAW - : Widget::DrawResult::NONE); + return value_changed ? Widget::DrawResult::REDRAW + : Widget::DrawResult::NONE; } } // namespace gui diff --git a/cpp/open3d/visualization/gui/Font.h b/cpp/open3d/visualization/gui/Font.h index ba6f7d38df6..62467154ac7 100644 --- a/cpp/open3d/visualization/gui/Font.h +++ b/cpp/open3d/visualization/gui/Font.h @@ -7,6 +7,7 @@ #pragma once +#include #include #include diff --git a/cpp/open3d/visualization/gui/ProgressBar.cpp b/cpp/open3d/visualization/gui/ProgressBar.cpp index a55e32bb090..0c930155a41 100644 --- a/cpp/open3d/visualization/gui/ProgressBar.cpp +++ b/cpp/open3d/visualization/gui/ProgressBar.cpp @@ -42,16 +42,19 @@ Widget::DrawResult ProgressBar::Draw(const DrawContext& context) { auto fg = context.theme.border_color; auto color = colorToImguiRGBA(fg); float rounding = frame.height / 2.0f; + ImGui::GetWindowDrawList()->AddRect( - ImVec2(float(frame.x), float(frame.y)), - ImVec2(float(frame.GetRight()), float(frame.GetBottom())), color, - rounding); + ImVec2(float(frame.x), float(frame.y) - ImGui::GetScrollY()), + ImVec2(float(frame.GetRight()), + float(frame.GetBottom()) - ImGui::GetScrollY()), + color, rounding); float x = float(frame.x) + float(frame.width) * impl_->value_; x = std::max(x, float(frame.x + rounding)); + ImGui::GetWindowDrawList()->AddRectFilled( - ImVec2(float(frame.x), float(frame.y)), - ImVec2(float(x), float(frame.GetBottom())), color, - frame.height / 2.0f); + ImVec2(float(frame.x), float(frame.y) - ImGui::GetScrollY()), + ImVec2(float(x), float(frame.GetBottom()) - ImGui::GetScrollY()), + color, frame.height / 2.0f); return DrawResult::NONE; } diff --git a/cpp/open3d/visualization/gui/Util.h b/cpp/open3d/visualization/gui/Util.h index a1fce75796f..375933bd6dd 100644 --- a/cpp/open3d/visualization/gui/Util.h +++ b/cpp/open3d/visualization/gui/Util.h @@ -10,6 +10,7 @@ #include +#include #include #include diff --git a/cpp/open3d/visualization/gui/Window.cpp b/cpp/open3d/visualization/gui/Window.cpp index 0ee562b4310..bb023903792 100644 --- a/cpp/open3d/visualization/gui/Window.cpp +++ b/cpp/open3d/visualization/gui/Window.cpp @@ -707,7 +707,7 @@ Widget::DrawResult DrawChild(DrawContext& dc, const char* name, std::shared_ptr child, Mode mode) { - // Note: ImGUI's concept of a "window" is really a moveable child of the + // Note: ImGUI's concept of a "window" is really a movable child of the // OS window. We want a child to act like a child of the OS window, // like native UI toolkits, Qt, etc. So the top-level widgets of // a window are drawn using ImGui windows whose frame is specified diff --git a/cpp/open3d/visualization/visualizer/O3DVisualizer.cpp b/cpp/open3d/visualization/visualizer/O3DVisualizer.cpp index 8e8b9e249bc..3ab06cec1b9 100644 --- a/cpp/open3d/visualization/visualizer/O3DVisualizer.cpp +++ b/cpp/open3d/visualization/visualizer/O3DVisualizer.cpp @@ -941,11 +941,24 @@ struct O3DVisualizer::Impl { // Finally assign material properties if geometry is a triangle mesh if (tmesh && tmesh->materials_.size() > 0) { - // Only a single material is supported for TriangleMesh so we - // just grab the first one we find. Users should be using - // TriangleMeshModel if they have a model with multiple - // materials. 
- auto &mesh_material = tmesh->materials_.begin()->second; + std::size_t material_index; + if (tmesh->HasTriangleMaterialIds()) { + auto minmax_it = std::minmax_element( + tmesh->triangle_material_ids_.begin(), + tmesh->triangle_material_ids_.end()); + if (*minmax_it.first != *minmax_it.second) { + utility::LogWarning( + "Only a single material is " + "supported for TriangleMesh visualization, " + "only the first referenced material will be " + "used. Use TriangleMeshModel if more than one " + "material is required."); + } + material_index = *minmax_it.first; + } else { + material_index = 0; + } + auto &mesh_material = tmesh->materials_[material_index].second; mat.base_color = {mesh_material.baseColor.r(), mesh_material.baseColor.g(), mesh_material.baseColor.b(), @@ -1521,6 +1534,9 @@ struct O3DVisualizer::Impl { px = int(ConvertToScaledPixels(px)); for (auto &o : objects_) { + // Ignore Models since they can never be point clouds + if (o.model) continue; + o.material.point_size = float(px); OverrideMaterial(o.name, o.material, ui_state_.scene_shader); } @@ -1546,6 +1562,9 @@ struct O3DVisualizer::Impl { px = int(ConvertToScaledPixels(px)); for (auto &o : objects_) { + // Ignore Models since they can never be point clouds + if (o.model) continue; + o.material.line_width = float(px); OverrideMaterial(o.name, o.material, ui_state_.scene_shader); } @@ -2026,12 +2045,32 @@ struct O3DVisualizer::Impl { void UpdateSelectableGeometry() { std::vector pickable; - pickable.reserve(objects_.size()); + // Count number of meshes stored in TriangleMeshModels + size_t model_mesh_count = 0; + size_t model_count = 0; + for (auto &o : objects_) { + if (!IsGeometryVisible(o)) { + continue; + } + if (o.model.get()) { + model_count++; + model_mesh_count += o.model.get()->meshes_.size(); + } + } + pickable.reserve(objects_.size() + model_mesh_count - model_count); for (auto &o : objects_) { if (!IsGeometryVisible(o)) { continue; } - pickable.emplace_back(o.name, o.geometry.get(), o.tgeometry.get()); + if (o.model.get()) { + for (auto &g : o.model->meshes_) { + pickable.emplace_back(g.mesh_name, g.mesh.get(), + o.tgeometry.get()); + } + } else { + pickable.emplace_back(o.name, o.geometry.get(), + o.tgeometry.get()); + } } selections_->SetSelectableGeometry(pickable); } diff --git a/cpp/open3d/visualization/visualizer/Visualizer.h b/cpp/open3d/visualization/visualizer/Visualizer.h index 76850aced4a..247ee2c91fe 100644 --- a/cpp/open3d/visualization/visualizer/Visualizer.h +++ b/cpp/open3d/visualization/visualizer/Visualizer.h @@ -127,6 +127,7 @@ class Visualizer { /// Visualizer should be updated accordingly. /// /// \param geometry_ptr The Geometry object. + /// \param reset_bounding_box Reset viewpoint to view all geometries. virtual bool AddGeometry( std::shared_ptr geometry_ptr, bool reset_bounding_box = true); @@ -140,6 +141,7 @@ class Visualizer { /// added by AddGeometry /// /// \param geometry_ptr The Geometry object. + /// \param reset_bounding_box Reset viewpoint to view all geometries. virtual bool RemoveGeometry( std::shared_ptr geometry_ptr, bool reset_bounding_box = true); @@ -173,7 +175,6 @@ class Visualizer { /// Function to retrieve the associated ViewControl ViewControl &GetViewControl() { return *view_control_ptr_; } - const ViewControl &GetViewControl() const { return *view_control_ptr_; } /// Function to retrieve the associated RenderOption. RenderOption &GetRenderOption() { return *render_option_ptr_; } /// \brief Function to capture screen and store RGB in a float buffer. 
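The O3DVisualizer change above only needs to know whether triangle_material_ids_ references more than one distinct material, and a single std::minmax_element pass answers that without sorting or hashing. A standalone illustration of the check (the id values are made up):

#include <algorithm>
#include <cstdio>
#include <vector>

int main() {
    std::vector<int> material_ids = {2, 2, 5, 2, 2};
    // One pass yields both extremes; they differ exactly when more than one
    // distinct id occurs.
    auto mm = std::minmax_element(material_ids.begin(), material_ids.end());
    if (*mm.first != *mm.second) {
        std::printf("multiple materials referenced (ids %d..%d); using %d\n",
                    *mm.first, *mm.second, *mm.first);
    }
    return 0;
}

This mirrors the fallback in the patch: when several ids are present, a warning is logged and the smallest referenced material id is the one actually used.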
@@ -210,11 +211,18 @@ class Visualizer { bool do_render = true, bool convert_to_world_coordinate = false); void CaptureRenderOption(const std::string &filename = ""); + /// Function to reset view point. void ResetViewPoint(bool reset_bounding_box = false); const std::string &GetWindowName() const { return window_name_; } + /// Get the current view status as a json string of ViewTrajectory. + std::string GetViewStatus(); + + /// Set the current view status from a json string of ViewTrajectory. + void SetViewStatus(const std::string &view_status_str); + protected: /// Function to initialize OpenGL virtual bool InitOpenGL(); @@ -230,11 +238,13 @@ class Visualizer { /// meshes individually). virtual void Render(bool render_screen = false); + /// Copy the current view status to clipboard. void CopyViewStatusToClipboard(); + /// Apply the view point from clipboard. void CopyViewStatusFromClipboard(); - // callback functions + /// Callback functions virtual void WindowRefreshCallback(GLFWwindow *window); virtual void WindowResizeCallback(GLFWwindow *window, int w, int h); virtual void MouseMoveCallback(GLFWwindow *window, double x, double y); diff --git a/cpp/open3d/visualization/visualizer/VisualizerRender.cpp b/cpp/open3d/visualization/visualizer/VisualizerRender.cpp index 9669f995249..175d11f99f8 100644 --- a/cpp/open3d/visualization/visualizer/VisualizerRender.cpp +++ b/cpp/open3d/visualization/visualizer/VisualizerRender.cpp @@ -143,34 +143,40 @@ void Visualizer::ResetViewPoint(bool reset_bounding_box /* = false*/) { } void Visualizer::CopyViewStatusToClipboard() { + glfwSetClipboardString(window_, GetViewStatus().c_str()); +} + +void Visualizer::CopyViewStatusFromClipboard() { + const char *clipboard_string_buffer = glfwGetClipboardString(window_); + if (clipboard_string_buffer != nullptr) { + std::string clipboard_string(clipboard_string_buffer); + SetViewStatus(clipboard_string); + } +} + +std::string Visualizer::GetViewStatus() { ViewParameters current_status; if (!view_control_ptr_->ConvertToViewParameters(current_status)) { - utility::LogWarning("Something is wrong copying view status."); + utility::LogWarning("Cannot convert to view parameters."); } ViewTrajectory trajectory; trajectory.view_status_.push_back(current_status); - std::string clipboard_string; - if (!io::WriteIJsonConvertibleToJSONString(clipboard_string, trajectory)) { - utility::LogWarning("Something is wrong copying view status."); + std::string view_status_str; + if (!io::WriteIJsonConvertibleToJSONString(view_status_str, trajectory)) { + utility::LogWarning("Cannot convert ViewTrajectory to json string."); } - glfwSetClipboardString(window_, clipboard_string.c_str()); + return view_status_str; } -void Visualizer::CopyViewStatusFromClipboard() { - const char *clipboard_string_buffer = glfwGetClipboardString(window_); - if (clipboard_string_buffer != NULL) { - std::string clipboard_string(clipboard_string_buffer); - ViewTrajectory trajectory; - if (!io::ReadIJsonConvertibleFromJSONString(clipboard_string, - trajectory)) { - utility::LogWarning("Something is wrong copying view status."); - } - if (trajectory.view_status_.size() != 1) { - utility::LogWarning("Something is wrong copying view status."); - } - view_control_ptr_->ConvertFromViewParameters( - trajectory.view_status_[0]); +void Visualizer::SetViewStatus(const std::string &view_status_str) { + ViewTrajectory trajectory; + if (!io::ReadIJsonConvertibleFromJSONString(view_status_str, trajectory)) { + utility::LogWarning("Cannot convert string to view status."); 
+ } + if (trajectory.view_status_.size() != 1) { + utility::LogWarning("Cannot convert string to view status."); } + view_control_ptr_->ConvertFromViewParameters(trajectory.view_status_[0]); } std::shared_ptr Visualizer::CaptureScreenFloatBuffer( diff --git a/cpp/open3d/visualization/webrtc_server/PeerConnectionManager.cpp b/cpp/open3d/visualization/webrtc_server/PeerConnectionManager.cpp index aef757932f9..7b61ac144f5 100644 --- a/cpp/open3d/visualization/webrtc_server/PeerConnectionManager.cpp +++ b/cpp/open3d/visualization/webrtc_server/PeerConnectionManager.cpp @@ -140,19 +140,19 @@ PeerConnectionManager::PeerConnectionManager( // Register api in http server. func_["/api/getMediaList"] = [this](const struct mg_request_info *req_info, const Json::Value &in) -> Json::Value { - utility::LogInfo("[Called HTTP API] /api/getMediaList"); + utility::LogDebug("[Called HTTP API] /api/getMediaList"); return this->GetMediaList(); }; func_["/api/getIceServers"] = [this](const struct mg_request_info *req_info, const Json::Value &in) -> Json::Value { - utility::LogInfo("[Called HTTP API] /api/getIceServers"); + utility::LogDebug("[Called HTTP API] /api/getIceServers"); return this->GetIceServers(); }; func_["/api/call"] = [this](const struct mg_request_info *req_info, const Json::Value &in) -> Json::Value { - utility::LogInfo("[Called HTTP API] /api/call"); + utility::LogDebug("[Called HTTP API] /api/call"); std::string peerid; std::string url; // window_uid. std::string options; @@ -167,7 +167,7 @@ PeerConnectionManager::PeerConnectionManager( func_["/api/getIceCandidate"] = [this](const struct mg_request_info *req_info, const Json::Value &in) -> Json::Value { - utility::LogInfo("[Called HTTP API] /api/getIceCandidate"); + utility::LogDebug("[Called HTTP API] /api/getIceCandidate"); std::string peerid; if (req_info->query_string) { CivetServer::getParam(req_info->query_string, "peerid", peerid); @@ -178,7 +178,7 @@ PeerConnectionManager::PeerConnectionManager( func_["/api/addIceCandidate"] = [this](const struct mg_request_info *req_info, const Json::Value &in) -> Json::Value { - utility::LogInfo("[Called HTTP API] /api/addIceCandidate"); + utility::LogDebug("[Called HTTP API] /api/addIceCandidate"); std::string peerid; if (req_info->query_string) { CivetServer::getParam(req_info->query_string, "peerid", peerid); @@ -188,7 +188,7 @@ PeerConnectionManager::PeerConnectionManager( func_["/api/hangup"] = [this](const struct mg_request_info *req_info, const Json::Value &in) -> Json::Value { - utility::LogInfo("[Called HTTP API] /api/hangup"); + utility::LogDebug("[Called HTTP API] /api/hangup"); std::string peerid; if (req_info->query_string) { CivetServer::getParam(req_info->query_string, "peerid", peerid); @@ -713,8 +713,8 @@ void PeerConnectionManager::SendInitFramesToPeer(const std::string &peerid) { void PeerConnectionManager::CloseWindowConnections( const std::string &window_uid) { - utility::LogInfo("PeerConnectionManager::CloseWindowConnections: {}", - window_uid); + utility::LogDebug("PeerConnectionManager::CloseWindowConnections: {}", + window_uid); std::set peerids; { std::lock_guard mlock(window_uid_to_peerids_mutex_); diff --git a/cpp/open3d/visualization/webrtc_server/PeerConnectionManager.h b/cpp/open3d/visualization/webrtc_server/PeerConnectionManager.h index 173f642fae2..ceb0fc9df74 100644 --- a/cpp/open3d/visualization/webrtc_server/PeerConnectionManager.h +++ b/cpp/open3d/visualization/webrtc_server/PeerConnectionManager.h @@ -204,7 +204,7 @@ class PeerConnectionManager { const 
std::string state = webrtc::DataChannelInterface::DataStateString( data_channel_->state()); - utility::LogInfo( + utility::LogDebug( "DataChannelObserver::OnStateChange label: {}, state: {}, " "peerid: {}", label, state, peerid_); diff --git a/cpp/open3d/visualization/webrtc_server/WebRTCWindowSystem.cpp b/cpp/open3d/visualization/webrtc_server/WebRTCWindowSystem.cpp index e83939ee82f..4440f0d865b 100644 --- a/cpp/open3d/visualization/webrtc_server/WebRTCWindowSystem.cpp +++ b/cpp/open3d/visualization/webrtc_server/WebRTCWindowSystem.cpp @@ -419,7 +419,7 @@ void WebRTCWindowSystem::SendInitFrames(const std::string &window_uid) { std::string WebRTCWindowSystem::CallHttpAPI(const std::string &entry_point, const std::string &query_string, const std::string &data) const { - utility::LogInfo("[Called HTTP API (custom handshake)] {}", entry_point); + utility::LogDebug("[Called HTTP API (custom handshake)] {}", entry_point); std::string query_string_trimmed = ""; if (!query_string.empty() && query_string[0] == '?') { diff --git a/cpp/pybind/CMakeLists.txt b/cpp/pybind/CMakeLists.txt index ca5bcf16198..6d7991d44e6 100644 --- a/cpp/pybind/CMakeLists.txt +++ b/cpp/pybind/CMakeLists.txt @@ -66,6 +66,7 @@ elseif (UNIX) # Linux };]=]) target_link_options(pybind PRIVATE $<$: "-Wl,--version-script=${CMAKE_CURRENT_BINARY_DIR}/pybind.map" >) + target_link_options(pybind PRIVATE "-flto=auto") endif() diff --git a/cpp/pybind/data/dataset.cpp b/cpp/pybind/data/dataset.cpp index f6e39d90163..6526088fc5c 100644 --- a/cpp/pybind/data/dataset.cpp +++ b/cpp/pybind/data/dataset.cpp @@ -1007,28 +1007,28 @@ void pybind_redwood_indoor_living_room1(py::module& m) { R"doc(RedwoodIndoorLivingRoom1 (Augmented ICL-NUIM Dataset) Data class for `RedwoodIndoorLivingRoom1`, containing dense point cloud, rgb sequence, clean depth sequence, noisy depth sequence, oni -sequence, and ground-truth camera trajectory. +sequence, and ground-truth camera trajectory. :: -RedwoodIndoorLivingRoom1 -├── colors -│  ├── 00000.jpg -│ ├── 00001.jpg -│ ├── ... -│ └── 02869.jpg -├── depth -│ ├── 00000.png -│ ├── 00001.png -│ ├── ... -│ └── 02869.png -├── depth_noisy -│ ├── 00000.png -│ ├── 00001.png -│ ├── ... -│ └── 02869.png -├── dist-model.txt -├── livingroom1.oni -├── livingroom1-traj.txt -└── livingroom.ply + RedwoodIndoorLivingRoom1 + |-- colors + | |-- 00000.jpg + | |-- 00001.jpg + | |-- ... + | '-- 02869.jpg + |-- depth + | |-- 00000.png + | |-- 00001.png + | |-- ... + | '-- 02869.png + |-- depth_noisy + | |-- 00000.png + | |-- 00001.png + | |-- ... + | '-- 02869.png + |-- dist-model.txt + |-- livingroom1.oni + |-- livingroom1-traj.txt + '-- livingroom.ply )doc"); dataset.def(py::init(), "data_root"_a = ""); dataset.def_property_readonly("point_cloud_path", @@ -1062,28 +1062,28 @@ void pybind_redwood_indoor_living_room2(py::module& m) { R"doc(RedwoodIndoorLivingRoom2 (Augmented ICL-NUIM Dataset) Data class for `RedwoodIndoorLivingRoom2`, containing dense point cloud, rgb sequence, clean depth sequence, noisy depth sequence, oni -sequence, and ground-truth camera trajectory. +sequence, and ground-truth camera trajectory. :: -RedwoodIndoorLivingRoom2 -├── colors -│  ├── 00000.jpg -│ ├── 00001.jpg -│ ├── ... -│ └── 02349.jpg -├── depth -│ ├── 00000.png -│ ├── 00001.png -│ ├── ... -│ └── 02349.png -├── depth_noisy -│ ├── 00000.png -│ ├── 00001.png -│ ├── ... 
-│ └── 02349.png -├── dist-model.txt -├── livingroom2.oni -├── livingroom2-traj.txt -└── livingroom.ply + RedwoodIndoorLivingRoom2 + |-- colors + | |-- 00000.jpg + | |-- 00001.jpg + | |-- ... + | '-- 02349.jpg + |-- depth + | |-- 00000.png + | |-- 00001.png + | |-- ... + | '-- 02349.png + |-- depth_noisy + | |-- 00000.png + | |-- 00001.png + | |-- ... + | '-- 02349.png + |-- dist-model.txt + |-- livingroom2.oni + |-- livingroom2-traj.txt + '-- livingroom.ply )doc"); dataset.def(py::init(), "data_root"_a = ""); dataset.def_property_readonly("point_cloud_path", @@ -1116,28 +1116,28 @@ void pybind_redwood_indoor_office1(py::module& m) { R"doc(RedwoodIndoorOffice1 (Augmented ICL-NUIM Dataset) Data class for `RedwoodIndoorOffice1`, containing dense point cloud, rgb sequence, clean depth sequence, noisy depth sequence, oni -sequence, and ground-truth camera trajectory. +sequence, and ground-truth camera trajectory. :: -RedwoodIndoorOffice1 -├── colors -│  ├── 00000.jpg -│ ├── 00001.jpg -│ ├── ... -│ └── 02689.jpg -├── depth -│ ├── 00000.png -│ ├── 00001.png -│ ├── ... -│ └── 02689.png -├── depth_noisy -│ ├── 00000.png -│ ├── 00001.png -│ ├── ... -│ └── 02689.png -├── dist-model.txt -├── office1.oni -├── office1-traj.txt -└── office.ply + RedwoodIndoorOffice1 + |-- colors + | |-- 00000.jpg + | |-- 00001.jpg + | |-- ... + | '-- 02689.jpg + |-- depth + | |-- 00000.png + | |-- 00001.png + | |-- ... + | '-- 02689.png + |-- depth_noisy + | |-- 00000.png + | |-- 00001.png + | |-- ... + | '-- 02689.png + |-- dist-model.txt + |-- office1.oni + |-- office1-traj.txt + '-- office.ply )doc"); dataset.def(py::init(), "data_root"_a = ""); dataset.def_property_readonly("point_cloud_path", @@ -1169,28 +1169,28 @@ void pybind_redwood_indoor_office2(py::module& m) { R"doc(RedwoodIndoorOffice2 (Augmented ICL-NUIM Dataset) Data class for `RedwoodIndoorOffice2`, containing dense point cloud, rgb sequence, clean depth sequence, noisy depth sequence, oni -sequence, and ground-truth camera trajectory. +sequence, and ground-truth camera trajectory. :: -RedwoodIndoorOffice2 -├── colors -│  ├── 00000.jpg -│ ├── 00001.jpg -│ ├── ... -│ └── 02537.jpg -├── depth -│ ├── 00000.png -│ ├── 00001.png -│ ├── ... -│ └── 02537.png -├── depth_noisy -│ ├── 00000.png -│ ├── 00001.png -│ ├── ... -│ └── 02537.png -├── dist-model.txt -├── office2.oni -├── office2-traj.txt -└── office.ply + RedwoodIndoorOffice2 + |-- colors + | |-- 00000.jpg + | |-- 00001.jpg + | |-- ... + | '-- 02537.jpg + |-- depth + | |-- 00000.png + | |-- 00001.png + | |-- ... + | '-- 02537.png + |-- depth_noisy + | |-- 00000.png + | |-- 00001.png + | |-- ... 
+ | '-- 02537.png + |-- dist-model.txt + |-- office2.oni + |-- office2-traj.txt + '-- office.ply )doc"); dataset.def(py::init(), "data_root"_a = ""); dataset.def_property_readonly("point_cloud_path", diff --git a/cpp/pybind/geometry/image.cpp b/cpp/pybind/geometry/image.cpp index 0a18be65e7f..70e63184e64 100644 --- a/cpp/pybind/geometry/image.cpp +++ b/cpp/pybind/geometry/image.cpp @@ -86,6 +86,13 @@ void pybind_image(py::module &m) { } height = (int)info.shape[0]; width = (int)info.shape[1]; + if (info.strides[1] != num_of_channels * bytes_per_channel || + info.strides[0] != + width * num_of_channels * bytes_per_channel) { + throw std::runtime_error( + "Image can only be initialized from a contiguous " + "buffer."); + } auto img = new Image(); img->Prepare(width, height, num_of_channels, bytes_per_channel); memcpy(img->data_.data(), info.ptr, img->data_.size()); diff --git a/cpp/pybind/geometry/pointcloud.cpp b/cpp/pybind/geometry/pointcloud.cpp index d9e2ce935c6..2af8319e93d 100644 --- a/cpp/pybind/geometry/pointcloud.cpp +++ b/cpp/pybind/geometry/pointcloud.cpp @@ -86,16 +86,16 @@ void pybind_pointcloud(py::module &m) { "num_samples"_a) .def("crop", (std::shared_ptr(PointCloud::*)( - const AxisAlignedBoundingBox &) const) & + const AxisAlignedBoundingBox &, bool) const) & PointCloud::Crop, "Function to crop input pointcloud into output pointcloud", - "bounding_box"_a) + "bounding_box"_a, "invert"_a = false) .def("crop", (std::shared_ptr(PointCloud::*)( - const OrientedBoundingBox &) const) & + const OrientedBoundingBox &, bool) const) & PointCloud::Crop, "Function to crop input pointcloud into output pointcloud", - "bounding_box"_a) + "bounding_box"_a, "invert"_a = false) .def("remove_non_finite_points", &PointCloud::RemoveNonFinitePoints, "Removes all points from the point cloud that have a nan " "entry, or infinite entries. It also removes the " @@ -139,7 +139,7 @@ void pybind_pointcloud(py::module &m) { &PointCloud::OrientNormalsConsistentTangentPlane, "Function to orient the normals with respect to consistent " "tangent planes", - "k"_a) + "k"_a, "lambda"_a = 0.0, "cos_alpha_tol"_a = 1.0) .def("compute_point_cloud_distance", &PointCloud::ComputePointCloudDistance, "For each point in the source point cloud, compute the " @@ -289,7 +289,8 @@ camera. Given depth value d at (u, v) image coordinate, the corresponding 3d poi "number of points[0-1]"}}); docstring::ClassMethodDocInject( m, "PointCloud", "crop", - {{"bounding_box", "AxisAlignedBoundingBox to crop points"}}); + {{"bounding_box", "AxisAlignedBoundingBox to crop points"}, + {"invert", "optional boolean to invert cropping"}}); docstring::ClassMethodDocInject( m, "PointCloud", "remove_non_finite_points", {{"remove_nan", "Remove NaN values from the PointCloud"}, diff --git a/cpp/pybind/geometry/voxelgrid.cpp b/cpp/pybind/geometry/voxelgrid.cpp index 7abe1872e91..0528979b0f3 100644 --- a/cpp/pybind/geometry/voxelgrid.cpp +++ b/cpp/pybind/geometry/voxelgrid.cpp @@ -77,6 +77,10 @@ void pybind_voxelgrid(py::module &m) { "Returns ``True`` if the voxel grid contains voxels.") .def("get_voxel", &VoxelGrid::GetVoxel, "point"_a, "Returns voxel index given query point.") + .def("add_voxel", &VoxelGrid::AddVoxel, "voxel"_a, + "Add a new voxel into the VoxelGrid.") + .def("remove_voxel", &VoxelGrid::RemoveVoxel, "idx"_a, + "Remove a voxel given index.") .def("check_if_included", &VoxelGrid::CheckIfIncluded, "queries"_a, "Element-wise check if a query in the list is included in " "the VoxelGrid. 
Queries are double precision and " @@ -155,6 +159,11 @@ void pybind_voxelgrid(py::module &m) { docstring::ClassMethodDocInject(m, "VoxelGrid", "has_voxels"); docstring::ClassMethodDocInject(m, "VoxelGrid", "get_voxel", {{"point", "The query point."}}); + docstring::ClassMethodDocInject(m, "VoxelGrid", "add_voxel", + {{"Voxel", "A new voxel."}}); + docstring::ClassMethodDocInject( + m, "VoxelGrid", "remove_voxel", + {{"idx", "The grid index of the target voxel."}}); docstring::ClassMethodDocInject( m, "VoxelGrid", "check_if_included", {{"query", "a list of voxel indices to check."}}); diff --git a/cpp/pybind/make_install_pip_package.cmake b/cpp/pybind/make_install_pip_package.cmake index a5c9fcdd05c..98754677d11 100644 --- a/cpp/pybind/make_install_pip_package.cmake +++ b/cpp/pybind/make_install_pip_package.cmake @@ -4,5 +4,5 @@ # Note: Since `make python-package` clears PYTHON_COMPILED_MODULE_DIR every time, # it is guaranteed that there is only one wheel in ${PYTHON_PACKAGE_DST_DIR}/pip_package/*.whl file(GLOB WHEEL_FILE "${PYTHON_PACKAGE_DST_DIR}/pip_package/*.whl") -execute_process(COMMAND ${Python3_EXECUTABLE} -m pip uninstall open3d --yes) +execute_process(COMMAND ${Python3_EXECUTABLE} -m pip uninstall open3d open3d-cpu --yes) execute_process(COMMAND ${Python3_EXECUTABLE} -m pip install ${WHEEL_FILE} -U) diff --git a/cpp/pybind/pipelines/registration/feature.cpp b/cpp/pybind/pipelines/registration/feature.cpp index 1385b2b0fc4..bdc4b5d8e51 100644 --- a/cpp/pybind/pipelines/registration/feature.cpp +++ b/cpp/pybind/pipelines/registration/feature.cpp @@ -53,6 +53,23 @@ void pybind_feature_methods(py::module &m) { m, "compute_fpfh_feature", {{"input", "The Input point cloud."}, {"search_param", "KDTree KNN search parameter."}}); + + m.def("correspondences_from_features", &CorrespondencesFromFeatures, + "Function to find nearest neighbor correspondences from features", + "source_features"_a, "target_features"_a, "mutual_filter"_a = false, + "mutual_consistency_ratio"_a = 0.1f); + docstring::FunctionDocInject( + m, "correspondences_from_features", + {{"source_features", "The source features stored in (dim, N)."}, + {"target_features", "The target features stored in (dim, M)."}, + {"mutual_filter", + "filter correspondences and return the collection of (i, j) s.t. " + "source_features[i] and target_features[j] are mutually the " + "nearest neighbor."}, + {"mutual_consistency_ratio", + "Threshold to decide whether the number of filtered " + "correspondences is sufficient. Only used when mutual_filter is " + "enabled."}}); } } // namespace registration diff --git a/cpp/pybind/t/geometry/boundingvolume.cpp b/cpp/pybind/t/geometry/boundingvolume.cpp index 7505e02ae52..946fb9617df 100644 --- a/cpp/pybind/t/geometry/boundingvolume.cpp +++ b/cpp/pybind/t/geometry/boundingvolume.cpp @@ -197,7 +197,7 @@ The scaling center will be the box center if it is not specified.)", m, "AxisAlignedBoundingBox", "create_from_points", {{"points", "A list of points with data type of float32 or float64 (N x 3 " - "tensor, where N must be larger than 3)."}}); + "tensor)."}}); py::class_, std::shared_ptr, Geometry, DrawableGeometry> diff --git a/cpp/pybind/t/geometry/pointcloud.cpp b/cpp/pybind/t/geometry/pointcloud.cpp index cc7f98eca59..a36813fd353 100644 --- a/cpp/pybind/t/geometry/pointcloud.cpp +++ b/cpp/pybind/t/geometry/pointcloud.cpp @@ -72,8 +72,8 @@ The attributes of the point cloud have different levels:: # The shape must be (N, 3). The device of "positions" determines the device # of the point cloud. 
pcd.point.positions = o3d.core.Tensor([[0, 0, 0], - [1, 1, 1], - [2, 2, 2]], dtype, device) + [1, 1, 1], + [2, 2, 2]], dtype, device) # Common attributes: "normals", "colors". # Common attributes are used in built-in point cloud operations. The @@ -82,11 +82,11 @@ The attributes of the point cloud have different levels:: # "normals" and "colors" must have shape (N, 3) and must be on the same # device as the point cloud. pcd.point.normals = o3d.core.Tensor([[0, 0, 1], - [0, 1, 0], - [1, 0, 0]], dtype, device) + [0, 1, 0], + [1, 0, 0]], dtype, device) pcd.point.colors = o3d.core.Tensor([[0.0, 0.0, 0.0], - [0.1, 0.1, 0.1], - [0.2, 0.2, 0.2]], dtype, device) + [0.1, 0.1, 0.1], + [0.2, 0.2, 0.2]], dtype, device) # User-defined attributes. # You can also attach custom attributes. The value tensor must be on the @@ -211,12 +211,32 @@ The attributes of the point cloud have different levels:: "output point cloud."); pointcloud.def( "voxel_down_sample", - [](const PointCloud& pointcloud, const double voxel_size) { - return pointcloud.VoxelDownSample( - voxel_size, core::HashBackendType::Default); + [](const PointCloud& pointcloud, const double voxel_size, + const std::string& reduction) { + return pointcloud.VoxelDownSample(voxel_size, reduction); }, - "Downsamples a point cloud with a specified voxel size.", - "voxel_size"_a); + "Downsamples a point cloud with a specified voxel size and a " + "reduction type.", + "voxel_size"_a, "reduction"_a = "mean", + R"doc(Downsamples a point cloud with a specified voxel size. + +Args: + voxel_size (float): The size of the voxel used to downsample the point cloud. + + reduction (str): The approach to pool point properties in a voxel. Can only be "mean" at current. + +Return: + A downsampled point cloud with point properties reduced in each voxel. + +Example: + + We will load the Eagle dataset, downsample it, and show the result:: + + eagle = o3d.data.EaglePointCloud() + pcd = o3d.t.io.read_point_cloud(eagle.path) + pcd_down = pcd.voxel_down_sample(voxel_size=0.05) + o3d.visualization.draw([{'name': 'pcd', 'geometry': pcd}, {'name': 'pcd_down', 'geometry': pcd_down}]) + )doc"); pointcloud.def("uniform_down_sample", &PointCloud::UniformDownSample, "Downsamples a point cloud by selecting every kth index " "point and its attributes.", @@ -239,8 +259,8 @@ The attributes of the point cloud have different levels:: sphere of a given search radius. Args: - nb_points. Number of neighbor points required within the radius. - search_radius. Radius of the sphere. + nb_points: Number of neighbor points required within the radius. + search_radius: Radius of the sphere. Return: Tuple of filtered point cloud and boolean mask tensor for selected values @@ -253,8 +273,8 @@ sphere of a given search radius. neighbors in average. This function is not recommended to use on GPU. Args: - nb_neighbors. Number of neighbors around the target point. - std_ratio. Standard deviation ratio. + nb_neighbors: Number of neighbors around the target point. + std_ratio: Standard deviation ratio. Return: Tuple of filtered point cloud and boolean mask tensor for selected values @@ -269,8 +289,8 @@ neighbors in average. This function is not recommended to use on GPU. infinite value. It also removes the corresponding attributes. Args: - remove_nan. Remove NaN values from the PointCloud. - remove_infinite. Remove infinite values from the PointCloud. + remove_nan: Remove NaN values from the PointCloud. + remove_infinite: Remove infinite values from the PointCloud. 
Return: Tuple of filtered point cloud and boolean mask tensor for selected values @@ -300,11 +320,70 @@ infinite value. It also removes the corresponding attributes. "Function to orient the normals of a point cloud.", "camera_location"_a = core::Tensor::Zeros( {3}, core::Float32, core::Device("CPU:0"))); - pointcloud.def("orient_normals_consistent_tangent_plane", - &PointCloud::OrientNormalsConsistentTangentPlane, - "Function to orient the normals with respect to consistent " - "tangent planes.", - "k"_a); + pointcloud.def( + "orient_normals_consistent_tangent_plane", + &PointCloud::OrientNormalsConsistentTangentPlane, "k"_a, + "lambda"_a = 0.0, "cos_alpha_tol"_a = 1.0, + R"(Function to consistently orient the normals of a point cloud based on tangent planes. + +The algorithm is described in Hoppe et al., "Surface Reconstruction from Unorganized Points", 1992. +Additional information about the choice of lambda and cos_alpha_tol for complex +point clouds can be found in Piazza, Valentini, Varetti, "Mesh Reconstruction from Point Cloud", 2023 +(https://eugeniovaretti.github.io/meshreco/Piazza_Valentini_Varetti_MeshReconstructionFromPointCloud_2023.pdf). + +Args: + k (int): Number of neighbors to use for tangent plane estimation. + lambda (float): A non-negative real parameter that influences the distance + metric used to identify the true neighbors of a point in complex + geometries. It penalizes the distance between a point and the tangent + plane defined by the reference point and its normal vector, helping to + mitigate misclassification issues encountered with traditional + Euclidean distance metrics. + cos_alpha_tol (float): Cosine threshold angle used to determine the + inclusion boundary of neighbors based on the direction of the normal + vector. + +Example: + We use Bunny point cloud to compute its normals and orient them consistently. + The initial reconstruction adheres to Hoppe's algorithm (raw), whereas the + second reconstruction utilises the lambda and cos_alpha_tol parameters. + Due to the high density of the Bunny point cloud available in Open3D a larger + value of the parameter k is employed to test the algorithm. 
Usually you do + not have such refined point clouds at your disposal, and thus cannot find a + proper choice of k; refer to + https://eugeniovaretti.github.io/meshreco for these cases.::
+ + import open3d as o3d + import numpy as np + # Load point cloud + data = o3d.data.BunnyMesh() + + # Case 1, Hoppe (raw): + pcd = o3d.io.read_point_cloud(data.path) + + # Compute normals and orient them consistently, using k=100 neighbours + pcd.estimate_normals() + pcd.orient_normals_consistent_tangent_plane(100) + + # Create mesh from point cloud using Poisson Algorithm + poisson_mesh = o3d.geometry.TriangleMesh.create_from_point_cloud_poisson(pcd, depth=8, width=0, scale=1.1, linear_fit=False)[0] + poisson_mesh.paint_uniform_color(np.array([[0.5],[0.5],[0.5]])) + poisson_mesh.compute_vertex_normals() + o3d.visualization.draw_geometries([poisson_mesh])
+ + # Case 2, reconstruction using lambda and cos_alpha_tol parameters: + pcd_robust = o3d.io.read_point_cloud(data.path) + + # Compute normals and orient them consistently, using k=100 neighbours + pcd_robust.estimate_normals() + pcd_robust.orient_normals_consistent_tangent_plane(100, 10, 0.5) + + # Create mesh from point cloud using Poisson Algorithm + poisson_mesh_robust = o3d.geometry.TriangleMesh.create_from_point_cloud_poisson(pcd_robust, depth=8, width=0, scale=1.1, linear_fit=False)[0] + poisson_mesh_robust.paint_uniform_color(np.array([[0.5],[0.5],[0.5]])) + poisson_mesh_robust.compute_vertex_normals() + + o3d.visualization.draw_geometries([poisson_mesh_robust]) )");
pointcloud.def( "estimate_color_gradients", &PointCloud::EstimateColorGradients, py::call_guard(), py::arg("max_nn") = 30, @@ -325,7 +404,7 @@ infinite value. It also removes the corresponding attributes. "with_normals"_a = false, "Factory function to create a pointcloud (with only 'points') from " "a depth image and a camera model.\n\n Given depth value d at (u, " - "v) image coordinate, the corresponding 3d point is:\n z = d / " + "v) image coordinate, the corresponding 3d point is:\n\n z = d / " "depth_scale\n\n x = (u - cx) * z / fx\n\n y = (v - cy) * z / fy"); pointcloud.def_static( "create_from_rgbd_image", &PointCloud::CreateFromRGBDImage,
@@ -370,14 +449,16 @@ This is a wrapper for a CPU implementation and a copy of the point cloud data and resulting visible triangle mesh and indices will be made. Args: - camera_location. All points not visible from that location will be removed. - radius. The radius of the spherical projection. + camera_location: All points not visible from that location will be removed. + + radius: The radius of the spherical projection. Return: Tuple of visible triangle mesh and indices of visible points on the same device as the point cloud. Example: + We use the Armadillo mesh to compute the visible points from a given camera:: # Convert mesh to a point cloud and estimate dimensions.
@@ -406,15 +487,18 @@ with Noise', 1996. This is a wrapper for a CPU implementation and a copy of the point cloud data and resulting labels will be made. Args: - eps. Density parameter that is used to find neighbouring points. - min_points. Minimum number of points to form a cluster. - print_progress (default False). If 'True' the progress is visualized in the console. + eps: Density parameter that is used to find neighbouring points. + + min_points: Minimum number of points to form a cluster. + +print_progress (default False): If 'True' the progress is visualized in the console.
Return: A Tensor list of point labels on the same device as the point cloud, -1 indicates noise according to the algorithm. Example: + We use the Redwood dataset for demonstration:: import matplotlib.pyplot as plt
@@ -433,23 +517,25 @@ point cloud data and resulting labels will be made. pointcloud.def( "segment_plane", &PointCloud::SegmentPlane, "distance_threshold"_a = 0.01, "ransac_n"_a = 3, - "num_iterations"_a = 100, "probability"_a = 0.99999999, + "num_iterations"_a = 100, "probability"_a = 0.999, R"(Segments a plane in the point cloud using the RANSAC algorithm. This is a wrapper for a CPU implementation and a copy of the point cloud data and resulting plane model and inlier indices will be made. Args: - distance_threshold (default 0.01). Max distance a point can be from the plane - model, and still be considered an inlier. - ransac_n (default 3). Number of initial points to be considered inliers in each iteration. - num_iterations (default 100). Maximum number of iterations. - probability (default 0.99999999). Expected probability of finding the optimal plane. + distance_threshold (default 0.01): Max distance a point can be from the plane model, and still be considered an inlier. + + ransac_n (default 3): Number of initial points to be considered inliers in each iteration. + num_iterations (default 100): Maximum number of iterations. + + probability (default 0.999): Expected probability of finding the optimal plane. Return: - Tuple of the plane model ax + by + cz + d = 0 and the indices of + Tuple of the plane model `ax + by + cz + d = 0` and the indices of the plane inliers on the same device as the point cloud. Example: + We use the Redwood dataset to compute its plane model and inliers:: sample_pcd_data = o3d.data.PCDPointCloud()
@@ -467,16 +553,11 @@ resulting plane model and inlier indices will be made. R"doc(Compute the convex hull of a triangle mesh using qhull. This runs on the CPU. Args: - joggle_inputs (default False). Handle precision problems by - randomly perturbing the input data. Set to True if perturbing the input - iis acceptable but you need convex simplicial output. If False, - neighboring facets may be merged in case of precision problems. See - `QHull docs `__ for more - details. + joggle_inputs (default False): Handle precision problems by randomly perturbing the input data. Set to True if perturbing the input is acceptable but you need convex simplicial output. If False, neighboring facets may be merged in case of precision problems. See `QHull docs `__ for more details. Return: TriangleMesh representing the convex hull. This contains an - extra vertex property "point_indices" that contains the index of the + extra vertex property `point_indices` that contains the index of the corresponding vertex in the original mesh. Example:
@@ -495,9 +576,9 @@ The implementation is inspired by the PCL implementation. Reference: https://pointclouds.org/documentation/classpcl_1_1_boundary_estimation.html Args: - radius. Neighbor search radius parameter. - max_nn (default 30). Maximum number of neighbors to search. - angle_threshold (default 90.0). Angle threshold to decide if a point is on the boundary. + radius: Neighbor search radius parameter. + max_nn (default 30): Maximum number of neighbors to search. + angle_threshold (default 90.0): Angle threshold to decide if a point is on the boundary. Return: Tensor of boundary points and its boolean mask tensor. @@ -590,11 +671,6 @@ The implementation is inspired by the PCL implementation.
Reference: m, "PointCloud", "orient_normals_towards_camera_location", {{"camera_location", "Normals are oriented with towards the camera_location."}}); - docstring::ClassMethodDocInject( - m, "PointCloud", "orient_normals_consistent_tangent_plane", - {{"k", - "Number of k nearest neighbors used in constructing the " - "Riemannian graph used to propagate normal orientation."}}); docstring::ClassMethodDocInject( m, "PointCloud", "crop", {{"aabb", "AxisAlignedBoundingBox to crop points."}, @@ -655,6 +731,7 @@ The implementation is inspired by the PCL implementation. Reference: Example: This code generates a set of straight lines from a point cloud:: + import open3d as o3d import numpy as np pcd = o3d.t.geometry.PointCloud(np.random.rand(10,3)) diff --git a/cpp/pybind/t/geometry/raycasting_scene.cpp b/cpp/pybind/t/geometry/raycasting_scene.cpp index bdab66e02e1..e7f6dcecbd5 100644 --- a/cpp/pybind/t/geometry/raycasting_scene.cpp +++ b/cpp/pybind/t/geometry/raycasting_scene.cpp @@ -177,6 +177,102 @@ Computes the number of intersection of the rays with the scene. Returns: A tensor with the number of intersections. The shape is {..}. +)doc"); + + raycasting_scene.def("list_intersections", + &RaycastingScene::ListIntersections, "rays"_a, + "nthreads"_a = 0, R"doc( +Lists the intersections of the rays with the scene:: + + import open3d as o3d + import numpy as np + + # Create scene and add the monkey model. + scene = o3d.t.geometry.RaycastingScene() + d = o3d.data.MonkeyModel() + mesh = o3d.t.io.read_triangle_mesh(d.path) + mesh_id = scene.add_triangles(mesh) + + # Create a grid of rays covering the bounding box + bb_min = mesh.vertex['positions'].min(dim=0).numpy() + bb_max = mesh.vertex['positions'].max(dim=0).numpy() + x,y = np.linspace(bb_min, bb_max, num=10)[:,:2].T + xv, yv = np.meshgrid(x,y) + orig = np.stack([xv, yv, np.full_like(xv, bb_min[2]-1)], axis=-1).reshape(-1,3) + dest = orig + np.full(orig.shape, (0,0,2+bb_max[2]-bb_min[2]),dtype=np.float32) + rays = np.concatenate([orig, dest-orig], axis=-1).astype(np.float32) + + # Compute the ray intersections. + lx = scene.list_intersections(rays) + lx = {k:v.numpy() for k,v in lx.items()} + + # Calculate intersection coordinates using the primitive uvs and the mesh + v = mesh.vertex['positions'].numpy() + t = mesh.triangle['indices'].numpy() + tidx = lx['primitive_ids'] + uv = lx['primitive_uvs'] + w = 1 - np.sum(uv, axis=1) + c = \ + v[t[tidx, 1].flatten(), :] * uv[:, 0][:, None] + \ + v[t[tidx, 2].flatten(), :] * uv[:, 1][:, None] + \ + v[t[tidx, 0].flatten(), :] * w[:, None] + + # Calculate intersection coordinates using ray_ids + c = rays[lx['ray_ids']][:,:3] + rays[lx['ray_ids']][:,3:]*lx['t_hit'][...,None] + + # Visualize the rays and intersections. + lines = o3d.t.geometry.LineSet() + lines.point.positions = np.hstack([orig,dest]).reshape(-1,3) + lines.line.indices = np.arange(lines.point.positions.shape[0]).reshape(-1,2) + lines.line.colors = np.full((lines.line.indices.shape[0],3), (1,0,0)) + x = o3d.t.geometry.PointCloud(positions=c) + o3d.visualization.draw([mesh, lines, x], point_size=8) + + +Args: + rays (open3d.core.Tensor): A tensor with >=2 dims, shape {.., 6}, and Dtype + Float32 describing the rays; {..} can be any number of dimensions. + The last dimension must be 6 and has the format [ox, oy, oz, dx, dy, dz] + with [ox,oy,oz] as the origin and [dx,dy,dz] as the direction. It is not + necessary to normalize the direction although it should be normalised if + t_hit is to be calculated in coordinate units. 
+ + nthreads (int): The number of threads to use. Set to 0 for automatic. + +Returns: + The returned dictionary contains + + ray_splits + A tensor with ray intersection splits. Can be used to iterate over all intersections for each ray. The shape is {num_rays + 1}. + + ray_ids + A tensor with ray IDs. The shape is {num_intersections}. + + t_hit + A tensor with the distance to the hit. The shape is {num_intersections}. + + geometry_ids + A tensor with the geometry IDs. The shape is {num_intersections}. + + primitive_ids + A tensor with the primitive IDs, which corresponds to the triangle + index. The shape is {num_intersections}. + + primitive_uvs + A tensor with the barycentric coordinates of the intersection points within + the triangles. The shape is {num_intersections, 2}. + + +An example of using ray_splits:: + + ray_splits: [0, 2, 3, 6, 6, 8] # note that the length of this is num_rays+1 + t_hit: [t1, t2, t3, t4, t5, t6, t7, t8] + + for ray_id, (start, end) in enumerate(zip(ray_splits[:-1], ray_splits[1:])): + for i,t in enumerate(t_hit[start:end]): + print(f'ray {ray_id}, intersection {i} at {t}') + + )doc"); raycasting_scene.def("compute_closest_points", @@ -350,4 +446,4 @@ The value for invalid IDs } } // namespace geometry } // namespace t -} // namespace open3d +} // namespace open3d \ No newline at end of file diff --git a/cpp/pybind/t/geometry/trianglemesh.cpp b/cpp/pybind/t/geometry/trianglemesh.cpp index 2954495f5f7..06cacf404a5 100644 --- a/cpp/pybind/t/geometry/trianglemesh.cpp +++ b/cpp/pybind/t/geometry/trianglemesh.cpp @@ -897,12 +897,11 @@ the partition id for each face. R"(Returns a new mesh with the faces selected by a boolean mask. Args: - mask () mask (open3d.core.Tensor): A boolean mask with the shape (N) with N as the number of faces in the mesh. Returns: - A new mesh with the selected faces. + A new mesh with the selected faces. If the original mesh is empty, return an empty mesh. Example: @@ -924,6 +923,30 @@ the partition id for each face. o3d.visualization.draw(parts) +)"); + + triangle_mesh.def( + "select_by_index", &TriangleMesh::SelectByIndex, "indices"_a, + R"(Returns a new mesh with the vertices selected according to the indices list. +If an item from the indices list exceeds the max vertex number of the mesh +or has a negative value, it is ignored. + +Args: + indices (open3d.core.Tensor): An integer list of indices. Duplicates are + allowed, but ignored. Signed and unsigned integral types are accepted. + +Returns: + A new mesh with the selected vertices and faces built from these vertices. + If the original mesh is empty, return an empty mesh. + +Example: + + This code selects the top face of a box, which has indices [2, 3, 6, 7]:: + + import open3d as o3d + import numpy as np + box = o3d.t.geometry.TriangleMesh.create_box() + top_face = box.select_by_index([2, 3, 6, 7]) )"); } diff --git a/cpp/pybind/t/pipelines/registration/feature.cpp b/cpp/pybind/t/pipelines/registration/feature.cpp index dc46b21a188..74de409115d 100644 --- a/cpp/pybind/t/pipelines/registration/feature.cpp +++ b/cpp/pybind/t/pipelines/registration/feature.cpp @@ -35,6 +35,26 @@ parameter is provided, and Hybrid search (Recommended) if both are provided.)", {"radius", "[optional] Neighbor search radius parameter. 
[Recommended ~5x " "voxel size]"}}); + + m.def("correspondences_from_features", &CorrespondencesFromFeatures, + py::call_guard(), + R"(Function to query nearest neighbors of source_features in target_features.)", + "source_features"_a, "target_features"_a, "mutual_filter"_a = false, + "mutual_consistency_ratio"_a = 0.1f); + docstring::FunctionDocInject( + m, "correspondences_from_features", + {{"source_features", "The source features in shape (N, dim)."}, + {"target_features", "The target features in shape (M, dim)."}, + {"mutual_filter", + "filter correspondences and return the collection of (i, j) " + "s.t. " + "source_features[i] and target_features[j] are mutually the " + "nearest neighbor."}, + {"mutual_consistency_ratio", + "Threshold to decide whether the number of filtered " + "correspondences is sufficient. Only used when " + "mutual_filter is " + "enabled."}}); } } // namespace registration diff --git a/cpp/pybind/visualization/rendering/rendering.cpp b/cpp/pybind/visualization/rendering/rendering.cpp index b80590710f4..51619f5cc68 100644 --- a/cpp/pybind/visualization/rendering/rendering.cpp +++ b/cpp/pybind/visualization/rendering/rendering.cpp @@ -81,16 +81,22 @@ class PyOffscreenRenderer { void SetupCamera(float verticalFoV, const Eigen::Vector3f ¢er, const Eigen::Vector3f &eye, - const Eigen::Vector3f &up) { + const Eigen::Vector3f &up, + float nearClip = -1.0f, + float farClip = -1.0f) { float aspect = 1.0f; if (height_ > 0) { aspect = float(width_) / float(height_); } auto *camera = scene_->GetCamera(); - auto far_plane = - Camera::CalcFarPlane(*camera, scene_->GetBoundingBox()); - camera->SetProjection(verticalFoV, aspect, Camera::CalcNearPlane(), - far_plane, rendering::Camera::FovType::Vertical); + auto far_plane = farClip > 0.0 + ? farClip + : Camera::CalcFarPlane( + *camera, scene_->GetBoundingBox()); + camera->SetProjection( + verticalFoV, aspect, + nearClip > 0.0 ? 
nearClip : Camera::CalcNearPlane(), far_plane, + rendering::Camera::FovType::Vertical); camera->LookAt(center, eye, up); } @@ -164,10 +170,15 @@ void pybind_rendering_classes(py::module &m) { .def("setup_camera", py::overload_cast( + const Eigen::Vector3f &, float, float>( &PyOffscreenRenderer::SetupCamera), - "setup_camera(vertical_field_of_view, center, eye, up): " - "sets camera view using bounding box of current geometry") + "setup_camera(vertical_field_of_view, center, eye, up, " + "near_clip, far_clip): " + "sets camera view using bounding box of current geometry " + "if the near_clip and far_clip parameters are not set", + py::arg("verticalFoV"), py::arg("center"), py::arg("eye"), + py::arg("up"), py::arg("nearClip") = -1.0f, + py::arg("farClip") = -1.0f) .def("setup_camera", py::overload_cast( diff --git a/cpp/pybind/visualization/visualizer.cpp b/cpp/pybind/visualization/visualizer.cpp index cec58d606bc..7b76396c7c3 100644 --- a/cpp/pybind/visualization/visualizer.cpp +++ b/cpp/pybind/visualization/visualizer.cpp @@ -93,11 +93,9 @@ void pybind_visualizer(py::module &m) { "reset_bounding_box"_a = true) .def("clear_geometries", &Visualizer::ClearGeometries, "Function to clear geometries from the visualizer") - .def( - "get_view_control", - [](Visualizer &vis) { return vis.GetViewControl(); }, - "Function to retrieve the associated ``ViewControl``", - py::return_value_policy::reference_internal) + .def("get_view_control", &Visualizer::GetViewControl, + "Function to retrieve the associated ``ViewControl``", + py::return_value_policy::reference_internal) .def("get_render_option", &Visualizer::GetRenderOption, "Function to retrieve the associated ``RenderOption``", py::return_value_policy::reference_internal) @@ -119,7 +117,14 @@ void pybind_visualizer(py::module &m) { &Visualizer::CaptureDepthPointCloud, "Function to capture and save local point cloud", "filename"_a, "do_render"_a = false, "convert_to_world_coordinate"_a = false) - .def("get_window_name", &Visualizer::GetWindowName); + .def("get_window_name", &Visualizer::GetWindowName) + .def("get_view_status", &Visualizer::GetViewStatus, + "Get the current view status as a json string of " + "ViewTrajectory.") + .def("set_view_status", &Visualizer::SetViewStatus, + "Set the current view status from a json string of " + "ViewTrajectory.", + "view_status_str"_a); py::class_, diff --git a/cpp/tests/core/Tensor.cpp b/cpp/tests/core/Tensor.cpp index 46ca362735c..ad377274169 100644 --- a/cpp/tests/core/Tensor.cpp +++ b/cpp/tests/core/Tensor.cpp @@ -1415,6 +1415,40 @@ TEST_P(TensorPermuteDevicePairs, IndexSetBroadcast) { 0, 0, 0, 0, 20, 20, 20, 0, 0, 0, 0, 0})); } +TEST_P(TensorPermuteDevices, IndexAdd_) { + core::Device device = GetParam(); + + const int tensor_size = 100; + + // Test one: dst_t[np.array([0, 1, 2, 3, 4])] += np.array([1, 1, 1, 1, 1]) + { + core::Tensor index = + core::Tensor::Arange(0, tensor_size, 1, core::Int64, device); + core::Tensor src = + core::Tensor::Zeros({tensor_size}, core::Float32, device); + src.IndexAdd_( + /*dim=*/0, index, + core::Tensor::Ones({tensor_size}, core::Float32, device)); + EXPECT_TRUE(src.AllClose( + core::Tensor::Ones({tensor_size}, core::Float32, device))); + } + + // Test two: dst_t[np.array([0, 0, 0, 0, 0])] += np.array([1, 1, 1, 1, 1]) + { + core::Tensor index = + core::Tensor::Zeros({tensor_size}, core::Int64, device); + core::Tensor src = + core::Tensor::Zeros({tensor_size}, core::Float32, device); + src.IndexAdd_( + /*dim=*/0, index, + core::Tensor::Ones({tensor_size}, core::Float32, 
device)); + EXPECT_EQ(src[0].Item(), tensor_size); + EXPECT_TRUE(src.Slice(0, 1, tensor_size) + .AllClose(core::Tensor::Zeros( + {tensor_size - 1}, core::Float32, device))); + } +} + TEST_P(TensorPermuteDevices, Permute) { core::Device device = GetParam(); diff --git a/cpp/tests/geometry/PointCloud.cpp b/cpp/tests/geometry/PointCloud.cpp index 2777b7d68a3..21bfe5659bf 100644 --- a/cpp/tests/geometry/PointCloud.cpp +++ b/cpp/tests/geometry/PointCloud.cpp @@ -18,6 +18,7 @@ #include "open3d/io/ImageIO.h" #include "open3d/io/PinholeCameraTrajectoryIO.h" #include "open3d/io/PointCloudIO.h" +#include "open3d/utility/Random.h" #include "open3d/visualization/utility/DrawGeometry.h" #include "tests/Tests.h" @@ -981,6 +982,38 @@ TEST(PointCloud, Crop_OrientedBoundingBox) { })); } +TEST(PointCloud, Crop_AxisAlignedBoundingBox_Invert) { + geometry::AxisAlignedBoundingBox aabb({0, 0, 0}, {2, 2, 2}); + geometry::PointCloud pcd({{0, 0, 0}, + {2, 2, 2}, + {1, 1, 1}, + {1, 1, 2}, + {3, 1, 1}, + {-1, 1, 1}}); + pcd.normals_ = {{0, 0, 0}, {1, 0, 0}, {2, 0, 0}, + {3, 0, 0}, {4, 0, 0}, {5, 0, 0}}; + pcd.colors_ = {{0.0, 0.0, 0.0}, {0.1, 0.0, 0.0}, {0.2, 0.0, 0.0}, + {0.3, 0.0, 0.0}, {0.4, 0.0, 0.0}, {0.5, 0.0, 0.0}}; + pcd.covariances_ = { + 0.0 * Eigen::Matrix3d::Identity(), + 1.0 * Eigen::Matrix3d::Identity(), + 2.0 * Eigen::Matrix3d::Identity(), + 3.0 * Eigen::Matrix3d::Identity(), + 4.0 * Eigen::Matrix3d::Identity(), + 5.0 * Eigen::Matrix3d::Identity(), + }; + std::shared_ptr pc_crop = pcd.Crop(aabb, true); + ExpectEQ(pc_crop->points_, + std::vector({{3, 1, 1}, {-1, 1, 1}})); + ExpectEQ(pc_crop->normals_, + std::vector({{4, 0, 0}, {5, 0, 0}})); + ExpectEQ(pc_crop->colors_, + std::vector({{0.4, 0.0, 0.0}, {0.5, 0.0, 0.0}})); + ExpectEQ(pc_crop->covariances_, + std::vector({4.0 * Eigen::Matrix3d::Identity(), + 5.0 * Eigen::Matrix3d::Identity()})); +} + TEST(PointCloud, EstimateNormals) { geometry::PointCloud pcd({ {0, 0, 0}, @@ -1371,6 +1404,31 @@ TEST(PointCloud, SegmentPlaneSpecialCase) { EXPECT_ANY_THROW(pcd.SegmentPlane(0.01, 3, 10, 1.5)); } +TEST(PointCloud, SegmentPlaneDeterministic) { + geometry::PointCloud pcd; + data::PCDPointCloud pointcloud_pcd; + io::ReadPointCloud(pointcloud_pcd.GetPath(), pcd); + EXPECT_EQ(pcd.points_.size(), 113662); + + // Hard-coded test + Eigen::Vector4d plane_model; + std::vector inliers; + utility::random::Seed(0); + std::tie(plane_model, inliers) = pcd.SegmentPlane(0.01, 3, 1000, 1.0); + ExpectEQ(plane_model, Eigen::Vector4d(-0.06, -0.10, 0.99, -1.06), 0.1); + + // Test segment plane for 10 times with the same random seed. + for (int i = 0; i < 10; ++i) { + // Reset random seed. + utility::random::Seed(0); + Eigen::Vector4d plane_model_d; + std::vector inliers_d; + std::tie(plane_model_d, inliers_d) = + pcd.SegmentPlane(0.01, 3, 1000, 1.0); + ExpectEQ(plane_model, plane_model_d); + } +} + TEST(PointCloud, DetectPlanarPatches) { geometry::PointCloud pcd; data::PCDPointCloud pointcloud_pcd; diff --git a/cpp/tests/t/geometry/AxisAlignedBoundingBox.cpp b/cpp/tests/t/geometry/AxisAlignedBoundingBox.cpp index 333895493ba..54405b414ff 100644 --- a/cpp/tests/t/geometry/AxisAlignedBoundingBox.cpp +++ b/cpp/tests/t/geometry/AxisAlignedBoundingBox.cpp @@ -32,6 +32,7 @@ INSTANTIATE_TEST_SUITE_P( AxisAlignedBoundingBoxPermuteDevicePairs::TestCases())); TEST_P(AxisAlignedBoundingBoxPermuteDevices, ConstructorNoArg) { + using ::testing::AnyOf; t::geometry::AxisAlignedBoundingBox aabb; // Inherited from Geometry3D. 
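The hunks that follow update this test for two behaviour changes: ToString() now prints the min/max bounds (with compiler-dependent float formatting, hence the AnyOf matcher), and constructing a box with swapped min/max bounds no longer throws but logs a warning and still yields a valid box. A minimal Python sketch of the same behaviour, assuming the tensor-based class keeps its usual snake_case Python surface (o3d.t.geometry.AxisAlignedBoundingBox, get_box_points, Tensor.allclose)::

    import open3d as o3d
    import open3d.core as o3c

    # Default-constructed box; the repr is expected to include the bounds, e.g.
    # AxisAlignedBoundingBox[[0 0 0] - [0 0 0], Float32, CPU:0]
    aabb_default = o3d.t.geometry.AxisAlignedBoundingBox()
    print(aabb_default)

    min_bound = o3c.Tensor([-1.0, -1.0, -1.0])
    max_bound = o3c.Tensor([1.0, 1.0, 1.0])
    aabb = o3d.t.geometry.AxisAlignedBoundingBox(min_bound, max_bound)

    # Swapped bounds: no exception any more, only a warning; the resulting box
    # spans the same eight corner points as the correctly ordered one.
    aabb_swapped = o3d.t.geometry.AxisAlignedBoundingBox(max_bound, min_bound)
    assert aabb_swapped.get_box_points().allclose(aabb.get_box_points())
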
@@ -51,7 +52,11 @@ TEST_P(AxisAlignedBoundingBoxPermuteDevices, ConstructorNoArg) { EXPECT_EQ(aabb.GetDevice(), core::Device("CPU:0")); // Print Information. - EXPECT_EQ(aabb.ToString(), "AxisAlignedBoundingBox[Float32, CPU:0]"); + EXPECT_THAT( + aabb.ToString(), // Compiler dependent output + AnyOf("AxisAlignedBoundingBox[[0 0 0] - [0 0 0], Float32, CPU:0]", + "AxisAlignedBoundingBox[[0.0 0.0 0.0] - [0.0 0.0 0.0], " + "Float32, CPU:0]")); } TEST_P(AxisAlignedBoundingBoxPermuteDevices, Constructor) { @@ -60,10 +65,6 @@ TEST_P(AxisAlignedBoundingBoxPermuteDevices, Constructor) { core::Tensor min_bound = core::Tensor::Init({-1, -1, -1}, device); core::Tensor max_bound = core::Tensor::Init({1, 1, 1}, device); - // Attempt to construct with invalid min/max bound. - EXPECT_THROW(t::geometry::AxisAlignedBoundingBox(max_bound, min_bound), - std::runtime_error); - t::geometry::AxisAlignedBoundingBox aabb(min_bound, max_bound); // Public members. @@ -76,6 +77,11 @@ TEST_P(AxisAlignedBoundingBoxPermuteDevices, Constructor) { core::Tensor::Init({1, 1, 1}, device))); EXPECT_EQ(aabb.GetDevice(), device); + + // Attempt to construct with invalid min/max bound should create a valid + // bounding box with a warning. + t::geometry::AxisAlignedBoundingBox aabb_invalid(max_bound, min_bound); + EXPECT_TRUE(aabb_invalid.GetBoxPoints().AllClose(aabb.GetBoxPoints())); } TEST_P(AxisAlignedBoundingBoxPermuteDevicePairs, CopyDevice) { diff --git a/cpp/tests/t/geometry/OrientedBoundingBox.cpp b/cpp/tests/t/geometry/OrientedBoundingBox.cpp index d7450fabddb..95a0b4fe983 100644 --- a/cpp/tests/t/geometry/OrientedBoundingBox.cpp +++ b/cpp/tests/t/geometry/OrientedBoundingBox.cpp @@ -226,8 +226,8 @@ TEST_P(OrientedBoundingBoxPermuteDevices, Scale) { TEST_P(OrientedBoundingBoxPermuteDevices, GetBoxPoints) { core::Device device = GetParam(); - core::Tensor center = core::Tensor::Init({-1, -1, -1}, device); - core::Tensor extent = core::Tensor::Init({1.0, 1.0, 1.0}, device); + core::Tensor center = core::Tensor::Init({-1., -1., -1.}, device); + core::Tensor extent = core::Tensor::Init({0.0, 0.0, 1.0}, device); core::Tensor rotation = core::Tensor::Eye(3, core::Float32, device); t::geometry::OrientedBoundingBox obb(center, rotation, extent); @@ -235,14 +235,14 @@ TEST_P(OrientedBoundingBoxPermuteDevices, GetBoxPoints) { auto box_points = obb.GetBoxPoints(); EXPECT_TRUE( - box_points.AllClose(core::Tensor::Init({{-1.5, -1.5, -1.5}, - {-0.5, -1.5, -1.5}, - {-1.5, -0.5, -1.5}, - {-1.5, -1.5, -0.5}, - {-0.5, -0.5, -0.5}, - {-1.5, -0.5, -0.5}, - {-0.5, -1.5, -0.5}, - {-0.5, -0.5, -1.5}}, + box_points.AllClose(core::Tensor::Init({{-1.0, -1.0, -1.5}, + {-1.0, -1.0, -1.5}, + {-1.0, -1.0, -1.5}, + {-1.0, -1.0, -0.5}, + {-1.0, -1.0, -0.5}, + {-1.0, -1.0, -0.5}, + {-1.0, -1.0, -0.5}, + {-1.0, -1.0, -1.5}}, device))); } diff --git a/cpp/tests/t/geometry/PointCloud.cpp b/cpp/tests/t/geometry/PointCloud.cpp index 1134a33c472..0b4d7365de6 100644 --- a/cpp/tests/t/geometry/PointCloud.cpp +++ b/cpp/tests/t/geometry/PointCloud.cpp @@ -901,7 +901,7 @@ TEST_P(PointCloudPermuteDevices, VoxelDownSample) { device)); auto pcd_small_down = pcd_small.VoxelDownSample(1); EXPECT_TRUE(pcd_small_down.GetPointPositions().AllClose( - core::Tensor::Init({{0, 0, 0}}, device))); + core::Tensor::Init({{0.375, 0.375, 0.575}}, device))); } TEST_P(PointCloudPermuteDevices, UniformDownSample) { diff --git a/cpp/tests/t/geometry/TriangleMesh.cpp b/cpp/tests/t/geometry/TriangleMesh.cpp index 4e51b34e31d..d1e2d819eac 100644 --- 
a/cpp/tests/t/geometry/TriangleMesh.cpp +++ b/cpp/tests/t/geometry/TriangleMesh.cpp @@ -394,7 +394,9 @@ TEST_P(TriangleMeshPermuteDevices, FromLegacy) { Eigen::Vector2d(0.4, 0.5), Eigen::Vector2d(0.6, 0.7), Eigen::Vector2d(0.8, 0.9), Eigen::Vector2d(1.0, 1.1)}; - auto& mat = legacy_mesh.materials_["Mat1"]; + legacy_mesh.materials_.emplace_back(); + legacy_mesh.materials_.front().first = "Mat1"; + auto& mat = legacy_mesh.materials_.front().second; mat.baseColor = mat.baseColor.CreateRGB(1, 1, 1); core::Dtype float_dtype = core::Float32; @@ -497,8 +499,11 @@ TEST_P(TriangleMeshPermuteDevices, ToLegacy) { Pointwise(FloatEq(), {0.8, 0.9}), Pointwise(FloatEq(), {1.0, 1.1})})); - EXPECT_TRUE(legacy_mesh.materials_.count("Mat1") > 0); - auto& mat = legacy_mesh.materials_["Mat1"]; + auto mat_iterator = std::find_if( + legacy_mesh.materials_.begin(), legacy_mesh.materials_.end(), + [](const auto& pair) -> bool { return pair.first == "Mat1"; }); + EXPECT_TRUE(mat_iterator != legacy_mesh.materials_.end()); + auto& mat = mat_iterator->second; EXPECT_TRUE(Eigen::Vector4f(mat.baseColor.f4) == Eigen::Vector4f(1, 1, 1, 1)); EXPECT_TRUE(mat.baseMetallic == 0.0); @@ -941,5 +946,271 @@ TEST_P(TriangleMeshPermuteDevices, CreateMobius) { triangle_indices_custom)); } +TEST_P(TriangleMeshPermuteDevices, SelectFacesByMask) { + // check that an exception is thrown if the mesh is empty + t::geometry::TriangleMesh mesh_empty; + core::Tensor mask_empty = + core::Tensor::Zeros({12}, core::Bool, mesh_empty.GetDevice()); + core::Tensor mask_full = + core::Tensor::Ones({12}, core::Bool, mesh_empty.GetDevice()); + + // check completely empty mesh + EXPECT_TRUE(mesh_empty.SelectFacesByMask(mask_empty).IsEmpty()); + EXPECT_TRUE(mesh_empty.SelectFacesByMask(mask_full).IsEmpty()); + + // check mesh w/o triangles + core::Tensor cpu_vertices = + core::Tensor::Ones({2, 3}, core::Float32, mesh_empty.GetDevice()); + mesh_empty.SetVertexPositions(cpu_vertices); + EXPECT_TRUE(mesh_empty.SelectFacesByMask(mask_empty).IsEmpty()); + EXPECT_TRUE(mesh_empty.SelectFacesByMask(mask_full).IsEmpty()); + + // create box with normals, colors and labels defined. 
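+    // CreateBox() produces 8 vertices and 12 triangles, so the per-vertex
+    // attribute tensors below have 8 rows and the per-triangle tensors 12.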
+ t::geometry::TriangleMesh box = t::geometry::TriangleMesh::CreateBox(); + core::Tensor vertex_colors = core::Tensor::Init({{0.0, 0.0, 0.0}, + {1.0, 1.0, 1.0}, + {2.0, 2.0, 2.0}, + {3.0, 3.0, 3.0}, + {4.0, 4.0, 4.0}, + {5.0, 5.0, 5.0}, + {6.0, 6.0, 6.0}, + {7.0, 7.0, 7.0}}); + ; + core::Tensor vertex_labels = core::Tensor::Init({{0.0, 0.0, 0.0}, + {1.0, 1.0, 1.0}, + {2.0, 2.0, 2.0}, + {3.0, 3.0, 3.0}, + {4.0, 4.0, 4.0}, + {5.0, 5.0, 5.0}, + {6.0, 6.0, 6.0}, + {7.0, 7.0, 7.0}}) * + 10; + ; + core::Tensor triangle_labels = + core::Tensor::Init({{0.0, 0.0, 0.0}, + {1.0, 1.0, 1.0}, + {2.0, 2.0, 2.0}, + {3.0, 3.0, 3.0}, + {4.0, 4.0, 4.0}, + {5.0, 5.0, 5.0}, + {6.0, 6.0, 6.0}, + {7.0, 7.0, 7.0}, + {8.0, 8.0, 8.0}, + {9.0, 9.0, 9.0}, + {10.0, 10.0, 10.0}, + {11.0, 11.0, 11.0}}) * + 100; + box.SetVertexColors(vertex_colors); + box.SetVertexAttr("labels", vertex_labels); + box.ComputeTriangleNormals(); + box.SetTriangleAttr("labels", triangle_labels); + + // empty index list + EXPECT_TRUE(box.SelectFacesByMask(mask_empty).IsEmpty()); + + // set the expected value + core::Tensor expected_verts = core::Tensor::Init({{0.0, 0.0, 1.0}, + {1.0, 0.0, 1.0}, + {0.0, 1.0, 1.0}, + {1.0, 1.0, 1.0}}); + core::Tensor expected_vert_colors = + core::Tensor::Init({{2.0, 2.0, 2.0}, + {3.0, 3.0, 3.0}, + {6.0, 6.0, 6.0}, + {7.0, 7.0, 7.0}}); + core::Tensor expected_vert_labels = + core::Tensor::Init({{20.0, 20.0, 20.0}, + {30.0, 30.0, 30.0}, + {60.0, 60.0, 60.0}, + {70.0, 70.0, 70.0}}); + core::Tensor expected_tris = + core::Tensor::Init({{0, 1, 3}, {0, 3, 2}}); + core::Tensor tris_mask = + core::Tensor::Init({0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0}); + core::Tensor expected_tri_normals = + box.GetTriangleNormals().IndexGet({tris_mask}); + core::Tensor expected_tri_labels = core::Tensor::Init( + {{800.0, 800.0, 800.0}, {900.0, 900.0, 900.0}}); + + // check basic case + t::geometry::TriangleMesh selected = box.SelectFacesByMask(tris_mask); + + EXPECT_TRUE(selected.GetVertexPositions().AllClose(expected_verts)); + EXPECT_TRUE(selected.GetVertexColors().AllClose(expected_vert_colors)); + EXPECT_TRUE( + selected.GetVertexAttr("labels").AllClose(expected_vert_labels)); + EXPECT_TRUE(selected.GetTriangleIndices().AllClose(expected_tris)); + EXPECT_TRUE(selected.GetTriangleNormals().AllClose(expected_tri_normals)); + EXPECT_TRUE( + selected.GetTriangleAttr("labels").AllClose(expected_tri_labels)); + + // Check that initial mesh is unchanged. 
+ t::geometry::TriangleMesh box_untouched = + t::geometry::TriangleMesh::CreateBox(); + EXPECT_TRUE(box.GetVertexPositions().AllClose( + box_untouched.GetVertexPositions())); + EXPECT_TRUE(box.GetTriangleIndices().AllClose( + box_untouched.GetTriangleIndices())); +} + +TEST_P(TriangleMeshPermuteDevices, SelectByIndex) { + // check that an exception is thrown if the mesh is empty + t::geometry::TriangleMesh mesh_empty; + core::Tensor indices_empty = core::Tensor::Init({}); + + // check completely empty mesh + EXPECT_TRUE(mesh_empty.SelectByIndex(indices_empty).IsEmpty()); + EXPECT_TRUE(mesh_empty.SelectByIndex(core::Tensor::Init({0})) + .IsEmpty()); + + // check mesh w/o triangles + core::Tensor vertices_no_tris_orig = + core::Tensor::Ones({2, 3}, core::Float32, mesh_empty.GetDevice()); + core::Tensor expected_vertices_no_tris_orig = + core::Tensor::Ones({1, 3}, core::Float32, mesh_empty.GetDevice()); + mesh_empty.SetVertexPositions(vertices_no_tris_orig); + t::geometry::TriangleMesh selected_no_tris_orig = + mesh_empty.SelectByIndex(core::Tensor::Init({0})); + EXPECT_TRUE(selected_no_tris_orig.GetVertexPositions().AllClose( + expected_vertices_no_tris_orig)); + + // create box with normals, colors and labels defined. + t::geometry::TriangleMesh box = t::geometry::TriangleMesh::CreateBox(); + core::Tensor vertex_colors = core::Tensor::Init({{0.0, 0.0, 0.0}, + {1.0, 1.0, 1.0}, + {2.0, 2.0, 2.0}, + {3.0, 3.0, 3.0}, + {4.0, 4.0, 4.0}, + {5.0, 5.0, 5.0}, + {6.0, 6.0, 6.0}, + {7.0, 7.0, 7.0}}); + ; + core::Tensor vertex_labels = core::Tensor::Init({{0.0, 0.0, 0.0}, + {1.0, 1.0, 1.0}, + {2.0, 2.0, 2.0}, + {3.0, 3.0, 3.0}, + {4.0, 4.0, 4.0}, + {5.0, 5.0, 5.0}, + {6.0, 6.0, 6.0}, + {7.0, 7.0, 7.0}}) * + 10; + ; + core::Tensor triangle_labels = + core::Tensor::Init({{0.0, 0.0, 0.0}, + {1.0, 1.0, 1.0}, + {2.0, 2.0, 2.0}, + {3.0, 3.0, 3.0}, + {4.0, 4.0, 4.0}, + {5.0, 5.0, 5.0}, + {6.0, 6.0, 6.0}, + {7.0, 7.0, 7.0}, + {8.0, 8.0, 8.0}, + {9.0, 9.0, 9.0}, + {10.0, 10.0, 10.0}, + {11.0, 11.0, 11.0}}) * + 100; + box.SetVertexColors(vertex_colors); + box.SetVertexAttr("labels", vertex_labels); + box.ComputeTriangleNormals(); + box.SetTriangleAttr("labels", triangle_labels); + + // empty index list + EXPECT_TRUE(box.SelectByIndex(indices_empty).IsEmpty()); + + // set the expected value + core::Tensor expected_verts = core::Tensor::Init({{0.0, 0.0, 1.0}, + {1.0, 0.0, 1.0}, + {0.0, 1.0, 1.0}, + {1.0, 1.0, 1.0}}); + core::Tensor expected_vert_colors = + core::Tensor::Init({{2.0, 2.0, 2.0}, + {3.0, 3.0, 3.0}, + {6.0, 6.0, 6.0}, + {7.0, 7.0, 7.0}}); + core::Tensor expected_vert_labels = + core::Tensor::Init({{20.0, 20.0, 20.0}, + {30.0, 30.0, 30.0}, + {60.0, 60.0, 60.0}, + {70.0, 70.0, 70.0}}); + core::Tensor expected_tris = + core::Tensor::Init({{0, 1, 3}, {0, 3, 2}}); + core::Tensor tris_mask = + core::Tensor::Init({0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0}); + core::Tensor expected_tri_normals = + box.GetTriangleNormals().IndexGet({tris_mask}); + core::Tensor expected_tri_labels = core::Tensor::Init( + {{800.0, 800.0, 800.0}, {900.0, 900.0, 900.0}}); + + // check basic case + core::Tensor indices = core::Tensor::Init({2, 3, 6, 7}); + t::geometry::TriangleMesh selected_basic = box.SelectByIndex(indices); + + EXPECT_TRUE(selected_basic.GetVertexPositions().AllClose(expected_verts)); + EXPECT_TRUE( + selected_basic.GetVertexColors().AllClose(expected_vert_colors)); + EXPECT_TRUE(selected_basic.GetVertexAttr("labels").AllClose( + expected_vert_labels)); + 
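+    // Triangle indices are remapped to the compact vertex numbering of the
+    // selection; per-triangle attributes follow the two selected faces.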
EXPECT_TRUE(selected_basic.GetTriangleIndices().AllClose(expected_tris)); + EXPECT_TRUE( + selected_basic.GetTriangleNormals().AllClose(expected_tri_normals)); + EXPECT_TRUE(selected_basic.GetTriangleAttr("labels").AllClose( + expected_tri_labels)); + + // check duplicated indices case + core::Tensor indices_duplicate = + core::Tensor::Init({2, 2, 3, 3, 6, 7, 7}); + t::geometry::TriangleMesh selected_duplicate = + box.SelectByIndex(indices_duplicate); + EXPECT_TRUE( + selected_duplicate.GetVertexPositions().AllClose(expected_verts)); + EXPECT_TRUE(selected_duplicate.GetVertexColors().AllClose( + expected_vert_colors)); + EXPECT_TRUE(selected_duplicate.GetVertexAttr("labels").AllClose( + expected_vert_labels)); + EXPECT_TRUE( + selected_duplicate.GetTriangleIndices().AllClose(expected_tris)); + EXPECT_TRUE(selected_duplicate.GetTriangleNormals().AllClose( + expected_tri_normals)); + EXPECT_TRUE(selected_duplicate.GetTriangleAttr("labels").AllClose( + expected_tri_labels)); + + core::Tensor indices_negative = + core::Tensor::Init({2, -4, 3, 6, 7}); + t::geometry::TriangleMesh selected_negative = + box.SelectByIndex(indices_negative); + EXPECT_TRUE( + selected_negative.GetVertexPositions().AllClose(expected_verts)); + EXPECT_TRUE(selected_negative.GetTriangleIndices().AllClose(expected_tris)); + + // select with empty triangles as result + // set the expected value + core::Tensor expected_verts_no_tris = core::Tensor::Init( + {{0.0, 0.0, 0.0}, {1.0, 0.0, 1.0}, {0.0, 1.0, 0.0}}); + core::Tensor expected_vert_colors_no_tris = core::Tensor::Init( + {{0.0, 0.0, 0.0}, {3.0, 3.0, 3.0}, {4.0, 4.0, 4.0}}); + core::Tensor expected_vert_labels_no_tris = core::Tensor::Init( + {{0.0, 0.0, 0.0}, {30.0, 30.0, 30.0}, {40.0, 40.0, 40.0}}); + + core::Tensor indices_no_tris = core::Tensor::Init({0, 3, 4}); + t::geometry::TriangleMesh selected_no_tris = + box.SelectByIndex(indices_no_tris); + + EXPECT_TRUE(selected_no_tris.GetVertexPositions().AllClose( + expected_verts_no_tris)); + EXPECT_TRUE(selected_no_tris.GetVertexColors().AllClose( + expected_vert_colors_no_tris)); + EXPECT_TRUE(selected_no_tris.GetVertexAttr("labels").AllClose( + expected_vert_labels_no_tris)); + EXPECT_FALSE(selected_no_tris.HasTriangleIndices()); + + // check that initial mesh is unchanged + t::geometry::TriangleMesh box_untouched = + t::geometry::TriangleMesh::CreateBox(); + EXPECT_TRUE(box.GetVertexPositions().AllClose( + box_untouched.GetVertexPositions())); + EXPECT_TRUE(box.GetTriangleIndices().AllClose( + box_untouched.GetTriangleIndices())); +} + } // namespace tests } // namespace open3d diff --git a/cpp/tests/t/io/TriangleMeshIO.cpp b/cpp/tests/t/io/TriangleMeshIO.cpp index dbf2f828192..acfd8f1234f 100644 --- a/cpp/tests/t/io/TriangleMeshIO.cpp +++ b/cpp/tests/t/io/TriangleMeshIO.cpp @@ -71,6 +71,20 @@ TEST(TriangleMeshIO, ReadWriteTriangleMeshOBJ) { EXPECT_TRUE(mesh.GetTriangleIndices().AllClose(triangles)); } +TEST(TriangleMeshIO, ReadWriteTriangleMeshNPZ) { + auto cube_mesh = t::geometry::TriangleMesh::CreateBox(); + + const std::string filename = + utility::filesystem::GetTempDirectoryPath() + "/cube.npz"; + EXPECT_TRUE(t::io::WriteTriangleMesh(filename, cube_mesh)); + t::geometry::TriangleMesh mesh; + EXPECT_TRUE(t::io::ReadTriangleMesh(filename, mesh)); + EXPECT_TRUE( + mesh.GetVertexPositions().AllClose(cube_mesh.GetVertexPositions())); + EXPECT_TRUE( + mesh.GetTriangleIndices().AllClose(cube_mesh.GetTriangleIndices())); +} + // TODO: Add tests for triangle_uvs, materials, triangle_material_ids and // textures once 
these are supported. TEST(TriangleMeshIO, TriangleMeshLegecyCompatibility) { diff --git a/cpp/tests/t/pipelines/registration/Feature.cpp b/cpp/tests/t/pipelines/registration/Feature.cpp index 43c6a4e5ff3..f4eb9dffa6d 100644 --- a/cpp/tests/t/pipelines/registration/Feature.cpp +++ b/cpp/tests/t/pipelines/registration/Feature.cpp @@ -49,5 +49,62 @@ TEST_P(FeaturePermuteDevices, ComputeFPFHFeature) { 1e-4, 1e-4)); } +TEST_P(FeaturePermuteDevices, CorrespondencesFromFeatures) { + core::Device device = GetParam(); + + const float kVoxelSize = 0.05f; + const float kFPFHRadius = kVoxelSize * 5; + + t::geometry::PointCloud source_tpcd, target_tpcd; + data::DemoICPPointClouds pcd_fragments; + t::io::ReadPointCloud(pcd_fragments.GetPaths()[0], source_tpcd); + t::io::ReadPointCloud(pcd_fragments.GetPaths()[1], target_tpcd); + source_tpcd = source_tpcd.To(device).VoxelDownSample(kVoxelSize); + target_tpcd = target_tpcd.To(device).VoxelDownSample(kVoxelSize); + + auto t_source_fpfh = t::pipelines::registration::ComputeFPFHFeature( + source_tpcd, 100, kFPFHRadius); + auto t_target_fpfh = t::pipelines::registration::ComputeFPFHFeature( + target_tpcd, 100, kFPFHRadius); + + pipelines::registration::Feature source_fpfh, target_fpfh; + source_fpfh.data_ = + core::eigen_converter::TensorToEigenMatrixXd(t_source_fpfh.T()); + target_fpfh.data_ = + core::eigen_converter::TensorToEigenMatrixXd(t_target_fpfh.T()); + + for (auto mutual_filter : std::vector{true, false}) { + auto t_correspondences = + t::pipelines::registration::CorrespondencesFromFeatures( + t_source_fpfh, t_target_fpfh, mutual_filter); + + auto correspondences = + pipelines::registration::CorrespondencesFromFeatures( + source_fpfh, target_fpfh, mutual_filter); + + auto t_correspondence_idx = + t_correspondences.T().GetItem(core::TensorKey::Index(1)); + auto correspondence_idx = + core::eigen_converter::EigenVector2iVectorToTensor( + correspondences, core::Dtype::Int64, device) + .T() + .GetItem(core::TensorKey::Index(1)); + + // TODO(wei): mask.to(float).sum() has ISPC issues. Use advanced + // indexing instead. 
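+        // Without the mutual filter both implementations return one match per
+        // source point, so the matched target indices should agree almost
+        // everywhere; with the filter enabled only the number of surviving
+        // correspondences is compared.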
+ if (!mutual_filter) { + auto mask = t_correspondence_idx.Eq(correspondence_idx); + auto masked_idx = t_correspondence_idx.IndexGet({mask}); + float valid_ratio = float(masked_idx.GetLength()) / + float(t_correspondence_idx.GetLength()); + EXPECT_NEAR(valid_ratio, 1.0, 1e-2); + } else { + auto consistent_ratio = float(t_correspondence_idx.GetLength()) / + float(correspondences.size()); + EXPECT_NEAR(consistent_ratio, 1.0, 1e-2); + } + } +} + } // namespace tests } // namespace open3d diff --git a/docker/Dockerfile.ci b/docker/Dockerfile.ci index 5474b1aee58..dc6b1af2245 100644 --- a/docker/Dockerfile.ci +++ b/docker/Dockerfile.ci @@ -166,17 +166,16 @@ RUN curl -fsSL https://deb.nodesource.com/setup_16.x | bash - \ && yarn --version # Build all -RUN if [ "${BUILD_PYTORCH_OPS}" = "ON" ] || [ "${BUILD_TENSORFLOW_OPS}" = "ON" ]; then \ - export GLIBCXX_USE_CXX11_ABI=OFF; \ - else \ - export GLIBCXX_USE_CXX11_ABI=ON; \ - fi \ - && if [ "${BUILD_SYCL_MODULE}" = "ON" ]; then \ +RUN \ + if [ "${BUILD_SYCL_MODULE}" = "ON" ]; then \ export CMAKE_CXX_COMPILER=icpx; \ export CMAKE_C_COMPILER=icx; \ + export GLIBCXX_USE_CXX11_ABI=ON; \ else \ export CMAKE_CXX_COMPILER=g++; \ export CMAKE_C_COMPILER=gcc; \ + # TODO: PyTorch still use old CXX ABI, remove this line when PyTorch is updated + export GLIBCXX_USE_CXX11_ABI=OFF; \ fi \ && mkdir build \ && cd build \ @@ -201,7 +200,8 @@ RUN if [ "${BUILD_PYTORCH_OPS}" = "ON" ] || [ "${BUILD_TENSORFLOW_OPS}" = "ON" ] && make VERBOSE=1 -j$(nproc) \ && make install-pip-package -j$(nproc) \ && make install -j$(nproc) \ - && if [ "${PACKAGE}" = "ON" ]; then make package; fi + && if [ "${PACKAGE}" = "ON" ]; then make package; fi \ + && if [ "${PACKAGE}" = "VIEWER" ]; then make package-Open3DViewer-deb; fi # Compress ccache folder, move to / directory RUN ccache -s \ @@ -211,6 +211,7 @@ RUN ccache -s \ && cd ${CCACHE_DIR_PARENT} \ && tar -czf /${CCACHE_TAR_NAME}.tar.gz ${CCACHE_DIR_NAME} \ && if [ "${PACKAGE}" = "ON" ]; then mv /root/Open3D/build/package/open3d-devel*.tar.xz /; fi \ + && if [ "${PACKAGE}" = "VIEWER" ]; then mv /root/Open3D/build/package-Open3DViewer-deb/open3d-viewer-*-Linux.deb /; fi \ && ls -alh / RUN echo "Docker build done." 
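Returning to the Python surface: the correspondences_from_features binding added in cpp/pybind/t/pipelines/registration/feature.cpp, and exercised by the CorrespondencesFromFeatures test above, can be driven roughly as follows. This is a sketch only; the compute_fpfh_feature keyword names and the DemoICPPointClouds paths attribute are inferred from the C++ test rather than from the generated Python signatures::

    import open3d as o3d

    voxel_size = 0.05
    demo = o3d.data.DemoICPPointClouds()
    source = o3d.t.io.read_point_cloud(demo.paths[0]).voxel_down_sample(voxel_size)
    target = o3d.t.io.read_point_cloud(demo.paths[1]).voxel_down_sample(voxel_size)

    # 33-dimensional FPFH descriptors, using the same parameters as the test.
    source_fpfh = o3d.t.pipelines.registration.compute_fpfh_feature(
        source, max_nn=100, radius=5 * voxel_size)
    target_fpfh = o3d.t.pipelines.registration.compute_fpfh_feature(
        target, max_nn=100, radius=5 * voxel_size)

    # Nearest-neighbour matches in feature space. With mutual_filter=True only
    # pairs that are mutually nearest neighbours are kept;
    # mutual_consistency_ratio decides whether that filtered set is considered
    # sufficient.
    correspondences = o3d.t.pipelines.registration.correspondences_from_features(
        source_fpfh, target_fpfh, mutual_filter=True,
        mutual_consistency_ratio=0.1)
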
diff --git a/docker/Dockerfile.wheel b/docker/Dockerfile.wheel index 59d33503ab4..9b9fac01935 100644 --- a/docker/Dockerfile.wheel +++ b/docker/Dockerfile.wheel @@ -1,5 +1,5 @@ # FROM must be called before other ARGS except for ARG BASE_IMAGE -ARG BASE_IMAGE=nvidia/cuda:11.6.2-cudnn8-devel-ubuntu18.04 +ARG BASE_IMAGE=nvidia/cuda:11.7.1-cudnn8-devel-ubuntu18.04 FROM ${BASE_IMAGE} # Customizable build arguments from cuda.yml @@ -8,14 +8,18 @@ ARG CCACHE_TAR_NAME ARG CMAKE_VERSION ARG CCACHE_VERSION ARG PYTHON_VERSION +ARG BUILD_TENSORFLOW_OPS +ARG BUILD_PYTORCH_OPS # Forward all ARG to ENV # ci_utils.sh requires these environment variables ENV DEVELOPER_BUILD=${DEVELOPER_BUILD} ENV CCACHE_TAR_NAME=${CCACHE_TAR_NAME} ENV CMAKE_VERSION=${CMAKE_VERSION} -ARG CCACHE_VERSION=${CCACHE_VERSION} +ENV CCACHE_VERSION=${CCACHE_VERSION} ENV PYTHON_VERSION=${PYTHON_VERSION} +ENV BUILD_PYTORCH_OPS=${BUILD_PYTORCH_OPS} +ENV BUILD_TENSORFLOW_OPS=${BUILD_TENSORFLOW_OPS} # Prevent interactive inputs when installing packages ENV DEBIAN_FRONTEND=noninteractive @@ -125,8 +129,6 @@ WORKDIR /root/Open3D # Build python wheel RUN export NPROC=$(nproc) \ && export BUILD_SHARED_LIBS=OFF \ - && export BUILD_TENSORFLOW_OPS=ON \ - && export BUILD_PYTORCH_OPS=ON \ && source /root/Open3D/util/ci_utils.sh \ && build_pip_package build_azure_kinect build_jupyter diff --git a/docker/README.md b/docker/README.md index 89c5d34cabf..d14d9521c19 100644 --- a/docker/README.md +++ b/docker/README.md @@ -26,7 +26,7 @@ to install Nvidia Docker to run the CUDA container. To verify that the Nvidia Docker is working, run: ```bash -docker run --rm --gpus all nvidia/cuda:11.0-base nvidia-smi +docker run --rm --gpus all nvidia/cuda:11.7-base nvidia-smi ``` ### ARM64 Docker diff --git a/docker/docker_build.sh b/docker/docker_build.sh index c30d6571788..b0b405d9de2 100755 --- a/docker/docker_build.sh +++ b/docker/docker_build.sh @@ -23,24 +23,24 @@ __usage_docker_build="USAGE: OPTION: # OpenBLAS AMD64 (Dockerfile.openblas) - openblas-amd64-py37-dev : OpenBLAS AMD64 3.7 wheel, developer mode openblas-amd64-py38-dev : OpenBLAS AMD64 3.8 wheel, developer mode openblas-amd64-py39-dev : OpenBLAS AMD64 3.9 wheel, developer mode openblas-amd64-py310-dev : OpenBLAS AMD64 3.10 wheel, developer mode - openblas-amd64-py37 : OpenBLAS AMD64 3.7 wheel, release mode + openblas-amd64-py311-dev : OpenBLAS AMD64 3.11 wheel, developer mode openblas-amd64-py38 : OpenBLAS AMD64 3.8 wheel, release mode openblas-amd64-py39 : OpenBLAS AMD64 3.9 wheel, release mode openblas-amd64-py310 : OpenBLAS AMD64 3.10 wheel, release mode + openblas-amd64-py311 : OpenBLAS AMD64 3.11 wheel, release mode # OpenBLAS ARM64 (Dockerfile.openblas) - openblas-arm64-py37-dev : OpenBLAS ARM64 3.7 wheel, developer mode openblas-arm64-py38-dev : OpenBLAS ARM64 3.8 wheel, developer mode openblas-arm64-py39-dev : OpenBLAS ARM64 3.9 wheel, developer mode openblas-arm64-py310-dev : OpenBLAS ARM64 3.10 wheel, developer mode - openblas-arm64-py37 : OpenBLAS ARM64 3.7 wheel, release mode + openblas-arm64-py311-dev : OpenBLAS ARM64 3.11 wheel, developer mode openblas-arm64-py38 : OpenBLAS ARM64 3.8 wheel, release mode openblas-arm64-py39 : OpenBLAS ARM64 3.9 wheel, release mode openblas-arm64-py310 : OpenBLAS ARM64 3.10 wheel, release mode + openblas-arm64-py311 : OpenBLAS ARM64 3.11 wheel, release mode # Ubuntu CPU CI (Dockerfile.ci) cpu-static : Ubuntu CPU static @@ -62,14 +62,14 @@ OPTION: 5-ml-focal : CUDA CI, 5-ml-focal, developer mode # CUDA wheels (Dockerfile.wheel) - cuda_wheel_py37_dev : 
CUDA Python 3.7 wheel, developer mode cuda_wheel_py38_dev : CUDA Python 3.8 wheel, developer mode cuda_wheel_py39_dev : CUDA Python 3.9 wheel, developer mode cuda_wheel_py310_dev : CUDA Python 3.10 wheel, developer mode - cuda_wheel_py37 : CUDA Python 3.7 wheel, release mode + cuda_wheel_py311_dev : CUDA Python 3.11 wheel, developer mode cuda_wheel_py38 : CUDA Python 3.8 wheel, release mode cuda_wheel_py39 : CUDA Python 3.9 wheel, release mode cuda_wheel_py310 : CUDA Python 3.10 wheel, release mode + cuda_wheel_py311 : CUDA Python 3.11 wheel, release mode " HOST_OPEN3D_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")"/.. >/dev/null 2>&1 && pwd)" @@ -78,6 +78,7 @@ HOST_OPEN3D_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")"/.. >/dev/null 2>&1 && pw CCACHE_VERSION=4.3 CMAKE_VERSION=cmake-3.20.6-linux-x86_64 CMAKE_VERSION_AARCH64=cmake-3.20.6-linux-aarch64 +CUDA_VERSION=11.7.1-cudnn8 print_usage_and_exit_docker_build() { echo "$__usage_docker_build" @@ -114,10 +115,7 @@ openblas_export_env() { print_usage_and_exit_docker_build fi - if [[ "py37" =~ ^($options)$ ]]; then - export PYTHON_VERSION=3.7 - export DOCKER_TAG=${DOCKER_TAG}-py37 - elif [[ "py38" =~ ^($options)$ ]]; then + if [[ "py38" =~ ^($options)$ ]]; then export PYTHON_VERSION=3.8 export DOCKER_TAG=${DOCKER_TAG}-py38 elif [[ "py39" =~ ^($options)$ ]]; then @@ -126,6 +124,9 @@ openblas_export_env() { elif [[ "py310" =~ ^($options)$ ]]; then export PYTHON_VERSION=3.10 export DOCKER_TAG=${DOCKER_TAG}-py310 + elif [[ "py311" =~ ^($options)$ ]]; then + export PYTHON_VERSION=3.11 + export DOCKER_TAG=${DOCKER_TAG}-py311 else echo "Invalid python version." print_usage_and_exit_docker_build @@ -168,19 +169,19 @@ openblas_build() { } cuda_wheel_build() { - BASE_IMAGE=nvidia/cuda:11.6.2-cudnn8-devel-ubuntu18.04 + BASE_IMAGE=nvidia/cuda:${CUDA_VERSION}-devel-ubuntu18.04 CCACHE_TAR_NAME=open3d-ubuntu-1804-cuda-ci-ccache options="$(echo "$@" | tr ' ' '|')" echo "[cuda_wheel_build()] options: ${options}" - if [[ "py37" =~ ^($options)$ ]]; then - PYTHON_VERSION=3.7 - elif [[ "py38" =~ ^($options)$ ]]; then + if [[ "py38" =~ ^($options)$ ]]; then PYTHON_VERSION=3.8 elif [[ "py39" =~ ^($options)$ ]]; then PYTHON_VERSION=3.9 elif [[ "py310" =~ ^($options)$ ]]; then PYTHON_VERSION=3.10 + elif [[ "py311" =~ ^($options)$ ]]; then + PYTHON_VERSION=3.11 else echo "Invalid python version." print_usage_and_exit_docker_build @@ -192,6 +193,8 @@ cuda_wheel_build() { fi echo "[cuda_wheel_build()] PYTHON_VERSION: ${PYTHON_VERSION}" echo "[cuda_wheel_build()] DEVELOPER_BUILD: ${DEVELOPER_BUILD}" + echo "[cuda_wheel_build()] BUILD_TENSORFLOW_OPS=${BUILD_TENSORFLOW_OPS:?'env var must be set.'}" + echo "[cuda_wheel_build()] BUILD_PYTORCH_OPS=${BUILD_PYTORCH_OPS:?'env var must be set.'}" pushd "${HOST_OPEN3D_ROOT}" docker build \ @@ -202,6 +205,8 @@ cuda_wheel_build() { --build-arg CMAKE_VERSION="${CMAKE_VERSION}" \ --build-arg CCACHE_VERSION="${CCACHE_VERSION}" \ --build-arg PYTHON_VERSION="${PYTHON_VERSION}" \ + --build-arg BUILD_TENSORFLOW_OPS="${BUILD_TENSORFLOW_OPS}" \ + --build-arg BUILD_PYTORCH_OPS="${BUILD_PYTORCH_OPS}" \ -t open3d-ci:wheel \ -f docker/Dockerfile.wheel . 
popd @@ -249,17 +254,17 @@ ci_build() { popd docker run -v "${PWD}:/opt/mount" --rm "${DOCKER_TAG}" \ - bash -cx "cp /open3d*.tar* /opt/mount \ - && chown $(id -u):$(id -g) /opt/mount/open3d*.tar*" + bash -cx "cp /open3d* /opt/mount \ + && chown $(id -u):$(id -g) /opt/mount/open3d*" } 2-bionic_export_env() { export DOCKER_TAG=open3d-ci:2-bionic - export BASE_IMAGE=nvidia/cuda:11.6.2-cudnn8-devel-ubuntu18.04 + export BASE_IMAGE=nvidia/cuda:${CUDA_VERSION}-devel-ubuntu18.04 export DEVELOPER_BUILD=ON export CCACHE_TAR_NAME=open3d-ci-2-bionic - export PYTHON_VERSION=3.7 + export PYTHON_VERSION=3.8 export BUILD_SHARED_LIBS=OFF export BUILD_CUDA_MODULE=ON export BUILD_TENSORFLOW_OPS=OFF @@ -271,13 +276,14 @@ ci_build() { 3-ml-shared-bionic_export_env() { export DOCKER_TAG=open3d-ci:3-ml-shared-bionic - export BASE_IMAGE=nvidia/cuda:11.6.2-cudnn8-devel-ubuntu18.04 + export BASE_IMAGE=nvidia/cuda:${CUDA_VERSION}-devel-ubuntu18.04 export DEVELOPER_BUILD=ON export CCACHE_TAR_NAME=open3d-ci-3-ml-shared-bionic - export PYTHON_VERSION=3.7 + export PYTHON_VERSION=3.8 export BUILD_SHARED_LIBS=ON export BUILD_CUDA_MODULE=ON - export BUILD_TENSORFLOW_OPS=ON + # TODO: re-enable tensorflow support, off due to due to cxx11_abi issue with PyTorch + export BUILD_TENSORFLOW_OPS=OFF export BUILD_PYTORCH_OPS=ON export PACKAGE=ON export BUILD_SYCL_MODULE=OFF @@ -286,13 +292,14 @@ ci_build() { 3-ml-shared-bionic-release_export_env() { export DOCKER_TAG=open3d-ci:3-ml-shared-bionic - export BASE_IMAGE=nvidia/cuda:11.6.2-cudnn8-devel-ubuntu18.04 + export BASE_IMAGE=nvidia/cuda:${CUDA_VERSION}-devel-ubuntu18.04 export DEVELOPER_BUILD=OFF export CCACHE_TAR_NAME=open3d-ci-3-ml-shared-bionic - export PYTHON_VERSION=3.7 + export PYTHON_VERSION=3.8 export BUILD_SHARED_LIBS=ON export BUILD_CUDA_MODULE=ON - export BUILD_TENSORFLOW_OPS=ON + # TODO: re-enable tensorflow support, off due to due to cxx11_abi issue with PyTorch + export BUILD_TENSORFLOW_OPS=OFF export BUILD_PYTORCH_OPS=ON export PACKAGE=ON export BUILD_SYCL_MODULE=OFF @@ -301,10 +308,10 @@ ci_build() { 4-shared-bionic_export_env() { export DOCKER_TAG=open3d-ci:4-shared-bionic - export BASE_IMAGE=nvidia/cuda:11.6.2-cudnn8-devel-ubuntu18.04 + export BASE_IMAGE=nvidia/cuda:${CUDA_VERSION}-devel-ubuntu18.04 export DEVELOPER_BUILD=ON export CCACHE_TAR_NAME=open3d-ci-4-shared-bionic - export PYTHON_VERSION=3.7 + export PYTHON_VERSION=3.8 export BUILD_SHARED_LIBS=ON export BUILD_CUDA_MODULE=ON export BUILD_TENSORFLOW_OPS=OFF @@ -316,10 +323,10 @@ ci_build() { 4-shared-bionic-release_export_env() { export DOCKER_TAG=open3d-ci:4-shared-bionic - export BASE_IMAGE=nvidia/cuda:11.6.2-cudnn8-devel-ubuntu18.04 + export BASE_IMAGE=nvidia/cuda:${CUDA_VERSION}-devel-ubuntu18.04 export DEVELOPER_BUILD=OFF export CCACHE_TAR_NAME=open3d-ci-4-shared-bionic - export PYTHON_VERSION=3.7 + export PYTHON_VERSION=3.8 export BUILD_SHARED_LIBS=ON export BUILD_CUDA_MODULE=ON export BUILD_TENSORFLOW_OPS=OFF @@ -331,13 +338,14 @@ ci_build() { 5-ml-focal_export_env() { export DOCKER_TAG=open3d-ci:5-ml-focal - export BASE_IMAGE=nvidia/cuda:11.6.2-cudnn8-devel-ubuntu20.04 + export BASE_IMAGE=nvidia/cuda:${CUDA_VERSION}-devel-ubuntu20.04 export DEVELOPER_BUILD=ON export CCACHE_TAR_NAME=open3d-ci-5-ml-focal - export PYTHON_VERSION=3.7 + export PYTHON_VERSION=3.8 export BUILD_SHARED_LIBS=OFF export BUILD_CUDA_MODULE=ON - export BUILD_TENSORFLOW_OPS=ON + # TODO: re-enable tensorflow support, off due to due to cxx11_abi issue with PyTorch + export BUILD_TENSORFLOW_OPS=OFF export BUILD_PYTORCH_OPS=ON 
export PACKAGE=OFF export BUILD_SYCL_MODULE=OFF @@ -349,12 +357,12 @@ cpu-static_export_env() { export BASE_IMAGE=ubuntu:18.04 export DEVELOPER_BUILD=ON export CCACHE_TAR_NAME=open3d-ci-cpu - export PYTHON_VERSION=3.7 + export PYTHON_VERSION=3.8 export BUILD_SHARED_LIBS=OFF export BUILD_CUDA_MODULE=OFF export BUILD_TENSORFLOW_OPS=OFF export BUILD_PYTORCH_OPS=OFF - export PACKAGE=OFF + export PACKAGE=VIEWER export BUILD_SYCL_MODULE=OFF } @@ -364,7 +372,7 @@ cpu-shared_export_env() { export BASE_IMAGE=ubuntu:18.04 export DEVELOPER_BUILD=ON export CCACHE_TAR_NAME=open3d-ci-cpu - export PYTHON_VERSION=3.7 + export PYTHON_VERSION=3.8 export BUILD_SHARED_LIBS=ON export BUILD_CUDA_MODULE=OFF export BUILD_TENSORFLOW_OPS=OFF @@ -379,10 +387,11 @@ cpu-shared-ml_export_env() { export BASE_IMAGE=ubuntu:18.04 export DEVELOPER_BUILD=ON export CCACHE_TAR_NAME=open3d-ci-cpu - export PYTHON_VERSION=3.7 + export PYTHON_VERSION=3.8 export BUILD_SHARED_LIBS=ON export BUILD_CUDA_MODULE=OFF - export BUILD_TENSORFLOW_OPS=ON + # TODO: re-enable tensorflow support, off due to due to cxx11_abi issue with PyTorch + export BUILD_TENSORFLOW_OPS=OFF export BUILD_PYTORCH_OPS=ON export PACKAGE=ON export BUILD_SYCL_MODULE=OFF @@ -394,7 +403,7 @@ cpu-shared-release_export_env() { export BASE_IMAGE=ubuntu:18.04 export DEVELOPER_BUILD=OFF export CCACHE_TAR_NAME=open3d-ci-cpu - export PYTHON_VERSION=3.7 + export PYTHON_VERSION=3.8 export BUILD_SHARED_LIBS=ON export BUILD_CUDA_MODULE=OFF export BUILD_TENSORFLOW_OPS=OFF @@ -409,10 +418,11 @@ cpu-shared-ml-release_export_env() { export BASE_IMAGE=ubuntu:18.04 export DEVELOPER_BUILD=OFF export CCACHE_TAR_NAME=open3d-ci-cpu - export PYTHON_VERSION=3.7 + export PYTHON_VERSION=3.8 export BUILD_SHARED_LIBS=ON export BUILD_CUDA_MODULE=OFF - export BUILD_TENSORFLOW_OPS=ON + # TODO: re-enable tensorflow support, off due to due to cxx11_abi issue with PyTorch + export BUILD_TENSORFLOW_OPS=OFF export BUILD_PYTORCH_OPS=ON export PACKAGE=ON export BUILD_SYCL_MODULE=OFF @@ -426,7 +436,7 @@ sycl-shared_export_env() { export BASE_IMAGE=intel/oneapi-basekit:2022.2-devel-ubuntu20.04 export DEVELOPER_BUILD=ON export CCACHE_TAR_NAME=open3d-ci-sycl - export PYTHON_VERSION=3.7 + export PYTHON_VERSION=3.8 export BUILD_SHARED_LIBS=ON export BUILD_CUDA_MODULE=OFF export BUILD_TENSORFLOW_OPS=OFF @@ -443,7 +453,7 @@ sycl-static_export_env() { export BASE_IMAGE=intel/oneapi-basekit:2022.2-devel-ubuntu20.04 export DEVELOPER_BUILD=ON export CCACHE_TAR_NAME=open3d-ci-sycl - export PYTHON_VERSION=3.7 + export PYTHON_VERSION=3.8 export BUILD_SHARED_LIBS=OFF export BUILD_CUDA_MODULE=OFF export BUILD_TENSORFLOW_OPS=OFF @@ -460,10 +470,6 @@ function main() { echo "[$(basename $0)] building $1" case "$1" in # OpenBLAS AMD64 - openblas-amd64-py37-dev) - openblas_export_env amd64 py37 dev - openblas_build - ;; openblas-amd64-py38-dev) openblas_export_env amd64 py38 dev openblas_build @@ -476,8 +482,8 @@ function main() { openblas_export_env amd64 py310 dev openblas_build ;; - openblas-amd64-py37) - openblas_export_env amd64 py37 + openblas-amd64-py311-dev) + openblas_export_env amd64 py311 dev openblas_build ;; openblas-amd64-py38) @@ -492,12 +498,12 @@ function main() { openblas_export_env amd64 py310 openblas_build ;; - - # OpenBLAS ARM64 - openblas-arm64-py37-dev) - openblas_export_env arm64 py37 dev + openblas-amd64-py311) + openblas_export_env amd64 py311 openblas_build ;; + + # OpenBLAS ARM64 openblas-arm64-py38-dev) openblas_export_env arm64 py38 dev openblas_build @@ -510,8 +516,8 @@ function main() { 
openblas_export_env arm64 py310 dev openblas_build ;; - openblas-arm64-py37) - openblas_export_env arm64 py37 + openblas-arm64-py311-dev) + openblas_export_env arm64 py311 dev openblas_build ;; openblas-arm64-py38) @@ -526,6 +532,10 @@ function main() { openblas_export_env arm64 py310 openblas_build ;; + openblas-arm64-py311) + openblas_export_env arm64 py311 + openblas_build + ;; # CPU CI cpu-static) @@ -560,9 +570,6 @@ function main() { ;; # CUDA wheels - cuda_wheel_py37_dev) - cuda_wheel_build py37 dev - ;; cuda_wheel_py38_dev) cuda_wheel_build py38 dev ;; @@ -572,8 +579,8 @@ function main() { cuda_wheel_py310_dev) cuda_wheel_build py310 dev ;; - cuda_wheel_py37) - cuda_wheel_build py37 + cuda_wheel_py311_dev) + cuda_wheel_build py311 dev ;; cuda_wheel_py38) cuda_wheel_build py38 @@ -584,6 +591,9 @@ function main() { cuda_wheel_py310) cuda_wheel_build py310 ;; + cuda_wheel_py311) + cuda_wheel_build py311 + ;; # ML CIs 2-bionic) diff --git a/docker/docker_test.sh b/docker/docker_test.sh index 9993024a90f..877bbe1e5f8 100755 --- a/docker/docker_test.sh +++ b/docker/docker_test.sh @@ -16,24 +16,24 @@ __usage_docker_test="USAGE: OPTION: # OpenBLAS AMD64 (Dockerfile.openblas) - openblas-amd64-py37-dev : OpenBLAS AMD64 3.7 wheel, developer mode openblas-amd64-py38-dev : OpenBLAS AMD64 3.8 wheel, developer mode openblas-amd64-py39-dev : OpenBLAS AMD64 3.9 wheel, developer mode openblas-amd64-py310-dev : OpenBLAS AMD64 3.10 wheel, developer mode - openblas-amd64-py37 : OpenBLAS AMD64 3.7 wheel, release mode + openblas-amd64-py311-dev : OpenBLAS AMD64 3.11 wheel, developer mode openblas-amd64-py38 : OpenBLAS AMD64 3.8 wheel, release mode openblas-amd64-py39 : OpenBLAS AMD64 3.9 wheel, release mode openblas-amd64-py310 : OpenBLAS AMD64 3.10 wheel, release mode + openblas-amd64-py311 : OpenBLAS AMD64 3.11 wheel, release mode # OpenBLAS ARM64 (Dockerfile.openblas) - openblas-arm64-py37-dev : OpenBLAS ARM64 3.7 wheel, developer mode openblas-arm64-py38-dev : OpenBLAS ARM64 3.8 wheel, developer mode openblas-arm64-py39-dev : OpenBLAS ARM64 3.9 wheel, developer mode openblas-arm64-py310-dev : OpenBLAS ARM64 3.10 wheel, developer mode - openblas-arm64-py37 : OpenBLAS ARM64 3.7 wheel, release mode + openblas-arm64-py311-dev : OpenBLAS ARM64 3.11 wheel, developer mode openblas-arm64-py38 : OpenBLAS ARM64 3.8 wheel, release mode openblas-arm64-py39 : OpenBLAS ARM64 3.9 wheel, release mode openblas-arm64-py310 : OpenBLAS ARM64 3.10 wheel, release mode + openblas-arm64-py311 : OpenBLAS ARM64 3.11 wheel, release mode # Ubuntu CPU CI (Dockerfile.ci) cpu-static : Ubuntu CPU static @@ -208,11 +208,6 @@ echo "[$(basename $0)] building $1" source "${HOST_OPEN3D_ROOT}/docker/docker_build.sh" case "$1" in # OpenBLAS AMD64 -openblas-amd64-py37-dev) - openblas_export_env amd64 py37 dev - openblas_print_env - cpp_python_linking_uninstall_test - ;; openblas-amd64-py38-dev) openblas_export_env amd64 py38 dev openblas_print_env @@ -228,8 +223,8 @@ openblas-amd64-py310-dev) openblas_print_env cpp_python_linking_uninstall_test ;; -openblas-amd64-py37) - openblas_export_env amd64 py37 +openblas-amd64-py311-dev) + openblas_export_env amd64 py311 dev openblas_print_env cpp_python_linking_uninstall_test ;; @@ -248,13 +243,13 @@ openblas-amd64-py310) openblas_print_env cpp_python_linking_uninstall_test ;; - -# OpenBLAS ARM64 -openblas-arm64-py37-dev) - openblas_export_env arm64 py37 dev +openblas-amd64-py311) + openblas_export_env amd64 py311 openblas_print_env cpp_python_linking_uninstall_test ;; + +# OpenBLAS ARM64 
openblas-arm64-py38-dev) openblas_export_env arm64 py38 dev openblas_print_env @@ -270,8 +265,8 @@ openblas-arm64-py310-dev) openblas_print_env cpp_python_linking_uninstall_test ;; -openblas-arm64-py37) - openblas_export_env arm64 py37 +openblas-arm64-py311-dev) + openblas_export_env arm64 py311 dev openblas_print_env cpp_python_linking_uninstall_test ;; @@ -290,6 +285,11 @@ openblas-arm64-py310) openblas_print_env cpp_python_linking_uninstall_test ;; +openblas-arm64-py311) + openblas_export_env arm64 py311 + openblas_print_env + cpp_python_linking_uninstall_test + ;; # CPU CI cpu-static) diff --git a/docs/CMakeLists.txt b/docs/CMakeLists.txt index 564546e58a0..b7565cd95c2 100644 --- a/docs/CMakeLists.txt +++ b/docs/CMakeLists.txt @@ -4,22 +4,3 @@ configure_file(${CMAKE_CURRENT_SOURCE_DIR}/getting_started.in.rst ${CMAKE_CURRENT_SOURCE_DIR}/getting_started.rst @ONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/docker.in.rst ${CMAKE_CURRENT_SOURCE_DIR}/docker.rst @ONLY) - -include(FetchContent) - -FetchContent_Declare( - ext_open3d_sphinx_theme - PREFIX open3d_sphinx_theme - URL https://github.com/isl-org/open3d_sphinx_theme/archive/c71d2728eb5afd1aeeb20dc27a5a0d42bb402d83.tar.gz - URL_HASH SHA256=98af8b7fdb75a74280b6187dbb58ea601db978d4f3f8956d3d87c59c20786f73 - DOWNLOAD_DIR "${OPEN3D_THIRD_PARTY_DOWNLOAD_DIR}/open3d_sphinx_theme" -) - -if(NOT ext_open3d_sphinx_theme_POPULATED) - FetchContent_Populate(ext_open3d_sphinx_theme) - - set(OPEN3D_SPHINX_THEME_SOURCE_DIR ${ext_open3d_sphinx_theme_SOURCE_DIR}) -endif() - -configure_file(${CMAKE_CURRENT_SOURCE_DIR}/conf.in.py - ${CMAKE_CURRENT_SOURCE_DIR}/conf.py @ONLY) diff --git a/docs/Doxyfile.in b/docs/Doxyfile.in index 9a514ce1535..0ed539ff60e 100644 --- a/docs/Doxyfile.in +++ b/docs/Doxyfile.in @@ -1146,13 +1146,6 @@ VERBATIM_HEADERS = YES ALPHABETICAL_INDEX = YES -# The COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns in -# which the alphabetical index list will be split. -# Minimum value: 1, maximum value: 20, default value: 5. -# This tag requires that the tag ALPHABETICAL_INDEX is set to YES. - -COLS_IN_ALPHA_INDEX = 5 - # In case all classes in a project start with a common prefix, all classes will # be put under the same header in the alphabetical index. The IGNORE_PREFIX tag # can be used to specify a prefix (or a list of prefixes) that should be ignored diff --git a/docs/_static/css/furo_overrides.css b/docs/_static/css/furo_overrides.css new file mode 100644 index 00000000000..9451fd0baf7 --- /dev/null +++ b/docs/_static/css/furo_overrides.css @@ -0,0 +1,55 @@ +/* In furo, toctree captions needs to be enlarged. 
*/ +.toctree-wrapper.compound .caption { + font-size: 150%; + font-weight: bold; +} + +/* Apply custom CSS for displaying docs version - https://github.com/pradyunsg/furo/pull/500 */ +#furo-versions { + font-size: var(--sidebar-item-font-size); +} + +#furo-versions .caption { + display: inline-block; + color: var(--color-sidebar-caption-text); + font-weight: bold; + text-transform: uppercase; + font-size: var(--sidebar-caption-font-size); + padding-right: var(--sidebar-expander-width); + margin-top: 0; +} + +#furo-versions input[type=checkbox] { + display: none; + position: absolute; + box-sizing: border-box; + padding: 0; + overflow: visible; +} + +#furo-versions input[type=checkbox]:checked ~ .rst-other-versions { + display: inline-block; + line-height: var(--sidebar-item-line-height); + padding: var(--sidebar-item-spacing-vertical) var(--sidebar-item-spacing-horizontal); + text-decoration: none; +} + +#furo-versions .rst-other-versions { + display: none; +} + +#furo-versions .versions-label { + position: relative; + float: right; +} + +/* Fix for references. See Open3D PR #6470 */ +div.citation > span { + padding-right: 2rem; +} + +/* Limit output of jupyter notebook within a scrollable area in generated documentation */ +.nboutput .output_area div > pre { + overflow-y: scroll !important; + max-height: 30em; +} diff --git a/docs/_static/theme_overrides.css b/docs/_static/theme_overrides.css deleted file mode 100644 index 250bb7df602..00000000000 --- a/docs/_static/theme_overrides.css +++ /dev/null @@ -1,20 +0,0 @@ -/* Force table wrap: https://rackerlabs.github.io/docs-rackspace/tools/rtd-tables.html */ -/* override table width restrictions */ -@media screen and (min-width: 767px) { - - .wy-table-responsive table td { - /* !important prevents the common CSS stylesheets from overriding - this as on RTD they are loaded after this stylesheet */ - white-space: normal !important; - } - - .wy-table-responsive { - overflow: visible !important; - } - } - -/* Limit visible text output to about 30 lines and show scrollbar */ -.nboutput .output_area div > pre { - overflow-y: scroll !important; - max-height: 30em; -} diff --git a/docs/_templates/sidebar/variant-selector.html b/docs/_templates/sidebar/variant-selector.html new file mode 100644 index 00000000000..cb7a7e9c727 --- /dev/null +++ b/docs/_templates/sidebar/variant-selector.html @@ -0,0 +1,21 @@ +{% if display_all_docs_versions %} +{# Add rst-badge after rst-versions for small badge style. #} + +{% endif %} \ No newline at end of file diff --git a/docs/arm.rst b/docs/arm.rst index dddb894987d..584d0d007ad 100644 --- a/docs/arm.rst +++ b/docs/arm.rst @@ -67,10 +67,10 @@ commands: cd docker - ./docker_build.sh openblas-arm64-py37 # Python 3.7 ./docker_build.sh openblas-arm64-py38 # Python 3.8 ./docker_build.sh openblas-arm64-py39 # Python 3.9 ./docker_build.sh openblas-arm64-py310 # Python 3.10 + ./docker_build.sh openblas-arm64-py311 # Python 3.11 After running ``docker_build.sh``, you shall see a ``.whl`` file generated the current directly on the host. Then simply install the ``.whl`` file by: diff --git a/docs/builddocs.rst b/docs/builddocs.rst index 5e0d39f8b77..2f8e9539957 100644 --- a/docs/builddocs.rst +++ b/docs/builddocs.rst @@ -9,16 +9,14 @@ generated by `sphinx `_. The C++ API documentation is generated by `Doxygen `_. Documentation can be built on Ubuntu or macOS. Building documentation on Windows -may also be possible but it is not officially tested. +may also be possible but is not officially tested. 
If you're building documentation on a computer without a display, please use :ref:`headless_rendering`, otherwise the Jupyter tutorials will fail to execute. -Prerequisites -------------- -1. Install system dependencies -`````````````````````````````` +Install system dependencies +--------------------------- **Ubuntu** @@ -34,23 +32,44 @@ First, install a TeX distribution such as `MacTeX `_ brew install ghostscript pandoc doxygen -2. Install Python dependencies -`````````````````````````````` + +Building C++ documentation +-------------------------- + +If you only want to build C++ API documentation, clone the Open3D repo and run +doxygen. .. code-block:: bash - pip install -r docs/requirements.txt + git clone https://github.com/isl-org/open3d + cd open3d/docs + doxygen Doxyfile.in + +Start browsing the generated C++ API documentation from the file +``docs/doxygen/html/index.html``. Read on if you want to build the full +documentation (including Python API and tutorials). -Build ------ -First, clone and build Open3D from source and install the Open3D Python package. -Visit :ref:`compilation` for details. After configuration, install the Open3D -Python package with: +Install or Build Open3D +----------------------- .. code-block:: bash - make install-pip-package -j$(nproc) + pip install open3d + +To instead build Open3D from source, see :ref:`compilation`. + + +Install Python dependencies +--------------------------- + +.. code-block:: bash + + pip install -r docs/requirements.txt + +Build docs +---------- + .. code-block:: bash diff --git a/docs/conf.in.py b/docs/conf.py similarity index 90% rename from docs/conf.in.py rename to docs/conf.py index 0954849a09b..a02dfcf53e5 100644 --- a/docs/conf.in.py +++ b/docs/conf.py @@ -8,7 +8,7 @@ # -*- coding: utf-8 -*- # # Open3D documentation build configuration file, created by -# sphinx-quickstart on Mon Apr 3 14:18:28 2017. +# sphinx-quickstart on Mon Apr 3 14:18:28 2017. # # This file is execfile()d with the current directory set to its # containing dir. @@ -27,12 +27,10 @@ # import sys # sys.path.insert(0, os.path.abspath('.')) -import sys import os import re import subprocess -from pathlib import Path -import shutil +import sys def get_git_short_hash(): @@ -70,10 +68,10 @@ def get_git_short_hash(): "sphinx.ext.napoleon", "sphinx.ext.todo", "nbsphinx", - 'm2r2', + "m2r2", ] -if os.environ["skip_notebooks"] == "true": +if os.environ.get("skip_notebooks", "false") == "true": print("Skipping Jupyter notebooks") extensions = [e for e in extensions if e != "nbsphinx"] @@ -96,7 +94,7 @@ def get_git_short_hash(): # General information about the project. project = u"Open3D" -copyright = u"2018 - 2021, www.open3d.org" +copyright = u"2018 - 2023, www.open3d.org" author = u"www.open3d.org" # The version info for the project you're documenting, acts as replacement for @@ -120,29 +118,31 @@ def get_git_short_hash(): # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This patterns also effect to html_static_path and html_extra_path -exclude_patterns = ["_build", "Thumbs.db", ".DS_Store", "**.ipynb_checkpoints"] +exclude_patterns = [ + "_build", + "Thumbs.db", + ".DS_Store", + "**.ipynb_checkpoints", + "docker.in.rst", + "getting_started.in.rst", + "jupyter/*/*.ipynb", + "python_api_in/*.rst", +] # The name of the Pygments (syntax highlighting) style to use. pygments_style = "sphinx" - -# If true, `todo` and `todoList` produce output, else they produce nothing. 
-todo_include_todos = False +pygments_dark_style = "monokai" # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. -# -# html_theme = 'alabaster' -theme_path = "@OPEN3D_SPHINX_THEME_SOURCE_DIR@" -html_theme = "sphinx_rtd_theme" -html_theme_path = [theme_path] +html_theme = "furo" html_favicon = "_static/open3d_logo.ico" # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. -# html_theme_options = { # 'display_version': True } @@ -152,15 +152,13 @@ def get_git_short_hash(): # so a file named "default.css" will overwrite the builtin "default.css". # '_static' contains the theme overwrite -static_path = os.path.join(theme_path, "sphinx_rtd_theme", "static") -html_static_path = [static_path, "_static"] - -# Force table wrap: https://rackerlabs.github.io/docs-rackspace/tools/rtd-tables.html -html_context = { - "css_files": [ - "_static/theme_overrides.css" # override wide tables in RTD theme - ] -} +html_static_path = ["_static"] + +# Theme overrides +html_css_files = ['css/furo_overrides.css'] + +# Display selection of all documentation versions. +html_context = {'display_all_docs_versions': True} # added by Jaesik to hide "View page source" html_show_sourcelink = False diff --git a/docs/contribute/contribution_recipes.rst b/docs/contribute/contribution_recipes.rst index b3ff00c4bfb..03102849214 100644 --- a/docs/contribute/contribution_recipes.rst +++ b/docs/contribute/contribution_recipes.rst @@ -264,7 +264,7 @@ Case 4: When adding a Python tutorial .. note:: When you commit a ipynb notebook file make sure to remove the output cells to keep the commit sizes small. - You can use the script ``docs/jupyter/jupyter_strip_output.sh`` for + You can use the script ``docs/jupyter/jupyter_strip_output.py`` for stripping the output cells of all tutorials. Dos diff --git a/docs/cpp_project.rst b/docs/cpp_project.rst index ef931912723..89bfd98b70f 100644 --- a/docs/cpp_project.rst +++ b/docs/cpp_project.rst @@ -10,7 +10,7 @@ We provide two example CMake projects to demonstrate how to use Open3D in your CMake projects. * `Find Pre-Installed Open3D Package in CMake `_ - This option can be used if you'd like Open3D build and install Open3D first, + This option can be used if you'd like to build and install Open3D first, then link your project to Open3D. * `Use Open3D as a CMake External Project `_ This option can be used if you'd like Open3D to build alongside with your diff --git a/docs/docker.in.rst b/docs/docker.in.rst index f22c0a3b03d..3647b99d48f 100644 --- a/docs/docker.in.rst +++ b/docs/docker.in.rst @@ -30,10 +30,11 @@ Python applications looks like this: .. code-block:: dockerfile # This could also be another Ubuntu or Debian based distribution - FROM ubuntu:latest + FROM ubuntu:22.04 # Install Open3D system dependencies and pip RUN apt-get update && apt-get install --no-install-recommends -y \ + libegl1 \ libgl1 \ libgomp1 \ python3-pip \ @@ -52,11 +53,11 @@ To run GUI applications from the docker container, add these options to the 1. GPU: - - Intel (Mesa drivers): ``--device=/dev/dri:/dev/dri`` + - Intel (Mesa drivers): ``--device=/dev/dri:/dev/dri`` or ``--device=/dev/dri/card0:/dev/dri/card0 --device=/dev/dri/renderD128:/dev/dri/renderD128``, depending on your hardware. 
- NVIDIA: ``--gpus 'all,"capabilities=compute,utility,graphics"'`` - - No GPU (CPU rendering): ``--env OPEN3D_CPU_RENDERING=true`` + - No GPU (CPU rendering): ``--env OPEN3D_CPU_RENDERING=true`` on Ubuntu 18.04. Later versions automaticaly select CPU rendering if a GPU is not available. 2. X server: ``-v /tmp/.X11-unix:/tmp/.X11-unix -e DISPLAY`` @@ -72,7 +73,7 @@ folder that contains data you wish to visualize. wget https://github.com/isl-org/Open3D/releases/download/v@OPEN3D_VERSION@/open3d-app-@OPEN3D_VERSION@-Ubuntu.deb # Build docker image in folder containing Open3D deb package. docker build -t open3d-viewer -f- . <`__ for more -information. +information. Note that differences in hardware, OS drivers and OS packages may +require you to modify these instructions. Headless rendering ------------------ If a GUI display server (X11 or Wayland) is not available (either in the docker -container or the host OS), Open3D can still be used for headless rendering. This -requires installing some additional dependencies. Here is an example Ubuntu / -Debian based docker file that runs the ``render_to_image.py`` rendering example. -Other Linux (e.g. RHEL) distributions will need different dependency packages. +container or the host OS), Open3D can still be used for headless rendering. In +Ubuntu 20.04+ (with Mesa version 20.2+) this requires configuring the Mesa +driver with an environment variable (``EGL_PLATFORM=surfaceless``): .. code-block:: bash @@ -114,7 +114,43 @@ Other Linux (e.g. RHEL) distributions will need different dependency packages. wget https://raw.githubusercontent.com/isl-org/Open3D/v@OPEN3D_VERSION@/examples/python/visualization/render_to_image.py # Build docker image docker build -t open3d-headless -f- . <=20.3 to install Open3D in Linux, e.g. with - ``pip install -U pip>=20.3`` + .. code-block:: bash + + pip install -U pip>=20.3 .. note:: In general, we recommend using a `virtual environment `_ or `conda environment `_. - Otherwise, depending on the configurations, ``pip3`` may be needed for - Python 3, or the ``--user`` option may need to be used to avoid permission - issues. For example: + Otherwise, depending on the configurations, you may need ``pip3`` for + Python 3, or the ``--user`` option to avoid permission issues. For example: .. code-block:: bash @@ -72,39 +70,38 @@ version (``HEAD`` of ``master`` branch): :widths: auto * - Linux - - `Python 3.7 `__ - `Python 3.8 `__ - `Python 3.9 `__ - `Python 3.10 `__ + - `Python 3.11 `__ * - Linux (CPU) - - `Python 3.7 `__ - `Python 3.8 `__ - `Python 3.9 `__ - `Python 3.10 `__ + - `Python 3.11 `__ * - MacOS - - `Python 3.7 `__ - - `Python 3.8 `__ - - `Python 3.9 `__ - - `Python 3.10 `__ + - `Python 3.8 `__ + - `Python 3.9 `__ + - `Python 3.10 `__ + - `Python 3.11 `__ * - Windows - - `Python 3.7 `__ - `Python 3.8 `__ - `Python 3.9 `__ - `Python 3.10 `__ + - `Python 3.11 `__ Please use these links from the `latest version of this page -`__ only. For example, -to install the latest development version on Linux for Python 3.9: +`__ only. You can also +install the latest development version directly with pip: .. code-block:: bash - pip install --user --pre \ - https://storage.googleapis.com/open3d-releases-master/python-wheels/open3d-@OPEN3D_VERSION_FULL@-cp39-cp39-linux_x86_64.whl + pip install -U --trusted-host www.open3d.org -f http://www.open3d.org/docs/latest/getting_started.html open3d -.. note:: +.. warning:: The development wheels for Linux are named according to PEP600. Please use ``pip`` version >=20.3 to install them. 
The wheels are not yet fully PEP600 compliant. @@ -146,9 +143,7 @@ demonstrate the usage of Open3D Python interface. See ``examples/python`` for all Python examples. .. note:: Open3D's Python tutorial utilizes some external packages: ``numpy``, - ``matplotlib``, ``opencv-python``. OpenCV is only used for reconstruction - system. Please read ``util/install-deps-python.sh`` for installing these - packages. + ``matplotlib``, ``opencv-python``. .. _install_open3d_c++: @@ -189,13 +184,13 @@ provided here [#]_: only. .. [#] To check the `glibc` version on your system, run :code:`ldd --version`. -.. note:: In Linux, do not link code with different CXX11 ABIs, since this will +.. warning:: In Linux, do not link code with different CXX11 ABIs, since this will most likely cause linker errors or crashes. Most system libraries in recent Linux versions (e.g. if the OS came with GCC versions 5+) use the CXX11 ABI, while PyTorch and Tensorflow libraries typically use the pre CXX11 ABI. -If you need only a subset of features, or a custom build configuration, please -refer to :ref:`compilation` and compile Open3D from source. +If you need a subset of features, or a custom build configuration, please refer +to :ref:`compilation` and compile Open3D from source. Try it ------ diff --git a/docs/index.rst b/docs/index.rst index 391c0298e64..6be002a714b 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -13,10 +13,6 @@ Open3D: A Modern Library for 3D Data Processing =============================================== -.. only: not latex - - Contents: - .. toctree:: :maxdepth: 1 :caption: Getting Started @@ -26,43 +22,26 @@ Open3D: A Modern Library for 3D Data Processing compilation cpp_project builddocs - open3d_ml - arm docker + arm + open3d_ml .. toctree:: :maxdepth: 2 :caption: Tutorial + tutorial/core/index tutorial/geometry/index tutorial/t_geometry/index + tutorial/data/index + tutorial/visualization/index tutorial/pipelines/index tutorial/t_pipelines/index - tutorial/visualization/index - tutorial/core/index - tutorial/data/index tutorial/reconstruction_system/index tutorial/t_reconstruction_system/index tutorial/sensor/index tutorial/reference -.. toctree:: - :maxdepth: 1 - :caption: Contribute - - contribute/contribute - contribute/contribution_recipes - contribute/styleguide - -.. toctree:: - :maxdepth: 1 - :caption: C++ API - - cpp_api - -.. - Note: when adding new modules, please also update documented_modules.txt. - .. toctree:: :maxdepth: 1 :caption: Python API @@ -88,3 +67,20 @@ Open3D: A Modern Library for 3D Data Processing python_example/pipelines/index python_example/utility/index python_example/visualization/index + +.. toctree:: + :maxdepth: 1 + :caption: C++ API + + cpp_api + +.. toctree:: + :maxdepth: 1 + :caption: Contribute + + contribute/contribute + contribute/contribution_recipes + contribute/styleguide + +.. + Note: when adding new modules, please also update documented_modules.txt. diff --git a/docs/jupyter/core/hashmap.ipynb b/docs/jupyter/core/hashmap.ipynb index 31f57aace92..6d05755c4f9 100644 --- a/docs/jupyter/core/hashmap.ipynb +++ b/docs/jupyter/core/hashmap.ipynb @@ -88,19 +88,23 @@ "Next we show how to insert a batch of (key, value) pairs. You'll need to prepare two tensors:\n", "\n", "The `keys` tensor contains all keys. \n", + "\n", "- The `keys` tensor must be on the same device as the hash map. \n", "- The shape of the `keys` tensor is `key_elment_shape` with `N` prefixed to the front. \n", "\n", "For example \n", + " \n", "1. 
if `key_element_shape == ()`, `keys.shape == (N,)`; \n", "2. if `key_element_shape == (3,)`, `keys.shape == (N, 3).`; \n", "3. if `key_element_shape == (8, 8, 8)`, `keys.shape == (N, 8, 8, 8).`\n", " \n", "The `vals` tensor contains all values. \n", + " \n", "- The `vals` tensor must be on the same device as the hash map. \n", "- The shape of the `vals` tensor is `val_elment_shape` with `N` prefixed to the front. \n", "\n", "For example \n", + "\n", "1. if `val_elment_shape == ()`, `vals.shape == (N,)`; \n", "2. if `val_elment_shape == (3,)`, `vals.shape == (N, 3).`;\n", "3. if `val_elment_shape == (8, 8, 8)`, `vals.shape == (N, 8, 8, 8).`" diff --git a/docs/jupyter/geometry/distance_queries.ipynb b/docs/jupyter/geometry/distance_queries.ipynb index 36038e99e9e..3c7eada9fea 100644 --- a/docs/jupyter/geometry/distance_queries.ipynb +++ b/docs/jupyter/geometry/distance_queries.ipynb @@ -308,7 +308,7 @@ " axis=-1)\n", "intersection_counts = scene.count_intersections(rays).numpy()\n", "# A point is inside if the number of intersections with the scene is even\n", - "# This sssumes that inside and outside is we ll defined for the scene.\n", + "# This assumes that inside and outside are well-defined for the scene.\n", "is_inside = intersection_counts % 2 == 1" ] }, diff --git a/docs/jupyter/geometry/rgbd_image.ipynb b/docs/jupyter/geometry/rgbd_image.ipynb index 439223dee43..4d696912e44 100644 --- a/docs/jupyter/geometry/rgbd_image.ipynb +++ b/docs/jupyter/geometry/rgbd_image.ipynb @@ -99,7 +99,7 @@ " o3d.camera.PinholeCameraIntrinsicParameters.PrimeSenseDefault))\n", "# Flip it, otherwise the pointcloud will be upside down\n", "pcd.transform([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]])\n", - "o3d.visualization.draw_geometries([pcd], zoom=0.5)" + "o3d.visualization.draw_geometries([pcd])" ] }, { @@ -160,7 +160,7 @@ " o3d.camera.PinholeCameraIntrinsicParameters.PrimeSenseDefault))\n", "# Flip it, otherwise the pointcloud will be upside down\n", "pcd.transform([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]])\n", - "o3d.visualization.draw_geometries([pcd], zoom=0.5)" + "o3d.visualization.draw_geometries([pcd])" ] }, { @@ -243,7 +243,7 @@ " o3d.camera.PinholeCameraIntrinsicParameters.PrimeSenseDefault))\n", "# Flip it, otherwise the pointcloud will be upside down\n", "pcd.transform([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]])\n", - "o3d.visualization.draw_geometries([pcd], zoom=0.5)" + "o3d.visualization.draw_geometries([pcd])" ] }, { diff --git a/docs/jupyter/t_pipelines/t_icp_registration.ipynb b/docs/jupyter/t_pipelines/t_icp_registration.ipynb index 44a21f39584..cb22159bc7f 100644 --- a/docs/jupyter/t_pipelines/t_icp_registration.ipynb +++ b/docs/jupyter/t_pipelines/t_icp_registration.ipynb @@ -405,7 +405,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "#### 1. Set Inputs and Parameters" + "### 1. Set Inputs and Parameters" ] }, { @@ -453,7 +453,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "#### 2. Get Registration Result from ICP" + "### 2. Get Registration Result from ICP" ] }, { @@ -602,7 +602,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "#### 1. Set Inputs and Parameters" + "### 1. Set Inputs and Parameters" ] }, { @@ -656,7 +656,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "#### 2. Get Registration Result from Multi-Scale ICP" + "### 2. Get Registration Result from Multi-Scale ICP" ] }, { @@ -884,7 +884,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "#### 1. 
Set Inputs and Parameters" + "### 1. Set Inputs and Parameters" ] }, { diff --git a/docs/jupyter/visualization/visualization.ipynb b/docs/jupyter/visualization/visualization.ipynb index 63dcf2ed863..4fb1a00a5d6 100644 --- a/docs/jupyter/visualization/visualization.ipynb +++ b/docs/jupyter/visualization/visualization.ipynb @@ -129,7 +129,7 @@ "metadata": {}, "source": [ "## Geometry primitives\n", - "The code below generates a box, a sphere, and a cylinder using `create_box`, `create_sphere`, and `create_cylinder`. The box is painted in red, the sphere is painted in blue, and the cylinder is painted in green. Normals are computed for all meshes to support Phong shading (see [Visualize 3D mesh](mesh.ipynb#visualize-a-3d-mesh) and [Surface normal estimation](mesh.ipynb#surface-normal-estimation)). We can even create a coordinate axis using `create_coordinate_frame`, with its origin point set at (-2, -2, -2)." + "The code below generates a box, a sphere, and a cylinder using `create_box`, `create_sphere`, and `create_cylinder`. The box is painted in red, the sphere is painted in blue, and the cylinder is painted in green. Normals are computed for all meshes to support Phong shading (see [Visualize 3D mesh](../geometry/mesh.ipynb#visualize-a-3d-mesh) and [Surface normal estimation](../geometry/mesh.ipynb#surface-normal-estimation)). We can even create a coordinate axis using `create_coordinate_frame`, with its origin point set at (-2, -2, -2)." ] }, { @@ -222,7 +222,7 @@ " lines=o3d.utility.Vector2iVector(lines),\n", ")\n", "line_set.colors = o3d.utility.Vector3dVector(colors)\n", - "o3d.visualization.draw_geometries([line_set], zoom=0.8)" + "o3d.visualization.draw_geometries([line_set])" ] } ], diff --git a/docs/make.bat b/docs/make.bat index f21f1e1cf7a..7db5aa6b4c8 100644 --- a/docs/make.bat +++ b/docs/make.bat @@ -21,7 +21,7 @@ if errorlevel 9009 ( exit /b 1 ) -python make.py %1 +python make_docs.py %1 goto end :end diff --git a/docs/make_docs.py b/docs/make_docs.py index 7a3b77deee4..0803b36bba8 100644 --- a/docs/make_docs.py +++ b/docs/make_docs.py @@ -11,20 +11,21 @@ # (3) make.py calls the actual `sphinx-build` import argparse -import subprocess -import sys import importlib -import os import inspect -import shutil +import multiprocessing +import os import re -from pathlib import Path -import nbformat -import nbconvert +import shutil import ssl -import certifi +import subprocess +import sys import urllib.request -import multiprocessing +from pathlib import Path + +import certifi +import nbconvert +import nbformat def _create_or_clear_dir(dir_path): @@ -66,10 +67,10 @@ def __init__(self, output_dir="python_api", input_dir="python_api_in"): self.output_dir = output_dir self.input_dir = input_dir self.module_names = PyAPIDocsBuilder._get_documented_module_names() - print("Generating *.rst Python API docs in directory: %s" % - self.output_dir) def generate_rst(self): + print(f"Generating *.rst Python API docs in directory: " + f"{self.output_dir}") _create_or_clear_dir(self.output_dir) for module_name in self.module_names: @@ -279,8 +280,6 @@ def __init__(self, input_dir, pwd, output_dir="python_example"): sys.path.append(os.path.join(pwd, "..", "python", "tools")) from cli import _get_all_examples_dict self.get_all_examples_dict = _get_all_examples_dict - print("Generating *.rst Python example docs in directory: %s" % - self.output_dir) def _get_examples_dict(self): examples_dict = self.get_all_examples_dict() @@ -305,21 +304,21 @@ def _generate_index(title, output_path): 
f.write(out_string) @staticmethod - def _add_example_to_docs(example, output_path): + def _add_example_to_docs(example: Path, output_path): shutil.copy(example, output_path) - out_string = (f"{example.stem}.py" - f"\n```````````````````````````````````````\n" - f"\n.. literalinclude:: {example.stem}.py" + out_string = (f"{example.name}" + f"\n{'`' * (len(example.name))}\n" + f"\n.. literalinclude:: {example.name}" f"\n :language: python" f"\n :linenos:" - f"\n :lineno-start: 27" - f"\n :lines: 27-" f"\n\n\n") with open(output_path / "index.rst", "a") as f: f.write(out_string) def generate_rst(self): + print(f"Generating *.rst Python example docs in directory: " + f"{self.output_dir}") _create_or_clear_dir(self.output_dir) examples_dict = self._get_examples_dict() @@ -392,6 +391,11 @@ def run(self): build_dir = os.path.join(self.html_output_dir, "html") nproc = multiprocessing.cpu_count() if self.parallel else 1 print(f"Building docs with {nproc} processes") + today = os.environ.get("SPHINX_TODAY", None) + if today: + cmd_args_today = ["-D", "today=" + today] + else: + cmd_args_today = [] if self.is_release: version_list = [ @@ -402,15 +406,10 @@ def run(self): print("Building docs for release:", release_version) cmd = [ - "sphinx-build", - "-j", - str(nproc), - "-b", - "html", - "-D", - "version=" + release_version, - "-D", - "release=" + release_version, + "sphinx-build", "-j", + str(nproc), "-b", "html", "-D", "version=" + release_version, + "-D", "release=" + release_version + ] + cmd_args_today + [ ".", build_dir, ] @@ -421,6 +420,7 @@ def run(self): str(nproc), "-b", "html", + ] + cmd_args_today + [ ".", build_dir, ] @@ -435,6 +435,9 @@ def run(self): env=sphinx_env, stdout=sys.stdout, stderr=sys.stderr) + print( + f"Sphinx docs are generated at {(Path(build_dir)/'index.html').as_uri()}" + ) class DoxygenDocsBuilder: @@ -449,9 +452,13 @@ def run(self): cmd = ["doxygen", "Doxyfile"] print('Calling: "%s"' % " ".join(cmd)) subprocess.check_call(cmd, stdout=sys.stdout, stderr=sys.stderr) + output_path = os.path.join(self.html_output_dir, "html", "cpp_api") shutil.copytree( os.path.join("doxygen", "html"), - os.path.join(self.html_output_dir, "html", "cpp_api"), + output_path, + ) + print( + f"Doxygen docs are generated at {(Path(output_path)/'index.html').as_uri()}" ) if os.path.exists(doxygen_temp_dir): @@ -483,27 +490,26 @@ def run(self): # Jupyter notebooks os.environ["CI"] = "true" - # Copy and execute notebooks in the tutorial folder + # Copy from jupyter to the tutorial folder. 
nb_paths = [] - nb_direct_copy = [ - 'draw_plotly.ipynb', - 'hashmap.ipynb', - 'jupyter_visualization.ipynb', - 't_icp_registration.ipynb', - 'tensor.ipynb', - ] + nb_parent_src = Path(self.current_file_dir) / "jupyter" + nb_parent_dst = Path(self.current_file_dir) / "tutorial" example_dirs = [ - "geometry", "t_geometry", "core", "data", "pipelines", - "visualization", "t_pipelines" + name for name in os.listdir(nb_parent_src) + if os.path.isdir(nb_parent_src / name) ] + + print(f"Copying {nb_parent_src / 'open3d_tutorial.py'} " + f"to {nb_parent_dst / 'open3d_tutorial.py'}") + shutil.copy( + nb_parent_src / "open3d_tutorial.py", + nb_parent_dst / "open3d_tutorial.py", + ) + for example_dir in example_dirs: - in_dir = (Path(self.current_file_dir) / "jupyter" / example_dir) - out_dir = Path(self.current_file_dir) / "tutorial" / example_dir + in_dir = nb_parent_src / example_dir + out_dir = nb_parent_dst / example_dir out_dir.mkdir(parents=True, exist_ok=True) - shutil.copy( - in_dir.parent / "open3d_tutorial.py", - out_dir.parent / "open3d_tutorial.py", - ) if self.clean_notebooks: for nb_out_path in out_dir.glob("*.ipynb"): @@ -524,6 +530,15 @@ def run(self): shutil.copytree(in_dir / "images", out_dir / "images") # Execute Jupyter notebooks + # Files that should not be executed. + nb_direct_copy = [ + 'draw_plotly.ipynb', + 'hashmap.ipynb', + 'jupyter_visualization.ipynb', + 't_icp_registration.ipynb', + 'tensor.ipynb', + ] + for nb_path in nb_paths: if nb_path.name in nb_direct_copy: print("[Processing notebook {}, directly copied]".format( @@ -585,7 +600,7 @@ def run(self): action="store_true", default=False, help=("Whether to clean existing notebooks in docs/tutorial. " - "Notebooks are copied from examples/python to docs/tutorial."), + "Notebooks are copied from docs/jupyter to docs/tutorial."), ) parser.add_argument( "--execute_notebooks", diff --git a/docs/open3d_ml.rst b/docs/open3d_ml.rst index a4afdcd0e5f..c8a8a5d68ba 100644 --- a/docs/open3d_ml.rst +++ b/docs/open3d_ml.rst @@ -5,6 +5,7 @@ Open3D-ML .. image:: https://raw.githubusercontent.com/isl-org/Open3D-ML/master/docs/images/getting_started_ml_visualizer.gif :width: 480px + :align: center Open3D-ML is an extension of Open3D for 3D machine learning tasks. It builds on top of the Open3D core library and extends it with machine learning tools for diff --git a/docs/requirements.txt b/docs/requirements.txt index 5475bcd5392..ab297f6eb30 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -1,7 +1,7 @@ -sphinx==3.3.1 -sphinx-rtd-theme==0.5.2 -nbsphinx==0.8.3 -matplotlib==3.3.3 -jinja2==3.0.3 -m2r2==0.2.7 -mistune==0.8.4 +docutils==0.20.1 +furo==2023.9.10 +jinja2==3.1.2 +m2r2==0.3.3.post2 +matplotlib==3.7.3 +nbsphinx==0.9.3 +sphinx==7.1.2 \ No newline at end of file diff --git a/docs/tutorial/geometry/index.rst b/docs/tutorial/geometry/index.rst index 2cade36c5ed..ea7292387c5 100644 --- a/docs/tutorial/geometry/index.rst +++ b/docs/tutorial/geometry/index.rst @@ -22,6 +22,7 @@ Geometry iss_keypoint_detector ray_casting distance_queries + uvmaps .. toctree:: :caption: Interface diff --git a/docs/tutorial/geometry/uvmaps.rst b/docs/tutorial/geometry/uvmaps.rst index 7fccce5008f..7d4c7aa7344 100644 --- a/docs/tutorial/geometry/uvmaps.rst +++ b/docs/tutorial/geometry/uvmaps.rst @@ -24,7 +24,8 @@ Quick Reference to default UV Maps for some primitive shapes provided by Open3D The examples below all assume the following code preamble: -.. code_block:: python +.. 
code-block:: python + import open3d as o3d import open3d.visualization.rendering as rendering @@ -45,7 +46,9 @@ Example Texture Map Box (map uv to each face = false) ************************************ -.. code_block:: python +.. code-block:: python + + box = o3d.geometry.TriangleMesh.create_box(create_uv_map=True) o3d.visualization.draw({'name': 'box', 'geometry': box, 'material': material}) @@ -62,7 +65,9 @@ Box (map uv to each face = false) Box (map uv to each face = true) ************************************** -.. code_block:: python +.. code-block:: python + + box = o3d.geometry.TriangleMesh.create_box(create_uv_map=True, map_texture_to_each_face=True) o3d.visualization.draw({'name': 'box', 'geometry': box, 'material': material}) @@ -80,7 +85,9 @@ Box (map uv to each face = true) Tetrahedral ************* -.. code_block:: python +.. code-block:: python + + tetra = o3d.geometry.TriangleMesh.create_tetrahedron(create_uv_map=True) o3d.visualization.draw({'name': 'tetrahedron', 'geometry': tetra, 'material': material}) @@ -98,7 +105,9 @@ Tetrahedral Octahedral *************** -.. code_block:: python +.. code-block:: python + + octo = o3d.geometry.TriangleMesh.create_octahedron(create_uv_map=True) o3d.visualization.draw({'name': 'octahedron', 'geometry': octo, 'material': material}) @@ -115,7 +124,9 @@ Octahedral Icosahedron ************** -.. code_block:: python +.. code-block:: python + + ico = o3d.geometry.TriangleMesh.create_icosahedron(create_uv_map=True) o3d.visualization.draw({'name': 'icosahedron', 'geometry': ico, 'material': material}) @@ -132,7 +143,9 @@ Icosahedron Cylinder ************* -.. code_block:: python +.. code-block:: python + + cylinder = o3d.geometry.TriangleMesh.create_cylinder(create_uv_map=True) o3d.visualization.draw({'name': 'cylinder', 'geometry': cylinder, 'material': material}) @@ -149,7 +162,9 @@ Cylinder Cone ******* -.. code_block:: python +.. code-block:: python + + cone = o3d.geometry.TriangleMesh.create_cone(create_uv_map=True) o3d.visualization.draw({'name': 'cone', 'geometry': cone, 'material': material}) @@ -166,7 +181,9 @@ Cone Sphere ******* -.. code_block:: python +.. code-block:: python + + sphere = o3d.geometry.TriangleMesh.create_sphere(create_uv_map=True) o3d.visualization.draw({'name': 'sphere', 'geometry': sphere, 'material': material}) diff --git a/docs/tutorial/reconstruction_system/integrate_scene.rst b/docs/tutorial/reconstruction_system/integrate_scene.rst index 4a56b188ab3..d480d782219 100644 --- a/docs/tutorial/reconstruction_system/integrate_scene.rst +++ b/docs/tutorial/reconstruction_system/integrate_scene.rst @@ -21,9 +21,10 @@ Integrate RGBD frames .. literalinclude:: ../../../examples/python/reconstruction_system/integrate_scene.py :language: python - :lineno-start: 38 - :lines: 27,40-72 + :pyobject: scalable_integrate_rgb_frames + :end-at: o3d.visualization.draw_geometries([mesh]) :linenos: + :lineno-match: This function first reads the alignment results from both :ref:`reconstruction_system_make_fragments` and diff --git a/docs/tutorial/reconstruction_system/make_fragments.rst b/docs/tutorial/reconstruction_system/make_fragments.rst index 2485bfad2b3..d3f53b0575f 100644 --- a/docs/tutorial/reconstruction_system/make_fragments.rst +++ b/docs/tutorial/reconstruction_system/make_fragments.rst @@ -25,9 +25,9 @@ Register RGBD image pairs .. 
literalinclude:: ../../../examples/python/reconstruction_system/make_fragments.py :language: python - :lineno-start: 46 - :lines: 27,47-76 + :pyobject: register_one_rgbd_pair :linenos: + :lineno-match: The function reads a pair of RGBD images and registers the ``source_rgbd_image`` to the ``target_rgbd_image``. The Open3D function ``compute_rgbd_odometry`` is @@ -45,9 +45,9 @@ Multiway registration .. literalinclude:: ../../../examples/python/reconstruction_system/make_fragments.py :language: python - :lineno-start: 76 - :lines: 27,77-123 + :pyobject: make_posegraph_for_fragment :linenos: + :lineno-match: This script uses the technique demonstrated in :ref:`/tutorial/pipelines/multiway_registration.ipynb`. The function @@ -61,9 +61,9 @@ function ``optimize_posegraph_for_fragment``. .. literalinclude:: ../../../examples/python/reconstruction_system/optimize_posegraph.py :language: python - :lineno-start: 51 - :lines: 27,52-63 + :pyobject: optimize_posegraph_for_fragment :linenos: + :lineno-match: This function calls ``global_optimization`` to estimate poses of the RGBD images. @@ -74,11 +74,11 @@ Make a fragment .. literalinclude:: ../../../examples/python/reconstruction_system/make_fragments.py :language: python - :lineno-start: 124 - :lines: 27,125-146 + :pyobject: integrate_rgb_frames_for_fragment :linenos: + :lineno-match: -Once the poses are estimates, :ref:`/tutorial/pipelines/rgbd_integration.ipynb` +Once the poses are estimated, :ref:`/tutorial/pipelines/rgbd_integration.ipynb` is used to reconstruct a colored fragment from each RGBD sequence. Batch processing @@ -86,11 +86,16 @@ Batch processing .. literalinclude:: ../../../examples/python/reconstruction_system/make_fragments.py :language: python - :lineno-start: 181 - :lines: 27,182-205 + :start-at: def process_single_fragment(fragment_id, color_files, depth_files, n_files, :linenos: + :lineno-match: + +The ``process_single_fragment`` function calls each individual function explained above. +The ``run`` function determines the number of fragments to generate based on the number +of images in the dataset and the configuration value ``n_frames_per_fragment``. +Subsequently, it invokes ``process_single_fragment`` for each of these fragments. +Furthermore, it leverages multiprocessing to speed up computation of all fragments. -The main function calls each individual function explained above. .. _reconstruction_system_make_fragments_results: diff --git a/docs/tutorial/reconstruction_system/refine_registration.rst b/docs/tutorial/reconstruction_system/refine_registration.rst index 2afcb24b6bc..e512255f671 100644 --- a/docs/tutorial/reconstruction_system/refine_registration.rst +++ b/docs/tutorial/reconstruction_system/refine_registration.rst @@ -20,9 +20,9 @@ Fine-grained registration .. literalinclude:: ../../../examples/python/reconstruction_system/refine_registration.py :language: python - :lineno-start: 63 - :lines: 27,64-136 :linenos: + :pyobject: multiscale_icp + :lineno-match: Two options are given for the fine-grained registration. The ``color`` option is recommended since it uses color information to prevent drift. See [Park2017]_ @@ -33,9 +33,9 @@ Multiway registration .. literalinclude:: ../../../examples/python/reconstruction_system/refine_registration.py :language: python - :lineno-start: 40 - :lines: 27,41-63 :linenos: + :pyobject: update_posegraph_for_scene + :lineno-match: This script uses the technique demonstrated in :ref:`/tutorial/pipelines/multiway_registration.ipynb`. 
Function ``update_posegraph_for_scene`` builds a pose graph for multiway registration of all fragments. Each graph node represents a fragment and its pose which transforms the geometry to the global space. @@ -44,21 +44,20 @@ for multiway registration. .. literalinclude:: ../../../examples/python/reconstruction_system/optimize_posegraph.py :language: python - :lineno-start: 63 - :lines: 27,64-73 :linenos: + :pyobject: optimize_posegraph_for_scene + :lineno-match: Main registration loop `````````````````````````````````````` -The function ``make_posegraph_for_refined_scene`` below calls all the functions - introduced above. +The function ``make_posegraph_for_refined_scene`` below calls all the functions introduced above. .. literalinclude:: ../../../examples/python/reconstruction_system/refine_registration.py :language: python - :lineno-start: 173 - :lines: 27,174-223 :linenos: + :pyobject: make_posegraph_for_refined_scene + :lineno-match: The main workflow is: pairwise local refinement -> multiway registration. diff --git a/docs/tutorial/reconstruction_system/register_fragments.rst b/docs/tutorial/reconstruction_system/register_fragments.rst index 7e3486589a8..b86adc62fda 100644 --- a/docs/tutorial/reconstruction_system/register_fragments.rst +++ b/docs/tutorial/reconstruction_system/register_fragments.rst @@ -20,9 +20,9 @@ Preprocess point cloud .. literalinclude:: ../../../examples/python/reconstruction_system/register_fragments.py :language: python - :lineno-start: 41 - :lines: 27,42-54 + :pyobject: preprocess_point_cloud :linenos: + :lineno-match: This function downsamples a point cloud to make it sparser and regularly distributed. Normals and FPFH feature are precomputed. See @@ -36,9 +36,9 @@ Compute initial registration .. literalinclude:: ../../../examples/python/reconstruction_system/register_fragments.py :language: python - :lineno-start: 85 - :lines: 27,86-114 + :pyobject: compute_initial_registration :linenos: + :lineno-match: This function computes a rough alignment between two fragments. If the fragments are neighboring fragments, the rough alignment is determined by an aggregating @@ -53,9 +53,9 @@ Pairwise global registration .. literalinclude:: ../../../examples/python/reconstruction_system/register_fragments.py :language: python - :lineno-start: 54 - :lines: 27,55-85 + :pyobject: register_point_cloud_fpfh :linenos: + :lineno-match: This function uses :ref:`/tutorial/pipelines/global_registration.ipynb#RANSAC` or :ref:`/tutorial/pipelines/global_registration.ipynb#fast-global-registration` for pairwise global registration. @@ -66,9 +66,9 @@ Multiway registration .. literalinclude:: ../../../examples/python/reconstruction_system/register_fragments.py :language: python - :lineno-start: 114 - :lines: 27,115-137 + :pyobject: update_posegraph_for_scene :linenos: + :lineno-match: This script uses the technique demonstrated in :ref:`/tutorial/pipelines/multiway_registration.ipynb`. The function @@ -81,9 +81,9 @@ called for multiway registration. .. literalinclude:: ../../../examples/python/reconstruction_system/optimize_posegraph.py :language: python - :lineno-start: 63 - :lines: 27,64-73 + :pyobject: optimize_posegraph_for_scene :linenos: + :lineno-match: Main registration loop `````````````````````````````````````` @@ -94,9 +94,9 @@ multiway registration. .. 
literalinclude:: ../../../examples/python/reconstruction_system/register_fragments.py :language: python - :lineno-start: 167 - :lines: 27,168-210 + :pyobject: make_posegraph_for_scene :linenos: + :lineno-match: Results `````````````````````````````````````` diff --git a/docs/tutorial/reconstruction_system/system_overview.rst b/docs/tutorial/reconstruction_system/system_overview.rst index 2cae7ed387e..0394d629c1e 100644 --- a/docs/tutorial/reconstruction_system/system_overview.rst +++ b/docs/tutorial/reconstruction_system/system_overview.rst @@ -48,7 +48,7 @@ Getting the example code .. code-block:: sh - # Activate your conda enviroment, where you have installed open3d pip package. + # Activate your conda environment, where you have installed open3d pip package. # Clone the Open3D github repository and go to the example. cd examples/python/reconstruction_system/ @@ -69,7 +69,7 @@ Running the example with default dataset. python run_system.py --make --register --refine --integrate Changing the default dataset. -One may change the default dataset to other avaialble datasets. +One may change the default dataset to other available datasets. Currently the following datasets are available: 1. Lounge (keyword: ``lounge``) (Default) @@ -81,7 +81,7 @@ Currently the following datasets are available: .. code-block:: sh - # Using jack_jack as the default dataset. + # Using bedroom as the default dataset. python run_system.py --default_dataset 'bedroom' --make --register --refine --integrate Running the example with custom dataset using config file. diff --git a/docs/tutorial/t_geometry/index.rst b/docs/tutorial/t_geometry/index.rst index e1f1e9a4bc8..7c24fb565cb 100644 --- a/docs/tutorial/t_geometry/index.rst +++ b/docs/tutorial/t_geometry/index.rst @@ -1,5 +1,5 @@ Geometry (Tensor) -======== +================= .. toctree:: :caption: Basics diff --git a/docs/tutorial/t_pipelines/index.rst b/docs/tutorial/t_pipelines/index.rst index b687d2ce428..b10fa1b0e09 100644 --- a/docs/tutorial/t_pipelines/index.rst +++ b/docs/tutorial/t_pipelines/index.rst @@ -1,7 +1,7 @@ .. _t_pipelines: Pipelines (Tensor) -========= +================== .. toctree:: diff --git a/docs/tutorial/t_reconstruction_system/customized_integration.rst b/docs/tutorial/t_reconstruction_system/customized_integration.rst index 79625fd5ec2..78e252fa8d5 100644 --- a/docs/tutorial/t_reconstruction_system/customized_integration.rst +++ b/docs/tutorial/t_reconstruction_system/customized_integration.rst @@ -10,8 +10,8 @@ The frustum **block** selection remains the same, but then we manually activate .. literalinclude:: ../../../examples/python/t_reconstruction_system/integrate_custom.py :language: python - :lineno-start: 78 - :lines: 27,79-87 + :lineno-start: 60 + :lines: 8,61-68 Voxel Indices `````````````` @@ -19,8 +19,8 @@ We can then unroll **voxel** indices in these **blocks** into a flattened array, .. literalinclude:: ../../../examples/python/t_reconstruction_system/integrate_custom.py :language: python - :lineno-start: 91 - :lines: 27,92-93 + :lineno-start: 72 + :lines: 8,73-74 Up to now we have finished preparation. Then we can perform customized geometry transformation in the Tensor interface, with the same fashion as we conduct in numpy or pytorch. @@ -30,8 +30,8 @@ We first transform the voxel coordinates to the frame's coordinate system, proje .. 
literalinclude:: ../../../examples/python/t_reconstruction_system/integrate_custom.py :language: python - :lineno-start: 99 - :lines: 27,100-118 + :lineno-start: 80 + :lines: 8,81-98 Customized integration ```````````````````````` @@ -43,7 +43,7 @@ With the data association, we are able to conduct integration. In this example, .. literalinclude:: ../../../examples/python/t_reconstruction_system/integrate_custom.py :language: python - :lineno-start: 118 - :lines: 27,119-128,133-151 + :lineno-start: 98 + :lines: 8,99-108,113-132 You may follow the example and adapt it to your customized properties. Open3D supports conversion from and to PyTorch tensors without memory any copy, see :ref:`/tutorial/core/tensor.ipynb#PyTorch-I/O-with-DLPack-memory-map`. This can be use to leverage PyTorch's capabilities such as automatic differentiation and other operators. diff --git a/docs/tutorial/t_reconstruction_system/dense_slam.rst b/docs/tutorial/t_reconstruction_system/dense_slam.rst index b11a76ed374..84b2c382fca 100644 --- a/docs/tutorial/t_reconstruction_system/dense_slam.rst +++ b/docs/tutorial/t_reconstruction_system/dense_slam.rst @@ -13,8 +13,8 @@ In a SLAM system, we maintain a ``model`` built upon a :ref:`voxel_block_grid`, .. literalinclude:: ../../../examples/python/t_reconstruction_system/dense_slam.py :language: python - :lineno-start: 45 - :lines: 27,46-54 + :lineno-start: 26 + :lines: 8,27-35 Frame-to-model tracking ```````````````````````` @@ -22,8 +22,8 @@ The frame-to-model tracking runs in a loop: .. literalinclude:: ../../../examples/python/t_reconstruction_system/dense_slam.py :language: python - :lineno-start: 57 - :lines: 27,58-78 + :lineno-start: 38 + :lines: 8,39,42-61 where we iteratively update the synthesized frame via ray-casting from the model, and perform the tensor version of :ref:`/tutorial/pipelines/rgbd_odometry.ipynb` between the input frame and the synthesized frame. @@ -44,7 +44,7 @@ If all above have been correctly set but still no luck, please file an issue. **Q**: So WHY did my tracking fail? -**A**: For the front end, we are using direct RGB-D odometry. Comparing to feature-based odometry, RGB-D odometry is more accurate when it completes successfully but is less robust. We will add support for feature-based tracking in the future. For the backend, unlike our offline reconstruction system, we do not detect loop closures, and do not perform pose graph optimization or bundle adjustment at the moment. +**A**: For the front end, we are using direct RGB-D odometry. Compared to feature-based odometry, RGB-D odometry is more accurate when it completes successfully but is less robust. We will add support for feature-based tracking in the future. For the backend, unlike our offline reconstruction system, we do not detect loop closures, and do not perform pose graph optimization or bundle adjustment at the moment. **Q**: Why don't you implement loop closure or relocalization? diff --git a/docs/tutorial/t_reconstruction_system/index.rst b/docs/tutorial/t_reconstruction_system/index.rst index 31322f613c0..57fb76950b1 100644 --- a/docs/tutorial/t_reconstruction_system/index.rst +++ b/docs/tutorial/t_reconstruction_system/index.rst @@ -25,7 +25,7 @@ Getting the example code .. code-block:: sh - # Activate your conda enviroment, where you have installed open3d pip package. + # Activate your conda environment, where you have installed open3d pip package. # Clone the Open3D github repository and go to the example. 
cd examples/python/t_reconstruction_system/ @@ -40,7 +40,7 @@ Running the example with default dataset. # which is ``lounge`` dataset from stanford. python dense_slam_gui.py -It is recommended to use CUDA if avaialble. +It is recommended to use CUDA if available. .. code-block:: sh @@ -49,7 +49,7 @@ It is recommended to use CUDA if avaialble. python dense_slam_gui.py --device 'cuda:0' Changing the default dataset. -One may change the default dataset to other avaialble datasets. +One may change the default dataset to other available datasets. Currently the following datasets are available: 1. Lounge (keyword: ``lounge``) (Default) @@ -74,7 +74,7 @@ Example config file for online reconstruction system has been provided in ``examples/python/t_reconstruction_system/default_config.yml``, which looks like the following: .. literalinclude:: ../../../examples/python/t_reconstruction_system/default_config.yml - :language: yml + :language: yaml :lineno-start: 1 :lines: 1- :linenos: @@ -87,7 +87,7 @@ images using the Intel RealSense camera. For more details, please see :ref:`capture_your_own_dataset`. Getting started with online reconstruction system -`````````````````````````````````````` +````````````````````````````````````````````````` .. toctree:: diff --git a/docs/tutorial/t_reconstruction_system/integration.rst b/docs/tutorial/t_reconstruction_system/integration.rst index e62d9bf3cfc..417f723205d 100644 --- a/docs/tutorial/t_reconstruction_system/integration.rst +++ b/docs/tutorial/t_reconstruction_system/integration.rst @@ -12,8 +12,8 @@ In the activation step, we first locate blocks that contain points unprojected f .. literalinclude:: ../../../examples/python/t_reconstruction_system/integrate.py :language: python - :lineno-start: 82 - :lines: 27,83-85 + :lineno-start: 51 + :lines: 8,52-54 Integration ```````````` @@ -23,24 +23,24 @@ We may use optimized functions, along with raw depth images with calibration par .. literalinclude:: ../../../examples/python/t_reconstruction_system/integrate.py :language: python - :lineno-start: 86 - :lines: 27,87-93 + :lineno-start: 55 + :lines: 8,56-63 Currently, to use our optimized function, we assume the below combinations of data types, in the order of ``depth image``, ``color image``, ``tsdf in voxel grid``, ``weight in voxel grid``, ``color in voxel grid`` in CPU .. literalinclude:: ../../../cpp/open3d/t/geometry/kernel/VoxelBlockGridCPU.cpp :language: cpp - :lineno-start: 229 - :lines: 230-236 + :lineno-start: 212 + :lines: 212-218 and CUDA .. literalinclude:: ../../../cpp/open3d/t/geometry/kernel/VoxelBlockGridCUDA.cu :language: cpp - :lineno-start: 255 - :lines: 256-262 + :lineno-start: 238 + :lines: 238-244 -For more generalized functionalities, you may extend the macros and/or the kernel functions and compile Open3D from scratch achieve the maximal performance (~100Hz on a GTX 1070), or follow :ref:`customized_integration` and implement a fast prototype (~25Hz). +For more generalized functionalities, you may extend the macros and/or the kernel functions and compile Open3D from scratch to achieve the maximal performance (~100Hz on a GTX 1070), or follow :ref:`customized_integration` and implement a fast prototype (~25Hz). Surface extraction `````````````````` @@ -48,10 +48,10 @@ You may use the provided APIs to extract surface points. .. 
literalinclude:: ../../../examples/python/t_reconstruction_system/integrate.py :language: python - :lineno-start: 135 - :lines: 27,136-140 + :lineno-start: 105 + :lines: 8, 106-110 -Note ``extract_triangle_mesh`` applies marching cubes and generate mesh. ``extract_point_cloud`` uses the similar algorithm, but skips the triangle face generation step. +Note ``extract_triangle_mesh`` applies marching cubes and generates mesh. ``extract_point_cloud`` uses a similar algorithm, but skips the triangle face generation step. Save and load `````````````` @@ -60,7 +60,7 @@ The voxel block grids can be saved to and loaded from `.npz` files that are acce .. literalinclude:: ../../../examples/python/t_reconstruction_system/integrate.py :language: python :lineno-start: 47 - :lines: 27,48,98 + :lines: 8,48,98 The ``.npz`` file of the aforementioned voxel block grid contains the following entries: diff --git a/docs/tutorial/t_reconstruction_system/ray_casting.rst b/docs/tutorial/t_reconstruction_system/ray_casting.rst index ca74ee98b02..3809a7941a7 100644 --- a/docs/tutorial/t_reconstruction_system/ray_casting.rst +++ b/docs/tutorial/t_reconstruction_system/ray_casting.rst @@ -12,19 +12,19 @@ We provide optimized conventional rendering, and basic support for customized re Conventional rendering `````````````````````` -From a reconstructed voxel block grid from :ref:`optimized_integration`, we can efficiently render the scene given the input depth as a rough range estimate. +From a reconstructed voxel block grid :code:`vbg` from :ref:`optimized_integration`, we can efficiently render the scene given the input depth as a rough range estimate. .. literalinclude:: ../../../examples/python/t_reconstruction_system/ray_casting.py :language: python - :lineno-start: 76 - :lines: 27,77-92 + :lineno-start: 68 + :lines: 8,69-82 The results could be directly obtained and visualized by .. literalinclude:: ../../../examples/python/t_reconstruction_system/ray_casting.py :language: python - :lineno-start: 90 - :lines: 27,91,93-95,105-112 + :lineno-start: 83 + :lines: 8,84,86-88,98-105 Customized rendering ````````````````````` @@ -32,7 +32,7 @@ In customized rendering, we manually perform trilinear-interpolation by accessin .. literalinclude:: ../../../examples/python/t_reconstruction_system/ray_casting.py :language: python - :lineno-start: 97 - :lines: 27,98-103,114-115 + :lineno-start: 90 + :lines: 8,91-96,107-108 Since the output is rendered via indices, the rendering process could be rewritten in differentiable engines like PyTorch seamlessly via :ref:`/tutorial/core/tensor.ipynb#PyTorch-I/O-with-DLPack-memory-map`. diff --git a/docs/tutorial/t_reconstruction_system/voxel_block_grid.rst b/docs/tutorial/t_reconstruction_system/voxel_block_grid.rst index 0836bd471f0..cf8eed1f2fa 100644 --- a/docs/tutorial/t_reconstruction_system/voxel_block_grid.rst +++ b/docs/tutorial/t_reconstruction_system/voxel_block_grid.rst @@ -14,21 +14,21 @@ A voxel block grid can be constructed by: .. literalinclude:: ../../../examples/python/t_reconstruction_system/integrate.py :language: python - :lineno-start: 56 - :lines: 27,57-74 + :lineno-start: 27 + :lines: 8,28-45 -In this example, the multi-value hash map has key shape shape ``(3,)`` and dtype ``int32``. The hash map values are organized as a structure of array (SoA). The hash map values include: +In this example, the multi-value hash map has key of shape ``(3,)`` and dtype ``float32``. The hash map values are organized as a structure of array (SoA). 
By default, it contains: -- Truncated Signed Distance Function (TSDF) of element shape ``(8, 8, 8, 1)`` -- Weight of element shape ``(8, 8, 8, 1)`` -- (Optionally) RGB color of element shape ``(8, 8, 8, 3)`` +- Truncated Signed Distance Function (TSDF) of element shape ``(16, 16, 16, 1)`` +- Weight of element shape ``(16, 16, 16, 1)`` +- (Optionally) RGB color of element shape ``(16, 16, 16, 3)`` -where ``8`` stands for the local voxel block grid resolution. +where ``16`` stands for the local voxel block grid resolution. By convention, we use ``3.0 / 512`` as the voxel resolution. This spatial resolution is equivalent to representing a ``3m x 3m x 3m`` (m = meter) room with a dense ``512 x 512 x 512`` voxel grid. -The voxel block grid is optimized to run fast on GPU. On CPU the it runs slower. Empirically, we reserve ``100000`` such blocks for a living room-scale scene to avoid frequent rehashing. +The voxel block grid is optimized to run fast on GPU. On CPU, it runs slower. Empirically, we reserve ``50000`` such blocks for a living room-scale scene to avoid frequent rehashing. -You can always customize your own properties, e.g., ``intensity`` of element shape ``(8, 8, 8, 1)`` in ``float32``, ``label`` of element shape ``(8, 8, 8, 1)`` in ``int32``, etc. To know how to process the data, please refer to :ref:`customized_integration`. +You can always customize your own properties, e.g., ``intensity`` of element shape ``(16, 16, 16, 1)`` in ``float32``, ``label`` of element shape ``(16, 16, 16, 1)`` in ``int32``, etc. To know how to process the data, please refer to :ref:`customized_integration`. diff --git a/docs/tutorial/visualization/cpu_rendering.rst b/docs/tutorial/visualization/cpu_rendering.rst index 7eed9efa007..f504382469e 100644 --- a/docs/tutorial/visualization/cpu_rendering.rst +++ b/docs/tutorial/visualization/cpu_rendering.rst @@ -27,28 +27,35 @@ Headless CPU Rendering ---------------------- For Python code, you can enable CPU rendering for headless rendering when using -the :class: `.OffscreenRenderer` for a process by setting the environment -variable ``OPEN3D_CPU_RENDERING=true`` before importing Open3D. Here are the -different ways to do that: +the :class: `.OffscreenRenderer` for a process by setting an environment +variable before importing Open3D:: + + - ``EGL_PLATFORM=surfaceless`` for Ubuntu 20.04+ (Mesa v20.2 or newer) + - ``OPEN3D_CPU_RENDERING=true`` for Ubuntu 18.04 (Mesa older than v20.2). + +Here are the different ways to do that: .. code:: bash - # from the command line - OPEN3D_CPU_RENDERING=true python - examples/python/visualization/render_to_image.py + # from the command line (Ubuntu 20.04+) + EGL_PLATFORM=surfaceless python examples/python/visualization/render_to_image.py + # or Ubuntu 18.04 + OPEN3D_CPU_RENDERING=true python examples/python/visualization/render_to_image.py .. code:: python # In Python code import os - os.environ['OPEN3D_CPU_RENDERING'] = 'true' + os.environ['EGL_PLATFORM'] = 'surfaceless' # Ubuntu 20.04+ + os.environ['OPEN3D_CPU_RENDERING'] = 'true' # Ubuntu 18.04 import open3d as o3d # In a Jupyter notebook - %env OPEN3D_CPU_RENDERING true + %env EGL_PLATFORM surfaceless # Ubuntu 20.04+ + %env OPEN3D_CPU_RENDERING true # Ubuntu 18.04 import open3d as o3d -.. note:: Seeting the environment variable after importing ``open3d`` will not work, +.. note:: Setting the environment variable after importing ``open3d`` will not work, even if ``open3d`` is re-imported.
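To make the voxel block grid attribute layout discussed above concrete, here is a hedged sketch of constructing a grid with the default ``tsdf``/``weight``/``color`` attributes plus a custom ``label`` attribute. The extra attribute is only illustrative, and the block resolution and block count follow the values quoted in the text; argument order mirrors the constructor call used in the integration example.

.. code-block:: python

    import open3d as o3d
    import open3d.core as o3c

    device = o3d.core.Device('CUDA:0' if o3d.core.cuda.is_available() else 'CPU:0')

    # attribute names, dtypes, channels per voxel, voxel size,
    # block resolution, block count, device
    vbg = o3d.t.geometry.VoxelBlockGrid(
        ('tsdf', 'weight', 'color', 'label'),
        (o3c.float32, o3c.float32, o3c.float32, o3c.int32),
        ((1), (1), (3), (1)),
        3.0 / 512, 16, 50000, device)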
In this case, if no usable GPU is present, the Python interpreter or Jupyter kernel will crash when visualization functions are used. @@ -84,13 +91,31 @@ The method for enabling interactive CPU rendering depends on your system: them installed is not sufficient. You can check the drivers in use with the ``glxinfo`` command. -2. **You use Nvidia or AMD drivers or old Mesa drivers (< v20.2).** We provide +2. **You use Nvidia or AMD drivers, but your OS comes with recent Mesa drivers (>= v20.2).** + Install Mesa drivers if they are not installed in your system (e.g. `sudo apt install libglx0-mesa` + in Ubuntu). Preload the Mesa driver library before running any Open3D application requiring CPU rendering. + For example: + + .. code:: bash + + export LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libGLX_mesa.so.0 + Open3D + + Or with Python code: + + .. code:: bash + + export LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libGLX_mesa.so.0 + python examples/python/visualization/draw.py + +3. **Your OS has old Mesa drivers (< v20.2).** We provide the Mesa software rendering library binary for download `here `__. This is automatically downloaded to `build/_deps/download_mesa_libgl-src/libGL.so.1.5.0` when you build Open3D - from source. If you want to use CPU rendering all the time, install this - library to ``/usr/local/lib`` or ``$HOME/.local/lib`` and *prepend* it to your + from source. The prebuilt version works on Ubuntu 18.04 and Ubuntu 20.04. If + you want to use CPU rendering all the time, install this library to + ``/usr/local/lib`` or ``$HOME/.local/lib`` and *prepend* it to your ``LD_LIBRARY_PATH``: .. code:: bash @@ -107,5 +132,4 @@ The method for enabling interactive CPU rendering depends on your system: .. code:: bash - LD_PRELOAD=$HOME/.local/lib/libGL.so.1.5.0 python - examples/python/visualization/draw.py + LD_PRELOAD=$HOME/.local/lib/libGL.so.1.5.0 python examples/python/visualization/draw.py diff --git a/docs/tutorial/visualization/customized_visualization.rst b/docs/tutorial/visualization/customized_visualization.rst index c410571e03e..b437fbb932b 100644 --- a/docs/tutorial/visualization/customized_visualization.rst +++ b/docs/tutorial/visualization/customized_visualization.rst @@ -12,8 +12,8 @@ Mimic draw_geometries() with Visualizer class .. literalinclude:: ../../../examples/python/visualization/customized_visualization.py :language: python - :lineno-start: 37 - :lines: 37-44 + :lineno-start: 39 + :lines: 39-47 :linenos: This function produces exactly the same functionality as the convenience function ``draw_geometries``. @@ -25,8 +25,8 @@ Class ``Visualizer`` has a couple of variables such as a ``ViewControl`` and a ` .. literalinclude:: ../../../examples/python/visualization/customized_visualization.py :language: python - :lineno-start: 70 - :lines: 70-76 + :lineno-start: 50 + :lines: 50-56 :linenos: Outputs: @@ -40,8 +40,8 @@ To change field of view of the camera, it is first necessary to get an instance .. literalinclude:: ../../../examples/python/visualization/customized_visualization.py :language: python - :lineno-start: 47 - :lines: 47-56 + :lineno-start: 27 + :lines: 27-36 :linenos: The field of view (FoV) can be set to a degree in the range [5,90]. Note that ``change_field_of_view`` adds the specified FoV to the current FoV. By default, the visualizer has an FoV of 60 degrees. Calling the following code @@ -71,8 +71,8 @@ Callback functions .. 
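For readers following the customized visualization section above, the ``draw_geometries``-equivalent wrapper being referenced boils down to a handful of ``Visualizer`` calls. The following is a rough sketch rather than the exact example file; loading the demo point cloud from the ``DemoICPPointClouds`` dataset is only for illustration.

.. code-block:: python

    import open3d as o3d

    def custom_draw_geometry(pcd):
        # Roughly what draw_geometries() does internally.
        vis = o3d.visualization.Visualizer()
        vis.create_window()
        vis.add_geometry(pcd)
        vis.run()
        vis.destroy_window()

    pcd = o3d.io.read_point_cloud(o3d.data.DemoICPPointClouds().paths[0])
    custom_draw_geometry(pcd)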
literalinclude:: ../../../examples/python/visualization/customized_visualization.py :language: python - :lineno-start: 59 - :lines: 59-67 + :lineno-start: 39 + :lines: 39-47 :linenos: Function ``draw_geometries_with_animation_callback`` registers a Python callback function ``rotate_view`` as the idle function of the main loop. It rotates the view along the x-axis whenever the visualizer is idle. This defines an animation behavior. @@ -82,8 +82,8 @@ Function ``draw_geometries_with_animation_callback`` registers a Python callback .. literalinclude:: ../../../examples/python/visualization/customized_visualization.py :language: python - :lineno-start: 79 - :lines: 79-108 + :lineno-start: 59 + :lines: 59-87 :linenos: Callback functions can also be registered upon key press event. This script registered four keys. For example, pressing :kbd:`k` changes the background color to black. @@ -96,8 +96,8 @@ Capture images in a customized animation .. literalinclude:: ../../../examples/python/visualization/customized_visualization.py :language: python - :lineno-start: 109 - :lines: 111-162 + :lineno-start: 90 + :lines: 90-141 :linenos: This function reads a camera trajectory, then defines an animation function ``move_forward`` to travel through the camera trajectory. In this animation function, both color image and depth image are captured using ``Visualizer.capture_depth_float_buffer`` and ``Visualizer.capture_screen_float_buffer`` respectively. The images are saved as png files. diff --git a/docs/tutorial/visualization/interactive_visualization.rst b/docs/tutorial/visualization/interactive_visualization.rst index d886466e568..8dfb03edebb 100644 --- a/docs/tutorial/visualization/interactive_visualization.rst +++ b/docs/tutorial/visualization/interactive_visualization.rst @@ -3,13 +3,16 @@ Interactive visualization ------------------------------------- -This tutorial introduces user interaction features of the visualizer window. +This tutorial introduces user interaction features of the visualizer window provided by:- + +#. ``open3d.visualization.draw_geometries_with_editing`` +#. ``open3d.visualization.VisualizerWithEditing`` .. literalinclude:: ../../../examples/python/visualization/interactive_visualization.py :language: python - :lineno-start: 27 - :lines: 27- + :start-at: # examples/python/visualization/interactive_visualization.py :linenos: + :lineno-match: This script executes two applications of user interaction: ``demo_crop_geometry`` and ``demo_manual_registration``. @@ -20,9 +23,9 @@ Crop geometry .. literalinclude:: ../../../examples/python/visualization/interactive_visualization.py :language: python - :lineno-start: 37 - :lines: 37-51 + :pyobject: demo_crop_geometry :linenos: + :lineno-match: This function simply reads a point cloud and calls ``draw_geometries_with_editing``. This function provides vertex selection and cropping. @@ -58,27 +61,30 @@ To finish selection mode, press ``F`` to switch to freeview mode. Manual registration ````````````````````````````````````````````` -Select correspondences -===================================== - The following script registers two point clouds using point-to-point ICP. It gets initial alignment via user interaction. +Prepare data +===================================== + .. literalinclude:: ../../../examples/python/visualization/interactive_visualization.py :language: python - :lineno-start: 61 - :lines: 61-76 + :pyobject: prepare_data :linenos: + :lineno-match: -The script reads two point clouds, and visualizes the point clouds before alignment. 
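As a companion to the callback-function discussion earlier in this file, a minimal sketch of an idle callback that rotates the view, mirroring the ``rotate_view`` function described there; the rotation step of ``10.0`` is arbitrary.

.. code-block:: python

    import open3d as o3d

    def custom_draw_geometry_with_rotation(pcd):

        def rotate_view(vis):
            # Called whenever the visualizer is idle; rotate the view slightly.
            ctr = vis.get_view_control()
            ctr.rotate(10.0, 0.0)
            return False

        o3d.visualization.draw_geometries_with_animation_callback([pcd],
                                                                   rotate_view)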
+This function reads two point clouds, and visualizes the point clouds before performing manual alignment. .. image:: ../../_static/visualization/interactive_visualization/manual_icp_initial.png :width: 400px +Select correspondences +===================================== + .. literalinclude:: ../../../examples/python/visualization/interactive_visualization.py :language: python - :lineno-start: 52 - :lines: 52-60 + :pyobject: pick_points :linenos: + :lineno-match: The function ``pick_points(pcd)`` makes an instance of ``VisualizerWithEditing``. To mimic ``draw_geometries``, it creates windows, adds the geometry, visualizes the geometry, and then terminates. A novel interface function from ``VisualizerWithEditing`` is ``get_picked_points()`` that returns the indices of user-picked vertices. @@ -115,9 +121,9 @@ Registration using user correspondences .. literalinclude:: ../../../examples/python/visualization/interactive_visualization.py :language: python - :lineno-start: 77 - :lines: 77-110 + :pyobject: register_via_correspondences :linenos: + :lineno-match: The later part of the demo computes an initial transformation based on the user-provided correspondences. This script builds pairs of correspondences using ``Vector2iVector(corr)``. It utilizes ``TransformationEstimationPointToPoint.compute_transformation`` to compute the initial transformation from the correspondences. The initial transformation is refined using ``registration_icp``. diff --git a/docs/tutorial/visualization/non_blocking_visualization.rst b/docs/tutorial/visualization/non_blocking_visualization.rst index 2a3397f1578..9d3e932646a 100644 --- a/docs/tutorial/visualization/non_blocking_visualization.rst +++ b/docs/tutorial/visualization/non_blocking_visualization.rst @@ -28,6 +28,7 @@ This rendering loop can be readily customized. For example, a custom loop can be vis = Visualizer() vis.create_window() + vis.add_geometry(geometry) for i in range(icp_iteration): # do ICP single iteration # transform geometry using ICP @@ -39,9 +40,9 @@ The full script implementing this idea is displayed below. .. literalinclude:: ../../../examples/python/visualization/non_blocking_visualization.py :language: python - :lineno-start: 27 - :lines: 27- + :start-at: # examples/python/visualization/non_blocking_visualization.py :linenos: + :lineno-match: The following sections explain this script. @@ -50,9 +51,9 @@ Prepare example data .. literalinclude:: ../../../examples/python/visualization/non_blocking_visualization.py :language: python - :lineno-start: 35 - :lines: 35-46 + :pyobject: prepare_data :linenos: + :lineno-match: This part reads two point clouds and downsamples them. The source point cloud is intentionally transformed for the misalignment. Both point clouds are flipped for better visualization. @@ -61,9 +62,10 @@ Initialize Visualizer class .. literalinclude:: ../../../examples/python/visualization/non_blocking_visualization.py :language: python - :lineno-start: 47 - :lines: 47-59 + :start-at: def demo_non_blocking_visualization(): + :end-at: save_image = False :linenos: + :lineno-match: These lines make an instance of the visualizer class, open a visualizer window, and add two geometries to the visualizer. @@ -72,9 +74,10 @@ Transform geometry and visualize it .. 
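The non-blocking rendering loop outlined in the snippet above expands to something like the following hedged sketch. It assumes ``source`` and ``target`` are point clouds prepared as in the example script; the distance threshold and iteration count are placeholders.

.. code-block:: python

    import numpy as np
    import open3d as o3d

    vis = o3d.visualization.Visualizer()
    vis.create_window()
    vis.add_geometry(source)
    vis.add_geometry(target)

    threshold = 0.05
    for i in range(100):
        # Force a single ICP iteration so intermediate poses can be rendered.
        reg = o3d.pipelines.registration.registration_icp(
            source, target, threshold, np.identity(4),
            o3d.pipelines.registration.TransformationEstimationPointToPoint(),
            o3d.pipelines.registration.ICPConvergenceCriteria(max_iteration=1))
        source.transform(reg.transformation)
        vis.update_geometry(source)
        vis.poll_events()
        vis.update_renderer()
    vis.destroy_window()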
literalinclude:: ../../../examples/python/visualization/non_blocking_visualization.py :language: python - :lineno-start: 59 - :lines: 59-72 + :start-at: for i in range(icp_iteration): + :end-at: vis.destroy_window() :linenos: + :lineno-match: This script calls ``registration_icp`` for every iteration. Note that it explicitly forces only one ICP iteration via ``ICPConvergenceCriteria(max_iteration = 1)``. This is a trick to retrieve a slight pose update from a single ICP iteration. After ICP, source geometry is transformed accordingly. diff --git a/docs/versions.js b/docs/versions.js index 2da78b6af4e..1522323da11 100644 --- a/docs/versions.js +++ b/docs/versions.js @@ -18,8 +18,12 @@ document.write('\ master C++\ \ \ - 0.16.1 (release)\ - 0.16.1 C++ (release)\ + 0.17.0 (release)\ + 0.17.0 C++ (release)\ + \ + \ + 0.16.0\ + 0.16.0 C++\ \ \ 0.15.1\ diff --git a/examples/cpp/IntegrateRGBD.cpp b/examples/cpp/IntegrateRGBD.cpp index dd5a8eedddb..39fc3d98225 100644 --- a/examples/cpp/IntegrateRGBD.cpp +++ b/examples/cpp/IntegrateRGBD.cpp @@ -68,7 +68,6 @@ int main(int argc, char *argv[]) { FILE *file = utility::filesystem::FOpen(match_filename, "r"); if (file == NULL) { utility::LogWarning("Unable to open file {}", match_filename); - fclose(file); return 0; } char buffer[DEFAULT_IO_BUFFER_SIZE]; diff --git a/examples/python/io/realsense_io.py b/examples/python/io/realsense_io.py index 3e64a76dd1f..fa386554034 100644 --- a/examples/python/io/realsense_io.py +++ b/examples/python/io/realsense_io.py @@ -4,20 +4,159 @@ # Copyright (c) 2018-2023 www.open3d.org # SPDX-License-Identifier: MIT # ---------------------------------------------------------------------------- -"""Demonstrate RealSense camera discovery and frame capture""" +"""Demonstrate RealSense camera discovery and frame capture. An RS bag file is +used if a RealSense camera is not available. Captured frames are +displayed as a live point cloud. Also frames are saved to ./capture/{color,depth} +folders. +Usage: + + - Display live point cloud from RS camera: + python realsense_io.py + + - Display live point cloud from RS bag file: + python realsense_io.py rgbd.bag + + If no device is detected and no bag file is supplied, uses the Open3D + example JackJackL515Bag dataset. 
+""" + +import sys +import os +from concurrent.futures import ThreadPoolExecutor import open3d as o3d +import open3d.t.io as io3d +from open3d.t.geometry import PointCloud +import open3d.visualization.gui as gui +import open3d.visualization.rendering as rendering -if __name__ == "__main__": +DEPTH_MAX = 3 + + +def point_cloud_video(executor, rgbd_frame, mdata, timestamp, o3dvis): + """Update window to display the next point cloud frame.""" + app = gui.Application.instance + update_flag = rendering.Scene.UPDATE_POINTS_FLAG | rendering.Scene.UPDATE_COLORS_FLAG - o3d.t.io.RealSenseSensor.list_devices() - rscam = o3d.t.io.RealSenseSensor() + executor.submit(io3d.write_image, + f"capture/color/{point_cloud_video.fid:05d}.jpg", + rgbd_frame.color) + executor.submit(io3d.write_image, + f"capture/depth/{point_cloud_video.fid:05d}.png", + rgbd_frame.depth) + print(f"Frame: {point_cloud_video.fid}, timestamp: {timestamp * 1e-6:.3f}s", + end="\r") + if point_cloud_video.fid == 0: + # Start with a dummy max sized point cloud to allocate GPU buffers + # for update_geometry() + max_pts = rgbd_frame.color.rows * rgbd_frame.color.columns + pcd = PointCloud(o3d.core.Tensor.zeros((max_pts, 3))) + pcd.paint_uniform_color([1., 1., 1.]) + app.post_to_main_thread(o3dvis, + lambda: o3dvis.add_geometry("Point Cloud", pcd)) + pcd = PointCloud.create_from_rgbd_image( + rgbd_frame, + # Intrinsic matrix: Tensor([[fx, 0., cx], [0., fy, cy], [0., 0., 1.]]) + mdata.intrinsics.intrinsic_matrix, + depth_scale=mdata.depth_scale, + depth_max=DEPTH_MAX) + # GUI operations MUST run in the main thread. + app.post_to_main_thread( + o3dvis, lambda: o3dvis.update_geometry("Point Cloud", pcd, update_flag)) + point_cloud_video.fid += 1 + + +point_cloud_video.fid = 0 + + +def pcd_video_from_camera(executor, o3dvis): + """Show point cloud video coming from a RealSense camera. Save frames to + disk in capture/{color,depth} folders. + """ + rscam = io3d.RealSenseSensor() rscam.start_capture() - print(rscam.get_metadata()) - for fid in range(5): - rgbd_frame = rscam.capture_frame() - o3d.io.write_image(f"color{fid:05d}.jpg", rgbd_frame.color.to_legacy()) - o3d.io.write_image(f"depth{fid:05d}.png", rgbd_frame.depth.to_legacy()) - print("Frame: {}, time: {}s".format(fid, rscam.get_timestamp() * 1e-6)) - - rscam.stop_capture() + mdata = rscam.get_metadata() + print(mdata) + os.makedirs("capture/color") + os.makedirs("capture/depth") + rgbd_frame_future = executor.submit(rscam.capture_frame) + + def on_window_close(): + nonlocal rscam, executor + executor.shutdown() + rscam.stop_capture() + return True # OK to close window + + o3dvis.set_on_close(on_window_close) + + while True: + rgbd_frame = rgbd_frame_future.result() + # Run each IO operation in the threadpool + rgbd_frame_future = executor.submit(rscam.capture_frame) + point_cloud_video(executor, rgbd_frame, mdata, rscam.get_timestamp(), + o3dvis) + + +def pcd_video_from_bag(rsbagfile, executor, o3dvis): + """Show point cloud video coming from a RealSense bag file. Save frames to + disk in capture/{color,depth} folders. 
+ """ + rsbag = io3d.RSBagReader.create(rsbagfile) + if not rsbag.is_opened(): + raise RuntimeError(f"RS bag file {rsbagfile} could not be opened.") + mdata = rsbag.metadata + print(mdata) + os.makedirs("capture/color") + os.makedirs("capture/depth") + + def on_window_close(): + nonlocal rsbag, executor + executor.shutdown() + rsbag.close() + return True # OK to close window + + o3dvis.set_on_close(on_window_close) + + rgbd_frame = rsbag.next_frame() + while not rsbag.is_eof(): + # Run each IO operation in the threadpool + rgbd_frame_future = executor.submit(rsbag.next_frame) + point_cloud_video(executor, rgbd_frame, mdata, rsbag.get_timestamp(), + o3dvis) + rgbd_frame = rgbd_frame_future.result() + + +def main(): + if os.path.exists("capture"): + raise RuntimeError( + "Frames saving destination folder 'capture' already exists. Please move it." + ) + + # Initialize app and create GUI window + app = gui.Application.instance + app.initialize() + o3dvis = o3d.visualization.O3DVisualizer("Open3D: PointCloud video", 1024, + 768) + o3dvis.show_axes = True + # set view: fov, eye, lookat, up + o3dvis.setup_camera(45, [0., 0., 0.], [0., 0., -1.], [0., -1., 0.]) + app.add_window(o3dvis) + + have_cam = io3d.RealSenseSensor.list_devices() + have_bag = (len(sys.argv) == 2) + + with ThreadPoolExecutor(max_workers=4) as executor: + # Run IO and compute in threadpool + if have_bag: + executor.submit(pcd_video_from_bag, sys.argv[1], executor, o3dvis) + elif have_cam: + executor.submit(pcd_video_from_camera, executor, o3dvis) + else: + executor.submit(pcd_video_from_bag, + o3d.data.JackJackL515Bag().path, executor, o3dvis) + + app.run() # GUI runs in the main thread. + + +if __name__ == "__main__": + main() diff --git a/examples/python/pipelines/registration_ransac.py b/examples/python/pipelines/registration_ransac.py index 9fa35b1d13c..df7e73dfa51 100644 --- a/examples/python/pipelines/registration_ransac.py +++ b/examples/python/pipelines/registration_ransac.py @@ -7,9 +7,22 @@ import open3d as o3d +import numpy as np +from copy import deepcopy import argparse +def visualize_registration(src, dst, transformation=np.eye(4)): + src_trans = deepcopy(src) + src_trans.transform(transformation) + src_trans.paint_uniform_color([1, 0, 0]) + + dst_clone = deepcopy(dst) + dst_clone.paint_uniform_color([0, 1, 0]) + + o3d.visualization.draw([src_trans, dst_clone]) + + def preprocess_point_cloud(pcd, voxel_size): pcd_down = pcd.voxel_down_sample(voxel_size) pcd_down.estimate_normals( @@ -18,82 +31,119 @@ def preprocess_point_cloud(pcd, voxel_size): pcd_fpfh = o3d.pipelines.registration.compute_fpfh_feature( pcd_down, o3d.geometry.KDTreeSearchParamHybrid(radius=voxel_size * 5.0, - max_nn=100)) + max_nn=100), + ) return (pcd_down, pcd_fpfh) -if __name__ == '__main__': +if __name__ == "__main__": pcd_data = o3d.data.DemoICPPointClouds() + + # yapf: disable parser = argparse.ArgumentParser( - 'Global point cloud registration example with RANSAC') - parser.add_argument('src', - type=str, - default=pcd_data.paths[0], - nargs='?', - help='path to src point cloud') - parser.add_argument('dst', - type=str, - default=pcd_data.paths[1], - nargs='?', - help='path to dst point cloud') - parser.add_argument('--voxel_size', - type=float, - default=0.05, - help='voxel size in meter used to downsample inputs') + "Global point cloud registration example with RANSAC" + ) + parser.add_argument( + "src", type=str, default=pcd_data.paths[0], nargs="?", + help="path to src point cloud", + ) + parser.add_argument( + "dst", type=str, 
default=pcd_data.paths[1], nargs="?", + help="path to dst point cloud", + ) + parser.add_argument( + "--voxel_size", type=float, default=0.05, + help="voxel size in meter used to downsample inputs", + ) + parser.add_argument( + "--distance_multiplier", type=float, default=1.5, + help="multipler used to compute distance threshold" + "between correspondences." + "Threshold is computed by voxel_size * distance_multiplier.", + ) parser.add_argument( - '--distance_multiplier', - type=float, - default=1.5, - help='multipler used to compute distance threshold' - 'between correspondences.' - 'Threshold is computed by voxel_size * distance_multiplier.') - parser.add_argument('--max_iterations', - type=int, - default=1000000, - help='number of max RANSAC iterations') - parser.add_argument('--confidence', - type=float, - default=0.999, - help='RANSAC confidence') + "--max_iterations", type=int, default=100000, + help="number of max RANSAC iterations", + ) parser.add_argument( - '--mutual_filter', - action='store_true', - help='whether to use mutual filter for putative correspondences') + "--confidence", type=float, default=0.999, help="RANSAC confidence" + ) + parser.add_argument( + "--mutual_filter", action="store_true", + help="whether to use mutual filter for putative correspondences", + ) + parser.add_argument( + "--method", choices=["from_features", "from_correspondences"], default="from_correspondences" + ) + # yapf: enable args = parser.parse_args() voxel_size = args.voxel_size distance_threshold = args.distance_multiplier * voxel_size - o3d.utility.set_verbosity_level(o3d.utility.VerbosityLevel.Debug) - print('Reading inputs') + + print("Reading inputs") src = o3d.io.read_point_cloud(args.src) dst = o3d.io.read_point_cloud(args.dst) - print('Downsampling inputs') + print("Downsampling inputs") src_down, src_fpfh = preprocess_point_cloud(src, voxel_size) dst_down, dst_fpfh = preprocess_point_cloud(dst, voxel_size) - print('Running RANSAC') - result = o3d.pipelines.registration.registration_ransac_based_on_feature_matching( - src_down, - dst_down, - src_fpfh, - dst_fpfh, - mutual_filter=args.mutual_filter, - max_correspondence_distance=distance_threshold, - estimation_method=o3d.pipelines.registration. - TransformationEstimationPointToPoint(False), - ransac_n=3, - checkers=[ - o3d.pipelines.registration.CorrespondenceCheckerBasedOnEdgeLength( - 0.9), - o3d.pipelines.registration.CorrespondenceCheckerBasedOnDistance( - distance_threshold) - ], - criteria=o3d.pipelines.registration.RANSACConvergenceCriteria( - args.max_iterations, args.confidence)) - - src.paint_uniform_color([1, 0, 0]) - dst.paint_uniform_color([0, 1, 0]) - o3d.visualization.draw([src.transform(result.transformation), dst]) + if args.method == "from_features": + print("Running RANSAC from features") + result = o3d.pipelines.registration.registration_ransac_based_on_feature_matching( + src_down, + dst_down, + src_fpfh, + dst_fpfh, + mutual_filter=args.mutual_filter, + max_correspondence_distance=distance_threshold, + estimation_method=o3d.pipelines.registration. + TransformationEstimationPointToPoint(False), + ransac_n=3, + checkers=[ + o3d.pipelines.registration. 
+ CorrespondenceCheckerBasedOnEdgeLength(0.9), + o3d.pipelines.registration.CorrespondenceCheckerBasedOnDistance( + distance_threshold), + ], + criteria=o3d.pipelines.registration.RANSACConvergenceCriteria( + args.max_iterations, args.confidence), + ) + visualize_registration(src, dst, result.transformation) + + elif args.method == "from_correspondences": + print("Running RANSAC from correspondences") + # Mimic importing customized external features (e.g. learned FCGF features) in numpy + # shape: (feature_dim, num_features) + src_fpfh_np = np.asarray(src_fpfh.data).copy() + dst_fpfh_np = np.asarray(dst_fpfh.data).copy() + + src_fpfh_import = o3d.pipelines.registration.Feature() + src_fpfh_import.data = src_fpfh_np + + dst_fpfh_import = o3d.pipelines.registration.Feature() + dst_fpfh_import.data = dst_fpfh_np + + corres = o3d.pipelines.registration.correspondences_from_features( + src_fpfh_import, dst_fpfh_import, args.mutual_filter) + result = o3d.pipelines.registration.registration_ransac_based_on_correspondence( + src_down, + dst_down, + corres, + max_correspondence_distance=distance_threshold, + estimation_method=o3d.pipelines.registration. + TransformationEstimationPointToPoint(False), + ransac_n=3, + checkers=[ + o3d.pipelines.registration. + CorrespondenceCheckerBasedOnEdgeLength(0.9), + o3d.pipelines.registration.CorrespondenceCheckerBasedOnDistance( + distance_threshold), + ], + criteria=o3d.pipelines.registration.RANSACConvergenceCriteria( + args.max_iterations, args.confidence), + ) + visualize_registration(src, dst, result.transformation) diff --git a/examples/python/reconstruction_system/debug/visualize_fragments.py b/examples/python/reconstruction_system/debug/visualize_fragments.py index c969a86e074..2c05bd19142 100644 --- a/examples/python/reconstruction_system/debug/visualize_fragments.py +++ b/examples/python/reconstruction_system/debug/visualize_fragments.py @@ -5,7 +5,7 @@ # SPDX-License-Identifier: MIT # ---------------------------------------------------------------------------- -# examples/python/reconstruction_system/debug/visualize_fragment.py +# examples/python/reconstruction_system/debug/visualize_fragments.py import argparse import json diff --git a/examples/python/reconstruction_system/make_fragments.py b/examples/python/reconstruction_system/make_fragments.py index 138fe567aaf..dc4e59d0780 100644 --- a/examples/python/reconstruction_system/make_fragments.py +++ b/examples/python/reconstruction_system/make_fragments.py @@ -8,6 +8,7 @@ # examples/python/reconstruction_system/make_fragments.py import math +import multiprocessing import os, sys import numpy as np import open3d as o3d @@ -172,13 +173,14 @@ def run(config): math.ceil(float(n_files) / config['n_frames_per_fragment'])) if config["python_multi_threading"] is True: - from joblib import Parallel, delayed - import multiprocessing - import subprocess - MAX_THREAD = min(multiprocessing.cpu_count(), n_fragments) - Parallel(n_jobs=MAX_THREAD)(delayed(process_single_fragment)( - fragment_id, color_files, depth_files, n_files, n_fragments, config) - for fragment_id in range(n_fragments)) + max_workers = min(max(1, multiprocessing.cpu_count() - 1), n_fragments) + # Prevent over allocation of open mp threads in child processes + os.environ['OMP_NUM_THREADS'] = '1' + mp_context = multiprocessing.get_context('spawn') + with mp_context.Pool(processes=max_workers) as pool: + args = [(fragment_id, color_files, depth_files, n_files, + n_fragments, config) for fragment_id in range(n_fragments)] + 
pool.starmap(process_single_fragment, args) else: for fragment_id in range(n_fragments): process_single_fragment(fragment_id, color_files, depth_files, diff --git a/examples/python/reconstruction_system/opencv_pose_estimation.py b/examples/python/reconstruction_system/opencv_pose_estimation.py index fc42b22f5ad..47714065858 100644 --- a/examples/python/reconstruction_system/opencv_pose_estimation.py +++ b/examples/python/reconstruction_system/opencv_pose_estimation.py @@ -178,7 +178,7 @@ def estimate_3D_transform_RANSAC(pts_xyz_s, pts_xyz_t): if (n_inlier > max_inlier) and (np.linalg.det(R_approx) != 0.0) and \ (R_approx[0,0] > 0 and R_approx[1,1] > 0 and R_approx[2,2] > 0): Transform_good[:3, :3] = R_approx - Transform_good[:3, 3] = [t_approx[0], t_approx[1], t_approx[2]] + Transform_good[:3, 3] = t_approx.squeeze(1) max_inlier = n_inlier inlier_vec = [id_iter for diff_iter, id_iter \ in zip(diff, range(n_points)) \ diff --git a/examples/python/reconstruction_system/refine_registration.py b/examples/python/reconstruction_system/refine_registration.py index 85cb5311624..bd555b150f9 100644 --- a/examples/python/reconstruction_system/refine_registration.py +++ b/examples/python/reconstruction_system/refine_registration.py @@ -7,9 +7,12 @@ # examples/python/reconstruction_system/refine_registration.py +import multiprocessing +import os +import sys + import numpy as np import open3d as o3d -import os, sys pyexample_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) sys.path.append(pyexample_path) @@ -163,29 +166,28 @@ def make_posegraph_for_refined_scene(ply_file_names, config): s = edge.source_node_id t = edge.target_node_id matching_results[s * n_files + t] = \ - matching_result(s, t, edge.transformation) - - if config["python_multi_threading"] == True: - from joblib import Parallel, delayed - import multiprocessing - import subprocess - MAX_THREAD = min(multiprocessing.cpu_count(), - max(len(pose_graph.edges), 1)) - results = Parallel(n_jobs=MAX_THREAD)( - delayed(register_point_cloud_pair)( - ply_file_names, matching_results[r].s, matching_results[r].t, - matching_results[r].transformation, config) - for r in matching_results) + matching_result(s, t, edge.transformation) + + if config["python_multi_threading"] is True: + os.environ['OMP_NUM_THREADS'] = '1' + max_workers = max( + 1, min(multiprocessing.cpu_count() - 1, len(pose_graph.edges))) + mp_context = multiprocessing.get_context('spawn') + with mp_context.Pool(processes=max_workers) as pool: + args = [(ply_file_names, v.s, v.t, v.transformation, config) + for k, v in matching_results.items()] + results = pool.starmap(register_point_cloud_pair, args) + for i, r in enumerate(matching_results): matching_results[r].transformation = results[i][0] matching_results[r].information = results[i][1] else: for r in matching_results: (matching_results[r].transformation, - matching_results[r].information) = \ - register_point_cloud_pair(ply_file_names, - matching_results[r].s, matching_results[r].t, - matching_results[r].transformation, config) + matching_results[r].information) = \ + register_point_cloud_pair(ply_file_names, + matching_results[r].s, matching_results[r].t, + matching_results[r].transformation, config) pose_graph_new = o3d.pipelines.registration.PoseGraph() odometry = np.identity(4) diff --git a/examples/python/reconstruction_system/register_fragments.py b/examples/python/reconstruction_system/register_fragments.py index 82e31fba9d7..f7d3c72e664 100644 --- a/examples/python/reconstruction_system/register_fragments.py 
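The joblib-to-``multiprocessing`` refactor shown in these hunks follows one recurring pattern: a ``spawn`` context, a capped worker count, ``OMP_NUM_THREADS=1`` in the children, and ``Pool.starmap``. A generic, self-contained sketch of that pattern; the worker function and its arguments are stand-ins, not part of the reconstruction system.

.. code-block:: python

    import multiprocessing
    import os

    def process_item(item_id, config):
        # Stand-in for process_single_fragment / register_point_cloud_pair.
        return item_id, config['scale'] * item_id

    if __name__ == '__main__':
        config = {'scale': 2}
        n_items = 8
        # Prevent over-allocation of OpenMP threads in child processes.
        os.environ['OMP_NUM_THREADS'] = '1'
        max_workers = min(max(1, multiprocessing.cpu_count() - 1), n_items)
        mp_context = multiprocessing.get_context('spawn')
        with mp_context.Pool(processes=max_workers) as pool:
            args = [(item_id, config) for item_id in range(n_items)]
            results = pool.starmap(process_item, args)
        print(results)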
+++ b/examples/python/reconstruction_system/register_fragments.py @@ -7,9 +7,12 @@ # examples/python/reconstruction_system/register_fragments.py +import multiprocessing +import os +import sys + import numpy as np import open3d as o3d -import os, sys pyexample_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) sys.path.append(pyexample_path) @@ -158,16 +161,16 @@ def make_posegraph_for_scene(ply_file_names, config): for t in range(s + 1, n_files): matching_results[s * n_files + t] = matching_result(s, t) - if config["python_multi_threading"] == True: - from joblib import Parallel, delayed - import multiprocessing - import subprocess - MAX_THREAD = min(multiprocessing.cpu_count(), - max(len(matching_results), 1)) - results = Parallel(n_jobs=MAX_THREAD)(delayed( - register_point_cloud_pair)(ply_file_names, matching_results[r].s, - matching_results[r].t, config) - for r in matching_results) + if config["python_multi_threading"] is True: + os.environ['OMP_NUM_THREADS'] = '1' + max_workers = max( + 1, min(multiprocessing.cpu_count() - 1, len(matching_results))) + mp_context = multiprocessing.get_context('spawn') + with mp_context.Pool(processes=max_workers) as pool: + args = [(ply_file_names, v.s, v.t, config) + for k, v in matching_results.items()] + results = pool.starmap(register_point_cloud_pair, args) + for i, r in enumerate(matching_results): matching_results[r].success = results[i][0] matching_results[r].transformation = results[i][1] @@ -175,9 +178,9 @@ def make_posegraph_for_scene(ply_file_names, config): else: for r in matching_results: (matching_results[r].success, matching_results[r].transformation, - matching_results[r].information) = \ - register_point_cloud_pair(ply_file_names, - matching_results[r].s, matching_results[r].t, config) + matching_results[r].information) = \ + register_point_cloud_pair(ply_file_names, + matching_results[r].s, matching_results[r].t, config) for r in matching_results: if matching_results[r].success: diff --git a/examples/python/reconstruction_system/run_system.py b/examples/python/reconstruction_system/run_system.py index 91fd073b3c4..dedfc5552d2 100644 --- a/examples/python/reconstruction_system/run_system.py +++ b/examples/python/reconstruction_system/run_system.py @@ -86,7 +86,7 @@ initialize_config(config) check_folder_structure(config['path_dataset']) else: - # load deafult dataset. + # load default dataset. config = dataset_loader(args.default_dataset) assert config is not None diff --git a/examples/python/reconstruction_system/sensors/realsense_helper.py b/examples/python/reconstruction_system/sensors/realsense_helper.py index 65c68014553..5e37b0c50bb 100644 --- a/examples/python/reconstruction_system/sensors/realsense_helper.py +++ b/examples/python/reconstruction_system/sensors/realsense_helper.py @@ -5,7 +5,7 @@ # SPDX-License-Identifier: MIT # ---------------------------------------------------------------------------- -# examples/python/reconstruction_system/sensors/realsense_pcd_visualizer.py +# examples/python/reconstruction_system/sensors/realsense_helper.py # pyrealsense2 is required. 
# Please see instructions in https://github.com/IntelRealSense/librealsense/tree/master/wrappers/python diff --git a/examples/python/t_reconstruction_system/common.py b/examples/python/t_reconstruction_system/common.py index 1831545388b..d6331920d87 100644 --- a/examples/python/t_reconstruction_system/common.py +++ b/examples/python/t_reconstruction_system/common.py @@ -5,7 +5,7 @@ # SPDX-License-Identifier: MIT # ---------------------------------------------------------------------------- -# examples/python/reconstruction_system/common.py +# examples/python/t_reconstruction_system/common.py import open3d as o3d @@ -95,7 +95,7 @@ def get_default_dataset(config): def load_depth_file_names(config): if not os.path.exists(config.path_dataset): print( - 'Path \'{}\' not found.'.format(config.path_dataset), + f"Path '{config.path_dataset}' not found.", 'Please provide --path_dataset in the command line or the config file.' ) return [], [] @@ -106,7 +106,7 @@ def load_depth_file_names(config): depth_file_names = glob.glob(os.path.join(depth_folder, '*.png')) n_depth = len(depth_file_names) if n_depth == 0: - print('Depth image not found in {}, abort!'.format(depth_folder)) + print(f'Depth image not found in {depth_folder}, abort!') return [] return sorted(depth_file_names) diff --git a/examples/python/t_reconstruction_system/config.py b/examples/python/t_reconstruction_system/config.py index 5d87a561c9f..59b9ec40e2f 100644 --- a/examples/python/t_reconstruction_system/config.py +++ b/examples/python/t_reconstruction_system/config.py @@ -6,7 +6,9 @@ # ---------------------------------------------------------------------------- import os + import configargparse +import open3d as o3d class ConfigParser(configargparse.ArgParser): @@ -107,9 +109,8 @@ def __init__(self): integration_parser = self.add_argument_group('integration') integration_parser.add( - '--integration_mode',type=str, - choices=['color', 'depth'], - help='Volumetric integration mode.') + '--integrate_color', action='store_true', + default=False, help='Volumetric integration mode.') integration_parser.add( '--voxel_size', type=float, help='Voxel size in meter for volumetric integration.') @@ -141,7 +142,7 @@ def get_config(self): 'Fallback to hybrid odometry.') config.odometry_method = 'hybrid' - if config.engine == 'tensor': + elif config.engine == 'tensor': if config.icp_method == 'generalized': print('Tensor engine does not support generalized ICP.', 'Fallback to colored ICP.') @@ -152,6 +153,13 @@ def get_config(self): 'Disabled.') config.multiprocessing = False + if (config.device.lower().startswith('cuda') and + (not o3d.core.cuda.is_available())): + print( + 'Open3d not built with cuda support or no cuda device available. 
', + 'Fallback to CPU.') + config.device = 'CPU:0' + return config diff --git a/examples/python/t_reconstruction_system/default_config.yml b/examples/python/t_reconstruction_system/default_config.yml index 36e4030efce..3e32515cbf4 100644 --- a/examples/python/t_reconstruction_system/default_config.yml +++ b/examples/python/t_reconstruction_system/default_config.yml @@ -8,7 +8,6 @@ depth_folder: depth color_folder: color path_intrinsic: '' path_color_intrinsic: '' -fragment_size: 100 depth_min: 0.1 depth_max: 3.0 depth_scale: 1000.0 @@ -21,7 +20,7 @@ icp_voxelsize: 0.05 icp_distance_thr: 0.07 global_registration_method: ransac registration_loop_weight: 0.1 -integration_mode: color +integrate_color: true voxel_size: 0.0058 trunc_voxel_multiplier: 8.0 block_count: 40000 diff --git a/examples/python/t_reconstruction_system/dense_slam.py b/examples/python/t_reconstruction_system/dense_slam.py index a2e03795af2..374b6dce91c 100644 --- a/examples/python/t_reconstruction_system/dense_slam.py +++ b/examples/python/t_reconstruction_system/dense_slam.py @@ -5,7 +5,7 @@ # SPDX-License-Identifier: MIT # ---------------------------------------------------------------------------- -# examples/python/t_reconstruction_system/ray_casting.py +# examples/python/t_reconstruction_system/dense_slam.py # P.S. This example is used in documentation, so, please ensure the changes are # synchronized. @@ -16,7 +16,8 @@ import time from config import ConfigParser -from common import get_default_dataset, load_rgbd_file_names, save_poses, load_intrinsic, extract_trianglemesh, get_default_testdata, extract_rgbd_frames +from common import (get_default_dataset, load_rgbd_file_names, save_poses, + load_intrinsic, extract_trianglemesh, extract_rgbd_frames) def slam(depth_file_names, color_file_names, intrinsic, config): @@ -90,7 +91,7 @@ def slam(depth_file_names, color_file_names, intrinsic, config): # Extract RGB-D frames and intrinsic from bag file. if config.path_dataset.endswith(".bag"): assert os.path.isfile( - config.path_dataset), (f"File {config.path_dataset} not found.") + config.path_dataset), f"File {config.path_dataset} not found." print("Extracting frames from RGBD video file") config.path_dataset, config.path_intrinsic, config.depth_scale = extract_rgbd_frames( config.path_dataset) diff --git a/examples/python/t_reconstruction_system/dense_slam_gui.py b/examples/python/t_reconstruction_system/dense_slam_gui.py index a4e68526754..24744b9e87d 100644 --- a/examples/python/t_reconstruction_system/dense_slam_gui.py +++ b/examples/python/t_reconstruction_system/dense_slam_gui.py @@ -17,7 +17,7 @@ from config import ConfigParser -import os, sys +import os import numpy as np import threading import time @@ -459,7 +459,7 @@ def update_main(self): # Extract RGB-D frames and intrinsic from bag file. if config.path_dataset.endswith(".bag"): assert os.path.isfile( - config.path_dataset), (f"File {config.path_dataset} not found.") + config.path_dataset), f"File {config.path_dataset} not found." print("Extracting frames from RGBD video file") config.path_dataset, config.path_intrinsic, config.depth_scale = extract_rgbd_frames( config.path_dataset) diff --git a/examples/python/t_reconstruction_system/integrate.py b/examples/python/t_reconstruction_system/integrate.py index 2c3e011cdea..3d86ec02943 100644 --- a/examples/python/t_reconstruction_system/integrate.py +++ b/examples/python/t_reconstruction_system/integrate.py @@ -10,25 +10,22 @@ # P.S. 
This example is used in documentation, so, please ensure the changes are # synchronized. -import os -import numpy as np +import time + import open3d as o3d import open3d.core as o3c -import time -import matplotlib.pyplot as plt from tqdm import tqdm -from config import ConfigParser from common import load_rgbd_file_names, load_depth_file_names, load_intrinsic, load_extrinsics, get_default_dataset +from config import ConfigParser def integrate(depth_file_names, color_file_names, depth_intrinsic, - color_intrinsic, extrinsics, integrate_color, config): - + color_intrinsic, extrinsics, config): n_files = len(depth_file_names) device = o3d.core.Device(config.device) - if integrate_color: + if config.integrate_color: vbg = o3d.t.geometry.VoxelBlockGrid( attr_names=('tsdf', 'weight', 'color'), attr_dtypes=(o3c.float32, o3c.float32, o3c.float32), @@ -56,7 +53,7 @@ def integrate(depth_file_names, color_file_names, depth_intrinsic, depth, depth_intrinsic, extrinsic, config.depth_scale, config.depth_max) - if integrate_color: + if config.integrate_color: color = o3d.t.io.read_image(color_file_names[i]).to(device) vbg.integrate(frustum_block_coords, depth, color, depth_intrinsic, color_intrinsic, extrinsic, config.depth_scale, @@ -83,7 +80,6 @@ def integrate(depth_file_names, color_file_names, depth_intrinsic, 'Default dataset may be selected from the following options: ' '[lounge, jack_jack]', default='lounge') - parser.add('--integrate_color', action='store_true') parser.add('--path_trajectory', help='path to the trajectory .log or .json file.') parser.add('--path_npz', diff --git a/examples/python/t_reconstruction_system/integrate_custom.py b/examples/python/t_reconstruction_system/integrate_custom.py index c6b8df3c7f4..8b352780b04 100644 --- a/examples/python/t_reconstruction_system/integrate_custom.py +++ b/examples/python/t_reconstruction_system/integrate_custom.py @@ -5,139 +5,137 @@ # SPDX-License-Identifier: MIT # ---------------------------------------------------------------------------- -# examples/python/t_reconstruction_system/ray_casting.py +# examples/python/t_reconstruction_system/integrate_custom.py # P.S. This example is used in documentation, so, please ensure the changes are # synchronized. import os -import numpy as np +import time + import open3d as o3d import open3d.core as o3c -import time -import matplotlib.pyplot as plt +from tqdm import tqdm +from common import get_default_dataset, load_rgbd_file_names, load_depth_file_names, load_intrinsic, load_extrinsics, extract_rgbd_frames from config import ConfigParser -from common import get_default_dataset, load_rgbd_file_names, load_depth_file_names, save_poses, load_intrinsic, load_extrinsics, extract_rgbd_frames - -from tqdm import tqdm def integrate(depth_file_names, color_file_names, intrinsic, extrinsics, config): if os.path.exists(config.path_npz): - print('Voxel block grid npz file {} found, trying to load...'.format( - config.path_npz)) + print( + f'Voxel block grid npz file {config.path_npz} found, trying to load...' + ) vbg = o3d.t.geometry.VoxelBlockGrid.load(config.path_npz) print('Loading finished.') + return vbg + + print( + f'Voxel block grid npz file {config.path_npz} not found, trying to integrate...' 
+ ) + + n_files = len(depth_file_names) + device = o3d.core.Device(config.device) + + voxel_size = 3.0 / 512 + trunc = voxel_size * 4 + res = 8 + + if config.integrate_color: + vbg = o3d.t.geometry.VoxelBlockGrid( + ('tsdf', 'weight', 'color'), + (o3c.float32, o3c.float32, o3c.float32), ((1), (1), (3)), 3.0 / 512, + 8, 100000, device) else: - print('Voxel block grid npz file {} not found, trying to integrate...'. - format(config.path_npz)) + vbg = o3d.t.geometry.VoxelBlockGrid( + ('tsdf', 'weight'), (o3c.float32, o3c.float32), ((1), (1)), + 3.0 / 512, 8, 100000, device) - n_files = len(depth_file_names) - device = o3d.core.Device(config.device) + start = time.time() + for i in tqdm(range(n_files)): + depth = o3d.t.io.read_image(depth_file_names[i]).to(device) + extrinsic = extrinsics[i] - voxel_size = 3.0 / 512 - trunc = voxel_size * 4 - res = 8 + start = time.time() + # Get active frustum block coordinates from input + frustum_block_coords = vbg.compute_unique_block_coordinates( + depth, intrinsic, extrinsic, config.depth_scale, config.depth_max) + # Activate them in the underlying hash map (may have been inserted) + vbg.hashmap().activate(frustum_block_coords) - if config.integrate_color: - vbg = o3d.t.geometry.VoxelBlockGrid( - ('tsdf', 'weight', 'color'), - (o3c.float32, o3c.float32, o3c.float32), ((1), (1), (3)), - 3.0 / 512, 8, 100000, device) - else: - vbg = o3d.t.geometry.VoxelBlockGrid( - ('tsdf', 'weight'), (o3c.float32, o3c.float32), ((1), (1)), - 3.0 / 512, 8, 100000, device) + # Find buf indices in the underlying engine + buf_indices, masks = vbg.hashmap().find(frustum_block_coords) + o3d.core.cuda.synchronize() + end = time.time() + + start = time.time() + voxel_coords, voxel_indices = vbg.voxel_coordinates_and_flattened_indices( + buf_indices) + o3d.core.cuda.synchronize() + end = time.time() + # Now project them to the depth and find association + # (3, N) -> (2, N) start = time.time() - for i in tqdm(range(n_files)): - depth = o3d.t.io.read_image(depth_file_names[i]).to(device) - extrinsic = extrinsics[i] - - start = time.time() - # Get active frustum block coordinates from input - frustum_block_coords = vbg.compute_unique_block_coordinates( - depth, intrinsic, extrinsic, config.depth_scale, - config.depth_max) - # Activate them in the underlying hash map (may have been inserted) - vbg.hashmap().activate(frustum_block_coords) - - # Find buf indices in the underlying engine - buf_indices, masks = vbg.hashmap().find(frustum_block_coords) - o3d.core.cuda.synchronize() - end = time.time() - - start = time.time() - voxel_coords, voxel_indices = vbg.voxel_coordinates_and_flattened_indices( - buf_indices) - o3d.core.cuda.synchronize() - end = time.time() - - # Now project them to the depth and find association - # (3, N) -> (2, N) - start = time.time() - extrinsic_dev = extrinsic.to(device, o3c.float32) - xyz = extrinsic_dev[:3, :3] @ voxel_coords.T() + extrinsic_dev[:3, - 3:] - - intrinsic_dev = intrinsic.to(device, o3c.float32) - uvd = intrinsic_dev @ xyz - d = uvd[2] - u = (uvd[0] / d).round().to(o3c.int64) - v = (uvd[1] / d).round().to(o3c.int64) - o3d.core.cuda.synchronize() - end = time.time() - - start = time.time() - mask_proj = (d > 0) & (u >= 0) & (v >= 0) & (u < depth.columns) & ( - v < depth.rows) - - v_proj = v[mask_proj] - u_proj = u[mask_proj] - d_proj = d[mask_proj] - depth_readings = depth.as_tensor()[v_proj, u_proj, 0].to( - o3c.float32) / config.depth_scale - sdf = depth_readings - d_proj - - mask_inlier = (depth_readings > 0) \ - & (depth_readings < 
config.depth_max) \ - & (sdf >= -trunc) - - sdf[sdf >= trunc] = trunc - sdf = sdf / trunc - o3d.core.cuda.synchronize() - end = time.time() - - start = time.time() - weight = vbg.attribute('weight').reshape((-1, 1)) - tsdf = vbg.attribute('tsdf').reshape((-1, 1)) - - valid_voxel_indices = voxel_indices[mask_proj][mask_inlier] - w = weight[valid_voxel_indices] - wp = w + 1 - - tsdf[valid_voxel_indices] \ - = (tsdf[valid_voxel_indices] * w + - sdf[mask_inlier].reshape(w.shape)) / (wp) - if config.integrate_color: - color = o3d.t.io.read_image(color_file_names[i]).to(device) - color_readings = color.as_tensor()[v_proj, - u_proj].to(o3c.float32) - - color = vbg.attribute('color').reshape((-1, 3)) - color[valid_voxel_indices] \ - = (color[valid_voxel_indices] * w + - color_readings[mask_inlier]) / (wp) - - weight[valid_voxel_indices] = wp - o3d.core.cuda.synchronize() - end = time.time() - - print('Saving to {}...'.format(config.path_npz)) - vbg.save(config.path_npz) - print('Saving finished') + extrinsic_dev = extrinsic.to(device, o3c.float32) + xyz = extrinsic_dev[:3, :3] @ voxel_coords.T() + extrinsic_dev[:3, 3:] + + intrinsic_dev = intrinsic.to(device, o3c.float32) + uvd = intrinsic_dev @ xyz + d = uvd[2] + u = (uvd[0] / d).round().to(o3c.int64) + v = (uvd[1] / d).round().to(o3c.int64) + o3d.core.cuda.synchronize() + end = time.time() + + start = time.time() + mask_proj = (d > 0) & (u >= 0) & (v >= 0) & (u < depth.columns) & ( + v < depth.rows) + + v_proj = v[mask_proj] + u_proj = u[mask_proj] + d_proj = d[mask_proj] + depth_readings = depth.as_tensor()[v_proj, u_proj, 0].to( + o3c.float32) / config.depth_scale + sdf = depth_readings - d_proj + + mask_inlier = (depth_readings > 0) \ + & (depth_readings < config.depth_max) \ + & (sdf >= -trunc) + + sdf[sdf >= trunc] = trunc + sdf = sdf / trunc + o3d.core.cuda.synchronize() + end = time.time() + + start = time.time() + weight = vbg.attribute('weight').reshape((-1, 1)) + tsdf = vbg.attribute('tsdf').reshape((-1, 1)) + + valid_voxel_indices = voxel_indices[mask_proj][mask_inlier] + w = weight[valid_voxel_indices] + wp = w + 1 + + tsdf[valid_voxel_indices] \ + = (tsdf[valid_voxel_indices] * w + + sdf[mask_inlier].reshape(w.shape)) / (wp) + if config.integrate_color: + color = o3d.t.io.read_image(color_file_names[i]).to(device) + color_readings = color.as_tensor()[v_proj, u_proj].to(o3c.float32) + + color = vbg.attribute('color').reshape((-1, 3)) + color[valid_voxel_indices] \ + = (color[valid_voxel_indices] * w + + color_readings[mask_inlier]) / (wp) + + weight[valid_voxel_indices] = wp + o3d.core.cuda.synchronize() + end = time.time() + + print(f'Saving to {config.path_npz}...') + vbg.save(config.path_npz) + print('Saving finished') return vbg @@ -170,7 +168,7 @@ def integrate(depth_file_names, color_file_names, intrinsic, extrinsics, # Extract RGB-D frames and intrinsic from bag file. if config.path_dataset.endswith(".bag"): assert os.path.isfile( - config.path_dataset), (f"File {config.path_dataset} not found.") + config.path_dataset), f"File {config.path_dataset} not found." 
print("Extracting frames from RGBD video file") config.path_dataset, config.path_intrinsic, config.depth_scale = extract_rgbd_frames( config.path_dataset) diff --git a/examples/python/t_reconstruction_system/pose_graph_optim.py b/examples/python/t_reconstruction_system/pose_graph_optim.py index 151f55a941f..94abaf34711 100644 --- a/examples/python/t_reconstruction_system/pose_graph_optim.py +++ b/examples/python/t_reconstruction_system/pose_graph_optim.py @@ -57,8 +57,8 @@ def _dicts2graph(self): for (i, j) in self.dict_edges: if not (i in self.dict_nodes) or not (j in self.dict_nodes): print( - 'Edge node ({} {}) not found, abort pose graph construction' - .format(i, j)) + f'Edge node ({i} {j}) not found, abort pose graph construction' + ) trans, info, is_loop = self.dict_edges[(i, j)] ki = nodes2indices[i] kj = nodes2indices[j] diff --git a/examples/python/t_reconstruction_system/ray_casting.py b/examples/python/t_reconstruction_system/ray_casting.py index 84af9a97af1..5bae7301a30 100644 --- a/examples/python/t_reconstruction_system/ray_casting.py +++ b/examples/python/t_reconstruction_system/ray_casting.py @@ -50,7 +50,7 @@ # Extract RGB-D frames and intrinsic from bag file. if config.path_dataset.endswith(".bag"): assert os.path.isfile( - config.path_dataset), (f"File {config.path_dataset} not found.") + config.path_dataset), f"File {config.path_dataset} not found." print("Extracting frames from RGBD video file") config.path_dataset, config.path_intrinsic, config.depth_scale = extract_rgbd_frames( config.path_dataset) diff --git a/examples/python/t_reconstruction_system/run_system.py b/examples/python/t_reconstruction_system/run_system.py index 55b5dbcb160..51e94257ace 100644 --- a/examples/python/t_reconstruction_system/run_system.py +++ b/examples/python/t_reconstruction_system/run_system.py @@ -104,7 +104,6 @@ intrinsic, intrinsic, extrinsics, - integrate_color=True, config=config) pcd = vbg.extract_point_cloud() diff --git a/examples/python/visualization/draw.py b/examples/python/visualization/draw.py index 3c2de61889c..f354f55783e 100644 --- a/examples/python/visualization/draw.py +++ b/examples/python/visualization/draw.py @@ -319,6 +319,8 @@ def main(): multi_objects() actions() selections() + groups() + time_animation() if __name__ == "__main__": diff --git a/examples/python/visualization/interactive_visualization.py b/examples/python/visualization/interactive_visualization.py index 29e98dd6c08..1b3ff5ed051 100644 --- a/examples/python/visualization/interactive_visualization.py +++ b/examples/python/visualization/interactive_visualization.py @@ -37,6 +37,15 @@ def draw_registration_result(source, target, transformation): o3d.visualization.draw_geometries([source_temp, target_temp]) +def prepare_data(): + pcd_data = o3d.data.DemoICPPointClouds() + source = o3d.io.read_point_cloud(pcd_data.paths[0]) + target = o3d.io.read_point_cloud(pcd_data.paths[2]) + print("Visualization of two point clouds before manual alignment") + draw_registration_result(source, target, np.identity(4)) + return source, target + + def pick_points(pcd): print("") print( @@ -53,29 +62,15 @@ def pick_points(pcd): return vis.get_picked_points() -def demo_manual_registration(): - print("Demo for manual ICP") - pcd_data = o3d.data.DemoICPPointClouds() - source = o3d.io.read_point_cloud(pcd_data.paths[0]) - target = o3d.io.read_point_cloud(pcd_data.paths[2]) - print("Visualization of two point clouds before manual alignment") - draw_registration_result(source, target, np.identity(4)) - - # pick points from two 
point clouds and builds correspondences - picked_id_source = pick_points(source) - picked_id_target = pick_points(target) - assert (len(picked_id_source) >= 3 and len(picked_id_target) >= 3) - assert (len(picked_id_source) == len(picked_id_target)) - corr = np.zeros((len(picked_id_source), 2)) - corr[:, 0] = picked_id_source - corr[:, 1] = picked_id_target - +def register_via_correspondences(source, target, source_points, target_points): + corr = np.zeros((len(source_points), 2)) + corr[:, 0] = source_points + corr[:, 1] = target_points # estimate rough transformation using correspondences print("Compute a rough transform using the correspondences given by user") p2p = o3d.pipelines.registration.TransformationEstimationPointToPoint() trans_init = p2p.compute_transformation(source, target, o3d.utility.Vector2iVector(corr)) - # point-to-point ICP for refinement print("Perform point-to-point ICP refinement") threshold = 0.03 # 3cm distance threshold @@ -83,6 +78,18 @@ def demo_manual_registration(): source, target, threshold, trans_init, o3d.pipelines.registration.TransformationEstimationPointToPoint()) draw_registration_result(source, target, reg_p2p.transformation) + + +def demo_manual_registration(): + print("Demo for manual ICP") + source, target = prepare_data() + + # pick points from two point clouds and builds correspondences + source_points = pick_points(source) + target_points = pick_points(target) + assert (len(source_points) >= 3 and len(target_points) >= 3) + assert (len(source_points) == len(target_points)) + register_via_correspondences(source, target, source_points, target_points) print("") diff --git a/examples/python/visualization/mitsuba_material_estimation.py b/examples/python/visualization/mitsuba_material_estimation.py new file mode 100644 index 00000000000..79783edcf3f --- /dev/null +++ b/examples/python/visualization/mitsuba_material_estimation.py @@ -0,0 +1,236 @@ +# ---------------------------------------------------------------------------- +# - Open3D: www.open3d.org - +# ---------------------------------------------------------------------------- +# Copyright (c) 2018-2023 www.open3d.org +# SPDX-License-Identifier: MIT +# ---------------------------------------------------------------------------- + +import sys +import argparse +from pathlib import Path +import open3d as o3d +import mitsuba as mi +import drjit as dr +import numpy as np +import math + + +def make_mitsuba_scene(mesh, cam_xform, fov, width, height, principle_pts, + envmap): + # Camera transform + t_from_np = mi.ScalarTransform4f(cam_xform) + # Transform necessary to get from Open3D's environment map coordinate system + # to Mitsuba's + env_t = mi.ScalarTransform4f.rotate(axis=[0, 0, 1], + angle=90).rotate(axis=[1, 0, 0], + angle=90) + scene_dict = { + "type": "scene", + "integrator": { + 'type': 'path' + }, + "light": { + "type": "envmap", + "to_world": env_t, + "bitmap": mi.Bitmap(envmap), + }, + "sensor": { + "type": "perspective", + "fov": fov, + "to_world": t_from_np, + "principal_point_offset_x": principle_pts[0], + "principal_point_offset_y": principle_pts[1], + "thefilm": { + "type": "hdrfilm", + "width": width, + "height": height, + }, + "thesampler": { + "type": "multijitter", + "sample_count": 64, + }, + }, + "themesh": mesh, + } + + scene = mi.load_dict(scene_dict) + return scene + + +def run_estimation(mesh, cam_info, ref_image, env_width, iterations, tv_alpha): + # Make Mitsuba mesh from Open3D mesh -- conversion will attach a Mitsuba + # Principled BSDF to the mesh + mesh_opt = 
mesh.to_mitsuba('themesh') + + # Prepare empty environment map + empty_envmap = np.ones((int(env_width / 2), env_width, 3)) + + # Create Mitsuba scene + scene = make_mitsuba_scene(mesh_opt, cam_info[0], cam_info[1], cam_info[2], + cam_info[3], cam_info[4], empty_envmap) + + def total_variation(image, alpha): + diff1 = image[1:, :, :] - image[:-1, :, :] + diff2 = image[:, 1:, :] - image[:, :-1, :] + return alpha * (dr.sum(dr.abs(diff1)) / len(diff1) + + dr.sum(dr.abs(diff2)) / len(diff2)) + + def mse(image, ref_img): + return dr.mean(dr.sqr(image - ref_img)) + + params = mi.traverse(scene) + print(params) + + # Create a Mitsuba Optimizer and configure it to optimize albedo and + # environment maps + opt = mi.ad.Adam(lr=0.05, mask_updates=True) + opt['themesh.bsdf.base_color.data'] = params['themesh.bsdf.base_color.data'] + opt['light.data'] = params['light.data'] + params.update(opt) + + integrator = mi.load_dict({'type': 'prb'}) + for i in range(iterations): + img = mi.render(scene, params, spp=8, seed=i, integrator=integrator) + + # Compute loss + loss = mse(img, ref_image) + # Apply TV regularization if requested + if tv_alpha > 0.0: + loss = loss + total_variation(opt['themesh.bsdf.base_color.data'], + tv_alpha) + + # Backpropagate and step. Note: if we were optimizing over a larger set + # of inputs, not just a single image, we might want to step only every x + # number of inputs + dr.backward(loss) + opt.step() + + # Make sure albedo values stay in allowed range + opt['themesh.bsdf.base_color.data'] = dr.clamp( + opt['themesh.bsdf.base_color.data'], 0.0, 1.0) + params.update(opt) + print(f'Iteration {i} complete') + + # Done! Return the estimated maps + albedo_img = params['themesh.bsdf.base_color.data'].numpy() + envmap_img = params['light.data'].numpy() + return (albedo_img, envmap_img) + + +def load_input_mesh(model_path, tex_dim): + mesh = o3d.t.io.read_triangle_mesh(str(model_path)) + mesh.material.set_default_properties() + mesh.material.material_name = 'defaultLit' # note: ignored by Mitsuba, just used to visualize in Open3D + mesh.material.texture_maps['albedo'] = o3d.t.geometry.Image(0.5 + np.zeros( + (tex_dim, tex_dim, 3), dtype=np.float32)) + return mesh + + +def load_input_data(object, camera_pose, input_image, tex_dim): + print(f'Loading {object}...') + mesh = load_input_mesh(object, tex_dim) + + print(f'Loading camera pose from {camera_pose}...') + cam_npz = np.load(camera_pose) + img_width = cam_npz['width'].item() + img_height = cam_npz['height'].item() + cam_xform = np.linalg.inv(cam_npz['T']) + cam_xform = np.matmul( + cam_xform, + np.array([[-1, 0, 0, 0], [0, -1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]], + dtype=np.float32)) + fov = 2 * np.arctan(0.5 * img_width / cam_npz['K'][0, 0]) + fov = (180.0 / math.pi) * fov.item() + camera = (cam_xform, fov, img_width, img_height, (0.0, 0.0)) + + print(f'Loading reference image from {input_image}...') + ref_img = o3d.t.io.read_image(str(input_image)) + ref_img = ref_img.as_tensor()[:, :, 0:3].to(o3d.core.Dtype.Float32) / 255.0 + bmp = mi.Bitmap(ref_img.numpy()).convert(srgb_gamma=False) + ref_img = mi.TensorXf(bmp) + return (mesh, camera, ref_img) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description= + "Script that estimates texture and environment map from an input image and geometry. 
You can find data to test this script here: https://github.com/isl-org/open3d_downloads/releases/download/mitsuba-demos/raven_mitsuba.zip.", + formatter_class=argparse.ArgumentDefaultsHelpFormatter) + parser.add_argument( + 'object_path', + type=Path, + help= + "Path to geometry for which to estimate albedo. It is assumed that in the same directory will be an object-name.npz which contains the camera pose information and an object-name.png which is the input image" + ) + parser.add_argument('--env-width', type=int, default=1024) + parser.add_argument('--tex-width', + type=int, + default=2048, + help="The dimensions of the texture") + parser.add_argument( + '--device', + default='cuda' if o3d.core.cuda.is_available() else 'cpu', + choices=('cpu', 'cuda'), + help="Run Mitsuba on 'cuda' or 'cpu'") + parser.add_argument('--iterations', + type=int, + default=40, + help="Number of iterations") + parser.add_argument( + '--total-variation', + type=float, + default=0.01, + help="Factor to apply to total_variation loss. 0.0 disables TV") + + if len(sys.argv) < 2: + parser.print_help(sys.stderr) + sys.exit(1) + args = parser.parse_args() + print("Arguments: ", vars(args)) + + # Initialize Mitsuba + if args.device == 'cpu': + mi.set_variant('llvm_ad_rgb') + else: + mi.set_variant('cuda_ad_rgb') + + # Confirm that the 3 required inputs exist + object_path = args.object_path + object_name = object_path.stem + datadir = args.object_path.parent + camera_pose = datadir / (object_name + '.npz') + input_image = datadir / (object_name + '.png') + if not object_path.exists(): + print(f'{object_path} does not exist!') + sys.exit() + if not camera_pose.exists(): + print(f'{camera_pose} does not exist!') + sys.exit() + if not input_image.exists(): + print(f'{input_image} does not exist!') + sys.exit() + + # Load input data + mesh, cam_info, input_image = load_input_data(object_path, camera_pose, + input_image, args.tex_width) + + # Estimate albedo map + print('Running material estimation...') + albedo, envmap = run_estimation(mesh, cam_info, input_image, args.env_width, + args.iterations, args.total_variation) + + # Save maps + def save_image(img, name, output_dir): + # scale to 0-255 + texture = o3d.core.Tensor(img * 255.0).to(o3d.core.Dtype.UInt8) + texture = o3d.t.geometry.Image(texture) + o3d.t.io.write_image(str(output_dir / name), texture) + + print('Saving final results...') + save_image(albedo, 'estimated_albedo.png', datadir) + mi.Bitmap(envmap).write(str(datadir / 'predicted_envmap.exr')) + + # Visualize result with Open3D + mesh.material.texture_maps['albedo'] = o3d.t.io.read_image( + str(datadir / 'estimated_albedo.png')) + o3d.visualization.draw(mesh) diff --git a/examples/python/visualization/non_blocking_visualization.py b/examples/python/visualization/non_blocking_visualization.py index cc8bd74697c..04baaf85574 100644 --- a/examples/python/visualization/non_blocking_visualization.py +++ b/examples/python/visualization/non_blocking_visualization.py @@ -10,22 +10,27 @@ import open3d as o3d import numpy as np -if __name__ == "__main__": - o3d.utility.set_verbosity_level(o3d.utility.VerbosityLevel.Debug) + +def prepare_data(): pcd_data = o3d.data.DemoICPPointClouds() source_raw = o3d.io.read_point_cloud(pcd_data.paths[0]) target_raw = o3d.io.read_point_cloud(pcd_data.paths[1]) - source = source_raw.voxel_down_sample(voxel_size=0.02) target = target_raw.voxel_down_sample(voxel_size=0.02) + trans = [[0.862, 0.011, -0.507, 0.0], [-0.139, 0.967, -0.215, 0.7], [0.487, 0.255, 0.835, -1.4], [0.0, 0.0, 0.0, 
1.0]] source.transform(trans) - flip_transform = [[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]] source.transform(flip_transform) target.transform(flip_transform) + return source, target + +def demo_non_blocking_visualization(): + o3d.utility.set_verbosity_level(o3d.utility.VerbosityLevel.Debug) + + source, target = prepare_data() vis = o3d.visualization.Visualizer() vis.create_window() vis.add_geometry(source) @@ -46,4 +51,9 @@ if save_image: vis.capture_screen_image("temp_%04d.jpg" % i) vis.destroy_window() + o3d.utility.set_verbosity_level(o3d.utility.VerbosityLevel.Info) + + +if __name__ == '__main__': + demo_non_blocking_visualization() diff --git a/python/README.rst b/python/README.rst index 64c5cb00c60..f55cf004b2a 100644 --- a/python/README.rst +++ b/python/README.rst @@ -44,10 +44,10 @@ The package has been tested on: With Python versions: -* 3.7 * 3.8 * 3.9 * 3.10 +* 3.11 Resources ====================== diff --git a/python/js/amd-public-path.js b/python/js/amd-public-path.js new file mode 100644 index 00000000000..71f9372e44f --- /dev/null +++ b/python/js/amd-public-path.js @@ -0,0 +1,8 @@ +// In an AMD module, we set the public path using the magic requirejs "module" dependency +// See https://github.com/requirejs/requirejs/wiki/Differences-between-the-simplified-CommonJS-wrapper-and-standard-AMD-define#module +// Since "module" is a requirejs magic module, we must include "module" in the webpack externals configuration. +var module = require("module"); +var url = new URL(module.uri, document.location); +// Using lastIndexOf("/")+1 gives us the empty string if there is no "/", so pathname becomes "/" +url.pathname = url.pathname.slice(0, url.pathname.lastIndexOf("/") + 1); +__webpack_public_path__ = `${url.origin}${url.pathname}`; diff --git a/python/js/lib/embed.js b/python/js/lib/embed.js deleted file mode 100644 index ff852e762cf..00000000000 --- a/python/js/lib/embed.js +++ /dev/null @@ -1,9 +0,0 @@ -// Entry point for the unpkg bundle containing custom model definitions. -// -// It differs from the notebook bundle in that it does not need to define a -// dynamic baseURL for the static assets and may load some css that would -// already be loaded by the notebook otherwise. - -// Export widget models and views, and the npm package version number. -module.exports = require("./web_visualizer.js"); -module.exports["version"] = require("../package.json").version; diff --git a/python/js/lib/extension.js b/python/js/lib/extension.js index 7c220feaed0..991c9a9a596 100644 --- a/python/js/lib/extension.js +++ b/python/js/lib/extension.js @@ -1,12 +1,6 @@ // This file contains the javascript that is run when the notebook is loaded. // It contains some requirejs configuration and the `load_ipython_extension` // which is required for any notebook extension. -// -// Some static assets may be required by the custom widget javascript. The base -// url for the notebook is not known at build time and is therefore computed -// dynamically. 
-__webpack_public_path__ = document.querySelector('body').getAttribute('data-base-url') + 'nbextensions/open3d'; - // Configure requirejs if (window.require) { @@ -21,5 +15,5 @@ if (window.require) { // Export the required load_ipython_extension module.exports = { - load_ipython_extension: function() {} + load_ipython_extension() {} }; diff --git a/python/js/lib/labplugin.js b/python/js/lib/labplugin.js index 50a8fda4b40..95c8cd49ad6 100644 --- a/python/js/lib/labplugin.js +++ b/python/js/lib/labplugin.js @@ -4,13 +4,12 @@ var base = require('@jupyter-widgets/base'); module.exports = { id: 'open3d:plugin', requires: [base.IJupyterWidgetRegistry], - activate: function(app, widgets) { - widgets.registerWidget({ - name: 'open3d', - version: plugin.version, - exports: plugin - }); + activate: (app, widgets) => { + widgets.registerWidget({ + name: 'open3d', + version: plugin.version, + exports: plugin + }); }, autoStart: true }; - diff --git a/python/js/lib/web_visualizer.js b/python/js/lib/web_visualizer.js index fd68ad6c307..3ccdff725a3 100644 --- a/python/js/lib/web_visualizer.js +++ b/python/js/lib/web_visualizer.js @@ -1,27 +1,8 @@ // ---------------------------------------------------------------------------- -// - Open3D: www.open3d.org - +// - Open3D: www.open3d.org - // ---------------------------------------------------------------------------- -// The MIT License (MIT) -// -// Copyright (c) 2018-2023 www.open3d.org -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS -// IN THE SOFTWARE. +// Copyright (c) 2018-2023 www.open3d.org +// SPDX-License-Identifier: MIT +// ---------------------------------------------------------------------------- // Jupyter widget for Open3D WebRTC visualizer. See web_visualizer.py for the @@ -44,49 +25,51 @@ let WebRtcStreamer = require("./webrtcstreamer"); // // When serializing the entire widget state for embedding, only values that // differ from the defaults will be specified. -let WebVisualizerModel = widgets.DOMWidgetModel.extend({ - defaults: _.extend(widgets.DOMWidgetModel.prototype.defaults(), { - _model_name: "WebVisualizerModel", - _view_name: "WebVisualizerView", - _model_module: "open3d", - _view_module: "open3d", - // @...@ is configured by cpp/pybind/make_python_package.cmake. 
- _model_module_version: "@PROJECT_VERSION_THREE_NUMBER@", - _view_module_version: "@PROJECT_VERSION_THREE_NUMBER@", - }), -}); +class WebVisualizerModel extends widgets.DOMWidgetModel { + defaults() { + return _.extend(widgets.DOMWidgetModel.prototype.defaults(), { + _model_name: "WebVisualizerModel", + _view_name: "WebVisualizerView", + _model_module: "open3d", + _view_module: "open3d", + // @...@ is configured by cpp/pybind/make_python_package.cmake. + _model_module_version: "@PROJECT_VERSION_THREE_NUMBER@", + _view_module_version: "@PROJECT_VERSION_THREE_NUMBER@", + }); + } +} // Custom View. Renders the widget model. -let WebVisualizerView = widgets.DOMWidgetView.extend({ - sleep: function (time_ms) { +class WebVisualizerView extends widgets.DOMWidgetView { + sleep(time_ms) { return new Promise((resolve) => setTimeout(resolve, time_ms)); - }, + } - logAndReturn: function (value) { + logAndReturn(value) { console.log("logAndReturn: ", value); return value; - }, + } - callResultReady: function (callId) { + callResultReady(callId) { let pyjs_channel = this.model.get("pyjs_channel"); console.log("Current pyjs_channel:", pyjs_channel); let callResultMap = JSON.parse(this.model.get("pyjs_channel")); return callId in callResultMap; - }, + } - extractCallResult: function (callId) { + extractCallResult(callId) { if (!this.callResultReady(callId)) { throw "extractCallResult not ready yet."; } let callResultMap = JSON.parse(this.model.get("pyjs_channel")); return callResultMap[callId]; - }, + } /** * Hard-coded to call "call_http_api". Args and return value are all * strings. */ - callPython: async function (func, args = []) { + async callPython(func, args = []) { let callId = this.callId.toString(); this.callId++; let message = { @@ -116,9 +99,9 @@ let WebVisualizerView = widgets.DOMWidgetView.extend({ json_result ); return json_result; - }, + } - commsCall: function (url, data = {}) { + commsCall(url, data = {}) { // https://stackoverflow.com/a/736970/1255535 // parseUrl(url).hostname // parseUrl(url).entryPoint @@ -180,9 +163,9 @@ let WebVisualizerView = widgets.DOMWidgetView.extend({ } else { throw "Unsupported entryPoint: " + entryPoint; } - }, + } - render: function () { + render() { let windowUID = this.model.get("window_uid"); let onClose = function () { console.log("onClose() called for window_uid:", windowUID); @@ -214,8 +197,8 @@ let WebVisualizerView = widgets.DOMWidgetView.extend({ this.commsCall.bind(this) ); this.webRtcClient.connect(windowUID); - }, -}); + } +} module.exports = { WebVisualizerModel: WebVisualizerModel, diff --git a/python/js/package.json b/python/js/package.json index 091a632aae6..73a76b3a8af 100644 --- a/python/js/package.json +++ b/python/js/package.json @@ -1,8 +1,8 @@ { "name": "open3d", "version": "@PROJECT_VERSION_THREE_NUMBER@", - "description": "Open3D: A Modern Library for 3D Data Processing", - "author": "Open3D.org", + "description": "@PROJECT_DESCRIPTION@", + "author": "@PROJECT_EMAIL@", "main": "lib/index.js", "repository": { "type": "git", @@ -36,7 +36,7 @@ "rimraf": "^2.6.1" }, "dependencies": { - "@jupyter-widgets/base": "^1.1 || ^2 || ^3 || ^4", + "@jupyter-widgets/base": "^2 || ^3 || ^4 || ^5 || ^6", "lodash": "^4.17.4", "webrtc-adapter": "^4.2.2" }, diff --git a/python/js/webpack.config.js b/python/js/webpack.config.js index 7a7d378b956..05c2bee12aa 100644 --- a/python/js/webpack.config.js +++ b/python/js/webpack.config.js @@ -16,13 +16,11 @@ module.exports = (env, argv) => { // some configuration for requirejs, and provides the legacy // 
"load_ipython_extension" function which is required for any notebook // extension. - // entry: "./lib/extension.js", output: { filename: "extension.js", path: path.resolve(__dirname, "..", "open3d", "nbextension"), libraryTarget: "amd", - publicPath: "", // publicPath is set in extension.js }, devtool, }, @@ -32,47 +30,43 @@ module.exports = (env, argv) => { // This bundle contains the implementation for the custom widget views and // custom widget. // It must be an amd module - // - entry: "./lib/index.js", + entry: ["./amd-public-path.js", "./lib/index.js"], output: { filename: "index.js", path: path.resolve(__dirname, "..", "open3d", "nbextension"), libraryTarget: "amd", - publicPath: "", + publicPath: "", // Set in amd-public-path.js }, devtool, module: { rules: rules, }, - externals: ["@jupyter-widgets/base"], + // "module" is the magic requirejs dependency used to set the publicPath + externals: ["@jupyter-widgets/base", "module"] }, { // Embeddable open3d bundle // - // This bundle is generally almost identical to the notebook bundle - // containing the custom widget views and models. - // - // The only difference is in the configuration of the webpack public path - // for the static assets. - // - // It will be automatically distributed by unpkg to work with the static - // widget embedder. - // - // The target bundle is always `dist/index.js`, which is the path required - // by the custom widget embedder. + // This bundle is identical to the notebook bundle containing the custom + // widget views and models. The only difference is it is placed in the + // dist/ directory and shipped with the npm package for use from a CDN + // like jsdelivr. // - entry: "./lib/embed.js", + // The target bundle is always `dist/index.js`, which is the path + // required by the custom widget embedder. + entry: ["./amd-public-path.js", "./lib/index.js"], output: { filename: "index.js", path: path.resolve(__dirname, "dist"), libraryTarget: "amd", - publicPath: "https://unpkg.com/open3d@" + version + "/dist/", + publicPath: "", // Set in amd-public-path.js }, devtool, module: { rules: rules, }, - externals: ["@jupyter-widgets/base"], + // "module" is the magic requirejs dependency used to set the publicPath + externals: ["@jupyter-widgets/base", "module"] }, ]; }; diff --git a/python/open3d/__init__.py b/python/open3d/__init__.py index 8bbf38d0e2c..6344140669f 100644 --- a/python/open3d/__init__.py +++ b/python/open3d/__init__.py @@ -79,11 +79,10 @@ def load_cdll(path): "Open3D was built with CUDA support, but no suitable CUDA " "devices found. If your system has CUDA devices, check your " "CUDA drivers and runtime.", ImportWarning) - except OSError: + except OSError as os_error: warnings.warn( - "Open3D was built with CUDA support, but CUDA libraries could " - "not be found! Check your CUDA installation. Falling back to the " - "CPU pybind library.", ImportWarning) + f'Open3D was built with CUDA support, but an error ocurred while loading the Open3D CUDA Python bindings. This is usually because the CUDA libraries could not be found. Check your CUDA installation. Falling back to the CPU pybind library. 
Reported error: {os_error}.', + ImportWarning) except StopIteration: warnings.warn( "Open3D was built with CUDA support, but Open3D CUDA Python " diff --git a/python/open3d/visualization/draw.py b/python/open3d/visualization/draw.py index cf1c821b415..f7764b41400 100644 --- a/python/open3d/visualization/draw.py +++ b/python/open3d/visualization/draw.py @@ -18,6 +18,8 @@ def draw(geometry=None, eye=None, up=None, field_of_view=60.0, + intrinsic_matrix=None, + extrinsic_matrix=None, bg_color=(1.0, 1.0, 1.0, 1.0), bg_image=None, ibl=None, @@ -34,6 +36,121 @@ def draw(geometry=None, on_animation_frame=None, on_animation_tick=None, non_blocking_and_return_uid=False): + """Draw 3D geometry types and 3D models. This is a high level interface to + :class:`open3d.visualization.O3DVisualizer`. + + The initial view may be specified either as a combination of (lookat, eye, + up, and field of view) or (intrinsic matrix, extrinsic matrix) pair. A + simple pinhole camera model is used. + + Args: + geometry (List[Geometry] or List[Dict]): The 3D data to be displayed can be provided in different types: + - A list of any Open3D geometry types (``PointCloud``, ``TriangleMesh``, ``LineSet`` or ``TriangleMeshModel``). + - A list of dictionaries with geometry data and additional metadata. The following keys are used: + - **name** (str): Geometry name. + - **geometry** (Geometry): Open3D geometry to be drawn. + - **material** (:class:`open3d.visualization.rendering.MaterialRecord`): PBR material for the geometry. + - **group** (str): Assign the geometry to a group. Groups are shown in the settings panel and users can take joint actions on a group as a whole. + - **time** (float): If geometry elements are assigned times, a time bar is displayed and the elements can be animated. + - **is_visible** (bool): Show this geometry? + title (str): Window title. + width (int): Viewport width. + height (int): Viewport height. + actions (List[(str, Callable)]): A list of pairs of action names and the + corresponding functions to execute. These actions are presented as + buttons in the settings panel. Each callable receives the window + (``O3DVisualizer``) as an argument. + lookat (array of shape (3,)): Camera principal axis direction. + eye (array of shape (3,)): Camera location. + up (array of shape (3,)): Camera up direction. + field_of_view (float): Camera horizontal field of view (degrees). + intrinsic_matrix (array of shape (3,3)): Camera intrinsic matrix. + extrinsic_matrix (array of shape (4,4)): Camera extrinsic matrix (world + to camera transformation). + bg_color (array of shape (4,)): Background color float with range [0,1], + default white. + bg_image (open3d.geometry.Image): Background image. + ibl (open3d.geometry.Image): Environment map for image based lighting + (IBL). + ibl_intensity (float): IBL intensity. + show_skybox (bool): Show skybox as scene background (default False). + show_ui (bool): Show settings user interface (default False). This can + be toggled from the Actions menu. + raw_mode (bool): Use raw mode for simpler rendering of the basic + geometry (default False). + point_size (int): 3D point size (default 3). + line_width (int): 3D line width (default 1). + animation_time_step (float): Duration in seconds for each animation + frame. + animation_duration (float): Total animation duration in seconds. + rpc_interface (bool): Start an RPC interface at http://localhost:51454 and + listen for drawing requests. The requests can be made with + :class:`open3d.visualization.ExternalVisualizer`. 
+ on_init (Callable): Extra initialization procedure for the underlying + GUI window. The procedure receives a single argument of type + :class:`open3d.visualization.O3DVisualizer`. + on_animation_frame (Callable): Callback for each animation frame update + with signature:: + + Callback(O3DVisualizer, double time) -> None + + on_animation_tick (Callable): Callback for each animation time step with + signature:: + + Callback(O3DVisualizer, double tick_duration, double time) -> TickResult + + If the callback returns ``TickResult.REDRAW``, the scene is redrawn. + It should return ``TickResult.NOCHANGE`` if redraw is not required. + non_blocking_and_return_uid (bool): Do not block waiting for the user + to close the window. Instead return the window ID. This is useful + for embedding the visualizer and is used in the WebRTC interface and + Tensorboard plugin. + + Example: + See `examples/visualization/draw.py` for examples of advanced usage. The ``actions()`` + example from that file is shown below:: + + import open3d as o3d + import open3d.visualization as vis + + SOURCE_NAME = "Source" + RESULT_NAME = "Result (Poisson reconstruction)" + TRUTH_NAME = "Ground truth" + + bunny = o3d.data.BunnyMesh() + bunny_mesh = o3d.io.read_triangle_mesh(bunny.path) + bunny_mesh.compute_vertex_normals() + + bunny_mesh.paint_uniform_color((1, 0.75, 0)) + bunny_mesh.compute_vertex_normals() + cloud = o3d.geometry.PointCloud() + cloud.points = bunny_mesh.vertices + cloud.normals = bunny_mesh.vertex_normals + + def make_mesh(o3dvis): + mesh, _ = o3d.geometry.TriangleMesh.create_from_point_cloud_poisson( + cloud) + mesh.paint_uniform_color((1, 1, 1)) + mesh.compute_vertex_normals() + o3dvis.add_geometry({"name": RESULT_NAME, "geometry": mesh}) + o3dvis.show_geometry(SOURCE_NAME, False) + + def toggle_result(o3dvis): + truth_vis = o3dvis.get_geometry(TRUTH_NAME).is_visible + o3dvis.show_geometry(TRUTH_NAME, not truth_vis) + o3dvis.show_geometry(RESULT_NAME, truth_vis) + + vis.draw([{ + "name": SOURCE_NAME, + "geometry": cloud + }, { + "name": TRUTH_NAME, + "geometry": bunny_mesh, + "is_visible": False + }], + actions=[("Create Mesh", make_mesh), + ("Toggle truth/result", toggle_result)]) + """ gui.Application.instance.initialize() w = O3DVisualizer(title, width, height) w.set_background(bg_color, bg_image) @@ -65,6 +182,8 @@ def add(g, n): w.reset_camera_to_default() # make sure far/near get setup nicely if lookat is not None and eye is not None and up is not None: w.setup_camera(field_of_view, lookat, eye, up) + elif intrinsic_matrix is not None and extrinsic_matrix is not None: + w.setup_camera(intrinsic_matrix, extrinsic_matrix, width, height) w.animation_time_step = animation_time_step if animation_duration is not None: diff --git a/python/open3d/visualization/tensorboard_plugin/util.py b/python/open3d/visualization/tensorboard_plugin/util.py index b057ee1a5b0..8c8a5beda34 100644 --- a/python/open3d/visualization/tensorboard_plugin/util.py +++ b/python/open3d/visualization/tensorboard_plugin/util.py @@ -72,7 +72,7 @@ def release_read(self): try: self._readers -= 1 if not self._readers: - self._read_ready.notifyAll() + self._read_ready.notify_all() finally: self._read_ready.release() diff --git a/python/pyproject.toml b/python/pyproject.toml index 25cafb616b9..3d229011876 100644 --- a/python/pyproject.toml +++ b/python/pyproject.toml @@ -1,3 +1,3 @@ [build-system] -requires = ["ipywidgets>=7.6.0", "pygments>=2.7.4", "jupyter_packaging~=0.10", "jupyterlab>=3.0.0,==3.*", "setuptools>=40.8.0", "wheel"] +requires = 
["ipywidgets>=8.0.3", "pygments>=2.7.4", "jupyter_packaging~=0.12", "jupyterlab>=3.0.0,==3.*", "setuptools>=50.3.2", "wheel==0.38.4"] build-backend = "setuptools.build_meta" diff --git a/python/requirements.txt b/python/requirements.txt index 1affea6046d..8677bc49f79 100644 --- a/python/requirements.txt +++ b/python/requirements.txt @@ -1,5 +1,5 @@ numpy>=1.18.0 dash>=2.6.0 werkzeug>=2.2.3 -nbformat==5.7.0 +nbformat>=5.7.0 configargparse diff --git a/python/requirements_build.txt b/python/requirements_build.txt index 308feb49aeb..b9bb3b1f28a 100644 --- a/python/requirements_build.txt +++ b/python/requirements_build.txt @@ -1,3 +1,3 @@ -setuptools>=50.3.2 +setuptools>=67.3.2 wheel==0.38.4 yapf==0.30.0 diff --git a/python/requirements_jupyter_build.txt b/python/requirements_jupyter_build.txt index 9d43e772d4d..492acf451d4 100644 --- a/python/requirements_jupyter_build.txt +++ b/python/requirements_jupyter_build.txt @@ -1,5 +1,5 @@ pywinpty==2.0.2; sys_platform=='win32' and python_version=='3.6' ipywidgets>=8.0.4 pygments>=2.7.4 -jupyter_packaging~=0.10 +jupyter_packaging~=0.12 jupyterlab>=3.0.0,==3.* diff --git a/python/requirements_test.txt b/python/requirements_test.txt index 7eafb00b60a..4a2f418b6bb 100644 --- a/python/requirements_test.txt +++ b/python/requirements_test.txt @@ -1,6 +1,6 @@ pytest==7.1.2 pytest-randomly==3.8.0 -scipy==1.7.3 -tensorboard==2.8.0 +scipy==1.10.1 +tensorboard==2.13.0 oauthlib==3.2.2 -certifi==2022.12.7 +certifi==2023.7.22 diff --git a/python/setup.py b/python/setup.py index 5e69df23932..029c5717180 100644 --- a/python/setup.py +++ b/python/setup.py @@ -35,7 +35,7 @@ import jupyterlab # noqa # pylint: disable=unused-import except ImportError as error: print(error.__class__.__name__ + ": " + error.message) - print("Run `pip install jupyter_packaging ipywidgets jupyterlab`.") + print("Run `pip install -r requirements-jupyter-build.txt`.") here = os.path.dirname(os.path.abspath(__file__)) js_dir = os.path.join(here, "js") @@ -127,10 +127,10 @@ def finalize_options(self): "Programming Language :: C", "Programming Language :: C++", "Programming Language :: Python :: 3", - "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", "Topic :: Education", "Topic :: Multimedia :: Graphics :: 3D Modeling", "Topic :: Multimedia :: Graphics :: 3D Rendering", @@ -157,7 +157,7 @@ def finalize_options(self): setup_args = dict( name=name, version="@PROJECT_VERSION@", - python_requires=">=3.6", + python_requires=">=3.8", include_package_data=True, install_requires=install_requires, packages=find_packages(), @@ -178,6 +178,9 @@ def finalize_options(self): description="@PROJECT_DESCRIPTION@", long_description=long_description, long_description_content_type="text/x-rst", + # Metadata below is valid but currently ignored by pip (<=v23) + obsoletes=["open3d_python"], + provides=["open3d", "open3d_cpu"], # For open3d-cpu ) setup(**setup_args) diff --git a/python/test/core/test_core.py b/python/test/core/test_core.py index b0fac5c8f0c..47200dfe338 100644 --- a/python/test/core/test_core.py +++ b/python/test/core/test_core.py @@ -61,8 +61,8 @@ def to_numpy_dtype(dtype: o3c.Dtype): o3c.uint16: np.uint16, o3c.uint32: np.uint32, o3c.uint64: np.uint64, - o3c.bool8: np.bool8, # np.bool deprecated - o3c.bool: np.bool8, # o3c.bool is an alias for o3c.bool8 + o3c.bool8: np.bool_, + o3c.bool: np.bool_, # o3c.bool is an alias for o3c.bool8 } return 
conversions[dtype] @@ -163,7 +163,7 @@ def test_tensor_constructor(dtype, device): np.testing.assert_equal(np_t, o3_t.cpu().numpy()) # Boolean - np_t = np.array([True, False, True], dtype=np.bool8) + np_t = np.array([True, False, True], dtype=np.bool_) o3_t = o3c.Tensor([True, False, True], o3c.bool, device) np.testing.assert_equal(np_t, o3_t.cpu().numpy()) o3_t = o3c.Tensor(np_t, o3c.bool, device) @@ -801,7 +801,7 @@ def test_setitem(device): np.testing.assert_equal(o3_t.cpu().numpy(), np_t) # Scalar boolean set item - np_t = np.eye(4, dtype=np.bool8) + np_t = np.eye(4, dtype=np.bool_) o3_t = o3c.Tensor.eye(4, dtype=o3c.bool) np_t[2, 2] = False o3_t[2, 2] = False diff --git a/python/test/ml_ops/test_cconv_python.py b/python/test/ml_ops/test_cconv_python.py index 32a30aba4e7..2461d84b114 100644 --- a/python/test/ml_ops/test_cconv_python.py +++ b/python/test/ml_ops/test_cconv_python.py @@ -28,8 +28,8 @@ def test_compare_to_conv3d(dtype, filter_size, out_channels, in_channels, with_inp_importance, with_normalization): """Compares to the 3D convolution in tensorflow""" - import tensorflow as tf - import open3d.ml.tf as ml3d + tf = pytest.importorskip('tensorflow') + ml3d = pytest.importorskip('open3d.ml.tf') np.random.seed(0) conv_attrs = { @@ -123,8 +123,8 @@ def test_cconv_gradient(filter_size, out_channels, in_channels, with_inp_importance, with_neighbors_importance, with_individual_extent, with_normalization, align_corners, coordinate_mapping, interpolation): - import tensorflow as tf - import open3d.ml.tf as ml3d + tf = pytest.importorskip('tensorflow') + ml3d = pytest.importorskip('open3d.ml.tf') dtype = np.float64 np.random.seed(0) diff --git a/python/test/ml_ops/test_fixed_radius_search.py b/python/test/ml_ops/test_fixed_radius_search.py index 71ce5e1270a..f0ccf87eada 100644 --- a/python/test/ml_ops/test_fixed_radius_search.py +++ b/python/test/ml_ops/test_fixed_radius_search.py @@ -10,8 +10,10 @@ from scipy.spatial import cKDTree import pytest import mltest -import torch -import tensorflow as tf +if o3d._build_config['BUILD_PYTORCH_OPS']: + import torch +if o3d._build_config['BUILD_TENSORFLOW_OPS']: + import tensorflow as tf # skip all tests if the ml ops were not built pytestmark = mltest.default_marks diff --git a/python/test/ml_ops/test_knn_search.py b/python/test/ml_ops/test_knn_search.py index e4bdae38827..175835f6593 100644 --- a/python/test/ml_ops/test_knn_search.py +++ b/python/test/ml_ops/test_knn_search.py @@ -10,8 +10,10 @@ from scipy.spatial import cKDTree import pytest import mltest -import torch -import tensorflow as tf +if o3d._build_config['BUILD_PYTORCH_OPS']: + import torch +if o3d._build_config['BUILD_TENSORFLOW_OPS']: + import tensorflow as tf # skip all tests if the ml ops were not built pytestmark = mltest.default_marks diff --git a/python/test/ml_ops/test_radius_search.py b/python/test/ml_ops/test_radius_search.py index 6ce8da8b478..85b2af0ad0d 100644 --- a/python/test/ml_ops/test_radius_search.py +++ b/python/test/ml_ops/test_radius_search.py @@ -10,8 +10,10 @@ from scipy.spatial import cKDTree import pytest import mltest -import torch -import tensorflow as tf +if o3d._build_config['BUILD_PYTORCH_OPS']: + import torch +if o3d._build_config['BUILD_TENSORFLOW_OPS']: + import tensorflow as tf # skip all tests if the tf ops were not built and disable warnings caused by # tensorflow diff --git a/python/test/t/geometry/test_pointcloud.py b/python/test/t/geometry/test_pointcloud.py index 87db1ff7dd4..e6c194477ca 100644 --- a/python/test/t/geometry/test_pointcloud.py 
+++ b/python/test/t/geometry/test_pointcloud.py @@ -161,7 +161,7 @@ def test_member_functions(device): pcd_small_down = pcd.voxel_down_sample(1) assert pcd_small_down.point.positions.allclose( - o3c.Tensor([[0, 0, 0]], dtype, device)) + o3c.Tensor([[0.375, 0.375, 0.575]], dtype, device)) def test_extrude_rotation(): diff --git a/python/test/t/geometry/test_raycasting_scene.py b/python/test/t/geometry/test_raycasting_scene.py index f89bb0b6644..3ce024a2b29 100644 --- a/python/test/t/geometry/test_raycasting_scene.py +++ b/python/test/t/geometry/test_raycasting_scene.py @@ -137,6 +137,39 @@ def test_count_lots_of_intersections(): _ = scene.count_intersections(rays) +def test_list_intersections(): + cube = o3d.t.geometry.TriangleMesh.from_legacy( + o3d.geometry.TriangleMesh.create_box()) + + scene = o3d.t.geometry.RaycastingScene() + scene.add_triangles(cube) + + rays = o3d.core.Tensor([[0.5, 0.5, -1, 0, 0, 1], [0.5, 0.5, 0.5, 0, 0, 1], + [10, 10, 10, 1, 0, 0]], + dtype=o3d.core.float32) + ans = scene.list_intersections(rays) + + np.testing.assert_allclose(ans['t_hit'].numpy(), + np.array([1.0, 2.0, 0.5]), + rtol=1e-6, + atol=1e-6) + + +# list lots of random ray intersections to test the internal batching +# we expect no errors for this test +def test_list_lots_of_intersections(): + cube = o3d.t.geometry.TriangleMesh.from_legacy( + o3d.geometry.TriangleMesh.create_box()) + + scene = o3d.t.geometry.RaycastingScene() + scene.add_triangles(cube) + + rs = np.random.RandomState(123) + rays = o3d.core.Tensor.from_numpy(rs.rand(123456, 6).astype(np.float32)) + + _ = scene.list_intersections(rays) + + def test_compute_closest_points(): vertices = o3d.core.Tensor([[0, 0, 0], [1, 0, 0], [1, 1, 0]], dtype=o3d.core.float32) @@ -248,7 +281,9 @@ def test_output_shapes(shape): 'primitive_ids': [], 'primitive_uvs': [2], 'primitive_normals': [3], - 'points': [3] + 'points': [3], + 'ray_ids': [], + 'ray_splits': [] } ans = scene.cast_rays(rays) @@ -267,6 +302,20 @@ def test_output_shapes(shape): ) == expected_shape, 'shape mismatch: expected {} but got {} for {}'.format( expected_shape, list(v.shape), k) + ans = scene.list_intersections(rays) + nx = np.sum(scene.count_intersections(rays).numpy()).tolist() + for k, v in ans.items(): + if k == 'ray_splits': + alt_shape = [np.prod(rays.shape[:-1]) + 1] + else: + alt_shape = [nx] + #use np.append otherwise issues if alt_shape = [0] and last_dim[k] = [] + expected_shape = np.append(alt_shape, last_dim[k]).tolist() + assert list( + v.shape + ) == expected_shape, 'shape mismatch: expected {} but got {} for {}'.format( + expected_shape, list(v.shape), k) + def test_sphere_wrong_occupancy(): # This test checks a specific scenario where the old implementation diff --git a/python/test/t/geometry/test_trianglemesh.py b/python/test/t/geometry/test_trianglemesh.py index 843184dd3e6..2a108adff56 100644 --- a/python/test/t/geometry/test_trianglemesh.py +++ b/python/test/t/geometry/test_trianglemesh.py @@ -417,3 +417,225 @@ def test_pickle(device): mesh.vertex.positions.cpu().numpy()) np.testing.assert_equal(mesh_load.triangle.indices.cpu().numpy(), mesh.triangle.indices.cpu().numpy()) + + +@pytest.mark.parametrize("device", list_devices()) +def test_select_faces_by_mask_32(device): + sphere_custom = o3d.t.geometry.TriangleMesh.create_sphere( + 1, 3, o3c.float64, o3c.int32, device) + + expected_verts = o3c.Tensor( + [[0.0, 0.0, 1.0], [0.866025, 0, 0.5], [0.433013, 0.75, 0.5], + [-0.866025, 0.0, 0.5], [-0.433013, -0.75, 0.5], [0.433013, -0.75, 0.5] + ], o3c.float64, device) + + 
expected_tris = o3c.Tensor([[0, 1, 2], [0, 3, 4], [0, 4, 5], [0, 5, 1]], + o3c.int32, device) + + # check indices shape mismatch + mask_2d = o3c.Tensor([[False, False], [False, False], [False, False]], + o3c.bool, device) + with pytest.raises(RuntimeError): + selected = sphere_custom.select_faces_by_mask(mask_2d) + + # check indices type mismatch + mask_float = o3c.Tensor([ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 + ], o3c.float32, device) + with pytest.raises(RuntimeError): + selected = sphere_custom.select_faces_by_mask(mask_float) + + # check the basic case + mask = o3c.Tensor([ + True, False, False, False, False, False, True, False, True, False, True, + False, False, False, False, False, False, False, False, False, False, + False, False, False + ], o3c.bool, device) + selected = sphere_custom.select_faces_by_mask(mask) + assert selected.vertex.positions.allclose(expected_verts) + assert selected.triangle.indices.allclose(expected_tris) + + # check that the original mesh is unmodified + untouched_sphere = o3d.t.geometry.TriangleMesh.create_sphere( + 1, 3, o3c.float64, o3c.int32, device) + assert sphere_custom.vertex.positions.allclose( + untouched_sphere.vertex.positions) + assert sphere_custom.triangle.indices.allclose( + untouched_sphere.triangle.indices) + + +@pytest.mark.parametrize("device", list_devices()) +def test_select_faces_by_mask_64(device): + sphere_custom = o3d.t.geometry.TriangleMesh.create_sphere( + 1, 3, o3c.float64, o3c.int64, device) + + # check indices shape mismatch + mask_2d = o3c.Tensor([[False, False], [False, False], [False, False]], + o3c.bool, device) + with pytest.raises(RuntimeError): + selected = sphere_custom.select_faces_by_mask(mask_2d) + + # check indices type mismatch + mask_float = o3c.Tensor([ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 + ], o3c.float32, device) + with pytest.raises(RuntimeError): + selected = sphere_custom.select_faces_by_mask(mask_float) + + expected_verts = o3c.Tensor( + [[0.0, 0.0, 1.0], [0.866025, 0, 0.5], [0.433013, 0.75, 0.5], + [-0.866025, 0.0, 0.5], [-0.433013, -0.75, 0.5], [0.433013, -0.75, 0.5] + ], o3c.float64, device) + + expected_tris = o3c.Tensor([[0, 1, 2], [0, 3, 4], [0, 4, 5], [0, 5, 1]], + o3c.int64, device) + # check the basic case + mask = o3c.Tensor([ + True, False, False, False, False, False, True, False, True, False, True, + False, False, False, False, False, False, False, False, False, False, + False, False, False + ], o3c.bool, device) + + selected = sphere_custom.select_faces_by_mask(mask) + assert selected.vertex.positions.allclose(expected_verts) + assert selected.triangle.indices.allclose(expected_tris) + + # check that the original mesh is unmodified + untouched_sphere = o3d.t.geometry.TriangleMesh.create_sphere( + 1, 3, o3c.float64, o3c.int64, device) + assert sphere_custom.vertex.positions.allclose( + untouched_sphere.vertex.positions) + assert sphere_custom.triangle.indices.allclose( + untouched_sphere.triangle.indices) + + +@pytest.mark.parametrize("device", list_devices()) +def test_select_by_index_32(device): + sphere_custom = o3d.t.geometry.TriangleMesh.create_sphere( + 1, 3, o3c.float64, o3c.int32, device) + + expected_verts = o3c.Tensor( + [[0.0, 0.0, 1.0], [0.866025, 0, 0.5], [0.433013, 0.75, 0.5], + [-0.866025, 0.0, 0.5], [-0.433013, -0.75, 0.5], [0.433013, -0.75, 0.5] + ], o3c.float64, device) + + expected_tris = o3c.Tensor([[0, 1, 2], [0, 3, 4], [0, 4, 5], [0, 5, 1]], + o3c.int32, device) + + # check indices shape mismatch + 
indices_2d = o3c.Tensor([[0, 2], [3, 5], [6, 7]], o3c.int32, device) + with pytest.raises(RuntimeError): + selected = sphere_custom.select_by_index(indices_2d) + + # check indices type mismatch + indices_float = o3c.Tensor([2.0, 4.0], o3c.float32, device) + with pytest.raises(RuntimeError): + selected = sphere_custom.select_by_index(indices_float) + + # check the expected mesh with int8 input + indices_8 = o3c.Tensor([0, 2, 3, 5, 6, 7], o3c.int8, device) + selected = sphere_custom.select_by_index(indices_8) + assert selected.vertex.positions.allclose(expected_verts) + assert selected.triangle.indices.allclose(expected_tris) + + # check the expected mesh with int16 input + indices_16 = o3c.Tensor([2, 0, 5, 3, 7, 6], o3c.int16, device) + selected = sphere_custom.select_by_index(indices_16) + assert selected.vertex.positions.allclose(expected_verts) + assert selected.triangle.indices.allclose(expected_tris) + + # check the expected mesh with uint32 input + indices_u32 = o3c.Tensor([7, 6, 5, 3, 2, 0], o3c.uint32, device) + selected = sphere_custom.select_by_index(indices_u32) + assert selected.vertex.positions.allclose(expected_verts) + assert selected.triangle.indices.allclose(expected_tris) + + # check the expected mesh with uint64 input and unsorted indices + indices_u64 = o3c.Tensor([7, 6, 3, 5, 0, 2], o3c.uint64, device) + selected = sphere_custom.select_by_index(indices_u64) + assert selected.vertex.positions.allclose(expected_verts) + assert selected.triangle.indices.allclose(expected_tris) + + # check that an index exceeding the max vertex index of the mesh is ignored + selected = sphere_custom.select_by_index([0, 2, 3, 5, 6, 99, 7]) + assert selected.vertex.positions.allclose(expected_verts) + assert selected.triangle.indices.allclose(expected_tris) + + # check that a negative index is ignored + selected = sphere_custom.select_by_index([0, 2, 3, 5, -10, 6, 7]) + assert selected.vertex.positions.allclose(expected_verts) + assert selected.triangle.indices.allclose(expected_tris) + + # check that the original mesh is unmodified + untouched_sphere = o3d.t.geometry.TriangleMesh.create_sphere( + 1, 3, o3c.float64, o3c.int32, device) + assert sphere_custom.vertex.positions.allclose( + untouched_sphere.vertex.positions) + assert sphere_custom.triangle.indices.allclose( + untouched_sphere.triangle.indices) + + +@pytest.mark.parametrize("device", list_devices()) +def test_select_by_index_64(device): + sphere_custom = o3d.t.geometry.TriangleMesh.create_sphere( + 1, 3, o3c.float64, o3c.int64, device) + + # check indices shape mismatch + with pytest.raises(RuntimeError): + indices_2d = o3c.Tensor([[0, 2], [3, 5], [6, 7]], o3c.int64, device) + selected = sphere_custom.select_by_index(indices_2d) + + # check indices type mismatch + with pytest.raises(RuntimeError): + indices_float = o3c.Tensor([2.0, 4.0], o3c.float64, device) + selected = sphere_custom.select_by_index(indices_float) + + expected_verts = o3c.Tensor( + [[0.0, 0.0, 1.0], [0.866025, 0, 0.5], [0.433013, 0.75, 0.5], + [-0.866025, 0.0, 0.5], [-0.433013, -0.75, 0.5], [0.433013, -0.75, 0.5] + ], o3c.float64, device) + + expected_tris = o3c.Tensor([[0, 1, 2], [0, 3, 4], [0, 4, 5], [0, 5, 1]], + o3c.int64, device) + + # check the expected mesh with int8 input + indices_u8 = o3c.Tensor([0, 2, 3, 5, 6, 7], o3c.uint8, device) + selected = sphere_custom.select_by_index(indices_u8) + assert selected.vertex.positions.allclose(expected_verts) + assert selected.triangle.indices.allclose(expected_tris) + + # check the expected mesh with int16 input + 
indices_u16 = o3c.Tensor([2, 0, 5, 3, 7, 6], o3c.uint16, device) + selected = sphere_custom.select_by_index(indices_u16) + assert selected.vertex.positions.allclose(expected_verts) + assert selected.triangle.indices.allclose(expected_tris) + + # check the expected mesh with int32 input + indices_32 = o3c.Tensor([7, 6, 5, 3, 2, 0], o3c.int32, device) + selected = sphere_custom.select_by_index(indices_32) + assert selected.vertex.positions.allclose(expected_verts) + assert selected.triangle.indices.allclose(expected_tris) + + # check the expected mesh with int64 input and unsorted indices + indices_64 = o3c.Tensor([7, 6, 3, 5, 0, 2], o3c.int64, device) + selected = sphere_custom.select_by_index(indices_64) + assert selected.vertex.positions.allclose(expected_verts) + assert selected.triangle.indices.allclose(expected_tris) + + # check that an index exceeding the max vertex index of the mesh is ignored + selected = sphere_custom.select_by_index([0, 2, 3, 5, 6, 99, 7]) + assert selected.vertex.positions.allclose(expected_verts) + assert selected.triangle.indices.allclose(expected_tris) + + # check that a negative index is ignored + selected = sphere_custom.select_by_index([0, 2, 3, 5, -10, 6, 7]) + assert selected.vertex.positions.allclose(expected_verts) + assert selected.triangle.indices.allclose(expected_tris) + + # check that the original mesh is unmodified + untouched_sphere = o3d.t.geometry.TriangleMesh.create_sphere( + 1, 3, o3c.float64, o3c.int64, device) + assert sphere_custom.vertex.positions.allclose( + untouched_sphere.vertex.positions) + assert sphere_custom.triangle.indices.allclose( + untouched_sphere.triangle.indices) diff --git a/util/check_style.py b/util/check_style.py index 97b7c1ba58c..be4d60c6f62 100644 --- a/util/check_style.py +++ b/util/check_style.py @@ -78,7 +78,7 @@ def _check_style(file_path, clang_format_bin): """ Returns (true, true) if (style, header) is valid. """ - with open(file_path, 'r') as f: + with open(file_path, 'r', encoding='utf-8') as f: is_valid_header = f.read().startswith(CppFormatter.standard_header) cmd = [ @@ -156,7 +156,7 @@ def _check_style(file_path, style_config): Returns (true, true) if (style, header) is valid. """ - with open(file_path, 'r') as f: + with open(file_path, 'r', encoding='utf-8') as f: content = f.read() is_valid_header = (len(content) == 0 or content.startswith( PythonFormatter.standard_header)) @@ -218,7 +218,7 @@ def _check_or_apply_style(file_path, style_config, apply): are merged into one. 
""" # Ref: https://gist.github.com/oskopek/496c0d96c79fb6a13692657b39d7c709 - with open(file_path, "r") as f: + with open(file_path, "r", encoding='utf-8') as f: notebook = nbformat.read(f, as_version=nbformat.NO_CONVERT) nbformat.validate(notebook) @@ -241,7 +241,7 @@ def _check_or_apply_style(file_path, style_config, apply): changed = True if apply: - with open(file_path, "w") as f: + with open(file_path, "w", encoding='utf-8') as f: nbformat.write(notebook, f, version=nbformat.NO_CONVERT) return not changed diff --git a/util/ci_utils.sh b/util/ci_utils.sh index 887e507fff5..e490b0d2282 100644 --- a/util/ci_utils.sh +++ b/util/ci_utils.sh @@ -23,40 +23,20 @@ BUILD_PYTORCH_OPS=${BUILD_PYTORCH_OPS:-ON} LOW_MEM_USAGE=${LOW_MEM_USAGE:-OFF} # Dependency versions: -# CUDA -if [[ $BUILD_TENSORFLOW_OPS == ON || $BUILD_PYTORCH_OPS == ON || - $UBUNTU_VERSION != bionic ]]; then - # CUDA version in sync with PyTorch and Tensorflow - CUDA_VERSION=("11-6" "11.6") - CUDNN_MAJOR_VERSION=8 - CUDNN_VERSION="8.4.1.50_cuda11.6" - GCC_MAX_VER=9 -else - # Without MLOps, ensure Open3D works with the lowest supported CUDA version - # Not available in Nvidia focal repos - CUDA_VERSION=("10-1" "10.1") - CUDNN_MAJOR_VERSION=8 - CUDNN_VERSION="8.0.5.39-1+cuda10.1" - GCC_MAX_VER=7 -fi +# CUDA: see docker/docker_build.sh # ML -TENSORFLOW_VER="2.8.4" -TENSORBOARD_VER="2.8.0" -TORCH_CPU_GLNX_VER="1.13.1+cpu" -TORCH_CUDA_GLNX_VER="1.13.1+cu116" -PYTHON_VER=$(python -c 'import sys; ver=f"{sys.version_info.major}{sys.version_info.minor}"; print(f"cp{ver}-cp{ver}{sys.abiflags}")' 2>/dev/null || true) -# TORCH_CUDA_GLNX_URL="https://github.com/isl-org/open3d_downloads/releases/download/torch1.8.2/torch-1.8.2-${PYTHON_VER}-linux_x86_64.whl" -TORCH_MACOS_VER="1.13.1" +TENSORFLOW_VER="2.13.0" +TORCH_VER="2.0.1" +TORCH_CPU_GLNX_VER="${TORCH_VER}+cpu" +TORCH_CUDA_GLNX_VER="${TORCH_VER}+cu117" # match CUDA_VERSION in docker/docker_build.sh +TORCH_MACOS_VER="${TORCH_VER}" TORCH_REPO_URL="https://download.pytorch.org/whl/torch/" # Python -PIP_VER="21.1.1" +PIP_VER="23.2.1" WHEEL_VER="0.38.4" STOOLS_VER="67.3.2" -PYTEST_VER="7.1.2" -PYTEST_RANDOMLY_VER="3.8.0" -SCIPY_VER="1.7.3" YAPF_VER="0.30.0" -PROTOBUF_VER="3.19.0" +PROTOBUF_VER="4.24.0" OPEN3D_INSTALL_DIR=~/open3d_install OPEN3D_SOURCE_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")"/.. 
>/dev/null 2>&1 && pwd)" @@ -68,16 +48,15 @@ install_python_dependencies() { python -m pip install --upgrade pip=="$PIP_VER" wheel=="$WHEEL_VER" \ setuptools=="$STOOLS_VER" if [[ "with-unit-test" =~ ^($options)$ ]]; then - python -m pip install -U scipy=="$SCIPY_VER" pytest=="$PYTEST_VER" \ - pytest-randomly=="$PYTEST_RANDOMLY_VER" + python -m pip install -U -r python/requirements_test.txt fi if [[ "with-cuda" =~ ^($options)$ ]]; then - TF_ARCH_NAME=tensorflow-gpu + TF_ARCH_NAME=tensorflow TF_ARCH_DISABLE_NAME=tensorflow-cpu TORCH_GLNX="torch==$TORCH_CUDA_GLNX_VER" else TF_ARCH_NAME=tensorflow-cpu - TF_ARCH_DISABLE_NAME=tensorflow-gpu + TF_ARCH_DISABLE_NAME=tensorflow TORCH_GLNX="torch==$TORCH_CPU_GLNX_VER" fi @@ -91,14 +70,14 @@ install_python_dependencies() { if [ "$BUILD_TENSORFLOW_OPS" == "ON" ]; then # TF happily installs both CPU and GPU versions at the same time, so remove the other python -m pip uninstall --yes "$TF_ARCH_DISABLE_NAME" - python -m pip install -U "$TF_ARCH_NAME"=="$TENSORFLOW_VER" + python -m pip install -U "$TF_ARCH_NAME"=="$TENSORFLOW_VER" # ML/requirements-tensorflow.txt fi - if [ "$BUILD_PYTORCH_OPS" == "ON" ]; then + if [ "$BUILD_PYTORCH_OPS" == "ON" ]; then # ML/requirements-torch.txt if [[ "$OSTYPE" == "linux-gnu"* ]]; then - python -m pip install -U "${TORCH_GLNX}" -f "$TORCH_REPO_URL" + python -m pip install -U "${TORCH_GLNX}" -f "$TORCH_REPO_URL" tensorboard elif [[ "$OSTYPE" == "darwin"* ]]; then - python -m pip install -U torch=="$TORCH_MACOS_VER" -f "$TORCH_REPO_URL" + python -m pip install -U torch=="$TORCH_MACOS_VER" -f "$TORCH_REPO_URL" tensorboard else echo "unknown OS $OSTYPE" exit 1 @@ -124,20 +103,17 @@ build_all() { mkdir -p build cd build - GLIBCXX_USE_CXX11_ABI=ON - if [ "$BUILD_PYTORCH_OPS" == ON ] || [ "$BUILD_TENSORFLOW_OPS" == ON ]; then - GLIBCXX_USE_CXX11_ABI=OFF - fi cmakeOptions=( - -DDEVELOPER_BUILD=$DEVELOPER_BUILD + -DDEVELOPER_BUILD="$DEVELOPER_BUILD" -DBUILD_SHARED_LIBS="$BUILD_SHARED_LIBS" -DCMAKE_BUILD_TYPE=Release -DBUILD_LIBREALSENSE=ON -DBUILD_CUDA_MODULE="$BUILD_CUDA_MODULE" -DBUILD_COMMON_CUDA_ARCHS=ON -DBUILD_COMMON_ISPC_ISAS=ON - -DGLIBCXX_USE_CXX11_ABI="$GLIBCXX_USE_CXX11_ABI" + # TODO: PyTorch still use old CXX ABI, remove this line when PyTorch is updated + -DGLIBCXX_USE_CXX11_ABI=OFF -DBUILD_TENSORFLOW_OPS="$BUILD_TENSORFLOW_OPS" -DBUILD_PYTORCH_OPS="$BUILD_PYTORCH_OPS" -DCMAKE_INSTALL_PREFIX="$OPEN3D_INSTALL_DIR" @@ -192,6 +168,13 @@ build_pip_package() { echo "Jupyter extension disabled in Python wheel." 
BUILD_JUPYTER_EXTENSION=OFF fi + CXX11_ABI=ON + if [ "$BUILD_TENSORFLOW_OPS" == "ON" ]; then + CXX11_ABI=$(python -c "import tensorflow as tf; print('ON' if tf.__cxx11_abi_flag__ else 'OFF')") + elif [ "$BUILD_PYTORCH_OPS" == "ON" ]; then + CXX11_ABI=$(python -c "import torch; print('ON' if torch._C._GLIBCXX_USE_CXX11_ABI else 'OFF')") + fi + echo Building with GLIBCXX_USE_CXX11_ABI="$CXX11_ABI" set -u echo @@ -202,10 +185,10 @@ build_pip_package() { "-DDEVELOPER_BUILD=$DEVELOPER_BUILD" "-DBUILD_COMMON_ISPC_ISAS=ON" "-DBUILD_AZURE_KINECT=$BUILD_AZURE_KINECT" - "-DBUILD_LIBREALSENSE=ON" - "-DGLIBCXX_USE_CXX11_ABI=OFF" - "-DBUILD_TENSORFLOW_OPS=ON" - "-DBUILD_PYTORCH_OPS=ON" + "-DBUILD_LIBREALSENSE=OFF" + "-DGLIBCXX_USE_CXX11_ABI=$CXX11_ABI" + "-DBUILD_TENSORFLOW_OPS=$BUILD_TENSORFLOW_OPS" + "-DBUILD_PYTORCH_OPS=$BUILD_PYTORCH_OPS" "-DBUILD_FILAMENT_FROM_SOURCE=$BUILD_FILAMENT_FROM_SOURCE" "-DBUILD_JUPYTER_EXTENSION=$BUILD_JUPYTER_EXTENSION" "-DCMAKE_INSTALL_PREFIX=$OPEN3D_INSTALL_DIR" @@ -218,7 +201,6 @@ build_pip_package() { cmake -DBUILD_CUDA_MODULE=OFF "${cmakeOptions[@]}" .. set +x # Echo commands off echo - make VERBOSE=1 -j"$NPROC" pybind open3d_tf_ops open3d_torch_ops echo "Packaging Open3D CPU pip package..." make VERBOSE=1 -j"$NPROC" pip-package @@ -244,8 +226,8 @@ build_pip_package() { echo "Packaging Open3D full pip package..." make VERBOSE=1 -j"$NPROC" pip-package - mv open3d*.whl lib/python_package/pip_package/ # restore CPU wheel - popd # PWD=Open3D + mv open3d*.whl lib/python_package/pip_package/ # restore CPU wheel + popd # PWD=Open3D } # Test wheel in blank virtual environment @@ -303,13 +285,10 @@ test_wheel() { run_python_tests() { # shellcheck disable=SC1091 source open3d_test.venv/bin/activate - python -m pip install -U pytest=="$PYTEST_VER" \ - pytest-randomly=="$PYTEST_RANDOMLY_VER" \ - scipy=="$SCIPY_VER" \ - tensorboard=="$TENSORBOARD_VER" + python -m pip install -U -r python/requirements_test.txt echo Add --randomly-seed=SEED to the test command to reproduce test order. pytest_args=("$OPEN3D_SOURCE_ROOT"/python/test/) - if [ "$BUILD_PYTORCH_OPS" == "OFF" ] || [ "$BUILD_TENSORFLOW_OPS" == "OFF" ]; then + if [ "$BUILD_PYTORCH_OPS" == "OFF" ] && [ "$BUILD_TENSORFLOW_OPS" == "OFF" ]; then echo Testing ML Ops disabled pytest_args+=(--ignore "$OPEN3D_SOURCE_ROOT"/python/test/ml_ops/) fi @@ -422,7 +401,8 @@ build_docs() { "-DBUILD_AZURE_KINECT=ON" "-DBUILD_LIBREALSENSE=ON" "-DGLIBCXX_USE_CXX11_ABI=OFF" - "-DBUILD_TENSORFLOW_OPS=ON" + # TODO: PyTorch still use old CXX ABI, re-enable Tensorflow when PyTorch is updated to use new ABI + "-DBUILD_TENSORFLOW_OPS=OFF" "-DBUILD_PYTORCH_OPS=ON" "-DBUILD_EXAMPLES=OFF" ) @@ -434,12 +414,13 @@ build_docs() { -DBUILD_WEBRTC=OFF \ -DBUILD_JUPYTER_EXTENSION=OFF \ .. - make install-pip-package -j$NPROC + make python-package -j$NPROC make -j$NPROC bin/GLInfo + export PYTHONPATH="${PYTHONPATH:+${PYTHONPATH}:}:$PWD/lib/python_package" python -c "from open3d import *; import open3d; print(open3d)" cd ../docs # To Open3D/docs - python make_docs.py $DOC_ARGS --clean_notebooks --execute_notebooks=always --py_api_rst=never + python make_docs.py $DOC_ARGS --clean_notebooks --execute_notebooks=always --py_api_rst=never --py_example_rst=never python -m pip uninstall --yes open3d cd ../build set +x # Echo commands off @@ -454,20 +435,23 @@ build_docs() { -DBUILD_WEBRTC=ON \ -DBUILD_JUPYTER_EXTENSION=OFF \ .. 
- make install-pip-package -j$NPROC + make python-package -j$NPROC make -j$NPROC bin/GLInfo || echo "Expect failure since HEADLESS_RENDERING=OFF" python -c "from open3d import *; import open3d; print(open3d)" cd ../docs # To Open3D/docs - python make_docs.py $DOC_ARGS --py_api_rst=always --execute_notebooks=never --sphinx --doxygen + python make_docs.py $DOC_ARGS --py_api_rst=always --py_example_rst=always --execute_notebooks=never --sphinx --doxygen set +x # Echo commands off } maximize_ubuntu_github_actions_build_space() { - df -h - $SUDO rm -rf /usr/share/dotnet - $SUDO rm -rf /usr/local/lib/android - $SUDO rm -rf /opt/ghc + # https://github.com/easimon/maximize-build-space/blob/master/action.yml + df -h . # => 26GB + $SUDO rm -rf /usr/share/dotnet # ~17GB + $SUDO rm -rf /usr/local/lib/android # ~11GB + $SUDO rm -rf /opt/ghc # ~2.7GB + $SUDO rm -rf /opt/hostedtoolcache/CodeQL # ~5.4GB + $SUDO docker image prune --all --force # ~4.5GB $SUDO rm -rf "$AGENT_TOOLSDIRECTORY" - df -h + df -h . # => 53GB }
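For local verification, the C++11 ABI detection that `build_pip_package` now performs in `util/ci_utils.sh` can be reproduced outside CI. The sketch below is illustrative only: it assumes TensorFlow and/or PyTorch are already installed in the active Python environment, and it simply reuses the same interpreter queries that the script runs (`tf.__cxx11_abi_flag__` and `torch._C._GLIBCXX_USE_CXX11_ABI`).

#!/usr/bin/env bash
# Illustrative sketch: report which GLIBCXX_USE_CXX11_ABI setting a local
# Open3D wheel build should use, mirroring build_pip_package in util/ci_utils.sh.
python -c "import tensorflow as tf; print('TensorFlow ABI:', 'ON' if tf.__cxx11_abi_flag__ else 'OFF')" \
    || echo "TensorFlow not installed; skipping TensorFlow ABI check"
python -c "import torch; print('PyTorch ABI:', 'ON' if torch._C._GLIBCXX_USE_CXX11_ABI else 'OFF')" \
    || echo "PyTorch not installed; skipping PyTorch ABI check"

A mismatch between the wheel's ABI and the ML framework's ABI is also why the docs build above keeps -DGLIBCXX_USE_CXX11_ABI=OFF and temporarily disables the TensorFlow ops, per the TODO comments in this patch.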