From d50699b16ca726bb366ad65eea0c2a8c52f02723 Mon Sep 17 00:00:00 2001 From: Yixing Lao Date: Thu, 23 May 2019 12:55:41 -0700 Subject: [PATCH] Avoid import star (from open3d import *) (#982) * import open3d as o3d * rename python namespace * format python code with yapf * add python style * add init.py * fix import * * update all line numbers * address review comments * check-style and apply-style for python --- .style.yapf | 2 + .travis.yml | 1 + .../Advanced/color_map_optimization.rst | 12 +- .../colored_pointcloud_registration.rst | 6 +- .../Advanced/customized_visualization.rst | 24 +-- .../Advanced/fast_global_registration.rst | 12 +- .../tutorial/Advanced/global_registration.rst | 16 +- .../Advanced/interactive_visualization.rst | 16 +- .../Advanced/multiway_registration.rst | 14 +- .../Advanced/non_blocking_visualization.rst | 10 +- .../Advanced/pointcloud_outlier_removal.rst | 22 +- docs/tutorial/Advanced/rgbd_integration.rst | 6 +- docs/tutorial/Basic/icp_registration.rst | 14 +- docs/tutorial/Basic/kdtree.rst | 18 +- docs/tutorial/Basic/mesh.rst | 2 +- docs/tutorial/Basic/pointcloud.rst | 18 +- docs/tutorial/Basic/python_interface.rst | 8 +- docs/tutorial/Basic/rgbd_images/redwood.rst | 8 +- docs/tutorial/Basic/visualization.rst | 10 +- .../ReconstructionSystem/integrate_scene.rst | 2 +- .../ReconstructionSystem/make_fragments.rst | 20 +- .../refine_registration.rst | 16 +- .../register_fragments.rst | 24 +-- examples/Python/Advanced/__init__.py | 0 examples/Python/Advanced/camera_trajectory.py | 37 ++-- .../Python/Advanced/color_map_optimization.py | 57 +++--- .../colored_pointcloud_registration.py | 56 +++--- .../Advanced/customized_visualization.py | 50 +++-- .../Python/Advanced/downsampling_and_trace.py | 19 +- .../Advanced/fast_global_registration.py | 28 +-- .../Python/Advanced/global_registration.py | 67 ++++--- .../Python/Advanced/headless_rendering.py | 22 +- .../Advanced/interactive_visualization.py | 50 +++-- .../Python/Advanced/load_save_viewpoint.py | 14 +- .../Python/Advanced/multiway_registration.py | 95 +++++---- .../Advanced/non_blocking_visualization.py | 32 ++- .../Advanced/pointcloud_outlier_removal.py | 31 +-- examples/Python/Advanced/remove_geometry.py | 12 +- examples/Python/Advanced/rgbd_integration.py | 28 ++- examples/Python/Advanced/trajectory_io.py | 16 +- examples/Python/Basic/__init__.py | 0 examples/Python/Basic/convex_hull.py | 54 ++--- examples/Python/Basic/file_io.py | 14 +- examples/Python/Basic/half_edge_mesh.py | 10 +- examples/Python/Basic/icp_registration.py | 32 +-- examples/Python/Basic/kdtree.py | 9 +- examples/Python/Basic/mesh.py | 26 +-- examples/Python/Basic/mesh_filter.py | 24 ++- examples/Python/Basic/mesh_properties.py | 136 +++++++------ examples/Python/Basic/mesh_sampling.py | 73 ++++--- examples/Python/Basic/mesh_simplification.py | 91 +++++---- examples/Python/Basic/mesh_subdivision.py | 80 ++++---- examples/Python/Basic/mesh_voxelization.py | 88 ++++---- examples/Python/Basic/pointcloud.py | 29 +-- examples/Python/Basic/python_binding.py | 15 +- examples/Python/Basic/rgbd_nyu.py | 26 ++- examples/Python/Basic/rgbd_odometry.py | 56 +++--- examples/Python/Basic/rgbd_redwood.py | 21 +- examples/Python/Basic/rgbd_sun.py | 20 +- examples/Python/Basic/rgbd_tum.py | 20 +- examples/Python/Basic/transformation.py | 37 ++-- examples/Python/Basic/visualization.py | 44 ++-- examples/Python/Basic/working_with_numpy.py | 36 ++-- examples/Python/Benchmark/__init__.py | 0 examples/Python/Benchmark/benchmark_fgr.py | 27 +-- 
examples/Python/Benchmark/benchmark_pre.py | 12 +- examples/Python/Benchmark/benchmark_ransac.py | 25 +-- examples/Python/Misc/__init__.py | 0 examples/Python/Misc/color_image.py | 47 +++-- .../Python/Misc/evaluate_geometric_feature.py | 28 ++- examples/Python/Misc/feature.py | 30 +-- .../Python/Misc/pose_graph_optimization.py | 40 ++-- examples/Python/Misc/sampling.py | 32 +-- examples/Python/Misc/vector.py | 37 ++-- .../Python/ReconstructionSystem/__init__.py | 0 .../ReconstructionSystem/debug/__init__.py | 0 .../debug/pairwise_pc_alignment.py | 10 +- .../debug/pairwise_rgbd_alignment.py | 41 ++-- .../debug/visualize_alignment.py | 77 +++---- .../debug/visualize_fragments.py | 31 +-- .../debug/visualize_pointcloud.py | 58 +++--- .../ReconstructionSystem/initialize_config.py | 14 +- .../ReconstructionSystem/integrate_scene.py | 43 ++-- .../ReconstructionSystem/make_fragments.py | 189 ++++++++++-------- .../opencv_pose_estimation.py | 147 +++++++------- .../optimize_posegraph.py | 48 ++--- .../refine_registration.py | 146 ++++++++------ .../register_fragments.py | 150 +++++++------- .../Python/ReconstructionSystem/run_system.py | 36 ++-- .../ReconstructionSystem/scripts/__init__.py | 0 .../scripts/synchronize_frames.py | 40 ++-- .../ReconstructionSystem/sensors/__init__.py | 0 .../sensors/realsense_pcd_visualizer.py | 44 ++-- .../sensors/realsense_recorder.py | 58 +++--- examples/Python/Utility/__init__.py | 0 examples/Python/Utility/downloader.py | 3 +- examples/Python/Utility/file.py | 7 +- examples/Python/Utility/opencv.py | 1 + examples/Python/Utility/visualization.py | 9 +- examples/Python/__init__.py | 0 src/UnitTest/Python/test_octree.py | 64 +++--- src/UnitTest/Python/test_open3d_eigen.py | 172 +++++++++------- util/scripts/apply-style.cmake | 48 ++++- util/scripts/check-style.cmake | 59 +++++- 104 files changed, 2016 insertions(+), 1593 deletions(-) create mode 100644 .style.yapf create mode 100644 examples/Python/Advanced/__init__.py create mode 100644 examples/Python/Basic/__init__.py create mode 100644 examples/Python/Benchmark/__init__.py create mode 100644 examples/Python/Misc/__init__.py create mode 100644 examples/Python/ReconstructionSystem/__init__.py create mode 100644 examples/Python/ReconstructionSystem/debug/__init__.py create mode 100644 examples/Python/ReconstructionSystem/scripts/__init__.py create mode 100644 examples/Python/ReconstructionSystem/sensors/__init__.py create mode 100644 examples/Python/Utility/__init__.py create mode 100644 examples/Python/__init__.py diff --git a/.style.yapf b/.style.yapf new file mode 100644 index 00000000000..0e9640c29a2 --- /dev/null +++ b/.style.yapf @@ -0,0 +1,2 @@ +[style] +based_on_style = google diff --git a/.travis.yml b/.travis.yml index 84cd77b9dfa..4a72532b5a7 100644 --- a/.travis.yml +++ b/.travis.yml @@ -32,6 +32,7 @@ matrix: - mkdir build - cd build - cmake -DPYTHON_EXECUTABLE=`which python` .. + - pip install -U yapf - make check-style # Build docs only diff --git a/docs/tutorial/Advanced/color_map_optimization.rst b/docs/tutorial/Advanced/color_map_optimization.rst index ae3be6fcbf9..620a5a62144 100644 --- a/docs/tutorial/Advanced/color_map_optimization.rst +++ b/docs/tutorial/Advanced/color_map_optimization.rst @@ -18,7 +18,7 @@ Input .. literalinclude:: ../../../examples/Python/Advanced/color_map_optimization.py :language: python :lineno-start: 19 - :lines: 19-35 + :lines: 19-38 :linenos: This script reads color and depth image pairs and makes ``rgbd_image``. 
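Condensed, the per-frame conversion is a single call. The following is a minimal sketch, assuming ``color`` and ``depth`` are loaded with ``o3d.io.read_image`` (the file names are hypothetical placeholders):

.. code-block:: python

    import open3d as o3d

    # Hypothetical paths; substitute one color/depth pair from the dataset.
    color = o3d.io.read_image("image/000001.jpg")
    depth = o3d.io.read_image("depth/000001.png")
    # convert_rgb_to_intensity=False keeps the 8-bit RGB channels rather
    # than collapsing them into a single-channel float intensity image.
    rgbd_image = o3d.geometry.create_rgbd_image_from_color_and_depth(
        color, depth, convert_rgb_to_intensity=False)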
Note that ``convert_rgb_to_intensity`` flag is ``False``. This is to preserve 8-bit color channels instead of using single channel float type image. @@ -27,16 +27,16 @@ It is always good practice to visualize RGBD image before applying it to color m .. literalinclude:: ../../../examples/Python/Advanced/color_map_optimization.py :language: python - :lineno-start: 37 - :lines: 37-39 + :lineno-start: 40 + :lines: 40-44 :linenos: The script reads camera trajectory and mesh. .. literalinclude:: ../../../examples/Python/Advanced/color_map_optimization.py :language: python - :lineno-start: 43 - :lines: 43-48 + :lineno-start: 46 + :lines: 46-53 :linenos: To visualize how the camera poses are not good for color mapping, this script intentionally set the iteration number as 0, which means no optimization. ``color_map_optimization`` paints a mesh using corresponding RGBD images and camera poses. Without optimization, the texture map is blurred. @@ -55,7 +55,7 @@ The next step is to optimize camera poses to get a sharp color map. .. literalinclude:: ../../../examples/Python/Advanced/color_map_optimization.py :language: python :lineno-start: 55 - :lines: 55-60 + :lines: 55-65 :linenos: The script sets ``maximum_iteration = 300`` for actual iterations. The optimization displays the following energy profile. diff --git a/docs/tutorial/Advanced/colored_pointcloud_registration.rst b/docs/tutorial/Advanced/colored_pointcloud_registration.rst index a69da3f7c46..84027e87a70 100644 --- a/docs/tutorial/Advanced/colored_pointcloud_registration.rst +++ b/docs/tutorial/Advanced/colored_pointcloud_registration.rst @@ -50,7 +50,7 @@ Point-to-plane ICP .. literalinclude:: ../../../examples/Python/Advanced/colored_pointcloud_registration.py :language: python :lineno-start: 29 - :lines: 29-37 + :lines: 29-38 :linenos: We first run :ref:`point_to_plane_icp` as a baseline approach. The visualization below shows misaligned green triangle textures. This is because geometric constraint does not prevent two planar surfaces from slipping. @@ -89,8 +89,8 @@ To further improve efficiency, [Park2017]_ proposes a multi-scale registration s .. literalinclude:: ../../../examples/Python/Advanced/colored_pointcloud_registration.py :language: python - :lineno-start: 39 - :lines: 39-70 + :lineno-start: 40 + :lines: 40-74 :linenos: In total, 3 layers of multi-resolution point clouds are created with :ref:`voxel_downsampling`. Normals are computed with :ref:`vertex_normal_estimation`. The core registration function ``registration_colored_icp`` is called for each layer, from coarse to fine. ``lambda_geometric`` is an optional argument for ``registration_colored_icp`` that determines :math:`\lambda \in [0,1]` in the overall energy :math:`\lambda E_{G} + (1-\lambda) E_{C}`. diff --git a/docs/tutorial/Advanced/customized_visualization.rst b/docs/tutorial/Advanced/customized_visualization.rst index fb1c0de5f79..0b8a19dd2ed 100644 --- a/docs/tutorial/Advanced/customized_visualization.rst +++ b/docs/tutorial/Advanced/customized_visualization.rst @@ -13,8 +13,8 @@ Mimic draw_geometries() with Visualizer class .. literalinclude:: ../../../examples/Python/Advanced/customized_visualization.py :language: python - :lineno-start: 12 - :lines: 12-19 + :lineno-start: 13 + :lines: 13-20 :linenos: This function produces exactly the same functionality of the convenient function ``draw_geometries``. @@ -26,8 +26,8 @@ Class ``Visualizer`` has a couple of variables such as a ``ViewControl`` and a ` .. 
literalinclude:: ../../../examples/Python/Advanced/customized_visualization.py :language: python - :lineno-start: 39 - :lines: 39-46 + :lineno-start: 46 + :lines: 46-52 :linenos: Outputs: @@ -42,8 +42,8 @@ To change field of view of the camera, it is necessary to get an instance of vis .. literalinclude:: ../../../examples/Python/Advanced/customized_visualization.py :language: python - :lineno-start: 21 - :lines: 21-30 + :lineno-start: 23 + :lines: 23-32 :linenos: The field of view can be set as [5,90] degree. Note that ``change_field_of_view`` adds specified FoV on the current FoV. By default, visualizer has 60 degrees of FoV. Calling the following code @@ -74,8 +74,8 @@ Use callback functions .. literalinclude:: ../../../examples/Python/Advanced/customized_visualization.py :language: python - :lineno-start: 32 - :lines: 32-37 + :lineno-start: 35 + :lines: 35-43 :linenos: Function ``draw_geometries_with_animation_callback`` registers a Python callback function ``rotate_view`` as the idle function of the main loop. It rotates the view along the x-axis whenever the visualizer is idle. This defines an animation behavior. @@ -85,8 +85,8 @@ Function ``draw_geometries_with_animation_callback`` registers a Python callback .. literalinclude:: ../../../examples/Python/Advanced/customized_visualization.py :language: python - :lineno-start: 48 - :lines: 48-72 + :lineno-start: 55 + :lines: 55-84 :linenos: Callback functions can also be registered upon key press event. This script registered four keys. For example, pressing :kbd:`k` changes the background color to black. @@ -99,8 +99,8 @@ Capture images in a customized animation .. literalinclude:: ../../../examples/Python/Advanced/customized_visualization.py :language: python - :lineno-start: 74 - :lines: 74-118 + :lineno-start: 87 + :lines: 87-134 :linenos: This function reads a camera trajectory, then defines an animation function ``move_forward`` to travel through the camera trajectory. In this animation function, both color image and depth image are captured using ``Visualizer.capture_depth_float_buffer`` and ``Visualizer.capture_screen_float_buffer`` respectively. They are saved in files. diff --git a/docs/tutorial/Advanced/fast_global_registration.rst b/docs/tutorial/Advanced/fast_global_registration.rst index c3f177afa1d..e3277c50d4b 100644 --- a/docs/tutorial/Advanced/fast_global_registration.rst +++ b/docs/tutorial/Advanced/fast_global_registration.rst @@ -20,8 +20,8 @@ Input .. literalinclude:: ../../../examples/Python/Advanced/fast_global_registration.py :language: python - :lineno-start: 27 - :lines: 27-29 + :lineno-start: 29 + :lines: 29-31 :linenos: For the pair comparison, the script reuses the ``prepare_dataset`` function defined in :ref:`global_registration`. @@ -32,8 +32,8 @@ Baseline .. literalinclude:: ../../../examples/Python/Advanced/fast_global_registration.py :language: python - :lineno-start: 31 - :lines: 31-37 + :lineno-start: 33 + :lines: 33-40 :linenos: This script calls RANSAC based :ref:`global_registration` as a baseline. After registration it displays the following result. @@ -52,8 +52,8 @@ With the same input used for a baseline, the next script calls the implementatio .. literalinclude:: ../../../examples/Python/Advanced/fast_global_registration.py :language: python - :lineno-start: 14 - :lines: 14-22 + :lineno-start: 15 + :lines: 15-24 :linenos: This script displays the following result. 
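For readers reproducing this outside the bundled scripts, the core call is small. The following is a minimal sketch, assuming ``source_down``, ``target_down``, ``source_fpfh``, ``target_fpfh`` and ``voxel_size`` come from the ``prepare_dataset`` helper of :ref:`global_registration`:

.. code-block:: python

    import open3d as o3d

    # The tutorial uses half the downsampling voxel size as the
    # correspondence threshold for fast global registration.
    distance_threshold = voxel_size * 0.5
    result_fast = o3d.registration.registration_fast_based_on_feature_matching(
        source_down, target_down, source_fpfh, target_fpfh,
        o3d.registration.FastGlobalRegistrationOption(
            maximum_correspondence_distance=distance_threshold))
    print(result_fast.transformation)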
diff --git a/docs/tutorial/Advanced/global_registration.rst b/docs/tutorial/Advanced/global_registration.rst index ef488d93b26..85a8b5be9d7 100644 --- a/docs/tutorial/Advanced/global_registration.rst +++ b/docs/tutorial/Advanced/global_registration.rst @@ -16,8 +16,8 @@ Input .. literalinclude:: ../../../examples/Python/Advanced/global_registration.py :language: python - :lineno-start: 34 - :lines: 34-43 + :lineno-start: 39 + :lines: 39-50 :linenos: This script reads a source point cloud and a target point cloud from two files. They are misaligned with an identity matrix as transformation. @@ -32,8 +32,8 @@ Extract geometric feature .. literalinclude:: ../../../examples/Python/Advanced/global_registration.py :language: python - :lineno-start: 19 - :lines: 19-31 + :lineno-start: 21 + :lines: 21-36 :linenos: We down sample the point cloud, estimate normals, then compute a FPFH feature for each point. The FPFH feature is a 33-dimensional vector that describes the local geometric property of a point. A nearest neighbor query in the 33-dimensinal space can return points with similar local geometric structures. See [Rasu2009]_ for details. @@ -45,8 +45,8 @@ RANSAC .. literalinclude:: ../../../examples/Python/Advanced/global_registration.py :language: python - :lineno-start: 49 - :lines: 49-61 + :lineno-start: 53 + :lines: 53-66 :linenos: We use RANSAC for global registration. In each RANSAC iteration, ``ransac_n`` random points are picked from the source point cloud. Their corresponding points in the target point cloud are detected by querying the nearest neighbor in the 33-dimensional FPFH feature space. A pruning step takes fast pruning algorithms to quickly reject false matches early. @@ -75,8 +75,8 @@ For performance reason, the global registration is only performed on a heavily d .. literalinclude:: ../../../examples/Python/Advanced/global_registration.py :language: python - :lineno-start: 64 - :lines: 64-71 + :lineno-start: 69 + :lines: 69-77 :linenos: Outputs a tight alignment. This summarizes a complete pairwise registration workflow. diff --git a/docs/tutorial/Advanced/interactive_visualization.rst b/docs/tutorial/Advanced/interactive_visualization.rst index ccbd57eecaa..29ca7ba3bf0 100644 --- a/docs/tutorial/Advanced/interactive_visualization.rst +++ b/docs/tutorial/Advanced/interactive_visualization.rst @@ -20,8 +20,8 @@ Crop geometry .. literalinclude:: ../../../examples/Python/Advanced/interactive_visualization.py :language: python - :lineno-start: 11 - :lines: 11-20 + :lineno-start: 12 + :lines: 12-23 :linenos: This function simply reads a point cloud and calls ``draw_geometries_with_editing``. This function provides vertex selection and cropping. @@ -65,8 +65,8 @@ The following script register two point clouds using point-to-point ICP. It gets .. literalinclude:: ../../../examples/Python/Advanced/interactive_visualization.py :language: python - :lineno-start: 43 - :lines: 43-52 + :lineno-start: 51 + :lines: 51-60 :linenos: The script reads two point clouds, and visualize the point clouds before alignment. @@ -76,8 +76,8 @@ The script reads two point clouds, and visualize the point clouds before alignme .. literalinclude:: ../../../examples/Python/Advanced/interactive_visualization.py :language: python - :lineno-start: 30 - :lines: 30-41 + :lineno-start: 35 + :lines: 35-48 :linenos: Function ``pick_points(pcd)`` makes an instance of ``VisualizerWithEditing``. To mimic ``draw_geometries``, it creates windows, adds geometry, visualize geometry, and terminates. 
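In isolation, the picking workflow is only a few lines. The following is a minimal sketch, assuming ``pcd`` is an already-loaded point cloud:

.. code-block:: python

    import open3d as o3d

    vis = o3d.visualization.VisualizerWithEditing()
    vis.create_window()
    vis.add_geometry(pcd)
    vis.run()  # the user picks vertices, then closes the window
    vis.destroy_window()
    picked_indices = vis.get_picked_points()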
A novel interface function from ``VisualizerWithEditing`` is ``get_picked_points()``, which returns the indices of user-picked vertices.
@@ -115,8 +115,8 @@ Registration using user correspondences
.. literalinclude:: ../../../examples/Python/Advanced/interactive_visualization.py
   :language: python
-   :lineno-start: 53
-   :lines: 53-71
+   :lineno-start: 61
+   :lines: 61-80
   :linenos:

The latter part of the demo computes an initial transformation based on the user-provided correspondences. This script builds pairs of correspondences using ``Vector2iVector(corr)``. It utilizes ``TransformationEstimationPointToPoint.compute_transformation`` to compute the initial transformation from the correspondences. The initial transformation is refined using ``registration_icp``.
diff --git a/docs/tutorial/Advanced/multiway_registration.rst b/docs/tutorial/Advanced/multiway_registration.rst
index e6cec98fa5b..6236108a1f9 100644
--- a/docs/tutorial/Advanced/multiway_registration.rst
+++ b/docs/tutorial/Advanced/multiway_registration.rst
@@ -32,7 +32,7 @@ The first part of the tutorial script reads three point clouds from files. The p
.. literalinclude:: ../../../examples/Python/Advanced/multiway_registration.py
   :language: python
   :lineno-start: 24
-   :lines: 24-58
+   :lines: 24-68
   :linenos:

A pose graph has two key elements: nodes and edges. A node is a piece of geometry :math:`\mathbf{P}_{i}` associated with a pose matrix :math:`\mathbf{T}_{i}` which transforms :math:`\mathbf{P}_{i}` into the global space. The set :math:`\{\mathbf{T}_{i}\}` are the unknown variables to be optimized. ``PoseGraph.nodes`` is a list of ``PoseGraphNode``. We set the global space to be the space of :math:`\mathbf{P}_{0}`. Thus :math:`\mathbf{T}_{0}` is the identity matrix. The other pose matrices are initialized by accumulating transformations between neighboring nodes. The neighboring nodes usually have large overlap and can be registered with :ref:`point_to_plane_icp`.
@@ -50,8 +50,8 @@ The script creates a pose graph with three nodes and three edges. Among the edge
.. literalinclude:: ../../../examples/Python/Advanced/multiway_registration.py
   :language: python
-   :lineno-start: 72
-   :lines: 72-79
+   :lineno-start: 82
+   :lines: 82-89
   :linenos:

Open3D uses the function ``global_optimization`` to perform pose graph optimization. Two types of optimization methods can be chosen: ``GlobalOptimizationGaussNewton`` or ``GlobalOptimizationLevenbergMarquardt``. The latter is recommended since it has better convergence properties. Class ``GlobalOptimizationConvergenceCriteria`` can be used to set the maximum number of iterations and various optimization parameters.
@@ -87,8 +87,8 @@ Visualize optimization
.. literalinclude:: ../../../examples/Python/Advanced/multiway_registration.py
   :language: python
-   :lineno-start: 81
-   :lines: 81-85
+   :lineno-start: 91
+   :lines: 91-95
   :linenos:

Outputs:
@@ -105,8 +105,8 @@ Make a combined point cloud
.. literalinclude:: ../../../examples/Python/Advanced/multiway_registration.py
   :language: python
-   :lineno-start: 87
-   :lines: 87-95
+   :lineno-start: 97
+   :lines: 97-106
   :linenos:

.. image:: ../../_static/Advanced/multiway_registration/combined.png
diff --git a/docs/tutorial/Advanced/non_blocking_visualization.rst b/docs/tutorial/Advanced/non_blocking_visualization.rst
index c16e7ac9249..928b0b5d1f9 100644
--- a/docs/tutorial/Advanced/non_blocking_visualization.rst
+++ b/docs/tutorial/Advanced/non_blocking_visualization.rst
@@ -51,7 +51,7 @@ Prepare example data
.. literalinclude:: ../../../examples/Python/Advanced/non_blocking_visualization.py
   :language: python
   :lineno-start: 13
-   :lines: 13-28
+   :lines: 13-23
   :linenos:

This part reads two point clouds and downsamples them. The source point cloud is intentionally transformed to create the misalignment. Both point clouds are flipped for better visualization.
@@ -62,8 +62,8 @@ Initialize Visualizer class
.. literalinclude:: ../../../examples/Python/Advanced/non_blocking_visualization.py
   :language: python
-   :lineno-start: 30
-   :lines: 30-33
+   :lineno-start: 25
+   :lines: 25-28
   :linenos:

These lines make an instance of the visualizer class, open a visualizer window, and add two geometries to the visualizer.
@@ -73,8 +73,8 @@ Transform geometry and visualize it
.. literalinclude:: ../../../examples/Python/Advanced/non_blocking_visualization.py
   :language: python
-   :lineno-start: 38
-   :lines: 38-48
+   :lineno-start: 33
+   :lines: 33-44
   :linenos:

This script calls ``registration_icp`` for every iteration. Note that it explicitly forces only one ICP iteration via ``ICPConvergenceCriteria(max_iteration = 1)``. This is a trick to retrieve a slight pose update from a single ICP iteration. After ICP, the source geometry is transformed accordingly.
diff --git a/docs/tutorial/Advanced/pointcloud_outlier_removal.rst b/docs/tutorial/Advanced/pointcloud_outlier_removal.rst
index 017e3ac1826..945dd83e76c 100644
--- a/docs/tutorial/Advanced/pointcloud_outlier_removal.rst
+++ b/docs/tutorial/Advanced/pointcloud_outlier_removal.rst
@@ -9,7 +9,7 @@ and artifact that one would like to remove. This tutorial address outlier removal
.. literalinclude:: ../../../examples/Python/Advanced/pointcloud_outlier_removal.py
   :language: python
   :lineno-start: 5
-   :lines: 5-41
+   :lines: 5-
   :linenos:
@@ -20,8 +20,8 @@ A point cloud is loaded and downsampled using ``voxel_downsample``.
.. literalinclude:: ../../../examples/Python/Advanced/pointcloud_outlier_removal.py
   :language: python
-   :lineno-start: 21
-   :lines: 21-27
+   :lineno-start: 22
+   :lines: 22-28
   :linenos:

.. image:: ../../_static/Advanced/pointcloud_outlier_removal/voxel_down_sample.png

For comparison, ``uniform_down_sample`` can downsample point cloud by collecting
.. literalinclude:: ../../../examples/Python/Advanced/pointcloud_outlier_removal.py
   :language: python
-   :lineno-start: 29
-   :lines: 29-31
+   :lineno-start: 30
+   :lines: 30-32
   :linenos:

.. image:: ../../_static/Advanced/pointcloud_outlier_removal/uniform_down_sample.png

The selected points and the non-selected points are visualized.
.. literalinclude:: ../../../examples/Python/Advanced/pointcloud_outlier_removal.py
   :language: python
-   :lineno-start: 9
-   :lines: 9-16
+   :lineno-start: 10
+   :lines: 10-17
   :linenos:

Statistical outlier removal
.. literalinclude:: ../../../examples/Python/Advanced/pointcloud_outlier_removal.py
   :language: python
-   :lineno-start: 33
-   :lines: 33-36
+   :lineno-start: 34
+   :lines: 34-38
   :linenos:

``statistical_outlier_removal`` removes points that are further away from their neighbors compared to the average for the point cloud. It takes two input parameters:
@@ -73,8 +73,8 @@ Radius outlier removal
.. literalinclude:: ../../../examples/Python/Advanced/pointcloud_outlier_removal.py
   :language: python
-   :lineno-start: 38
-   :lines: 38-41
+   :lineno-start: 40
+   :lines: 40-44
   :linenos:

``radius_outlier_removal`` removes points that have few neighbors in a given sphere around them.
Two parameters can be used to tune the filter to your data: diff --git a/docs/tutorial/Advanced/rgbd_integration.rst b/docs/tutorial/Advanced/rgbd_integration.rst index a09cf98c1c8..bc793ea1ab0 100644 --- a/docs/tutorial/Advanced/rgbd_integration.rst +++ b/docs/tutorial/Advanced/rgbd_integration.rst @@ -47,7 +47,7 @@ TSDF volume integration .. literalinclude:: ../../../examples/Python/Advanced/rgbd_integration.py :language: python :lineno-start: 13 - :lines: 13-24 + :lines: 13-30 :linenos: Open3D provides two types of TSDF volumes: ``UniformTSDFVolume`` and ``ScalableTSDFVolume``. The latter is recommended since it uses a hierarchical structure and thus supports larger scenes. @@ -63,8 +63,8 @@ Mesh extraction uses the marching cubes algorithm [LorensenAndCline1987]_. .. literalinclude:: ../../../examples/Python/Advanced/rgbd_integration.py :language: python - :lineno-start: 26 - :lines: 26-29 + :lineno-start: 32 + :lines: 32-35 :linenos: Outputs: diff --git a/docs/tutorial/Basic/icp_registration.rst b/docs/tutorial/Basic/icp_registration.rst index a81b99f2cf9..3b0ee7fd41e 100644 --- a/docs/tutorial/Basic/icp_registration.rst +++ b/docs/tutorial/Basic/icp_registration.rst @@ -18,8 +18,8 @@ Helper visualization function .. literalinclude:: ../../../examples/Python/Basic/icp_registration.py :language: python - :lineno-start: 11 - :lines: 11-17 + :lineno-start: 12 + :lines: 12-18 :linenos: This function visualizes a target point cloud, and a source point cloud transformed with an alignment transformation. The target point cloud and the source point cloud are painted with cyan and yellow colors respectively. The more and tighter the two point clouds overlap with each other, the better the alignment result is. @@ -31,8 +31,8 @@ Input .. literalinclude:: ../../../examples/Python/Basic/icp_registration.py :language: python - :lineno-start: 20 - :lines: 20-28 + :lineno-start: 22 + :lines: 22-28 :linenos: This script reads a source point cloud and a target point cloud from two files. A rough transformation is given. @@ -77,7 +77,7 @@ We first show a point-to-point ICP algorithm [BeslAndMcKay1992]_ using an object .. literalinclude:: ../../../examples/Python/Basic/icp_registration.py :language: python :lineno-start: 34 - :lines: 34-41 + :lines: 34-42 :linenos: Class ``TransformationEstimationPointToPoint`` provides functions to compute the residuals and Jacobian matrices of the point-to-point ICP objective. Function ``registration_icp`` takes it as a parameter and runs point-to-point ICP to obtain results. @@ -137,8 +137,8 @@ where :math:`\mathbf{n}_{\mathbf{p}}` is the normal of point :math:`\mathbf{p}`. .. literalinclude:: ../../../examples/Python/Basic/icp_registration.py :language: python - :lineno-start: 43 - :lines: 43-50 + :lineno-start: 44 + :lines: 44-52 :linenos: ``registration_icp`` is called with a different parameter ``TransformationEstimationPointToPlane``. Internally, this class implements functions to compute the residuals and Jacobian matrices of the point-to-plane ICP objective. diff --git a/docs/tutorial/Basic/kdtree.rst b/docs/tutorial/Basic/kdtree.rst index 0e158dc28e4..7e79c84bb44 100644 --- a/docs/tutorial/Basic/kdtree.rst +++ b/docs/tutorial/Basic/kdtree.rst @@ -19,7 +19,7 @@ Build KDTree from point cloud .. literalinclude:: ../../../examples/Python/Basic/kdtree.py :language: python :lineno-start: 12 - :lines: 12-16 + :lines: 12-17 :linenos: This script reads a point cloud and builds a KDTree. This is a preprocessing step for the following nearest neighbor queries. 
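Condensed into a standalone snippet, the build-and-query pattern looks like this — a minimal sketch, assuming the ``fragment.ply`` test data used elsewhere in these tutorials:

.. code-block:: python

    import numpy as np
    import open3d as o3d

    # Build the KDTree once; subsequent queries reuse it.
    pcd = o3d.io.read_point_cloud("../../TestData/fragment.ply")
    pcd.paint_uniform_color([0.5, 0.5, 0.5])
    pcd_tree = o3d.geometry.KDTreeFlann(pcd)

    # Paint the anchor point red and its 200 nearest neighbors blue.
    pcd.colors[1500] = [1, 0, 0]
    [k, idx, _] = pcd_tree.search_knn_vector_3d(pcd.points[1500], 200)
    np.asarray(pcd.colors)[idx[1:], :] = [0, 0, 1]
    o3d.visualization.draw_geometries([pcd])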
@@ -31,8 +31,8 @@ Find neighboring points .. literalinclude:: ../../../examples/Python/Basic/kdtree.py :language: python - :lineno-start: 18 - :lines: 18-19 + :lineno-start: 19 + :lines: 19-20 :linenos: We pick the 1500-th point as the anchor point and paint it red. @@ -42,8 +42,8 @@ Using search_knn_vector_3d .. literalinclude:: ../../../examples/Python/Basic/kdtree.py :language: python - :lineno-start: 21 - :lines: 21-23 + :lineno-start: 22 + :lines: 22-24 :linenos: Function ``search_knn_vector_3d`` returns a list of indices of the k nearest neighbors of the anchor point. These neighboring points are painted with blue color. Note that we convert ``pcd.colors`` to a numpy array to make batch access to the point colors, and broadcast a blue color [0, 0, 1] to all the selected points. We skip the first index since it is the anchor point itself. @@ -54,16 +54,16 @@ Using search_radius_vector_3d .. literalinclude:: ../../../examples/Python/Basic/kdtree.py :language: python - :lineno-start: 25 - :lines: 25-27 + :lineno-start: 26 + :lines: 26-28 :linenos: Similarly, we can use ``search_radius_vector_3d`` to query all points with distances to the anchor point less than a given radius. We paint these points with green color. .. literalinclude:: ../../../examples/Python/Basic/kdtree.py :language: python - :lineno-start: 29 - :lines: 29-31 + :lineno-start: 30 + :lines: 30-32 :linenos: The visualization looks like: diff --git a/docs/tutorial/Basic/mesh.rst b/docs/tutorial/Basic/mesh.rst index 61578f52f4d..c47e485b6b1 100644 --- a/docs/tutorial/Basic/mesh.rst +++ b/docs/tutorial/Basic/mesh.rst @@ -20,7 +20,7 @@ Print vertices and triangles .. literalinclude:: ../../../examples/Python/Basic/mesh.py :language: python :lineno-start: 13 - :lines: 13-17 + :lines: 13-18 :linenos: Outputs: diff --git a/docs/tutorial/Basic/pointcloud.rst b/docs/tutorial/Basic/pointcloud.rst index 7503888c82f..ed4691677a7 100644 --- a/docs/tutorial/Basic/pointcloud.rst +++ b/docs/tutorial/Basic/pointcloud.rst @@ -72,7 +72,7 @@ Another basic operation for point cloud is point normal estimation. .. literalinclude:: ../../../examples/Python/Basic/pointcloud.py :language: python :lineno-start: 22 - :lines: 22-25 + :lines: 22-27 :linenos: ``estimate_normals`` computes normal for every point. The function finds adjacent points and calculate the principal axis of the adjacent points using covariance analysis. @@ -94,8 +94,8 @@ Estimated normal vectors can be retrieved by ``normals`` variable of ``downpcd`` .. literalinclude:: ../../../examples/Python/Basic/pointcloud.py :language: python - :lineno-start: 27 - :lines: 27-28 + :lineno-start: 29 + :lines: 29-30 :linenos: .. code-block:: sh @@ -108,8 +108,8 @@ Normal vectors can be transformed as a numpy array using ``np.asarray``. .. literalinclude:: ../../../examples/Python/Basic/pointcloud.py :language: python - :lineno-start: 29 - :lines: 29-30 + :lineno-start: 31 + :lines: 31-32 :linenos: .. code-block:: sh @@ -135,8 +135,8 @@ Crop point cloud .. literalinclude:: ../../../examples/Python/Basic/pointcloud.py :language: python - :lineno-start: 33 - :lines: 33-37 + :lineno-start: 35 + :lines: 35-40 :linenos: ``read_selection_polygon_volume`` reads a json file that specifies polygon selection area. @@ -152,8 +152,8 @@ Paint point cloud .. literalinclude:: ../../../examples/Python/Basic/pointcloud.py :language: python - :lineno-start: 39 - :lines: 39-42 + :lineno-start: 42 + :lines: 42-45 :linenos: ``paint_uniform_color`` paints all the points to a uniform color. 
The color is in RGB space, [0, 1] range. diff --git a/docs/tutorial/Basic/python_interface.rst b/docs/tutorial/Basic/python_interface.rst index 18375f7cbae..ec575982a4b 100644 --- a/docs/tutorial/Basic/python_interface.rst +++ b/docs/tutorial/Basic/python_interface.rst @@ -38,8 +38,8 @@ that show very basic usage of Open3D Python module. .. literalinclude:: ../../../examples/Python/Basic/python_binding.py :language: python - :lineno-start: 9 - :lines: 9-12 + :lineno-start: 10 + :lines: 10-12 :linenos: This imports ``read_point_cloud`` function from ``open3d`` module. It reads a point cloud file and returns an instance of ``PointCloud`` class. ``print(pcd)`` prints brief information of the point cloud: @@ -58,8 +58,8 @@ It is recommended to use Python built-in ``help`` function to get definitions an .. literalinclude:: ../../../examples/Python/Basic/python_binding.py :language: python - :lineno-start: 14 - :lines: 14-18 + :lineno-start: 15 + :lines: 15-18 :linenos: diff --git a/docs/tutorial/Basic/rgbd_images/redwood.rst b/docs/tutorial/Basic/rgbd_images/redwood.rst index 969a42af3cf..b6ed8ba9727 100644 --- a/docs/tutorial/Basic/rgbd_images/redwood.rst +++ b/docs/tutorial/Basic/rgbd_images/redwood.rst @@ -15,8 +15,8 @@ The Redwood format stored depth in a 16-bit single channel image. The integer va .. literalinclude:: ../../../../examples/Python/Basic/rgbd_redwood.py :language: python - :lineno-start: 12 - :lines: 12-17 + :lineno-start: 11 + :lines: 11-16 :linenos: The default conversion function ``create_rgbd_image_from_color_and_depth`` creates an ``RGBDImage`` from a pair of color and depth image. The color image is converted into a grayscale image, stored in ``float`` ranged in [0, 1]. The depth image is stored in ``float``, representing the depth value in meters. ``print(rgbd_image)`` yields: @@ -45,8 +45,8 @@ The RGBD image can be converted into a point cloud, given a set of camera parame .. literalinclude:: ../../../../examples/Python/Basic/rgbd_redwood.py :language: python - :lineno-start: 25 - :lines: 25-29 + :lineno-start: 26 + :lines: 26-32 :linenos: Here we use ``PinholeCameraIntrinsicParameters.PrimeSenseDefault`` as default camera parameter. It has image resolution 640x480, focal length (fx, fy) = (525.0, 525.0), and optical center (cx, cy) = (319.5, 239.5). An identity matrix is used as the default extrinsic parameter. ``pcd.transform`` applies an up-down flip transformation on the point cloud for better visualization purpose. This outputs: diff --git a/docs/tutorial/Basic/visualization.rst b/docs/tutorial/Basic/visualization.rst index eb13e64f0f0..c165f89b984 100644 --- a/docs/tutorial/Basic/visualization.rst +++ b/docs/tutorial/Basic/visualization.rst @@ -97,7 +97,7 @@ Geometry primitives .. literalinclude:: ../../../examples/Python/Basic/visualization.py :language: python :lineno-start: 16 - :lines: 16-26 + :lines: 16-27 :linenos: This script generates a cubic, a sphere, and a cylinder using ``create_mesh_cubic``, ``create_mesh_sphere`` and @@ -110,8 +110,8 @@ Draw multiple geometries .. literalinclude:: ../../../examples/Python/Basic/visualization.py :language: python - :lineno-start: 28 - :lines: 28-32 + :lineno-start: 29 + :lines: 29-35 :linenos: ``draw_geometries`` takes a list of geometries and renders them all together. Alternatively, ``TriangleMesh`` supports a ``+`` operator to combine multiple meshes into one. We recommend the first approach since it supports a combination of different geometries (e.g., a mesh can be rendered in tandem with a point cloud). 
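Both approaches fit in a few lines. The following is a minimal sketch, assuming the mesh factory functions of the same API generation as this patch (``create_mesh_sphere``, ``create_mesh_cylinder``):

.. code-block:: python

    import open3d as o3d

    sphere = o3d.geometry.create_mesh_sphere(radius=1.0)
    cylinder = o3d.geometry.create_mesh_cylinder(radius=0.3, height=4.0)
    sphere.compute_vertex_normals()
    cylinder.compute_vertex_normals()

    # Recommended: pass a list, so meshes and point clouds can mix freely.
    o3d.visualization.draw_geometries([sphere, cylinder])

    # Alternative: merge the meshes with `+` and draw the single result.
    o3d.visualization.draw_geometries([sphere + cylinder])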
@@ -126,8 +126,8 @@ Draw line set .. literalinclude:: ../../../examples/Python/Basic/visualization.py :language: python - :lineno-start: 34 - :lines: 34-45 + :lineno-start: 37 + :lines: 37-47 :linenos: To draw lines, it is necessary to define ``LineSet`` and create a set of points and a set of edges. An edge is a pair of point indices. The above example creates custom ``points`` and edges (denoted as ``lines``) to make a cubic. Color is optional - red color ``[1,0,0]`` is assigned to each edge in this example. This script visualizes the following cubic. diff --git a/docs/tutorial/ReconstructionSystem/integrate_scene.rst b/docs/tutorial/ReconstructionSystem/integrate_scene.rst index 9ca5c26ef31..39866cd29dd 100644 --- a/docs/tutorial/ReconstructionSystem/integrate_scene.rst +++ b/docs/tutorial/ReconstructionSystem/integrate_scene.rst @@ -16,7 +16,7 @@ Integrate RGBD frames .. literalinclude:: ../../../examples/Python/ReconstructionSystem/integrate_scene.py :language: python :lineno-start: 13 - :lines: 5,14-51 + :lines: 5,17-54 :linenos: This function first reads the alignment results from both :ref:`reconstruction_system_make_fragments` and :ref:`reconstruction_system_register_fragments`, then computes the pose of each RGBD image in the global space. After that, RGBD images are integrated using :ref:`rgbd_integration`. diff --git a/docs/tutorial/ReconstructionSystem/make_fragments.rst b/docs/tutorial/ReconstructionSystem/make_fragments.rst index 1543d76b03a..afd43b22407 100644 --- a/docs/tutorial/ReconstructionSystem/make_fragments.rst +++ b/docs/tutorial/ReconstructionSystem/make_fragments.rst @@ -17,8 +17,8 @@ Register RGBD image pairs .. literalinclude:: ../../../examples/Python/ReconstructionSystem/make_fragments.py :language: python - :lineno-start: 29 - :lines: 5,30-55 + :lineno-start: 34 + :lines: 5,35-60 :linenos: The function reads a pair of RGBD images and registers the ``source_rgbd_image`` to the ``target_rgbd_image``. Open3D function ``compute_rgbd_odometry`` is called to align the RGBD images. For adjacent RGBD images, an identity matrix is used as initialization. For non-adjacent RGBD images, wide baseline matching is used as an initialization. In particular, function ``pose_estimation`` computes OpenCV ORB feature to match sparse features over wide baseline images, then performs 5-point RANSAC to estimate a rough alignment. It is used as the initialization of ``compute_rgbd_odometry``. @@ -31,8 +31,8 @@ Multiway registration .. literalinclude:: ../../../examples/Python/ReconstructionSystem/make_fragments.py :language: python - :lineno-start: 56 - :lines: 5,57-93 + :lineno-start: 62 + :lines: 5,63-109 :linenos: This script uses the technique demonstrated in :ref:`multiway_registration`. Function ``make_posegraph_for_fragment`` builds a pose graph for multiway registration of all RGBD images in this sequence. Each graph node represents an RGBD image and its pose which transforms the geometry to the global fragment space. For efficiency, only key frames are used. @@ -41,8 +41,8 @@ Once a pose graph is created, multiway registration is performed by calling func .. literalinclude:: ../../../examples/Python/ReconstructionSystem/optimize_posegraph.py :language: python - :lineno-start: 11 - :lines: 5,12-38 + :lineno-start: 12 + :lines: 5,13-40 :linenos: This function calls ``global_optimization`` to estimate poses of the RGBD images. @@ -54,8 +54,8 @@ Make a fragment .. 
literalinclude:: ../../../examples/Python/ReconstructionSystem/make_fragments.py :language: python - :lineno-start: 94 - :lines: 5,95-129 + :lineno-start: 111 + :lines: 5,112-146 :linenos: Once the poses are estimates, :ref:`rgbd_integration` is used to reconstruct a colored fragment from each RGBD sequence. @@ -65,8 +65,8 @@ Batch processing .. literalinclude:: ../../../examples/Python/ReconstructionSystem/make_fragments.py :language: python - :lineno-start: 130 - :lines: 5,131-149 + :lineno-start: 168 + :lines: 5,169-188 :linenos: The main function calls each individual function explained above. diff --git a/docs/tutorial/ReconstructionSystem/refine_registration.rst b/docs/tutorial/ReconstructionSystem/refine_registration.rst index d19a4499dbb..4649f85ee31 100644 --- a/docs/tutorial/ReconstructionSystem/refine_registration.rst +++ b/docs/tutorial/ReconstructionSystem/refine_registration.rst @@ -16,8 +16,8 @@ Fine-grained registration .. literalinclude:: ../../../examples/Python/ReconstructionSystem/refine_registration.py :language: python - :lineno-start: 30 - :lines: 5,31-69 + :lineno-start: 38 + :lines: 5,39-92 :linenos: Two options are given for the fine-grained registration. The ``color`` is recommended since it uses color information to prevent drift. Details see [Park2017]_. @@ -28,8 +28,8 @@ Multiway registration .. literalinclude:: ../../../examples/Python/ReconstructionSystem/refine_registration.py :language: python - :lineno-start: 14 - :lines: 5,15-28 + :lineno-start: 16 + :lines: 5,17-36 :linenos: This script uses the technique demonstrated in :ref:`multiway_registration`. Function ``update_posegrph_for_refined_scene`` builds a pose graph for multiway registration of all fragments. Each graph node represents a fragments and its pose which transforms the geometry to the global space. @@ -38,8 +38,8 @@ Once a pose graph is built, function ``optimize_posegraph_for_scene`` is called .. literalinclude:: ../../../examples/Python/ReconstructionSystem/optimize_posegraph.py :language: python - :lineno-start: 49 - :lines: 5,50-58 + :lineno-start: 52 + :lines: 5,53-60 :linenos: Main registration loop @@ -49,8 +49,8 @@ The function ``make_posegraph_for_refined_scene`` below calls all the functions .. literalinclude:: ../../../examples/Python/ReconstructionSystem/refine_registration.py :language: python - :lineno-start: 107 - :lines: 5,108-154 + :lineno-start: 131 + :lines: 5,132-178 :linenos: The main workflow is: pairwise local refinement -> multiway registration. diff --git a/docs/tutorial/ReconstructionSystem/register_fragments.rst b/docs/tutorial/ReconstructionSystem/register_fragments.rst index 1427a695ab9..227b7273bce 100644 --- a/docs/tutorial/ReconstructionSystem/register_fragments.rst +++ b/docs/tutorial/ReconstructionSystem/register_fragments.rst @@ -18,8 +18,8 @@ Preprocess point cloud .. literalinclude:: ../../../examples/Python/ReconstructionSystem/register_fragments.py :language: python - :lineno-start: 15 - :lines: 5,16-24 + :lineno-start: 17 + :lines: 5,18-29 :linenos: This function downsamples point cloud to make a point cloud sparser and regularly distributed. Normals and FPFH feature are precomputed. See :ref:`voxel_downsampling`, :ref:`vertex_normal_estimation`, and :ref:`extract_geometric_feature` for more details. @@ -30,8 +30,8 @@ Compute initial registration .. 
literalinclude:: ../../../examples/Python/ReconstructionSystem/register_fragments.py :language: python - :lineno-start: 50 - :lines: 5,51-77 + :lineno-start: 55 + :lines: 5,56-82 :linenos: This function computes a rough alignment between two fragments. If the fragments are neighboring fragments, the rough alignment is determined by an aggregating RGBD odometry obtained from :ref:`reconstruction_system_make_fragments`. Otherwise, ``register_point_cloud_fpfh`` is called to perform global registration. Note that global registration is less reliable according to [Choi2015]_. @@ -44,8 +44,8 @@ Pairwise global registration .. literalinclude:: ../../../examples/Python/ReconstructionSystem/register_fragments.py :language: python - :lineno-start: 25 - :lines: 5,26-49 + :lineno-start: 31 + :lines: 5,32-53 :linenos: This function uses :ref:`feature_matching` or :ref:`fast_global_registration` for pairwise global registration. @@ -58,8 +58,8 @@ Multiway registration .. literalinclude:: ../../../examples/Python/ReconstructionSystem/register_fragments.py :language: python - :lineno-start: 78 - :lines: 5,79-93 + :lineno-start: 84 + :lines: 5,85-104 :linenos: This script uses the technique demonstrated in :ref:`multiway_registration`. Function ``update_posegrph_for_scene`` builds a pose graph for multiway registration of all fragments. Each graph node represents a fragments and its pose which transforms the geometry to the global space. @@ -68,8 +68,8 @@ Once a pose graph is built, function ``optimize_posegraph_for_scene`` is called .. literalinclude:: ../../../examples/Python/ReconstructionSystem/optimize_posegraph.py :language: python - :lineno-start: 39 - :lines: 5,40-48 + :lineno-start: 42 + :lines: 5,43-50 :linenos: Main registration loop @@ -79,8 +79,8 @@ The function ``make_posegraph_for_scene`` below calls all the functions introduc .. literalinclude:: ../../../examples/Python/ReconstructionSystem/register_fragments.py :language: python - :lineno-start: 123 - :lines: 5,124-165 + :lineno-start: 136 + :lines: 5,137-177 :linenos: The main workflow is: pairwise global registration -> multiway registration. 
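Across these scripts the multiway step reduces to one pattern: accumulate nodes and edges, then optimize. The following is a minimal sketch for two fragments, assuming a pairwise transformation ``trans_01`` and information matrix ``info_01`` (hypothetical placeholders for the output of the pairwise registration above):

.. code-block:: python

    import numpy as np
    import open3d as o3d

    pose_graph = o3d.registration.PoseGraph()
    # Node 0 defines the global frame; node 1 is initialized by inverting
    # the accumulated pairwise transformation.
    pose_graph.nodes.append(o3d.registration.PoseGraphNode(np.identity(4)))
    pose_graph.nodes.append(
        o3d.registration.PoseGraphNode(np.linalg.inv(trans_01)))
    # Odometry edge between adjacent fragments, hence uncertain=False.
    pose_graph.edges.append(
        o3d.registration.PoseGraphEdge(0, 1, trans_01, info_01,
                                       uncertain=False))
    o3d.registration.global_optimization(
        pose_graph,
        o3d.registration.GlobalOptimizationLevenbergMarquardt(),
        o3d.registration.GlobalOptimizationConvergenceCriteria(),
        o3d.registration.GlobalOptimizationOption(
            max_correspondence_distance=0.075,
            edge_prune_threshold=0.25,
            reference_node=0))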
diff --git a/examples/Python/Advanced/__init__.py b/examples/Python/Advanced/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/examples/Python/Advanced/camera_trajectory.py b/examples/Python/Advanced/camera_trajectory.py index 28e221a53d0..8bcee617e82 100644 --- a/examples/Python/Advanced/camera_trajectory.py +++ b/examples/Python/Advanced/camera_trajectory.py @@ -5,36 +5,41 @@ # examples/Python/Advanced/camera_trajectory.py import numpy as np -from open3d import * +import open3d as o3d if __name__ == "__main__": print("Testing camera in open3d ...") - intrinsic = PinholeCameraIntrinsic( - PinholeCameraIntrinsicParameters.PrimeSenseDefault) + intrinsic = o3d.camera.PinholeCameraIntrinsic( + o3d.camera.PinholeCameraIntrinsicParameters.PrimeSenseDefault) print(intrinsic.intrinsic_matrix) - print(PinholeCameraIntrinsic()) - x = PinholeCameraIntrinsic(640, 480, 525, 525, 320, 240) + print(o3d.camera.PinholeCameraIntrinsic()) + x = o3d.camera.PinholeCameraIntrinsic(640, 480, 525, 525, 320, 240) print(x) print(x.intrinsic_matrix) - write_pinhole_camera_intrinsic("test.json", x) - y = read_pinhole_camera_intrinsic("test.json") + o3d.io.write_pinhole_camera_intrinsic("test.json", x) + y = o3d.io.read_pinhole_camera_intrinsic("test.json") print(y) print(np.asarray(y.intrinsic_matrix)) print("Read a trajectory and combine all the RGB-D images.") - pcds = []; - trajectory = read_pinhole_camera_trajectory("../../TestData/RGBD/trajectory.log") - write_pinhole_camera_trajectory("test.json", trajectory) + pcds = [] + trajectory = o3d.io.read_pinhole_camera_trajectory( + "../../TestData/RGBD/trajectory.log") + o3d.io.write_pinhole_camera_trajectory("test.json", trajectory) print(trajectory) print(trajectory.parameters[0].extrinsic) print(np.asarray(trajectory.parameters[0].extrinsic)) for i in range(5): - im1 = read_image("../../TestData/RGBD/depth/{:05d}.png".format(i)) - im2 = read_image("../../TestData/RGBD/color/{:05d}.jpg".format(i)) - im = create_rgbd_image_from_color_and_depth(im2, im1, 1000.0, 5.0, False) - pcd = create_point_cloud_from_rgbd_image(im, - trajectory.parameters[i].intrinsic, trajectory.parameters[i].extrinsic) + im1 = o3d.io.read_image( + "../../TestData/RGBD/depth/{:05d}.png".format(i)) + im2 = o3d.io.read_image( + "../../TestData/RGBD/color/{:05d}.jpg".format(i)) + im = o3d.geometry.create_rgbd_image_from_color_and_depth( + im2, im1, 1000.0, 5.0, False) + pcd = o3d.geometry.create_point_cloud_from_rgbd_image( + im, trajectory.parameters[i].intrinsic, + trajectory.parameters[i].extrinsic) pcds.append(pcd) - draw_geometries(pcds) + o3d.visualization.draw_geometries(pcds) print("") diff --git a/examples/Python/Advanced/color_map_optimization.py b/examples/Python/Advanced/color_map_optimization.py index 34f576418a2..ae659e7bd93 100644 --- a/examples/Python/Advanced/color_map_optimization.py +++ b/examples/Python/Advanced/color_map_optimization.py @@ -2,9 +2,9 @@ # The MIT License (MIT) # See license file or visit www.open3d.org for details -# examples/Python/Advanced/color_map_optimization.py +# examples/Python/Advanced/o3d.color_map.color_map_optimization.py -from open3d import * +import open3d as o3d from trajectory_io import * import os, sys sys.path.append("../Utility") @@ -14,38 +14,43 @@ debug_mode = False if __name__ == "__main__": - set_verbosity_level(VerbosityLevel.Debug) + o3d.utility.set_verbosity_level(o3d.utility.VerbosityLevel.Debug) # Read RGBD images rgbd_images = [] - depth_image_path = get_file_list( - os.path.join(path, "depth/"), 
extension = ".png") - color_image_path = get_file_list( - os.path.join(path, "image/"), extension = ".jpg") - assert(len(depth_image_path) == len(color_image_path)) + depth_image_path = get_file_list(os.path.join(path, "depth/"), + extension=".png") + color_image_path = get_file_list(os.path.join(path, "image/"), + extension=".jpg") + assert (len(depth_image_path) == len(color_image_path)) for i in range(len(depth_image_path)): - depth = read_image(os.path.join(depth_image_path[i])) - color = read_image(os.path.join(color_image_path[i])) - rgbd_image = create_rgbd_image_from_color_and_depth(color, depth, - convert_rgb_to_intensity = False) + depth = o3d.io.read_image(os.path.join(depth_image_path[i])) + color = o3d.io.read_image(os.path.join(color_image_path[i])) + rgbd_image = o3d.geometry.create_rgbd_image_from_color_and_depth( + color, depth, convert_rgb_to_intensity=False) if debug_mode: - pcd = create_point_cloud_from_rgbd_image(rgbd_image, - PinholeCameraIntrinsic(PinholeCameraIntrinsicParameters.PrimeSenseDefault)) - draw_geometries([pcd]) + pcd = o3d.geometry.create_point_cloud_from_rgbd_image( + rgbd_image, + o3d.camera.PinholeCameraIntrinsic( + o3d.camera.PinholeCameraIntrinsicParameters. + PrimeSenseDefault)) + o3d.visualization.draw_geometries([pcd]) rgbd_images.append(rgbd_image) # Read camera pose and mesh - camera = read_pinhole_camera_trajectory(os.path.join(path, "scene/key.log")) - mesh = read_triangle_mesh(os.path.join(path, "scene", "integrated.ply")) + camera = o3d.io.read_pinhole_camera_trajectory( + os.path.join(path, "scene/key.log")) + mesh = o3d.io.read_triangle_mesh( + os.path.join(path, "scene", "integrated.ply")) # Before full optimization, let's just visualize texture map # with given geometry, RGBD images, and camera poses. 
- option = ColorMapOptimizationOption() + option = o3d.color_map.ColorMapOptimizationOption() option.maximum_iteration = 0 - color_map_optimization(mesh, rgbd_images, camera, option) - draw_geometries([mesh]) - write_triangle_mesh(os.path.join(path, "scene", - "color_map_before_optimization.ply"), mesh) + o3d.color_map.color_map_optimization(mesh, rgbd_images, camera, option) + o3d.visualization.draw_geometries([mesh]) + o3d.io.write_triangle_mesh( + os.path.join(path, "scene", "color_map_before_optimization.ply"), mesh) # Optimize texture and save the mesh as texture_mapped.ply # This is implementation of following paper @@ -54,7 +59,7 @@ # SIGGRAPH 2014 option.maximum_iteration = 300 option.non_rigid_camera_coordinate = True - color_map_optimization(mesh, rgbd_images, camera, option) - draw_geometries([mesh]) - write_triangle_mesh(os.path.join(path, "scene", - "color_map_after_optimization.ply"), mesh) + o3d.color_map.color_map_optimization(mesh, rgbd_images, camera, option) + o3d.visualization.draw_geometries([mesh]) + o3d.io.write_triangle_mesh( + os.path.join(path, "scene", "color_map_after_optimization.ply"), mesh) diff --git a/examples/Python/Advanced/colored_pointcloud_registration.py b/examples/Python/Advanced/colored_pointcloud_registration.py index 6e674cbbbae..367c894edd7 100644 --- a/examples/Python/Advanced/colored_pointcloud_registration.py +++ b/examples/Python/Advanced/colored_pointcloud_registration.py @@ -6,65 +6,69 @@ import numpy as np import copy -from open3d import * +import open3d as o3d def draw_registration_result_original_color(source, target, transformation): source_temp = copy.deepcopy(source) source_temp.transform(transformation) - draw_geometries([source_temp, target]) + o3d.visualization.draw_geometries([source_temp, target]) if __name__ == "__main__": print("1. Load two point clouds and show initial pose") - source = read_point_cloud("../../TestData/ColoredICP/frag_115.ply") - target = read_point_cloud("../../TestData/ColoredICP/frag_116.ply") + source = o3d.io.read_point_cloud("../../TestData/ColoredICP/frag_115.ply") + target = o3d.io.read_point_cloud("../../TestData/ColoredICP/frag_116.ply") # draw initial alignment current_transformation = np.identity(4) - draw_registration_result_original_color( - source, target, current_transformation) + draw_registration_result_original_color(source, target, + current_transformation) # point to plane ICP - current_transformation = np.identity(4); + current_transformation = np.identity(4) print("2. Point-to-plane ICP registration is applied on original point") print(" clouds to refine the alignment. Distance threshold 0.02.") - result_icp = registration_icp(source, target, 0.02, - current_transformation, TransformationEstimationPointToPlane()) + result_icp = o3d.registration.registration_icp( + source, target, 0.02, current_transformation, + o3d.registration.TransformationEstimationPointToPlane()) print(result_icp) - draw_registration_result_original_color( - source, target, result_icp.transformation) + draw_registration_result_original_color(source, target, + result_icp.transformation) # colored pointcloud registration # This is implementation of following paper # J. Park, Q.-Y. Zhou, V. Koltun, # Colored Point Cloud Registration Revisited, ICCV 2017 - voxel_radius = [ 0.04, 0.02, 0.01 ]; - max_iter = [ 50, 30, 14 ]; + voxel_radius = [0.04, 0.02, 0.01] + max_iter = [50, 30, 14] current_transformation = np.identity(4) print("3. 
Colored point cloud registration") for scale in range(3): iter = max_iter[scale] radius = voxel_radius[scale] - print([iter,radius,scale]) + print([iter, radius, scale]) print("3-1. Downsample with a voxel size %.2f" % radius) - source_down = voxel_down_sample(source, radius) - target_down = voxel_down_sample(target, radius) + source_down = o3d.geometry.voxel_down_sample(source, radius) + target_down = o3d.geometry.voxel_down_sample(target, radius) print("3-2. Estimate normal.") - estimate_normals(source_down, KDTreeSearchParamHybrid( - radius = radius * 2, max_nn = 30)) - estimate_normals(target_down, KDTreeSearchParamHybrid( - radius = radius * 2, max_nn = 30)) + o3d.geometry.estimate_normals( + source_down, + o3d.geometry.KDTreeSearchParamHybrid(radius=radius * 2, max_nn=30)) + o3d.geometry.estimate_normals( + target_down, + o3d.geometry.KDTreeSearchParamHybrid(radius=radius * 2, max_nn=30)) print("3-3. Applying colored point cloud registration") - result_icp = registration_colored_icp(source_down, target_down, - radius, current_transformation, - ICPConvergenceCriteria(relative_fitness = 1e-6, - relative_rmse = 1e-6, max_iteration = iter)) + result_icp = o3d.registration.registration_colored_icp( + source_down, target_down, radius, current_transformation, + o3d.registration.ICPConvergenceCriteria(relative_fitness=1e-6, + relative_rmse=1e-6, + max_iteration=iter)) current_transformation = result_icp.transformation print(result_icp) - draw_registration_result_original_color( - source, target, result_icp.transformation) + draw_registration_result_original_color(source, target, + result_icp.transformation) diff --git a/examples/Python/Advanced/customized_visualization.py b/examples/Python/Advanced/customized_visualization.py index 057665c8864..e0d34714f08 100644 --- a/examples/Python/Advanced/customized_visualization.py +++ b/examples/Python/Advanced/customized_visualization.py @@ -5,84 +5,99 @@ # examples/Python/Advanced/customized_visualization.py import os -from open3d import * +import open3d as o3d import numpy as np import matplotlib.pyplot as plt + def custom_draw_geometry(pcd): # The following code achieves the same effect as: - # draw_geometries([pcd]) - vis = Visualizer() + # o3d.visualization.draw_geometries([pcd]) + vis = o3d.visualization.Visualizer() vis.create_window() vis.add_geometry(pcd) vis.run() vis.destroy_window() + def custom_draw_geometry_with_custom_fov(pcd, fov_step): - vis = Visualizer() + vis = o3d.visualization.Visualizer() vis.create_window() vis.add_geometry(pcd) ctr = vis.get_view_control() print("Field of view (before changing) %.2f" % ctr.get_field_of_view()) - ctr.change_field_of_view(step = fov_step) + ctr.change_field_of_view(step=fov_step) print("Field of view (after changing) %.2f" % ctr.get_field_of_view()) vis.run() vis.destroy_window() + def custom_draw_geometry_with_rotation(pcd): + def rotate_view(vis): ctr = vis.get_view_control() ctr.rotate(10.0, 0.0) return False - draw_geometries_with_animation_callback([pcd], rotate_view) + + o3d.visualization.draw_geometries_with_animation_callback([pcd], + rotate_view) + def custom_draw_geometry_load_option(pcd): - vis = Visualizer() + vis = o3d.visualization.Visualizer() vis.create_window() vis.add_geometry(pcd) - vis.get_render_option().load_from_json( - "../../TestData/renderoption.json") + vis.get_render_option().load_from_json("../../TestData/renderoption.json") vis.run() vis.destroy_window() + def custom_draw_geometry_with_key_callback(pcd): + def change_background_to_black(vis): opt = 
vis.get_render_option() opt.background_color = np.asarray([0, 0, 0]) return False + def load_render_option(vis): vis.get_render_option().load_from_json( - "../../TestData/renderoption.json") + "../../TestData/renderoption.json") return False + def capture_depth(vis): depth = vis.capture_depth_float_buffer() plt.imshow(np.asarray(depth)) plt.show() return False + def capture_image(vis): image = vis.capture_screen_float_buffer() plt.imshow(np.asarray(image)) plt.show() return False + key_to_callback = {} key_to_callback[ord("K")] = change_background_to_black key_to_callback[ord("R")] = load_render_option key_to_callback[ord(",")] = capture_depth key_to_callback[ord(".")] = capture_image - draw_geometries_with_key_callbacks([pcd], key_to_callback) + o3d.visualization.draw_geometries_with_key_callbacks([pcd], key_to_callback) + def custom_draw_geometry_with_camera_trajectory(pcd): custom_draw_geometry_with_camera_trajectory.index = -1 custom_draw_geometry_with_camera_trajectory.trajectory =\ - read_pinhole_camera_trajectory( + o3d.io.read_pinhole_camera_trajectory( "../../TestData/camera_trajectory.json") - custom_draw_geometry_with_camera_trajectory.vis = Visualizer() + custom_draw_geometry_with_camera_trajectory.vis = o3d.visualization.Visualizer( + ) if not os.path.exists("../../TestData/image/"): os.makedirs("../../TestData/image/") if not os.path.exists("../../TestData/depth/"): os.makedirs("../../TestData/depth/") + def move_forward(vis): - # This function is called within the Visualizer::run() loop + # This function is called within the o3d.visualization.Visualizer::run() loop # The run loop calls the function, then re-render # So the sequence in this function is to: # 1. Capture frame @@ -103,11 +118,13 @@ def move_forward(vis): #vis.capture_screen_image("image/{:05d}.png".format(glb.index), False) glb.index = glb.index + 1 if glb.index < len(glb.trajectory.parameters): - ctr.convert_from_pinhole_camera_parameters(glb.trajectory.parameters[glb.index]) + ctr.convert_from_pinhole_camera_parameters( + glb.trajectory.parameters[glb.index]) else: custom_draw_geometry_with_camera_trajectory.vis.\ register_animation_callback(None) return False + vis = custom_draw_geometry_with_camera_trajectory.vis vis.create_window() vis.add_geometry(pcd) @@ -116,8 +133,9 @@ def move_forward(vis): vis.run() vis.destroy_window() + if __name__ == "__main__": - pcd = read_point_cloud("../../TestData/fragment.ply") + pcd = o3d.io.read_point_cloud("../../TestData/fragment.ply") print("1. 
Customized visualization to mimic DrawGeometry") custom_draw_geometry(pcd) diff --git a/examples/Python/Advanced/downsampling_and_trace.py b/examples/Python/Advanced/downsampling_and_trace.py index c1a782d2146..9b670d5149f 100644 --- a/examples/Python/Advanced/downsampling_and_trace.py +++ b/examples/Python/Advanced/downsampling_and_trace.py @@ -3,14 +3,14 @@ # See license file or visit www.open3d.org for details import numpy as np -from open3d import * +import open3d as o3d if __name__ == "__main__": - pcd = read_point_cloud("../../TestData/fragment.ply") + pcd = o3d.io.read_point_cloud("../../TestData/fragment.ply") min_cube_size = 0.05 print("\nOriginal, # of points %d" % (np.asarray(pcd.points).shape[0])) - pcd_down = voxel_down_sample(pcd, min_cube_size) + pcd_down = o3d.geometry.voxel_down_sample(pcd, min_cube_size) print("\nScale %f, # of points %d" % \ (min_cube_size, np.asarray(pcd_down.points).shape[0])) min_bound = pcd_down.get_min_bound() - min_cube_size * 0.5 @@ -20,12 +20,13 @@ num_scales = 3 for i in range(1, num_scales): multiplier = pow(2, i) - pcd_curr_down, cubic_id = voxel_down_sample_and_trace(pcd_curr, - multiplier * min_cube_size, min_bound, max_bound, False) - print("\nScale %f, # of points %d" % (multiplier * min_cube_size, - np.asarray(pcd_curr_down.points).shape[0])) + pcd_curr_down, cubic_id = o3d.geometry.voxel_down_sample_and_trace( + pcd_curr, multiplier * min_cube_size, min_bound, max_bound, False) + print("\nScale %f, # of points %d" % + (multiplier * min_cube_size, np.asarray( + pcd_curr_down.points).shape[0])) print("Downsampled points (the first 10 points)") - print(np.asarray(pcd_curr_down.points)[:10,:]) + print(np.asarray(pcd_curr_down.points)[:10, :]) print("Index (the first 10 indices)") - print(np.asarray(cubic_id)[:10,:]) + print(np.asarray(cubic_id)[:10, :]) pcd_curr = pcd_curr_down diff --git a/examples/Python/Advanced/fast_global_registration.py b/examples/Python/Advanced/fast_global_registration.py index 2a0150e4b6f..54a0c065f60 100644 --- a/examples/Python/Advanced/fast_global_registration.py +++ b/examples/Python/Advanced/fast_global_registration.py @@ -4,41 +4,45 @@ # examples/Python/Advanced/fast_global_registration.py -from open3d import * +import open3d as o3d from global_registration import * import numpy as np import copy import time -def execute_fast_global_registration(source_down, target_down, - source_fpfh, target_fpfh, voxel_size): + +def execute_fast_global_registration(source_down, target_down, source_fpfh, + target_fpfh, voxel_size): distance_threshold = voxel_size * 0.5 print(":: Apply fast global registration with distance threshold %.3f" \ % distance_threshold) - result = registration_fast_based_on_feature_matching( - source_down, target_down, source_fpfh, target_fpfh, - FastGlobalRegistrationOption( - maximum_correspondence_distance = distance_threshold)) + result = o3d.registration.registration_fast_based_on_feature_matching( + source_down, target_down, source_fpfh, target_fpfh, + o3d.registration.FastGlobalRegistrationOption( + maximum_correspondence_distance=distance_threshold)) return result + if __name__ == "__main__": - voxel_size = 0.05 # means 5cm for the dataset + voxel_size = 0.05 # means 5cm for the dataset source, target, source_down, target_down, source_fpfh, target_fpfh = \ prepare_dataset(voxel_size) start = time.time() result_ransac = execute_global_registration(source_down, target_down, - source_fpfh, target_fpfh, voxel_size) + source_fpfh, target_fpfh, + voxel_size) print(result_ransac) print("Global 
registration took %.3f sec.\n" % (time.time() - start)) draw_registration_result(source_down, target_down, - result_ransac.transformation) + result_ransac.transformation) start = time.time() result_fast = execute_fast_global_registration(source_down, target_down, - source_fpfh, target_fpfh, voxel_size) + source_fpfh, target_fpfh, + voxel_size) print("Fast global registration took %.3f sec.\n" % (time.time() - start)) draw_registration_result(source_down, target_down, - result_fast.transformation) + result_fast.transformation) diff --git a/examples/Python/Advanced/global_registration.py b/examples/Python/Advanced/global_registration.py index a81555b9959..a4457909d1a 100644 --- a/examples/Python/Advanced/global_registration.py +++ b/examples/Python/Advanced/global_registration.py @@ -4,41 +4,44 @@ # examples/Python/Advanced/global_registration.py -from open3d import * +import open3d as o3d import numpy as np import copy + def draw_registration_result(source, target, transformation): source_temp = copy.deepcopy(source) target_temp = copy.deepcopy(target) source_temp.paint_uniform_color([1, 0.706, 0]) target_temp.paint_uniform_color([0, 0.651, 0.929]) source_temp.transform(transformation) - draw_geometries([source_temp, target_temp]) + o3d.visualization.draw_geometries([source_temp, target_temp]) + def preprocess_point_cloud(pcd, voxel_size): print(":: Downsample with a voxel size %.3f." % voxel_size) - pcd_down = voxel_down_sample(pcd, voxel_size) + pcd_down = o3d.geometry.voxel_down_sample(pcd, voxel_size) radius_normal = voxel_size * 2 print(":: Estimate normal with search radius %.3f." % radius_normal) - estimate_normals(pcd_down, KDTreeSearchParamHybrid( - radius = radius_normal, max_nn = 30)) + o3d.geometry.estimate_normals( + pcd_down, + o3d.geometry.KDTreeSearchParamHybrid(radius=radius_normal, max_nn=30)) radius_feature = voxel_size * 5 print(":: Compute FPFH feature with search radius %.3f." % radius_feature) - pcd_fpfh = compute_fpfh_feature(pcd_down, - KDTreeSearchParamHybrid(radius = radius_feature, max_nn = 100)) + pcd_fpfh = o3d.registration.compute_fpfh_feature( + pcd_down, + o3d.geometry.KDTreeSearchParamHybrid(radius=radius_feature, max_nn=100)) return pcd_down, pcd_fpfh + def prepare_dataset(voxel_size): print(":: Load two point clouds and disturb initial pose.") - source = read_point_cloud("../../TestData/ICP/cloud_bin_0.pcd") - target = read_point_cloud("../../TestData/ICP/cloud_bin_1.pcd") - trans_init = np.asarray([[0.0, 0.0, 1.0, 0.0], - [1.0, 0.0, 0.0, 0.0], - [0.0, 1.0, 0.0, 0.0], - [0.0, 0.0, 0.0, 1.0]]) + source = o3d.io.read_point_cloud("../../TestData/ICP/cloud_bin_0.pcd") + target = o3d.io.read_point_cloud("../../TestData/ICP/cloud_bin_1.pcd") + trans_init = np.asarray([[0.0, 0.0, 1.0, 0.0], [1.0, 0.0, 0.0, 0.0], + [0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 0.0, 1.0]]) source.transform(trans_init) draw_registration_result(source, target, np.identity(4)) @@ -46,43 +49,47 @@ def prepare_dataset(voxel_size): target_down, target_fpfh = preprocess_point_cloud(target, voxel_size) return source, target, source_down, target_down, source_fpfh, target_fpfh -def execute_global_registration( - source_down, target_down, source_fpfh, target_fpfh, voxel_size): + +def execute_global_registration(source_down, target_down, source_fpfh, + target_fpfh, voxel_size): distance_threshold = voxel_size * 1.5 print(":: RANSAC registration on downsampled point clouds.") print(" Since the downsampling voxel size is %.3f," % voxel_size) print(" we use a liberal distance threshold %.3f." 
% distance_threshold) - result = registration_ransac_based_on_feature_matching( - source_down, target_down, source_fpfh, target_fpfh, - distance_threshold, - TransformationEstimationPointToPoint(False), 4, - [CorrespondenceCheckerBasedOnEdgeLength(0.9), - CorrespondenceCheckerBasedOnDistance(distance_threshold)], - RANSACConvergenceCriteria(4000000, 500)) + result = o3d.registration.registration_ransac_based_on_feature_matching( + source_down, target_down, source_fpfh, target_fpfh, distance_threshold, + o3d.registration.TransformationEstimationPointToPoint(False), 4, [ + o3d.registration.CorrespondenceCheckerBasedOnEdgeLength(0.9), + o3d.registration.CorrespondenceCheckerBasedOnDistance( + distance_threshold) + ], o3d.registration.RANSACConvergenceCriteria(4000000, 500)) return result + def refine_registration(source, target, source_fpfh, target_fpfh, voxel_size): distance_threshold = voxel_size * 0.4 print(":: Point-to-plane ICP registration is applied on original point") print(" clouds to refine the alignment. This time we use a strict") print(" distance threshold %.3f." % distance_threshold) - result = registration_icp(source, target, distance_threshold, - result_ransac.transformation, - TransformationEstimationPointToPlane()) + result = o3d.registration.registration_icp( + source, target, distance_threshold, result_ransac.transformation, + o3d.registration.TransformationEstimationPointToPlane()) return result + if __name__ == "__main__": - voxel_size = 0.05 # means 5cm for the dataset + voxel_size = 0.05 # means 5cm for the dataset source, target, source_down, target_down, source_fpfh, target_fpfh = \ prepare_dataset(voxel_size) result_ransac = execute_global_registration(source_down, target_down, - source_fpfh, target_fpfh, voxel_size) + source_fpfh, target_fpfh, + voxel_size) print(result_ransac) draw_registration_result(source_down, target_down, - result_ransac.transformation) + result_ransac.transformation) - result_icp = refine_registration(source, target, - source_fpfh, target_fpfh, voxel_size) + result_icp = refine_registration(source, target, source_fpfh, target_fpfh, + voxel_size) print(result_icp) draw_registration_result(source, target, result_icp.transformation) diff --git a/examples/Python/Advanced/headless_rendering.py b/examples/Python/Advanced/headless_rendering.py index 89cca7ea8d0..0ee8a4b3a3a 100644 --- a/examples/Python/Advanced/headless_rendering.py +++ b/examples/Python/Advanced/headless_rendering.py @@ -5,22 +5,25 @@ # examples/Python/Advanced/headless_rendering.py import os -from open3d import * +import open3d as o3d import numpy as np import matplotlib.pyplot as plt + def custom_draw_geometry_with_camera_trajectory(pcd): custom_draw_geometry_with_camera_trajectory.index = -1 custom_draw_geometry_with_camera_trajectory.trajectory =\ - read_pinhole_camera_trajectory( + o3d.io.read_pinhole_camera_trajectory( "../../TestData/camera_trajectory.json") - custom_draw_geometry_with_camera_trajectory.vis = Visualizer() + custom_draw_geometry_with_camera_trajectory.vis = o3d.visualization.Visualizer( + ) if not os.path.exists("../../TestData/image/"): os.makedirs("../../TestData/image/") if not os.path.exists("../../TestData/depth/"): os.makedirs("../../TestData/depth/") + def move_forward(vis): - # This function is called within the Visualizer::run() loop + # This function is called within the o3d.visualization.Visualizer::run() loop # The run loop calls the function, then re-render # So the sequence in this function is to: # 1. 
Capture frame @@ -41,11 +44,13 @@ def move_forward(vis): #vis.capture_screen_image("image/{:05d}.png".format(glb.index), False) glb.index = glb.index + 1 if glb.index < len(glb.trajectory.parameters): - ctr.convert_from_pinhole_camera_parameters(glb.trajectory.parameters[glb.index]) + ctr.convert_from_pinhole_camera_parameters( + glb.trajectory.parameters[glb.index]) else: custom_draw_geometry_with_camera_trajectory.vis.\ register_animation_callback(None) return False + vis = custom_draw_geometry_with_camera_trajectory.vis vis.create_window() vis.add_geometry(pcd) @@ -54,8 +59,11 @@ def move_forward(vis): vis.run() vis.destroy_window() + if __name__ == "__main__": - pcd = read_point_cloud("../../TestData/fragment.ply") + pcd = o3d.io.read_point_cloud("../../TestData/fragment.ply") - print("Customized visualization playing a camera trajectory. Ctrl+z to terminate") + print( + "Customized visualization playing a camera trajectory. Ctrl+z to terminate" + ) custom_draw_geometry_with_camera_trajectory(pcd) diff --git a/examples/Python/Advanced/interactive_visualization.py b/examples/Python/Advanced/interactive_visualization.py index 42820ee0962..782a24b3730 100644 --- a/examples/Python/Advanced/interactive_visualization.py +++ b/examples/Python/Advanced/interactive_visualization.py @@ -6,18 +6,22 @@ import numpy as np import copy -from open3d import * +import open3d as o3d + def demo_crop_geometry(): print("Demo for manual geometry cropping") - print("1) Press 'Y' twice to align geometry with negative direction of y-axis") + print( + "1) Press 'Y' twice to align geometry with negative direction of y-axis" + ) print("2) Press 'K' to lock screen and to switch to selection mode") print("3) Drag for rectangle selection,") print(" or use ctrl + left click for polygon selection") print("4) Press 'C' to get a selected geometry and to save it") print("5) Press 'F' to switch to freeview mode") - pcd = read_point_cloud("../../TestData/ICP/cloud_bin_0.pcd") - draw_geometries_with_editing([pcd]) + pcd = o3d.io.read_point_cloud("../../TestData/ICP/cloud_bin_0.pcd") + o3d.visualization.draw_geometries_with_editing([pcd]) + def draw_registration_result(source, target, transformation): source_temp = copy.deepcopy(source) @@ -25,51 +29,57 @@ def draw_registration_result(source, target, transformation): source_temp.paint_uniform_color([1, 0.706, 0]) target_temp.paint_uniform_color([0, 0.651, 0.929]) source_temp.transform(transformation) - draw_geometries([source_temp, target_temp]) + o3d.visualization.draw_geometries([source_temp, target_temp]) + def pick_points(pcd): print("") - print("1) Please pick at least three correspondences using [shift + left click]") + print( + "1) Please pick at least three correspondences using [shift + left click]" + ) print(" Press [shift + right click] to undo point picking") print("2) Afther picking points, press q for close the window") - vis = VisualizerWithEditing() + vis = o3d.visualization.VisualizerWithEditing() vis.create_window() vis.add_geometry(pcd) - vis.run() # user picks points + vis.run() # user picks points vis.destroy_window() print("") return vis.get_picked_points() + def demo_manual_registration(): print("Demo for manual ICP") - source = read_point_cloud("../../TestData/ICP/cloud_bin_0.pcd") - target = read_point_cloud("../../TestData/ICP/cloud_bin_2.pcd") + source = o3d.io.read_point_cloud("../../TestData/ICP/cloud_bin_0.pcd") + target = o3d.io.read_point_cloud("../../TestData/ICP/cloud_bin_2.pcd") print("Visualization of two point clouds before manual 
alignment") draw_registration_result(source, target, np.identity(4)) # pick points from two point clouds and builds correspondences picked_id_source = pick_points(source) picked_id_target = pick_points(target) - assert(len(picked_id_source)>=3 and len(picked_id_target)>=3) - assert(len(picked_id_source) == len(picked_id_target)) - corr = np.zeros((len(picked_id_source),2)) - corr[:,0] = picked_id_source - corr[:,1] = picked_id_target + assert (len(picked_id_source) >= 3 and len(picked_id_target) >= 3) + assert (len(picked_id_source) == len(picked_id_target)) + corr = np.zeros((len(picked_id_source), 2)) + corr[:, 0] = picked_id_source + corr[:, 1] = picked_id_target # estimate rough transformation using correspondences print("Compute a rough transform using the correspondences given by user") - p2p = TransformationEstimationPointToPoint() + p2p = o3d.registration.TransformationEstimationPointToPoint() trans_init = p2p.compute_transformation(source, target, - Vector2iVector(corr)) + o3d.utility.Vector2iVector(corr)) # point-to-point ICP for refinement print("Perform point-to-point ICP refinement") - threshold = 0.03 # 3cm distance threshold - reg_p2p = registration_icp(source, target, threshold, trans_init, - TransformationEstimationPointToPoint()) + threshold = 0.03 # 3cm distance threshold + reg_p2p = o3d.registration.registration_icp( + source, target, threshold, trans_init, + o3d.registration.TransformationEstimationPointToPoint()) draw_registration_result(source, target, reg_p2p.transformation) print("") + if __name__ == "__main__": demo_crop_geometry() demo_manual_registration() diff --git a/examples/Python/Advanced/load_save_viewpoint.py b/examples/Python/Advanced/load_save_viewpoint.py index 7d8cdb8fa77..31d9797a8ff 100644 --- a/examples/Python/Advanced/load_save_viewpoint.py +++ b/examples/Python/Advanced/load_save_viewpoint.py @@ -5,24 +5,24 @@ # examples/Python/Advanced/camera_trajectory.py import numpy as np -from open3d import * +import open3d as o3d def save_view_point(pcd, filename): - vis = Visualizer() + vis = o3d.visualization.Visualizer() vis.create_window() vis.add_geometry(pcd) - vis.run() # user changes the view and press "q" to terminate + vis.run() # user changes the view and press "q" to terminate param = vis.get_view_control().convert_to_pinhole_camera_parameters() - write_pinhole_camera_parameters(filename, param) + o3d.io.write_pinhole_camera_parameters(filename, param) vis.destroy_window() def load_view_point(pcd, filename): - vis = Visualizer() + vis = o3d.visualization.Visualizer() vis.create_window() ctr = vis.get_view_control() - param = read_pinhole_camera_parameters(filename) + param = o3d.io.read_pinhole_camera_parameters(filename) vis.add_geometry(pcd) ctr.convert_from_pinhole_camera_parameters(param) vis.run() @@ -30,6 +30,6 @@ def load_view_point(pcd, filename): if __name__ == "__main__": - pcd = read_point_cloud("../../TestData/fragment.pcd") + pcd = o3d.io.read_point_cloud("../../TestData/fragment.pcd") save_view_point(pcd, "viewpoint.json") load_view_point(pcd, "viewpoint.json") diff --git a/examples/Python/Advanced/multiway_registration.py b/examples/Python/Advanced/multiway_registration.py index 5368f1a90a9..481707c2ce8 100644 --- a/examples/Python/Advanced/multiway_registration.py +++ b/examples/Python/Advanced/multiway_registration.py @@ -4,7 +4,7 @@ # examples/Python/Advanced/multiway_registration.py -from open3d import * +import open3d as o3d import numpy as np voxel_size = 0.02 @@ -12,84 +12,95 @@ max_correspondence_distance_fine = voxel_size 
* 1.5 -def load_point_clouds(voxel_size = 0.0): +def load_point_clouds(voxel_size=0.0): pcds = [] for i in range(3): - pcd = read_point_cloud("../../TestData/ICP/cloud_bin_%d.pcd" % i) - pcd_down = voxel_down_sample(pcd, voxel_size = voxel_size) + pcd = o3d.io.read_point_cloud("../../TestData/ICP/cloud_bin_%d.pcd" % i) + pcd_down = o3d.geometry.voxel_down_sample(pcd, voxel_size=voxel_size) pcds.append(pcd_down) return pcds def pairwise_registration(source, target): print("Apply point-to-plane ICP") - icp_coarse = registration_icp(source, target, - max_correspondence_distance_coarse, np.identity(4), - TransformationEstimationPointToPlane()) - icp_fine = registration_icp(source, target, - max_correspondence_distance_fine, icp_coarse.transformation, - TransformationEstimationPointToPlane()) + icp_coarse = o3d.registration.registration_icp( + source, target, max_correspondence_distance_coarse, np.identity(4), + o3d.registration.TransformationEstimationPointToPlane()) + icp_fine = o3d.registration.registration_icp( + source, target, max_correspondence_distance_fine, + icp_coarse.transformation, + o3d.registration.TransformationEstimationPointToPlane()) transformation_icp = icp_fine.transformation - information_icp = get_information_matrix_from_point_clouds( - source, target, max_correspondence_distance_fine, - icp_fine.transformation) + information_icp = o3d.registration.get_information_matrix_from_point_clouds( + source, target, max_correspondence_distance_fine, + icp_fine.transformation) return transformation_icp, information_icp -def full_registration(pcds, - max_correspondence_distance_coarse, max_correspondence_distance_fine): - pose_graph = PoseGraph() +def full_registration(pcds, max_correspondence_distance_coarse, + max_correspondence_distance_fine): + pose_graph = o3d.registration.PoseGraph() odometry = np.identity(4) - pose_graph.nodes.append(PoseGraphNode(odometry)) + pose_graph.nodes.append(o3d.registration.PoseGraphNode(odometry)) n_pcds = len(pcds) for source_id in range(n_pcds): for target_id in range(source_id + 1, n_pcds): transformation_icp, information_icp = pairwise_registration( - pcds[source_id], pcds[target_id]) - print("Build PoseGraph") - if target_id == source_id + 1: # odometry case + pcds[source_id], pcds[target_id]) + print("Build o3d.registration.PoseGraph") + if target_id == source_id + 1: # odometry case odometry = np.dot(transformation_icp, odometry) - pose_graph.nodes.append(PoseGraphNode(np.linalg.inv(odometry))) - pose_graph.edges.append(PoseGraphEdge(source_id, target_id, - transformation_icp, information_icp, uncertain = False)) - else: # loop closure case - pose_graph.edges.append(PoseGraphEdge(source_id, target_id, - transformation_icp, information_icp, uncertain = True)) + pose_graph.nodes.append( + o3d.registration.PoseGraphNode(np.linalg.inv(odometry))) + pose_graph.edges.append( + o3d.registration.PoseGraphEdge(source_id, + target_id, + transformation_icp, + information_icp, + uncertain=False)) + else: # loop closure case + pose_graph.edges.append( + o3d.registration.PoseGraphEdge(source_id, + target_id, + transformation_icp, + information_icp, + uncertain=True)) return pose_graph if __name__ == "__main__": - set_verbosity_level(VerbosityLevel.Debug) + o3d.utility.set_verbosity_level(o3d.utility.VerbosityLevel.Debug) pcds_down = load_point_clouds(voxel_size) - draw_geometries(pcds_down) + o3d.visualization.draw_geometries(pcds_down) print("Full registration ...") pose_graph = full_registration(pcds_down, - max_correspondence_distance_coarse, - 
max_correspondence_distance_fine) + max_correspondence_distance_coarse, + max_correspondence_distance_fine) print("Optimizing PoseGraph ...") - option = GlobalOptimizationOption( - max_correspondence_distance = max_correspondence_distance_fine, - edge_prune_threshold = 0.25, - reference_node = 0) - global_optimization(pose_graph, - GlobalOptimizationLevenbergMarquardt(), - GlobalOptimizationConvergenceCriteria(), option) + option = o3d.registration.GlobalOptimizationOption( + max_correspondence_distance=max_correspondence_distance_fine, + edge_prune_threshold=0.25, + reference_node=0) + o3d.registration.global_optimization( + pose_graph, o3d.registration.GlobalOptimizationLevenbergMarquardt(), + o3d.registration.GlobalOptimizationConvergenceCriteria(), option) print("Transform points and display") for point_id in range(len(pcds_down)): print(pose_graph.nodes[point_id].pose) pcds_down[point_id].transform(pose_graph.nodes[point_id].pose) - draw_geometries(pcds_down) + o3d.visualization.draw_geometries(pcds_down) print("Make a combined point cloud") pcds = load_point_clouds(voxel_size) - pcd_combined = PointCloud() + pcd_combined = o3d.geometry.PointCloud() for point_id in range(len(pcds)): pcds[point_id].transform(pose_graph.nodes[point_id].pose) pcd_combined += pcds[point_id] - pcd_combined_down = voxel_down_sample(pcd_combined, voxel_size = voxel_size) - write_point_cloud("multiway_registration.pcd", pcd_combined_down) - draw_geometries([pcd_combined_down]) + pcd_combined_down = o3d.geometry.voxel_down_sample(pcd_combined, + voxel_size=voxel_size) + o3d.io.write_point_cloud("multiway_registration.pcd", pcd_combined_down) + o3d.visualization.draw_geometries([pcd_combined_down]) diff --git a/examples/Python/Advanced/non_blocking_visualization.py b/examples/Python/Advanced/non_blocking_visualization.py index fc2b62cf259..dd2ca4e77dd 100644 --- a/examples/Python/Advanced/non_blocking_visualization.py +++ b/examples/Python/Advanced/non_blocking_visualization.py @@ -4,30 +4,25 @@ # examples/Python/Advanced/non_blocking_visualization.py -from open3d import * +import open3d as o3d import numpy as np import copy if __name__ == "__main__": - set_verbosity_level(VerbosityLevel.Debug) - source_raw = read_point_cloud("../../TestData/ICP/cloud_bin_0.pcd") - target_raw = read_point_cloud("../../TestData/ICP/cloud_bin_1.pcd") - source = voxel_down_sample(source_raw, voxel_size = 0.02) - target = voxel_down_sample(target_raw, voxel_size = 0.02) - trans = [[0.862, 0.011, -0.507, 0.0], - [-0.139, 0.967, -0.215, 0.7], - [0.487, 0.255, 0.835, -1.4], - [0.0, 0.0, 0.0, 1.0]] + o3d.utility.set_verbosity_level(o3d.utility.VerbosityLevel.Debug) + source_raw = o3d.io.read_point_cloud("../../TestData/ICP/cloud_bin_0.pcd") + target_raw = o3d.io.read_point_cloud("../../TestData/ICP/cloud_bin_1.pcd") + source = o3d.geometry.voxel_down_sample(source_raw, voxel_size=0.02) + target = o3d.geometry.voxel_down_sample(target_raw, voxel_size=0.02) + trans = [[0.862, 0.011, -0.507, 0.0], [-0.139, 0.967, -0.215, 0.7], + [0.487, 0.255, 0.835, -1.4], [0.0, 0.0, 0.0, 1.0]] source.transform(trans) - flip_transform = [[1, 0, 0, 0], - [0, -1, 0, 0], - [0, 0, -1, 0], - [0, 0, 0, 1]] + flip_transform = [[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]] source.transform(flip_transform) target.transform(flip_transform) - vis = Visualizer() + vis = o3d.visualization.Visualizer() vis.create_window() vis.add_geometry(source) vis.add_geometry(target) @@ -36,9 +31,10 @@ save_image = False for i in range(icp_iteration): - reg_p2l = 
registration_icp(source, target, threshold, - np.identity(4), TransformationEstimationPointToPlane(), - ICPConvergenceCriteria(max_iteration = 1)) + reg_p2l = o3d.registration.registration_icp( + source, target, threshold, np.identity(4), + o3d.registration.TransformationEstimationPointToPlane(), + o3d.registration.ICPConvergenceCriteria(max_iteration=1)) source.transform(reg_p2l.transformation) vis.update_geometry() vis.poll_events() diff --git a/examples/Python/Advanced/pointcloud_outlier_removal.py b/examples/Python/Advanced/pointcloud_outlier_removal.py index d03a0b735c7..93e565fa369 100644 --- a/examples/Python/Advanced/pointcloud_outlier_removal.py +++ b/examples/Python/Advanced/pointcloud_outlier_removal.py @@ -4,38 +4,41 @@ # examples/Python/Advanced/outlier_removal.py -from open3d import * +import open3d as o3d + def display_inlier_outlier(cloud, ind): - inlier_cloud = select_down_sample(cloud, ind) - outlier_cloud = select_down_sample(cloud, ind, invert=True) + inlier_cloud = o3d.geometry.select_down_sample(cloud, ind) + outlier_cloud = o3d.geometry.select_down_sample(cloud, ind, invert=True) print("Showing outliers (red) and inliers (gray): ") outlier_cloud.paint_uniform_color([1, 0, 0]) inlier_cloud.paint_uniform_color([0.8, 0.8, 0.8]) - draw_geometries([inlier_cloud, outlier_cloud]) + o3d.visualization.draw_geometries([inlier_cloud, outlier_cloud]) if __name__ == "__main__": print("Load a ply point cloud, print it, and render it") - pcd = read_point_cloud("../../TestData/ICP/cloud_bin_2.pcd") - draw_geometries([pcd]) + pcd = o3d.io.read_point_cloud("../../TestData/ICP/cloud_bin_2.pcd") + o3d.visualization.draw_geometries([pcd]) print("Downsample the point cloud with a voxel of 0.02") - voxel_down_pcd = voxel_down_sample(pcd, voxel_size = 0.02) - draw_geometries([voxel_down_pcd]) + voxel_down_pcd = o3d.geometry.voxel_down_sample(pcd, voxel_size=0.02) + o3d.visualization.draw_geometries([voxel_down_pcd]) print("Every 5th points are selected") - uni_down_pcd = uniform_down_sample(pcd, every_k_points = 5) - draw_geometries([uni_down_pcd]) + uni_down_pcd = o3d.geometry.uniform_down_sample(pcd, every_k_points=5) + o3d.visualization.draw_geometries([uni_down_pcd]) print("Statistical oulier removal") - cl,ind = statistical_outlier_removal(voxel_down_pcd, - nb_neighbors=20, std_ratio=2.0) + cl, ind = o3d.geometry.statistical_outlier_removal(voxel_down_pcd, + nb_neighbors=20, + std_ratio=2.0) display_inlier_outlier(voxel_down_pcd, ind) print("Radius oulier removal") - cl,ind = radius_outlier_removal(voxel_down_pcd, - nb_points=16, radius=0.05) + cl, ind = o3d.geometry.radius_outlier_removal(voxel_down_pcd, + nb_points=16, + radius=0.05) display_inlier_outlier(voxel_down_pcd, ind) diff --git a/examples/Python/Advanced/remove_geometry.py b/examples/Python/Advanced/remove_geometry.py index 48c69d095c2..04c9c3e8a2e 100644 --- a/examples/Python/Advanced/remove_geometry.py +++ b/examples/Python/Advanced/remove_geometry.py @@ -4,18 +4,19 @@ # examples/Python/Advanced/remove_geometry.py -from open3d import * +import open3d as o3d import numpy as np import time import copy + def visualize_non_blocking(vis): vis.update_geometry() vis.poll_events() vis.update_renderer() -pcd_orig = read_point_cloud("../../TestData/fragment.pcd") +pcd_orig = o3d.io.read_point_cloud("../../TestData/fragment.pcd") flip_transform = [[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]] pcd_orig.transform(flip_transform) n_pcd = 5 @@ -23,10 +24,10 @@ def visualize_non_blocking(vis): for i in range(n_pcd): 
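# ----------------------------------------------------------------------------
# A minimal, self-contained sketch of the non-blocking update loop that
# non_blocking_visualization.py and remove_geometry.py above are built on.
# It uses only Visualizer methods that appear in this patch; the input path
# and the 100-frame loop are illustrative placeholders, not part of the patch.
import numpy as np
import open3d as o3d

pcd = o3d.io.read_point_cloud("../../TestData/fragment.pcd")
vis = o3d.visualization.Visualizer()
vis.create_window()
vis.add_geometry(pcd)
step = np.identity(4)
step[0, 3] = 0.002  # nudge the cloud along x each frame
for _ in range(100):
    pcd.transform(step)
    vis.update_geometry()  # push the mutated geometry to the renderer
    vis.poll_events()  # keep the window responsive
    vis.update_renderer()  # redraw the current frame
vis.destroy_window()
# ----------------------------------------------------------------------------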
pcds.append(copy.deepcopy(pcd_orig)) trans = np.identity(4) - trans[:3,3] = [3*i,0,0] + trans[:3, 3] = [3 * i, 0, 0] pcds[i].transform(trans) -vis = Visualizer() +vis = o3d.visualization.Visualizer() vis.create_window() start_time = time.time() added = [False] * n_pcd @@ -48,6 +49,5 @@ def visualize_non_blocking(vis): vis.remove_geometry(pcds[i]) added[i] = False print("Removing %d" % i) - - visualize_non_blocking(vis) + visualize_non_blocking(vis) diff --git a/examples/Python/Advanced/rgbd_integration.py b/examples/Python/Advanced/rgbd_integration.py index 0ffac706d4e..0f4812e8eca 100644 --- a/examples/Python/Advanced/rgbd_integration.py +++ b/examples/Python/Advanced/rgbd_integration.py @@ -4,26 +4,32 @@ # examples/Python/Advanced/rgbd_integration.py -from open3d import * +import open3d as o3d from trajectory_io import * import numpy as np if __name__ == "__main__": camera_poses = read_trajectory("../../TestData/RGBD/odometry.log") - volume = ScalableTSDFVolume(voxel_length = 4.0 / 512.0, - sdf_trunc = 0.04, color_type = TSDFVolumeColorType.RGB8) + volume = o3d.integration.ScalableTSDFVolume( + voxel_length=4.0 / 512.0, + sdf_trunc=0.04, + color_type=o3d.integration.TSDFVolumeColorType.RGB8) for i in range(len(camera_poses)): print("Integrate {:d}-th image into the volume.".format(i)) - color = read_image("../../TestData/RGBD/color/{:05d}.jpg".format(i)) - depth = read_image("../../TestData/RGBD/depth/{:05d}.png".format(i)) - rgbd = create_rgbd_image_from_color_and_depth(color, depth, - depth_trunc = 4.0, convert_rgb_to_intensity = False) - volume.integrate(rgbd, PinholeCameraIntrinsic( - PinholeCameraIntrinsicParameters.PrimeSenseDefault), - np.linalg.inv(camera_poses[i].pose)) + color = o3d.io.read_image( + "../../TestData/RGBD/color/{:05d}.jpg".format(i)) + depth = o3d.io.read_image( + "../../TestData/RGBD/depth/{:05d}.png".format(i)) + rgbd = o3d.geometry.create_rgbd_image_from_color_and_depth( + color, depth, depth_trunc=4.0, convert_rgb_to_intensity=False) + volume.integrate( + rgbd, + o3d.camera.PinholeCameraIntrinsic( + o3d.camera.PinholeCameraIntrinsicParameters.PrimeSenseDefault), + np.linalg.inv(camera_poses[i].pose)) print("Extract a triangle mesh from the volume and visualize it.") mesh = volume.extract_triangle_mesh() mesh.compute_vertex_normals() - draw_geometries([mesh]) + o3d.visualization.draw_geometries([mesh]) diff --git a/examples/Python/Advanced/trajectory_io.py b/examples/Python/Advanced/trajectory_io.py index f213503e65c..3fc7bedb273 100644 --- a/examples/Python/Advanced/trajectory_io.py +++ b/examples/Python/Advanced/trajectory_io.py @@ -6,32 +6,38 @@ import numpy as np + class CameraPose: + def __init__(self, meta, mat): self.metadata = meta self.pose = mat + def __str__(self): return 'Metadata : ' + ' '.join(map(str, self.metadata)) + '\n' + \ "Pose : " + "\n" + np.array_str(self.pose) + def read_trajectory(filename): traj = [] with open(filename, 'r') as f: - metastr = f.readline(); + metastr = f.readline() while metastr: metadata = list(map(int, metastr.split())) - mat = np.zeros(shape = (4, 4)) + mat = np.zeros(shape=(4, 4)) for i in range(4): - matstr = f.readline(); - mat[i, :] = np.fromstring(matstr, dtype = float, sep=' \t') + matstr = f.readline() + mat[i, :] = np.fromstring(matstr, dtype=float, sep=' \t') traj.append(CameraPose(metadata, mat)) metastr = f.readline() return traj + def write_trajectory(traj, filename): with open(filename, 'w') as f: for x in traj: p = x.pose.tolist() f.write(' '.join(map(str, x.metadata)) + '\n') - f.write('\n'.join(' 
'.join(map('{0:.12f}'.format, p[i])) for i in range(4))) + f.write('\n'.join( + ' '.join(map('{0:.12f}'.format, p[i])) for i in range(4))) f.write('\n') diff --git a/examples/Python/Basic/__init__.py b/examples/Python/Basic/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/examples/Python/Basic/convex_hull.py b/examples/Python/Basic/convex_hull.py index b06bbe0dee6..20275133d60 100644 --- a/examples/Python/Basic/convex_hull.py +++ b/examples/Python/Basic/convex_hull.py @@ -2,7 +2,7 @@ # The MIT License (MIT) # See license file or visit www.open3d.org for details -# examples/Python/Basic/mesh_sampling.py +# examples/Python/Basic/convex_hull.py import numpy as np import os @@ -11,15 +11,19 @@ import tarfile import shutil import time -from open3d import * +import open3d as o3d + def create_mesh_plane(): - mesh = TriangleMesh() - mesh.vertices = Vector3dVector( - np.array([[0,0,0], [0,0.2,0], [1,0.2,0], [1,0,0]], dtype=np.float32)) - mesh.triangles = Vector3iVector(np.array([[0,2,1], [2,0,3]])) + mesh = o3d.geometry.TriangleMesh() + mesh.vertices = o3d.utility.Vector3dVector( + np.array([[0, 0, 0], [0, 0.2, 0], [1, 0.2, 0], [1, 0, 0]], + dtype=np.float32)) + mesh.triangles = o3d.utility.Vector3iVector(np.array([[0, 2, 1], [2, 0, + 3]])) return mesh + def armadillo_mesh(): armadillo_path = '../../TestData/Armadillo.ply' if not os.path.exists(armadillo_path): @@ -31,7 +35,8 @@ def armadillo_mesh(): with open(armadillo_path, 'wb') as fout: shutil.copyfileobj(fin, fout) os.remove(armadillo_path + '.gz') - return read_triangle_mesh(armadillo_path) + return o3d.io.read_triangle_mesh(armadillo_path) + def bunny_mesh(): bunny_path = '../../TestData/Bunny.ply' @@ -43,31 +48,32 @@ def bunny_mesh(): with tarfile.open(bunny_path + '.tar.gz') as tar: tar.extractall(path=os.path.dirname(bunny_path)) shutil.move( - os.path.join(os.path.dirname(bunny_path), - 'bunny', 'reconstruction', 'bun_zipper.ply'), - bunny_path) + os.path.join(os.path.dirname(bunny_path), 'bunny', 'reconstruction', + 'bun_zipper.ply'), bunny_path) os.remove(bunny_path + '.tar.gz') shutil.rmtree(os.path.join(os.path.dirname(bunny_path), 'bunny')) - return read_triangle_mesh(bunny_path) + return o3d.io.read_triangle_mesh(bunny_path) + def mesh_generator(): - yield create_mesh_box() - yield create_mesh_sphere() - yield read_triangle_mesh('../../TestData/knot.ply') + yield o3d.geometry.create_mesh_box() + yield o3d.geometry.create_mesh_sphere() + yield o3d.io.read_triangle_mesh('../../TestData/knot.ply') yield bunny_mesh() yield armadillo_mesh() + if __name__ == "__main__": for mesh in mesh_generator(): mesh.compute_vertex_normals() - hull = compute_mesh_convex_hull(mesh) - hull_ls = create_line_set_from_triangle_mesh(hull) - hull_ls.paint_uniform_color((1,0,0)) - draw_geometries([mesh, hull_ls]) - - pcl = sample_points_poisson_disk(mesh, number_of_points=2000) - hull = compute_point_cloud_convex_hull(pcl) - hull_ls = create_line_set_from_triangle_mesh(hull) - hull_ls.paint_uniform_color((1,0,0)) - draw_geometries([pcl, hull_ls]) + hull = o3d.geometry.compute_mesh_convex_hull(mesh) + hull_ls = o3d.geometry.create_line_set_from_triangle_mesh(hull) + hull_ls.paint_uniform_color((1, 0, 0)) + o3d.visualization.draw_geometries([mesh, hull_ls]) + pcl = o3d.geometry.sample_points_poisson_disk(mesh, + number_of_points=2000) + hull = o3d.geometry.compute_point_cloud_convex_hull(pcl) + hull_ls = o3d.geometry.create_line_set_from_triangle_mesh(hull) + hull_ls.paint_uniform_color((1, 0, 0)) + 
o3d.visualization.draw_geometries([pcl, hull_ls]) diff --git a/examples/Python/Basic/file_io.py b/examples/Python/Basic/file_io.py index 6aa0246ab39..142f8c77394 100644 --- a/examples/Python/Basic/file_io.py +++ b/examples/Python/Basic/file_io.py @@ -4,21 +4,21 @@ # examples/Python/Basic/file_io.py -from open3d import * +import open3d as o3d if __name__ == "__main__": print("Testing IO for point cloud ...") - pcd = read_point_cloud("../../TestData/fragment.pcd") + pcd = o3d.io.read_point_cloud("../../TestData/fragment.pcd") print(pcd) - write_point_cloud("copy_of_fragment.pcd", pcd) + o3d.io.write_point_cloud("copy_of_fragment.pcd", pcd) print("Testing IO for meshes ...") - mesh = read_triangle_mesh("../../TestData/knot.ply") + mesh = o3d.io.read_triangle_mesh("../../TestData/knot.ply") print(mesh) - write_triangle_mesh("copy_of_knot.ply", mesh) + o3d.io.write_triangle_mesh("copy_of_knot.ply", mesh) print("Testing IO for images ...") - img = read_image("../../TestData/lena_color.jpg") + img = o3d.io.read_image("../../TestData/lena_color.jpg") print(img) - write_image("copy_of_lena_color.jpg", img) + o3d.io.write_image("copy_of_lena_color.jpg", img) diff --git a/examples/Python/Basic/half_edge_mesh.py b/examples/Python/Basic/half_edge_mesh.py index 547eb85cb1a..ccc1b007cc4 100644 --- a/examples/Python/Basic/half_edge_mesh.py +++ b/examples/Python/Basic/half_edge_mesh.py @@ -9,7 +9,7 @@ def draw_geometries_with_back_face(geometries): - visualizer = o3d.Visualizer() + visualizer = o3d.visualization.Visualizer() visualizer.create_window() render_option = visualizer.get_render_option() render_option.mesh_show_back_face = True @@ -20,10 +20,10 @@ def draw_geometries_with_back_face(geometries): if __name__ == "__main__": - mesh = o3d.read_triangle_mesh("../../TestData/sphere.ply") - mesh = o3d.crop_triangle_mesh(mesh, [-1, -1, -1], [1, 0.6, 1]) + mesh = o3d.io.read_triangle_mesh("../../TestData/sphere.ply") + mesh = o3d.geometry.crop_triangle_mesh(mesh, [-1, -1, -1], [1, 0.6, 1]) mesh.purge() - mesh = o3d.create_half_edge_mesh_from_mesh(mesh) + mesh = o3d.geometry.create_half_edge_mesh_from_mesh(mesh) mesh.compute_vertex_normals() num_vertices = len(mesh.vertices) draw_geometries_with_back_face([mesh]) @@ -36,5 +36,5 @@ def draw_geometries_with_back_face(geometries): # Colorize boundary vertices vertex_colors = 0.75 * np.ones((num_vertices, 3)) vertex_colors[boundary_vertices, :] = np.array([1, 0, 0]) - mesh.vertex_colors = o3d.Vector3dVector(vertex_colors) + mesh.vertex_colors = o3d.utility.Vector3dVector(vertex_colors) draw_geometries_with_back_face([mesh]) diff --git a/examples/Python/Basic/icp_registration.py b/examples/Python/Basic/icp_registration.py index 26888f97053..0382a01ddfc 100644 --- a/examples/Python/Basic/icp_registration.py +++ b/examples/Python/Basic/icp_registration.py @@ -4,36 +4,37 @@ # examples/Python/Basic/icp_registration.py -from open3d import * +import open3d as o3d import numpy as np import copy + def draw_registration_result(source, target, transformation): source_temp = copy.deepcopy(source) target_temp = copy.deepcopy(target) source_temp.paint_uniform_color([1, 0.706, 0]) target_temp.paint_uniform_color([0, 0.651, 0.929]) source_temp.transform(transformation) - draw_geometries([source_temp, target_temp]) + o3d.visualization.draw_geometries([source_temp, target_temp]) + if __name__ == "__main__": - source = read_point_cloud("../../TestData/ICP/cloud_bin_0.pcd") - target = read_point_cloud("../../TestData/ICP/cloud_bin_1.pcd") + source = 
o3d.io.read_point_cloud("../../TestData/ICP/cloud_bin_0.pcd") target = o3d.io.read_point_cloud("../../TestData/ICP/cloud_bin_1.pcd") threshold = 0.02 - trans_init = np.asarray( - [[0.862, 0.011, -0.507, 0.5], - [-0.139, 0.967, -0.215, 0.7], - [0.487, 0.255, 0.835, -1.4], - [0.0, 0.0, 0.0, 1.0]]) + trans_init = np.asarray([[0.862, 0.011, -0.507, 0.5], + [-0.139, 0.967, -0.215, 0.7], + [0.487, 0.255, 0.835, -1.4], [0.0, 0.0, 0.0, 1.0]]) draw_registration_result(source, target, trans_init) print("Initial alignment") - evaluation = evaluate_registration(source, target, - threshold, trans_init) + evaluation = o3d.registration.evaluate_registration(source, target, + threshold, trans_init) print(evaluation) print("Apply point-to-point ICP") - reg_p2p = registration_icp(source, target, threshold, trans_init, - TransformationEstimationPointToPoint()) + reg_p2p = o3d.registration.registration_icp( + source, target, threshold, trans_init, + o3d.registration.TransformationEstimationPointToPoint()) print(reg_p2p) print("Transformation is:") print(reg_p2p.transformation) @@ -41,8 +42,9 @@ def draw_registration_result(source, target, transformation): draw_registration_result(source, target, reg_p2p.transformation) print("Apply point-to-plane ICP") - reg_p2l = registration_icp(source, target, threshold, trans_init, - TransformationEstimationPointToPlane()) + reg_p2l = o3d.registration.registration_icp( + source, target, threshold, trans_init, + o3d.registration.TransformationEstimationPointToPlane()) print(reg_p2l) print("Transformation is:") print(reg_p2l.transformation) diff --git a/examples/Python/Basic/kdtree.py b/examples/Python/Basic/kdtree.py index 62cea6d6009..841070ca231 100644 --- a/examples/Python/Basic/kdtree.py +++ b/examples/Python/Basic/kdtree.py @@ -5,15 +5,15 @@ # examples/Python/Basic/kdtree.py import numpy as np -from open3d import * +import open3d as o3d if __name__ == "__main__": print("Testing kdtree in open3d ...") print("Load a point cloud and paint it gray.") - pcd = read_point_cloud("../../TestData/Feature/cloud_bin_0.pcd") + pcd = o3d.io.read_point_cloud("../../TestData/Feature/cloud_bin_0.pcd") pcd.paint_uniform_color([0.5, 0.5, 0.5]) - pcd_tree = KDTreeFlann(pcd) + pcd_tree = o3d.geometry.KDTreeFlann(pcd) print("Paint the 1500th point red.") pcd.colors[1500] = [1, 0, 0] @@ -27,5 +27,5 @@ np.asarray(pcd.colors)[idx[1:], :] = [0, 1, 0] print("Visualize the point cloud.") - draw_geometries([pcd]) + o3d.visualization.draw_geometries([pcd]) print("") diff --git a/examples/Python/Basic/mesh.py b/examples/Python/Basic/mesh.py index 5b1b6767031..6ec7ecea54e 100644 --- a/examples/Python/Basic/mesh.py +++ b/examples/Python/Basic/mesh.py @@ -6,38 +6,38 @@ import copy import numpy as np -from open3d import * +import open3d as o3d if __name__ == "__main__": print("Testing mesh in open3d ...") - mesh = read_triangle_mesh("../../TestData/knot.ply") + mesh = o3d.io.read_triangle_mesh("../../TestData/knot.ply") print(mesh) print(np.asarray(mesh.vertices)) print(np.asarray(mesh.triangles)) print("") print("Try to render a mesh with normals (exist: " + - str(mesh.has_vertex_normals()) + - ") and colors (exist: " + str(mesh.has_vertex_colors()) + ")") + str(mesh.has_vertex_normals()) + ") and colors (exist: " + + str(mesh.has_vertex_colors()) + ")") - draw_geometries([mesh]) + o3d.visualization.draw_geometries([mesh]) print("A mesh with no normals and no colors does not seem good.") print("Computing normal and rendering it.") mesh.compute_vertex_normals()
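# ----------------------------------------------------------------------------
# A compact sketch of the KDTreeFlann queries that kdtree.py above relies on,
# written against the namespaced API introduced by this patch. The two search
# calls follow the Open3D 0.7 KDTreeFlann interface; the anchor index (1500)
# and the search radius (0.2) are illustrative values, not part of the patch.
import numpy as np
import open3d as o3d

pcd = o3d.io.read_point_cloud("../../TestData/Feature/cloud_bin_0.pcd")
pcd.paint_uniform_color([0.5, 0.5, 0.5])
pcd_tree = o3d.geometry.KDTreeFlann(pcd)
# 200 nearest neighbors of point 1500, painted green (skip the point itself)
[k, idx, _] = pcd_tree.search_knn_vector_3d(pcd.points[1500], 200)
np.asarray(pcd.colors)[idx[1:], :] = [0, 1, 0]
# all points within radius 0.2 of point 1500, painted blue
[k, idx, _] = pcd_tree.search_radius_vector_3d(pcd.points[1500], 0.2)
np.asarray(pcd.colors)[idx[1:], :] = [0, 0, 1]
o3d.visualization.draw_geometries([pcd])
# ----------------------------------------------------------------------------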
print(np.asarray(mesh.triangle_normals)) - draw_geometries([mesh]) + o3d.visualization.draw_geometries([mesh]) print("We make a partial mesh of only the first half triangles.") mesh1 = copy.deepcopy(mesh) - mesh1.triangles = Vector3iVector( - np.asarray(mesh1.triangles)[:len(mesh1.triangles)//2, :]) - mesh1.triangle_normals = Vector3dVector( - np.asarray(mesh1.triangle_normals) - [:len(mesh1.triangle_normals)//2, :]) + mesh1.triangles = o3d.utility.Vector3iVector( + np.asarray(mesh1.triangles)[:len(mesh1.triangles) // 2, :]) + mesh1.triangle_normals = o3d.utility.Vector3dVector( + np.asarray(mesh1.triangle_normals)[:len(mesh1.triangle_normals) // + 2, :]) print(mesh1.triangles) - draw_geometries([mesh1]) + o3d.visualization.draw_geometries([mesh1]) print("Painting the mesh") mesh1.paint_uniform_color([1, 0.706, 0]) - draw_geometries([mesh1]) + o3d.visualization.draw_geometries([mesh1]) diff --git a/examples/Python/Basic/mesh_filter.py b/examples/Python/Basic/mesh_filter.py index 05887d44618..ff000b1a265 100644 --- a/examples/Python/Basic/mesh_filter.py +++ b/examples/Python/Basic/mesh_filter.py @@ -5,36 +5,38 @@ # examples/Python/Basic/mesh_filtering.py import numpy as np -from open3d import * +import open3d as o3d + def test_mesh(noise=0): - mesh = read_triangle_mesh('../../TestData/knot.ply') + mesh = o3d.io.read_triangle_mesh('../../TestData/knot.ply') if noise > 0: vertices = np.asarray(mesh.vertices) vertices += np.random.uniform(0, noise, size=vertices.shape) - mesh.vertices = Vector3dVector(vertices) + mesh.vertices = o3d.utility.Vector3dVector(vertices) mesh.compute_vertex_normals() return mesh + if __name__ == '__main__': mesh = test_mesh() - draw_geometries([mesh]) + o3d.visualization.draw_geometries([mesh]) mesh = test_mesh() mesh.filter_sharpen(number_of_iterations=1, strength=1) - draw_geometries([mesh]) + o3d.visualization.draw_geometries([mesh]) mesh = test_mesh(noise=5) - draw_geometries([mesh]) + o3d.visualization.draw_geometries([mesh]) mesh.filter_smooth_simple(number_of_iterations=1) - draw_geometries([mesh]) + o3d.visualization.draw_geometries([mesh]) mesh = test_mesh(noise=5) - draw_geometries([mesh]) + o3d.visualization.draw_geometries([mesh]) mesh.filter_smooth_laplacian(number_of_iterations=100) - draw_geometries([mesh]) + o3d.visualization.draw_geometries([mesh]) mesh = test_mesh(noise=5) - draw_geometries([mesh]) + o3d.visualization.draw_geometries([mesh]) mesh.filter_smooth_taubin(number_of_iterations=100) - draw_geometries([mesh]) + o3d.visualization.draw_geometries([mesh]) diff --git a/examples/Python/Basic/mesh_properties.py b/examples/Python/Basic/mesh_properties.py index 69c48b227df..6535bde8a30 100644 --- a/examples/Python/Basic/mesh_properties.py +++ b/examples/Python/Basic/mesh_properties.py @@ -7,71 +7,78 @@ import numpy as np import time -from open3d import * +import open3d as o3d + def cat_meshes(mesh0, mesh1): - mesh = TriangleMesh() + mesh = o3d.geometry.TriangleMesh() vertices0 = np.asarray(mesh0.vertices) - vertices = np.vstack((vertices0, - np.asarray(mesh1.vertices))) - mesh.vertices = Vector3dVector(vertices) + vertices = np.vstack((vertices0, np.asarray(mesh1.vertices))) + mesh.vertices = o3d.utility.Vector3dVector(vertices) triangles = np.vstack((np.asarray(mesh0.triangles), np.asarray(mesh1.triangles) + vertices0.shape[0])) - mesh.triangles = Vector3iVector(triangles) + mesh.triangles = o3d.utility.Vector3iVector(triangles) return mesh + def mesh_generator(): - yield 'box', create_mesh_box() - yield 'sphere', create_mesh_sphere() - yield 
'cone', create_mesh_cone() - yield 'torus', create_mesh_torus(radial_resolution=30, tubular_resolution=20) - yield 'moebius (twists=1)', create_mesh_moebius(twists=1) - yield 'moebius (twists=2)', create_mesh_moebius(twists=2) - yield 'moebius (twists=3)', create_mesh_moebius(twists=3) - - yield 'knot', read_triangle_mesh('../../TestData/knot.ply') - - verts = np.array([[-1,0,0], [0,1,0], [1,0,0], [0,-1,0], [0,0,1]], dtype=np.float64) - triangles = np.array([[0,1,3], [1,2,3], [1,3,4]]) - mesh = TriangleMesh() - mesh.vertices = Vector3dVector(verts) - mesh.triangles = Vector3iVector(triangles) + yield 'box', o3d.geometry.create_mesh_box() + yield 'sphere', o3d.geometry.create_mesh_sphere() + yield 'cone', o3d.geometry.create_mesh_cone() + yield 'torus', o3d.geometry.create_mesh_torus(radial_resolution=30, + tubular_resolution=20) + yield 'moebius (twists=1)', o3d.geometry.create_mesh_moebius(twists=1) + yield 'moebius (twists=2)', o3d.geometry.create_mesh_moebius(twists=2) + yield 'moebius (twists=3)', o3d.geometry.create_mesh_moebius(twists=3) + + yield 'knot', o3d.io.read_triangle_mesh('../../TestData/knot.ply') + + verts = np.array([[-1, 0, 0], [0, 1, 0], [1, 0, 0], [0, -1, 0], [0, 0, 1]], + dtype=np.float64) + triangles = np.array([[0, 1, 3], [1, 2, 3], [1, 3, 4]]) + mesh = o3d.geometry.TriangleMesh() + mesh.vertices = o3d.utility.Vector3dVector(verts) + mesh.triangles = o3d.utility.Vector3iVector(triangles) yield 'non-manifold edge', mesh - verts = np.array([[-1,0,-1], [1,0,-1], [0,1,-1], [0,0,0], - [-1,0,1], [1,0,1], [0,1,1]], dtype=np.float64) - triangles = np.array([[0,1,2], [0,1,3], [1,2,3], [2,0,3], - [4,5,6], [4,5,3], [5,6,3], [4,6,3]]) - mesh = TriangleMesh() - mesh.vertices = Vector3dVector(verts) - mesh.triangles = Vector3iVector(triangles) + verts = np.array([[-1, 0, -1], [1, 0, -1], [0, 1, -1], [0, 0, 0], + [-1, 0, 1], [1, 0, 1], [0, 1, 1]], + dtype=np.float64) + triangles = np.array([[0, 1, 2], [0, 1, 3], [1, 2, 3], [2, 0, 3], [4, 5, 6], + [4, 5, 3], [5, 6, 3], [4, 6, 3]]) + mesh = o3d.geometry.TriangleMesh() + mesh.vertices = o3d.utility.Vector3dVector(verts) + mesh.triangles = o3d.utility.Vector3iVector(triangles) yield 'non-manifold vertex', mesh - mesh = create_mesh_box() - mesh.triangles = Vector3iVector(np.asarray(mesh.triangles)[:-2]) + mesh = o3d.geometry.create_mesh_box() + mesh.triangles = o3d.utility.Vector3iVector(np.asarray(mesh.triangles)[:-2]) yield 'open box', mesh - mesh0 = create_mesh_box() + mesh0 = o3d.geometry.create_mesh_box() T = np.eye(4) T[:, 3] += (0.5, 0.5, 0.5, 0) - mesh1 = create_mesh_box() + mesh1 = o3d.geometry.create_mesh_box() mesh1.transform(T) mesh = cat_meshes(mesh0, mesh1) yield 'boxes', mesh + def edges_to_lineset(mesh, edges, color): - ls = LineSet() + ls = o3d.geometry.LineSet() ls.points = mesh.vertices ls.lines = edges colors = np.empty((np.asarray(edges).shape[0], 3)) colors[:] = color - ls.colors = Vector3dVector(colors) + ls.colors = o3d.utility.Vector3dVector(colors) return ls def check_properties(name, mesh): + def fmt_bool(b): return 'yes' if b else 'no' + edge_manifold = mesh.is_edge_manifold(allow_boundary_edges=True) edge_manifold_boundary = mesh.is_edge_manifold(allow_boundary_edges=False) vertex_manifold = mesh.is_vertex_manifold() @@ -87,74 +94,83 @@ def fmt_bool(b): print(' orientable: %s' % fmt_bool(orientable)) mesh.compute_vertex_normals() - draw_geometries([mesh]) + o3d.visualization.draw_geometries([mesh]) if not edge_manifold: edges = mesh.get_non_manifold_edges(allow_boundary_edges=True) print(' # visualize 
non-manifold edges (allow_boundary_edges=True)') - draw_geometries([mesh, edges_to_lineset(mesh, edges, (1,0,0))]) + o3d.visualization.draw_geometries( + [mesh, edges_to_lineset(mesh, edges, (1, 0, 0))]) if not edge_manifold_boundary: edges = mesh.get_non_manifold_edges(allow_boundary_edges=False) print(' # visualize non-manifold edges (allow_boundary_edges=False)') - draw_geometries([mesh, edges_to_lineset(mesh, edges, (0,1,0))]) + o3d.visualization.draw_geometries( + [mesh, edges_to_lineset(mesh, edges, (0, 1, 0))]) if not vertex_manifold: verts = np.asarray(mesh.get_non_manifold_vertices()) print(' # visualize non-manifold vertices') - pcl = PointCloud() - pcl.points = Vector3dVector(np.asarray(mesh.vertices)[verts]) - pcl.paint_uniform_color((0,0,1)) - draw_geometries([mesh, pcl]) + pcl = o3d.geometry.PointCloud() + pcl.points = o3d.utility.Vector3dVector( + np.asarray(mesh.vertices)[verts]) + pcl.paint_uniform_color((0, 0, 1)) + o3d.visualization.draw_geometries([mesh, pcl]) if self_intersecting: - intersecting_triangles = np.asarray(mesh.get_self_intersecting_triangles()) + intersecting_triangles = np.asarray( + mesh.get_self_intersecting_triangles()) intersecting_triangles = intersecting_triangles[0:1] intersecting_triangles = np.unique(intersecting_triangles) print(' # visualize self-intersecting triangles') triangles = np.asarray(mesh.triangles)[intersecting_triangles] - edges = [np.vstack((triangles[:,i], triangles[:,j])) for i,j in [(0,1), (1,2), (2,0)]] + edges = [ + np.vstack((triangles[:, i], triangles[:, j])) + for i, j in [(0, 1), (1, 2), (2, 0)] + ] edges = np.hstack(edges).T - edges = Vector2iVector(edges) - draw_geometries([mesh, edges_to_lineset(mesh, edges, (1,1,0))]) + edges = o3d.utility.Vector2iVector(edges) + o3d.visualization.draw_geometries( + [mesh, edges_to_lineset(mesh, edges, (1, 1, 0))]) + if __name__ == "__main__": # test mesh properties - print('#'*80) + print('#' * 80) print('Test mesh properties') - print('#'*80) + print('#' * 80) for name, mesh in mesh_generator(): check_properties(name, mesh) # fix triangle orientation - print('#'*80) + print('#' * 80) print('Fix triangle orientation') - print('#'*80) + print('#' * 80) for name, mesh in mesh_generator(): mesh.compute_vertex_normals() triangles = np.asarray(mesh.triangles) rnd_idx = np.random.rand(*triangles.shape).argsort(axis=1) rnd_idx[0] = (0, 1, 2) triangles = np.take_along_axis(triangles, rnd_idx, axis=1) - mesh.triangles = Vector3iVector(triangles) - draw_geometries([mesh]) + mesh.triangles = o3d.utility.Vector3iVector(triangles) + o3d.visualization.draw_geometries([mesh]) sucess = mesh.orient_triangles() print('%s orientated: %s' % (name, 'yes' if sucess else 'no')) - draw_geometries([mesh]) + o3d.visualization.draw_geometries([mesh]) # intersection tests - print('#'*80) + print('#' * 80) print('Intersection tests') - print('#'*80) + print('#' * 80) np.random.seed(30) - bbox = create_mesh_box(20,20,20).translate((-10,-10,-10)) - meshes = [create_mesh_box() for _ in range(20)] - meshes.append(create_mesh_sphere()) - meshes.append(create_mesh_cone()) - meshes.append(create_mesh_torus()) + bbox = o3d.geometry.create_mesh_box(20, 20, 20).translate((-10, -10, -10)) + meshes = [o3d.geometry.create_mesh_box() for _ in range(20)] + meshes.append(o3d.geometry.create_mesh_sphere()) + meshes.append(o3d.geometry.create_mesh_cone()) + meshes.append(o3d.geometry.create_mesh_torus()) dirs = [np.random.uniform(-0.1, 0.1, size=(3,)) for _ in meshes] for mesh in meshes: mesh.compute_vertex_normals() 
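# ----------------------------------------------------------------------------
# A small sketch of the manifold predicates exercised by check_properties()
# above: two triangles sharing one edge, built from numpy arrays with the
# namespaced utility vectors. Assembled for illustration; the expected
# results in the comments follow from the definitions, not from the patch.
import numpy as np
import open3d as o3d

verts = np.array([[0, 0, 0], [1, 0, 0], [0, 1, 0], [1, 1, 0]],
                 dtype=np.float64)
tris = np.array([[0, 1, 2], [2, 1, 3]])  # the two faces share edge (1, 2)
mesh = o3d.geometry.TriangleMesh()
mesh.vertices = o3d.utility.Vector3dVector(verts)
mesh.triangles = o3d.utility.Vector3iVector(tris)
# True: no edge is referenced by more than two faces
print(mesh.is_edge_manifold(allow_boundary_edges=True))
# False: the four outer edges belong to a single face (boundary edges)
print(mesh.is_edge_manifold(allow_boundary_edges=False))
# True: every vertex has a single fan of faces around it
print(mesh.is_vertex_manifold())
# ----------------------------------------------------------------------------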
mesh.paint_uniform_color((0.5, 0.5, 0.5)) mesh.translate(np.random.uniform(-7.5, 7.5, size=(3,))) - vis = Visualizer() + vis = o3d.visualization.Visualizer() vis.create_window() for mesh in meshes: vis.add_geometry(mesh) diff --git a/examples/Python/Basic/mesh_sampling.py b/examples/Python/Basic/mesh_sampling.py index 22f42f1fa14..31f33756398 100644 --- a/examples/Python/Basic/mesh_sampling.py +++ b/examples/Python/Basic/mesh_sampling.py @@ -11,15 +11,19 @@ import tarfile import shutil import time -from open3d import * +import open3d as o3d + def create_mesh_plane(): - mesh = TriangleMesh() - mesh.vertices = Vector3dVector( - np.array([[0,0,0], [0,0.2,0], [1,0.2,0], [1,0,0]], dtype=np.float32)) - mesh.triangles = Vector3iVector(np.array([[0,2,1], [2,0,3]])) + mesh = o3d.geometry.TriangleMesh() + mesh.vertices = o3d.utility.Vector3dVector( + np.array([[0, 0, 0], [0, 0.2, 0], [1, 0.2, 0], [1, 0, 0]], + dtype=np.float32)) + mesh.triangles = o3d.utility.Vector3iVector(np.array([[0, 2, 1], [2, 0, + 3]])) return mesh + def armadillo_mesh(): armadillo_path = '../../TestData/Armadillo.ply' if not os.path.exists(armadillo_path): @@ -31,7 +35,8 @@ def armadillo_mesh(): with open(armadillo_path, 'wb') as fout: shutil.copyfileobj(fin, fout) os.remove(armadillo_path + '.gz') - return read_triangle_mesh(armadillo_path) + return o3d.io.read_triangle_mesh(armadillo_path) + def bunny_mesh(): bunny_path = '../../TestData/Bunny.ply' @@ -43,12 +48,12 @@ def bunny_mesh(): with tarfile.open(bunny_path + '.tar.gz') as tar: tar.extractall(path=os.path.dirname(bunny_path)) shutil.move( - os.path.join(os.path.dirname(bunny_path), - 'bunny', 'reconstruction', 'bun_zipper.ply'), - bunny_path) + os.path.join(os.path.dirname(bunny_path), 'bunny', 'reconstruction', + 'bun_zipper.ply'), bunny_path) os.remove(bunny_path + '.tar.gz') shutil.rmtree(os.path.join(os.path.dirname(bunny_path), 'bunny')) - return read_triangle_mesh(bunny_path) + return o3d.io.read_triangle_mesh(bunny_path) + def time_fcn(fcn, *fcn_args, runs=5): times = [] @@ -58,21 +63,25 @@ def time_fcn(fcn, *fcn_args, runs=5): times.append(time.time() - tic) return res, times + def mesh_generator(): yield create_mesh_plane() - yield create_mesh_sphere() + yield o3d.geometry.create_mesh_sphere() yield bunny_mesh() yield armadillo_mesh() + if __name__ == "__main__": plane = create_mesh_plane() - draw_geometries([plane]) + o3d.visualization.draw_geometries([plane]) print('Uniform sampling can yield clusters of points on the surface') - pcd = sample_points_uniformly(plane, number_of_points=500) - draw_geometries([pcd]) + pcd = o3d.geometry.sample_points_uniformly(plane, number_of_points=500) + o3d.visualization.draw_geometries([pcd]) - print('Poisson disk sampling can evenly distributes the points on the surface.') + print( + 'Poisson disk sampling can evenly distributes the points on the surface.' 
+ ) print('The method implements sample elimination.') print('Therefore, the method starts with a sampled point cloud and removes ' 'point to satisfy the sampling criterion.') @@ -80,27 +89,33 @@ def mesh_generator(): print('1) Default via the parameter init_factor: The method first samples ' 'uniformly a point cloud from the mesh with ' 'init_factor x number_of_points and uses this for the elimination') - pcd = sample_points_poisson_disk(plane, number_of_points=500, init_factor=5) - draw_geometries([pcd]) + pcd = o3d.geometry.sample_points_poisson_disk(plane, + number_of_points=500, + init_factor=5) + o3d.visualization.draw_geometries([pcd]) - print('2) one can provide an own point cloud and pass it to the ' - 'sample_points_poisson_disk method. Then this point cloud is used ' - 'for elimination.') + print( + '2) one can provide an own point cloud and pass it to the ' + 'o3d.geometry.sample_points_poisson_disk method. Then this point cloud is used ' + 'for elimination.') print('Initial point cloud') - pcd = sample_points_uniformly(plane, number_of_points=2500) - draw_geometries([pcd]) - pcd = sample_points_poisson_disk(plane, number_of_points=500, pcl=pcd) - draw_geometries([pcd]) + pcd = o3d.geometry.sample_points_uniformly(plane, number_of_points=2500) + o3d.visualization.draw_geometries([pcd]) + pcd = o3d.geometry.sample_points_poisson_disk(plane, + number_of_points=500, + pcl=pcd) + o3d.visualization.draw_geometries([pcd]) print('Timings') for mesh in mesh_generator(): mesh.compute_vertex_normals() - draw_geometries([mesh]) + o3d.visualization.draw_geometries([mesh]) - pcd, times = time_fcn(sample_points_uniformly, mesh, 500) + pcd, times = time_fcn(o3d.geometry.sample_points_uniformly, mesh, 500) print('sample uniform took on average: %f[s]' % np.mean(times)) - draw_geometries([pcd]) + o3d.visualization.draw_geometries([pcd]) - pcd, times = time_fcn(sample_points_poisson_disk, mesh, 500, 5) + pcd, times = time_fcn(o3d.geometry.sample_points_poisson_disk, mesh, + 500, 5) print('sample poisson disk took on average: %f[s]' % np.mean(times)) - draw_geometries([pcd]) + o3d.visualization.draw_geometries([pcd]) diff --git a/examples/Python/Basic/mesh_simplification.py b/examples/Python/Basic/mesh_simplification.py index 8d4ced3f8ca..fd09cfde0ba 100644 --- a/examples/Python/Basic/mesh_simplification.py +++ b/examples/Python/Basic/mesh_simplification.py @@ -2,70 +2,81 @@ # The MIT License (MIT) # See license file or visit www.open3d.org for details -# examples/Python/Basic/mesh_sampling.py +# examples/Python/Basic/mesh_simplification.py import numpy as np -from open3d import * +import open3d as o3d + def create_mesh_plane(): - mesh = TriangleMesh() - mesh.vertices = Vector3dVector( - np.array([[0,0,0], [0,1,0], [1,1,0], [1,0,0]], dtype=np.float32)) - mesh.triangles = Vector3iVector(np.array([[0,2,1], [2,0,3]])) + mesh = o3d.geometry.TriangleMesh() + mesh.vertices = o3d.utility.Vector3dVector( + np.array([[0, 0, 0], [0, 1, 0], [1, 1, 0], [1, 0, 0]], + dtype=np.float32)) + mesh.triangles = o3d.utility.Vector3iVector(np.array([[0, 2, 1], [2, 0, + 3]])) return mesh + def mesh_generator(): mesh = create_mesh_plane() - yield subdivide_midpoint(mesh, 2) + yield o3d.geometry.subdivide_midpoint(mesh, 2) - mesh = create_mesh_box() - yield subdivide_midpoint(mesh, 2) + mesh = o3d.geometry.create_mesh_box() + yield o3d.geometry.subdivide_midpoint(mesh, 2) - mesh = create_mesh_sphere() - yield subdivide_midpoint(mesh, 2) + mesh = o3d.geometry.create_mesh_sphere() + yield 
o3d.geometry.subdivide_midpoint(mesh, 2) - mesh = create_mesh_cone() - yield subdivide_midpoint(mesh, 2) + mesh = o3d.geometry.create_mesh_cone() + yield o3d.geometry.subdivide_midpoint(mesh, 2) - mesh = create_mesh_cylinder() - yield subdivide_midpoint(mesh, 2) + mesh = o3d.geometry.create_mesh_cylinder() + yield o3d.geometry.subdivide_midpoint(mesh, 2) - mesh = read_triangle_mesh("../../TestData/bathtub_0154.ply") + mesh = o3d.io.read_triangle_mesh("../../TestData/bathtub_0154.ply") yield mesh + if __name__ == "__main__": np.random.seed(42) for mesh in mesh_generator(): mesh.compute_vertex_normals() n_verts = np.asarray(mesh.vertices).shape[0] - mesh.vertex_colors = Vector3dVector(np.random.uniform(0,1, size=(n_verts,3))) + mesh.vertex_colors = o3d.utility.Vector3dVector( + np.random.uniform(0, 1, size=(n_verts, 3))) - print("original mesh has %d triangles and %d vertices" % - (np.asarray(mesh.triangles).shape[0], - np.asarray(mesh.vertices).shape[0])) - draw_geometries([mesh]) + print("original mesh has %d triangles and %d vertices" % (np.asarray( + mesh.triangles).shape[0], np.asarray(mesh.vertices).shape[0])) + o3d.visualization.draw_geometries([mesh]) voxel_size = max(mesh.get_max_bound() - mesh.get_min_bound()) / 4 target_number_of_triangles = np.asarray(mesh.triangles).shape[0] // 2 - mesh_smp = simplify_vertex_clustering(mesh, voxel_size=voxel_size, - contraction=SimplificationContraction.Average) - print("vertex clustered mesh (average) has %d triangles and %d vertices" % - (np.asarray(mesh_smp.triangles).shape[0], - np.asarray(mesh_smp.vertices).shape[0])) - draw_geometries([mesh_smp]) - - mesh_smp = simplify_vertex_clustering(mesh, voxel_size=voxel_size, - contraction=SimplificationContraction.Quadric) - print("vertex clustered mesh (quadric) has %d triangles and %d vertices" % - (np.asarray(mesh_smp.triangles).shape[0], - np.asarray(mesh_smp.vertices).shape[0])) - draw_geometries([mesh_smp]) - - mesh_smp = simplify_quadric_decimation(mesh, - target_number_of_triangles=target_number_of_triangles) + mesh_smp = o3d.geometry.simplify_vertex_clustering( + mesh, + voxel_size=voxel_size, + contraction=o3d.geometry.SimplificationContraction.Average) + print( + "vertex clustered mesh (average) has %d triangles and %d vertices" % + (np.asarray(mesh_smp.triangles).shape[0], + np.asarray(mesh_smp.vertices).shape[0])) + o3d.visualization.draw_geometries([mesh_smp]) + + mesh_smp = o3d.geometry.simplify_vertex_clustering( + mesh, + voxel_size=voxel_size, + contraction=o3d.geometry.SimplificationContraction.Quadric) + print( + "vertex clustered mesh (quadric) has %d triangles and %d vertices" % + (np.asarray(mesh_smp.triangles).shape[0], + np.asarray(mesh_smp.vertices).shape[0])) + o3d.visualization.draw_geometries([mesh_smp]) + + mesh_smp = o3d.geometry.simplify_quadric_decimation( + mesh, target_number_of_triangles=target_number_of_triangles) print("quadric decimated mesh has %d triangles and %d vertices" % - (np.asarray(mesh_smp.triangles).shape[0], - np.asarray(mesh_smp.vertices).shape[0])) - draw_geometries([mesh_smp]) + (np.asarray(mesh_smp.triangles).shape[0], + np.asarray(mesh_smp.vertices).shape[0])) + o3d.visualization.draw_geometries([mesh_smp]) diff --git a/examples/Python/Basic/mesh_subdivision.py b/examples/Python/Basic/mesh_subdivision.py index cac76f80ca6..ff2a77800ba 100644 --- a/examples/Python/Basic/mesh_subdivision.py +++ b/examples/Python/Basic/mesh_subdivision.py @@ -2,39 +2,46 @@ # The MIT License (MIT) # See license file or visit www.open3d.org for details -# 
examples/Python/Basic/mesh_sampling.py +# examples/Python/Basic/mesh_subdivision.py import numpy as np -from open3d import * +import open3d as o3d + def create_mesh_triangle(): - mesh = TriangleMesh() - mesh.vertices = Vector3dVector( - np.array([(np.sqrt(8/9), 0, -1/3), - (-np.sqrt(2/9), np.sqrt(2/3), -1/3), - (-np.sqrt(2/9), -np.sqrt(2/3), -1/3)], dtype=np.float32)) - mesh.triangles = Vector3iVector(np.array([[0,1,2]])) + mesh = o3d.geometry.TriangleMesh() + mesh.vertices = o3d.utility.Vector3dVector( + np.array([(np.sqrt(8 / 9), 0, -1 / 3), + (-np.sqrt(2 / 9), np.sqrt(2 / 3), -1 / 3), + (-np.sqrt(2 / 9), -np.sqrt(2 / 3), -1 / 3)], + dtype=np.float32)) + mesh.triangles = o3d.utility.Vector3iVector(np.array([[0, 1, 2]])) return mesh + def create_mesh_plane(): - mesh = TriangleMesh() - mesh.vertices = Vector3dVector( - np.array([[0,0,0], [0,1,0], [1,1,0], [1,0,0]], dtype=np.float32)) - mesh.triangles = Vector3iVector(np.array([[0,2,1], [2,0,3]])) + mesh = o3d.geometry.TriangleMesh() + mesh.vertices = o3d.utility.Vector3dVector( + np.array([[0, 0, 0], [0, 1, 0], [1, 1, 0], [1, 0, 0]], + dtype=np.float32)) + mesh.triangles = o3d.utility.Vector3iVector(np.array([[0, 2, 1], [2, 0, + 3]])) return mesh + def mesh_generator(): yield create_mesh_triangle() yield create_mesh_plane() - yield create_mesh_tetrahedron() - yield create_mesh_box() - yield create_mesh_octahedron() - yield create_mesh_icosahedron() - yield create_mesh_sphere() - yield create_mesh_cone() - yield create_mesh_cylinder() - yield read_triangle_mesh("../../TestData/knot.ply") - yield read_triangle_mesh("../../TestData/bathtub_0154.ply") + yield o3d.geometry.create_mesh_tetrahedron() + yield o3d.geometry.create_mesh_box() + yield o3d.geometry.create_mesh_octahedron() + yield o3d.geometry.create_mesh_icosahedron() + yield o3d.geometry.create_mesh_sphere() + yield o3d.geometry.create_mesh_cone() + yield o3d.geometry.create_mesh_cylinder() + yield o3d.io.read_triangle_mesh("../../TestData/knot.ply") + yield o3d.io.read_triangle_mesh("../../TestData/bathtub_0154.ply") + if __name__ == "__main__": np.random.seed(42) @@ -44,24 +51,23 @@ def mesh_generator(): for mesh in mesh_generator(): mesh.compute_vertex_normals() n_verts = np.asarray(mesh.vertices).shape[0] - colors = np.random.uniform(0,1, size=(n_verts,3)) - mesh.vertex_colors = Vector3dVector(colors) + colors = np.random.uniform(0, 1, size=(n_verts, 3)) + mesh.vertex_colors = o3d.utility.Vector3dVector(colors) - print("original mesh has %d triangles and %d vertices" % - (np.asarray(mesh.triangles).shape[0], - np.asarray(mesh.vertices).shape[0])) - draw_geometries([mesh]) + print("original mesh has %d triangles and %d vertices" % (np.asarray( + mesh.triangles).shape[0], np.asarray(mesh.vertices).shape[0])) + o3d.visualization.draw_geometries([mesh]) - mesh_up = subdivide_midpoint(mesh, - number_of_iterations=number_of_iterations) + mesh_up = o3d.geometry.subdivide_midpoint( + mesh, number_of_iterations=number_of_iterations) print("midpoint upsampled mesh has %d triangles and %d vertices" % - (np.asarray(mesh_up.triangles).shape[0], - np.asarray(mesh_up.vertices).shape[0])) - draw_geometries([mesh_up]) + (np.asarray(mesh_up.triangles).shape[0], + np.asarray(mesh_up.vertices).shape[0])) + o3d.visualization.draw_geometries([mesh_up]) - mesh_up = subdivide_loop(mesh, - number_of_iterations=number_of_iterations) + mesh_up = o3d.geometry.subdivide_loop( + mesh, number_of_iterations=number_of_iterations) print("loop upsampled mesh has %d triangles and %d vertices" % - 
(np.asarray(mesh_up.triangles).shape[0], - np.asarray(mesh_up.vertices).shape[0])) - draw_geometries([mesh_up]) + (np.asarray(mesh_up.triangles).shape[0], + np.asarray(mesh_up.vertices).shape[0])) + o3d.visualization.draw_geometries([mesh_up]) diff --git a/examples/Python/Basic/mesh_voxelization.py b/examples/Python/Basic/mesh_voxelization.py index a238620df7c..69e9790ac84 100644 --- a/examples/Python/Basic/mesh_voxelization.py +++ b/examples/Python/Basic/mesh_voxelization.py @@ -1,56 +1,56 @@ -from open3d import * +import open3d as o3d import math import numpy as np import numpy.matlib import matplotlib.pyplot as plt + def get_extrinsic(xyz): rvec = xyz_spherical(xyz) r = get_rotation_matrix(rvec[1], rvec[2]) - t = np.asarray([0,0,2]).transpose() + t = np.asarray([0, 0, 2]).transpose() trans = np.eye(4) - trans[:3,:3] = r - trans[:3,3] = t + trans[:3, :3] = r + trans[:3, 3] = t return trans + def xyz_spherical(xyz): x = xyz[0] y = xyz[1] z = xyz[2] - r = math.sqrt(x*x+y*y+z*z) - r_x = math.acos(y/r) - r_y = math.atan2(z,x) + r = math.sqrt(x * x + y * y + z * z) + r_x = math.acos(y / r) + r_y = math.atan2(z, x) return [r, r_x, r_y] + def get_rotation_matrix(r_x, r_y): - rot_x = np.asarray( - [[1, 0, 0], - [0, math.cos(r_x), -math.sin(r_x)], - [0, math.sin(r_x), math.cos(r_x)]]) - rot_y = np.asarray( - [[math.cos(r_y), 0, math.sin(r_y)], - [0, 1, 0], - [-math.sin(r_y), 0, math.cos(r_y)]]) + rot_x = np.asarray([[1, 0, 0], [0, math.cos(r_x), -math.sin(r_x)], + [0, math.sin(r_x), math.cos(r_x)]]) + rot_y = np.asarray([[math.cos(r_y), 0, math.sin(r_y)], [0, 1, 0], + [-math.sin(r_y), 0, math.cos(r_y)]]) return rot_y.dot(rot_x) + def depth_to_pcd(depth, intrinsic, extrinsic, w, h): - x = np.linspace(0, w-1, w) - y = np.linspace(0, h-1, h) + x = np.linspace(0, w - 1, w) + y = np.linspace(0, h - 1, h) uu, vv = np.meshgrid(x, y) uu_vector = uu.ravel() vv_vector = vv.ravel() depth_vector = np.asarray(depth, dtype=np.float32).ravel() - uvd = np.asarray([uu_vector * depth_vector, - vv_vector * depth_vector, - depth_vector]) + uvd = np.asarray( + [uu_vector * depth_vector, vv_vector * depth_vector, depth_vector]) uvd_roi = uvd[:, depth_vector != 0] xyz_3d = np.linalg.inv(intrinsic).dot(uvd_roi) - pcd = PointCloud() - pcd.points = Vector3dVector(xyz_3d.transpose()) + pcd = o3d.geometry.PointCloud() + pcd.points = o3d.utility.Vector3dVector(xyz_3d.transpose()) pcd.transform(np.linalg.inv(extrinsic)) return pcd + def preprocess(model): min_bound = model.get_min_bound() max_bound = model.get_max_bound() @@ -58,13 +58,14 @@ def preprocess(model): scale = np.linalg.norm(max_bound - min_bound) / 2.0 vertices = np.asarray(model.vertices) vertices -= np.matlib.repmat(center, len(model.vertices), 1) - model.vertices = Vector3dVector(vertices / scale) + model.vertices = o3d.utility.Vector3dVector(vertices / scale) return model -sphere = read_triangle_mesh("../../TestData/sphere.ply") -model = read_triangle_mesh("../../TestData/bathtub_0154.ply") + +sphere = o3d.io.read_triangle_mesh("../../TestData/sphere.ply") +model = o3d.io.read_triangle_mesh("../../TestData/bathtub_0154.ply") print("visualize model") -draw_geometries([model]) +o3d.visualization.draw_geometries([model]) # rescale geometry sphere = preprocess(sphere) @@ -72,24 +73,24 @@ def preprocess(model): w = 320 h = 320 -vis = Visualizer() -vis.create_window(width = w, height = h) +vis = o3d.visualization.Visualizer() +vis.create_window(width=w, height=h) vis.add_geometry(model) vis.get_render_option().mesh_show_back_face = True ctr = vis.get_view_control() 
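The depth_to_pcd() helper above is standard pinhole back-projection: each pixel (u, v) is scaled by its depth value and multiplied by the inverse intrinsic matrix to recover a 3D point in camera coordinates. As a quick sanity check of that math, here is a minimal NumPy-only sketch on a synthetic depth image; the intrinsic values are made up for illustration and are not taken from the patch or from Open3D's test data:

import numpy as np

# Hypothetical 3x3 pinhole intrinsics (fx, fy, cx, cy are invented values).
K = np.array([[525.0, 0.0, 160.0],
              [0.0, 525.0, 120.0],
              [0.0, 0.0, 1.0]])
w, h = 320, 240
depth = np.full((h, w), 2.0)  # synthetic flat wall, 2 m in front of the camera
u, v = np.meshgrid(np.arange(w), np.arange(h))
d = depth.ravel()
uvd = np.vstack([u.ravel() * d, v.ravel() * d, d])  # pixels scaled by depth
xyz = np.linalg.inv(K).dot(uvd)                     # back-project to camera space
assert xyz.shape == (3, w * h)
assert np.allclose(xyz[2], 2.0)  # every point lies on the z = 2 plane, as expected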
param = ctr.convert_to_pinhole_camera_parameters() -pcd_agg = PointCloud() +pcd_agg = o3d.geometry.PointCloud() n_pts = len(sphere.vertices) -centers_pts = np.zeros((n_pts,3)) +centers_pts = np.zeros((n_pts, 3)) i = 0 for xyz in sphere.vertices: # get new camera pose trans = get_extrinsic(xyz) param.extrinsic = trans - c = np.linalg.inv(trans).dot(np.asarray([0,0,0,1]).transpose()) - centers_pts[i,:] = c[:3] + c = np.linalg.inv(trans).dot(np.asarray([0, 0, 0, 1]).transpose()) + centers_pts[i, :] = c[:3] i += 1 ctr.convert_from_pinhole_camera_parameters(param) @@ -97,25 +98,26 @@ def preprocess(model): vis.poll_events() vis.update_renderer() depth = vis.capture_depth_float_buffer(False) - pcd_agg += depth_to_pcd(depth, - param.intrinsic.intrinsic_matrix, trans, w, h) + pcd_agg += depth_to_pcd(depth, param.intrinsic.intrinsic_matrix, trans, w, + h) vis.destroy_window() print("visualize camera center") -centers = PointCloud() -centers.points = Vector3dVector(centers_pts) -draw_geometries([centers, model]) +centers = o3d.geometry.PointCloud() +centers.points = o3d.utility.Vector3dVector(centers_pts) +o3d.visualization.draw_geometries([centers, model]) print("voxelize dense point cloud") -voxel = create_surface_voxel_grid_from_point_cloud(pcd_agg, voxel_size=0.05) +voxel = o3d.geometry.create_surface_voxel_grid_from_point_cloud(pcd_agg, + voxel_size=0.05) print(voxel) -draw_geometries([voxel]) +o3d.visualization.draw_geometries([voxel]) -print("save and load VoxelGrid") -write_voxel_grid("voxel_grid_test.ply", voxel) -voxel_read = read_voxel_grid("voxel_grid_test.ply") +print("save and load o3d.geometry.VoxelGrid") +o3d.io.write_voxel_grid("voxel_grid_test.ply", voxel) +voxel_read = o3d.io.read_voxel_grid("voxel_grid_test.ply") print(voxel_read) print("visualize original model and voxels together") -draw_geometries([voxel_read, model]) +o3d.visualization.draw_geometries([voxel_read, model]) diff --git a/examples/Python/Basic/pointcloud.py b/examples/Python/Basic/pointcloud.py index ae5c34cfb07..5edf48436de 100644 --- a/examples/Python/Basic/pointcloud.py +++ b/examples/Python/Basic/pointcloud.py @@ -5,38 +5,41 @@ # examples/Python/Basic/pointcloud.py import numpy as np -from open3d import * +import open3d as o3d if __name__ == "__main__": print("Load a ply point cloud, print it, and render it") - pcd = read_point_cloud("../../TestData/fragment.ply") + pcd = o3d.io.read_point_cloud("../../TestData/fragment.ply") print(pcd) print(np.asarray(pcd.points)) - draw_geometries([pcd]) + o3d.visualization.draw_geometries([pcd]) print("Downsample the point cloud with a voxel of 0.05") - downpcd = voxel_down_sample(pcd, voxel_size = 0.05) - draw_geometries([downpcd]) + downpcd = o3d.geometry.voxel_down_sample(pcd, voxel_size=0.05) + o3d.visualization.draw_geometries([downpcd]) print("Recompute the normal of the downsampled point cloud") - estimate_normals(downpcd, search_param = KDTreeSearchParamHybrid( - radius = 0.1, max_nn = 30)) - draw_geometries([downpcd]) + o3d.geometry.estimate_normals( + downpcd, + search_param=o3d.geometry.KDTreeSearchParamHybrid(radius=0.1, + max_nn=30)) + o3d.visualization.draw_geometries([downpcd]) print("Print a normal vector of the 0th point") print(downpcd.normals[0]) print("Print the normal vectors of the first 10 points") - print(np.asarray(downpcd.normals)[:10,:]) + print(np.asarray(downpcd.normals)[:10, :]) print("") print("Load a polygon volume and use it to crop the original point cloud") - vol = read_selection_polygon_volume("../../TestData/Crop/cropped.json") - chair 
= vol.crop_point_cloud(pcd) - draw_geometries([chair]) + vol = o3d.visualization.read_selection_polygon_volume( + "../../TestData/Crop/cropped.json") + chair = vol.crop_point_cloud(pcd) + o3d.visualization.draw_geometries([chair]) print("") print("Paint chair") chair.paint_uniform_color([1, 0.706, 0]) - draw_geometries([chair]) + o3d.visualization.draw_geometries([chair]) print("") diff --git a/examples/Python/Basic/python_binding.py b/examples/Python/Basic/python_binding.py index 5a215408946..08991d7668c 100644 --- a/examples/Python/Basic/python_binding.py +++ b/examples/Python/Basic/python_binding.py @@ -4,18 +4,19 @@ # examples/Python/Basic/python_binding.py -import numpy as np +import open3d as o3d + def example_import_function(): - from open3d import read_point_cloud - pcd = read_point_cloud("../../TestData/ICP/cloud_bin_0.pcd") + pcd = o3d.io.read_point_cloud("../../TestData/ICP/cloud_bin_0.pcd") print(pcd) + def example_help_function(): - import open3d - help(open3d) - help(open3d.PointCloud) - help(open3d.read_point_cloud) + help(o3d) + help(o3d.geometry.PointCloud) + help(o3d.io.read_point_cloud) + if __name__ == "__main__": example_import_function() diff --git a/examples/Python/Basic/rgbd_nyu.py b/examples/Python/Basic/rgbd_nyu.py index 917b8110b0e..da4651dc78b 100644 --- a/examples/Python/Basic/rgbd_nyu.py +++ b/examples/Python/Basic/rgbd_nyu.py @@ -4,12 +4,13 @@ # examples/Python/Basic/rgbd_nyu.py -from open3d import * +import open3d as o3d import numpy as np import re import matplotlib.image as mpimg import matplotlib.pyplot as plt + # This is special function used for reading NYU pgm format # as it is written in big endian byte order. def read_nyu_pgm(filename, byteorder='>'): @@ -24,21 +25,22 @@ def read_nyu_pgm(filename, byteorder='>'): except AttributeError: raise ValueError("Not a raw PGM file: '%s'" % filename) img = np.frombuffer(buffer, - dtype=byteorder+'u2', - count=int(width)*int(height), - offset=len(header)).reshape((int(height), int(width))) + dtype=byteorder + 'u2', + count=int(width) * int(height), + offset=len(header)).reshape((int(height), int(width))) img_out = img.astype('u2') return img_out + if __name__ == "__main__": print("Read NYU dataset") - # Open3D does not support ppm/pgm file yet. Not using read_image here. + # Open3D does not support ppm/pgm file yet. Not using o3d.io.read_image here. # MathplotImage having some ISSUE with NYU pgm file. Not using imread for pgm. 
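The byteorder='>' default above is the crux of read_nyu_pgm(): NYU depth .pgm files store their 16-bit samples big-endian, so decoding with the native (usually little-endian) order would scramble every depth value. A tiny self-contained sketch of the same np.frombuffer call on made-up bytes, showing both decodings:

import numpy as np

# Three synthetic big-endian uint16 samples: 0x0100, 0x0200, 0x0300.
raw = bytes([0x01, 0x00, 0x02, 0x00, 0x03, 0x00])
print(np.frombuffer(raw, dtype='>u2'))  # '>' forces big-endian: [256 512 768]
print(np.frombuffer(raw, dtype='<u2'))  # little-endian misread:  [  1   2   3]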
color_raw = mpimg.imread("../../TestData/RGBD/other_formats/NYU_color.ppm") depth_raw = read_nyu_pgm("../../TestData/RGBD/other_formats/NYU_depth.pgm") - color = Image(color_raw) - depth = Image(depth_raw) - rgbd_image = create_rgbd_image_from_nyu_format(color, depth) + color = o3d.geometry.Image(color_raw) + depth = o3d.geometry.Image(depth_raw) + rgbd_image = o3d.geometry.create_rgbd_image_from_nyu_format(color, depth) print(rgbd_image) plt.subplot(1, 2, 1) plt.title('NYU grayscale image') @@ -47,8 +49,10 @@ def read_nyu_pgm(filename, byteorder='>'): plt.title('NYU depth image') plt.imshow(rgbd_image.depth) plt.show() - pcd = create_point_cloud_from_rgbd_image(rgbd_image, PinholeCameraIntrinsic( - PinholeCameraIntrinsicParameters.PrimeSenseDefault)) + pcd = o3d.geometry.create_point_cloud_from_rgbd_image( + rgbd_image, + o3d.camera.PinholeCameraIntrinsic( + o3d.camera.PinholeCameraIntrinsicParameters.PrimeSenseDefault)) # Flip it, otherwise the pointcloud will be upside down pcd.transform([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]]) - draw_geometries([pcd]) + o3d.visualization.draw_geometries([pcd]) diff --git a/examples/Python/Basic/rgbd_odometry.py b/examples/Python/Basic/rgbd_odometry.py index c6eadee3cf6..724f1275556 100644 --- a/examples/Python/Basic/rgbd_odometry.py +++ b/examples/Python/Basic/rgbd_odometry.py @@ -4,49 +4,49 @@ # examples/Python/Basic/rgbd_odometry.py -from open3d import * +import open3d as o3d import numpy as np if __name__ == "__main__": - pinhole_camera_intrinsic = read_pinhole_camera_intrinsic( - "../../TestData/camera_primesense.json") + pinhole_camera_intrinsic = o3d.io.read_pinhole_camera_intrinsic( + "../../TestData/camera_primesense.json") print(pinhole_camera_intrinsic.intrinsic_matrix) - source_color = read_image("../../TestData/RGBD/color/00000.jpg") - source_depth = read_image("../../TestData/RGBD/depth/00000.png") - target_color = read_image("../../TestData/RGBD/color/00001.jpg") - target_depth = read_image("../../TestData/RGBD/depth/00001.png") - source_rgbd_image = create_rgbd_image_from_color_and_depth( - source_color, source_depth) - target_rgbd_image = create_rgbd_image_from_color_and_depth( - target_color, target_depth) - target_pcd = create_point_cloud_from_rgbd_image( - target_rgbd_image, pinhole_camera_intrinsic) + source_color = o3d.io.read_image("../../TestData/RGBD/color/00000.jpg") + source_depth = o3d.io.read_image("../../TestData/RGBD/depth/00000.png") + target_color = o3d.io.read_image("../../TestData/RGBD/color/00001.jpg") + target_depth = o3d.io.read_image("../../TestData/RGBD/depth/00001.png") + source_rgbd_image = o3d.geometry.create_rgbd_image_from_color_and_depth( + source_color, source_depth) + target_rgbd_image = o3d.geometry.create_rgbd_image_from_color_and_depth( + target_color, target_depth) + target_pcd = o3d.geometry.create_point_cloud_from_rgbd_image( + target_rgbd_image, pinhole_camera_intrinsic) - option = OdometryOption() + option = o3d.odometry.OdometryOption() odo_init = np.identity(4) print(option) - [success_color_term, trans_color_term, info] = compute_rgbd_odometry( - source_rgbd_image, target_rgbd_image, - pinhole_camera_intrinsic, odo_init, - RGBDOdometryJacobianFromColorTerm(), option) - [success_hybrid_term, trans_hybrid_term, info] = compute_rgbd_odometry( - source_rgbd_image, target_rgbd_image, - pinhole_camera_intrinsic, odo_init, - RGBDOdometryJacobianFromHybridTerm(), option) + [success_color_term, trans_color_term, + info] = o3d.odometry.compute_rgbd_odometry( + source_rgbd_image, 
target_rgbd_image, pinhole_camera_intrinsic, + odo_init, o3d.odometry.RGBDOdometryJacobianFromColorTerm(), option) + [success_hybrid_term, trans_hybrid_term, + info] = o3d.odometry.compute_rgbd_odometry( + source_rgbd_image, target_rgbd_image, pinhole_camera_intrinsic, + odo_init, o3d.odometry.RGBDOdometryJacobianFromHybridTerm(), option) if success_color_term: print("Using RGB-D Odometry") print(trans_color_term) - source_pcd_color_term = create_point_cloud_from_rgbd_image( - source_rgbd_image, pinhole_camera_intrinsic) + source_pcd_color_term = o3d.geometry.create_point_cloud_from_rgbd_image( + source_rgbd_image, pinhole_camera_intrinsic) source_pcd_color_term.transform(trans_color_term) - draw_geometries([target_pcd, source_pcd_color_term]) + o3d.visualization.draw_geometries([target_pcd, source_pcd_color_term]) if success_hybrid_term: print("Using Hybrid RGB-D Odometry") print(trans_hybrid_term) - source_pcd_hybrid_term = create_point_cloud_from_rgbd_image( - source_rgbd_image, pinhole_camera_intrinsic) + source_pcd_hybrid_term = o3d.geometry.create_point_cloud_from_rgbd_image( + source_rgbd_image, pinhole_camera_intrinsic) source_pcd_hybrid_term.transform(trans_hybrid_term) - draw_geometries([target_pcd, source_pcd_hybrid_term]) + o3d.visualization.draw_geometries([target_pcd, source_pcd_hybrid_term]) diff --git a/examples/Python/Basic/rgbd_redwood.py b/examples/Python/Basic/rgbd_redwood.py index 7072d7e53aa..e4122b809de 100644 --- a/examples/Python/Basic/rgbd_redwood.py +++ b/examples/Python/Basic/rgbd_redwood.py @@ -4,17 +4,17 @@ # examples/Python/Basic/rgbd_redwood.py -from open3d import * +import open3d as o3d import matplotlib.pyplot as plt - if __name__ == "__main__": print("Read Redwood dataset") - color_raw = read_image("../../TestData/RGBD/color/00000.jpg") - depth_raw = read_image("../../TestData/RGBD/depth/00000.png") - rgbd_image = create_rgbd_image_from_color_and_depth( - color_raw, depth_raw); + color_raw = o3d.io.read_image("../../TestData/RGBD/color/00000.jpg") + depth_raw = o3d.io.read_image("../../TestData/RGBD/depth/00000.png") + rgbd_image = o3d.geometry.create_rgbd_image_from_color_and_depth( + color_raw, depth_raw) print(rgbd_image) + plt.subplot(1, 2, 1) plt.title('Redwood grayscale image') plt.imshow(rgbd_image.color) @@ -22,8 +22,11 @@ plt.title('Redwood depth image') plt.imshow(rgbd_image.depth) plt.show() - pcd = create_point_cloud_from_rgbd_image(rgbd_image, PinholeCameraIntrinsic( - PinholeCameraIntrinsicParameters.PrimeSenseDefault)) + + pcd = o3d.geometry.create_point_cloud_from_rgbd_image( + rgbd_image, + o3d.camera.PinholeCameraIntrinsic( + o3d.camera.PinholeCameraIntrinsicParameters.PrimeSenseDefault)) # Flip it, otherwise the pointcloud will be upside down pcd.transform([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]]) - draw_geometries([pcd]) + o3d.visualization.draw_geometries([pcd]) diff --git a/examples/Python/Basic/rgbd_sun.py b/examples/Python/Basic/rgbd_sun.py index 8157594fd9d..360aacb8740 100644 --- a/examples/Python/Basic/rgbd_sun.py +++ b/examples/Python/Basic/rgbd_sun.py @@ -4,15 +4,17 @@ # examples/Python/Basic/rgbd_sun.py -from open3d import * +import open3d as o3d import matplotlib.pyplot as plt - if __name__ == "__main__": print("Read SUN dataset") - color_raw = read_image("../../TestData/RGBD/other_formats/SUN_color.jpg") - depth_raw = read_image("../../TestData/RGBD/other_formats/SUN_depth.png") - rgbd_image = create_rgbd_image_from_sun_format(color_raw, depth_raw); + color_raw = o3d.io.read_image( + 
"../../TestData/RGBD/other_formats/SUN_color.jpg") + depth_raw = o3d.io.read_image( + "../../TestData/RGBD/other_formats/SUN_depth.png") + rgbd_image = o3d.geometry.create_rgbd_image_from_sun_format( + color_raw, depth_raw) print(rgbd_image) plt.subplot(1, 2, 1) plt.title('SUN grayscale image') @@ -21,8 +23,10 @@ plt.title('SUN depth image') plt.imshow(rgbd_image.depth) plt.show() - pcd = create_point_cloud_from_rgbd_image(rgbd_image, PinholeCameraIntrinsic( - PinholeCameraIntrinsicParameters.PrimeSenseDefault)) + pcd = o3d.geometry.create_point_cloud_from_rgbd_image( + rgbd_image, + o3d.camera.PinholeCameraIntrinsic( + o3d.camera.PinholeCameraIntrinsicParameters.PrimeSenseDefault)) # Flip it, otherwise the pointcloud will be upside down pcd.transform([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]]) - draw_geometries([pcd]) + o3d.visualization.draw_geometries([pcd]) diff --git a/examples/Python/Basic/rgbd_tum.py b/examples/Python/Basic/rgbd_tum.py index 9b2fc5bbb13..d02bcc3f071 100644 --- a/examples/Python/Basic/rgbd_tum.py +++ b/examples/Python/Basic/rgbd_tum.py @@ -4,15 +4,17 @@ # examples/Python/Basic/rgbd_tum.py -from open3d import * +import open3d as o3d import matplotlib.pyplot as plt - if __name__ == "__main__": print("Read TUM dataset") - color_raw = read_image("../../TestData/RGBD/other_formats/TUM_color.png") - depth_raw = read_image("../../TestData/RGBD/other_formats/TUM_depth.png") - rgbd_image = create_rgbd_image_from_tum_format(color_raw, depth_raw); + color_raw = o3d.io.read_image( + "../../TestData/RGBD/other_formats/TUM_color.png") + depth_raw = o3d.io.read_image( + "../../TestData/RGBD/other_formats/TUM_depth.png") + rgbd_image = o3d.geometry.create_rgbd_image_from_tum_format( + color_raw, depth_raw) print(rgbd_image) plt.subplot(1, 2, 1) plt.title('TUM grayscale image') @@ -21,8 +23,10 @@ plt.title('TUM depth image') plt.imshow(rgbd_image.depth) plt.show() - pcd = create_point_cloud_from_rgbd_image(rgbd_image, PinholeCameraIntrinsic( - PinholeCameraIntrinsicParameters.PrimeSenseDefault)) + pcd = o3d.geometry.create_point_cloud_from_rgbd_image( + rgbd_image, + o3d.camera.PinholeCameraIntrinsic( + o3d.camera.PinholeCameraIntrinsicParameters.PrimeSenseDefault)) # Flip it, otherwise the pointcloud will be upside down pcd.transform([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]]) - draw_geometries([pcd]) + o3d.visualization.draw_geometries([pcd]) diff --git a/examples/Python/Basic/transformation.py b/examples/Python/Basic/transformation.py index f3d85b927ec..2ff3ca80f7f 100644 --- a/examples/Python/Basic/transformation.py +++ b/examples/Python/Basic/transformation.py @@ -5,43 +5,49 @@ # examples/Python/Utility/visualization.py import numpy as np -from open3d import * +import open3d as o3d import time + def geometry_generator(): - mesh = create_mesh_sphere() + mesh = o3d.geometry.create_mesh_sphere() verts = np.asarray(mesh.vertices) - colors = np.random.uniform(0,1, size=verts.shape) - mesh.vertex_colors = Vector3dVector(colors) + colors = np.random.uniform(0, 1, size=verts.shape) + mesh.vertex_colors = o3d.utility.Vector3dVector(colors) mesh.compute_vertex_normals() yield mesh - pcl = PointCloud() + pcl = o3d.geometry.PointCloud() pcl.points = mesh.vertices pcl.colors = mesh.vertex_colors pcl.normals = mesh.vertex_normals yield pcl - ls = LineSet() - ls.points = Vector3dVector(np.array([(0,0,0), (1,0,0), (1,0,1), (0,0,1), - (0,1,0), (1,1,0), (1,1,1), (0,1,1)], dtype=np.float64)) - ls.lines = Vector2iVector(np.array([(0,1), (0,4), (0,3), (2,3), (2,1), - 
(2,6), (5,1), (5,4), (5,6), (7,3), (7,6), (7,4)])) + ls = o3d.geometry.LineSet() + ls.points = o3d.utility.Vector3dVector( + np.array([(0, 0, 0), (1, 0, 0), (1, 0, 1), (0, 0, 1), (0, 1, 0), + (1, 1, 0), (1, 1, 1), (0, 1, 1)], + dtype=np.float64)) + ls.lines = o3d.utility.Vector2iVector( + np.array([(0, 1), (0, 4), (0, 3), (2, 3), (2, 1), (2, 6), (5, 1), + (5, 4), (5, 6), (7, 3), (7, 6), (7, 4)])) yield ls + def animate(geom): - vis = Visualizer() + vis = o3d.visualization.Visualizer() vis.create_window() geom.rotate(np.array((0.75, 0.5, 0))) vis.add_geometry(geom) - scales = [0.9 for _ in range(30)] + [1/0.9 for _ in range(30)] - axisangles = [(0.2/np.sqrt(2), 0.2/np.sqrt(2), 0) for _ in range(60)] - ts = [(0.1, 0.1, -0.1) for _ in range(30)] + [(-0.1, -0.1, 0.1) for _ in range(30)] + scales = [0.9 for _ in range(30)] + [1 / 0.9 for _ in range(30)] + axisangles = [(0.2 / np.sqrt(2), 0.2 / np.sqrt(2), 0) for _ in range(60)] + ts = [(0.1, 0.1, -0.1) for _ in range(30) + ] + [(-0.1, -0.1, 0.1) for _ in range(30)] for scale, aa, t in zip(scales, axisangles, ts): - geom.scale(scale).rotate(aa, type=RotationType.AxisAngle) + geom.scale(scale).rotate(aa, type=o3d.geometry.RotationType.AxisAngle) vis.update_geometry() vis.poll_events() vis.update_renderer() @@ -54,6 +60,7 @@ def animate(geom): vis.update_renderer() time.sleep(0.05) + if __name__ == "__main__": for geom in geometry_generator(): animate(geom) diff --git a/examples/Python/Basic/visualization.py b/examples/Python/Basic/visualization.py index 27b51911230..a39ce819eab 100644 --- a/examples/Python/Basic/visualization.py +++ b/examples/Python/Basic/visualization.py @@ -5,41 +5,43 @@ # examples/Python/Basic/visualization.py import numpy as np -from open3d import * +import open3d as o3d if __name__ == "__main__": print("Load a ply point cloud, print it, and render it") - pcd = read_point_cloud("../../TestData/fragment.ply") - draw_geometries([pcd]) + pcd = o3d.io.read_point_cloud("../../TestData/fragment.ply") + o3d.visualization.draw_geometries([pcd]) print("Let\'s draw some primitives") - mesh_box = create_mesh_box(width = 1.0, height = 1.0, depth = 1.0) + mesh_box = o3d.geometry.create_mesh_box(width=1.0, height=1.0, depth=1.0) mesh_box.compute_vertex_normals() mesh_box.paint_uniform_color([0.9, 0.1, 0.1]) - mesh_sphere = create_mesh_sphere(radius = 1.0) + mesh_sphere = o3d.geometry.create_mesh_sphere(radius=1.0) mesh_sphere.compute_vertex_normals() mesh_sphere.paint_uniform_color([0.1, 0.1, 0.7]) - mesh_cylinder = create_mesh_cylinder(radius = 0.3, height = 4.0) + mesh_cylinder = o3d.geometry.create_mesh_cylinder(radius=0.3, height=4.0) mesh_cylinder.compute_vertex_normals() mesh_cylinder.paint_uniform_color([0.1, 0.9, 0.1]) - mesh_frame = create_mesh_coordinate_frame(size = 0.6, origin = [-2, -2, -2]) + mesh_frame = o3d.geometry.create_mesh_coordinate_frame(size=0.6, + origin=[-2, -2, -2]) print("We draw a few primitives using collection.") - draw_geometries([mesh_box, mesh_sphere, mesh_cylinder, mesh_frame]) + o3d.visualization.draw_geometries( + [mesh_box, mesh_sphere, mesh_cylinder, mesh_frame]) print("We draw a few primitives using + operator of mesh.") - draw_geometries([mesh_box + mesh_sphere + mesh_cylinder + mesh_frame]) - - print("Let\'s draw a cubic using LineSet") - points = [[0,0,0],[1,0,0],[0,1,0],[1,1,0], - [0,0,1],[1,0,1],[0,1,1],[1,1,1]] - lines = [[0,1],[0,2],[1,3],[2,3], - [4,5],[4,6],[5,7],[6,7], - [0,4],[1,5],[2,6],[3,7]] + o3d.visualization.draw_geometries( + [mesh_box + mesh_sphere + mesh_cylinder + mesh_frame]) + 
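The animate() loop in transformation.py above illustrates the general non-blocking rendering pattern under the new namespaces: mutate the geometry in place, then call update_geometry(), poll_events(), and update_renderer() once per frame. A compact sketch of the same pattern, assuming the 0.7-era API used throughout this patch (geometry factories as free functions under o3d.geometry, and update_geometry() taking no arguments):

import time
import numpy as np
import open3d as o3d

mesh = o3d.geometry.create_mesh_sphere()
mesh.compute_vertex_normals()
vis = o3d.visualization.Visualizer()
vis.create_window()
vis.add_geometry(mesh)
for _ in range(60):  # spin the sphere for about three seconds
    mesh.rotate(np.array((0.0, 0.1, 0.0)), type=o3d.geometry.RotationType.AxisAngle)
    vis.update_geometry()   # push the mutated mesh to the renderer
    vis.poll_events()
    vis.update_renderer()
    time.sleep(0.05)
vis.destroy_window()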
+ print("Let\'s draw a cubic using o3d.geometry.LineSet") + points = [[0, 0, 0], [1, 0, 0], [0, 1, 0], [1, 1, 0], [0, 0, 1], [1, 0, 1], + [0, 1, 1], [1, 1, 1]] + lines = [[0, 1], [0, 2], [1, 3], [2, 3], [4, 5], [4, 6], [5, 7], [6, 7], + [0, 4], [1, 5], [2, 6], [3, 7]] colors = [[1, 0, 0] for i in range(len(lines))] - line_set = LineSet() - line_set.points = Vector3dVector(points) - line_set.lines = Vector2iVector(lines) - line_set.colors = Vector3dVector(colors) - draw_geometries([line_set]) \ No newline at end of file + line_set = o3d.geometry.LineSet() + line_set.points = o3d.utility.Vector3dVector(points) + line_set.lines = o3d.utility.Vector2iVector(lines) + line_set.colors = o3d.utility.Vector3dVector(colors) + o3d.visualization.draw_geometries([line_set]) diff --git a/examples/Python/Basic/working_with_numpy.py b/examples/Python/Basic/working_with_numpy.py index 5fa2f451a9e..f7dcfc7d4c5 100644 --- a/examples/Python/Basic/working_with_numpy.py +++ b/examples/Python/Basic/working_with_numpy.py @@ -6,37 +6,37 @@ import copy import numpy as np -from open3d import * +import open3d as o3d if __name__ == "__main__": # generate some neat n times 3 matrix using a variant of sync function x = np.linspace(-3, 3, 401) - mesh_x, mesh_y = np.meshgrid(x,x) - z = np.sinc((np.power(mesh_x,2)+np.power(mesh_y,2))) - z_norm = (z-z.min())/(z.max()-z.min()) - xyz = np.zeros((np.size(mesh_x),3)) - xyz[:,0] = np.reshape(mesh_x,-1) - xyz[:,1] = np.reshape(mesh_y,-1) - xyz[:,2] = np.reshape(z_norm,-1) + mesh_x, mesh_y = np.meshgrid(x, x) + z = np.sinc((np.power(mesh_x, 2) + np.power(mesh_y, 2))) + z_norm = (z - z.min()) / (z.max() - z.min()) + xyz = np.zeros((np.size(mesh_x), 3)) + xyz[:, 0] = np.reshape(mesh_x, -1) + xyz[:, 1] = np.reshape(mesh_y, -1) + xyz[:, 2] = np.reshape(z_norm, -1) print('xyz') print(xyz) - # Pass xyz to Open3D.PointCloud and visualize - pcd = PointCloud() - pcd.points = Vector3dVector(xyz) - write_point_cloud("../../TestData/sync.ply", pcd) + # Pass xyz to Open3D.o3d.geometry.PointCloud and visualize + pcd = o3d.geometry.PointCloud() + pcd.points = o3d.utility.Vector3dVector(xyz) + o3d.io.write_point_cloud("../../TestData/sync.ply", pcd) # Load saved point cloud and visualize it - pcd_load = read_point_cloud("../../TestData/sync.ply") - draw_geometries([pcd_load]) + pcd_load = o3d.io.read_point_cloud("../../TestData/sync.ply") + o3d.visualization.draw_geometries([pcd_load]) - # convert Open3D.PointCloud to numpy array + # convert Open3D.o3d.geometry.PointCloud to numpy array xyz_load = np.asarray(pcd_load.points) print('xyz_load') print(xyz_load) # save z_norm as an image (change [0,1] range to [0,255] range with uint8 type) - img = Image((z_norm*255).astype(np.uint8)) - write_image("../../TestData/sync.png", img) - draw_geometries([img]) + img = o3d.geometry.Image((z_norm * 255).astype(np.uint8)) + o3d.io.write_image("../../TestData/sync.png", img) + o3d.visualization.draw_geometries([img]) diff --git a/examples/Python/Benchmark/__init__.py b/examples/Python/Benchmark/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/examples/Python/Benchmark/benchmark_fgr.py b/examples/Python/Benchmark/benchmark_fgr.py index d8425f0e09a..e34fa830bbf 100644 --- a/examples/Python/Benchmark/benchmark_fgr.py +++ b/examples/Python/Benchmark/benchmark_fgr.py @@ -17,6 +17,7 @@ do_visualization = False + def get_ply_path(dataset_name, id): return "%s/%s/cloud_bin_%d.ply" % (dataset_path, dataset_name, id) @@ -24,8 +25,9 @@ def get_ply_path(dataset_name, id): def 
get_log_path(dataset_name): return "%s/fgr_%s.log" % (dataset_path, dataset_name) + dataset_path = 'testdata' -dataset_names = ['livingroom1','livingroom2','office1','office2'] +dataset_names = ['livingroom1', 'livingroom2', 'office1', 'office2'] if __name__ == "__main__": # data preparation @@ -34,8 +36,8 @@ def get_log_path(dataset_name): # do RANSAC based alignment for dataset_name in dataset_names: - ply_file_names = get_file_list( - "%s/%s/" % (dataset_path, dataset_name), ".ply") + ply_file_names = get_file_list("%s/%s/" % (dataset_path, dataset_name), + ".ply") n_ply_files = len(ply_file_names) alignment = [] @@ -43,16 +45,16 @@ def get_log_path(dataset_name): for t in range(s + 1, n_ply_files): print("%s:: matching %d-%d" % (dataset_name, s, t)) - source = read_point_cloud(get_ply_path(dataset_name, s)) - target = read_point_cloud(get_ply_path(dataset_name, t)) + source = o3d.io.read_point_cloud(get_ply_path(dataset_name, s)) + target = o3d.io.read_point_cloud(get_ply_path(dataset_name, t)) source_down, source_fpfh = preprocess_point_cloud( - source, voxel_size) + source, voxel_size) target_down, target_fpfh = preprocess_point_cloud( - target, voxel_size) + target, voxel_size) result = execute_fast_global_registration( - source_down, target_down, - source_fpfh, target_fpfh, voxel_size) + source_down, target_down, source_fpfh, target_fpfh, + voxel_size) if (result.transformation.trace() == 4.0): success = False else: @@ -60,13 +62,14 @@ def get_log_path(dataset_name): # Note: we save inverse of result_ransac.transformation # to comply with http://redwood-data.org/indoor/fileformat.html - alignment.append(CameraPose([s, t, n_ply_files], - np.linalg.inv(result.transformation))) + alignment.append( + CameraPose([s, t, n_ply_files], + np.linalg.inv(result.transformation))) print(np.linalg.inv(result.transformation)) if do_visualization: draw_registration_result(source_down, target_down, - result.transformation) + result.transformation) write_trajectory(alignment, get_log_path(dataset_name)) # do evaluation diff --git a/examples/Python/Benchmark/benchmark_pre.py b/examples/Python/Benchmark/benchmark_pre.py index ea59be7e466..ee345fbe342 100644 --- a/examples/Python/Benchmark/benchmark_pre.py +++ b/examples/Python/Benchmark/benchmark_pre.py @@ -19,6 +19,7 @@ do_visualization = False + def get_ply_path(dataset_name, id): return "%s/%s/cloud_bin_%d.ply" % (dataset_path, dataset_name, id) @@ -26,8 +27,9 @@ def get_ply_path(dataset_name, id): def get_log_path(dataset_name): return "%s/fgr_%s.log" % (dataset_path, dataset_name) + dataset_path = 'testdata' -dataset_names = ['livingroom1','livingroom2','office1','office2'] +dataset_names = ['livingroom1', 'livingroom2', 'office1', 'office2'] if __name__ == "__main__": # data preparation @@ -36,15 +38,15 @@ def get_log_path(dataset_name): # do RANSAC based alignment for dataset_name in dataset_names: - ply_file_names = get_file_list( - "%s/%s/" % (dataset_path, dataset_name), ".ply") + ply_file_names = get_file_list("%s/%s/" % (dataset_path, dataset_name), + ".ply") n_ply_files = len(ply_file_names) alignment = [] for s in range(n_ply_files): - source = read_point_cloud(get_ply_path(dataset_name, s)) + source = o3d.io.read_point_cloud(get_ply_path(dataset_name, s)) source_down, source_fpfh = preprocess_point_cloud( - source, voxel_size) + source, voxel_size) f = open('store.pckl', 'wb') pickle.dump([source_down, source_fpfh], f) f.close() diff --git a/examples/Python/Benchmark/benchmark_ransac.py b/examples/Python/Benchmark/benchmark_ransac.py 
index 12c68b80272..b058664879b 100644 --- a/examples/Python/Benchmark/benchmark_ransac.py +++ b/examples/Python/Benchmark/benchmark_ransac.py @@ -33,8 +33,8 @@ def get_log_path(dataset_name): # do RANSAC based alignment for dataset_name in dataset_names: - ply_file_names = get_file_list( - "%s/%s/" % (dataset_path, dataset_name), ".ply") + ply_file_names = get_file_list("%s/%s/" % (dataset_path, dataset_name), + ".ply") n_ply_files = len(ply_file_names) alignment = [] @@ -42,16 +42,16 @@ def get_log_path(dataset_name): for t in range(s + 1, n_ply_files): print("%s:: matching %d-%d" % (dataset_name, s, t)) - source = read_point_cloud(get_ply_path(dataset_name, s)) - target = read_point_cloud(get_ply_path(dataset_name, t)) + source = o3d.io.read_point_cloud(get_ply_path(dataset_name, s)) + target = o3d.io.read_point_cloud(get_ply_path(dataset_name, t)) source_down, source_fpfh = preprocess_point_cloud( - source, voxel_size) + source, voxel_size) target_down, target_fpfh = preprocess_point_cloud( - target, voxel_size) + target, voxel_size) - result = execute_global_registration( - source_down, target_down, - source_fpfh, target_fpfh, voxel_size) + result = execute_global_registration(source_down, target_down, + source_fpfh, target_fpfh, + voxel_size) if (result.transformation.trace() == 4.0): success = False else: @@ -62,13 +62,14 @@ def get_log_path(dataset_name): if not success: print("No resonable solution.") else: - alignment.append(CameraPose([s, t, n_ply_files], - np.linalg.inv(result.transformation))) + alignment.append( + CameraPose([s, t, n_ply_files], + np.linalg.inv(result.transformation))) print(np.linalg.inv(result.transformation)) if do_visualization: draw_registration_result(source_down, target_down, - result.transformation) + result.transformation) write_trajectory(alignment, get_log_path(dataset_name)) # do evaluation diff --git a/examples/Python/Misc/__init__.py b/examples/Python/Misc/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/examples/Python/Misc/color_image.py b/examples/Python/Misc/color_image.py index fded194fbd2..fca7093681e 100644 --- a/examples/Python/Misc/color_image.py +++ b/examples/Python/Misc/color_image.py @@ -6,54 +6,61 @@ import matplotlib.pyplot as plt import matplotlib.image as mpimg -from open3d import * +import open3d as o3d #conda install pillow matplotlib if __name__ == "__main__": print("Testing image in open3d ...") print("Convert an image to numpy and draw it with matplotlib.") - x = read_image("../../TestData/image.PNG") + x = o3d.io.read_image("../../TestData/image.PNG") print(x) plt.imshow(np.asarray(x)) plt.show() - print("Convet a numpy image to Image and show it with DrawGeomtries().") + print( + "Convert a numpy image to o3d.geometry.Image and show it with draw_geometries()." 
+ ) y = mpimg.imread("../../TestData/lena_color.jpg") print(y.shape) - yy = Image(y) + yy = o3d.geometry.Image(y) print(yy) - draw_geometries([yy]) + o3d.visualization.draw_geometries([yy]) print("Render a channel of the previous image.") - z = np.array(y[:,:,1]) + z = np.array(y[:, :, 1]) print(z.shape) print(z.strides) - zz = Image(z) + zz = o3d.geometry.Image(z) print(zz) - draw_geometries([zz]) + o3d.visualization.draw_geometries([zz]) print("Write the previous image to file then load it with matplotlib.") - write_image("test.jpg", zz, quality = 100) + o3d.io.write_image("test.jpg", zz, quality=100) zzz = mpimg.imread("test.jpg") plt.imshow(zzz) plt.show() print("Testing basic image processing module.") im_raw = mpimg.imread("../../TestData/lena_color.jpg") - im = Image(im_raw) - im_g3 = filter_image(im, ImageFilterType.Gaussian3) - im_g5 = filter_image(im, ImageFilterType.Gaussian5) - im_g7 = filter_image(im, ImageFilterType.Gaussian7) + im = o3d.geometry.Image(im_raw) + im_g3 = o3d.geometry.filter_image(im, + o3d.geometry.ImageFilterType.Gaussian3) + im_g5 = o3d.geometry.filter_image(im, + o3d.geometry.ImageFilterType.Gaussian5) + im_g7 = o3d.geometry.filter_image(im, + o3d.geometry.ImageFilterType.Gaussian7) im_gaussian = [im, im_g3, im_g5, im_g7] pyramid_levels = 4 pyramid_with_gaussian_filter = True - im_pyramid = create_image_pyramid(im, pyramid_levels, - pyramid_with_gaussian_filter) - im_dx = filter_image(im, ImageFilterType.Sobel3dx) - im_dx_pyramid = filter_image_pyramid(im_pyramid, ImageFilterType.Sobel3dx) - im_dy = filter_image(im, ImageFilterType.Sobel3dy) - im_dy_pyramid = filter_image_pyramid(im_pyramid, ImageFilterType.Sobel3dy) + im_pyramid = o3d.geometry.create_image_pyramid( + im, pyramid_levels, pyramid_with_gaussian_filter) + im_dx = o3d.geometry.filter_image(im, o3d.geometry.ImageFilterType.Sobel3dx) + im_dx_pyramid = o3d.geometry.filter_image_pyramid( + im_pyramid, o3d.geometry.ImageFilterType.Sobel3dx) + im_dy = o3d.geometry.filter_image(im, o3d.geometry.ImageFilterType.Sobel3dy) + im_dy_pyramid = o3d.geometry.filter_image_pyramid( + im_pyramid, o3d.geometry.ImageFilterType.Sobel3dy) switcher = { 0: im_gaussian, 1: im_pyramid, @@ -62,7 +69,7 @@ } for i in range(4): for j in range(pyramid_levels): - plt.subplot(4, pyramid_levels, i*4+j+1) + plt.subplot(4, pyramid_levels, i * 4 + j + 1) plt.imshow(switcher.get(i)[j]) plt.show() diff --git a/examples/Python/Misc/evaluate_geometric_feature.py b/examples/Python/Misc/evaluate_geometric_feature.py index 657730df670..5336fb21276 100644 --- a/examples/Python/Misc/evaluate_geometric_feature.py +++ b/examples/Python/Misc/evaluate_geometric_feature.py @@ -2,22 +2,32 @@ # The MIT License (MIT) # See license file or visit www.open3d.org for details -from open3d import * +import open3d as o3d import numpy as np + def evaluate(pcd_target, pcd_source, feature_target, feature_source): - tree_target = KDTreeFlann(feature_target) + tree_target = o3d.geometry.KDTreeFlann(feature_target) pt_dis = np.zeros(len(pcd_source.points)) for i in range(len(pcd_source.points)): - [_, idx, _] = tree_target.search_knn_vector_xd(feature_source.data[:, i], 1) - pt_dis[i] = np.linalg.norm(pcd_source.points[i] - pcd_target.points[idx[0]]) + [_, idx, + _] = tree_target.search_knn_vector_xd(feature_source.data[:, i], 1) + pt_dis[i] = np.linalg.norm(pcd_source.points[i] - + pcd_target.points[idx[0]]) return pt_dis + if __name__ == "__main__": - pcd_target = read_point_cloud("../../TestData/Feature/cloud_bin_0.pcd") - pcd_source = 
read_point_cloud("../../TestData/Feature/cloud_bin_1.pcd") - feature_target = read_feature("../../TestData/Feature/cloud_bin_0.fpfh.bin") - feature_source = read_feature("../../TestData/Feature/cloud_bin_1.fpfh.bin") + pcd_target = o3d.io.read_point_cloud( + "../../TestData/o3d.registration.Feature/cloud_bin_0.pcd") + pcd_source = o3d.io.read_point_cloud( + "../../TestData/o3d.registration.Feature/cloud_bin_1.pcd") + feature_target = o3d.io.read_feature( + "../../TestData/o3d.registration.Feature/cloud_bin_0.fpfh.bin") + feature_source = o3d.io.read_feature( + "../../TestData/o3d.registration.Feature/cloud_bin_1.fpfh.bin") pt_dis = evaluate(pcd_target, pcd_source, feature_target, feature_source) num_good = sum(pt_dis < 0.075) - print("{:.2f}% points in source pointcloud successfully found their correspondence.".format(num_good * 100.0 / len(pcd_source.points))) + print( + "{:.2f}% points in source pointcloud successfully found their correspondence." + .format(num_good * 100.0 / len(pcd_source.points))) diff --git a/examples/Python/Misc/feature.py b/examples/Python/Misc/feature.py index a081e96e57b..2f0dfbb718f 100644 --- a/examples/Python/Misc/feature.py +++ b/examples/Python/Misc/feature.py @@ -3,40 +3,46 @@ # See license file or visit www.open3d.org for details import numpy as np -from open3d import * +import open3d as o3d if __name__ == "__main__": print("Load two aligned point clouds.") - pcd0 = read_point_cloud("../../TestData/Feature/cloud_bin_0.pcd") - pcd1 = read_point_cloud("../../TestData/Feature/cloud_bin_1.pcd") + pcd0 = o3d.io.read_point_cloud( + "../../TestData/o3d.registration.Feature/cloud_bin_0.pcd") + pcd1 = o3d.io.read_point_cloud( + "../../TestData/o3d.registration.Feature/cloud_bin_1.pcd") pcd0.paint_uniform_color([1, 0.706, 0]) pcd1.paint_uniform_color([0, 0.651, 0.929]) - draw_geometries([pcd0, pcd1]) + o3d.visualization.draw_geometries([pcd0, pcd1]) print("Load their FPFH feature and evaluate.") print("Black : matching distance > 0.2") print("White : matching distance = 0") - feature0 = read_feature("../../TestData/Feature/cloud_bin_0.fpfh.bin") - feature1 = read_feature("../../TestData/Feature/cloud_bin_1.fpfh.bin") - fpfh_tree = KDTreeFlann(feature1) + feature0 = o3d.io.read_feature( + "../../TestData/o3d.registration.Feature/cloud_bin_0.fpfh.bin") + feature1 = o3d.io.read_feature( + "../../TestData/o3d.registration.Feature/cloud_bin_1.fpfh.bin") + fpfh_tree = o3d.geometry.KDTreeFlann(feature1) for i in range(len(pcd0.points)): [_, idx, _] = fpfh_tree.search_knn_vector_xd(feature0.data[:, i], 1) dis = np.linalg.norm(pcd0.points[i] - pcd1.points[idx[0]]) c = (0.2 - np.fmin(dis, 0.2)) / 0.2 pcd0.colors[i] = [c, c, c] - draw_geometries([pcd0]) + o3d.visualization.draw_geometries([pcd0]) print("") print("Load their L32D feature and evaluate.") print("Black : matching distance > 0.2") print("White : matching distance = 0") - feature0 = read_feature("../../TestData/Feature/cloud_bin_0.d32.bin") - feature1 = read_feature("../../TestData/Feature/cloud_bin_1.d32.bin") - fpfh_tree = KDTreeFlann(feature1) + feature0 = o3d.io.read_feature( + "../../TestData/o3d.registration.Feature/cloud_bin_0.d32.bin") + feature1 = o3d.io.read_feature( + "../../TestData/o3d.registration.Feature/cloud_bin_1.d32.bin") + fpfh_tree = o3d.geometry.KDTreeFlann(feature1) for i in range(len(pcd0.points)): [_, idx, _] = fpfh_tree.search_knn_vector_xd(feature0.data[:, i], 1) dis = np.linalg.norm(pcd0.points[i] - pcd1.points[idx[0]]) c = (0.2 - np.fmin(dis, 0.2)) / 0.2 pcd0.colors[i] = [c, c, c] - 
draw_geometries([pcd0]) + o3d.visualization.draw_geometries([pcd0]) print("") diff --git a/examples/Python/Misc/pose_graph_optimization.py b/examples/Python/Misc/pose_graph_optimization.py index 2549e835741..434e05a437b 100644 --- a/examples/Python/Misc/pose_graph_optimization.py +++ b/examples/Python/Misc/pose_graph_optimization.py @@ -2,39 +2,43 @@ # The MIT License (MIT) # See license file or visit www.open3d.org for details -from open3d import * +import open3d as o3d import numpy as np if __name__ == "__main__": - set_verbosity_level(VerbosityLevel.Debug) + o3d.utility.set_verbosity_level(o3d.utility.VerbosityLevel.Debug) print("") - print("Parameters for PoseGraph optimization ...") - method = GlobalOptimizationLevenbergMarquardt() - criteria = GlobalOptimizationConvergenceCriteria() - option = GlobalOptimizationOption() + print("Parameters for o3d.registration.PoseGraph optimization ...") + method = o3d.registration.GlobalOptimizationLevenbergMarquardt() + criteria = o3d.registration.GlobalOptimizationConvergenceCriteria() + option = o3d.registration.GlobalOptimizationOption() print("") print(method) print(criteria) print(option) print("") - print("Optimizing Fragment PoseGraph using open3d ...") + print("Optimizing Fragment o3d.registration.PoseGraph using open3d ...") data_path = "../../TestData/GraphOptimization/" - pose_graph_fragment = read_pose_graph(data_path + - "pose_graph_example_fragment.json") + pose_graph_fragment = o3d.io.read_pose_graph( + data_path + "pose_graph_example_fragment.json") print(pose_graph_fragment) - global_optimization(pose_graph_fragment, method, criteria, option) - write_pose_graph(data_path + - "pose_graph_example_fragment_optimized.json", pose_graph_fragment) + o3d.registration.global_optimization(pose_graph_fragment, method, criteria, + option) + o3d.io.write_pose_graph( + data_path + "pose_graph_example_fragment_optimized.json", + pose_graph_fragment) print("") - print("Optimizing Global PoseGraph using open3d ...") - pose_graph_global = read_pose_graph(data_path + - "pose_graph_example_global.json") + print("Optimizing Global o3d.registration.PoseGraph using open3d ...") + pose_graph_global = o3d.io.read_pose_graph(data_path + + "pose_graph_example_global.json") print(pose_graph_global) - global_optimization(pose_graph_global, method, criteria, option) - write_pose_graph(data_path + - "pose_graph_example_global_optimized.json", pose_graph_global) + o3d.registration.global_optimization(pose_graph_global, method, criteria, + option) + o3d.io.write_pose_graph( + data_path + "pose_graph_example_global_optimized.json", + pose_graph_global) print("") diff --git a/examples/Python/Misc/sampling.py b/examples/Python/Misc/sampling.py index 7510802accf..5943274a711 100644 --- a/examples/Python/Misc/sampling.py +++ b/examples/Python/Misc/sampling.py @@ -2,7 +2,7 @@ # The MIT License (MIT) # See license file or visit www.open3d.org for details -from open3d import * +import open3d as o3d import os, sys sys.path.append("../Utility") from common import * @@ -11,7 +11,7 @@ from shutil import copyfile if __name__ == "__main__": - set_verbosity_level(VerbosityLevel.Debug) + o3d.utility.set_verbosity_level(o3d.utility.VerbosityLevel.Debug) path = "[path_to_reconstruction_system_output]" out_path = "[path_to_sampled_frames_are_located]" @@ -21,17 +21,17 @@ make_clean_folder(os.path.join(out_path, "scene/")) sampling_rate = 30 - depth_image_path = get_file_list( - os.path.join(path, "depth/"), extension = ".png") - color_image_path = get_file_list( - 
            os.path.join(path, "image/"), extension = ".jpg")
-    pose_graph_global = read_pose_graph(os.path.join(path,
-            template_global_posegraph_optimized))
+    depth_image_path = get_file_list(os.path.join(path, "depth/"),
+                                     extension=".png")
+    color_image_path = get_file_list(os.path.join(path, "image/"),
+                                     extension=".jpg")
+    pose_graph_global = o3d.io.read_pose_graph(
+        os.path.join(path, template_global_posegraph_optimized))
     n_fragments = len(depth_image_path) // n_frames_per_fragment + 1
     pose_graph_fragments = []
     for i in range(n_fragments):
-        pose_graph_fragment = read_pose_graph(os.path.join(path,
-                template_fragment_posegraph_optimized % i))
+        pose_graph_fragment = o3d.io.read_pose_graph(
+            os.path.join(path, template_fragment_posegraph_optimized % i))
         pose_graph_fragments.append(pose_graph_fragment)

     depth_image_path_new = []
@@ -44,14 +44,18 @@ print(metadata)
         fragment_id = i // n_frames_per_fragment
         local_frame_id = i - fragment_id * n_frames_per_fragment
-        traj.append(CameraPose(metadata, np.dot(
-                pose_graph_global.nodes[fragment_id].pose,
-                pose_graph_fragments[fragment_id].nodes[local_frame_id].pose)))
+        traj.append(
+            CameraPose(
+                metadata,
+                np.dot(
+                    pose_graph_global.nodes[fragment_id].pose,
+                    pose_graph_fragments[fragment_id].nodes[local_frame_id].
+                    pose)))
         copyfile(depth_image_path[i], os.path.join(out_path, "depth/", \
                 os.path.basename(depth_image_path[i])))
         copyfile(color_image_path[i], os.path.join(out_path, "image/", \
                 os.path.basename(color_image_path[i])))
         cnt += 1
     copyfile(os.path.join(path, "/scene/cropped.ply"),
-            os.path.join(out_path, "/scene/integrated.ply"))
+             os.path.join(out_path, "/scene/integrated.ply"))
     write_trajectory(traj, os.path.join(out_path, "scene/key.log"))
diff --git a/examples/Python/Misc/vector.py b/examples/Python/Misc/vector.py
index e0c68675809..72c8e818f7f 100644
--- a/examples/Python/Misc/vector.py
+++ b/examples/Python/Misc/vector.py
@@ -4,26 +4,27 @@

 import copy
 import numpy as np
-from open3d import *
+import open3d as o3d

 if __name__ == "__main__":

     print("Testing vector in open3d ...")
     print("")

-    print("Testing IntVector ...")
-    vi = IntVector([1, 2, 3, 4, 5]) # made from python list
-    vi1 = IntVector(np.asarray([1, 2, 3, 4, 5])) # made from numpy array
-    vi2 = copy.copy(vi) # valid copy
-    vi3 = copy.deepcopy(vi) # valid copy
-    vi4 = vi[:] # valid copy
+    print("Testing o3d.utility.IntVector ...")
+    vi = o3d.utility.IntVector([1, 2, 3, 4, 5])  # made from python list
+    vi1 = o3d.utility.IntVector(np.asarray([1, 2, 3, 4,
+                                            5]))  # made from numpy array
+    vi2 = copy.copy(vi)  # valid copy
+    vi3 = copy.deepcopy(vi)  # valid copy
+    vi4 = vi[:]  # valid copy
     print(vi)
     print(np.asarray(vi))
     vi[0] = 10
     np.asarray(vi)[1] = 22
     vi1[0] *= 5
     vi2[0] += 1
-    vi3[0:2] = IntVector([40, 50])
+    vi3[0:2] = o3d.utility.IntVector([40, 50])
     print(vi)
     print(vi1)
     print(vi2)
@@ -31,10 +32,10 @@
     print(vi4)
     print("")

-    print("Testing DoubleVector ...")
-    vd = DoubleVector([1, 2, 3])
-    vd1 = DoubleVector([1.1, 1.2])
-    vd2 = DoubleVector(np.asarray([0.1, 0.2]))
+    print("Testing o3d.utility.DoubleVector ...")
+    vd = o3d.utility.DoubleVector([1, 2, 3])
+    vd1 = o3d.utility.DoubleVector([1.1, 1.2])
+    vd2 = o3d.utility.DoubleVector(np.asarray([0.1, 0.2]))
     print(vd)
     print(vd1)
     print(vd2)
@@ -43,10 +44,10 @@
     print(vd1)
     print("")

-    print("Testing Vector3dVector ...")
-    vv3d = Vector3dVector([[1, 2, 3], [0.1, 0.2, 0.3]])
-    vv3d1 = Vector3dVector(vv3d)
-    vv3d2 = Vector3dVector(np.asarray(vv3d))
+    print("Testing o3d.utility.Vector3dVector ...")
+    vv3d = o3d.utility.Vector3dVector([[1, 2, 3], [0.1, 0.2, 0.3]])
+    vv3d1 = o3d.utility.Vector3dVector(vv3d)
+    vv3d2 = o3d.utility.Vector3dVector(np.asarray(vv3d))
     vv3d3 = copy.deepcopy(vv3d)
     print(vv3d)
     print(np.asarray(vv3d))
@@ -66,8 +67,8 @@
     print(np.asarray(vv3d3))
     print("")

-    print("Testing Vector3iVector ...")
-    vv3i = Vector3iVector([[1, 2, 3], [4, 5, 6]])
+    print("Testing o3d.utility.Vector3iVector ...")
+    vv3i = o3d.utility.Vector3iVector([[1, 2, 3], [4, 5, 6]])
     print(vv3i)
     print(np.asarray(vv3i))
diff --git a/examples/Python/ReconstructionSystem/__init__.py b/examples/Python/ReconstructionSystem/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/examples/Python/ReconstructionSystem/debug/__init__.py b/examples/Python/ReconstructionSystem/debug/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/examples/Python/ReconstructionSystem/debug/pairwise_pc_alignment.py b/examples/Python/ReconstructionSystem/debug/pairwise_pc_alignment.py
index cd64850c54b..9ecab3d372a 100644
--- a/examples/Python/ReconstructionSystem/debug/pairwise_pc_alignment.py
+++ b/examples/Python/ReconstructionSystem/debug/pairwise_pc_alignment.py
@@ -2,19 +2,15 @@
 # The MIT License (MIT)
 # See license file or visit www.open3d.org for details

-import numpy as np
 import argparse
-import os
 import json
 import sys
-from open3d import *
 sys.path.append("../Utility")
 from file import *
 sys.path.append(".")
 from initialize_config import *
 from register_fragments import *
-
 if __name__ == "__main__":
     parser = argparse.ArgumentParser(description="mathching two point clouds")
     parser.add_argument("config", help="reading json file for initial pose")
@@ -27,6 +23,6 @@
     initialize_config(config)
     config['debug_mode'] = True
     ply_file_names = get_file_list(
-            join(config["path_dataset"], config["folder_fragment"]), ".ply")
-    register_point_cloud_pair(ply_file_names,
-            args.source_id, args.target_id, config)
+        join(config["path_dataset"], config["folder_fragment"]), ".ply")
+    register_point_cloud_pair(ply_file_names, args.source_id,
+                              args.target_id, config)
diff --git a/examples/Python/ReconstructionSystem/debug/pairwise_rgbd_alignment.py b/examples/Python/ReconstructionSystem/debug/pairwise_rgbd_alignment.py
index 9b0c95a55b8..ea3120aca47 100644
--- a/examples/Python/ReconstructionSystem/debug/pairwise_rgbd_alignment.py
+++ b/examples/Python/ReconstructionSystem/debug/pairwise_rgbd_alignment.py
@@ -2,11 +2,10 @@
 # The MIT License (MIT)
 # See license file or visit www.open3d.org for details

-import numpy as np
 import argparse
 import json
 import sys
-from open3d import *
+import open3d as o3d
 sys.path.append("../Utility")
 from file import *
 from visualization import *
@@ -15,20 +14,23 @@
 from make_fragments import *


-def test_single_pair(s, t, color_files, depth_files,
-        intrinsic, with_opencv, config):
-    set_verbosity_level(VerbosityLevel.Debug)
-    [success, trans, info] = register_one_rgbd_pair(s, t,
-            color_files, depth_files, intrinsic, with_opencv, config)
+def test_single_pair(s, t, color_files, depth_files, intrinsic, with_opencv,
+                     config):
+    o3d.utility.set_verbosity_level(o3d.utility.VerbosityLevel.Debug)
+    [success, trans,
+     info] = register_one_rgbd_pair(s, t, color_files, depth_files, intrinsic,
+                                    with_opencv, config)
     print(trans)
     print(info)
     print(intrinsic)
-    source_rgbd_image = read_rgbd_image(
-        color_files[s], depth_files[s], False, config)
-    target_rgbd_image = read_rgbd_image(
-        color_files[t], depth_files[t], False, config)
-    source = create_point_cloud_from_rgbd_image(source_rgbd_image, intrinsic)
-    target = create_point_cloud_from_rgbd_image(target_rgbd_image, intrinsic)
+    source_rgbd_image = read_rgbd_image(color_files[s], depth_files[s], False,
+                                        config)
+    target_rgbd_image = read_rgbd_image(color_files[t], depth_files[t], False,
+                                        config)
+    source = o3d.geometry.create_point_cloud_from_rgbd_image(
+        source_rgbd_image, intrinsic)
+    target = o3d.geometry.create_point_cloud_from_rgbd_image(
+        target_rgbd_image, intrinsic)
     draw_geometries_flip([source, target])
@@ -38,7 +40,7 @@ def test_single_pair(s, t, color_files, depth_files,
     parser.add_argument("source_id", type=int, help="ID of source RGBD image")
     parser.add_argument("target_id", type=int, help="ID of target RGBD image")
     parser.add_argument("--path_intrinsic",
-            help="path to the RGBD camera intrinsic")
+                        help="path to the RGBD camera intrinsic")
     args = parser.parse_args()

     with open(args.config) as json_file:
@@ -51,9 +53,10 @@ def test_single_pair(s, t, color_files, depth_files,
     [color_files, depth_files] = get_rgbd_file_lists(config["path_dataset"])
     if args.path_intrinsic:
-        intrinsic = read_pinhole_camera_intrinsic(args.path_intrinsic)
+        intrinsic = o3d.io.read_pinhole_camera_intrinsic(
+            args.path_intrinsic)
     else:
-        intrinsic = PinholeCameraIntrinsic(
-            PinholeCameraIntrinsicParameters.PrimeSenseDefault)
-    test_single_pair(args.source_id, args.target_id,
-        color_files, depth_files, intrinsic, with_opencv, config)
+        intrinsic = o3d.camera.PinholeCameraIntrinsic(
+            o3d.camera.PinholeCameraIntrinsicParameters.PrimeSenseDefault)
+    test_single_pair(args.source_id, args.target_id, color_files,
+                     depth_files, intrinsic, with_opencv, config)
diff --git a/examples/Python/ReconstructionSystem/debug/visualize_alignment.py b/examples/Python/ReconstructionSystem/debug/visualize_alignment.py
index 5556cbaa374..484ebe89895 100644
--- a/examples/Python/ReconstructionSystem/debug/visualize_alignment.py
+++ b/examples/Python/ReconstructionSystem/debug/visualize_alignment.py
@@ -5,9 +5,8 @@
 import numpy as np
 import json
 import argparse
-import os
 import sys
-from open3d import *
+import open3d as o3d
 sys.path.append("../Utility")
 from file import *
 from visualization import *
@@ -18,11 +17,12 @@ def list_posegraph_files(folder_posegraph):
     pose_graph_paths = get_file_list(folder_posegraph, ".json")
     for pose_graph_path in pose_graph_paths:
-        pose_graph = read_pose_graph(pose_graph_path)
+        pose_graph = o3d.io.read_pose_graph(pose_graph_path)
         n_nodes = len(pose_graph.nodes)
         n_edges = len(pose_graph.edges)
-        print("Fragment PoseGraph %s has %d nodes and %d edges" %
-                (pose_graph_path, n_nodes, n_edges))
+        print(
+            "Fragment o3d.registration.PoseGraph %s has %d nodes and %d edges"
+            % (pose_graph_path, n_nodes, n_edges))


 if __name__ == "__main__":
@@ -30,47 +30,50 @@ def list_posegraph_files(folder_posegraph):
     parser.add_argument("config", help="path to the config file")
     parser.add_argument("--source_id", type=int, help="ID of source fragment")
     parser.add_argument("--target_id", type=int, help="ID of target fragment")
-    parser.add_argument("--adjacent", help="visualize adjacent pairs",
-            action="store_true")
-    parser.add_argument("--all", help="visualize all pairs",
-            action="store_true")
+    parser.add_argument("--adjacent",
+                        help="visualize adjacent pairs",
+                        action="store_true")
+    parser.add_argument("--all",
+                        help="visualize all pairs",
+                        action="store_true")
     parser.add_argument("--list_posegraphs",
-            help="list number of node and edges of all pose graphs",
-            action="store_true")
+                        help="list number of node and edges of all pose graphs",
+                        action="store_true")
     parser.add_argument("--before_optimized",
-            help="visualize posegraph edges that is not optimized",
-            action="store_true")
+                        help="visualize posegraph edges that is not optimized",
+                        action="store_true")
     args = parser.parse_args()

     with open(args.config) as json_file:
         config = json.load(json_file)
         initialize_config(config)
-    ply_file_names = get_file_list(join(config["path_dataset"],
-            config["folder_fragment"]), ".ply")
+    ply_file_names = get_file_list(
+        join(config["path_dataset"], config["folder_fragment"]), ".ply")
     if (args.list_posegraphs):
-        list_posegraph_files(join(config["path_dataset"],
-                config["folder_fragment"]))
-        list_posegraph_files(join(config["path_dataset"],
-                config["folder_scene"]))
+        list_posegraph_files(
+            join(config["path_dataset"], config["folder_fragment"]))
+        list_posegraph_files(
+            join(config["path_dataset"], config["folder_scene"]))
     if (args.before_optimized):
         global_pose_graph_name = join(config["path_dataset"],
-                config["template_global_posegraph"])
+                                      config["template_global_posegraph"])
     else:
-        global_pose_graph_name = join(config["path_dataset"],
-                config["template_refined_posegraph_optimized"])
+        global_pose_graph_name = join(
+            config["path_dataset"],
+            config["template_refined_posegraph_optimized"])
     print("Reading posegraph")
     print(global_pose_graph_name)
-    pose_graph = read_pose_graph(global_pose_graph_name)
+    pose_graph = o3d.io.read_pose_graph(global_pose_graph_name)
     n_nodes = len(pose_graph.nodes)
     n_edges = len(pose_graph.edges)
-    print("Global PoseGraph having %d nodes and %d edges" % \
+    print("Global o3d.registration.PoseGraph having %d nodes and %d edges" % \
             (n_nodes, n_edges))

     # visualize alignment of posegraph edges
     for edge in pose_graph.edges:
-        print("PoseGraphEdge %d-%d" % \
+        print("o3d.registration.PoseGraphEdge %d-%d" % \
                 (edge.source_node_id, edge.target_node_id))
         if ((args.adjacent and \
                 edge.target_node_id - edge.source_node_id == 1)) or \
                 (not args.adjacent and \
                 (args.source_id == edge.source_node_id and \
                 args.target_id == edge.target_node_id)) or \
                 args.all:
             print("  confidence : %.3f" % edge.confidence)
-            source = read_point_cloud(ply_file_names[edge.source_node_id])
-            target = read_point_cloud(ply_file_names[edge.target_node_id])
-            source_down = voxel_down_sample(source, config["voxel_size"])
-            target_down = voxel_down_sample(target, config["voxel_size"])
+            source = o3d.io.read_point_cloud(
+                ply_file_names[edge.source_node_id])
+            target = o3d.io.read_point_cloud(
+                ply_file_names[edge.target_node_id])
+            source_down = o3d.geometry.voxel_down_sample(
+                source, config["voxel_size"])
+            target_down = o3d.geometry.voxel_down_sample(
+                target, config["voxel_size"])
             print("original registration")
-            draw_registration_result(
-                source_down, target_down, edge.transformation)
+            draw_registration_result(source_down, target_down,
+                                     edge.transformation)
             print("optimized registration")
             source_down.transform(
-                pose_graph.nodes[edge.source_node_id].pose)
+                pose_graph.nodes[edge.source_node_id].pose)
             target_down.transform(
-                pose_graph.nodes[edge.target_node_id].pose)
-            draw_registration_result(
-                source_down, target_down, np.identity(4))
+                pose_graph.nodes[edge.target_node_id].pose)
+            draw_registration_result(source_down, target_down,
+                                     np.identity(4))
diff --git a/examples/Python/ReconstructionSystem/debug/visualize_fragments.py b/examples/Python/ReconstructionSystem/debug/visualize_fragments.py
index a88be1d840a..1917965766d 100644
--- a/examples/Python/ReconstructionSystem/debug/visualize_fragments.py
+++ b/examples/Python/ReconstructionSystem/debug/visualize_fragments.py
@@ -4,36 +4,41 @@
 import argparse
 import json
-import os
 import sys
-from open3d import *
+import open3d as o3d
 sys.path.append("../Utility")
 from file import *
 from visualization import *
 sys.path.append(".")
 from initialize_config import *
-
 # test wide baseline matching
 if __name__ == "__main__":
     parser = argparse.ArgumentParser(description="visualize pose graph")
     parser.add_argument("config", help="path to the config file")
-    parser.add_argument("--start_id", type=int, default=0,
-            help="starting ID of fragment")
-    parser.add_argument("--estimate_normal", type=int, default=0,
-            help="normal estimation for better visualization of point cloud")
+    parser.add_argument("--start_id",
+                        type=int,
+                        default=0,
+                        help="starting ID of fragment")
+    parser.add_argument(
+        "--estimate_normal",
+        type=int,
+        default=0,
+        help="normal estimation for better visualization of point cloud")
     args = parser.parse_args()

     with open(args.config) as json_file:
         config = json.load(json_file)
         initialize_config(config)

-    fragment_files = get_file_list(
-        join(config["path_dataset"], config["folder_fragment"]),
-        extension='.ply')
+    fragment_files = get_file_list(join(config["path_dataset"],
+                                        config["folder_fragment"]),
+                                   extension='.ply')
     for i in range(args.start_id, len(fragment_files)):
         print(fragment_files[i])
-        pcd = read_point_cloud(fragment_files[i])
+        pcd = o3d.io.read_point_cloud(fragment_files[i])
         if (args.estimate_normal):
-            estimate_normals(pcd, KDTreeSearchParamHybrid(
-                    radius = config["voxel_size"] * 2.0, max_nn = 30))
+            o3d.geometry.estimate_normals(
+                pcd,
+                o3d.geometry.KDTreeSearchParamHybrid(
+                    radius=config["voxel_size"] * 2.0, max_nn=30))
         draw_geometries_flip([pcd])
diff --git a/examples/Python/ReconstructionSystem/debug/visualize_pointcloud.py b/examples/Python/ReconstructionSystem/debug/visualize_pointcloud.py
index b709b682395..80d04ffc295 100644
--- a/examples/Python/ReconstructionSystem/debug/visualize_pointcloud.py
+++ b/examples/Python/ReconstructionSystem/debug/visualize_pointcloud.py
@@ -4,10 +4,9 @@
 import argparse
 import json
-import os
 import math
 import sys
-from open3d import *
+import open3d as o3d
 sys.path.append("../Utility")
 from file import *
 from visualization import *
@@ -15,21 +14,21 @@
 from initialize_config import *
 from make_fragments import *
-
 # test wide baseline matching
 if __name__ == "__main__":
     parser = argparse.ArgumentParser(
-            description="visualize fragment or scene as a point cloud form")
+        description="visualize fragment or scene as a point cloud form")
     parser.add_argument("config", help="path to the config file")
     parser.add_argument("--path_intrinsic",
-            help="path to the RGBD camera intrinsic")
+                        help="path to the RGBD camera intrinsic")
     parser.add_argument("--fragment",
-            help="visualize nodes in form of point clouds")
+                        help="visualize nodes in form of point clouds")
     parser.add_argument("--scene",
-            help="visualize nodes in form of point clouds", action="store_true")
+                        help="visualize nodes in form of point clouds",
+                        action="store_true")
     parser.add_argument("--before_optimized",
-            help="visualize posegraph edges that is not optimized",
-            action="store_true")
+                        help="visualize posegraph edges that is not optimized",
+                        action="store_true")
     args = parser.parse_args()
     if not args.fragment and not args.scene:
         parser.print_help(sys.stderr)
@@ -40,19 +39,21 @@
         initialize_config(config)
     if (args.scene):
         if (args.before_optimized):
-            global_pose_graph_name = join(config["path_dataset"],
-                    config["template_refined_posegraph"])
+            global_pose_graph_name = join(
+                config["path_dataset"],
+                config["template_refined_posegraph"])
         else:
-            global_pose_graph_name = join(config["path_dataset"],
-                    config["template_refined_posegraph_optimized"])
-        pose_graph = read_pose_graph(global_pose_graph_name)
+            global_pose_graph_name = join(
+                config["path_dataset"],
+                config["template_refined_posegraph_optimized"])
+        pose_graph = o3d.io.read_pose_graph(global_pose_graph_name)
         ply_file_names = get_file_list(
-            join(config["path_dataset"],
-                config["folder_fragment"]), ".ply")
+            join(config["path_dataset"], config["folder_fragment"]), ".ply")
         pcds = []
         for i in range(len(pose_graph.nodes)):
-            pcd = read_point_cloud(ply_file_names[i])
-            pcd_down = voxel_down_sample(pcd, config["voxel_size"]/2.0)
+            pcd = o3d.io.read_point_cloud(ply_file_names[i])
+            pcd_down = o3d.geometry.voxel_down_sample(
+                pcd, config["voxel_size"] / 2.0)
             pcd_down.transform(pose_graph.nodes[i].pose)
             print(np.linalg.inv(pose_graph.nodes[i].pose))
             pcds.append(pcd_down)
@@ -60,12 +61,12 @@
     if (args.fragment):
         if (args.path_intrinsic):
-            pinhole_camera_intrinsic = read_pinhole_camera_intrinsic(
-                    args.path_intrinsic)
+            pinhole_camera_intrinsic = o3d.io.read_pinhole_camera_intrinsic(
+                args.path_intrinsic)
         else:
             pinhole_camera_intrinsic = \
-                    PinholeCameraIntrinsic(
-                    PinholeCameraIntrinsicParameters.PrimeSenseDefault)
+                    o3d.camera.PinholeCameraIntrinsic(
+                    o3d.camera.PinholeCameraIntrinsicParameters.PrimeSenseDefault)
         pcds = []
         [color_files, depth_files] = \
                 get_rgbd_file_lists(config["path_dataset"])
@@ -74,17 +75,18 @@
                 config['n_frames_per_fragment']))
         sid = int(args.fragment) * config['n_frames_per_fragment']
         eid = min(sid + config['n_frames_per_fragment'], n_files)
-        pose_graph = read_pose_graph(join(config["path_dataset"],
+        pose_graph = o3d.io.read_pose_graph(join(config["path_dataset"],
                 config["template_fragment_posegraph_optimized"] % \
                 int(args.fragment)))
         for i in range(sid, eid):
             print("appending rgbd image %d" % i)
             rgbd_image = read_rgbd_image(color_files[i], depth_files[i],
-                    False, config)
-            pcd_i = create_point_cloud_from_rgbd_image(rgbd_image,
-                    pinhole_camera_intrinsic,
-                    np.linalg.inv(pose_graph.nodes[i-sid].pose))
-            pcd_i_down = voxel_down_sample(pcd_i, config["voxel_size"])
+                                         False, config)
+            pcd_i = o3d.geometry.create_point_cloud_from_rgbd_image(
+                rgbd_image, pinhole_camera_intrinsic,
+                np.linalg.inv(pose_graph.nodes[i - sid].pose))
+            pcd_i_down = o3d.geometry.voxel_down_sample(
+                pcd_i, config["voxel_size"])
             pcds.append(pcd_i_down)
         draw_geometries_flip(pcds)
diff --git a/examples/Python/ReconstructionSystem/initialize_config.py b/examples/Python/ReconstructionSystem/initialize_config.py
index 5b234a97aa4..e474c562f8d 100644
--- a/examples/Python/ReconstructionSystem/initialize_config.py
+++ b/examples/Python/ReconstructionSystem/initialize_config.py
@@ -29,18 +29,18 @@ def initialize_config(config):
     set_default_value(config, "folder_fragment", "fragments/")
     set_default_value(config, "template_fragment_posegraph",
-            "fragments/fragment_%03d.json")
+                      "fragments/fragment_%03d.json")
     set_default_value(config, "template_fragment_posegraph_optimized",
-            "fragments/fragment_optimized_%03d.json")
+                      "fragments/fragment_optimized_%03d.json")
     set_default_value(config, "template_fragment_pointcloud",
-            "fragments/fragment_%03d.ply")
+                      "fragments/fragment_%03d.ply")
     set_default_value(config, "folder_scene", "scene/")
     set_default_value(config, "template_global_posegraph",
-            "scene/global_registration.json")
+                      "scene/global_registration.json")
     set_default_value(config, "template_global_posegraph_optimized",
-            "scene/global_registration_optimized.json")
+                      "scene/global_registration_optimized.json")
     set_default_value(config, "template_refined_posegraph",
-            "scene/refined_registration.json")
+                      "scene/refined_registration.json")
     set_default_value(config, "template_refined_posegraph_optimized",
-            "scene/refined_registration_optimized.json")
+                      "scene/refined_registration_optimized.json")
     set_default_value(config, "template_global_mesh", "scene/integrated.ply")
diff --git a/examples/Python/ReconstructionSystem/integrate_scene.py b/examples/Python/ReconstructionSystem/integrate_scene.py
index 4a1e25607aa..94ed9bd5448 100644
--- a/examples/Python/ReconstructionSystem/integrate_scene.py
+++ b/examples/Python/ReconstructionSystem/integrate_scene.py
@@ -7,11 +7,11 @@
 import numpy as np
 import math
 import sys
-from open3d import *
+import open3d as o3d
 sys.path.append("../Utility")
-from file import *
+from file import join, get_rgbd_file_lists
 sys.path.append(".")
-from make_fragments import *
+from make_fragments import read_rgbd_image


 def scalable_integrate_rgb_frames(path_dataset, intrinsic, config):
@@ -19,42 +19,47 @@ def scalable_integrate_rgb_frames(path_dataset, intrinsic, config):
     n_files = len(color_files)
     n_fragments = int(math.ceil(float(n_files) / \
             config['n_frames_per_fragment']))
-    volume = ScalableTSDFVolume(voxel_length = config["tsdf_cubic_size"]/512.0,
-            sdf_trunc = 0.04, color_type = TSDFVolumeColorType.RGB8)
+    volume = o3d.integration.ScalableTSDFVolume(
+        voxel_length=config["tsdf_cubic_size"] / 512.0,
+        sdf_trunc=0.04,
+        color_type=o3d.integration.TSDFVolumeColorType.RGB8)

-    pose_graph_fragment = read_pose_graph(join(
-            path_dataset, config["template_refined_posegraph_optimized"]))
+    pose_graph_fragment = o3d.io.read_pose_graph(
+        join(path_dataset, config["template_refined_posegraph_optimized"]))

     for fragment_id in range(len(pose_graph_fragment.nodes)):
-        pose_graph_rgbd = read_pose_graph(join(path_dataset,
-                config["template_fragment_posegraph_optimized"] % fragment_id))
+        pose_graph_rgbd = o3d.io.read_pose_graph(
+            join(path_dataset,
+                 config["template_fragment_posegraph_optimized"] % fragment_id))

         for frame_id in range(len(pose_graph_rgbd.nodes)):
             frame_id_abs = fragment_id * \
                     config['n_frames_per_fragment'] + frame_id
-            print("Fragment %03d / %03d :: integrate rgbd frame %d (%d of %d)."
-                    % (fragment_id, n_fragments-1, frame_id_abs, frame_id+1,
-                    len(pose_graph_rgbd.nodes)))
+            print(
+                "Fragment %03d / %03d :: integrate rgbd frame %d (%d of %d)." %
+                (fragment_id, n_fragments - 1, frame_id_abs, frame_id + 1,
+                 len(pose_graph_rgbd.nodes)))
             rgbd = read_rgbd_image(color_files[frame_id_abs],
-                    depth_files[frame_id_abs], False, config)
+                                   depth_files[frame_id_abs], False, config)
             pose = np.dot(pose_graph_fragment.nodes[fragment_id].pose,
-                    pose_graph_rgbd.nodes[frame_id].pose)
+                          pose_graph_rgbd.nodes[frame_id].pose)
             volume.integrate(rgbd, intrinsic, np.linalg.inv(pose))

     mesh = volume.extract_triangle_mesh()
     mesh.compute_vertex_normals()
     if config["debug_mode"]:
-        draw_geometries([mesh])
+        o3d.visualization.draw_geometries([mesh])

     mesh_name = join(path_dataset, config["template_global_mesh"])
-    write_triangle_mesh(mesh_name, mesh, False, True)
+    o3d.io.write_triangle_mesh(mesh_name, mesh, False, True)


 def run(config):
     print("integrate the whole RGBD sequence using estimated camera pose.")
     if config["path_intrinsic"]:
-        intrinsic = read_pinhole_camera_intrinsic(config["path_intrinsic"])
+        intrinsic = o3d.io.read_pinhole_camera_intrinsic(
+            config["path_intrinsic"])
     else:
-        intrinsic = PinholeCameraIntrinsic(
-            PinholeCameraIntrinsicParameters.PrimeSenseDefault)
+        intrinsic = o3d.camera.PinholeCameraIntrinsic(
+            o3d.camera.PinholeCameraIntrinsicParameters.PrimeSenseDefault)
     scalable_integrate_rgb_frames(config["path_dataset"], intrinsic, config)
diff --git a/examples/Python/ReconstructionSystem/make_fragments.py b/examples/Python/ReconstructionSystem/make_fragments.py
index d528446e3d2..7af2ecc26f4 100644
--- a/examples/Python/ReconstructionSystem/make_fragments.py
+++ b/examples/Python/ReconstructionSystem/make_fragments.py
@@ -6,106 +6,124 @@
 import numpy as np
 import math
-from open3d import *
+import open3d as o3d
 import sys
 sys.path.append("../Utility")
-from file import *
-from opencv import *
-from optimize_posegraph import *
-
+from file import join, make_clean_folder, get_rgbd_file_lists
+from opencv import initialize_opencv
+sys.path.append(".")
+from optimize_posegraph import optimize_posegraph_for_fragment
 # check opencv python package
 with_opencv = initialize_opencv()
 if with_opencv:
     from opencv_pose_estimation import pose_estimation

+
 def read_rgbd_image(color_file, depth_file, convert_rgb_to_intensity, config):
-    color = read_image(color_file)
-    depth = read_image(depth_file)
+    color = o3d.io.read_image(color_file)
+    depth = o3d.io.read_image(depth_file)
     if config["depth_map_type"] == "redwood":
-        rgbd_image = create_rgbd_image_from_color_and_depth(color, depth,
-                depth_trunc = config["max_depth"],
-                convert_rgb_to_intensity = convert_rgb_to_intensity)
+        rgbd_image = o3d.geometry.create_rgbd_image_from_color_and_depth(
+            color,
+            depth,
+            depth_trunc=config["max_depth"],
+            convert_rgb_to_intensity=convert_rgb_to_intensity)
     return rgbd_image


-def register_one_rgbd_pair(s, t, color_files, depth_files,
-        intrinsic, with_opencv, config):
-    source_rgbd_image = read_rgbd_image(
-        color_files[s], depth_files[s], True, config)
-    target_rgbd_image = read_rgbd_image(
-        color_files[t], depth_files[t], True, config)
+def register_one_rgbd_pair(s, t, color_files, depth_files, intrinsic,
+                           with_opencv, config):
+    source_rgbd_image = read_rgbd_image(color_files[s], depth_files[s], True,
+                                        config)
+    target_rgbd_image = read_rgbd_image(color_files[t], depth_files[t], True,
+                                        config)

-    option = OdometryOption()
+    option = o3d.odometry.OdometryOption()
     option.max_depth_diff = config["max_depth_diff"]
-    if abs(s-t) is not 1:
+    if abs(s - t) is not 1:
         if with_opencv:
-            success_5pt, odo_init = pose_estimation(
-                source_rgbd_image, target_rgbd_image, intrinsic, False)
+            success_5pt, odo_init = pose_estimation(source_rgbd_image,
+                                                    target_rgbd_image,
+                                                    intrinsic, False)
             if success_5pt:
-                [success, trans, info] = compute_rgbd_odometry(
-                    source_rgbd_image, target_rgbd_image, intrinsic,
-                    odo_init, RGBDOdometryJacobianFromHybridTerm(), option)
+                [success, trans, info] = o3d.odometry.compute_rgbd_odometry(
+                    source_rgbd_image, target_rgbd_image, intrinsic, odo_init,
+                    o3d.odometry.RGBDOdometryJacobianFromHybridTerm(), option)
                 return [success, trans, info]
         return [False, np.identity(4), np.identity(6)]
     else:
         odo_init = np.identity(4)
-        [success, trans, info] = compute_rgbd_odometry(
-            source_rgbd_image, target_rgbd_image, intrinsic, odo_init,
-            RGBDOdometryJacobianFromHybridTerm(), option)
+        [success, trans, info] = o3d.odometry.compute_rgbd_odometry(
+            source_rgbd_image, target_rgbd_image, intrinsic, odo_init,
+            o3d.odometry.RGBDOdometryJacobianFromHybridTerm(), option)
         return [success, trans, info]


 def make_posegraph_for_fragment(path_dataset, sid, eid, color_files,
-        depth_files, fragment_id, n_fragments, intrinsic, with_opencv, config):
-    set_verbosity_level(VerbosityLevel.Error)
-    pose_graph = PoseGraph()
+                                depth_files, fragment_id, n_fragments,
+                                intrinsic, with_opencv, config):
+    o3d.utility.set_verbosity_level(o3d.utility.VerbosityLevel.Error)
+    pose_graph = o3d.registration.PoseGraph()
     trans_odometry = np.identity(4)
-    pose_graph.nodes.append(PoseGraphNode(trans_odometry))
+    pose_graph.nodes.append(o3d.registration.PoseGraphNode(trans_odometry))
     for s in range(sid, eid):
         for t in range(s + 1, eid):
             # odometry
             if t == s + 1:
-                print("Fragment %03d / %03d :: RGBD matching between frame : %d and %d"
-                        % (fragment_id, n_fragments-1, s, t))
-                [success, trans, info] = register_one_rgbd_pair(s, t,
-                        color_files, depth_files, intrinsic,
-                        with_opencv, config)
+                print(
+                    "Fragment %03d / %03d :: RGBD matching between frame : %d and %d"
+                    % (fragment_id, n_fragments - 1, s, t))
+                [success, trans,
+                 info] = register_one_rgbd_pair(s, t, color_files, depth_files,
+                                                intrinsic, with_opencv, config)
                 trans_odometry = np.dot(trans, trans_odometry)
                 trans_odometry_inv = np.linalg.inv(trans_odometry)
-                pose_graph.nodes.append(PoseGraphNode(trans_odometry_inv))
+                pose_graph.nodes.append(
+                    o3d.registration.PoseGraphNode(trans_odometry_inv))
                 pose_graph.edges.append(
-                    PoseGraphEdge(s-sid, t-sid, trans, info,
-                            uncertain = False))
+                    o3d.registration.PoseGraphEdge(s - sid,
+                                                   t - sid,
+                                                   trans,
+                                                   info,
+                                                   uncertain=False))
             # keyframe loop closure
             if s % config['n_keyframes_per_n_frame'] == 0 \
                     and t % config['n_keyframes_per_n_frame'] == 0:
-                print("Fragment %03d / %03d :: RGBD matching between frame : %d and %d"
-                        % (fragment_id, n_fragments-1, s, t))
-                [success, trans, info] = register_one_rgbd_pair(s, t,
-                        color_files, depth_files, intrinsic,
-                        with_opencv, config)
+                print(
+                    "Fragment %03d / %03d :: RGBD matching between frame : %d and %d"
+                    % (fragment_id, n_fragments - 1, s, t))
+                [success, trans,
+                 info] = register_one_rgbd_pair(s, t, color_files, depth_files,
+                                                intrinsic, with_opencv, config)
                 if success:
                     pose_graph.edges.append(
-                        PoseGraphEdge(s-sid, t-sid, trans, info,
-                                uncertain = True))
-    write_pose_graph(join(path_dataset,
-            config["template_fragment_posegraph"] % fragment_id), pose_graph)
+                        o3d.registration.PoseGraphEdge(s - sid,
+                                                       t - sid,
+                                                       trans,
+                                                       info,
+                                                       uncertain=True))
+    o3d.io.write_pose_graph(
+        join(path_dataset, config["template_fragment_posegraph"] % fragment_id),
+        pose_graph)


-def integrate_rgb_frames_for_fragment(color_files, depth_files,
-        fragment_id, n_fragments, pose_graph_name, intrinsic, config):
-    pose_graph = read_pose_graph(pose_graph_name)
-    volume = ScalableTSDFVolume(voxel_length = config["tsdf_cubic_size"]/512.0,
-            sdf_trunc = 0.04, color_type = TSDFVolumeColorType.RGB8)
+def integrate_rgb_frames_for_fragment(color_files, depth_files, fragment_id,
+                                      n_fragments, pose_graph_name, intrinsic,
+                                      config):
+    pose_graph = o3d.io.read_pose_graph(pose_graph_name)
+    volume = o3d.integration.ScalableTSDFVolume(
+        voxel_length=config["tsdf_cubic_size"] / 512.0,
+        sdf_trunc=0.04,
+        color_type=o3d.integration.TSDFVolumeColorType.RGB8)
     for i in range(len(pose_graph.nodes)):
         i_abs = fragment_id * config['n_frames_per_fragment'] + i
-        print("Fragment %03d / %03d :: integrate rgbd frame %d (%d of %d)."
-                % (fragment_id, n_fragments-1,
-                i_abs, i+1, len(pose_graph.nodes)))
-        rgbd = read_rgbd_image(
-            color_files[i_abs], depth_files[i_abs], False, config)
+        print(
+            "Fragment %03d / %03d :: integrate rgbd frame %d (%d of %d)." %
+            (fragment_id, n_fragments - 1, i_abs, i + 1, len(pose_graph.nodes)))
+        rgbd = read_rgbd_image(color_files[i_abs], depth_files[i_abs], False,
+                               config)
         pose = pose_graph.nodes[i].pose
         volume.integrate(rgbd, intrinsic, np.linalg.inv(pose))
     mesh = volume.extract_triangle_mesh()
@@ -114,37 +132,38 @@ def integrate_rgb_frames_for_fragment(color_files, depth_files,

 def make_pointcloud_for_fragment(path_dataset, color_files, depth_files,
-        fragment_id, n_fragments, intrinsic, config):
+                                 fragment_id, n_fragments, intrinsic, config):
     mesh = integrate_rgb_frames_for_fragment(
-            color_files, depth_files, fragment_id, n_fragments,
-            join(path_dataset,
-            config["template_fragment_posegraph_optimized"] % fragment_id),
-            intrinsic, config)
+        color_files, depth_files, fragment_id, n_fragments,
+        join(path_dataset,
+             config["template_fragment_posegraph_optimized"] % fragment_id),
+        intrinsic, config)
-    pcd = PointCloud()
+    pcd = o3d.geometry.PointCloud()
     pcd.points = mesh.vertices
     pcd.colors = mesh.vertex_colors
-    pcd_name = join(path_dataset, config["template_fragment_pointcloud"] % fragment_id)
-    write_point_cloud(pcd_name, pcd, False, True)
+    pcd_name = join(path_dataset,
+                    config["template_fragment_pointcloud"] % fragment_id)
+    o3d.io.write_point_cloud(pcd_name, pcd, False, True)


-def process_single_fragment(fragment_id, color_files, depth_files,
-        n_files, n_fragments, config):
+def process_single_fragment(fragment_id, color_files, depth_files, n_files,
+                            n_fragments, config):
     if config["path_intrinsic"]:
-        intrinsic = read_pinhole_camera_intrinsic(config["path_intrinsic"])
+        intrinsic = o3d.io.read_pinhole_camera_intrinsic(
+            config["path_intrinsic"])
     else:
-        intrinsic = PinholeCameraIntrinsic(
-            PinholeCameraIntrinsicParameters.PrimeSenseDefault)
+        intrinsic = o3d.camera.PinholeCameraIntrinsic(
+            o3d.camera.PinholeCameraIntrinsicParameters.PrimeSenseDefault)
     sid = fragment_id * config['n_frames_per_fragment']
     eid = min(sid + config['n_frames_per_fragment'], n_files)

-    make_posegraph_for_fragment(config["path_dataset"], sid, eid,
-            color_files, depth_files, fragment_id,
-            n_fragments, intrinsic, with_opencv, config)
-    optimize_posegraph_for_fragment(
-        config["path_dataset"], fragment_id, config)
-    make_pointcloud_for_fragment(
-        config["path_dataset"], color_files, depth_files,
-        fragment_id, n_fragments, intrinsic, config)
+    make_posegraph_for_fragment(config["path_dataset"], sid, eid, color_files,
+                                depth_files, fragment_id, n_fragments,
+                                intrinsic, with_opencv, config)
+    optimize_posegraph_for_fragment(config["path_dataset"], fragment_id, config)
+    make_pointcloud_for_fragment(config["path_dataset"], color_files,
+                                 depth_files, fragment_id, n_fragments,
+                                 intrinsic, config)


 def run(config):
@@ -161,11 +180,9 @@ def run(config):
         import subprocess
         MAX_THREAD = min(multiprocessing.cpu_count(), n_fragments)
         Parallel(n_jobs=MAX_THREAD)(delayed(process_single_fragment)(
-                fragment_id, color_files, depth_files,
-                n_files, n_fragments, config)
-                for fragment_id in range(n_fragments))
+            fragment_id, color_files, depth_files, n_files, n_fragments, config)
+                                    for fragment_id in range(n_fragments))
     else:
         for fragment_id in range(n_fragments):
-            process_single_fragment(
-                fragment_id, color_files, depth_files,
-                n_files, n_fragments, config)
+            process_single_fragment(fragment_id, color_files, depth_files,
+                                    n_files, n_fragments, config)
diff --git a/examples/Python/ReconstructionSystem/opencv_pose_estimation.py b/examples/Python/ReconstructionSystem/opencv_pose_estimation.py
index 7622681ab25..38929c372e7 100644
--- a/examples/Python/ReconstructionSystem/opencv_pose_estimation.py
+++ b/examples/Python/ReconstructionSystem/opencv_pose_estimation.py
@@ -10,32 +10,37 @@
 # source activate py27opencv
 # conda install -c conda-forge opencv
 # conda install -c conda-forge openblas (if openblas conflicts)
+
 import numpy as np
 import cv2
-from open3d import *
-from matplotlib import pyplot as plt # for visualizing feature matching
+from matplotlib import pyplot as plt  # for visualizing feature matching
 import copy


 def pose_estimation(source_rgbd_image, target_rgbd_image,
-        pinhole_camera_intrinsic, debug_draw_correspondences):
+                    pinhole_camera_intrinsic, debug_draw_correspondences):
     success = False
     trans = np.identity(4)

     # transform double array to unit8 array
-    color_cv_s = np.uint8(np.asarray(source_rgbd_image.color)*255.0)
-    color_cv_t = np.uint8(np.asarray(target_rgbd_image.color)*255.0)
-
-    orb = cv2.ORB_create(scaleFactor=1.2, nlevels = 8, edgeThreshold = 31,
-            firstLevel = 0, WTA_K = 2, scoreType=cv2.ORB_HARRIS_SCORE,
-            nfeatures = 100, patchSize = 31) # to save time
+    color_cv_s = np.uint8(np.asarray(source_rgbd_image.color) * 255.0)
+    color_cv_t = np.uint8(np.asarray(target_rgbd_image.color) * 255.0)
+
+    orb = cv2.ORB_create(scaleFactor=1.2,
+                         nlevels=8,
+                         edgeThreshold=31,
+                         firstLevel=0,
+                         WTA_K=2,
+                         scoreType=cv2.ORB_HARRIS_SCORE,
+                         nfeatures=100,
+                         patchSize=31)  # to save time
     [kp_s, des_s] = orb.detectAndCompute(color_cv_s, None)
     [kp_t, des_t] = orb.detectAndCompute(color_cv_t, None)
     if len(kp_s) is 0 or len(kp_t) is 0:
         return success, trans

     bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
-    matches = bf.match(des_s,des_t)
+    matches = bf.match(des_s, des_t)

     pts_s = []
     pts_t = []
@@ -47,27 +52,32 @@ def pose_estimation(source_rgbd_image, target_rgbd_image,
     # inlier points after initial BF matching
     if debug_draw_correspondences:
         draw_correspondences(np.asarray(source_rgbd_image.color),
-                np.asarray(target_rgbd_image.color), pts_s, pts_t,
-                np.ones(pts_s.shape[0]), "Initial BF matching")
+                             np.asarray(target_rgbd_image.color), pts_s, pts_t,
+                             np.ones(pts_s.shape[0]), "Initial BF matching")

-    focal_input = (pinhole_camera_intrinsic.intrinsic_matrix[0,0] +
-            pinhole_camera_intrinsic.intrinsic_matrix[1,1]) / 2.0
-    pp_x = pinhole_camera_intrinsic.intrinsic_matrix[0,2]
-    pp_y = pinhole_camera_intrinsic.intrinsic_matrix[1,2]
+    focal_input = (pinhole_camera_intrinsic.intrinsic_matrix[0, 0] +
+                   pinhole_camera_intrinsic.intrinsic_matrix[1, 1]) / 2.0
+    pp_x = pinhole_camera_intrinsic.intrinsic_matrix[0, 2]
+    pp_y = pinhole_camera_intrinsic.intrinsic_matrix[1, 2]

     # Essential matrix is made for masking inliers
     pts_s_int = np.int32(pts_s + 0.5)
     pts_t_int = np.int32(pts_t + 0.5)
-    [E, mask] = cv2.findEssentialMat(pts_s_int, pts_t_int, focal=focal_input,
-            pp=(pp_x, pp_y), method=cv2.RANSAC, prob=0.999, threshold=1.0)
+    [E, mask] = cv2.findEssentialMat(pts_s_int,
+                                     pts_t_int,
+                                     focal=focal_input,
+                                     pp=(pp_x, pp_y),
+                                     method=cv2.RANSAC,
+                                     prob=0.999,
+                                     threshold=1.0)
     if mask is None:
         return success, trans

     # inlier points after 5pt algorithm
     if debug_draw_correspondences:
         draw_correspondences(np.asarray(source_rgbd_image.color),
-                np.asarray(target_rgbd_image.color),
-                pts_s, pts_t, mask, "5-pt RANSAC")
+                             np.asarray(target_rgbd_image.color), pts_s, pts_t,
+                             mask, "5-pt RANSAC")

     # make 3D correspondences
     depth_s = np.asarray(source_rgbd_image.depth)
@@ -77,55 +87,57 @@ def pose_estimation(source_rgbd_image, target_rgbd_image,
     cnt = 0
     for i in range(pts_s.shape[0]):
         if mask[i]:
-            xyz_s = get_xyz_from_pts(pts_s[i,:],
-                    depth_s, pp_x, pp_y, focal_input)
-            pts_xyz_s[:,cnt] = xyz_s
-            xyz_t = get_xyz_from_pts(pts_t[i,:],
-                    depth_t, pp_x, pp_y, focal_input)
-            pts_xyz_t[:,cnt] = xyz_t
+            xyz_s = get_xyz_from_pts(pts_s[i, :], depth_s, pp_x, pp_y,
+                                     focal_input)
+            pts_xyz_s[:, cnt] = xyz_s
+            xyz_t = get_xyz_from_pts(pts_t[i, :], depth_t, pp_x, pp_y,
+                                     focal_input)
+            pts_xyz_t[:, cnt] = xyz_t
             cnt = cnt + 1
-    pts_xyz_s = pts_xyz_s[:,:cnt]
-    pts_xyz_t = pts_xyz_t[:,:cnt]
+    pts_xyz_s = pts_xyz_s[:, :cnt]
+    pts_xyz_t = pts_xyz_t[:, :cnt]

     success, trans, inlier_id_vec = estimate_3D_transform_RANSAC(
-            pts_xyz_s, pts_xyz_t)
+        pts_xyz_s, pts_xyz_t)

     if debug_draw_correspondences:
-        pts_s_new = np.zeros(shape=(len(inlier_id_vec),2))
-        pts_t_new = np.zeros(shape=(len(inlier_id_vec),2))
+        pts_s_new = np.zeros(shape=(len(inlier_id_vec), 2))
+        pts_t_new = np.zeros(shape=(len(inlier_id_vec), 2))
         mask = np.ones(len(inlier_id_vec))
         cnt = 0
         for i in inlier_id_vec:
-            u_s,v_s = get_uv_from_xyz(pts_xyz_s[0,i], pts_xyz_s[1,i],
-                    pts_xyz_s[2,i], pp_x, pp_y, focal_input)
-            u_t,v_t = get_uv_from_xyz(pts_xyz_t[0,i], pts_xyz_t[1,i],
-                    pts_xyz_t[2,i], pp_x, pp_y, focal_input)
-            pts_s_new[cnt,:] = [u_s,v_s]
-            pts_t_new[cnt,:] = [u_t,v_t]
+            u_s, v_s = get_uv_from_xyz(pts_xyz_s[0, i], pts_xyz_s[1, i],
+                                       pts_xyz_s[2, i], pp_x, pp_y, focal_input)
+            u_t, v_t = get_uv_from_xyz(pts_xyz_t[0, i], pts_xyz_t[1, i],
+                                       pts_xyz_t[2, i], pp_x, pp_y, focal_input)
+            pts_s_new[cnt, :] = [u_s, v_s]
+            pts_t_new[cnt, :] = [u_t, v_t]
             cnt = cnt + 1
         draw_correspondences(np.asarray(source_rgbd_image.color),
-                np.asarray(target_rgbd_image.color),
-                pts_s_new, pts_t_new, mask, "5-pt RANSAC + 3D Rigid RANSAC")
+                             np.asarray(target_rgbd_image.color), pts_s_new,
+                             pts_t_new, mask, "5-pt RANSAC + 3D Rigid RANSAC")
     return success, trans


 def draw_correspondences(img_s, img_t, pts_s, pts_t, mask, title):
-    ha,wa = img_s.shape[:2]
-    hb,wb = img_t.shape[:2]
-    total_width = wa+wb
+    ha, wa = img_s.shape[:2]
+    hb, wb = img_t.shape[:2]
+    total_width = wa + wb
     new_img = np.zeros(shape=(ha, total_width))
-    new_img[:ha,:wa]=img_s
-    new_img[:hb,wa:wa+wb]=img_t
+    new_img[:ha, :wa] = img_s
+    new_img[:hb, wa:wa + wb] = img_t

     fig = plt.figure()
     fig.canvas.set_window_title(title)
     for i in range(pts_s.shape[0]):
         if mask[i]:
-            sx = pts_s[i,0]
-            sy = pts_s[i,1]
-            tx = pts_t[i,0] + wa
-            ty = pts_t[i,1]
-            plt.plot([sx,tx], [sy,ty], color=np.random.random(3)/2+0.5, lw=1.0)
+            sx = pts_s[i, 0]
+            sy = pts_s[i, 1]
+            tx = pts_t[i, 0] + wa
+            ty = pts_t[i, 1]
+            plt.plot([sx, tx], [sy, ty],
+                     color=np.random.random(3) / 2 + 0.5,
+                     lw=1.0)
     plt.imshow(new_img)
     plt.pause(0.5)
     plt.close()
@@ -148,22 +160,22 @@ def estimate_3D_transform_RANSAC(pts_xyz_s, pts_xyz_t):
         # sampling
         rand_idx = np.random.randint(n_points, size=n_sample)
-        sample_xyz_s = pts_xyz_s[:,rand_idx]
-        sample_xyz_t = pts_xyz_t[:,rand_idx]
+        sample_xyz_s = pts_xyz_s[:, rand_idx]
+        sample_xyz_t = pts_xyz_t[:, rand_idx]
         R_approx, t_approx = estimate_3D_transform(sample_xyz_s, sample_xyz_t)

         # evaluation
         diff_mat = pts_xyz_t - (np.matmul(R_approx, pts_xyz_s) +
-                np.tile(t_approx, [1, n_points]))
-        diff = [np.linalg.norm(diff_mat[:,i]) for i in range(n_points)]
+                                np.tile(t_approx, [1, n_points]))
+        diff = [np.linalg.norm(diff_mat[:, i]) for i in range(n_points)]
         n_inlier = len([1 for diff_iter in diff if diff_iter < max_distance])

         # note: diag(R_approx) > 0 prevents ankward transformation between
         # RGBD pair of relatively small amount of baseline.
         if (n_inlier > max_inlier) and (np.linalg.det(R_approx) != 0.0) and \
                 (R_approx[0,0] > 0 and R_approx[1,1] > 0 and R_approx[2,2] > 0):
-            Transform_good[:3,:3] = R_approx
-            Transform_good[:3,3] = [t_approx[0],t_approx[1],t_approx[2]]
+            Transform_good[:3, :3] = R_approx
+            Transform_good[:3, 3] = [t_approx[0], t_approx[1], t_approx[2]]
             max_inlier = n_inlier
             inlier_vec = [id_iter for diff_iter, id_iter \
                     in zip(diff, range(n_points)) \
@@ -184,19 +196,19 @@ def estimate_3D_transform(input_xyz_s, input_xyz_t):
     n_points = xyz_s.shape[1]
     mean_s = np.mean(xyz_s, axis=1)
     mean_t = np.mean(xyz_t, axis=1)
-    mean_s.shape = (3,1)
-    mean_t.shape = (3,1)
+    mean_s.shape = (3, 1)
+    mean_t.shape = (3, 1)
     xyz_diff_s = xyz_s - np.tile(mean_s, [1, n_points])
     xyz_diff_t = xyz_t - np.tile(mean_t, [1, n_points])
-    H = np.matmul(xyz_diff_s,xyz_diff_t.transpose())
+    H = np.matmul(xyz_diff_s, xyz_diff_t.transpose())

     # solve system
     U, s, V = np.linalg.svd(H)
     R_approx = np.matmul(V.transpose(), U.transpose())
     if np.linalg.det(R_approx) < 0.0:
-        det = np.linalg.det(np.matmul(U,V))
+        det = np.linalg.det(np.matmul(U, V))
         D = np.identity(3)
-        D[2,2] = det
-        R_approx = np.matmul(U,np.matmul(D,V))
+        D[2, 2] = det
+        R_approx = np.matmul(U, np.matmul(D, V))
     t_approx = mean_t - np.matmul(R_approx, mean_s)

     return R_approx, t_approx
@@ -209,17 +221,18 @@ def get_xyz_from_pts(pts_row, depth, px, py, focal):
     height = depth.shape[0]
     width = depth.shape[1]
     # bilinear depth interpolation
-    if u0 > 0 and u0 < width-1 and v0 > 0 and v0 < height-1:
+    if u0 > 0 and u0 < width - 1 and v0 > 0 and v0 < height - 1:
         up = pts_row[0] - u0
         vp = pts_row[1] - v0
         d0 = depth[v0, u0]
-        d1 = depth[v0, u0+1]
-        d2 = depth[v0+1, u0]
-        d3 = depth[v0+1, u0]
-        d = (1-vp) * (d1 * up + d0 * (1-up)) + vp * (d3 * up + d2 * (1-up))
+        d1 = depth[v0, u0 + 1]
+        d2 = depth[v0 + 1, u0]
+        d3 = depth[v0 + 1, u0]
+        d = (1 - vp) * (d1 * up + d0 * (1 - up)) + vp * (d3 * up + d2 *
+                                                         (1 - up))
         return get_xyz_from_uv(u, v, d, px, py, focal)
     else:
-        return [0,0,0]
+        return [0, 0, 0]


 def get_xyz_from_uv(u, v, d, px, py, focal):
diff --git a/examples/Python/ReconstructionSystem/optimize_posegraph.py b/examples/Python/ReconstructionSystem/optimize_posegraph.py
index 7f22c6ad154..b6a49ef7bac 100644
--- a/examples/Python/ReconstructionSystem/optimize_posegraph.py
+++ b/examples/Python/ReconstructionSystem/optimize_posegraph.py
@@ -4,34 +4,36 @@

 # examples/Python/ReconstructionSystem/optimize_posegraph.py

+import open3d as o3d
 import sys
-from open3d import *
 sys.path.append("../Utility")
-from file import *
+from file import join


 def run_posegraph_optimization(pose_graph_name, pose_graph_optimized_name,
-        max_correspondence_distance, preference_loop_closure):
-    # to display messages from global_optimization
-    set_verbosity_level(VerbosityLevel.Debug)
-    method = GlobalOptimizationLevenbergMarquardt()
-    criteria = GlobalOptimizationConvergenceCriteria()
-    option = GlobalOptimizationOption(
-            max_correspondence_distance = max_correspondence_distance,
-            edge_prune_threshold = 0.25,
-            preference_loop_closure = preference_loop_closure,
-            reference_node = 0)
-    pose_graph = read_pose_graph(pose_graph_name)
-    global_optimization(pose_graph, method, criteria, option)
-    write_pose_graph(pose_graph_optimized_name, pose_graph)
-    set_verbosity_level(VerbosityLevel.Error)
+                               max_correspondence_distance,
+                               preference_loop_closure):
+    # to display messages from o3d.registration.global_optimization
+    o3d.utility.set_verbosity_level(o3d.utility.VerbosityLevel.Debug)
+    method = o3d.registration.GlobalOptimizationLevenbergMarquardt()
+    criteria = o3d.registration.GlobalOptimizationConvergenceCriteria()
+    option = o3d.registration.GlobalOptimizationOption(
+        max_correspondence_distance=max_correspondence_distance,
+        edge_prune_threshold=0.25,
+        preference_loop_closure=preference_loop_closure,
+        reference_node=0)
+    pose_graph = o3d.io.read_pose_graph(pose_graph_name)
+    o3d.registration.global_optimization(pose_graph, method, criteria, option)
+    o3d.io.write_pose_graph(pose_graph_optimized_name, pose_graph)
+    o3d.utility.set_verbosity_level(o3d.utility.VerbosityLevel.Error)


 def optimize_posegraph_for_fragment(path_dataset, fragment_id, config):
     pose_graph_name = join(path_dataset,
-            config["template_fragment_posegraph"] % fragment_id)
-    pose_graph_optimized_name = join(path_dataset,
-            config["template_fragment_posegraph_optimized"] % fragment_id)
+                           config["template_fragment_posegraph"] % fragment_id)
+    pose_graph_optimized_name = join(
+        path_dataset,
+        config["template_fragment_posegraph_optimized"] % fragment_id)
     run_posegraph_optimization(pose_graph_name, pose_graph_optimized_name,
             max_correspondence_distance = config["max_depth_diff"],
             preference_loop_closure = \
@@ -40,8 +42,8 @@ def optimize_posegraph_for_fragment(path_dataset, fragment_id, config):

 def optimize_posegraph_for_scene(path_dataset, config):
     pose_graph_name = join(path_dataset, config["template_global_posegraph"])
-    pose_graph_optimized_name = join(path_dataset,
-            config["template_global_posegraph_optimized"])
+    pose_graph_optimized_name = join(
+        path_dataset, config["template_global_posegraph_optimized"])
     run_posegraph_optimization(pose_graph_name, pose_graph_optimized_name,
             max_correspondence_distance = config["voxel_size"] * 1.4,
             preference_loop_closure = \
@@ -50,8 +52,8 @@ def optimize_posegraph_for_scene(path_dataset, config):

 def optimize_posegraph_for_refined_scene(path_dataset, config):
     pose_graph_name = join(path_dataset, config["template_refined_posegraph"])
-    pose_graph_optimized_name = join(path_dataset,
-            config["template_refined_posegraph_optimized"])
+    pose_graph_optimized_name = join(
+        path_dataset, config["template_refined_posegraph_optimized"])
     run_posegraph_optimization(pose_graph_name, pose_graph_optimized_name,
             max_correspondence_distance = config["voxel_size"] * 1.4,
             preference_loop_closure = \
diff --git a/examples/Python/ReconstructionSystem/refine_registration.py b/examples/Python/ReconstructionSystem/refine_registration.py
index 7308023128e..a61e479cc6c 100644
--- a/examples/Python/ReconstructionSystem/refine_registration.py
+++ b/examples/Python/ReconstructionSystem/refine_registration.py
@@ -5,68 +5,90 @@
 # examples/Python/ReconstructionSystem/register_fragments.py

 import numpy as np
-from open3d import *
+import open3d as o3d
 import sys
 sys.path.append("../Utility")
-from file import *
-from visualization import *
-from optimize_posegraph import *
+from file import join, get_file_list
+from visualization import draw_registration_result_original_color
+sys.path.append(".")
+from optimize_posegraph import optimize_posegraph_for_refined_scene


-def update_posegrph_for_scene(s, t, transformation, information,
-        odometry, pose_graph):
-    if t == s + 1: # odometry case
+def update_posegrph_for_scene(s, t, transformation, information, odometry,
+                              pose_graph):
+    if t == s + 1:  # odometry case
         odometry = np.dot(transformation, odometry)
         odometry_inv = np.linalg.inv(odometry)
-        pose_graph.nodes.append(PoseGraphNode(odometry_inv))
+        pose_graph.nodes.append(o3d.registration.PoseGraphNode(odometry_inv))
         pose_graph.edges.append(
-            PoseGraphEdge(s, t, transformation,
-                information, uncertain = False))
-    else: # loop closure case
+            o3d.registration.PoseGraphEdge(s,
+                                           t,
+                                           transformation,
+                                           information,
+                                           uncertain=False))
+    else:  # loop closure case
         pose_graph.edges.append(
-            PoseGraphEdge(s, t, transformation,
-                information, uncertain = True))
+            o3d.registration.PoseGraphEdge(s,
+                                           t,
+                                           transformation,
+                                           information,
+                                           uncertain=True))
     return (odometry, pose_graph)


-def multiscale_icp(source, target, voxel_size, max_iter,
-        config, init_transformation = np.identity(4)):
+def multiscale_icp(source,
+                   target,
+                   voxel_size,
+                   max_iter,
+                   config,
+                   init_transformation=np.identity(4)):
     current_transformation = init_transformation
-    for i, scale in enumerate(range(len(max_iter))): # multi-scale approach
+    for i, scale in enumerate(range(len(max_iter))):  # multi-scale approach
         iter = max_iter[scale]
         distance_threshold = config["voxel_size"] * 1.4
         print("voxel_size %f" % voxel_size[scale])
-        source_down = voxel_down_sample(source, voxel_size[scale])
-        target_down = voxel_down_sample(target, voxel_size[scale])
+        source_down = o3d.geometry.voxel_down_sample(source, voxel_size[scale])
+        target_down = o3d.geometry.voxel_down_sample(target, voxel_size[scale])
         if config["icp_method"] == "point_to_point":
-            result_icp = registration_icp(source_down, target_down,
-                    distance_threshold, current_transformation,
-                    TransformationEstimationPointToPoint(),
-                    ICPConvergenceCriteria(max_iteration = iter))
+            result_icp = o3d.registration.registration_icp(
+                source_down, target_down, distance_threshold,
+                current_transformation,
+                o3d.registration.TransformationEstimationPointToPoint(),
+                o3d.registration.ICPConvergenceCriteria(max_iteration=iter))
         else:
-            estimate_normals(source_down, KDTreeSearchParamHybrid(
-                    radius = voxel_size[scale] * 2.0, max_nn = 30))
-            estimate_normals(target_down, KDTreeSearchParamHybrid(
-                    radius = voxel_size[scale] * 2.0, max_nn = 30))
+            o3d.geometry.estimate_normals(
+                source_down,
+                o3d.geometry.KDTreeSearchParamHybrid(radius=voxel_size[scale] *
+                                                     2.0,
+                                                     max_nn=30))
+            o3d.geometry.estimate_normals(
+                target_down,
+                o3d.geometry.KDTreeSearchParamHybrid(radius=voxel_size[scale] *
+                                                     2.0,
+                                                     max_nn=30))
             if config["icp_method"] == "point_to_plane":
-                result_icp = registration_icp(source_down, target_down,
-                        distance_threshold, current_transformation,
-                        TransformationEstimationPointToPlane(),
-                        ICPConvergenceCriteria(max_iteration = iter))
+                result_icp = o3d.registration.registration_icp(
+                    source_down, target_down, distance_threshold,
+                    current_transformation,
+                    o3d.registration.TransformationEstimationPointToPlane(),
+                    o3d.registration.ICPConvergenceCriteria(max_iteration=iter))
             if config["icp_method"] == "color":
-                result_icp = registration_colored_icp(source_down, target_down,
-                        voxel_size[scale], current_transformation,
-                        ICPConvergenceCriteria(relative_fitness = 1e-6,
-                        relative_rmse = 1e-6, max_iteration = iter))
+                result_icp = o3d.registration.registration_colored_icp(
+                    source_down, target_down, voxel_size[scale],
+                    current_transformation,
+                    o3d.registration.ICPConvergenceCriteria(
+                        relative_fitness=1e-6,
+                        relative_rmse=1e-6,
+                        max_iteration=iter))
         current_transformation = result_icp.transformation
-        if i == len(max_iter)-1:
-            information_matrix = get_information_matrix_from_point_clouds(
-                    source_down, target_down, voxel_size[scale] * 1.4,
-                    result_icp.transformation)
+        if i == len(max_iter) - 1:
+            information_matrix = o3d.registration.get_information_matrix_from_point_clouds(
+                source_down, target_down, voxel_size[scale] * 1.4,
+                result_icp.transformation)

     if config["debug_mode"]:
         draw_registration_result_original_color(source, target,
-                result_icp.transformation)
+                                                result_icp.transformation)
     return (result_icp.transformation, information_matrix)
@@ -78,16 +100,16 @@ def local_refinement(source, target, transformation_init, config):
             [voxel_size, voxel_size/2.0, voxel_size/4.0], [50, 30, 14],
             config, transformation_init)
     if config["debug_mode"]:
-        draw_registration_result_original_color(
-            source, target, transformation)
+        draw_registration_result_original_color(source, target, transformation)
     return (transformation, information)


-def register_point_cloud_pair(ply_file_names, s, t, transformation_init, config):
+def register_point_cloud_pair(ply_file_names, s, t, transformation_init,
+                              config):
     print("reading %s ..." % ply_file_names[s])
-    source = read_point_cloud(ply_file_names[s])
+    source = o3d.io.read_point_cloud(ply_file_names[s])
     print("reading %s ..." % ply_file_names[t])
-    target = read_point_cloud(ply_file_names[t])
+    target = o3d.io.read_point_cloud(ply_file_names[t])
     (transformation, information) = \
             local_refinement(source, target, transformation_init, config)
     if config["debug_mode"]:
@@ -98,6 +120,7 @@ def register_point_cloud_pair(ply_file_names, s, t, transformation_init, config)

 # other types instead of class?
 class matching_result:
+
     def __init__(self, s, t, trans):
         self.s = s
         self.t = t
@@ -107,8 +130,9 @@ def __init__(self, s, t, trans):

 def make_posegraph_for_refined_scene(ply_file_names, config):
-    pose_graph = read_pose_graph(join(config["path_dataset"],
-            config["template_global_posegraph_optimized"]))
+    pose_graph = o3d.io.read_pose_graph(
+        join(config["path_dataset"],
+             config["template_global_posegraph_optimized"]))

     n_files = len(ply_file_names)
     matching_results = {}
@@ -123,12 +147,12 @@ def make_posegraph_for_refined_scene(ply_file_names, config):
         import multiprocessing
         import subprocess
         MAX_THREAD = min(multiprocessing.cpu_count(),
-                max(len(pose_graph.edges), 1))
+                         max(len(pose_graph.edges), 1))
         results = Parallel(n_jobs=MAX_THREAD)(
-                delayed(register_point_cloud_pair)(ply_file_names,
-                matching_results[r].s, matching_results[r].t,
+            delayed(register_point_cloud_pair)(
+                ply_file_names, matching_results[r].s, matching_results[r].t,
                 matching_results[r].transformation, config)
-                for r in matching_results)
+            for r in matching_results)
         for i, r in enumerate(matching_results):
             matching_results[r].transformation = results[i][0]
             matching_results[r].information = results[i][1]
@@ -140,24 +164,24 @@ def make_posegraph_for_refined_scene(ply_file_names, config):
                     matching_results[r].s, matching_results[r].t,
                     matching_results[r].transformation, config)

-    pose_graph_new = PoseGraph()
+    pose_graph_new = o3d.registration.PoseGraph()
     odometry = np.identity(4)
-    pose_graph_new.nodes.append(PoseGraphNode(odometry))
+    pose_graph_new.nodes.append(o3d.registration.PoseGraphNode(odometry))
     for r in matching_results:
         (odometry, pose_graph_new) = update_posegrph_for_scene(
-                matching_results[r].s, matching_results[r].t,
-                matching_results[r].transformation,
-                matching_results[r].information,
-                odometry, pose_graph_new)
+            matching_results[r].s, matching_results[r].t,
+            matching_results[r].transformation, matching_results[r].information,
+            odometry, pose_graph_new)
     print(pose_graph_new)
-    write_pose_graph(join(config["path_dataset"],
-            config["template_refined_posegraph"]), pose_graph_new)
+    o3d.io.write_pose_graph(
+        join(config["path_dataset"], config["template_refined_posegraph"]),
+        pose_graph_new)


 def run(config):
     print("refine rough registration of fragments.")
-    set_verbosity_level(VerbosityLevel.Debug)
-    ply_file_names = get_file_list(join(
-        config["path_dataset"], config["folder_fragment"]), ".ply")
+    o3d.utility.set_verbosity_level(o3d.utility.VerbosityLevel.Debug)
+    ply_file_names = get_file_list(
+        join(config["path_dataset"], config["folder_fragment"]), ".ply")
     make_posegraph_for_refined_scene(ply_file_names, config)
     optimize_posegraph_for_refined_scene(config["path_dataset"], config)
diff --git a/examples/Python/ReconstructionSystem/register_fragments.py b/examples/Python/ReconstructionSystem/register_fragments.py
index de7c6ee8234..507ce41e21a 100644
--- a/examples/Python/ReconstructionSystem/register_fragments.py
+++ b/examples/Python/ReconstructionSystem/register_fragments.py
@@ -5,99 +5,110 @@
 # examples/Python/ReconstructionSystem/register_fragments.py

 import numpy as np
-from open3d import *
+import open3d as o3d
 import sys
 sys.path.append("../Utility")
-from file import *
-from visualization import *
-from optimize_posegraph import *
-from refine_registration import *
+from file import join, get_file_list, make_clean_folder
+from visualization import draw_registration_result
+sys.path.append(".")
+from optimize_posegraph import optimize_posegraph_for_scene
+from refine_registration import multiscale_icp


 def preprocess_point_cloud(pcd, config):
     voxel_size = config["voxel_size"]
-    pcd_down = voxel_down_sample(pcd, voxel_size)
-    estimate_normals(pcd_down,
-            KDTreeSearchParamHybrid(radius = voxel_size * 2.0, max_nn = 30))
-    pcd_fpfh = compute_fpfh_feature(pcd_down,
-            KDTreeSearchParamHybrid(radius = voxel_size * 5.0, max_nn = 100))
+    pcd_down = o3d.geometry.voxel_down_sample(pcd, voxel_size)
+    o3d.geometry.estimate_normals(
+        pcd_down,
+        o3d.geometry.KDTreeSearchParamHybrid(radius=voxel_size * 2.0,
+                                             max_nn=30))
+    pcd_fpfh = o3d.registration.compute_fpfh_feature(
+        pcd_down,
+        o3d.geometry.KDTreeSearchParamHybrid(radius=voxel_size * 5.0,
+                                             max_nn=100))
     return (pcd_down, pcd_fpfh)


-def register_point_cloud_fpfh(source, target,
-        source_fpfh, target_fpfh, config):
+def register_point_cloud_fpfh(source, target, source_fpfh, target_fpfh, config):
     distance_threshold = config["voxel_size"] * 1.4
     if config["global_registration"] == "fgr":
-        result = registration_fast_based_on_feature_matching(
-                source, target, source_fpfh, target_fpfh,
-                FastGlobalRegistrationOption(
-                maximum_correspondence_distance = distance_threshold))
+        result = o3d.registration.registration_fast_based_on_feature_matching(
+            source, target, source_fpfh, target_fpfh,
+            o3d.registration.FastGlobalRegistrationOption(
+                maximum_correspondence_distance=distance_threshold))
     if config["global_registration"] == "ransac":
-        result = registration_ransac_based_on_feature_matching(
-                source, target, source_fpfh, target_fpfh,
-                distance_threshold,
-                TransformationEstimationPointToPoint(False), 4,
-                [CorrespondenceCheckerBasedOnEdgeLength(0.9),
-                CorrespondenceCheckerBasedOnDistance(distance_threshold)],
-                RANSACConvergenceCriteria(4000000, 500))
+        result = o3d.registration.registration_ransac_based_on_feature_matching(
+            source, target, source_fpfh, target_fpfh, distance_threshold,
+            o3d.registration.TransformationEstimationPointToPoint(False), 4, [
+                o3d.registration.CorrespondenceCheckerBasedOnEdgeLength(0.9),
+                o3d.registration.CorrespondenceCheckerBasedOnDistance(
+                    distance_threshold)
+            ], o3d.registration.RANSACConvergenceCriteria(4000000, 500))
     if (result.transformation.trace() == 4.0):
-        return (False, np.identity(4), np.zeros((6,6)))
-    information = get_information_matrix_from_point_clouds(
-            source, target, distance_threshold, result.transformation)
-    if information[5,5] / min(len(source.points),len(target.points)) < 0.3:
-        return (False, np.identity(4), np.zeros((6,6)))
+        return (False, np.identity(4), np.zeros((6, 6)))
+    information = o3d.registration.get_information_matrix_from_point_clouds(
+        source, target, distance_threshold, result.transformation)
+    if information[5, 5] / min(len(source.points), len(target.points)) < 0.3:
+        return (False, np.identity(4), np.zeros((6, 6)))
     return (True, result.transformation, information)


-def compute_initial_registration(s, t, source_down, target_down,
-        source_fpfh, target_fpfh, path_dataset, config):
+def compute_initial_registration(s, t, source_down, target_down, source_fpfh,
+                                 target_fpfh, path_dataset, config):

-    if t == s + 1: # odometry case
+    if t == s + 1:  # odometry case
         print("Using RGBD odometry")
-        pose_graph_frag = read_pose_graph(join(path_dataset,
-                config["template_fragment_posegraph_optimized"] % s))
+        pose_graph_frag = o3d.io.read_pose_graph(
+            join(path_dataset,
+                 config["template_fragment_posegraph_optimized"] % s))
         n_nodes = len(pose_graph_frag.nodes)
-        transformation_init = np.linalg.inv(
-                pose_graph_frag.nodes[n_nodes-1].pose)
+        transformation_init = np.linalg.inv(pose_graph_frag.nodes[n_nodes -
+                                                                  1].pose)
         (transformation, information) = \
                 multiscale_icp(source_down, target_down,
                 [config["voxel_size"]], [50], config, transformation_init)
-    else: # loop closure case
-        (success, transformation, information) = register_point_cloud_fpfh(
-                source_down, target_down,
-                source_fpfh, target_fpfh, config)
+    else:  # loop closure case
+        (success, transformation,
+         information) = register_point_cloud_fpfh(source_down, target_down,
+                                                  source_fpfh, target_fpfh,
+                                                  config)
         if not success:
             print("No resonable solution. Skip this pair")
-            return (False, np.identity(4), np.zeros((6,6)))
+            return (False, np.identity(4), np.zeros((6, 6)))
     print(transformation)

     if config["debug_mode"]:
-        draw_registration_result(source_down, target_down,
-                transformation)
+        draw_registration_result(source_down, target_down, transformation)
     return (True, transformation, information)


-def update_posegrph_for_scene(s, t, transformation, information,
-        odometry, pose_graph):
-    if t == s + 1: # odometry case
+def update_posegrph_for_scene(s, t, transformation, information, odometry,
+                              pose_graph):
+    if t == s + 1:  # odometry case
         odometry = np.dot(transformation, odometry)
         odometry_inv = np.linalg.inv(odometry)
-        pose_graph.nodes.append(PoseGraphNode(odometry_inv))
+        pose_graph.nodes.append(o3d.registration.PoseGraphNode(odometry_inv))
         pose_graph.edges.append(
-            PoseGraphEdge(s, t, transformation,
-                information, uncertain = False))
-    else: # loop closure case
+            o3d.registration.PoseGraphEdge(s,
+                                           t,
+                                           transformation,
+                                           information,
+                                           uncertain=False))
+    else:  # loop closure case
         pose_graph.edges.append(
-            PoseGraphEdge(s, t, transformation,
-                information, uncertain = True))
+            o3d.registration.PoseGraphEdge(s,
+                                           t,
+                                           transformation,
+                                           information,
+                                           uncertain=True))
     return (odometry, pose_graph)


 def register_point_cloud_pair(ply_file_names, s, t, config):
     print("reading %s ..." % ply_file_names[s])
-    source = read_point_cloud(ply_file_names[s])
+    source = o3d.io.read_point_cloud(ply_file_names[s])
     print("reading %s ..." % ply_file_names[t])
-    target = read_point_cloud(ply_file_names[t])
+    target = o3d.io.read_point_cloud(ply_file_names[t])
     (source_down, source_fpfh) = preprocess_point_cloud(source, config)
     (target_down, target_fpfh) = preprocess_point_cloud(target, config)
     (success, transformation, information) = \
@@ -114,6 +125,7 @@ def register_point_cloud_pair(ply_file_names, s, t, config):

 # other types instead of class?
 class matching_result:
+
     def __init__(self, s, t):
         self.s = s
         self.t = t
@@ -123,9 +135,9 @@ def __init__(self, s, t):

 def make_posegraph_for_scene(ply_file_names, config):
-    pose_graph = PoseGraph()
+    pose_graph = o3d.registration.PoseGraph()
     odometry = np.identity(4)
-    pose_graph.nodes.append(PoseGraphNode(odometry))
+    pose_graph.nodes.append(o3d.registration.PoseGraphNode(odometry))

     n_files = len(ply_file_names)
     matching_results = {}
@@ -138,11 +150,11 @@ def make_posegraph_for_scene(ply_file_names, config):
         import multiprocessing
         import subprocess
         MAX_THREAD = min(multiprocessing.cpu_count(),
-                max(len(matching_results), 1))
-        results = Parallel(n_jobs=MAX_THREAD)(
-                delayed(register_point_cloud_pair)(ply_file_names,
-                matching_results[r].s, matching_results[r].t, config)
-                for r in matching_results)
+                         max(len(matching_results), 1))
+        results = Parallel(n_jobs=MAX_THREAD)(delayed(
+            register_point_cloud_pair)(ply_file_names, matching_results[r].s,
+                                       matching_results[r].t, config)
+                                              for r in matching_results)
         for i, r in enumerate(matching_results):
             matching_results[r].success = results[i][0]
             matching_results[r].transformation = results[i][1]
@@ -157,19 +169,19 @@ def make_posegraph_for_scene(ply_file_names, config):
     for r in matching_results:
         if matching_results[r].success:
             (odometry, pose_graph) = update_posegrph_for_scene(
-                    matching_results[r].s, matching_results[r].t,
-                    matching_results[r].transformation,
-                    matching_results[r].information,
-                    odometry, pose_graph)
-    write_pose_graph(join(config["path_dataset"],
-            config["template_global_posegraph"]), pose_graph)
+                matching_results[r].s, matching_results[r].t,
+                matching_results[r].transformation,
+                matching_results[r].information, odometry, pose_graph)
+    o3d.io.write_pose_graph(
+        join(config["path_dataset"], config["template_global_posegraph"]),
+        pose_graph)


 def run(config):
     print("register fragments.")
-    set_verbosity_level(VerbosityLevel.Debug)
-    ply_file_names = get_file_list(join(
-        config["path_dataset"], config["folder_fragment"]), ".ply")
+    o3d.utility.set_verbosity_level(o3d.utility.VerbosityLevel.Debug)
+    ply_file_names = get_file_list(
+        join(config["path_dataset"], config["folder_fragment"]), ".ply")
     make_clean_folder(join(config["path_dataset"], config["folder_scene"]))
     make_posegraph_for_scene(ply_file_names, config)
     optimize_posegraph_for_scene(config["path_dataset"], config)
diff --git a/examples/Python/ReconstructionSystem/run_system.py b/examples/Python/ReconstructionSystem/run_system.py
index 4383e06911f..af44328f4eb 100644
--- a/examples/Python/ReconstructionSystem/run_system.py
+++ b/examples/Python/ReconstructionSystem/run_system.py
@@ -4,33 +4,35 @@

 # examples/Python/ReconstructionSystem/run_system.py

-import os
-import sys
 import json
 import argparse
 import time, datetime
+import sys
 sys.path.append("../Utility")
-from file import *
+from file import check_folder_structure
 sys.path.append(".")
-from initialize_config import *
-
+from initialize_config import initialize_config

 if __name__ == "__main__":
     parser = argparse.ArgumentParser(description="Reconstruction system")
     parser.add_argument("config", help="path to the config file")
     parser.add_argument("--make",
-            help="Step 1) make fragments from RGBD sequence",
-            action="store_true")
-    parser.add_argument("--register",
-            help="Step 2) register all fragments to detect loop closure",
-            action="store_true")
+                        help="Step 1) make fragments from RGBD sequence",
+                        action="store_true")
+    parser.add_argument(
+        "--register",
+        help="Step 2) register all fragments to
detect loop closure", + action="store_true") parser.add_argument("--refine", - help="Step 3) refine rough registrations", action="store_true") - parser.add_argument("--integrate", - help="Step 4) integrate the whole RGBD sequence to make final mesh", - action="store_true") - parser.add_argument("--debug_mode", help="turn on debug mode", - action="store_true") + help="Step 3) refine rough registrations", + action="store_true") + parser.add_argument( + "--integrate", + help="Step 4) integrate the whole RGBD sequence to make final mesh", + action="store_true") + parser.add_argument("--debug_mode", + help="turn on debug mode", + action="store_true") args = parser.parse_args() if not args.make and \ @@ -59,7 +61,7 @@ for key, val in config.items(): print("%40s : %s" % (key, str(val))) - times = [0,0,0,0] + times = [0, 0, 0, 0] if args.make: start_time = time.time() import make_fragments diff --git a/examples/Python/ReconstructionSystem/scripts/__init__.py b/examples/Python/ReconstructionSystem/scripts/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/examples/Python/ReconstructionSystem/scripts/synchronize_frames.py b/examples/Python/ReconstructionSystem/scripts/synchronize_frames.py index 45f2919b4b8..3cf7f137438 100644 --- a/examples/Python/ReconstructionSystem/scripts/synchronize_frames.py +++ b/examples/Python/ReconstructionSystem/scripts/synchronize_frames.py @@ -19,13 +19,17 @@ def run_synchronization(args): # filename format is: # frame-timestamp.filetype - timestamps = {'depth':[None] * len(depth_files), - 'color':[None] * len(color_files)} + timestamps = { + 'depth': [None] * len(depth_files), + 'color': [None] * len(color_files) + } for i, name in enumerate(depth_files): - depth_timestamp = int(os.path.basename(depth_files[i]).replace('-','.').split('.')[1]) + depth_timestamp = int( + os.path.basename(depth_files[i]).replace('-', '.').split('.')[1]) timestamps['depth'][i] = depth_timestamp for i, name in enumerate(color_files): - color_timestamp = int(os.path.basename(color_files[i]).replace('-','.').split('.')[1]) + color_timestamp = int( + os.path.basename(color_files[i]).replace('-', '.').split('.')[1]) timestamps['color'][i] = color_timestamp # associations' index is the color frame, and the value at @@ -34,7 +38,7 @@ def run_synchronization(args): depth_idx = 0 for i in range(len(color_files)): best_dist = float('inf') - while depth_idx <= len(depth_files)-1 and i <= len(color_files)-1: + while depth_idx <= len(depth_files) - 1 and i <= len(color_files) - 1: dist = math.fabs(timestamps['depth'][depth_idx] - \ timestamps['color'][i]) if dist > best_dist: @@ -42,33 +46,39 @@ def run_synchronization(args): best_dist = dist depth_idx += 1 if depth_idx > timestamps['depth'][-1]: - print("Ended at color frame %d, depth frame %d" % (i, depth_idx)) - associations.append(depth_idx-1) + print("Ended at color frame %d, depth frame %d" % + (i, depth_idx)) + associations.append(depth_idx - 1) if args.debug_mode: - print("%d %d %d %d" % (i, depth_idx-1, - timestamps['depth'][depth_idx-1], timestamps['color'][i])) + print("%d %d %d %d" % + (i, depth_idx - 1, timestamps['depth'][depth_idx - 1], + timestamps['color'][i])) os.rename(os.path.join(folder_path, "depth"), - os.path.join(folder_path, "temp")) + os.path.join(folder_path, "temp")) if not os.path.exists(os.path.join(folder_path, "depth")): os.makedirs(os.path.join(folder_path, "depth")) for i, assn in enumerate(associations): temp_name = os.path.join(folder_path, "temp", - os.path.basename(depth_files[assn])) - 
new_name = os.path.join(folder_path, "depth/%06d.png" % (i+1)) + os.path.basename(depth_files[assn])) + new_name = os.path.join(folder_path, "depth/%06d.png" % (i + 1)) if args.debug_mode: print(temp_name) print(new_name) if not exists(temp_name): - assert(i+1 == len(color_files)) + assert (i + 1 == len(color_files)) os.remove(color_files[-1]) else: os.rename(temp_name, new_name) shutil.rmtree(os.path.join(folder_path, "temp")) + if __name__ == '__main__': - parser = argparse.ArgumentParser(description="Synchronize color and depth images") + parser = argparse.ArgumentParser( + description="Synchronize color and depth images") parser.add_argument("dataset", help="path to the dataset") - parser.add_argument("--debug_mode", help="turn on debug mode", action="store_true") + parser.add_argument("--debug_mode", + help="turn on debug mode", + action="store_true") args = parser.parse_args() run_synchronization(args) diff --git a/examples/Python/ReconstructionSystem/sensors/__init__.py b/examples/Python/ReconstructionSystem/sensors/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/examples/Python/ReconstructionSystem/sensors/realsense_pcd_visualizer.py b/examples/Python/ReconstructionSystem/sensors/realsense_pcd_visualizer.py index 0aff6b02b83..ea3bb925d93 100644 --- a/examples/Python/ReconstructionSystem/sensors/realsense_pcd_visualizer.py +++ b/examples/Python/ReconstructionSystem/sensors/realsense_pcd_visualizer.py @@ -1,12 +1,12 @@ # pyrealsense2 is required. # Please see instructions in https://github.com/IntelRealSense/librealsense/tree/master/wrappers/python import pyrealsense2 as rs -from open3d import * import numpy as np from enum import IntEnum from datetime import datetime -from open3d import * +import open3d as o3d + class Preset(IntEnum): Custom = 0 @@ -16,10 +16,12 @@ class Preset(IntEnum): HighDensity = 4 MediumDensity = 5 + def get_intrinsic_matrix(frame): intrinsics = frame.profile.as_video_stream_profile().intrinsics - out = PinholeCameraIntrinsic(640, 480, - intrinsics.fx, intrinsics.fy, intrinsics.ppx, intrinsics.ppy) + out = o3d.camera.PinholeCameraIntrinsic(640, 480, intrinsics.fx, + intrinsics.fy, intrinsics.ppx, + intrinsics.ppy) return out @@ -47,7 +49,7 @@ def get_intrinsic_matrix(frame): # We will not display the background of objects more than # clipping_distance_in_meters meters away - clipping_distance_in_meters = 3 # 3 meter + clipping_distance_in_meters = 3 # 3 meter clipping_distance = clipping_distance_in_meters / depth_scale # print(depth_scale) @@ -57,10 +59,10 @@ def get_intrinsic_matrix(frame): align_to = rs.stream.color align = rs.align(align_to) - vis = Visualizer() + vis = o3d.visualization.Visualizer() vis.create_window() - pcd = PointCloud() + pcd = o3d.geometry.PointCloud() flip_transform = [[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]] # Streaming loop @@ -68,7 +70,7 @@ def get_intrinsic_matrix(frame): try: while True: - dt0=datetime.now() + dt0 = datetime.now() # Get frameset of color and depth frames = pipeline.wait_for_frames() @@ -79,22 +81,26 @@ def get_intrinsic_matrix(frame): # Get aligned frames aligned_depth_frame = aligned_frames.get_depth_frame() color_frame = aligned_frames.get_color_frame() - intrinsic = PinholeCameraIntrinsic(get_intrinsic_matrix(color_frame)) + intrinsic = o3d.camera.PinholeCameraIntrinsic( + get_intrinsic_matrix(color_frame)) # Validate that both frames are valid if not aligned_depth_frame or not color_frame: continue - depth_image = Image(np.array(aligned_depth_frame.get_data())) + 
depth_image = o3d.geometry.Image( + np.array(aligned_depth_frame.get_data())) color_temp = np.asarray(color_frame.get_data()) - color_image = Image(color_temp) - - rgbd_image = create_rgbd_image_from_color_and_depth( - color_image, depth_image, depth_scale=1.0/depth_scale, - depth_trunc=clipping_distance_in_meters, - convert_rgb_to_intensity = False) - temp = create_point_cloud_from_rgbd_image( - rgbd_image, intrinsic) + color_image = o3d.geometry.Image(color_temp) + + rgbd_image = o3d.geometry.create_rgbd_image_from_color_and_depth( + color_image, + depth_image, + depth_scale=1.0 / depth_scale, + depth_trunc=clipping_distance_in_meters, + convert_rgb_to_intensity=False) + temp = o3d.geometry.create_point_cloud_from_rgbd_image( + rgbd_image, intrinsic) temp.transform(flip_transform) pcd.points = temp.points pcd.colors = temp.colors @@ -107,7 +113,7 @@ def get_intrinsic_matrix(frame): vis.update_renderer() process_time = datetime.now() - dt0 - print("FPS: "+str(1/process_time.total_seconds())) + print("FPS: " + str(1 / process_time.total_seconds())) frame_count += 1 finally: diff --git a/examples/Python/ReconstructionSystem/sensors/realsense_recorder.py b/examples/Python/ReconstructionSystem/sensors/realsense_recorder.py index af78293720e..19bb3d4f1f0 100644 --- a/examples/Python/ReconstructionSystem/sensors/realsense_recorder.py +++ b/examples/Python/ReconstructionSystem/sensors/realsense_recorder.py @@ -41,27 +41,39 @@ def make_clean_folder(path_folder): def save_intrinsic_as_json(filename, frame): intrinsics = frame.profile.as_video_stream_profile().intrinsics with open(filename, 'w') as outfile: - obj = json.dump({'width': intrinsics.width, - 'height': intrinsics.height, - 'intrinsic_matrix': - [intrinsics.fx, 0, 0, - 0, intrinsics.fy, 0, - intrinsics.ppx, intrinsics.ppy, 1]}, - outfile, indent=4) + obj = json.dump( + { + 'width': + intrinsics.width, + 'height': + intrinsics.height, + 'intrinsic_matrix': [ + intrinsics.fx, 0, 0, 0, intrinsics.fy, 0, intrinsics.ppx, + intrinsics.ppy, 1 + ] + }, + outfile, + indent=4) if __name__ == "__main__": parser = argparse.ArgumentParser( - description="Realsense Recorder. Please select one of the optional arguments") - parser.add_argument("--output_folder", default='../dataset/realsense/', - help="set output folder") - parser.add_argument("--record_rosbag", action='store_true', - help="Recording rgbd stream into realsense.bag") - parser.add_argument("--record_imgs", action='store_true', - help="Recording save color and depth images into realsense folder") - parser.add_argument("--playback_rosbag", action='store_true', - help="Play recorded realsense.bag file") + description= + "Realsense Recorder. 
Please select one of the optional arguments") + parser.add_argument("--output_folder", + default='../dataset/realsense/', + help="set output folder") + parser.add_argument("--record_rosbag", + action='store_true', + help="Recording rgbd stream into realsense.bag") + parser.add_argument( + "--record_imgs", + action='store_true', + help="Recording save color and depth images into realsense folder") + parser.add_argument("--playback_rosbag", + action='store_true', + help="Play recorded realsense.bag file") args = parser.parse_args() if sum(o is not False for o in vars(args).values()) != 2: @@ -98,7 +110,7 @@ def save_intrinsic_as_json(filename, frame): if args.record_rosbag: config.enable_record_to_file(path_bag) if args.playback_rosbag: - config.enable_device_from_file(path_bag, repeat_playback = True) + config.enable_device_from_file(path_bag, repeat_playback=True) # Start streaming profile = pipeline.start(config) @@ -113,7 +125,7 @@ def save_intrinsic_as_json(filename, frame): # We will not display the background of objects more than # clipping_distance_in_meters meters away - clipping_distance_in_meters = 3 # 3 meter + clipping_distance_in_meters = 3 # 3 meter clipping_distance = clipping_distance_in_meters / depth_scale # Create an align object @@ -145,8 +157,9 @@ def save_intrinsic_as_json(filename, frame): if args.record_imgs: if frame_count == 0: - save_intrinsic_as_json(join(args.output_folder, - "camera_intrinsic.json"), color_frame) + save_intrinsic_as_json( + join(args.output_folder, "camera_intrinsic.json"), + color_frame) cv2.imwrite("%s/%06d.png" % \ (path_depth, frame_count), depth_image) cv2.imwrite("%s/%06d.jpg" % \ @@ -157,14 +170,13 @@ def save_intrinsic_as_json(filename, frame): # Remove background - Set pixels further than clipping_distance to grey grey_color = 153 #depth image is 1 channel, color is 3 channels - depth_image_3d = np.dstack((depth_image,depth_image,depth_image)) + depth_image_3d = np.dstack((depth_image, depth_image, depth_image)) bg_removed = np.where((depth_image_3d > clipping_distance) | \ (depth_image_3d <= 0), grey_color, color_image) # Render images depth_colormap = cv2.applyColorMap( - cv2.convertScaleAbs(depth_image, alpha=0.09), - cv2.COLORMAP_JET) + cv2.convertScaleAbs(depth_image, alpha=0.09), cv2.COLORMAP_JET) images = np.hstack((bg_removed, depth_colormap)) cv2.namedWindow('Recorder Realsense', cv2.WINDOW_AUTOSIZE) cv2.imshow('Recorder Realsense', images) diff --git a/examples/Python/Utility/__init__.py b/examples/Python/Utility/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/examples/Python/Utility/downloader.py b/examples/Python/Utility/downloader.py index 2db13bbae6d..d94638c5171 100644 --- a/examples/Python/Utility/downloader.py +++ b/examples/Python/Utility/downloader.py @@ -14,7 +14,6 @@ pyver = 2 from urllib2 import Request, urlopen - # dataset from redwood-data.org dataset_names = ["livingroom1", "livingroom2", "office1", "office2"] dataset_path = "testdata/" @@ -29,7 +28,7 @@ def get_redwood_dataset(): file_downloader("http://redwood-data.org/indoor/data/%s-fragments-ply.zip" % \ name) unzip_data("%s-fragments-ply.zip" % name, - "%s/%s" % (dataset_path, name)) + "%s/%s" % (dataset_path, name)) os.remove("%s-fragments-ply.zip" % name) print("") diff --git a/examples/Python/Utility/file.py b/examples/Python/Utility/file.py index f7136244065..725a70773c6 100644 --- a/examples/Python/Utility/file.py +++ b/examples/Python/Utility/file.py @@ -20,8 +20,11 @@ def get_file_list(path, extension=None): if extension 
is None: file_list = [path + f for f in listdir(path) if isfile(join(path, f))] else: - file_list = [path + f for f in listdir(path) - if isfile(join(path, f)) and splitext(f)[1] == extension] + file_list = [ + path + f + for f in listdir(path) + if isfile(join(path, f)) and splitext(f)[1] == extension + ] file_list = sorted_alphanum(file_list) return file_list diff --git a/examples/Python/Utility/opencv.py b/examples/Python/Utility/opencv.py index 6a7487f6639..8f6cd0b38f5 100644 --- a/examples/Python/Utility/opencv.py +++ b/examples/Python/Utility/opencv.py @@ -4,6 +4,7 @@ # examples/Python/Utility/opencv.py + def initialize_opencv(): opencv_installed = True try: diff --git a/examples/Python/Utility/visualization.py b/examples/Python/Utility/visualization.py index 9d89dedad11..176558ad265 100644 --- a/examples/Python/Utility/visualization.py +++ b/examples/Python/Utility/visualization.py @@ -5,17 +5,18 @@ # examples/Python/Utility/visualization.py import copy -from open3d import * +import open3d as o3d flip_transform = [[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]] + def draw_geometries_flip(pcds): pcds_transform = [] for pcd in pcds: pcd_temp = copy.deepcopy(pcd) pcd_temp.transform(flip_transform) pcds_transform.append(pcd_temp) - draw_geometries(pcds_transform) + o3d.visualization.draw_geometries(pcds_transform) def draw_registration_result(source, target, transformation): @@ -26,7 +27,7 @@ def draw_registration_result(source, target, transformation): source_temp.transform(transformation) source_temp.transform(flip_transform) target_temp.transform(flip_transform) - draw_geometries([source_temp, target_temp]) + o3d.visualization.draw_geometries([source_temp, target_temp]) def draw_registration_result_original_color(source, target, transformation): @@ -35,4 +36,4 @@ def draw_registration_result_original_color(source, target, transformation): source_temp.transform(transformation) source_temp.transform(flip_transform) target_temp.transform(flip_transform) - draw_geometries([source_temp, target_temp]) + o3d.visualization.draw_geometries([source_temp, target_temp]) diff --git a/examples/Python/__init__.py b/examples/Python/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/src/UnitTest/Python/test_octree.py b/src/UnitTest/Python/test_octree.py index 23a6a254898..9c70cba5ba7 100644 --- a/src/UnitTest/Python/test_octree.py +++ b/src/UnitTest/Python/test_octree.py @@ -30,32 +30,27 @@ import pytest import os - -_eight_cubes_colors = np.array( - [ - [0.0, 0.0, 0.0], - [0.1, 0.0, 0.0], - [0.0, 0.1, 0.0], - [0.1, 0.1, 0.0], - [0.0, 0.0, 0.1], - [0.1, 0.0, 0.1], - [0.0, 0.1, 0.1], - [0.1, 0.1, 0.1], - ] -) - -_eight_cubes_points = np.array( - [ - [0.5, 0.5, 0.5], - [1.5, 0.5, 0.5], - [0.5, 1.5, 0.5], - [1.5, 1.5, 0.5], - [0.5, 0.5, 1.5], - [1.5, 0.5, 1.5], - [0.5, 1.5, 1.5], - [1.5, 1.5, 1.5], - ] -) +_eight_cubes_colors = np.array([ + [0.0, 0.0, 0.0], + [0.1, 0.0, 0.0], + [0.0, 0.1, 0.0], + [0.1, 0.1, 0.0], + [0.0, 0.0, 0.1], + [0.1, 0.0, 0.1], + [0.0, 0.1, 0.1], + [0.1, 0.1, 0.1], +]) + +_eight_cubes_points = np.array([ + [0.5, 0.5, 0.5], + [1.5, 0.5, 0.5], + [0.5, 1.5, 0.5], + [1.5, 1.5, 0.5], + [0.5, 0.5, 1.5], + [1.5, 0.5, 1.5], + [0.5, 1.5, 1.5], + [1.5, 1.5, 1.5], +]) def test_octree_OctreeNodeInfo(): @@ -91,9 +86,11 @@ def test_octree_OctreeColorLeafNode(): assert color_leaf_node == color_leaf_node_clone assert color_leaf_node_clone == color_leaf_node + def test_octree_init(): octree = o3d.geometry.Octree(1, [0, 0, 0], 2) + def 
test_octree_convert_from_point_cloud(): octree = o3d.geometry.Octree(1, [0, 0, 0], 2) @@ -118,16 +115,14 @@ def test_octree_node_access(): f_update = o3d.geometry.OctreeColorLeafNode.get_update_function(color) octree.insert_point(point, f_init, f_update) for i in range(8): - np.testing.assert_equal( - octree.root_node.children[i].color, _eight_cubes_colors[i] - ) + np.testing.assert_equal(octree.root_node.children[i].color, + _eight_cubes_colors[i]) def test_octree_visualize(): pwd = os.path.dirname(os.path.realpath(__file__)) - data_dir = os.path.join( - pwd, os.pardir, os.pardir, os.pardir, "examples", "TestData" - ) + data_dir = os.path.join(pwd, os.pardir, os.pardir, os.pardir, "examples", + "TestData") pcd_path = os.path.join(data_dir, "fragment.ply") pcd = o3d.io.read_point_cloud(pcd_path) octree = o3d.geometry.Octree(8) @@ -138,9 +133,8 @@ def test_octree_visualize(): def test_locate_leaf_node(): pwd = os.path.dirname(os.path.realpath(__file__)) - data_dir = os.path.join( - pwd, os.pardir, os.pardir, os.pardir, "examples", "TestData" - ) + data_dir = os.path.join(pwd, os.pardir, os.pardir, os.pardir, "examples", + "TestData") pcd_path = os.path.join(data_dir, "fragment.ply") pcd = o3d.io.read_point_cloud(pcd_path) diff --git a/src/UnitTest/Python/test_open3d_eigen.py b/src/UnitTest/Python/test_open3d_eigen.py index 9aed5638081..40e2c7103d8 100644 --- a/src/UnitTest/Python/test_open3d_eigen.py +++ b/src/UnitTest/Python/test_open3d_eigen.py @@ -29,26 +29,32 @@ import time import pytest -@pytest.mark.parametrize("input_array, expect_exception", [ - # Empty case - (np.ones((0, 3), dtype=np.float64), False), - # Wrong shape - (np.ones((2, 4), dtype=np.float64), True), - # Non-numpy array - ([[1, 2, 3], [4, 5, 6]], False), - ([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], False), - # Datatypes - (np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float64), False), - (np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int32), False), - (np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int32), False), - # Slice non-contiguous memory - (np.array([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]], dtype=np.float64)[:, 0:6:2], False), - # Transpose view - (np.array([[1, 4], [2, 5], [3, 6]], dtype=np.float64).T, False), - # Fortran layout - (np.asfortranarray(np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float64)), False), -]) + +@pytest.mark.parametrize( + "input_array, expect_exception", + [ + # Empty case + (np.ones((0, 3), dtype=np.float64), False), + # Wrong shape + (np.ones((2, 4), dtype=np.float64), True), + # Non-numpy array + ([[1, 2, 3], [4, 5, 6]], False), + ([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], False), + # Datatypes + (np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float64), False), + (np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int32), False), + (np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int32), False), + # Slice non-contiguous memory + (np.array([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]], + dtype=np.float64)[:, 0:6:2], False), + # Transpose view + (np.array([[1, 4], [2, 5], [3, 6]], dtype=np.float64).T, False), + # Fortran layout + (np.asfortranarray(np.array([[1, 2, 3], [4, 5, 6]], + dtype=np.float64)), False), + ]) def test_Vector3dVector(input_array, expect_exception): + def run_test(input_array): open3d_array = open3d.Vector3dVector(input_array) output_array = np.asarray(open3d_array) @@ -60,26 +66,32 @@ def run_test(input_array): else: run_test(input_array) -@pytest.mark.parametrize("input_array, expect_exception", [ - # Empty case - (np.ones((0, 3), dtype=np.int32), False), - # Wrong shape - (np.ones((2, 4), dtype=np.int32), True), - # 
Non-numpy array - ([[1, 2, 3], [4, 5, 6]], False), - ([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], False), - # Datatypes - (np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float64), False), - (np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int32), False), - (np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int32), False), - # Slice non-contiguous memory - (np.array([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]], dtype=np.int32)[:, 0:6:2], False), - # Transpose view - (np.array([[1, 4], [2, 5], [3, 6]], dtype=np.int32).T, False), - # Fortran layout - (np.asfortranarray(np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int32)), False), -]) + +@pytest.mark.parametrize( + "input_array, expect_exception", + [ + # Empty case + (np.ones((0, 3), dtype=np.int32), False), + # Wrong shape + (np.ones((2, 4), dtype=np.int32), True), + # Non-numpy array + ([[1, 2, 3], [4, 5, 6]], False), + ([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], False), + # Datatypes + (np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float64), False), + (np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int32), False), + (np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int32), False), + # Slice non-contiguous memory + (np.array([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]], + dtype=np.int32)[:, 0:6:2], False), + # Transpose view + (np.array([[1, 4], [2, 5], [3, 6]], dtype=np.int32).T, False), + # Fortran layout + (np.asfortranarray(np.array([[1, 2, 3], [4, 5, 6]], + dtype=np.int32)), False), + ]) def test_Vector3iVector(input_array, expect_exception): + def run_test(input_array): open3d_array = open3d.Vector3iVector(input_array) output_array = np.asarray(open3d_array) @@ -92,26 +104,30 @@ def run_test(input_array): run_test(input_array) -@pytest.mark.parametrize("input_array, expect_exception", [ - # Empty case - (np.ones((0, 2), dtype=np.int32), False), - # Wrong shape - (np.ones((10, 3), dtype=np.int32), True), - # Non-numpy array - ([[1, 2], [4, 5]], False), - ([[1.0, 2.0], [4.0, 5.0]], False), - # Datatypes - (np.array([[1, 2], [4, 5]], dtype=np.float64), False), - (np.array([[1, 2], [4, 5]], dtype=np.int32), False), - (np.array([[1, 2], [4, 5]], dtype=np.int32), False), - # Slice non-contiguous memory - (np.array([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]], dtype=np.int32)[:, 0:6:3], False), - # Transpose view - (np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int32).T, False), - # Fortran layout - (np.asfortranarray(np.array([[1, 2], [4, 5]], dtype=np.int32)), False), -]) +@pytest.mark.parametrize( + "input_array, expect_exception", + [ + # Empty case + (np.ones((0, 2), dtype=np.int32), False), + # Wrong shape + (np.ones((10, 3), dtype=np.int32), True), + # Non-numpy array + ([[1, 2], [4, 5]], False), + ([[1.0, 2.0], [4.0, 5.0]], False), + # Datatypes + (np.array([[1, 2], [4, 5]], dtype=np.float64), False), + (np.array([[1, 2], [4, 5]], dtype=np.int32), False), + (np.array([[1, 2], [4, 5]], dtype=np.int32), False), + # Slice non-contiguous memory + (np.array([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]], + dtype=np.int32)[:, 0:6:3], False), + # Transpose view + (np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int32).T, False), + # Fortran layout + (np.asfortranarray(np.array([[1, 2], [4, 5]], dtype=np.int32)), False), + ]) def test_Vector2iVector(input_array, expect_exception): + def run_test(input_array): open3d_array = open3d.Vector2iVector(input_array) output_array = np.asarray(open3d_array) @@ -124,26 +140,29 @@ def run_test(input_array): run_test(input_array) -@pytest.mark.parametrize("input_array, expect_exception", [ - # Empty case - (np.ones((0, 4, 4), dtype=np.float64), False), - # Wrong shape - (np.ones((10, 3), 
dtype=np.float64), True), - (np.ones((10, 3, 3), dtype=np.float64), True), - # Non-numpy array - ([[[ 0, 1, 2, 3], - [ 4, 5, 6, 7], - [ 8, 9, 10, 11], - [12, 13, 14, 15]]], False), - # Datatypes - (np.random.randint(10, size=(10, 4, 4)).astype(np.float64), False), - (np.random.randint(10, size=(10, 4, 4)).astype(np.int32), False), - # Slice non-contiguous memory - (np.random.random((10, 8, 8)).astype(np.float64)[:, 0:8:2, 0:8:2], False), - # Fortran layout - (np.asfortranarray(np.array(np.random.random((10, 4, 4)), dtype=np.float64)), False), -]) +@pytest.mark.parametrize( + "input_array, expect_exception", + [ + # Empty case + (np.ones((0, 4, 4), dtype=np.float64), False), + # Wrong shape + (np.ones((10, 3), dtype=np.float64), True), + (np.ones((10, 3, 3), dtype=np.float64), True), + # Non-numpy array + ([[[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [12, 13, 14, 15]]], False + ), + # Datatypes + (np.random.randint(10, size=(10, 4, 4)).astype(np.float64), False), + (np.random.randint(10, size=(10, 4, 4)).astype(np.int32), False), + # Slice non-contiguous memory + (np.random.random( + (10, 8, 8)).astype(np.float64)[:, 0:8:2, 0:8:2], False), + # Fortran layout + (np.asfortranarray( + np.array(np.random.random((10, 4, 4)), dtype=np.float64)), False), + ]) def test_Matrix4dVector(input_array, expect_exception): + def run_test(input_array): open3d_array = open3d.Matrix4dVector(input_array) output_array = np.asarray(open3d_array) @@ -155,6 +174,7 @@ def run_test(input_array): else: run_test(input_array) + # Run with pytest -s to show output def test_benchmark(): vector_size = int(2e6) diff --git a/util/scripts/apply-style.cmake b/util/scripts/apply-style.cmake index 8544c487fee..600ee998a98 100644 --- a/util/scripts/apply-style.cmake +++ b/util/scripts/apply-style.cmake @@ -17,7 +17,39 @@ # limitations under the License. # ****************************************************************************** -# Tries to locate "clang-format-5.0" and then "clang-format" +# Try to locate "yapf" +find_program(YAPF yapf PATHS ENV PATH) +if (YAPF) + message(STATUS "yapf found at: ${YAPF}") + execute_process(COMMAND ${YAPF} --version) +else() + message(STATUS "Please Install YAPF (https://github.com/google/yapf)") + message(STATUS "With PyPI: `pip install yapf`") + message(STATUS "With Conda: `conda install yapf`") + message(FATAL_ERROR "yapf not found, python not available") +endif() + +function(style_apply_file_python FILE) + execute_process(COMMAND ${YAPF} -i ${FILE}) +endfunction() + +set(DIRECTORIES_OF_INTEREST_PYTHON + examples/Python + src/UnitTest/Python +) + +message(STATUS "Python apply-style...") +foreach(DIRECTORY ${DIRECTORIES_OF_INTEREST_PYTHON}) + set(PY_GLOB "${PROJECT_SOURCE_DIR}/${DIRECTORY}/*.py") + file(GLOB_RECURSE FILES ${PY_GLOB}) + foreach(FILE ${FILES}) + style_apply_file_python(${FILE}) + endforeach(FILE) +endforeach(DIRECTORY) +message(STATUS "Python apply-style done") + + +# Try to locate "clang-format-5.0" and then "clang-format" find_program(CLANG_FORMAT clang-format-5.0 PATHS ENV PATH) if (NOT CLANG_FORMAT) find_program(CLANG_FORMAT clang-format PATHS ENV PATH) @@ -30,30 +62,30 @@ else() message(FATAL_ERROR "clang-format not found, style not available") endif() -function(style_apply_file PATH) +function(style_apply_file_cpp FILE) execute_process( COMMAND ${CLANG_FORMAT} -style=file -output-replacements-xml ${FILE} OUTPUT_VARIABLE STYLE_CHECK_RESULT ) if("${STYLE_CHECK_RESULT}" MATCHES ".*