From 6c66acdda96a4508fbcd2ce344110610aa3ce761 Mon Sep 17 00:00:00 2001 From: Michael Schelling Date: Wed, 10 Mar 2021 13:42:07 +0100 Subject: [PATCH 01/29] point_cloud_class_initial --- .../projects/point_convolutions/.flake8 | 32 ++ .../projects/point_convolutions/LICENSE | 202 +++++++++++++ .../projects/point_convolutions/README.md | 70 +++++ .../point_convolutions/pylib/__init__.py | 15 + .../point_convolutions/pylib/pc/PointCloud.py | 281 ++++++++++++++++++ .../pylib/pc/tests/aabb_test.py | 72 +++++ .../pylib/pc/tests/point_cloud_test.py | 109 +++++++ .../pylib/pc/tests/utils.py | 81 +++++ .../point_convolutions/pylib/pc/utils.py | 114 +++++++ .../projects/point_convolutions/pytest.ini | 3 + 10 files changed, 979 insertions(+) create mode 100644 tensorflow_graphics/projects/point_convolutions/.flake8 create mode 100644 tensorflow_graphics/projects/point_convolutions/LICENSE create mode 100644 tensorflow_graphics/projects/point_convolutions/README.md create mode 100755 tensorflow_graphics/projects/point_convolutions/pylib/__init__.py create mode 100755 tensorflow_graphics/projects/point_convolutions/pylib/pc/PointCloud.py create mode 100644 tensorflow_graphics/projects/point_convolutions/pylib/pc/tests/aabb_test.py create mode 100644 tensorflow_graphics/projects/point_convolutions/pylib/pc/tests/point_cloud_test.py create mode 100644 tensorflow_graphics/projects/point_convolutions/pylib/pc/tests/utils.py create mode 100644 tensorflow_graphics/projects/point_convolutions/pylib/pc/utils.py create mode 100644 tensorflow_graphics/projects/point_convolutions/pytest.ini diff --git a/tensorflow_graphics/projects/point_convolutions/.flake8 b/tensorflow_graphics/projects/point_convolutions/.flake8 new file mode 100644 index 000000000..f4e3292ee --- /dev/null +++ b/tensorflow_graphics/projects/point_convolutions/.flake8 @@ -0,0 +1,32 @@ +[flake8] +inline-quotes = double +max-line-length = 79 +max-complexity = 10 +exclude = .git, + .tox, + .pytest_cache, + __pycache__, + tensorflow_graphics/projects/* + tensorflow_graphics/submodules/* +ignore = C901, + E101, + E111, + E114, + E121, + E125, + E126, + E129, + E221, + E265, + E271, + E305, + E306, + #E501, + E502, + E731, + E741, + F401, + F812, + W191, + W503, + W504, \ No newline at end of file diff --git a/tensorflow_graphics/projects/point_convolutions/LICENSE b/tensorflow_graphics/projects/point_convolutions/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/tensorflow_graphics/projects/point_convolutions/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/tensorflow_graphics/projects/point_convolutions/README.md b/tensorflow_graphics/projects/point_convolutions/README.md new file mode 100644 index 000000000..83db3fe66 --- /dev/null +++ b/tensorflow_graphics/projects/point_convolutions/README.md @@ -0,0 +1,70 @@ +# TensorFlow Graphics Point Cloud Convolutions + +This module contains a python TensorFlow module `pylib` and a custom ops package in `tfg_custom_ops`. +While it is possible to run without the custom ops package, it is strongly advised to install it for performance and memory efficiency. + +## Content + +This code contains all necessary operations to perform point cloud convolutions + +1. Datastructure + - Point cloud class for batches of arbitrary sized point clouds. + - Memory efficient regular grid data structure +2. Point cloud operations + - Neighborhood computation + - Point density estimation + - Spatial sampling + - Poisson Disk sampling + - Cell average sampling +3. Convolution kernels + - Kernel Point Convolutions + - linear interpolation + - gaussian interpolation + - deformable points with regularization loss as in [KPConv](https://arxiv.org/abs/1904.08889) + - MLP + - multiple MLPs as in [MCConv](https://arxiv.org/abs/1806.01759) + - single MLP as in [PointConv](https://arxiv.org/abs/1811.07246) +4. Feature aggregation inside receptive fields + - Monte-Carlo integration with pdf + - Constant summation +5. Easy to use classes for building models + - `PointCloud` class + - `PointHierarchy` for sequential downsampling of point clouds + - layer classes + - `MCConv` + - `PointConv` + - `KPConv` + - `Conv1x1` + +## Installation + +Precompiled versions of the custom ops package are provided in `custom_ops/pkg_builds/tf_*` for the latest TensorFlow versions. +For compilation instructions see the [README](custom_ops/README.md) in the `custom_ops` folder. + +To install it run the following command (replace `VERSION` with your installed TensorFlow version, e.g. `2.3.0`) +```bash + pip install custom_ops/tf_VERSION/*.whl +``` + +## Tutorials + +Check out the Colab notebooks for an introduction to the code + +- [Introduction](pylib/notebooks/Introduction.ipynb) +- [Classification on ModelNet40](pylib/notebooks/ModelNet40.ipynb) + +## Unit tests + +Unit tests can be evaluated using + +```bash + pip install -r pytest_requirements.txt + pytest pylib/ +``` + +These include tests of the custom ops if they are installed. + +## Additional Information + +You may use this software under the +[Apache 2.0 License](https://github.com/schellmi42/tensorflow_graphics_point_clouds/blob/master/LICENSE). \ No newline at end of file diff --git a/tensorflow_graphics/projects/point_convolutions/pylib/__init__.py b/tensorflow_graphics/projects/point_convolutions/pylib/__init__.py new file mode 100755 index 000000000..87d0e6976 --- /dev/null +++ b/tensorflow_graphics/projects/point_convolutions/pylib/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2020 The TensorFlow Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific + +from pylib import pc +from pylib import io diff --git a/tensorflow_graphics/projects/point_convolutions/pylib/pc/PointCloud.py b/tensorflow_graphics/projects/point_convolutions/pylib/pc/PointCloud.py new file mode 100755 index 000000000..e08cf279a --- /dev/null +++ b/tensorflow_graphics/projects/point_convolutions/pylib/pc/PointCloud.py @@ -0,0 +1,281 @@ +# Copyright 2020 The TensorFlow Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Class to represent a point cloud.""" + +import tensorflow as tf +from tensorflow_graphics.geometry.convolution.utils import \ + flatten_batch_to_2d, unflatten_2d_to_batch + +from pylib.pc.utils import check_valid_point_cloud_input + + +class _AABB: + """Axis aligned bounding box of a point cloud. + + Args: + Pointcloud: A 'PointCloud' instance from which to compute the + axis aligned bounding box. + + """ + + def __init__(self, point_cloud, name=None): + + self._batch_size = point_cloud._batch_size + self._batch_shape = point_cloud._batch_shape + self.point_cloud_ = point_cloud + + self._aabb_min = tf.math.unsorted_segment_min( + data=point_cloud._points, segment_ids=point_cloud._batch_ids, + num_segments=self._batch_size) - 1e-9 + self._aabb_max = tf.math.unsorted_segment_max( + data=point_cloud._points, segment_ids=point_cloud._batch_ids, + num_segments=self._batch_size) + 1e-9 + + def get_diameter(self, ord='euclidean', name=None): + """ Returns the diameter of the bounding box. + + Note: + In the following, A1 to An are optional batch dimensions. + + Args: + ord: Order of the norm. Supported values are `'euclidean'`, + `1`, `2`, `np.inf` and any positive real number yielding the + corresponding p-norm. Default is `'euclidean'`. (optional) + Return: + diam: A `float` 'Tensor' of shape `[A1, ..., An]`, diameters of the + bounding boxes + + """ + + diam = tf.linalg.norm(self._aabb_max - self._aabb_min, ord=ord, axis=-1) + if self._batch_shape is None: + return diam + else: + return tf.reshape(diam, self._batch_shape) + + +class PointCloud: + """ Class to represent point clouds. + + Note: + In the following, A1 to An are optional batch dimensions. + + Args: + points: A float `Tensor` either of shape `[N, D]` or of shape + `[A1, .., An, V, D]`, possibly padded as indicated by `sizes`. + Represents the point coordinates. + batch_ids: An `int` `Tensor` of shape `[N]` associated with the points. + Is required if `points` is of shape `[N, D]`. + sizes: An `int` `Tensor` of shape `[A1, ..., An]` indicating the + true input sizes in case of padding (`sizes=None` indicates no padding) + Note that `sizes[A1, ..., An] <= V` or `sum(sizes) == N`. + batch_size: An `int`, the size of the batch. 
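+
+  Example (an illustrative sketch; values assumed, not from the original
+  docstring):
+
+    A batch of two segmented point clouds in `D = 3` could be built as
+
+      points = [[0., 0., 0.], [1., 1., 1.], [2., 2., 2.]]
+      batch_ids = [0, 0, 1]
+      point_cloud = PointCloud(points, batch_ids=batch_ids, batch_size=2)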
+ """ + + def __init__(self, + points, + batch_ids=None, + batch_size=None, + sizes=None, + name=None): + points = tf.convert_to_tensor(value=points, dtype=tf.float32) + if sizes is not None: + sizes = tf.convert_to_tensor(value=sizes, dtype=tf.int32) + if batch_ids is not None: + batch_ids = tf.convert_to_tensor(value=batch_ids, dtype=tf.int32) + if batch_size is not None: + self._batch_size = tf.convert_to_tensor(value=batch_size, dtype=tf.int32) + else: + self._batch_size = None + + check_valid_point_cloud_input(points, sizes, batch_ids) + + self._sizes = sizes + # compatibility batch size as CPU int for graph mode + self._batch_size_numpy = batch_size + self._batch_ids = batch_ids + self._dimension = tf.gather(tf.shape(points), tf.rank(points) - 1) + self._batch_shape = None + self._unflatten = None + self._aabb = None + + if points.shape.ndims > 2: + self._init_from_padded(points) + else: + self._init_from_segmented(points) + + if self._batch_size_numpy is None: + self._batch_size_numpy = self._batch_size + + #Sort the points based on the batch ids in incremental order. + self._sorted_indices_batch = tf.argsort(self._batch_ids) + + def _init_from_padded(self, points): + """converting padded `Tensor` of shape `[A1, ..., An, V, D]` into a 2D + `Tensor` of shape `[N,D]` with segmentation ids. + """ + self._batch_shape = tf.shape(points)[:-2] + if self._batch_size is None: + self._batch_size = tf.reduce_prod(self._batch_shape) + if self._sizes is None: + self._sizes = tf.constant( + value=tf.shape(points)[-2], shape=self._batch_shape) + self._get_segment_id = tf.reshape( + tf.range(0, self._batch_size), self._batch_shape) + self._points, self._unflatten = flatten_batch_to_2d(points, self._sizes) + self._batch_ids = tf.repeat( + tf.range(0, self._batch_size), + repeats=tf.reshape(self._sizes, [-1])) + + def _init_from_segmented(self, points): + """if input is already 2D `Tensor` with segmentation ids or given sizes. + """ + if self._batch_ids is None: + if self._batch_size is None: + self._batch_size = tf.reduce_prod(self._sizes.shape) + self._batch_ids = tf.repeat(tf.range(0, self._batch_size), self._sizes) + if self._batch_size is None: + self._batch_size = tf.reduce_max(self._batch_ids) + 1 + self._points = points + + def get_points(self, id=None, max_num_points=None, name=None): + """ Returns the points. + + Note: + In the following, A1 to An are optional batch dimensions. + + If called withoud specifying 'id' returns the points in padded format + `[A1, ..., An, V, D]` + + Args: + id: An `int`, index of point cloud in the batch, if `None` returns all + max_num_points: An `int`, specifies the 'V' dimension the method returns, + by default uses maximum of 'sizes'. `max_rows >= max(sizes)` + + Return: + A `float` `Tensor` + of shape `[Ni, D]`, if 'id' was given + or + of shape `[A1, ..., An, V, D]`, zero padded, if no `id` was given. + + """ + if id is not None: + if not isinstance(id, int): + slice = self._get_segment_id + for slice_id in id: + slice = slice[slice_id] + id = slice + if id > self._batch_size: + raise IndexError('batch index out of range') + return self._points[self._batch_ids == id] + else: + return self.get_unflatten(max_num_points=max_num_points)(self._points) + + def get_sizes(self, name=None): + """ Returns the sizes of the point clouds in the batch. + + Note: + In the following, A1 to An are optional batch dimensions. + Use this instead of accessing 'self._sizes', + if the class was constructed using segmented input the '_sizes' is + created in this method. 
+
+    Returns:
+      `Tensor` of shape `[A1, ..., An]`.
+
+    """
+    if self._sizes is None:
+      _ids, _, self._sizes = tf.unique_with_counts(
+          self._batch_ids)
+      _ids_sorted = tf.argsort(_ids)
+      self._sizes = tf.gather(self._sizes, _ids_sorted)
+      if self._batch_shape is not None:
+        self._sizes = tf.reshape(self._sizes, self._batch_shape)
+    return self._sizes
+
+  def get_unflatten(self, max_num_points=None, name=None):
+    """ Returns the method to unflatten the segmented points.
+
+    Use this instead of accessing `self._unflatten`: if the class was
+    constructed from segmented input, the `_unflatten` method is only
+    created by this method.
+
+    Note:
+      In the following, A1 to An are optional batch dimensions.
+
+    Args:
+      max_num_points: An `int`, specifies the `V` dimension the method
+        returns; by default uses the maximum of `sizes`.
+        `max_num_points >= max(sizes)` must hold.
+
+    Returns:
+      A method to unflatten the segmented points, which returns a `Tensor` of
+      shape `[A1, ..., An, V, D]`, zero padded.
+
+    Raises:
+      ValueError: When trying to unflatten unsorted points.
+
+    """
+    if self._unflatten is None:
+      self._unflatten = lambda data: unflatten_2d_to_batch(
+          data=tf.gather(data, self._sorted_indices_batch),
+          sizes=self.get_sizes(),
+          max_rows=max_num_points)
+    return self._unflatten
+
+  def get_AABB(self) -> _AABB:
+    """ Returns the axis aligned bounding box of the point cloud.
+
+    Use this instead of accessing `self._aabb`, as the bounding box
+    is initialized with the first call of this method.
+
+    Returns:
+      An `AABB` instance.
+
+    """
+    if self._aabb is None:
+      self._aabb = _AABB(point_cloud=self)
+    return self._aabb
+
+  def set_batch_shape(self, batch_shape, name=None):
+    """ Function to change the batch shape.
+
+    Use this to set a batch shape instead of assigning `self._batch_shape`,
+    so that dependent variables are updated as well.
+
+    Note:
+      In the following, A1 to An are optional batch dimensions.
+
+    Args:
+      batch_shape: A 1D `int` `Tensor` `[A1, ..., An]`.
+
+    Raises:
+      ValueError: if the product of `batch_shape` does not equal the
+        batch size.
+
+    """
+    if batch_shape is not None:
+      batch_shape = tf.convert_to_tensor(value=batch_shape, dtype=tf.int32)
+      tf.assert_equal(
+          tf.reduce_prod(batch_shape), self._batch_size,
+          f'Incompatible batch size. Must be {self._batch_size} \
+          but is {tf.reduce_prod(batch_shape)}')
+      # eager-mode alternative:
+      # if tf.reduce_prod(batch_shape) != self._batch_size:
+      #   raise ValueError(
+      #       f'Incompatible batch size. Must be {self._batch_size} \
+      #       but is {tf.reduce_prod(batch_shape)}')
+      self._batch_shape = batch_shape
+      self._get_segment_id = tf.reshape(
+          tf.range(0, self._batch_size), self._batch_shape)
+      if self._sizes is not None:
+        self._sizes = tf.reshape(self._sizes, self._batch_shape)
+    else:
+      self._batch_shape = None
diff --git a/tensorflow_graphics/projects/point_convolutions/pylib/pc/tests/aabb_test.py b/tensorflow_graphics/projects/point_convolutions/pylib/pc/tests/aabb_test.py
new file mode 100644
index 000000000..c7635badd
--- /dev/null
+++ b/tensorflow_graphics/projects/point_convolutions/pylib/pc/tests/aabb_test.py
@@ -0,0 +1,72 @@
+# Copyright 2020 The TensorFlow Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and +# limitations under the License. +"""Class to test bounding box""" + +import os +import sys +import numpy as np +import tensorflow as tf +from absl.testing import parameterized +from tensorflow_graphics.util import test_case + +from pylib.pc import PointCloud, AABB +from pylib.pc.tests import utils + + +class AABB_test(test_case.TestCase): + + @parameterized.parameters( + (1, 1000, 3), + (8, 1000, 2), + (32, 1000, 4) + ) + def test_aabb_min_max(self, batch_size, num_points, dimension): + points, batch_ids = utils._create_random_point_cloud_segmented( + batch_size, num_points, dimension) + aabb_max_numpy = np.empty([batch_size, dimension]) + aabb_min_numpy = np.empty([batch_size, dimension]) + for i in range(batch_size): + aabb_max_numpy[i] = np.amax(points[batch_ids == i], axis=0) + aabb_min_numpy[i] = np.amin(points[batch_ids == i], axis=0) + + aabb_tf = PointCloud(points, batch_ids=batch_ids, + batch_size=batch_size).get_AABB() + + self.assertAllClose(aabb_max_numpy, aabb_tf._aabb_max) + self.assertAllClose(aabb_min_numpy, aabb_tf._aabb_min) + + @parameterized.parameters( + ([1], 1000, 3), + ([4, 4], 1000, 2), + ([1, 2, 3], 100, 4) + ) + def test_aabb_diameter(self, batch_shape, max_num_points, dimension): + points, sizes = utils._create_random_point_cloud_padded( + max_num_points, batch_shape, dimension) + batch_size = np.prod(batch_shape) + diameter_numpy = np.empty(batch_size) + points_flat = np.reshape(points, [batch_size, max_num_points, dimension]) + sizes_flat = np.reshape(sizes, [batch_size]) + for i in range(batch_size): + curr_pts = points_flat[i][:sizes_flat[i]] + diag = np.amax(curr_pts, axis=0) - np.amin(curr_pts, axis=0) + diameter_numpy[i] = np.linalg.norm(diag) + diameter_numpy = np.reshape(diameter_numpy, batch_shape) + + aabb_tf = PointCloud(points, sizes=sizes).get_AABB() + diameter_tf = aabb_tf.get_diameter() + self.assertAllClose(diameter_numpy, diameter_tf) + +if __name__ == '__main__': + test_case.main() diff --git a/tensorflow_graphics/projects/point_convolutions/pylib/pc/tests/point_cloud_test.py b/tensorflow_graphics/projects/point_convolutions/pylib/pc/tests/point_cloud_test.py new file mode 100644 index 000000000..e71d27ff3 --- /dev/null +++ b/tensorflow_graphics/projects/point_convolutions/pylib/pc/tests/point_cloud_test.py @@ -0,0 +1,109 @@ +# Copyright 2020 The TensorFlow Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific +"""Class to test point clouds""" + +import os +import sys +import numpy as np +import tensorflow as tf +from absl.testing import parameterized +from tensorflow_graphics.util import test_case + +from pylib.pc import PointCloud +from pylib.pc.tests import utils + + +class PointCloudTest(test_case.TestCase): + + @parameterized.parameters( + ([32], 100, 3), + ([5, 2], 100, 2), + ([2, 3, 4], 100, 4) + ) + def test_flatten_unflatten_padded(self, batch_shape, num_points, dimension): + batch_size = np.prod(batch_shape) + points, sizes = utils._create_random_point_cloud_padded( + num_points, batch_shape, dimension=dimension) + point_cloud = PointCloud(points, sizes=sizes) + retrieved_points = point_cloud.get_points().numpy() + self.assertAllEqual(points.shape, retrieved_points.shape) + points = points.reshape([batch_size, num_points, dimension]) + retrieved_points = retrieved_points.reshape( + [batch_size, num_points, dimension]) + sizes = sizes.reshape([batch_size]) + for i in range(batch_size): + self.assertAllClose(points[i, :sizes[i]], retrieved_points[i, :sizes[i]]) + self.assertTrue(np.all(retrieved_points[i, sizes[i]:] == 0)) + + @parameterized.parameters( + (100, 32, [8, 4]), + (100, 16, [2, 2, 2, 2]) + ) + def test_construction_methods(self, max_num_points, batch_size, batch_shape): + points, sizes = utils._create_random_point_cloud_padded( + max_num_points, batch_shape) + num_points = np.sum(sizes) + + sizes_flat = sizes.reshape([batch_size]) + points_flat = points.reshape([batch_size, max_num_points, 3]) + batch_ids = np.repeat(np.arange(0, batch_size), sizes_flat) + + points_seg = np.empty([num_points, 3]) + cur_id = 0 + for pts, size in zip(points_flat, sizes_flat): + points_seg[cur_id:cur_id + size] = pts[:size] + cur_id += size + + pc_from_padded = PointCloud(points, sizes=sizes) + self.assertAllEqual(batch_ids, pc_from_padded._batch_ids) + self.assertAllClose(points_seg, pc_from_padded._points) + + pc_from_ids = PointCloud(points_seg, batch_ids) + pc_from_ids.set_batch_shape(batch_shape) + + pc_from_sizes = PointCloud(points_seg, sizes=sizes_flat) + pc_from_sizes.set_batch_shape(batch_shape) + self.assertAllEqual(batch_ids, pc_from_sizes._batch_ids) + + points_from_padded = pc_from_padded.get_points( + max_num_points=max_num_points) + points_from_ids = pc_from_ids.get_points( + max_num_points=max_num_points) + points_from_sizes = pc_from_sizes.get_points( + max_num_points=max_num_points) + + self.assertAllEqual(points_from_padded, points_from_ids) + self.assertAllEqual(points_from_ids, points_from_sizes) + self.assertAllEqual(points_from_sizes, points_from_padded) + + @parameterized.parameters( + (1000, + ['Invalid input! Point tensor is of dimension 1 \ + but should be at least 2!', + 'Missing input! Either sizes or batch_ids must be given.', + 'Invalid sizes! 
Sizes of points and batch_ids are not equal.']) + ) + def test_exceptions_raised_at_construction(self, num_points, msgs): + points = np.random.rand(num_points) + batch_ids = np.zeros(num_points) + with self.assertRaisesRegex(ValueError, msgs[0]): + _ = PointCloud(points, batch_ids) + points = np.random.rand(num_points, 3) + with self.assertRaisesRegexp(ValueError, msgs[1]): + _ = PointCloud(points) + with self.assertRaisesRegexp(AssertionError, msgs[2]): + _ = PointCloud(points, batch_ids[1:]) + + +if __name__ == '__main__': + test_case.main() diff --git a/tensorflow_graphics/projects/point_convolutions/pylib/pc/tests/utils.py b/tensorflow_graphics/projects/point_convolutions/pylib/pc/tests/utils.py new file mode 100644 index 000000000..fe69ed795 --- /dev/null +++ b/tensorflow_graphics/projects/point_convolutions/pylib/pc/tests/utils.py @@ -0,0 +1,81 @@ +# Copyright 2020 The TensorFlow Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""helper functions for unit tests""" + +import tensorflow as tf +import numpy as np + + +def _create_random_point_cloud_segmented(batch_size, + num_points, + dimension=3, + sizes=None, + scale=1, + clean_aabb=False, + equal_sized_batches=False): + points = np.random.uniform(0, scale, [num_points, dimension]) + if sizes is None: + if not equal_sized_batches: + batch_ids = np.random.randint(0, batch_size, num_points) + batch_ids[:batch_size] = np.arange(0, batch_size) + else: + batch_ids = np.repeat(np.arange(0, batch_size), num_points // batch_size) + # batch_ids = np.sort(batch_ids) + else: + sizes = np.array(sizes, dtype=int) + batch_ids = np.repeat(np.arange(0, batch_size), sizes) + if clean_aabb: + # adds points such that the aabb is [0,0,0] [1,1,1]*scale + # to prevent rounding errors + points = np.concatenate( + (points, scale * np.ones([batch_size, dimension]) - 1e-9, + 1e-9 + np.zeros([batch_size, dimension]))) + batch_ids = np.concatenate( + (batch_ids, np.arange(0, batch_size), np.arange(0, batch_size))) + return points, batch_ids + + +def _create_random_point_cloud_padded(max_num_points, + batch_shape, + dimension=3, + sizes=None, + scale=1): + batch_size = np.prod(batch_shape) + points = np.random.uniform( + 0, scale, [max_num_points * batch_size, dimension]) + points = points.reshape(batch_shape + [max_num_points, dimension]) + if sizes is None: + sizes = np.random.randint(1, max_num_points, batch_shape) + return points, sizes + + +def _create_uniform_distributed_point_cloud_2D(num_points_sqrt, + scale=1, + flat=False): + ticks = np.linspace(0, scale, num=num_points_sqrt) + points = np.array(np.meshgrid(ticks, ticks)).T + if flat: + points = points.reshape(-1, 2) + return points + + +def _create_uniform_distributed_point_cloud_3D(num_points_root, + bb_min=0, + bb_max=1, + flat=False): + ticks = np.linspace(bb_min, bb_max, num=num_points_root, endpoint=False) + points = np.array(np.meshgrid(ticks, ticks, ticks)).T + if flat: + points = points.reshape(-1, 3) + return points diff --git 
a/tensorflow_graphics/projects/point_convolutions/pylib/pc/utils.py b/tensorflow_graphics/projects/point_convolutions/pylib/pc/utils.py
new file mode 100644
index 000000000..2f761c375
--- /dev/null
+++ b/tensorflow_graphics/projects/point_convolutions/pylib/pc/utils.py
@@ -0,0 +1,114 @@
+# Copyright 2020 The TensorFlow Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+''' helper functions for point clouds '''

+import tensorflow as tf
+from tensorflow_graphics.geometry.convolution.utils import flatten_batch_to_2d
+from pylib.pc import PointCloud
+
+
+def check_valid_point_cloud_input(points, sizes, batch_ids):
+  """Checks that the inputs to the constructor of class 'PointCloud' are valid.
+
+  Args:
+    points: A `float` `Tensor` of shape `[N, D]` or `[A1, ..., An, V, D]`.
+    sizes: An `int` `Tensor` of shape `[A1, ..., An]` or `None`.
+    batch_ids: An `int` `Tensor` of shape `[N]` or `None`.
+
+  Raises:
+    ValueError: If input dimensions are invalid or no valid segmentation
+      is given.
+    AssertionError: If the sizes of `points` and `batch_ids` are not equal.
+
+  """
+
+  if points.shape.ndims == 2 and sizes is None and batch_ids is None:
+    raise ValueError('Missing input! Either sizes or batch_ids must be given.')
+  if points.shape.ndims == 1:
+    raise ValueError(
+        'Invalid input! Point tensor is of dimension 1 \
+        but should be at least 2!')
+  if points.shape.ndims == 2 and batch_ids is not None:
+    if points.shape[0] != batch_ids.shape[0]:
+      raise AssertionError('Invalid sizes! Sizes of points and batch_ids are' +
+                           ' not equal.')
+
+
+def check_valid_point_hierarchy_input(point_cloud, cell_sizes, pool_mode):
+  """ Checks that inputs to the constructor of class 'PointHierarchy' are valid.
+
+  Args:
+    point_cloud: A 'PointCloud' instance.
+    cell_sizes: A `list` of `float` `Tensors`.
+    pool_mode: An `int`.
+
+  Raises:
+    TypeError: if the input is of an invalid type.
+    ValueError: if `pool_mode` is invalid, or the `cell_sizes` dimensions
+      are invalid or non-positive.
+
+  """
+  if not isinstance(point_cloud, (PointCloud)):
+    raise TypeError('Input must be instance of class PointCloud')
+  if pool_mode not in [0, 1]:
+    raise ValueError('Unknown pooling mode.')
+  for curr_cell_sizes in cell_sizes:
+    if any(curr_cell_sizes <= 0):
+      raise ValueError('cell size must be positive.')
+    if not curr_cell_sizes.shape[0] in [1, point_cloud.dimension_]:
+      raise ValueError(
+          'Invalid number of cell sizes for point cloud ' +\
+          f'dimension. Must be 1 or {point_cloud.dimension_} but is ' +\
+          f'{curr_cell_sizes.shape[0]}.')
+
+
+def _flatten_features(features, point_cloud: PointCloud):
+  """ Converts features of shape `[A1, ..., An, C]` to shape `[N, C]`.
+
+  Args:
+    features: A `Tensor`.
+    point_cloud: A `PointCloud` instance.
+
+  Returns:
+    A `Tensor` of shape `[N, C]`.
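+
+    For example (illustrative values): for a batch of two point clouds
+    with sizes `[2, 1]`, a `features` tensor of shape `[2, 2, C]` is
+    flattened to shape `[3, C]`, reordered to match the segmented points.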
+ + """ + if features.shape.ndims > 2: + sizes = point_cloud.get_sizes() + features, _ = flatten_batch_to_2d(features, sizes) + sorting = tf.math.invert_permutation(point_cloud._sorted_indices_batch) + features = tf.gather(features, sorting) + else: + tf.assert_equal(tf.shape(features)[0], tf.shape(point_cloud._points)[0]) + tf.assert_equal(tf.rank(features), 2) + return features + + +def cast_to_num_dims(values, num_dims, dtype=tf.float32): + """ Converts an input to the specified `dtype` and repeats it `num_dims` + times. + + Args: + values: Must be convertible to a `Tensor` of shape `[], [1]` or + `[num_dims]`. + dtype: A `tf.dtype`. + + Returns: + A `dtype` `Tensor` of shape `[num_dims]`. + + """ + values = tf.cast(tf.convert_to_tensor(value=values), + dtype=dtype) + if values.shape == [] or values.shape[0] == 1: + values = tf.repeat(values, num_dims) + return values diff --git a/tensorflow_graphics/projects/point_convolutions/pytest.ini b/tensorflow_graphics/projects/point_convolutions/pytest.ini new file mode 100644 index 000000000..82972425f --- /dev/null +++ b/tensorflow_graphics/projects/point_convolutions/pytest.ini @@ -0,0 +1,3 @@ +[pytest] +norecursedirs = tensorflow_graphics/rendering/opengl/tests tensorflow_graphics/submodules +python_files = *_test.py From 047cddffe92d2dc3f66fd0ecd1319467f9a07704 Mon Sep 17 00:00:00 2001 From: Michael Schelling Date: Wed, 10 Mar 2021 13:48:51 +0100 Subject: [PATCH 02/29] commented unused functions --- .../projects/point_convolutions/pylib/pc/utils.py | 3 ++- .../projects/point_convolutions/pytest_requirements.txt | 3 +++ 2 files changed, 5 insertions(+), 1 deletion(-) create mode 100644 tensorflow_graphics/projects/point_convolutions/pytest_requirements.txt diff --git a/tensorflow_graphics/projects/point_convolutions/pylib/pc/utils.py b/tensorflow_graphics/projects/point_convolutions/pylib/pc/utils.py index 2f761c375..972222310 100644 --- a/tensorflow_graphics/projects/point_convolutions/pylib/pc/utils.py +++ b/tensorflow_graphics/projects/point_convolutions/pylib/pc/utils.py @@ -43,7 +43,7 @@ def check_valid_point_cloud_input(points, sizes, batch_ids): raise AssertionError('Invalid sizes! Sizes of points and batch_ids are' + ' not equal.') - +''' def check_valid_point_hierarchy_input(point_cloud, cell_sizes, pool_mode): """ Checks that inputs to the constructor of class 'PontHierarchy' are valid. 
@@ -112,3 +112,4 @@ def cast_to_num_dims(values, num_dims, dtype=tf.float32): if values.shape == [] or values.shape[0] == 1: values = tf.repeat(values, num_dims) return values +''' diff --git a/tensorflow_graphics/projects/point_convolutions/pytest_requirements.txt b/tensorflow_graphics/projects/point_convolutions/pytest_requirements.txt new file mode 100644 index 000000000..418e8a444 --- /dev/null +++ b/tensorflow_graphics/projects/point_convolutions/pytest_requirements.txt @@ -0,0 +1,3 @@ +pytest +sklearn +trimesh \ No newline at end of file From 7c05856870d627297d29209d6f84afe57c9d9cef Mon Sep 17 00:00:00 2001 From: Michael Schelling Date: Wed, 10 Mar 2021 15:27:45 +0100 Subject: [PATCH 03/29] added missing __init__.py --- .../point_convolutions/pylib/pc/__init__.py | 26 +++++++++++++++++++ 1 file changed, 26 insertions(+) create mode 100755 tensorflow_graphics/projects/point_convolutions/pylib/pc/__init__.py diff --git a/tensorflow_graphics/projects/point_convolutions/pylib/pc/__init__.py b/tensorflow_graphics/projects/point_convolutions/pylib/pc/__init__.py new file mode 100755 index 000000000..9b58df3ad --- /dev/null +++ b/tensorflow_graphics/projects/point_convolutions/pylib/pc/__init__.py @@ -0,0 +1,26 @@ +# Copyright 2020 The TensorFlow Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Point cloud module.""" + +from .PointCloud import _AABB as AABB +from .PointCloud import PointCloud +from .Grid import Grid +from .Neighborhood import Neighborhood +from .Neighborhood import KDEMode +from .sampling import poisson_disk_sampling, cell_average_sampling +from .sampling import sample +from .PointHierarchy import PointHierarchy + +from pylib.pc import layers +from pylib.pc import custom_ops From 80e9f605223740290111d49c75fa89a4f5891005 Mon Sep 17 00:00:00 2001 From: Michael Schelling Date: Wed, 10 Mar 2021 15:35:02 +0100 Subject: [PATCH 04/29] added missing __init__.py --- .../point_convolutions/pylib/pc/tests/__init__.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) create mode 100644 tensorflow_graphics/projects/point_convolutions/pylib/pc/tests/__init__.py diff --git a/tensorflow_graphics/projects/point_convolutions/pylib/pc/tests/__init__.py b/tensorflow_graphics/projects/point_convolutions/pylib/pc/tests/__init__.py new file mode 100644 index 000000000..26540aa8e --- /dev/null +++ b/tensorflow_graphics/projects/point_convolutions/pylib/pc/tests/__init__.py @@ -0,0 +1,12 @@ +# Copyright 2020 The TensorFlow Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific From 91c54821ca7809a53ae53ebc5e04c9062225eb1c Mon Sep 17 00:00:00 2001 From: Michael Schelling Date: Wed, 10 Mar 2021 15:39:47 +0100 Subject: [PATCH 05/29] fixed __init__.py imports --- .../projects/point_convolutions/pylib/__init__.py | 2 +- .../projects/point_convolutions/pylib/pc/__init__.py | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/tensorflow_graphics/projects/point_convolutions/pylib/__init__.py b/tensorflow_graphics/projects/point_convolutions/pylib/__init__.py index 87d0e6976..73eba35d2 100755 --- a/tensorflow_graphics/projects/point_convolutions/pylib/__init__.py +++ b/tensorflow_graphics/projects/point_convolutions/pylib/__init__.py @@ -12,4 +12,4 @@ # See the License for the specific from pylib import pc -from pylib import io +# from pylib import io diff --git a/tensorflow_graphics/projects/point_convolutions/pylib/pc/__init__.py b/tensorflow_graphics/projects/point_convolutions/pylib/pc/__init__.py index 9b58df3ad..8407fc9c0 100755 --- a/tensorflow_graphics/projects/point_convolutions/pylib/pc/__init__.py +++ b/tensorflow_graphics/projects/point_convolutions/pylib/pc/__init__.py @@ -16,6 +16,7 @@ from .PointCloud import _AABB as AABB from .PointCloud import PointCloud from .Grid import Grid +''' from .Neighborhood import Neighborhood from .Neighborhood import KDEMode from .sampling import poisson_disk_sampling, cell_average_sampling @@ -24,3 +25,4 @@ from pylib.pc import layers from pylib.pc import custom_ops +''' \ No newline at end of file From 2830c7de02d8c2a939abe409e97f09737ecbc8eb Mon Sep 17 00:00:00 2001 From: Michael Schelling Date: Wed, 10 Mar 2021 15:45:06 +0100 Subject: [PATCH 06/29] fixed __init__.py imports --- requirements.txt | 2 ++ .../projects/point_convolutions/pylib/pc/__init__.py | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index cb1a66eaf..35d99498d 100644 --- a/requirements.txt +++ b/requirements.txt @@ -13,3 +13,5 @@ termcolor >= 1.1.0 trimesh >= 2.37.22 # Required by trimesh. networkx +# required for pytests of project/point_convolutions +sklearn diff --git a/tensorflow_graphics/projects/point_convolutions/pylib/pc/__init__.py b/tensorflow_graphics/projects/point_convolutions/pylib/pc/__init__.py index 8407fc9c0..26a7badc3 100755 --- a/tensorflow_graphics/projects/point_convolutions/pylib/pc/__init__.py +++ b/tensorflow_graphics/projects/point_convolutions/pylib/pc/__init__.py @@ -15,8 +15,8 @@ from .PointCloud import _AABB as AABB from .PointCloud import PointCloud -from .Grid import Grid ''' +from .Grid import Grid from .Neighborhood import Neighborhood from .Neighborhood import KDEMode from .sampling import poisson_disk_sampling, cell_average_sampling From 157bf31804dc256ca7c7b05647e39b5f94ca17dd Mon Sep 17 00:00:00 2001 From: Michael Schelling Date: Wed, 10 Mar 2021 15:52:18 +0100 Subject: [PATCH 07/29] fixed __init__.py imports --- requirements.txt | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/requirements.txt b/requirements.txt index 35d99498d..cdc4294fd 100644 --- a/requirements.txt +++ b/requirements.txt @@ -13,5 +13,4 @@ termcolor >= 1.1.0 trimesh >= 2.37.22 # Required by trimesh. 
networkx -# required for pytests of project/point_convolutions -sklearn + From ccb9d90580cfd40e7efb43fde48b896ac6efc0dc Mon Sep 17 00:00:00 2001 From: Michael Schelling Date: Thu, 11 Mar 2021 10:55:29 +0100 Subject: [PATCH 08/29] grid initial --- .../point_convolutions/pylib/pc/Grid.py | 84 ++++ .../point_convolutions/pylib/pc/__init__.py | 4 +- .../pylib/pc/custom_ops/__init__.py | 39 ++ .../pylib/pc/custom_ops/custom_ops_tf.py | 447 ++++++++++++++++++ .../pylib/pc/custom_ops/custom_ops_wrapper.py | 198 ++++++++ .../pylib/pc/custom_ops/tests/__init__.py | 0 .../custom_ops/tests/build_grid_ds_tf_test.py | 96 ++++ .../custom_ops/tests/compute_keys_tf_test.py | 73 +++ .../pylib/pc/tests/grid_test.py | 133 ++++++ 9 files changed, 1072 insertions(+), 2 deletions(-) create mode 100755 tensorflow_graphics/projects/point_convolutions/pylib/pc/Grid.py create mode 100644 tensorflow_graphics/projects/point_convolutions/pylib/pc/custom_ops/__init__.py create mode 100644 tensorflow_graphics/projects/point_convolutions/pylib/pc/custom_ops/custom_ops_tf.py create mode 100644 tensorflow_graphics/projects/point_convolutions/pylib/pc/custom_ops/custom_ops_wrapper.py create mode 100644 tensorflow_graphics/projects/point_convolutions/pylib/pc/custom_ops/tests/__init__.py create mode 100644 tensorflow_graphics/projects/point_convolutions/pylib/pc/custom_ops/tests/build_grid_ds_tf_test.py create mode 100644 tensorflow_graphics/projects/point_convolutions/pylib/pc/custom_ops/tests/compute_keys_tf_test.py create mode 100644 tensorflow_graphics/projects/point_convolutions/pylib/pc/tests/grid_test.py diff --git a/tensorflow_graphics/projects/point_convolutions/pylib/pc/Grid.py b/tensorflow_graphics/projects/point_convolutions/pylib/pc/Grid.py new file mode 100755 index 000000000..5fc8565f6 --- /dev/null +++ b/tensorflow_graphics/projects/point_convolutions/pylib/pc/Grid.py @@ -0,0 +1,84 @@ +# Copyright 2020 The TensorFlow Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import tensorflow as tf + +from pylib.pc.custom_ops import compute_keys, build_grid_ds +from pylib.pc import PointCloud, AABB + + +class Grid: + """ 2D regular grid of a point cloud. + + Args: + point_cloud : A `PointCloud` instance to distribute in the grid. + cell_sizes A `float` `Tensor` of shape `[D]`, the sizes of the grid + cells in each dimension. + aabb: An `AABB` instance, the bounding box of the grid, if `None` + the bounding box of `point_cloud` is used. (optional) + + """ + + def __init__(self, point_cloud: PointCloud, cell_sizes, aabb=None, + name=None): + cell_sizes = tf.cast(tf.convert_to_tensor(value=cell_sizes), + tf.float32) + if cell_sizes.shape == [] or cell_sizes.shape[0] == 1: + cell_sizes = tf.repeat(cell_sizes, point_cloud._dimension) + #Save the attributes. + self._batch_size = point_cloud._batch_size_numpy + self._cell_sizes = cell_sizes + self._point_cloud = point_cloud + self._aabb = point_cloud.get_AABB() + #Compute the number of cells in the grid. 
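+    # E.g. (illustrative values): an AABB extent of 1.0 per dimension and a
+    # cell size of 0.25 yield ceil(1.0 / 0.25) = 4 cells per dimension; at
+    # least one cell per dimension is kept for degenerate bounding boxes.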
+ aabb_sizes = self._aabb._aabb_max - self._aabb._aabb_min + batch_num_cells = tf.cast( + tf.math.ceil(aabb_sizes / self._cell_sizes), tf.int32) + self._num_cells = tf.maximum( + tf.reduce_max(batch_num_cells, axis=0), 1) + + #Compute the key for each point. + self._cur_keys = compute_keys( + self._point_cloud, self._num_cells, + self._cell_sizes) + + #Sort the keys. + self._sorted_indices = tf.argsort( + self._cur_keys, direction='DESCENDING') + self._sorted_keys = tf.gather(self._cur_keys, self._sorted_indices) + + #Get the sorted points and batch ids. + self._sorted_points = tf.gather( + self._point_cloud._points, self._sorted_indices) + self._sorted_batch_ids = tf.gather( + self._point_cloud._batch_ids, self._sorted_indices) + + self._fast_DS = None + + def get_DS(self): + """ Method to get the 2D-Grid datastructure. + + Note: By default the data structure is not build on initialization, + but with this method + + Returns: + A `int` `Tensor` of shape `[num_cells[0], num_cells[1], 2]`, where + `[i,j,0]:[i,j,1]` is the range of points in cell `i,j`. + The indices are with respect to the sorted points of the grid. + + """ + if self._fast_DS is None: + #Build the fast access data structure. + self._fast_DS = build_grid_ds( + self._sorted_keys, self._num_cells, self._batch_size) + return self._fast_DS diff --git a/tensorflow_graphics/projects/point_convolutions/pylib/pc/__init__.py b/tensorflow_graphics/projects/point_convolutions/pylib/pc/__init__.py index 26a7badc3..26d924a5b 100755 --- a/tensorflow_graphics/projects/point_convolutions/pylib/pc/__init__.py +++ b/tensorflow_graphics/projects/point_convolutions/pylib/pc/__init__.py @@ -15,8 +15,8 @@ from .PointCloud import _AABB as AABB from .PointCloud import PointCloud -''' from .Grid import Grid +''' from .Neighborhood import Neighborhood from .Neighborhood import KDEMode from .sampling import poisson_disk_sampling, cell_average_sampling @@ -24,5 +24,5 @@ from .PointHierarchy import PointHierarchy from pylib.pc import layers +''' from pylib.pc import custom_ops -''' \ No newline at end of file diff --git a/tensorflow_graphics/projects/point_convolutions/pylib/pc/custom_ops/__init__.py b/tensorflow_graphics/projects/point_convolutions/pylib/pc/custom_ops/__init__.py new file mode 100644 index 000000000..cd2aee431 --- /dev/null +++ b/tensorflow_graphics/projects/point_convolutions/pylib/pc/custom_ops/__init__.py @@ -0,0 +1,39 @@ +# Copyright 2020 The TensorFlow Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+"""Loads custom ops if installed, else loads tensorflow implementations""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +try: + import tfg_custom_ops + CUSTOM = 1 +except ImportError: + CUSTOM = 0 + +if CUSTOM: + from .custom_ops_wrapper import basis_proj + from .custom_ops_wrapper import build_grid_ds + from .custom_ops_wrapper import compute_keys + from .custom_ops_wrapper import compute_pdf + from .custom_ops_wrapper import find_neighbors + from .custom_ops_wrapper import sampling +else: + from .custom_ops_tf import basis_proj_tf as basis_proj + from .custom_ops_tf import build_grid_ds_tf as build_grid_ds + from .custom_ops_tf import compute_keys_tf as compute_keys + from .custom_ops_tf import compute_pdf_tf as compute_pdf + from .custom_ops_tf import find_neighbors_tf as find_neighbors + from .custom_ops_tf import sampling_tf as sampling diff --git a/tensorflow_graphics/projects/point_convolutions/pylib/pc/custom_ops/custom_ops_tf.py b/tensorflow_graphics/projects/point_convolutions/pylib/pc/custom_ops/custom_ops_tf.py new file mode 100644 index 000000000..be0a8041d --- /dev/null +++ b/tensorflow_graphics/projects/point_convolutions/pylib/pc/custom_ops/custom_ops_tf.py @@ -0,0 +1,447 @@ +# Copyright 2020 The TensorFlow Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" tensorflow implemetations of the custom ops """ + +import tensorflow as tf +import numpy as np +from pylib.pc import PointCloud, Grid + + +def compute_keys_tf(point_cloud: PointCloud, num_cells, cell_size, name=None): + """ Computes the regular grid cell keys of a point cloud. + + For a point in cell `c` the key is computed as + \\(key = batch_id * prod_{d=0}^{D} num_cells_{d} + \\) + \\(sum_{d=0}^{D}( c_{d} prod_{d'=d+1}^{D} num_cells_{d'} ) \\). + Args: + point_cloud: A `PointCloud` instance. + num_cells: An `int` `Tensor` of shape `[D]`, the total number of cells + per dimension. + cell_size: An `int` `Tensor` of shape `[D]`, the cell sizes per + dimension. + + Returns: + An `int` `Tensor` of shape `[N]`, the keys per point. + + """ + aabb = point_cloud.get_AABB() + abb_min_per_batch = aabb._aabb_min + aabb_min_per_point = tf.gather(abb_min_per_batch, point_cloud._batch_ids) + cell_ind = tf.math.floor( + (point_cloud._points - aabb_min_per_point) / cell_size) + cell_ind = tf.cast(cell_ind, tf.int32) + cell_ind = tf.minimum( + tf.maximum(cell_ind, tf.zeros_like(cell_ind)), + num_cells) + cell_multiplier = tf.math.cumprod(num_cells, reverse=True) + cell_multiplier = tf.concat((cell_multiplier, [1]), axis=0) + keys = point_cloud._batch_ids * cell_multiplier[0] + \ + tf.math.reduce_sum(cell_ind * tf.reshape(cell_multiplier[1:], [1, -1]), + axis=1) + return tf.cast(keys, tf.int64) +tf.no_gradient('ComputeKeysTF') + + +def build_grid_ds_tf(sorted_keys, num_cells, batch_size, name=None): + """ Method to build a fast access data structure for point clouds. 
+ + Creates a 2D regular grid in the first two dimension, saving the first and + last index belonging to that cell array. + + Args: + sorted_keys: An `int` `Tensor` of shape `[N]`, the sorted keys. + num_cells: An `int` `Tensor` of shape `[D]`, the total number of cells + per dimension. + batch_size: An `int`. + + Returns: + An `int` `Tensor` of shape `[batch_size, num_cells[0], num_cells[1], 2]`. + + """ + sorted_keys = tf.cast(tf.convert_to_tensor(value=sorted_keys), tf.int32) + num_cells = tf.cast(tf.convert_to_tensor(value=num_cells), tf.int32) + + num_keys = tf.shape(sorted_keys)[0] + num_cells_2D = batch_size * num_cells[0] * num_cells[1] + tf.assert_greater( + tf.shape(num_cells)[0], 1, + 'Points must have dimensionality >1.') + cells_per_2D_cell = tf.cond( + tf.shape(num_cells)[0] > 2, + lambda: tf.reduce_prod(num_cells[2:]), + lambda: 1 + ) + # condition without graph mode + # if tf.shape(num_cells)[0] > 2: + # cells_per_2D_cell = tf.reduce_prod(num_cells[2:]) + # elif tf.shape(num_cells)[0] == 2: + # cells_per_2D_cell = 1 + + ds_indices = tf.cast(tf.floor(sorted_keys / cells_per_2D_cell), + dtype=tf.int32) + indices = tf.range(0, num_keys, dtype=tf.int32) + first_per_cell = tf.math.unsorted_segment_min( + indices, ds_indices, num_cells_2D) + last_per_cell = tf.math.unsorted_segment_max( + indices + 1, ds_indices, num_cells_2D) + + empty_cells = first_per_cell < 0 + first_per_cell = tf.where( + empty_cells, tf.zeros_like(first_per_cell), first_per_cell) + last_per_cell = tf.where( + empty_cells, tf.zeros_like(last_per_cell), last_per_cell) + empty_cells = first_per_cell > num_keys + first_per_cell = tf.where( + empty_cells, tf.zeros_like(first_per_cell), first_per_cell) + last_per_cell = tf.where( + empty_cells, tf.zeros_like(last_per_cell), last_per_cell) + + return tf.stack([tf.reshape(first_per_cell, + [batch_size, num_cells[0], num_cells[1]]), + tf.reshape(last_per_cell, + [batch_size, num_cells[0], num_cells[1]])], + axis=3) +tf.no_gradient('BuildGridDsTF') + +''' +def find_neighbors_tf(grid, + point_cloud_centers, + radii, + max_neighbors=0, + name=None): + """ Method to find the neighbors of a center point cloud in another + point cloud. + + Args: + grid: A `Grid` instance, from which the neighbors are chosen. + point_cloud_centers: A `PointCloud` instance, containing the center points. + radii: A `float`, the radii to select neighbors from. + max_neighbors: An `int`, if `0` all neighbors are selected. + + Returns: + center_neigh_ranges: An `int` `Tensor` of shape `[N]`, end of the ranges per + center point. You can get the neighbor ids of point `i` (i>0) with + `neighbors[center_neigh_ranges[i-1]:center_neigh_ranges[i]]`. + neighbors: An `int` `Tensor` of shape `[M, 2]`, indices of the neighbor + point and the center for each neighbor. Follows the order of + `grid._sorted_points`. 
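As an illustration of the range encoding described above, here is a small runnable sketch with hypothetical values for three center points and five neighbor pairs:

    import tensorflow as tf

    center_neigh_ranges = tf.constant([2, 2, 5])  # range ends per center
    neighbors = tf.constant([[4, 0], [7, 0], [1, 2], [2, 2], [9, 2]])

    # Center 0 owns rows 0:2, center 1 is empty (2:2), center 2 owns rows
    # 2:5; column 0 indexes the sorted grid points.
    ids_of_center_2 = neighbors[center_neigh_ranges[1]:
                                center_neigh_ranges[2], 0]  # -> [1, 2, 9]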
+ + """ + radii = tf.convert_to_tensor(value=radii) + radii = tf.cond( + tf.logical_or(tf.shape(radii)[0] == [], tf.shape(radii)[0] == 1), + lambda: tf.repeat(radii, grid._point_cloud._dimension), + lambda: radii) + # condition without graph mode + # if tf.shape(radii)[0] == [] or tf.shape(radii)[0] == 1: + # radii = tf.repeat(radii, grid._point_cloud._dimension) + # compute keys of center points in neighbors 2D grid + center_points = point_cloud_centers._points + center_batch_ids = point_cloud_centers._batch_ids + aabb = grid._aabb + abb_min_per_batch_2D = aabb._aabb_min[:, :2] + aabb_min_per_center_point_2D = tf.gather( + abb_min_per_batch_2D, center_batch_ids) + center_cell_ind_2D = tf.math.floor( + (center_points[:, :2] - aabb_min_per_center_point_2D) / radii[:2]) + center_cell_ind_2D = tf.cast(center_cell_ind_2D, tf.int32) + center_cell_ind_2D = tf.minimum( + tf.maximum(center_cell_ind_2D, tf.zeros_like(center_cell_ind_2D)), + grid._num_cells[:2]) + # find neighbors using fast 2D grid datastructure + neighbor_points = grid._sorted_points + neighbor_batch_ids = grid._sorted_batch_ids + data_structure = grid.get_DS() + + neighbors = [] + center_neigh_ranges = [] + cur_neigh_range = 0 + for i in range(tf.shape(center_points)[0]): + cur_point = center_points[i] + cur_batch_id = center_batch_ids[i] + # get cell_ids of adjacent 2D cells (9 in total) + cur_cell_id_2D = center_cell_ind_2D[i] + adj_cell_ids_2D = tf.stack( + (cur_cell_id_2D + [-1, -1], + cur_cell_id_2D + [-1, 0], + cur_cell_id_2D + [-1, 1], + cur_cell_id_2D + [0, 1], + cur_cell_id_2D, + cur_cell_id_2D + [0, -1], + cur_cell_id_2D + [1, -1], + cur_cell_id_2D + [1, 0], + cur_cell_id_2D + [1, 1]), axis=0) + # clip to range between 0 and max num cells + adj_cell_ids_2D = tf.minimum( + tf.maximum(adj_cell_ids_2D, tf.zeros_like(adj_cell_ids_2D)), + grid._num_cells[:2]) + # get min and max point ids of the adjacent cells + adj_ids = tf.gather_nd(data_structure[cur_batch_id], [adj_cell_ids_2D]) + adj_ids_start = tf.reduce_min(adj_ids[0, :, 0]) + adj_ids_end = tf.reduce_max(adj_ids[0, :, 1]) + # choose points below certain distance and in same batch + adj_points = neighbor_points[adj_ids_start:adj_ids_end] + adj_batch_ids = neighbor_batch_ids[adj_ids_start:adj_ids_end] + distances = tf.linalg.norm( + adj_points - tf.reshape(cur_point, [1, -1]), axis=1) + close = (distances < radii[0]) + same_batch = (adj_batch_ids == cur_batch_id) + close = tf.math.logical_and(close, same_batch) + close_ids = tf.boolean_mask(tf.range(adj_ids_start, adj_ids_end), close) + + cur_neighbors = tf.stack( + (close_ids, tf.ones_like(close_ids) * i), axis=1) + neighbors.append(cur_neighbors) + cur_neigh_range = cur_neigh_range + tf.shape(cur_neighbors)[0] + center_neigh_ranges.append(cur_neigh_range) + + neighbors = tf.concat(neighbors, axis=0) + center_neigh_ranges = tf.concat(center_neigh_ranges, axis=0) + + return center_neigh_ranges, neighbors +tf.no_gradient('FindNeighborsTF') + + +def find_neighbors_no_grid(point_cloud, + point_cloud_centers, + radius, + name=None): + """ Method to find the neighbors of a center point cloud in another + point cloud. + + Args: + point_cloud: A `PointCloud` instance, from which the neighbors are chosen. + point_cloud_centers: A `PointCloud` instance, containing the center points. + radius: A `float`, the radius to select neighbors from. + + Returns: + center_neigh_ranges: An `int` `Tensor` of shape `[N]`, end of the ranges per + center point. 
You can get the neighbor ids of point `i` (i>0) with + `neighbors[center_neigh_ranges[i-1]:center_neigh_ranges[i]]`. + neighbors: An `int` `Tensor` of shape `[M, 2]`, indices of the neighbor + point and the center for each neighbor. Follows the order of + `grid._sorted_points`. + + """ + points = point_cloud._points + batch_ids = point_cloud._batch_ids + center_points = point_cloud_centers._points + center_batch_ids = point_cloud_centers._batch_ids + num_center_points = tf.shape(center_points)[0] + + distances = tf.linalg.norm(tf.expand_dims(points, axis=0) - \ + tf.expand_dims(center_points, axis=1), + axis=-1) + close = (distances <= radius) + same_batch = (tf.expand_dims(batch_ids, axis=0) == \ + tf.expand_dims(center_batch_ids, axis=1)) + close = tf.math.logical_and(same_batch, close) + + neighbors = tf.where(close) + neighbors = tf.reverse(neighbors, axis=[1]) + num_neighbors = tf.shape(neighbors)[0] + neigh_ranges = tf.math.unsorted_segment_max( + tf.range(1, num_neighbors + 1), + neighbors[:, 1], + num_center_points) + return neigh_ranges, neighbors +tf.no_gradient('FindNeighborsNoGrid') + + +def sampling_tf(neighborhood, sample_mode, name=None): + """ Method to sample the points of a point cloud. + + Args: + neighborhood: A `Neighborhood` instance, which contains a point cloud with + its neighbors. + sample_mode: An `int`specifiying the sample mode, + `0` for average, `1` for poisson. + + Returns: + sampled_points: A `float` `Tensor` of shape [S, D], the sampled points. + sampled_batch_ids: An `int` `Tensor` of shape [S], the batch ids. + sampled_indices: An `int` `Tensor` of shape [S], the indices to the + unsampled points. + Following the order of neighborhood._grid._sorted_points. + + """ + points = neighborhood._grid._sorted_points + batch_ids = neighborhood._grid._sorted_batch_ids + num_points = tf.shape(points)[0] + if sample_mode == 0: + # poisson sampling + nb_ranges = tf.concat(([0], neighborhood._samples_neigh_ranges), axis=0) + neighbors = neighborhood._neighbors + num_points = tf.shape(neighborhood._grid._sorted_points)[0] + log_probabilities = tf.ones([num_points]) + sampled_indices = tf.zeros([0], dtype=tf.int64) + # to set log prob to -inf <=> prob to zero + tf_neg_inf = tf.constant(-np.inf) + + #sample points until all log probabilites are set to -inf + while not tf.reduce_all(tf.math.is_inf(log_probabilities)): + choice = tf.random.categorical( + tf.expand_dims(log_probabilities, axis=0), 1)[0] + # add choice to sampled indices + sampled_indices = tf.concat((sampled_indices, choice), axis=0) + # set log probability of neighbors to -inf + sample_neighbors = \ + neighbors[nb_ranges[choice[0]]:nb_ranges[choice[0] + 1], 0] + num_neighbors = tf.shape(sample_neighbors)[0] + log_probabilities = tf.tensor_scatter_nd_update( + log_probabilities, + tf.expand_dims(sample_neighbors, axis=1), + tf.repeat(tf_neg_inf, num_neighbors)) + sampled_points = tf.gather(points, sampled_indices) + sampled_batch_ids = tf.gather(batch_ids, sampled_indices) + + elif sample_mode == 1: + # cell average sampling + keys = neighborhood._grid._sorted_keys + # replace keys with numbers 0 to num_unique keys + unique, _, counts = tf.unique_with_counts(keys) + num_unique_keys = tf.shape(unique)[0] + new_keys = tf.repeat(tf.range(0, num_unique_keys), counts) + # average over points with same cell key + sampled_points = tf.math.segment_mean(points, new_keys) + # get batch of a point in the same cell + sampled_indices = tf.math.segment_min(tf.range(0, num_points), new_keys) + sampled_batch_ids = 
tf.gather(batch_ids, sampled_indices) + + return sampled_points, sampled_batch_ids, sampled_indices + +tf.no_gradient('samplingTF') + + +_pi = tf.constant(np.pi) + + +def compute_pdf_inside_neighborhoods_tf(neighborhood, + bandwidth, + mode, + name=None): + """ Method to compute the density distribution inside the neighborhoods of a + point cloud in euclidean space using kernel density estimation (KDE). + + Args: + neighborhood: A `Neighborhood` instance. + bandwidth: An `int` `Tensor` of shape `[D]`, the bandwidth of the KDE. + mode: A `KDEMode` value. + + Returns: + A `float` `Tensor` of shape `[N]`, the estimated density per center point. + + """ + bandwidth = tf.convert_to_tensor(value=bandwidth) + points = neighborhood._grid._sorted_points + neighbors = neighborhood._neighbors + nbh_ranges = neighborhood._samples_neigh_ranges + + # compute difference vectors inside neighborhoods + num_adjacencies = tf.shape(neighbors)[0] + nbh_start_ind = tf.concat(([0], nbh_ranges[0:-1]), axis=0) + nbh_sizes = nbh_ranges - nbh_start_ind + max_num_neighbors = tf.reduce_max(nbh_sizes) + nbh_sizes_per_nb = tf.repeat(nbh_sizes, nbh_sizes) + + nb_indices_1 = tf.repeat(neighbors[:, 0], nbh_sizes_per_nb) + + mask = tf.sequence_mask(nbh_sizes_per_nb, max_num_neighbors) + mask_indices = tf.cast(tf.compat.v1.where(mask), tf.int32) + indices_tensor = tf.repeat(tf.reshape(tf.range(0, max_num_neighbors), + [1, max_num_neighbors]), + num_adjacencies, axis=0) + nbh_start_per_nb = tf.repeat(nbh_start_ind, nbh_sizes) + indices_tensor = indices_tensor + \ + tf.reshape(nbh_start_per_nb, [num_adjacencies, 1]) + indices_2 = tf.gather_nd(params=indices_tensor, indices=mask_indices) + nb_indices_2 = tf.gather(neighbors[:, 0], indices_2) + + nb_diff = tf.gather(points, nb_indices_1) - tf.gather(points, nb_indices_2) + # kernel density estimation using the distances + rel_bandwidth = tf.reshape(bandwidth * neighborhood._radii, [1, -1]) + kernel_input = nb_diff / rel_bandwidth + # gaussian kernel + nb_kernel_value = tf.exp(-tf.pow(kernel_input, 2) / 2) / tf.sqrt(2 * _pi) + nb_kernel_value = tf.reduce_prod(nb_kernel_value, axis=1) + nb_id_per_nb_pair = tf.repeat(tf.range(0, num_adjacencies), + nbh_sizes_per_nb) + # sum over influence inside neighborhood + pdf = tf.math.unsorted_segment_sum(nb_kernel_value, + nb_id_per_nb_pair, + num_adjacencies) /\ + tf.reduce_prod(bandwidth) + return pdf + + +def compute_pdf_tf(neighborhood, bandwidth, mode, name=None): + """ Method to compute the density distribution using neighborhood information + in euclidean space using kernel density estimation (KDE). + + Args: + neighborhood: A `Neighborhood` instance of the pointcloud to itself. + bandwidth: An `int` `Tensor` of shape `[D]`, the bandwidth of the KDE. + mode: A `KDEMode` value. + + Returns: + A `float` `Tensor` of shape `[N]`, the estimated density per point, + with respect to the sorted points of the grid in `neighborhood`. 
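Written out, the Gaussian estimate implemented by both KDE functions above is, for sample \(i\) with neighbor set \(\mathcal{N}(i)\) and relative bandwidth \(\hat{h}_d = bandwidth_d \cdot radius_d\) (a transcription of the code, not an independent derivation):

    pdf_i = \frac{1}{\prod_{d=1}^{D} bandwidth_d}
            \sum_{j \in \mathcal{N}(i)} \prod_{d=1}^{D}
            \frac{1}{\sqrt{2\pi}}
            \exp\left(-\frac{(p_{j,d} - p_{i,d})^2}{2\,\hat{h}_d^{2}}\right)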
+ + """ + bandwidth = tf.convert_to_tensor(value=bandwidth) + + rel_bandwidth = tf.reshape(bandwidth * neighborhood._radii, [1, -1]) + points = neighborhood._grid._sorted_points / rel_bandwidth + num_points = tf.shape(points)[0] + neighbors = neighborhood._neighbors + # point differences + nb_diff = tf.gather(points, neighbors[:, 0]) - \ + tf.gather(points, neighbors[:, 1]) + # kde on point differences + # gaussian kernel, note division by bandwidth was already done above + nb_kernel_value = tf.exp(-tf.pow(nb_diff, 2) / 2) / tf.sqrt(2 * _pi) + nb_kernel_value = tf.reduce_prod(nb_kernel_value, axis=1) + # sum over influence of neighbors + pdf = tf.math.unsorted_segment_sum(nb_kernel_value, + neighbors[:, 1], + num_points) / \ + tf.reduce_prod(bandwidth) + return pdf + + +def basis_proj_tf(neigh_basis, features, neighborhood, name=None): + """ Method to aggregate the features*basis for different neighborhoods. + + Args: + neigh_basis: A `float` `Tensor` of shape `[M, H]`, the projection of + each neighbor to the different basis. + features: A `float` `Tensor` of shape `[N_in, C]`, the input features. + neighborhood: A `Neighborhood` instance. + + Returns: + A `float` `Tensor` of shape ``[N_out, C, H]`, the weighted latent features. + """ + neigh_basis = tf.convert_to_tensor(value=neigh_basis, dtype=tf.float32) + features = tf.convert_to_tensor(value=features, dtype=tf.float32) + # get input in correct shapes + num_nbh = tf.shape(neighborhood._point_cloud_sampled._points)[0] + features_per_nb = tf.gather(features, + neighborhood._original_neigh_ids[:, 0]) + # Monte-Carlo Integration + weighted_features_per_nb = tf.expand_dims(features_per_nb, 2) *\ + tf.expand_dims(neigh_basis, 1) + weighted_latent_per_center = tf.math.unsorted_segment_sum( + weighted_features_per_nb, neighborhood._neighbors[:, 1], num_nbh) + return weighted_latent_per_center +''' \ No newline at end of file diff --git a/tensorflow_graphics/projects/point_convolutions/pylib/pc/custom_ops/custom_ops_wrapper.py b/tensorflow_graphics/projects/point_convolutions/pylib/pc/custom_ops/custom_ops_wrapper.py new file mode 100644 index 000000000..694ee4d6e --- /dev/null +++ b/tensorflow_graphics/projects/point_convolutions/pylib/pc/custom_ops/custom_ops_wrapper.py @@ -0,0 +1,198 @@ +# Copyright 2020 The TensorFlow Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" Wrappers for point cloud CUDA functions """ + +import tensorflow as tf +import tfg_custom_ops + + +def compute_keys(point_cloud, num_cells, cell_size, name=None): + """ Method to compute the regular grid keys of a point cloud. + + For a point in cell `c` the key is computed as + \\(key = batch_id * prod_{d=0}^{D} num_cells_{d} + \\) + \\(sum_{d=0}^{D}( c_{d} prod_{d'=d+1}^{D} num_cells_{d'} ) \\). + Args: + point_cloud: A `PointCloud` instance. + num_cells: An `int` `Tensor` of shape `[D]`, the total number of cells + per dimension. + cell_size: An `int` `Tensor` of shape `[D]`, the cell sizes per + dimension. 
+ + Returns: + An `int` `Tensor` of shape `[N]`, the keys per point. + + """ + aabb = point_cloud.get_AABB() + return tfg_custom_ops.compute_keys( + point_cloud._points, + point_cloud._batch_ids, + aabb._aabb_min / cell_size, + num_cells, + tf.math.reciprocal(cell_size)) +tf.no_gradient('ComputeKeys') + + +def build_grid_ds(sorted_keys, num_cells, batch_size, name=None): + """ Method to build a fast access data structure for point clouds. + + Creates a 2D regular grid in the first two dimension, saving the first and + last index belonging to that cell array. + Args: + sorted_keys: An `int` `Tensor` of shape `[N]`, the sorted keys. + num_cells: An `int` `Tensor` of shape `[D]`, the total number of cells + per dimension. + batch_size: An `int`. + + Returns: + An `int` `Tensor` of shape `[batch_size, num_cells[0], num_cells[1], 2]`. + + """ + return tfg_custom_ops.build_grid_ds( + sorted_keys, + num_cells, + num_cells, + batch_size) +tf.no_gradient('BuildGridDs') + + +def find_neighbors(grid, + point_cloud_centers, + radii, + max_neighbors=0, + name=None): + """ Method to find the neighbors of a center point cloud in another + point cloud. + + Args: + grid: A Grid instance, from which the neighbors are chosen. + point_cloud_centers: A `PointCloud` instance, containing the center points. + radii: An `float` `Tensor` of shape `[D]`, the radii to select neighbors + from. + max_neighbors: An `int`, if `0` all neighbors are selected. + + Returns: + center_neigh_ranges: An `int` `Tensor` of shape `[N]`, end of the ranges per + center point. You can get the neighbor ids of point `i` (i>0) with + `neighbors[center_neigh_ranges[i-1]:center_neigh_ranges[i]]`. + neighbors: An `int` `Tensor` of shape [M, 2], indices of the neighbor point + and the center for each neighbor. Follows the order of + `grid._sorted_points`. + + """ + return tfg_custom_ops.find_neighbors( + point_cloud_centers._points, + point_cloud_centers._batch_ids, + grid._sorted_points, + grid._sorted_keys, + grid.get_DS(), + grid._num_cells, + grid._aabb._aabb_min / grid._cell_sizes, + tf.math.reciprocal(grid._cell_sizes), + tf.math.reciprocal(radii), + max_neighbors) +tf.no_gradient('FindNeighbors') + + +def sampling(neighborhood, sample_mode, name=None): + """ Method to sample the points of a point cloud. + + Args: + neighborhood: A `Neighborhood` instance, which contains a point cloud with + its neighbors. + sample_mode: An `int`specifiying the sample mode, + `0` for average, `1` for poisson. + + Returns: + sampled_points: A `float` `Tensor` of shape `[S, D]`, the sampled points. + sampled_batch_ids: An `int` `Tensor` of shape `[S]`, the batch ids. + sampled_indices: An `int` `Tensor` of shape `[S]`, the indices to the + unsampled points. + Following the order of neighborhood._grid._sorted_points. + + """ + return tfg_custom_ops.sampling( + neighborhood._grid._sorted_points, + neighborhood._grid._sorted_batch_ids, + neighborhood._grid._sorted_keys, + neighborhood._grid._num_cells, + neighborhood._neighbors, + neighborhood._samples_neigh_ranges, + sample_mode) +tf.no_gradient('Sampling') + + +def compute_pdf(neighborhood, bandwidth, mode, name=None): + """ Method to compute the density distribution inside the neighborhoods of a + point cloud in euclidean space using kernel density estimation (KDE). + + Args: + neighborhood: A `Neighborhood` instance. + bandwidth: An `int` `Tensor` of shape `[D]`, the bandwidth of the KDE. + mode: A `KDEMode` value. 
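Note the mode constants: in the TensorFlow fallback shown earlier, `sample_mode == 0` runs the Poisson-disk branch and `sample_mode == 1` the cell-average branch. The cell-average case boils down to a segment mean over grid keys; a standalone sketch with hypothetical keys:

    import tensorflow as tf

    points = tf.constant([[0.1, 0.2], [0.2, 0.1], [0.9, 0.8]])
    keys = tf.constant([7, 7, 3])  # sorted keys; equal key = same cell
    _, key_ids = tf.unique(keys)   # -> [0, 0, 1]
    sampled = tf.math.segment_mean(points, key_ids)
    # -> [[0.15, 0.15], [0.9, 0.8]]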
+ + Returns: + A `float` `Tensor` of shape `[N]`, the estimated density per point, + with respect to the sorted points of the grid in `neighborhood`. + + """ + return tfg_custom_ops.compute_pdf_with_pt_grads( + neighborhood._grid._sorted_points, + neighborhood._neighbors, + neighborhood._samples_neigh_ranges, + tf.math.reciprocal(bandwidth), + tf.math.reciprocal(neighborhood._radii), + mode) + + +@tf.RegisterGradient("ComputePdfWithPtGrads") +def _compute_pdf_grad(op, *grads): + inPtsGrad = tfg_custom_ops.compute_pdf_pt_grads( + op.inputs[0], + op.inputs[1], + op.inputs[2], + op.inputs[3], + op.inputs[4], + grads[0], + op.get_attr("mode")) + return [inPtsGrad, None, None, None, None] + + +def basis_proj(neigh_basis, features, neighborhood): + """ Method to aggregate the features*basis for different neighborhoods. + + Args: + neigh_basis: A `float` `Tensor` of shape `[M, H]`, the projection of + each neighbor to the different basis. + features: A `float` `Tensor` of shape `[N_in, C]`, the input features. + neighborhood: A `Neighborhood` instance. + + Returns: + A `float` `Tensor` of shape ``[N_out, C, H]`, the weighted latent features. + + """ + return tfg_custom_ops.basis_proj( + neigh_basis, + features, + neighborhood._original_neigh_ids, + neighborhood._samples_neigh_ranges) + + +@tf.RegisterGradient("BasisProj") +def _basis_proj_grad(op, *grads): + basis_grads, feature_grads = \ + tfg_custom_ops.basis_proj_grads( + op.inputs[0], op.inputs[1], op.inputs[2], + op.inputs[3], grads[0]) + return [basis_grads, feature_grads, None, None] diff --git a/tensorflow_graphics/projects/point_convolutions/pylib/pc/custom_ops/tests/__init__.py b/tensorflow_graphics/projects/point_convolutions/pylib/pc/custom_ops/tests/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tensorflow_graphics/projects/point_convolutions/pylib/pc/custom_ops/tests/build_grid_ds_tf_test.py b/tensorflow_graphics/projects/point_convolutions/pylib/pc/custom_ops/tests/build_grid_ds_tf_test.py new file mode 100644 index 000000000..b12c2c4fe --- /dev/null +++ b/tensorflow_graphics/projects/point_convolutions/pylib/pc/custom_ops/tests/build_grid_ds_tf_test.py @@ -0,0 +1,96 @@ +# Copyright 2020 The TensorFlow Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
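The two `tf.RegisterGradient` decorators above follow the standard pattern for custom ops: the Python function receives the forward op plus the incoming gradients and must return one entry per op input, with `None` for inputs that need no gradient. A minimal sketch with a hypothetical op name:

    import tensorflow as tf

    @tf.RegisterGradient("HypotheticalOp")
    def _hypothetical_op_grad(op, *grads):
      # One entry per input of the forward op; None for inputs that need
      # no gradient (e.g. integer index tensors).
      return [grads[0], None]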
+# See the License for the specific +"""Class to test build_grid_ds tensorflow implementation""" + +import numpy as np +import tensorflow as tf +from absl.testing import parameterized +from tensorflow_graphics.util import test_case + +from pylib.pc import PointCloud +from pylib.pc.tests import utils +from pylib.pc.custom_ops.custom_ops_tf import build_grid_ds_tf +from pylib.pc.custom_ops import compute_keys + + +class BuildGridDSTF(test_case.TestCase): + + @parameterized.parameters( + (100, 32, 30, 0.1, 2), + (200, 16, 1, 0.2, 2), + (200, 8, 1, np.sqrt(2), 2), + (100, 32, 30, 0.1, 3), + (200, 16, 1, 0.2, 3), + (200, 8, 1, np.sqrt(3), 3), + (100, 32, 30, 0.1, 4), + (200, 16, 1, 0.2, 4), + (200, 8, 1, np.sqrt(4), 4) + ) + def test_grid_datastructure(self, + num_points, + batch_size, + scale, + radius, + dimension): + radius = np.float32(np.repeat(radius, dimension)) + points, batch_ids = utils._create_random_point_cloud_segmented( + batch_size, num_points * batch_size, dimension=dimension, + sizes=np.ones(batch_size, dtype=int) * num_points, clean_aabb=True) + point_cloud = PointCloud(points, batch_ids) + #Compute the number of cells in the grid. + aabb = point_cloud.get_AABB() + aabb_sizes = aabb._aabb_max - aabb._aabb_min + batch_num_cells = tf.cast( + tf.math.ceil(aabb_sizes / radius), tf.int32) + total_num_cells = tf.maximum( + tf.reduce_max(batch_num_cells, axis=0), 1) + keys = compute_keys(point_cloud, total_num_cells, radius) + keys = tf.sort(keys, direction='DESCENDING') + ds_tf = build_grid_ds_tf(keys, total_num_cells, batch_size) + + keys = keys.numpy() + ds_numpy = np.full((batch_size, total_num_cells[0], + total_num_cells[1], 2), 0) + if dimension == 2: + cells_per_2D_cell = 1 + elif dimension > 2: + cells_per_2D_cell = np.prod(total_num_cells[2:]) + for key_iter, key in enumerate(keys): + curDSIndex = key // cells_per_2D_cell + yIndex = curDSIndex % total_num_cells[1] + auxInt = curDSIndex // total_num_cells[1] + xIndex = auxInt % total_num_cells[0] + curbatch_ids = auxInt // total_num_cells[0] + + if key_iter == 0: + ds_numpy[curbatch_ids, xIndex, yIndex, 0] = key_iter + else: + prevKey = keys[key_iter - 1] + prevDSIndex = prevKey // cells_per_2D_cell + if prevDSIndex != curDSIndex: + ds_numpy[curbatch_ids, xIndex, yIndex, 0] = key_iter + + nextIter = key_iter + 1 + if nextIter >= len(keys): + ds_numpy[curbatch_ids, xIndex, yIndex, 1] = len(keys) + else: + nextKey = keys[key_iter + 1] + nextDSIndex = nextKey // cells_per_2D_cell + if nextDSIndex != curDSIndex: + ds_numpy[curbatch_ids, xIndex, yIndex, 1] = key_iter + 1 + + # check if the data structure is equal + self.assertAllEqual(ds_tf, ds_numpy) + +if __name__ == '__main__': + test_case.main() diff --git a/tensorflow_graphics/projects/point_convolutions/pylib/pc/custom_ops/tests/compute_keys_tf_test.py b/tensorflow_graphics/projects/point_convolutions/pylib/pc/custom_ops/tests/compute_keys_tf_test.py new file mode 100644 index 000000000..b32840084 --- /dev/null +++ b/tensorflow_graphics/projects/point_convolutions/pylib/pc/custom_ops/tests/compute_keys_tf_test.py @@ -0,0 +1,73 @@ +# Copyright 2020 The TensorFlow Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific +"""Class to test compute_keys tensorflow implementation""" + +import numpy as np +import tensorflow as tf +from absl.testing import parameterized +from tensorflow_graphics.util import test_case + +from pylib.pc import PointCloud +from pylib.pc.tests import utils +from pylib.pc.custom_ops.custom_ops_tf import compute_keys_tf + + +class ComputeKeysTF(test_case.TestCase): + + @parameterized.parameters( + (100, 32, 30, 0.1, 2), + (200, 16, 1, 0.2, 2), + (200, 8, 1, np.sqrt(2), 2), + (100, 32, 30, 0.1, 3), + (200, 16, 1, 0.2, 3), + (200, 8, 1, np.sqrt(3), 3), + (100, 32, 30, 0.1, 4), + (200, 16, 1, 0.2, 4), + (200, 8, 1, np.sqrt(4), 4) + ) + def test_compute_keys_tf(self, + num_points, + batch_size, + scale, + radius, + dimension): + radius = np.repeat(radius, dimension) + points, batch_ids = utils._create_random_point_cloud_segmented( + batch_size, num_points * batch_size, dimension=dimension, + sizes=np.ones(batch_size, dtype=int) * num_points, clean_aabb=False) + point_cloud = PointCloud(points, batch_ids) + + #Compute the number of cells in the grid. + aabb = point_cloud.get_AABB() + aabb_sizes = aabb._aabb_max - aabb._aabb_min + batch_num_cells = tf.cast( + tf.math.ceil(aabb_sizes / radius), tf.int32) + total_num_cells = tf.maximum( + tf.reduce_max(batch_num_cells, axis=0), 1) + + keys_tf = compute_keys_tf(point_cloud, total_num_cells, radius) + aabb_min = aabb._aabb_min.numpy() + + aabb_min_per_point = aabb_min[batch_ids, :] + cell_ind = np.floor((points - aabb_min_per_point) / radius).astype(int) + cell_ind = np.minimum(np.maximum(cell_ind, [0] * dimension), + total_num_cells) + cell_multiplier = np.flip(np.cumprod(np.flip(total_num_cells))) + cell_multiplier = np.concatenate((cell_multiplier, [1]), axis=0) + keys = batch_ids * cell_multiplier[0] + \ + np.sum(cell_ind * cell_multiplier[1:].reshape([1, -1]), axis=1) + # check unsorted keys + self.assertAllEqual(keys_tf, keys) + +if __name__ == '__main__': + test_case.main() diff --git a/tensorflow_graphics/projects/point_convolutions/pylib/pc/tests/grid_test.py b/tensorflow_graphics/projects/point_convolutions/pylib/pc/tests/grid_test.py new file mode 100644 index 000000000..e7b6211d3 --- /dev/null +++ b/tensorflow_graphics/projects/point_convolutions/pylib/pc/tests/grid_test.py @@ -0,0 +1,133 @@ +# Copyright 2020 The TensorFlow Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific +"""Class to test regular grid data structure""" + +import os +import sys +import numpy as np +import tensorflow as tf +from absl.testing import parameterized +from tensorflow_graphics.util import test_case + +from pylib.pc import PointCloud +from pylib.pc import Grid +from pylib.pc.tests import utils + + +class GridTest(test_case.TestCase): + + @parameterized.parameters( + (10000, 32, 30, 0.1, 2), + (20000, 16, 1, 0.2, 2), + (200, 8, 1, np.sqrt(2), 2), + (100, 32, 30, 0.1, 3), + (200, 16, 1, 0.2, 3), + (200, 8, 1, np.sqrt(3), 3), + (100, 32, 30, 0.1, 4), + (200, 16, 1, 0.2, 4), + (200, 8, 1, np.sqrt(4), 4) + ) + def test_compute_keys_with_sort(self, + num_points, + batch_size, + scale, + radius, + dimension): + radius = np.repeat(radius, dimension) + points, batch_ids = utils._create_random_point_cloud_segmented( + batch_size, num_points * batch_size, dimension=dimension, + sizes=np.ones(batch_size, dtype=int) * num_points, clean_aabb=False) + point_cloud = PointCloud(points, batch_ids) + aabb = point_cloud.get_AABB() + grid = Grid(point_cloud, radius) + + total_num_cells = grid._num_cells.numpy() + aabb_min = aabb._aabb_min.numpy() + + aabb_min_per_point = aabb_min[batch_ids, :] + cell_ind = np.floor((points - aabb_min_per_point) / radius).astype(int) + cell_ind = np.minimum(np.maximum(cell_ind, [0] * dimension), + total_num_cells) + cell_multiplier = np.flip(np.cumprod(np.flip(total_num_cells))) + cell_multiplier = np.concatenate((cell_multiplier, [1]), axis=0) + keys = batch_ids * cell_multiplier[0] + \ + np.sum(cell_ind * cell_multiplier[1:].reshape([1, -1]), axis=1) + # check unsorted keys + self.assertAllEqual(grid._cur_keys, keys) + + # sort descending + sorted_keys = np.flip(np.sort(keys)) + # check if the cell keys per point are equal + self.assertAllEqual(grid._sorted_keys, sorted_keys) + + @parameterized.parameters( + (100, 32, 30, 0.1, 2), + (200, 16, 1, 0.2, 2), + (200, 8, 1, np.sqrt(2), 2), + (100, 32, 30, 0.1, 3), + (200, 16, 1, 0.2, 3), + (200, 8, 1, np.sqrt(3), 3), + (200, 16, 1, 0.2, 4), + (200, 8, 1, np.sqrt(4), 4) + ) + def test_grid_datastructure(self, + num_points, + batch_size, + scale, + radius, + dimension): + radius = np.repeat(radius, dimension) + points, batch_ids = utils._create_random_point_cloud_segmented( + batch_size, num_points * batch_size, dimension=dimension, + sizes=np.ones(batch_size, dtype=int) * num_points, clean_aabb=True) + point_cloud = PointCloud(points, batch_ids) + aabb = point_cloud.get_AABB() + grid = Grid(point_cloud, radius, aabb) + + total_num_cells = grid._num_cells.numpy() + keys = grid._sorted_keys.numpy() + ds_numpy = np.full((batch_size, total_num_cells[0], + total_num_cells[1], 2), 0) + if dimension == 2: + cells_per_2D_cell = 1 + elif dimension > 2: + cells_per_2D_cell = np.prod(total_num_cells[2:]) + for key_iter, key in enumerate(keys): + curDSIndex = key // cells_per_2D_cell + yIndex = curDSIndex % total_num_cells[1] + auxInt = curDSIndex // total_num_cells[1] + xIndex = auxInt % total_num_cells[0] + curbatch_ids = auxInt // total_num_cells[0] + + if key_iter == 0: + ds_numpy[curbatch_ids, xIndex, yIndex, 0] = key_iter + else: + prevKey = keys[key_iter - 1] + prevDSIndex = prevKey // cells_per_2D_cell + if prevDSIndex != curDSIndex: + ds_numpy[curbatch_ids, xIndex, yIndex, 0] = key_iter + + nextIter = key_iter + 1 + if nextIter >= len(keys): + ds_numpy[curbatch_ids, xIndex, yIndex, 1] = len(keys) + else: + nextKey = keys[key_iter + 1] + nextDSIndex = nextKey // cells_per_2D_cell + if 
nextDSIndex != curDSIndex: + ds_numpy[curbatch_ids, xIndex, yIndex, 1] = key_iter + 1 + + # check if the data structure is equal + self.assertAllEqual(grid.get_DS(), ds_numpy) + +if __name__ == '__main__': + test_case.main() From 8db85138c190179f5950850766dd50a5603f445a Mon Sep 17 00:00:00 2001 From: Michael Schelling Date: Thu, 11 Mar 2021 11:02:55 +0100 Subject: [PATCH 09/29] removed unused custom_ops wrappers --- .../pylib/pc/custom_ops/__init__.py | 16 +- .../pylib/pc/custom_ops/custom_ops_wrapper.py | 198 ------------------ .../pylib/pc/tests/utils.py | 3 +- 3 files changed, 10 insertions(+), 207 deletions(-) delete mode 100644 tensorflow_graphics/projects/point_convolutions/pylib/pc/custom_ops/custom_ops_wrapper.py diff --git a/tensorflow_graphics/projects/point_convolutions/pylib/pc/custom_ops/__init__.py b/tensorflow_graphics/projects/point_convolutions/pylib/pc/custom_ops/__init__.py index cd2aee431..6509ca236 100644 --- a/tensorflow_graphics/projects/point_convolutions/pylib/pc/custom_ops/__init__.py +++ b/tensorflow_graphics/projects/point_convolutions/pylib/pc/custom_ops/__init__.py @@ -24,16 +24,16 @@ CUSTOM = 0 if CUSTOM: - from .custom_ops_wrapper import basis_proj + # from .custom_ops_wrapper import basis_proj from .custom_ops_wrapper import build_grid_ds from .custom_ops_wrapper import compute_keys - from .custom_ops_wrapper import compute_pdf - from .custom_ops_wrapper import find_neighbors - from .custom_ops_wrapper import sampling + # from .custom_ops_wrapper import compute_pdf + # from .custom_ops_wrapper import find_neighbors + # from .custom_ops_wrapper import sampling else: - from .custom_ops_tf import basis_proj_tf as basis_proj + # from .custom_ops_tf import basis_proj_tf as basis_proj from .custom_ops_tf import build_grid_ds_tf as build_grid_ds from .custom_ops_tf import compute_keys_tf as compute_keys - from .custom_ops_tf import compute_pdf_tf as compute_pdf - from .custom_ops_tf import find_neighbors_tf as find_neighbors - from .custom_ops_tf import sampling_tf as sampling + # from .custom_ops_tf import compute_pdf_tf as compute_pdf + # from .custom_ops_tf import find_neighbors_tf as find_neighbors + # from .custom_ops_tf import sampling_tf as sampling diff --git a/tensorflow_graphics/projects/point_convolutions/pylib/pc/custom_ops/custom_ops_wrapper.py b/tensorflow_graphics/projects/point_convolutions/pylib/pc/custom_ops/custom_ops_wrapper.py deleted file mode 100644 index 694ee4d6e..000000000 --- a/tensorflow_graphics/projects/point_convolutions/pylib/pc/custom_ops/custom_ops_wrapper.py +++ /dev/null @@ -1,198 +0,0 @@ -# Copyright 2020 The TensorFlow Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" Wrappers for point cloud CUDA functions """ - -import tensorflow as tf -import tfg_custom_ops - - -def compute_keys(point_cloud, num_cells, cell_size, name=None): - """ Method to compute the regular grid keys of a point cloud. 
- - For a point in cell `c` the key is computed as - \\(key = batch_id * prod_{d=0}^{D} num_cells_{d} + \\) - \\(sum_{d=0}^{D}( c_{d} prod_{d'=d+1}^{D} num_cells_{d'} ) \\). - Args: - point_cloud: A `PointCloud` instance. - num_cells: An `int` `Tensor` of shape `[D]`, the total number of cells - per dimension. - cell_size: An `int` `Tensor` of shape `[D]`, the cell sizes per - dimension. - - Returns: - An `int` `Tensor` of shape `[N]`, the keys per point. - - """ - aabb = point_cloud.get_AABB() - return tfg_custom_ops.compute_keys( - point_cloud._points, - point_cloud._batch_ids, - aabb._aabb_min / cell_size, - num_cells, - tf.math.reciprocal(cell_size)) -tf.no_gradient('ComputeKeys') - - -def build_grid_ds(sorted_keys, num_cells, batch_size, name=None): - """ Method to build a fast access data structure for point clouds. - - Creates a 2D regular grid in the first two dimension, saving the first and - last index belonging to that cell array. - Args: - sorted_keys: An `int` `Tensor` of shape `[N]`, the sorted keys. - num_cells: An `int` `Tensor` of shape `[D]`, the total number of cells - per dimension. - batch_size: An `int`. - - Returns: - An `int` `Tensor` of shape `[batch_size, num_cells[0], num_cells[1], 2]`. - - """ - return tfg_custom_ops.build_grid_ds( - sorted_keys, - num_cells, - num_cells, - batch_size) -tf.no_gradient('BuildGridDs') - - -def find_neighbors(grid, - point_cloud_centers, - radii, - max_neighbors=0, - name=None): - """ Method to find the neighbors of a center point cloud in another - point cloud. - - Args: - grid: A Grid instance, from which the neighbors are chosen. - point_cloud_centers: A `PointCloud` instance, containing the center points. - radii: An `float` `Tensor` of shape `[D]`, the radii to select neighbors - from. - max_neighbors: An `int`, if `0` all neighbors are selected. - - Returns: - center_neigh_ranges: An `int` `Tensor` of shape `[N]`, end of the ranges per - center point. You can get the neighbor ids of point `i` (i>0) with - `neighbors[center_neigh_ranges[i-1]:center_neigh_ranges[i]]`. - neighbors: An `int` `Tensor` of shape [M, 2], indices of the neighbor point - and the center for each neighbor. Follows the order of - `grid._sorted_points`. - - """ - return tfg_custom_ops.find_neighbors( - point_cloud_centers._points, - point_cloud_centers._batch_ids, - grid._sorted_points, - grid._sorted_keys, - grid.get_DS(), - grid._num_cells, - grid._aabb._aabb_min / grid._cell_sizes, - tf.math.reciprocal(grid._cell_sizes), - tf.math.reciprocal(radii), - max_neighbors) -tf.no_gradient('FindNeighbors') - - -def sampling(neighborhood, sample_mode, name=None): - """ Method to sample the points of a point cloud. - - Args: - neighborhood: A `Neighborhood` instance, which contains a point cloud with - its neighbors. - sample_mode: An `int`specifiying the sample mode, - `0` for average, `1` for poisson. - - Returns: - sampled_points: A `float` `Tensor` of shape `[S, D]`, the sampled points. - sampled_batch_ids: An `int` `Tensor` of shape `[S]`, the batch ids. - sampled_indices: An `int` `Tensor` of shape `[S]`, the indices to the - unsampled points. - Following the order of neighborhood._grid._sorted_points. 
- - """ - return tfg_custom_ops.sampling( - neighborhood._grid._sorted_points, - neighborhood._grid._sorted_batch_ids, - neighborhood._grid._sorted_keys, - neighborhood._grid._num_cells, - neighborhood._neighbors, - neighborhood._samples_neigh_ranges, - sample_mode) -tf.no_gradient('Sampling') - - -def compute_pdf(neighborhood, bandwidth, mode, name=None): - """ Method to compute the density distribution inside the neighborhoods of a - point cloud in euclidean space using kernel density estimation (KDE). - - Args: - neighborhood: A `Neighborhood` instance. - bandwidth: An `int` `Tensor` of shape `[D]`, the bandwidth of the KDE. - mode: A `KDEMode` value. - - Returns: - A `float` `Tensor` of shape `[N]`, the estimated density per point, - with respect to the sorted points of the grid in `neighborhood`. - - """ - return tfg_custom_ops.compute_pdf_with_pt_grads( - neighborhood._grid._sorted_points, - neighborhood._neighbors, - neighborhood._samples_neigh_ranges, - tf.math.reciprocal(bandwidth), - tf.math.reciprocal(neighborhood._radii), - mode) - - -@tf.RegisterGradient("ComputePdfWithPtGrads") -def _compute_pdf_grad(op, *grads): - inPtsGrad = tfg_custom_ops.compute_pdf_pt_grads( - op.inputs[0], - op.inputs[1], - op.inputs[2], - op.inputs[3], - op.inputs[4], - grads[0], - op.get_attr("mode")) - return [inPtsGrad, None, None, None, None] - - -def basis_proj(neigh_basis, features, neighborhood): - """ Method to aggregate the features*basis for different neighborhoods. - - Args: - neigh_basis: A `float` `Tensor` of shape `[M, H]`, the projection of - each neighbor to the different basis. - features: A `float` `Tensor` of shape `[N_in, C]`, the input features. - neighborhood: A `Neighborhood` instance. - - Returns: - A `float` `Tensor` of shape ``[N_out, C, H]`, the weighted latent features. 
- - """ - return tfg_custom_ops.basis_proj( - neigh_basis, - features, - neighborhood._original_neigh_ids, - neighborhood._samples_neigh_ranges) - - -@tf.RegisterGradient("BasisProj") -def _basis_proj_grad(op, *grads): - basis_grads, feature_grads = \ - tfg_custom_ops.basis_proj_grads( - op.inputs[0], op.inputs[1], op.inputs[2], - op.inputs[3], grads[0]) - return [basis_grads, feature_grads, None, None] diff --git a/tensorflow_graphics/projects/point_convolutions/pylib/pc/tests/utils.py b/tensorflow_graphics/projects/point_convolutions/pylib/pc/tests/utils.py index fe69ed795..4e54f9739 100644 --- a/tensorflow_graphics/projects/point_convolutions/pylib/pc/tests/utils.py +++ b/tensorflow_graphics/projects/point_convolutions/pylib/pc/tests/utils.py @@ -45,7 +45,7 @@ def _create_random_point_cloud_segmented(batch_size, (batch_ids, np.arange(0, batch_size), np.arange(0, batch_size))) return points, batch_ids - +''' def _create_random_point_cloud_padded(max_num_points, batch_shape, dimension=3, @@ -79,3 +79,4 @@ def _create_uniform_distributed_point_cloud_3D(num_points_root, if flat: points = points.reshape(-1, 3) return points +''' From 858eab079bf204d1141f840a65dc540c4aebba39 Mon Sep 17 00:00:00 2001 From: Michael Schelling Date: Thu, 11 Mar 2021 11:28:18 +0100 Subject: [PATCH 10/29] added missing test util --- .../projects/point_convolutions/pylib/pc/tests/utils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tensorflow_graphics/projects/point_convolutions/pylib/pc/tests/utils.py b/tensorflow_graphics/projects/point_convolutions/pylib/pc/tests/utils.py index 4e54f9739..6a8cf059b 100644 --- a/tensorflow_graphics/projects/point_convolutions/pylib/pc/tests/utils.py +++ b/tensorflow_graphics/projects/point_convolutions/pylib/pc/tests/utils.py @@ -45,7 +45,7 @@ def _create_random_point_cloud_segmented(batch_size, (batch_ids, np.arange(0, batch_size), np.arange(0, batch_size))) return points, batch_ids -''' + def _create_random_point_cloud_padded(max_num_points, batch_shape, dimension=3, @@ -59,7 +59,7 @@ def _create_random_point_cloud_padded(max_num_points, sizes = np.random.randint(1, max_num_points, batch_shape) return points, sizes - +''' def _create_uniform_distributed_point_cloud_2D(num_points_sqrt, scale=1, flat=False): From da7e761d01240758d012f3e1cc42c37d2400149f Mon Sep 17 00:00:00 2001 From: Michael Schelling Date: Thu, 11 Mar 2021 11:50:41 +0100 Subject: [PATCH 11/29] fixed indexing error in compute_keys_tf --- .../point_convolutions/pylib/pc/custom_ops/custom_ops_tf.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensorflow_graphics/projects/point_convolutions/pylib/pc/custom_ops/custom_ops_tf.py b/tensorflow_graphics/projects/point_convolutions/pylib/pc/custom_ops/custom_ops_tf.py index be0a8041d..113b7d8a6 100644 --- a/tensorflow_graphics/projects/point_convolutions/pylib/pc/custom_ops/custom_ops_tf.py +++ b/tensorflow_graphics/projects/point_convolutions/pylib/pc/custom_ops/custom_ops_tf.py @@ -43,7 +43,7 @@ def compute_keys_tf(point_cloud: PointCloud, num_cells, cell_size, name=None): cell_ind = tf.cast(cell_ind, tf.int32) cell_ind = tf.minimum( tf.maximum(cell_ind, tf.zeros_like(cell_ind)), - num_cells) + num_cells - 1) cell_multiplier = tf.math.cumprod(num_cells, reverse=True) cell_multiplier = tf.concat((cell_multiplier, [1]), axis=0) keys = point_cloud._batch_ids * cell_multiplier[0] + \ From a260bc47e5f085d1b4dffe027ca5f0cc624735eb Mon Sep 17 00:00:00 2001 From: Michael Schelling Date: Thu, 11 Mar 2021 12:11:22 +0100 Subject: 
[PATCH 12/29] neighborhood_initial --- .../pylib/pc/Neighborhood.py | 179 ++++++++++++++++++ .../point_convolutions/pylib/pc/__init__.py | 2 +- .../pylib/pc/custom_ops/__init__.py | 4 +- .../pylib/pc/custom_ops/custom_ops_tf.py | 4 +- .../tests/find_neighbors_tf_test.py | 98 ++++++++++ .../pylib/pc/tests/neighbors_test.py | 150 +++++++++++++++ .../pylib/pc/tests/utils.py | 4 +- 7 files changed, 434 insertions(+), 7 deletions(-) create mode 100755 tensorflow_graphics/projects/point_convolutions/pylib/pc/Neighborhood.py create mode 100644 tensorflow_graphics/projects/point_convolutions/pylib/pc/custom_ops/tests/find_neighbors_tf_test.py create mode 100644 tensorflow_graphics/projects/point_convolutions/pylib/pc/tests/neighbors_test.py diff --git a/tensorflow_graphics/projects/point_convolutions/pylib/pc/Neighborhood.py b/tensorflow_graphics/projects/point_convolutions/pylib/pc/Neighborhood.py new file mode 100755 index 000000000..410ecf624 --- /dev/null +++ b/tensorflow_graphics/projects/point_convolutions/pylib/pc/Neighborhood.py @@ -0,0 +1,179 @@ +# Copyright 2020 The TensorFlow Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Class to represent a neighborhood of points. + +Note: + In the following `D` is the spatial dimensionality of the points, + `N` is the number of (samples) points, and `M` is the total number of + adjacencies. + +Attributes: + _point_cloud_sampled: 'PointCloud', samples point cloud. + _grid : 'Grid', regular grid data structure. + _radii: `float` `Tensor` of shape [D], radius used to select the neighbors. + _samples_neigh_ranges: `int` `Tensor` of shape `[N]`, end of the ranges per + sample. + _neighbors: `int` `Tensor` of shape `[M,2]`, indices of the neighbor point, + with respect to the sorted point in the grid, and the sample for each + neighbor. + _original_neigh_ids: `int` `Tensor` of shape `[M,2]`, indices of the + neighbor point, with respect to the points in the input point cloud, + and the sample for each neighbor. + _pdf: `float` `Tensor` of shape `[M]`, PDF value for each neighbor. +""" + +import enum +import tensorflow as tf + +from pylib.pc import PointCloud +from pylib.pc import Grid +from pylib.pc.custom_ops import find_neighbors # , compute_pdf +from pylib.pc.utils import cast_to_num_dims + + +class KDEMode(enum.Enum): + """ Parameters for kernel density estimation (KDE) """ + constant = 0 + num_points = 1 + no_pdf = 2 + + +class Neighborhood: + """ Neighborhood of a point cloud. + + Args: + grid: A 'Grid' instance, the regular grid data structure. + radius: A `float` `Tensor` of shape `[D]`, the radius used to select the + neighbors. + point_cloud_sample: A 'PointCloud' instance. Samples point cloud. + If None, the sorted points from the grid will be used. + max_neighbors: An `int`, maximum number of neighbors per sample, + if `0` all neighbors are selected. 
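A minimal construction sketch for the class, assuming the `pylib.pc` exports enabled in this patch; with no sample point cloud, the grid's own points act as the samples:

    import numpy as np
    from pylib.pc import PointCloud, Grid, Neighborhood

    points = np.random.uniform(size=(100, 3)).astype(np.float32)
    batch_ids = np.zeros(100, dtype=np.int32)
    radius = np.float32([0.2, 0.2, 0.2])

    grid = Grid(PointCloud(points, batch_ids), radius)
    neighborhood = Neighborhood(grid, radius)  # point_cloud_sample=None

    pairs = neighborhood._neighbors               # [M, 2] neighbor/sample
    ranges = neighborhood._samples_neigh_ranges   # [N] range ends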
+ + """ + + def __init__(self, + grid: Grid, + radius, + point_cloud_sample=None, + max_neighbors=0, + name=None): + radii = tf.reshape(tf.cast(tf.convert_to_tensor(value=radius), + tf.float32), [-1]) + if radii.shape[0] == 1: + radii = tf.repeat(radius, grid._point_cloud._dimension) + #Save the attributes. + if point_cloud_sample is None: + self._equal_samples = True + self._point_cloud_sampled = PointCloud( + grid._sorted_points, grid._sorted_batch_ids, + grid._batch_size) + else: + self._equal_samples = False + self._point_cloud_sampled = point_cloud_sample + self._grid = grid + self._radii = radii + self.max_neighbors = max_neighbors + + #Find the neighbors. + self._samples_neigh_ranges, self._neighbors = find_neighbors( + self._grid, self._point_cloud_sampled, self._radii, max_neighbors) + + #Original neighIds. + aux_original_neigh_ids = tf.gather( + self._grid._sorted_indices, self._neighbors[:, 0]) + self._original_neigh_ids = tf.concat([ + tf.reshape(aux_original_neigh_ids, [-1, 1]), + tf.reshape(self._neighbors[:, 1], [-1, 1])], axis=-1) + + #Initialize the pdf + self._pdf = None + + self._transposed = None + +''' + def compute_pdf(self, + bandwidth=0.2, + mode=KDEMode.constant, + normalize=False, + name=None): + """Method to compute the probability density function of the neighborhoods. + + Note: By default the returned densitity is not normalized. + + Args: + bandwidth: A `float` `Tensor` of shape `[D]`, bandwidth used to compute + the pdf. (optional) + mode: 'KDEMode', mode used to determine the bandwidth. (optional) + normalize: A `bool`, if `True` each value is divided by be size of the + respective neighborhood. (optional) + + """ + bandwidth = cast_to_num_dims( + bandwidth, self._point_cloud_sampled._dimension) + + if mode == KDEMode.no_pdf: + self._pdf = tf.ones_like( + self._neighbors[:, 0], dtype=tf.float32) + else: + if self._equal_samples: + pdf_neighbors = self + else: + pdf_neighbors = Neighborhood(self._grid, self._radii, None) + _pdf = compute_pdf( + pdf_neighbors, bandwidth, mode.value) + self._pdf = tf.gather(_pdf, self._neighbors[:, 0]) + if normalize: + norm_factors = tf.math.unsorted_segment_sum( + tf.ones_like(self._pdf), + self._neighbors[:, 1], + self._point_cloud_sampled._points.shape[0]) + self._pdf = self._pdf / tf.gather(norm_factors, self._neighbors[:, 1]) + + def get_pdf(self, **kwargs): + """ Method which returns the pdfs of the neighborhoods. + + If no pdf was computed before, it will compute one using the provided + arguments. + + Args: + **kwargs: if no pdf is available, these arguments will be passed to + `compute_pdf`.(optional) + + Returns: + A `float` `Tensor` of shape `[M]`, the estimated densities. + + """ + if self._pdf is None: + self.compute_pdf(**kwargs) + return self._pdf +''' + + def get_grid(self): + """ Returns the grid used for neighborhood computation. + """ + return self._grid + + def transpose(self): + """ Returns the transposed neighborhood where center and neighbor points + are switched. 
(faster than recomputing) + """ + if self._transposed is None: + if self._equal_samples: + self._transposed = self + else: + grid = Grid(self._point_cloud_sampled, self._radii) + self._transposed = Neighborhood( + grid, self._radii, self._grid._point_cloud) + return self._transposed diff --git a/tensorflow_graphics/projects/point_convolutions/pylib/pc/__init__.py b/tensorflow_graphics/projects/point_convolutions/pylib/pc/__init__.py index 26d924a5b..72382f824 100755 --- a/tensorflow_graphics/projects/point_convolutions/pylib/pc/__init__.py +++ b/tensorflow_graphics/projects/point_convolutions/pylib/pc/__init__.py @@ -16,9 +16,9 @@ from .PointCloud import _AABB as AABB from .PointCloud import PointCloud from .Grid import Grid -''' from .Neighborhood import Neighborhood from .Neighborhood import KDEMode +''' from .sampling import poisson_disk_sampling, cell_average_sampling from .sampling import sample from .PointHierarchy import PointHierarchy diff --git a/tensorflow_graphics/projects/point_convolutions/pylib/pc/custom_ops/__init__.py b/tensorflow_graphics/projects/point_convolutions/pylib/pc/custom_ops/__init__.py index 6509ca236..5929afdb8 100644 --- a/tensorflow_graphics/projects/point_convolutions/pylib/pc/custom_ops/__init__.py +++ b/tensorflow_graphics/projects/point_convolutions/pylib/pc/custom_ops/__init__.py @@ -28,12 +28,12 @@ from .custom_ops_wrapper import build_grid_ds from .custom_ops_wrapper import compute_keys # from .custom_ops_wrapper import compute_pdf - # from .custom_ops_wrapper import find_neighbors + from .custom_ops_wrapper import find_neighbors # from .custom_ops_wrapper import sampling else: # from .custom_ops_tf import basis_proj_tf as basis_proj from .custom_ops_tf import build_grid_ds_tf as build_grid_ds from .custom_ops_tf import compute_keys_tf as compute_keys # from .custom_ops_tf import compute_pdf_tf as compute_pdf - # from .custom_ops_tf import find_neighbors_tf as find_neighbors + from .custom_ops_tf import find_neighbors_tf as find_neighbors # from .custom_ops_tf import sampling_tf as sampling diff --git a/tensorflow_graphics/projects/point_convolutions/pylib/pc/custom_ops/custom_ops_tf.py b/tensorflow_graphics/projects/point_convolutions/pylib/pc/custom_ops/custom_ops_tf.py index 113b7d8a6..baeb3f7d7 100644 --- a/tensorflow_graphics/projects/point_convolutions/pylib/pc/custom_ops/custom_ops_tf.py +++ b/tensorflow_graphics/projects/point_convolutions/pylib/pc/custom_ops/custom_ops_tf.py @@ -114,7 +114,7 @@ def build_grid_ds_tf(sorted_keys, num_cells, batch_size, name=None): axis=3) tf.no_gradient('BuildGridDsTF') -''' + def find_neighbors_tf(grid, point_cloud_centers, radii, @@ -257,7 +257,7 @@ def find_neighbors_no_grid(point_cloud, num_center_points) return neigh_ranges, neighbors tf.no_gradient('FindNeighborsNoGrid') - +''' def sampling_tf(neighborhood, sample_mode, name=None): """ Method to sample the points of a point cloud. diff --git a/tensorflow_graphics/projects/point_convolutions/pylib/pc/custom_ops/tests/find_neighbors_tf_test.py b/tensorflow_graphics/projects/point_convolutions/pylib/pc/custom_ops/tests/find_neighbors_tf_test.py new file mode 100644 index 000000000..637f77e61 --- /dev/null +++ b/tensorflow_graphics/projects/point_convolutions/pylib/pc/custom_ops/tests/find_neighbors_tf_test.py @@ -0,0 +1,98 @@ +# Copyright 2020 The TensorFlow Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
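The meshgrid tests below choose radii so the expected neighbor counts follow from lattice geometry: with spacing 0.1, a ball of radius 0.05 contains only the point itself, 0.11 adds the 6 face neighbors, 0.142 (just above 0.1·√2) adds the 12 edge diagonals, and 0.174 (just above 0.1·√3) adds the 8 corner diagonals. A quick NumPy check of those counts:

    import numpy as np

    offs = np.stack(np.meshgrid(*[[-1, 0, 1]] * 3), -1).reshape(-1, 3) * 0.1
    for r in (0.05, 0.11, 0.142, 0.174):
      print(np.sum(np.linalg.norm(offs, axis=1) < r))  # 1, 7, 19, 27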
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific +"""Class to test find neighbors tensorflow implementation""" + +import numpy as np +import tensorflow as tf +from absl.testing import parameterized +from tensorflow_graphics.util import test_case + +from pylib.pc import PointCloud, Grid +from pylib.pc.tests import utils +from pylib.pc.custom_ops.custom_ops_tf import find_neighbors_tf +from pylib.pc.custom_ops.custom_ops_tf import find_neighbors_no_grid + + +class FindNeighborsTF(test_case.TestCase): + + @parameterized.parameters( + (10, 4, 0.05, 1), + (10, 4, 0.11, 7), + (10, 4, 0.142, 19), + (10, 4, 0.174, 27), + ) + def test_neighbors_on_3D_meshgrid(self, + num_points_cbrt, + num_points_samples_cbrt, + radius, + expected_num_neighbors): + num_points = num_points_cbrt**3 + num_samples = num_points_samples_cbrt**3 + + points = utils._create_uniform_distributed_point_cloud_3D( + num_points_cbrt, flat=True) + batch_ids = np.zeros(num_points) + points_samples = utils._create_uniform_distributed_point_cloud_3D( + num_points_samples_cbrt, bb_min=1 / (num_points_samples_cbrt + 1), + flat=True) + batch_ids_samples = np.zeros(num_samples) + point_cloud = PointCloud(points, batch_ids) + point_cloud_samples = PointCloud(points_samples, batch_ids_samples) + cell_sizes = np.float32(np.repeat([radius], 3)) + grid = Grid(point_cloud, cell_sizes) + + # with grid + neigh_ranges, _ = find_neighbors_tf(grid, point_cloud_samples, cell_sizes) + num_neighbors = np.zeros(num_samples) + num_neighbors[0] = neigh_ranges[0] + num_neighbors[1:] = neigh_ranges[1:] - neigh_ranges[:-1] + expected_num_neighbors = \ + np.ones_like(num_neighbors) * expected_num_neighbors + self.assertAllEqual(num_neighbors, expected_num_neighbors) + + @parameterized.parameters( + (10, 4, 0.05, 1), + (10, 4, 0.11, 7), + (10, 4, 0.142, 19), + (10, 4, 0.174, 27), + ) + def test_neighbors_on_3D_meshgrid_without_gridDS(self, + num_points_cbrt, + num_points_samples_cbrt, + radius, + expected_num_neighbors): + num_points = num_points_cbrt**3 + num_samples = num_points_samples_cbrt**3 + + points = utils._create_uniform_distributed_point_cloud_3D( + num_points_cbrt, flat=True) + batch_ids = np.zeros(num_points) + points_samples = utils._create_uniform_distributed_point_cloud_3D( + num_points_samples_cbrt, bb_min=1 / (num_points_samples_cbrt + 1), + flat=True) + batch_ids_samples = np.zeros(num_samples) + point_cloud = PointCloud(points, batch_ids) + point_cloud_samples = PointCloud(points_samples, batch_ids_samples) + + # without grid + neigh_ranges, _ = find_neighbors_no_grid( + point_cloud, point_cloud_samples, radius) + num_neighbors = np.zeros(num_samples) + num_neighbors[0] = neigh_ranges[0] + num_neighbors[1:] = neigh_ranges[1:] - neigh_ranges[:-1] + expected_num_neighbors = \ + np.ones_like(num_neighbors) * expected_num_neighbors + self.assertAllEqual(num_neighbors, expected_num_neighbors) + +if __name__ == '__main__': + test_case.main() diff --git a/tensorflow_graphics/projects/point_convolutions/pylib/pc/tests/neighbors_test.py b/tensorflow_graphics/projects/point_convolutions/pylib/pc/tests/neighbors_test.py new file mode 100644 index 000000000..4b4beb716 --- /dev/null +++ 
b/tensorflow_graphics/projects/point_convolutions/pylib/pc/tests/neighbors_test.py @@ -0,0 +1,150 @@ +# Copyright 2020 The TensorFlow Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific +"""Class to test neighbor functions""" + +import os +import sys +import numpy as np +import tensorflow as tf +from absl.testing import parameterized +from tensorflow_graphics.util import test_case + +from pylib.pc import PointCloud +from pylib.pc import Grid +from pylib.pc import Neighborhood +from pylib.pc.tests import utils + + +class NeighborsTest(test_case.TestCase): + + @parameterized.parameters( + (100, 10, 4, 0.025, 2), + (100, 10, 4, 0.025, 3), + (100, 10, 4, 0.025, 4) + ) + def test_find_neighbors(self, + num_points, + num_samples, + batch_size, + radius, + dimension): + cell_sizes = np.repeat(radius, dimension) + points, batch_ids = utils._create_random_point_cloud_segmented( + batch_size, num_points * batch_size, dimension=dimension, + sizes=np.ones(batch_size, dtype=int) * num_points) + point_cloud = PointCloud(points, batch_ids) + samples_points, batch_ids_samples = \ + utils._create_random_point_cloud_segmented( + batch_size, num_samples * batch_size, dimension=dimension, + sizes=np.ones(batch_size, dtype=int) * num_samples) + point_cloud_sampled = PointCloud(samples_points, batch_ids_samples) + grid = Grid(point_cloud, cell_sizes) + neighborhood = Neighborhood(grid, cell_sizes, point_cloud_sampled) + sorted_points = grid._sorted_points + + neighbors_tf = neighborhood._neighbors + + neighbors_numpy = [[] for i in range(num_samples * batch_size)] + + for k in range(batch_size): + for i in range(num_samples): + for j in range(num_points): + diffArray = (samples_points[i + k * num_samples] - \ + sorted_points[(batch_size - k - 1) * num_points + j])\ + / cell_sizes + if np.linalg.norm(diffArray) < 1.0: + neighbors_numpy[k * num_samples + i].append((batch_size - k - 1)\ + * num_points + j) + + allFound = True + for neigh in neighbors_tf: + found = False + for ref_neigh in neighbors_numpy[neigh[1]]: + if ref_neigh == neigh[0]: + found = True + allFound = allFound and found + self.assertTrue(allFound) + + @parameterized.parameters( + (12, 100, 24, np.sqrt(2), 2), + (32, 1000, 32, 0.7, 2), + (32, 1000, 32, 0.1, 2), + (12, 100, 24, np.sqrt(3), 3), + (32, 1000, 32, 0.7, 3), + (32, 1000, 32, 0.1, 3), + (12, 100, 24, np.sqrt(4), 4), + (32, 10000, 32, 0.7, 4), + (32, 1000, 32, 0.1, 4), + ) + def test_neighbors_are_from_same_batch(self, + batch_size, + num_points, + num_samples, + radius, + dimension): + points, batch_ids = utils._create_random_point_cloud_segmented( + batch_size, num_points, dimension=dimension) + samples, batch_ids_samples = utils._create_random_point_cloud_segmented( + batch_size, num_samples, dimension=dimension) + radius = np.float32(np.repeat([radius], dimension)) + + point_cloud = PointCloud(points, batch_ids) + point_cloud_samples = PointCloud(samples, batch_ids_samples) + grid = Grid(point_cloud, radius) + neighborhood = Neighborhood(grid, radius, point_cloud_samples) + + batch_ids_in = tf.gather( + point_cloud._batch_ids, 
neighborhood._original_neigh_ids[:, 0]) + batch_ids_out = tf.gather( + point_cloud_samples._batch_ids, neighborhood._original_neigh_ids[:, 1]) + batch_check = batch_ids_in == batch_ids_out + self.assertTrue(np.all(batch_check)) + + @parameterized.parameters( + (10, 4, 0.05, 1), + (10, 4, 0.11, 7), + (10, 4, 0.142, 19), + (10, 4, 0.174, 27), + ) + def test_neighbors_on_3D_meshgrid(self, + num_points_cbrt, + num_points_samples_cbrt, + radius, + expected_num_neighbors): + num_points = num_points_cbrt**3 + num_samples = num_points_samples_cbrt**3 + + points = utils._create_uniform_distributed_point_cloud_3D( + num_points_cbrt, flat=True) + batch_ids = np.zeros(num_points) + points_samples = utils._create_uniform_distributed_point_cloud_3D( + num_points_samples_cbrt, bb_min=1 / (num_points_samples_cbrt + 1), + flat=True) + batch_ids_samples = np.zeros(num_samples) + point_cloud = PointCloud(points, batch_ids) + point_cloud_samples = PointCloud(points_samples, batch_ids_samples) + radius = np.float32(np.repeat([radius], 3)) + grid = Grid(point_cloud, radius) + neighborhood = Neighborhood(grid, radius, point_cloud_samples) + + neigh_ranges = neighborhood._samples_neigh_ranges + num_neighbors = np.zeros(num_samples) + num_neighbors[0] = neigh_ranges[0] + num_neighbors[1:] = neigh_ranges[1:] - neigh_ranges[:-1] + expected_num_neighbors = \ + np.ones_like(num_neighbors) * expected_num_neighbors + self.assertAllEqual(num_neighbors, expected_num_neighbors) + + +if __name__ == '__main__': + test_case.main() diff --git a/tensorflow_graphics/projects/point_convolutions/pylib/pc/tests/utils.py b/tensorflow_graphics/projects/point_convolutions/pylib/pc/tests/utils.py index 6a8cf059b..48a3cdb55 100644 --- a/tensorflow_graphics/projects/point_convolutions/pylib/pc/tests/utils.py +++ b/tensorflow_graphics/projects/point_convolutions/pylib/pc/tests/utils.py @@ -59,7 +59,7 @@ def _create_random_point_cloud_padded(max_num_points, sizes = np.random.randint(1, max_num_points, batch_shape) return points, sizes -''' + def _create_uniform_distributed_point_cloud_2D(num_points_sqrt, scale=1, flat=False): @@ -79,4 +79,4 @@ def _create_uniform_distributed_point_cloud_3D(num_points_root, if flat: points = points.reshape(-1, 3) return points -''' + From a8f88afe0b6be61235456199e96ab04049003c3d Mon Sep 17 00:00:00 2001 From: Michael Schelling Date: Thu, 11 Mar 2021 13:28:45 +0100 Subject: [PATCH 13/29] fixed indentation error --- .../projects/point_convolutions/pylib/pc/Neighborhood.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tensorflow_graphics/projects/point_convolutions/pylib/pc/Neighborhood.py b/tensorflow_graphics/projects/point_convolutions/pylib/pc/Neighborhood.py index 410ecf624..ac0210653 100755 --- a/tensorflow_graphics/projects/point_convolutions/pylib/pc/Neighborhood.py +++ b/tensorflow_graphics/projects/point_convolutions/pylib/pc/Neighborhood.py @@ -102,7 +102,7 @@ def __init__(self, self._transposed = None -''' + ''' def compute_pdf(self, bandwidth=0.2, mode=KDEMode.constant, @@ -158,7 +158,7 @@ def get_pdf(self, **kwargs): if self._pdf is None: self.compute_pdf(**kwargs) return self._pdf -''' + ''' def get_grid(self): """ Returns the grid used for neighborhood computation. 
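The patches above wire the neighborhood machinery together; a minimal usage sketch (not part of the patch series, assuming the random-data conventions of `neighbors_test.py` and that `Neighborhood.transposed()` is the cached accessor whose body opens this section):

import numpy as np
from pylib.pc import PointCloud, Grid, Neighborhood

# One batch entry with 100 random 3D points.
points = np.random.rand(100, 3).astype(np.float32)
batch_ids = np.zeros(100, dtype=np.int32)
radii = np.float32([0.1, 0.1, 0.1])

point_cloud = PointCloud(points, batch_ids)
grid = Grid(point_cloud, radii)            # regular grid acceleration structure
neighborhood = Neighborhood(grid, radii)   # ball neighborhoods of radius 0.1
transposed = neighborhood.transposed()     # cached reverse neighborhood

The `self._transposed is None` guard shown above makes repeated `transposed()` calls reuse the first result instead of recomputing the grid and the neighbor search.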
From 6e0982d2b11da6c0e5cdd69a8c7593d1a4e54f63 Mon Sep 17 00:00:00 2001 From: Michael Schelling Date: Thu, 11 Mar 2021 13:38:23 +0100 Subject: [PATCH 14/29] uncommented missing util function --- .../projects/point_convolutions/pylib/pc/utils.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/tensorflow_graphics/projects/point_convolutions/pylib/pc/utils.py b/tensorflow_graphics/projects/point_convolutions/pylib/pc/utils.py index 972222310..1e61a55d8 100644 --- a/tensorflow_graphics/projects/point_convolutions/pylib/pc/utils.py +++ b/tensorflow_graphics/projects/point_convolutions/pylib/pc/utils.py @@ -71,7 +71,7 @@ def check_valid_point_hierarchy_input(point_cloud, cell_sizes, pool_mode): f'dimension. Must be 1 or {point_cloud.dimension_} but is' +\ f'{curr_cell_sizes.shape[0]}.') - +''' def _flatten_features(features, point_cloud: PointCloud): """ Converts features of shape `[A1, ..., An, C]` to shape `[N, C]`. @@ -92,7 +92,7 @@ def _flatten_features(features, point_cloud: PointCloud): tf.assert_equal(tf.shape(features)[0], tf.shape(point_cloud._points)[0]) tf.assert_equal(tf.rank(features), 2) return features - +''' def cast_to_num_dims(values, num_dims, dtype=tf.float32): """ Converts an input to the specified `dtype` and repeats it `num_dims` @@ -112,4 +112,3 @@ def cast_to_num_dims(values, num_dims, dtype=tf.float32): if values.shape == [] or values.shape[0] == 1: values = tf.repeat(values, num_dims) return values -''' From ab73ea49342a6f0fa1d350106bff776358199aa3 Mon Sep 17 00:00:00 2001 From: Michael Schelling Date: Thu, 11 Mar 2021 13:39:37 +0100 Subject: [PATCH 15/29] uncommented missing util function --- .../projects/point_convolutions/pylib/pc/utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensorflow_graphics/projects/point_convolutions/pylib/pc/utils.py b/tensorflow_graphics/projects/point_convolutions/pylib/pc/utils.py index 1e61a55d8..dc4d83893 100644 --- a/tensorflow_graphics/projects/point_convolutions/pylib/pc/utils.py +++ b/tensorflow_graphics/projects/point_convolutions/pylib/pc/utils.py @@ -71,7 +71,7 @@ def check_valid_point_hierarchy_input(point_cloud, cell_sizes, pool_mode): f'dimension. Must be 1 or {point_cloud.dimension_} but is' +\ f'{curr_cell_sizes.shape[0]}.') -''' + def _flatten_features(features, point_cloud: PointCloud): """ Converts features of shape `[A1, ..., An, C]` to shape `[N, C]`. 
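The two helpers restored by these patches are easiest to read through their effect on shapes; a short sketch (illustrative only, mirroring the code in the hunks above):

import tensorflow as tf

# cast_to_num_dims: a scalar (or length-1) value is cast and repeated to one
# entry per spatial dimension, e.g. a cell size of 0.1 for a 3D point cloud:
print(tf.repeat(tf.cast(0.1, tf.float32), 3).numpy())  # [0.1 0.1 0.1]

# _flatten_features: padded features of shape [A1, ..., An, V, C] are
# gathered down to the flat per-point layout [N, C] using the point cloud's
# internal indexing; already-flat input is only validated (rank 2, first
# dimension equal to the number of points) and returned unchanged.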
From 621d296656c5f4e6a0abf7a0a002627af9c1df77 Mon Sep 17 00:00:00 2001 From: Michael Schelling Date: Thu, 11 Mar 2021 14:12:45 +0100 Subject: [PATCH 16/29] gather_nd incompatibility --- .../point_convolutions/pylib/pc/custom_ops/custom_ops_tf.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensorflow_graphics/projects/point_convolutions/pylib/pc/custom_ops/custom_ops_tf.py b/tensorflow_graphics/projects/point_convolutions/pylib/pc/custom_ops/custom_ops_tf.py index baeb3f7d7..c26c1fa32 100644 --- a/tensorflow_graphics/projects/point_convolutions/pylib/pc/custom_ops/custom_ops_tf.py +++ b/tensorflow_graphics/projects/point_convolutions/pylib/pc/custom_ops/custom_ops_tf.py @@ -187,7 +187,7 @@ def find_neighbors_tf(grid, tf.maximum(adj_cell_ids_2D, tf.zeros_like(adj_cell_ids_2D)), grid._num_cells[:2]) # get min and max point ids of the adjacent cells - adj_ids = tf.gather_nd(data_structure[cur_batch_id], [adj_cell_ids_2D]) + adj_ids = tf.compat.v1.gather_nd(data_structure[cur_batch_id], [adj_cell_ids_2D]) adj_ids_start = tf.reduce_min(adj_ids[0, :, 0]) adj_ids_end = tf.reduce_max(adj_ids[0, :, 1]) # choose points below certain distance and in same batch From 40dc80d79c268cc5f9a60ddd112bbdab031d051d Mon Sep 17 00:00:00 2001 From: Michael Schelling Date: Thu, 11 Mar 2021 14:34:42 +0100 Subject: [PATCH 17/29] gather_nd issue --- .../point_convolutions/pylib/pc/custom_ops/custom_ops_tf.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tensorflow_graphics/projects/point_convolutions/pylib/pc/custom_ops/custom_ops_tf.py b/tensorflow_graphics/projects/point_convolutions/pylib/pc/custom_ops/custom_ops_tf.py index c26c1fa32..1fb021669 100644 --- a/tensorflow_graphics/projects/point_convolutions/pylib/pc/custom_ops/custom_ops_tf.py +++ b/tensorflow_graphics/projects/point_convolutions/pylib/pc/custom_ops/custom_ops_tf.py @@ -187,7 +187,8 @@ def find_neighbors_tf(grid, tf.maximum(adj_cell_ids_2D, tf.zeros_like(adj_cell_ids_2D)), grid._num_cells[:2]) # get min and max point ids of the adjacent cells - adj_ids = tf.compat.v1.gather_nd(data_structure[cur_batch_id], [adj_cell_ids_2D]) + ds_cur_batch = tf.gather(data_structure, cur_batch_id) + adj_ids = tf.gather_nd(ds_cur_batch, [adj_cell_ids_2D]) adj_ids_start = tf.reduce_min(adj_ids[0, :, 0]) adj_ids_end = tf.reduce_max(adj_ids[0, :, 1]) # choose points below certain distance and in same batch From 2bf99f66ef5fd703a83fe9de252c1a610a4e327c Mon Sep 17 00:00:00 2001 From: Michael Schelling Date: Thu, 11 Mar 2021 14:52:54 +0100 Subject: [PATCH 18/29] fixed indexing error --- .../point_convolutions/pylib/pc/custom_ops/custom_ops_tf.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensorflow_graphics/projects/point_convolutions/pylib/pc/custom_ops/custom_ops_tf.py b/tensorflow_graphics/projects/point_convolutions/pylib/pc/custom_ops/custom_ops_tf.py index 1fb021669..9ca8b0728 100644 --- a/tensorflow_graphics/projects/point_convolutions/pylib/pc/custom_ops/custom_ops_tf.py +++ b/tensorflow_graphics/projects/point_convolutions/pylib/pc/custom_ops/custom_ops_tf.py @@ -185,7 +185,7 @@ def find_neighbors_tf(grid, # clip to range between 0 and max num cells adj_cell_ids_2D = tf.minimum( tf.maximum(adj_cell_ids_2D, tf.zeros_like(adj_cell_ids_2D)), - grid._num_cells[:2]) + grid._num_cells[:2] - 1) # get min and max point ids of the adjacent cells ds_cur_batch = tf.gather(data_structure, cur_batch_id) adj_ids = tf.gather_nd(ds_cur_batch, [adj_cell_ids_2D]) From 4f5fd221ec6231998300b495dbb667cfb7f3ff2d Mon Sep 
17 00:00:00 2001 From: Michael Schelling Date: Thu, 11 Mar 2021 15:12:57 +0100 Subject: [PATCH 19/29] initial pde --- .../pylib/pc/Neighborhood.py | 2 - .../pylib/pc/custom_ops/__init__.py | 4 +- .../pylib/pc/custom_ops/custom_ops_tf.py | 60 +------ .../custom_ops/tests/compute_pdf_tf_test.py | 152 ++++++++++++++++++ .../pylib/pc/tests/compute_pdf_test.py | 149 +++++++++++++++++ 5 files changed, 305 insertions(+), 62 deletions(-) create mode 100644 tensorflow_graphics/projects/point_convolutions/pylib/pc/custom_ops/tests/compute_pdf_tf_test.py create mode 100644 tensorflow_graphics/projects/point_convolutions/pylib/pc/tests/compute_pdf_test.py diff --git a/tensorflow_graphics/projects/point_convolutions/pylib/pc/Neighborhood.py b/tensorflow_graphics/projects/point_convolutions/pylib/pc/Neighborhood.py index ac0210653..f7948b39f 100755 --- a/tensorflow_graphics/projects/point_convolutions/pylib/pc/Neighborhood.py +++ b/tensorflow_graphics/projects/point_convolutions/pylib/pc/Neighborhood.py @@ -102,7 +102,6 @@ def __init__(self, self._transposed = None - ''' def compute_pdf(self, bandwidth=0.2, mode=KDEMode.constant, @@ -158,7 +157,6 @@ def get_pdf(self, **kwargs): if self._pdf is None: self.compute_pdf(**kwargs) return self._pdf - ''' def get_grid(self): """ Returns the grid used for neighborhood computation. diff --git a/tensorflow_graphics/projects/point_convolutions/pylib/pc/custom_ops/__init__.py b/tensorflow_graphics/projects/point_convolutions/pylib/pc/custom_ops/__init__.py index 5929afdb8..7beed10b1 100644 --- a/tensorflow_graphics/projects/point_convolutions/pylib/pc/custom_ops/__init__.py +++ b/tensorflow_graphics/projects/point_convolutions/pylib/pc/custom_ops/__init__.py @@ -27,13 +27,13 @@ # from .custom_ops_wrapper import basis_proj from .custom_ops_wrapper import build_grid_ds from .custom_ops_wrapper import compute_keys - # from .custom_ops_wrapper import compute_pdf + from .custom_ops_wrapper import compute_pdf from .custom_ops_wrapper import find_neighbors # from .custom_ops_wrapper import sampling else: # from .custom_ops_tf import basis_proj_tf as basis_proj from .custom_ops_tf import build_grid_ds_tf as build_grid_ds from .custom_ops_tf import compute_keys_tf as compute_keys - # from .custom_ops_tf import compute_pdf_tf as compute_pdf + from .custom_ops_tf import compute_pdf_tf as compute_pdf from .custom_ops_tf import find_neighbors_tf as find_neighbors # from .custom_ops_tf import sampling_tf as sampling diff --git a/tensorflow_graphics/projects/point_convolutions/pylib/pc/custom_ops/custom_ops_tf.py b/tensorflow_graphics/projects/point_convolutions/pylib/pc/custom_ops/custom_ops_tf.py index 9ca8b0728..f0b49bb56 100644 --- a/tensorflow_graphics/projects/point_convolutions/pylib/pc/custom_ops/custom_ops_tf.py +++ b/tensorflow_graphics/projects/point_convolutions/pylib/pc/custom_ops/custom_ops_tf.py @@ -324,68 +324,11 @@ def sampling_tf(neighborhood, sample_mode, name=None): tf.no_gradient('samplingTF') +''' _pi = tf.constant(np.pi) -def compute_pdf_inside_neighborhoods_tf(neighborhood, - bandwidth, - mode, - name=None): - """ Method to compute the density distribution inside the neighborhoods of a - point cloud in euclidean space using kernel density estimation (KDE). - - Args: - neighborhood: A `Neighborhood` instance. - bandwidth: An `int` `Tensor` of shape `[D]`, the bandwidth of the KDE. - mode: A `KDEMode` value. - - Returns: - A `float` `Tensor` of shape `[N]`, the estimated density per center point. 
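For reference, the kernel density estimate implemented both by the per-neighborhood helper deleted here and by the retained `compute_pdf_tf` is a product of per-dimension Gaussians, summed over each neighborhood and normalized by the bandwidth volume. A NumPy sketch for a single neighborhood (illustrative only; `diffs` stands for the point differences already divided by the relative bandwidth):

import numpy as np

def gaussian_kde_sketch(diffs, bandwidth):
  # per-dimension Gaussian, as in tf.exp(-kernel_input ** 2 / 2) / tf.sqrt(2 * pi)
  kernel = np.exp(-diffs ** 2 / 2) / np.sqrt(2 * np.pi)
  per_neighbor = np.prod(kernel, axis=1)          # product over dimensions
  return per_neighbor.sum() / np.prod(bandwidth)  # density estimate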
- - """ - bandwidth = tf.convert_to_tensor(value=bandwidth) - points = neighborhood._grid._sorted_points - neighbors = neighborhood._neighbors - nbh_ranges = neighborhood._samples_neigh_ranges - - # compute difference vectors inside neighborhoods - num_adjacencies = tf.shape(neighbors)[0] - nbh_start_ind = tf.concat(([0], nbh_ranges[0:-1]), axis=0) - nbh_sizes = nbh_ranges - nbh_start_ind - max_num_neighbors = tf.reduce_max(nbh_sizes) - nbh_sizes_per_nb = tf.repeat(nbh_sizes, nbh_sizes) - - nb_indices_1 = tf.repeat(neighbors[:, 0], nbh_sizes_per_nb) - - mask = tf.sequence_mask(nbh_sizes_per_nb, max_num_neighbors) - mask_indices = tf.cast(tf.compat.v1.where(mask), tf.int32) - indices_tensor = tf.repeat(tf.reshape(tf.range(0, max_num_neighbors), - [1, max_num_neighbors]), - num_adjacencies, axis=0) - nbh_start_per_nb = tf.repeat(nbh_start_ind, nbh_sizes) - indices_tensor = indices_tensor + \ - tf.reshape(nbh_start_per_nb, [num_adjacencies, 1]) - indices_2 = tf.gather_nd(params=indices_tensor, indices=mask_indices) - nb_indices_2 = tf.gather(neighbors[:, 0], indices_2) - - nb_diff = tf.gather(points, nb_indices_1) - tf.gather(points, nb_indices_2) - # kernel density estimation using the distances - rel_bandwidth = tf.reshape(bandwidth * neighborhood._radii, [1, -1]) - kernel_input = nb_diff / rel_bandwidth - # gaussian kernel - nb_kernel_value = tf.exp(-tf.pow(kernel_input, 2) / 2) / tf.sqrt(2 * _pi) - nb_kernel_value = tf.reduce_prod(nb_kernel_value, axis=1) - nb_id_per_nb_pair = tf.repeat(tf.range(0, num_adjacencies), - nbh_sizes_per_nb) - # sum over influence inside neighborhood - pdf = tf.math.unsorted_segment_sum(nb_kernel_value, - nb_id_per_nb_pair, - num_adjacencies) /\ - tf.reduce_prod(bandwidth) - return pdf - - def compute_pdf_tf(neighborhood, bandwidth, mode, name=None): """ Method to compute the density distribution using neighborhood information in euclidean space using kernel density estimation (KDE). @@ -420,6 +363,7 @@ def compute_pdf_tf(neighborhood, bandwidth, mode, name=None): tf.reduce_prod(bandwidth) return pdf +''' def basis_proj_tf(neigh_basis, features, neighborhood, name=None): """ Method to aggregate the features*basis for different neighborhoods. diff --git a/tensorflow_graphics/projects/point_convolutions/pylib/pc/custom_ops/tests/compute_pdf_tf_test.py b/tensorflow_graphics/projects/point_convolutions/pylib/pc/custom_ops/tests/compute_pdf_tf_test.py new file mode 100644 index 000000000..8484ad8f6 --- /dev/null +++ b/tensorflow_graphics/projects/point_convolutions/pylib/pc/custom_ops/tests/compute_pdf_tf_test.py @@ -0,0 +1,152 @@ +# Copyright 2020 The TensorFlow Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific +"""Class to test kernel density estimation tensorflow implementation""" + +import os +import sys +import numpy as np +from sklearn.neighbors import KernelDensity +import tensorflow as tf +from absl.testing import parameterized +from tensorflow_graphics.util import test_case + +from pylib.pc import PointCloud +from pylib.pc import Grid +from pylib.pc import KDEMode +from pylib.pc import Neighborhood +from pylib.pc.custom_ops.custom_ops_tf import compute_pdf_tf +from pylib.pc.tests import utils + + +class ComputePDFTFTest(test_case.TestCase): + + @parameterized.parameters( + (2, 100, 10, 0.2, 0.1, 2), + (2, 100, 10, 0.7, 0.1, 2), + (2, 100, 10, np.sqrt(2), 0.1, 2), + (2, 100, 10, 0.2, 0.2, 3), + (2, 100, 10, 0.7, 0.1, 3), + (2, 100, 10, np.sqrt(3), 0.2, 3), + (2, 100, 10, 0.2, 0.2, 4), + (2, 100, 10, np.sqrt(4), 0.2, 4) + ) + def test_compute_pdf_tf(self, + batch_size, + num_points, + num_samples_per_batch, + cell_size, + bandwidth, + dimension): + cell_sizes = np.float32(np.repeat(cell_size, dimension)) + bandwidths = np.float32(np.repeat(bandwidth, dimension)) + points, batch_ids = utils._create_random_point_cloud_segmented( + batch_size, batch_size * num_points, dimension, + equal_sized_batches=True) + samples = np.full((batch_size * num_samples_per_batch, dimension), + 0.0, dtype=float) + for i in range(batch_size): + cur_choice = np.random.choice(num_points, num_samples_per_batch, + replace=True) + samples[num_samples_per_batch * i:num_samples_per_batch * (i + 1), :] = \ + points[cur_choice + i * num_points] + samples_batch_ids = np.repeat(np.arange(0, batch_size), + num_samples_per_batch) + + point_cloud = PointCloud(points, batch_ids, batch_size) + grid = Grid(point_cloud, cell_sizes) + + point_cloud_samples = PointCloud(samples, samples_batch_ids, batch_size) + neighborhood = Neighborhood(grid, cell_sizes, point_cloud_samples) + neighbor_ids = neighborhood._neighbors + pdf_neighbors = Neighborhood(grid, cell_sizes) + pdf_tf = compute_pdf_tf(pdf_neighbors, bandwidths, KDEMode.constant) + pdf_tf = tf.gather(pdf_tf, neighbor_ids[:, 0]) + + sorted_points = grid._sorted_points.numpy() + sorted_batch_ids = grid._sorted_batch_ids.numpy() + neighbor_ids = neighborhood._neighbors + + pdf_real = [] + accum_points = [] + prev_batch_i = -1 + for pt_i, batch_i in enumerate(sorted_batch_ids): + if batch_i != prev_batch_i: + if len(accum_points) > 0: + test_points = np.array(accum_points) + kde_skl = KernelDensity(bandwidth=bandwidth) + kde_skl.fit(test_points) + log_pdf = kde_skl.score_samples(test_points) + pdf = np.exp(log_pdf) + if len(pdf_real) > 0: + pdf_real = np.concatenate((pdf_real, pdf), axis=0) + else: + pdf_real = pdf + accum_points = [sorted_points[pt_i] / cell_size] + prev_batch_i = batch_i + else: + accum_points.append(sorted_points[pt_i] / cell_size) + + test_points = np.array(accum_points) + kde_skl = KernelDensity(bandwidth=bandwidth) + kde_skl.fit(test_points) + log_pdf = kde_skl.score_samples(test_points) + pdf = np.exp(log_pdf) + if len(pdf_real) > 0: + pdf_real = np.concatenate((pdf_real, pdf), axis=0) + else: + pdf_real = pdf + + pdf_tf = np.asarray(pdf_tf / float(len(accum_points))) + pdf_skl = np.asarray(pdf_real)[neighbor_ids[:, 0]] + self.assertAllClose(pdf_tf, pdf_skl) + + @parameterized.parameters( + (1, 20, 1, np.sqrt(2), 2), + (1, 20, 1, np.sqrt(3), 3), + (1, 20, 1, np.sqrt(4), 4) + ) + def test_compute_pdf_jacobian(self, + batch_size, + num_points, + num_samples, + radius, + dimension): + cell_sizes = np.float32(np.repeat(radius, 
dimension)) + bandwidths = np.float32(np.repeat(radius, dimension)) + points, batch_ids = utils._create_random_point_cloud_segmented( + batch_size, batch_size * num_points, dimension, + equal_sized_batches=True) + samples = np.full((batch_size * num_samples, dimension), 0.0, dtype=float) + for i in range(batch_size): + cur_choice = np.random.choice(num_points, num_samples, replace=True) + samples[num_samples * i:num_samples * (i + 1), :] = \ + points[cur_choice + i * num_points] + samples_batch_ids = np.repeat(np.arange(0, batch_size), num_samples) + def compute_pdf(points_in): + point_cloud = PointCloud(points_in, batch_ids, batch_size) + grid = Grid(point_cloud, cell_sizes) + + point_cloud_samples = PointCloud(samples, samples_batch_ids, batch_size) + neighborhood = Neighborhood(grid, cell_sizes, point_cloud_samples) + neighborhood.compute_pdf(bandwidths, KDEMode.constant, normalize=True) + # account for influence of neighborhood size + _, _, counts = tf.unique_with_counts(neighborhood._neighbors[:, 1]) + max_num_nb = tf.cast(tf.reduce_max(counts), tf.float32) + return neighborhood._pdf / max_num_nb + + self.assert_jacobian_is_correct_fn( + compute_pdf, [np.float32(points)], atol=1e-4) + + +if __name__ == '__main__': + test_case.main() diff --git a/tensorflow_graphics/projects/point_convolutions/pylib/pc/tests/compute_pdf_test.py b/tensorflow_graphics/projects/point_convolutions/pylib/pc/tests/compute_pdf_test.py new file mode 100644 index 000000000..ba081b4e3 --- /dev/null +++ b/tensorflow_graphics/projects/point_convolutions/pylib/pc/tests/compute_pdf_test.py @@ -0,0 +1,149 @@ +# Copyright 2020 The TensorFlow Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific +"""Class to test kernel density estimation for point clouds""" + +import os +import sys +import numpy as np +from sklearn.neighbors import KernelDensity +import tensorflow as tf +from absl.testing import parameterized +from tensorflow_graphics.util import test_case + +from pylib.pc import PointCloud +from pylib.pc import Grid +from pylib.pc import KDEMode +from pylib.pc import Neighborhood +from pylib.pc.tests import utils + + +class ComputePDFTest(test_case.TestCase): + + @parameterized.parameters( + (4, 100, 10, 0.2, 0.1, 2), + (4, 100, 10, 0.7, 0.1, 2), + (4, 100, 10, np.sqrt(2), 0.1, 2), + (4, 100, 10, 0.2, 0.1, 3), + (4, 100, 10, 0.7, 0.1, 3), + (4, 100, 10, np.sqrt(3), 0.1, 3), + (4, 100, 10, 0.2, 0.1, 4), + (4, 100, 10, np.sqrt(4), 0.1, 4) + ) + def test_compute_pdf(self, + batch_size, + num_points, + num_samples_per_batch, + cell_size, + bandwidth, + dimension): + cell_sizes = np.float32(np.repeat(cell_size, dimension)) + bandwidths = np.float32(np.repeat(bandwidth, dimension)) + points, batch_ids = utils._create_random_point_cloud_segmented( + batch_size, batch_size * num_points, dimension, + equal_sized_batches=True) + samples = np.full((batch_size * num_samples_per_batch, dimension), + 0.0, dtype=float) + for i in range(batch_size): + cur_choice = np.random.choice(num_points, num_samples_per_batch, + replace=True) + samples[num_samples_per_batch * i:num_samples_per_batch * (i + 1), :] = \ + points[cur_choice + i * num_points] + samples_batch_ids = np.repeat(np.arange(0, batch_size), + num_samples_per_batch) + + point_cloud = PointCloud(points, batch_ids, batch_size) + grid = Grid(point_cloud, cell_sizes) + + point_cloud_samples = PointCloud(samples, samples_batch_ids, batch_size) + neighborhood = Neighborhood(grid, cell_sizes, point_cloud_samples) + neighborhood.compute_pdf(bandwidths, KDEMode.constant) + pdf_tf = neighborhood._pdf + + sorted_points = grid._sorted_points.numpy() + sorted_batch_ids = grid._sorted_batch_ids.numpy() + neighbor_ids = neighborhood._neighbors + + pdf_real = [] + accum_points = [] + prev_batch_i = -1 + for pt_i, batch_i in enumerate(sorted_batch_ids): + if batch_i != prev_batch_i: + if len(accum_points) > 0: + test_points = np.array(accum_points) + kde_skl = KernelDensity(bandwidth=bandwidth) + kde_skl.fit(test_points) + log_pdf = kde_skl.score_samples(test_points) + pdf = np.exp(log_pdf) + if len(pdf_real) > 0: + pdf_real = np.concatenate((pdf_real, pdf), axis=0) + else: + pdf_real = pdf + accum_points = [sorted_points[pt_i] / cell_size] + prev_batch_i = batch_i + else: + accum_points.append(sorted_points[pt_i] / cell_size) + + test_points = np.array(accum_points) + kde_skl = KernelDensity(bandwidth=bandwidth) + kde_skl.fit(test_points) + log_pdf = kde_skl.score_samples(test_points) + pdf = np.exp(log_pdf) + if len(pdf_real) > 0: + pdf_real = np.concatenate((pdf_real, pdf), axis=0) + else: + pdf_real = pdf + + pdf_tf = np.asarray(pdf_tf / float(len(accum_points))) + pdf_skl = np.asarray(pdf_real)[neighbor_ids[:, 0]] + self.assertAllClose(pdf_tf, pdf_skl) + + @parameterized.parameters( + (1, 200, 1, 4, 2), + (1, 200, 1, 4, 3), + (1, 100, 1, 4, 4) + ) + def test_compute_pdf_jacobian(self, + batch_size, + num_points, + num_samples, + radius, + dimension): + cell_sizes = np.float32(np.repeat(radius, dimension)) + bandwidths = np.float32(np.repeat(radius, dimension)) + points, batch_ids = utils._create_random_point_cloud_segmented( + batch_size, batch_size * num_points, dimension, + equal_sized_batches=True) + samples = 
np.full((batch_size * num_samples, dimension), 0.0, dtype=float)
+    for i in range(batch_size):
+      cur_choice = np.random.choice(num_points, num_samples, replace=True)
+      samples[num_samples * i:num_samples * (i + 1), :] = \
+          points[cur_choice + i * num_points]
+    samples_batch_ids = np.repeat(np.arange(0, batch_size), num_samples)

+    def compute_pdf(points_in):
+      point_cloud = PointCloud(points_in, batch_ids, batch_size)
+      grid = Grid(point_cloud, cell_sizes)
+
+      point_cloud_samples = PointCloud(samples, samples_batch_ids, batch_size)
+      neighborhood = Neighborhood(grid, cell_sizes, point_cloud_samples)
+      neighborhood.compute_pdf(bandwidths, KDEMode.constant, normalize=True)
+      # account for influence of neighborhood size
+      _, _, counts = tf.unique_with_counts(neighborhood._neighbors[:, 1])
+      max_num_nb = tf.cast(tf.reduce_max(counts), tf.float32)
+      return neighborhood._pdf / max_num_nb
+
+    self.assert_jacobian_is_correct_fn(
+        compute_pdf, [np.float32(points)], atol=1e-4, delta=1e-3)
+
+
+if __name__ == '__main__':
+  test_case.main()

From a99fa170a1142804285f96f041bfe418323153fe Mon Sep 17 00:00:00 2001
From: Michael Schelling
Date: Thu, 11 Mar 2021 15:22:41 +0100
Subject: [PATCH 20/29] added missing dependency

---
 requirements.txt | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/requirements.txt b/requirements.txt
index cdc4294fd..35d99498d 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -13,4 +13,5 @@ termcolor >= 1.1.0
 trimesh >= 2.37.22
 # Required by trimesh.
 networkx
-
+# Required for pytests of projects/point_convolutions.
+scikit-learn

From b35115940767a3b839d4aba2f70a0c88f7123a25 Mon Sep 17 00:00:00 2001
From: Michael Schelling
Date: Thu, 11 Mar 2021 15:46:13 +0100
Subject: [PATCH 21/29] uncommented missing import

---
 .../projects/point_convolutions/pylib/pc/Neighborhood.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tensorflow_graphics/projects/point_convolutions/pylib/pc/Neighborhood.py b/tensorflow_graphics/projects/point_convolutions/pylib/pc/Neighborhood.py
index f7948b39f..75276ce63 100755
--- a/tensorflow_graphics/projects/point_convolutions/pylib/pc/Neighborhood.py
+++ b/tensorflow_graphics/projects/point_convolutions/pylib/pc/Neighborhood.py
@@ -38,7 +38,7 @@
 from pylib.pc import PointCloud
 from pylib.pc import Grid
-from pylib.pc.custom_ops import find_neighbors  # , compute_pdf
+from pylib.pc.custom_ops import find_neighbors, compute_pdf
 from pylib.pc.utils import cast_to_num_dims

From ac9fc084cc41d4d408d3203249c5d07ce829661f Mon Sep 17 00:00:00 2001
From: Michael Schelling
Date: Fri, 12 Mar 2021 09:25:57 +0100
Subject: [PATCH 22/29] test acc fix

---
 .../pylib/pc/custom_ops/tests/compute_pdf_tf_test.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tensorflow_graphics/projects/point_convolutions/pylib/pc/custom_ops/tests/compute_pdf_tf_test.py b/tensorflow_graphics/projects/point_convolutions/pylib/pc/custom_ops/tests/compute_pdf_tf_test.py
index 8484ad8f6..dc66f13c0 100644
--- a/tensorflow_graphics/projects/point_convolutions/pylib/pc/custom_ops/tests/compute_pdf_tf_test.py
+++ b/tensorflow_graphics/projects/point_convolutions/pylib/pc/custom_ops/tests/compute_pdf_tf_test.py
@@ -145,7 +145,7 @@
       return neighborhood._pdf / max_num_nb
 
     self.assert_jacobian_is_correct_fn(
-        compute_pdf, [np.float32(points)], atol=1e-4)
+        compute_pdf, [np.float32(points)], atol=1e-4, delta=1e-4)

From ab4c109ee65b4e6f063c68dad3bfa8c348f3682e Mon Sep 17 00:00:00 2001
From: Michael
Schelling Date: Fri, 12 Mar 2021 10:51:50 +0100 Subject: [PATCH 23/29] spatial_sampling_initial --- .../point_convolutions/pylib/pc/__init__.py | 2 +- .../pylib/pc/custom_ops/__init__.py | 4 +- .../pylib/pc/custom_ops/custom_ops_tf.py | 3 +- .../point_convolutions/pylib/pc/sampling.py | 150 ++++++++++++++++++ .../pylib/pc/tests/sample_test.py | 129 +++++++++++++++ 5 files changed, 283 insertions(+), 5 deletions(-) create mode 100755 tensorflow_graphics/projects/point_convolutions/pylib/pc/sampling.py create mode 100644 tensorflow_graphics/projects/point_convolutions/pylib/pc/tests/sample_test.py diff --git a/tensorflow_graphics/projects/point_convolutions/pylib/pc/__init__.py b/tensorflow_graphics/projects/point_convolutions/pylib/pc/__init__.py index 72382f824..66c27be40 100755 --- a/tensorflow_graphics/projects/point_convolutions/pylib/pc/__init__.py +++ b/tensorflow_graphics/projects/point_convolutions/pylib/pc/__init__.py @@ -18,9 +18,9 @@ from .Grid import Grid from .Neighborhood import Neighborhood from .Neighborhood import KDEMode -''' from .sampling import poisson_disk_sampling, cell_average_sampling from .sampling import sample +''' from .PointHierarchy import PointHierarchy from pylib.pc import layers diff --git a/tensorflow_graphics/projects/point_convolutions/pylib/pc/custom_ops/__init__.py b/tensorflow_graphics/projects/point_convolutions/pylib/pc/custom_ops/__init__.py index 7beed10b1..17be8cb2e 100644 --- a/tensorflow_graphics/projects/point_convolutions/pylib/pc/custom_ops/__init__.py +++ b/tensorflow_graphics/projects/point_convolutions/pylib/pc/custom_ops/__init__.py @@ -29,11 +29,11 @@ from .custom_ops_wrapper import compute_keys from .custom_ops_wrapper import compute_pdf from .custom_ops_wrapper import find_neighbors - # from .custom_ops_wrapper import sampling + from .custom_ops_wrapper import sampling else: # from .custom_ops_tf import basis_proj_tf as basis_proj from .custom_ops_tf import build_grid_ds_tf as build_grid_ds from .custom_ops_tf import compute_keys_tf as compute_keys from .custom_ops_tf import compute_pdf_tf as compute_pdf from .custom_ops_tf import find_neighbors_tf as find_neighbors - # from .custom_ops_tf import sampling_tf as sampling + from .custom_ops_tf import sampling_tf as sampling diff --git a/tensorflow_graphics/projects/point_convolutions/pylib/pc/custom_ops/custom_ops_tf.py b/tensorflow_graphics/projects/point_convolutions/pylib/pc/custom_ops/custom_ops_tf.py index f0b49bb56..d992fa45e 100644 --- a/tensorflow_graphics/projects/point_convolutions/pylib/pc/custom_ops/custom_ops_tf.py +++ b/tensorflow_graphics/projects/point_convolutions/pylib/pc/custom_ops/custom_ops_tf.py @@ -258,7 +258,7 @@ def find_neighbors_no_grid(point_cloud, num_center_points) return neigh_ranges, neighbors tf.no_gradient('FindNeighborsNoGrid') -''' + def sampling_tf(neighborhood, sample_mode, name=None): """ Method to sample the points of a point cloud. @@ -324,7 +324,6 @@ def sampling_tf(neighborhood, sample_mode, name=None): tf.no_gradient('samplingTF') -''' _pi = tf.constant(np.pi) diff --git a/tensorflow_graphics/projects/point_convolutions/pylib/pc/sampling.py b/tensorflow_graphics/projects/point_convolutions/pylib/pc/sampling.py new file mode 100755 index 000000000..b10cff416 --- /dev/null +++ b/tensorflow_graphics/projects/point_convolutions/pylib/pc/sampling.py @@ -0,0 +1,150 @@ +# Copyright 2020 The TensorFlow Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Methods to sample point clouds. """ + +import tensorflow as tf + +from pylib.pc.custom_ops import sampling + +from pylib.pc import PointCloud, Neighborhood, Grid +from pylib.pc.utils import cast_to_num_dims + +sample_modes = {'average': 1, + 'cell average': 1, + 'cell_average': 1, + 'poisson': 0, + 'poisson disk': 0, + 'poisson_disk': 0} + + +def poisson_disk_sampling(point_cloud, + radius=None, + neighborhood=None, + return_ids=False, + name=None): + """ Poisson disk sampling of a point cloud. + + Note: Either `radius` or `neighborhood` must be provided. + + Args: + point_cloud: A `PointCloud` instance. + radius: A `float` or a `float` `Tensor` of shape `[D]`, the radius for the + Poisson disk sampling. + neighborhood: A `Neighborhood` instance. + return_ids: A `bool`, if `True` returns the indices of the sampled points. + (optional) + + Returns: + A `PointCloud` instance. + An `int` `Tensor` of shape `[S]`, if `return_ids` is `True`. + + Raises: + ValueError: If no radius or neighborhood is given. + + """ + if radius is None and neighborhood is None: + raise ValueError( + "Missing Argument! Either radius or neighborhood must be given!") + if neighborhood is None: + # compute neighborhood + radii = cast_to_num_dims(radius, point_cloud) + grid = Grid(point_cloud, radii) + neighborhood = Neighborhood(grid, radii) + + #Compute the sampling. + sampled_points, sampled_batch_ids, sampled_indices = \ + sampling(neighborhood, 1) + + sampled_point_cloud = PointCloud( + points=sampled_points, batch_ids=sampled_batch_ids, + batch_size=neighborhood._point_cloud_sampled._batch_size) + + if return_ids: + sampled_indices = tf.gather(neighborhood._grid._sorted_indices, + sampled_indices) + return sampled_point_cloud, sampled_indices + else: + return sampled_point_cloud + + +def cell_average_sampling(point_cloud, + cell_sizes=None, + grid=None, + name=None): + """ Cell average sampling of a point cloud. + + Note: Either `cell_sizes` or `grid` must be provided. + + Args: + point_cloud: A `PointCloud` instance. + cell_sizes: A `float` or a `float` `Tensor` of shape `[D]`, the cell sizes + for the sampling. + grid: A `Grid` instance. + + Returns: + A `PointCloud` instance. + + Raises: + ValueError: If no radius or grid is given. + + """ + if cell_sizes is None and grid is None: + raise ValueError( + "Missing Argument! Either cell_sizes or grid must be given!") + if grid is None: + # compute grid + cell_sizes = cast_to_num_dims(cell_sizes, point_cloud) + grid = Grid(point_cloud, cell_sizes) + + neighborhood = Neighborhood(grid, cell_sizes) + + #Compute the sampling. + sampled_points, sampled_batch_ids, sampled_indices = \ + sampling(neighborhood, 0) + + sampled_point_cloud = PointCloud( + points=sampled_points, batch_ids=sampled_batch_ids, + batch_size=neighborhood._point_cloud_sampled._batch_size) + + return sampled_point_cloud + + +def sample(neighborhood, sample_mode='poisson', name=None): + """ Sampling for a neighborhood. + + Args: + neighborhood: A `Neighborhood` instance. + sample_mode: A `string`, either `'poisson'`or `'cell average'`. 
+ + Returns: + A `PointCloud` instance, the sampled points. + An `int` `Tensor` of shape `[S]`, the indices of the sampled points, + `None` for cell average sampling. + + """ + sample_mode_value = sample_modes[sample_mode.lower()] + #Compute the sampling. + sampled_points, sampled_batch_ids, sampled_indices = \ + sampling(neighborhood, sample_mode_value) + + #Save the sampled point cloud. + if sample_mode_value == 0: + sampled_indices = tf.gather( + neighborhood._grid._sorted_indices, sampled_indices) + else: + sampled_indices = None + sampled_point_cloud = PointCloud( + points=sampled_points, batch_ids=sampled_batch_ids, + batch_size=neighborhood._point_cloud_sampled._batch_size_numpy) + return sampled_point_cloud, sampled_indices diff --git a/tensorflow_graphics/projects/point_convolutions/pylib/pc/tests/sample_test.py b/tensorflow_graphics/projects/point_convolutions/pylib/pc/tests/sample_test.py new file mode 100644 index 000000000..69284c726 --- /dev/null +++ b/tensorflow_graphics/projects/point_convolutions/pylib/pc/tests/sample_test.py @@ -0,0 +1,129 @@ +# Copyright 2020 The TensorFlow Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific +"""Class to test point sampling operations""" + +import os +import sys +import numpy as np +import tensorflow as tf +from absl.testing import parameterized +from tensorflow_graphics.util import test_case + +from pylib.pc import PointCloud +from pylib.pc import Grid +from pylib.pc import sample +from pylib.pc import Neighborhood +from pylib.pc.tests import utils + + +class SamplingTest(test_case.TestCase): + + @parameterized.parameters( + (100, 8, 0.1, 3), + (100, 8, 0.1, 3), + (100, 16, 0.1, 4) + ) + def test_sampling_poisson_disk_on_random( + self, num_points, batch_size, cell_size, dimension): + cell_sizes = np.float32(np.repeat(cell_size, dimension)) + points, batch_ids = utils._create_random_point_cloud_segmented( + batch_size, num_points * batch_size, dimension=dimension, + sizes=np.ones(batch_size, dtype=int) * num_points) + point_cloud = PointCloud(points, batch_ids) + grid = Grid(point_cloud, cell_sizes) + neighborhood = Neighborhood(grid, cell_sizes) + sampled_point_cloud, _ = sample(neighborhood, 'poisson') + + sampled_points = sampled_point_cloud._points.numpy() + sampled_batch_ids = sampled_point_cloud._batch_ids.numpy() + + min_dist = 1.0 + for i in range(batch_size): + indices = np.where(sampled_batch_ids == i) + diff = np.expand_dims(sampled_points[indices], 1) - \ + np.expand_dims(sampled_points[indices], 0) + dists = np.linalg.norm(diff, axis=2) + dists = np.sort(dists, axis=1) + min_dist = min(min_dist, np.amin(dists[:, 1])) + + self.assertLess(min_dist, cell_size + 1e-3) + + @parameterized.parameters( + (6, 1), + (100, 5) + ) + def test_sampling_poisson_disk_on_uniform(self, num_points_sqrt, scale): + points = utils._create_uniform_distributed_point_cloud_2D( + num_points_sqrt, scale=scale) + cell_sizes = scale * np.array([2, 2], dtype=np.float32) \ + / num_points_sqrt + batch_ids = np.zeros([len(points)]) + point_cloud = PointCloud(points, batch_ids) + grid = Grid(point_cloud, cell_sizes) + 
neighborhood = Neighborhood(grid, cell_sizes) + sample_point_cloud, _ = sample(neighborhood, 'poisson') + + sampled_points = sample_point_cloud._points.numpy() + expected_num_pts = num_points_sqrt ** 2 // 2 + self.assertTrue(len(sampled_points) == expected_num_pts) + + @parameterized.parameters( + (100, 2, 0.1, 3), + (100, 8, 0.7, 3), + (50, 2, np.sqrt(3), 3), + ) + def test_sampling_average_on_random( + self, num_points, batch_size, cell_size, dimension): + cell_sizes = np.repeat(cell_size, dimension) + points, batch_ids = utils._create_random_point_cloud_segmented( + batch_size, num_points * batch_size, dimension=dimension, + sizes=np.ones(batch_size, dtype=int) * num_points) + #print(points.shape, batch_ids.shape) + point_cloud = PointCloud(points=points, batch_ids=batch_ids) + grid = Grid(point_cloud, cell_sizes) + neighborhood = Neighborhood(grid, cell_sizes) + sample_point_cloud, _ = sample(neighborhood, 'average') + + sampled_points_tf = sample_point_cloud._points.numpy() + sorted_keys = neighborhood._grid._sorted_keys.numpy() + sorted_points = neighborhood._grid._sorted_points.numpy() + + sampled_points_numpy = [] + cur_point = np.repeat(0.0, dimension) + cur_key = -1 + cur_num_points = 0.0 + for pt_id, cur_key_point in enumerate(sorted_keys): + if cur_key_point != cur_key: + if cur_key != -1: + cur_point /= cur_num_points + sampled_points_numpy.append(cur_point) + cur_key = cur_key_point + cur_point = [0.0, 0.0, 0.0] + cur_num_points = 0.0 + cur_point += sorted_points[pt_id] + cur_num_points += 1.0 + cur_point /= cur_num_points + sampled_points_numpy.append(cur_point) + + equal = True + for point_numpy in sampled_points_numpy: + found = False + for point_tf in sampled_points_tf: + if np.all(np.abs(point_numpy - point_tf) < 0.0001): + found = True + equal = equal and found + self.assertTrue(equal) + + +if __name__ == '__main__': + test_case.main() From 552d6af0182d11a53fd411c62b938a11314778df Mon Sep 17 00:00:00 2001 From: Michael Schelling Date: Fri, 12 Mar 2021 12:04:37 +0100 Subject: [PATCH 24/29] fixed failed unit test --- .../point_convolutions/pylib/pc/sampling.py | 2 +- .../pylib/pc/tests/grid_test.py | 4 +-- .../pylib/pc/tests/sample_test.py | 9 ++++-- .../point_convolutions/pylib/pc/utils.py | 29 ------------------- 4 files changed, 9 insertions(+), 35 deletions(-) diff --git a/tensorflow_graphics/projects/point_convolutions/pylib/pc/sampling.py b/tensorflow_graphics/projects/point_convolutions/pylib/pc/sampling.py index b10cff416..ce8c37e14 100755 --- a/tensorflow_graphics/projects/point_convolutions/pylib/pc/sampling.py +++ b/tensorflow_graphics/projects/point_convolutions/pylib/pc/sampling.py @@ -15,7 +15,7 @@ import tensorflow as tf -from pylib.pc.custom_ops import sampling +from pylib.pc.custom_ops.custom_ops_tf import sampling_tf as sampling from pylib.pc import PointCloud, Neighborhood, Grid from pylib.pc.utils import cast_to_num_dims diff --git a/tensorflow_graphics/projects/point_convolutions/pylib/pc/tests/grid_test.py b/tensorflow_graphics/projects/point_convolutions/pylib/pc/tests/grid_test.py index e7b6211d3..94407d7e3 100644 --- a/tensorflow_graphics/projects/point_convolutions/pylib/pc/tests/grid_test.py +++ b/tensorflow_graphics/projects/point_convolutions/pylib/pc/tests/grid_test.py @@ -27,8 +27,8 @@ class GridTest(test_case.TestCase): @parameterized.parameters( - (10000, 32, 30, 0.1, 2), - (20000, 16, 1, 0.2, 2), + (100, 32, 30, 0.1, 2), + (200, 16, 1, 0.2, 2), (200, 8, 1, np.sqrt(2), 2), (100, 32, 30, 0.1, 3), (200, 16, 1, 0.2, 3), diff --git 
a/tensorflow_graphics/projects/point_convolutions/pylib/pc/tests/sample_test.py b/tensorflow_graphics/projects/point_convolutions/pylib/pc/tests/sample_test.py index 69284c726..988d545db 100644 --- a/tensorflow_graphics/projects/point_convolutions/pylib/pc/tests/sample_test.py +++ b/tensorflow_graphics/projects/point_convolutions/pylib/pc/tests/sample_test.py @@ -56,7 +56,7 @@ def test_sampling_poisson_disk_on_random( dists = np.sort(dists, axis=1) min_dist = min(min_dist, np.amin(dists[:, 1])) - self.assertLess(min_dist, cell_size + 1e-3) + self.assertLess(min_dist, cell_size + dimension * 1e-3) @parameterized.parameters( (6, 1), @@ -74,8 +74,11 @@ def test_sampling_poisson_disk_on_uniform(self, num_points_sqrt, scale): sample_point_cloud, _ = sample(neighborhood, 'poisson') sampled_points = sample_point_cloud._points.numpy() - expected_num_pts = num_points_sqrt ** 2 // 2 - self.assertTrue(len(sampled_points) == expected_num_pts) + expected_max_num_pts = num_points_sqrt ** 2 // 2 + expected_min_num_pts = np.ceil(num_points_sqrt ** 2 / 3) + self.assertTrue( + len(sampled_points) <= expected_max_num_pts and \ + len(sampled_points) >= expected_min_num_pts) @parameterized.parameters( (100, 2, 0.1, 3), diff --git a/tensorflow_graphics/projects/point_convolutions/pylib/pc/utils.py b/tensorflow_graphics/projects/point_convolutions/pylib/pc/utils.py index dc4d83893..7c6c3eab3 100644 --- a/tensorflow_graphics/projects/point_convolutions/pylib/pc/utils.py +++ b/tensorflow_graphics/projects/point_convolutions/pylib/pc/utils.py @@ -42,36 +42,7 @@ def check_valid_point_cloud_input(points, sizes, batch_ids): if points.shape[0] != batch_ids.shape[0]: raise AssertionError('Invalid sizes! Sizes of points and batch_ids are' + ' not equal.') - ''' -def check_valid_point_hierarchy_input(point_cloud, cell_sizes, pool_mode): - """ Checks that inputs to the constructor of class 'PontHierarchy' are valid. - - Args: - point_cloud: A 'PointCloud' instance. - cell_sizes: A `list` of `float` `Tensors`. - pool_mode: An `int`. - - Raises: - TypeError: if input is of invalid type - ValueError: if pool_mode is invalid, or cell_sizes dimension are invalid - or non-positive - - """ - if not isinstance(point_cloud, (PointCloud)): - raise TypeError('Input must be instance of class PointCloud') - if pool_mode not in [0, 1]: - raise ValueError('Unknown pooling mode.') - for curr_cell_sizes in cell_sizes: - if any(curr_cell_sizes <= 0): - raise ValueError('cell size must be positive.') - if not curr_cell_sizes.shape[0] in [1, point_cloud.dimension_]: - raise ValueError( - 'Invalid number of cell sizes for point cloud' +\ - f'dimension. Must be 1 or {point_cloud.dimension_} but is' +\ - f'{curr_cell_sizes.shape[0]}.') - - def _flatten_features(features, point_cloud: PointCloud): """ Converts features of shape `[A1, ..., An, C]` to shape `[N, C]`. 
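With sampling enabled by the last two patches, the high-level entry point mirrors what `sample_test.py` exercises; a sketch under those conventions (not part of the patch series):

import numpy as np
from pylib.pc import PointCloud, Grid, Neighborhood, sample

points = np.random.rand(1000, 3).astype(np.float32)
batch_ids = np.zeros(1000, dtype=np.int32)
cell_sizes = np.float32([0.1, 0.1, 0.1])

point_cloud = PointCloud(points, batch_ids)
grid = Grid(point_cloud, cell_sizes)
neighborhood = Neighborhood(grid, cell_sizes)

# Poisson disk sampling: keeps a subset with a minimum distance between
# points and returns the indices of the selected points.
poisson_pc, poisson_ids = sample(neighborhood, 'poisson')

# Cell average sampling: one averaged point per occupied grid cell;
# the returned indices are None in this mode.
average_pc, _ = sample(neighborhood, 'average')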
From dccca3929a08fe894cd88ec028ce220c23a8b52c Mon Sep 17 00:00:00 2001 From: Michael Schelling Date: Mon, 3 May 2021 13:58:10 +0200 Subject: [PATCH 25/29] added point hierarchy calss --- .../pylib/pc/PointHierarchy.py | 187 ++++++++++++++++++ .../point_convolutions/pylib/pc/__init__.py | 4 +- 2 files changed, 189 insertions(+), 2 deletions(-) create mode 100755 tensorflow_graphics/projects/point_convolutions/pylib/pc/PointHierarchy.py diff --git a/tensorflow_graphics/projects/point_convolutions/pylib/pc/PointHierarchy.py b/tensorflow_graphics/projects/point_convolutions/pylib/pc/PointHierarchy.py new file mode 100755 index 000000000..0e1d90631 --- /dev/null +++ b/tensorflow_graphics/projects/point_convolutions/pylib/pc/PointHierarchy.py @@ -0,0 +1,187 @@ +# Copyright 2020 The TensorFlow Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Class to represent a point cloud hierarchy.""" + +import numpy as np +import tensorflow as tf + + +from pylib.pc import PointCloud +from pylib.pc import Grid +from pylib.pc import Neighborhood +from pylib.pc import sample +from pylib.pc.utils import cast_to_num_dims + + +class PointHierarchy: + """ A hierarchy of sampled point clouds. + + Args: + point_cloud: A `PointCloud` instance.. + cell_sizes: A list of `floats` or `float` `Tensors` of shape `[D]`, + the cell sizes for the sampling. The length of the list defines + the number of samplings. + sample_mode: A `string`, either `'poisson'`or `'cell average'`. + + """ + + def __init__(self, + point_cloud: PointCloud, + cell_sizes, + sample_mode='poisson', + name=None): + #Initialize the attributes. + self._aabb = point_cloud.get_AABB() + self._point_clouds = [point_cloud] + self._cell_sizes = [] + self._neighborhoods = [] + + self._dimension = point_cloud._dimension + self._batch_shape = point_cloud._batch_shape + + #Create the different sampling operations. + cur_point_cloud = point_cloud + for sample_iter, cur_cell_sizes in enumerate(cell_sizes): + cur_cell_sizes = tf.convert_to_tensor( + value=cur_cell_sizes, dtype=tf.float32) + + # Check if the cell size is defined for all the dimensions. + # If not, the last cell size value is tiled until all the dimensions + # have a value. + cur_num_dims = tf.gather(cur_cell_sizes.shape, 0) + cur_cell_sizes = tf.cond( + cur_num_dims < self._dimension, + lambda: tf.concat((cur_cell_sizes, + tf.tile(tf.gather(cur_cell_sizes, + [tf.rank(cur_cell_sizes) - 1]), + [self._dimension - cur_num_dims])), + axis=0), + lambda: cur_cell_sizes) + tf.assert_greater( + self._dimension + 1, + cur_num_dims, + f'Too many dimensions in cell sizes {cur_num_dims} ' + \ + f'instead of max. 
{self._dimension}')
+      # old version, does not run in graph mode
+      # if cur_num_dims < self._dimension:
+      #   cur_cell_sizes = tf.concat((cur_cell_sizes,
+      #                               tf.tile(tf.gather(cur_cell_sizes,
+      #                                                 [tf.rank(cur_cell_sizes) - 1]),
+      #                                       [self._dimension - cur_num_dims])),
+      #                              axis=0)
+      # if cur_num_dims > self._dimension:
+      #   raise ValueError(
+      #       f'Too many dimensions in cell sizes {cur_num_dims} ' + \
+      #       f'instead of max. {self._dimension}')
+
+      self._cell_sizes.append(cur_cell_sizes)
+
+      #Create the sampling operation.
+      cur_grid = Grid(cur_point_cloud, cur_cell_sizes, self._aabb)
+      cur_neighborhood = Neighborhood(cur_grid, cur_cell_sizes)
+      cur_point_cloud, _ = sample(cur_neighborhood, sample_mode)
+
+      self._neighborhoods.append(cur_neighborhood)
+      cur_point_cloud.set_batch_shape(self._batch_shape)
+      self._point_clouds.append(cur_point_cloud)
+
+  def get_points(self, batch_id=None, max_num_points=None, name=None):
+    """ Returns the points.
+
+    Note:
+      In the following, A1 to An are optional batch dimensions.
+
+    If called without specifying 'batch_id' returns the points in padded
+    format `[A1, ..., An, V, D]`.
+
+    Args:
+      batch_id: An `int`, identifier of point cloud in the batch, if `None`
+        returns all points.
+
+    Returns:
+      A list of `float` `Tensors` of shape
+        `[N_i, D]`, if 'batch_id' was given
+      or
+        `[A1, ..., An, V_i, D]`, if no 'batch_id' was given.
+    """
+    points = []
+    for point_cloud in self._point_clouds:
+      points.append(point_cloud.get_points(batch_id))
+    return points
+
+  def get_sizes(self, name=None):
+    """ Returns the sizes of the point clouds in the point hierarchy.
+
+    Note:
+      In the following, A1 to An are optional batch dimensions.
+
+    Returns:
+      A `list` of `Tensors` of shape `[A1, ..., An]`.
+
+    """
+
+    sizes = []
+    for point_cloud in self._point_clouds:
+      sizes.append(point_cloud.get_sizes())
+    return sizes
+
+  def set_batch_shape(self, batch_shape, name=None):
+    """ Function to change the batch shape.
+
+    Use this to set a batch shape instead of using 'self._batch_shape'
+    to also change dependent variables.
+
+    Note:
+      In the following, A1 to An are optional batch dimensions.
+
+    Args:
+      batch_shape: A 1D `int` `Tensor` `[A1, ..., An]`.
+
+    Raises:
+      ValueError: if shape does not sum up to batch size.
+
+    """
+    for point_cloud in self._point_clouds:
+      point_cloud.set_batch_shape(batch_shape)
+
+  def get_neighborhood(self, i=None, transposed=False):
+    """ Returns the neighborhood between level `i` and `i+1` of the hierarchy.
+    If called without argument returns a list of all neighborhoods.
+
+    Args:
+      i: An `int`, can be negative but must be in range
+        `[-num_levels, num_levels-1]`.
+      transposed: A `bool`, if `True` returns the neighborhood between
+        level `i+1` and `i`.
+
+    Returns:
+      A `Neighborhood` instance or a `list` of `Neighborhood` instances.
+
+    """
+    if i is None:
+      if transposed:
+        return [nb.transposed() for nb in self._neighborhoods]
+      else:
+        return self._neighborhoods
+    else:
+      if transposed:
+        return self._neighborhoods[i].transposed()
+      else:
+        return self._neighborhoods[i]
+
+  def __getitem__(self, index):
+    return self._point_clouds[index]
+
+  def __len__(self):
+    return len(self._point_clouds)
diff --git a/tensorflow_graphics/projects/point_convolutions/pylib/pc/__init__.py b/tensorflow_graphics/projects/point_convolutions/pylib/pc/__init__.py
index 66c27be40..5960b5c0d 100755
--- a/tensorflow_graphics/projects/point_convolutions/pylib/pc/__init__.py
+++ b/tensorflow_graphics/projects/point_convolutions/pylib/pc/__init__.py
@@ -20,9 +20,9 @@
 from .Neighborhood import KDEMode
 from .sampling import poisson_disk_sampling, cell_average_sampling
 from .sampling import sample
-'''
-from .PointHierarchy import PointHierarchy
+from .PointHierarchy import PointHierarchy
+'''
 from pylib.pc import layers
 '''
 from pylib.pc import custom_ops

From 36a298a5cfcb94d3aa56598d9c0d1db18d4ac11a Mon Sep 17 00:00:00 2001
From: Michael Schelling
Date: Mon, 3 May 2021 14:12:58 +0200
Subject: [PATCH 26/29] added 1x1 conv and pooling layers

---
 .../pylib/pc/layers/Conv1x1.py                | 101 +++++++
 .../pylib/pc/layers/Pooling.py                | 246 ++++++++++++++++++
 .../pylib/pc/layers/__init__.py               |  27 ++
 .../pylib/pc/layers/tests/__init__.py         |   0
 .../pc/layers/tests/pooling_layers_test.py    | 139 ++++++++++
 .../pylib/pc/layers/utils.py                  | 245 +++++++++++++++++
 .../point_convolutions/pylib/pc/utils.py      |   4 +-
 7 files changed, 760 insertions(+), 2 deletions(-)
 create mode 100644 tensorflow_graphics/projects/point_convolutions/pylib/pc/layers/Conv1x1.py
 create mode 100644 tensorflow_graphics/projects/point_convolutions/pylib/pc/layers/Pooling.py
 create mode 100644 tensorflow_graphics/projects/point_convolutions/pylib/pc/layers/__init__.py
 create mode 100644 tensorflow_graphics/projects/point_convolutions/pylib/pc/layers/tests/__init__.py
 create mode 100644 tensorflow_graphics/projects/point_convolutions/pylib/pc/layers/tests/pooling_layers_test.py
 create mode 100644 tensorflow_graphics/projects/point_convolutions/pylib/pc/layers/utils.py

diff --git a/tensorflow_graphics/projects/point_convolutions/pylib/pc/layers/Conv1x1.py b/tensorflow_graphics/projects/point_convolutions/pylib/pc/layers/Conv1x1.py
new file mode 100644
index 000000000..3bfd2997c
--- /dev/null
+++ b/tensorflow_graphics/projects/point_convolutions/pylib/pc/layers/Conv1x1.py
@@ -0,0 +1,101 @@
+# Copyright 2020 The TensorFlow Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Class to represent point cloud 1x1 convolution"""
+
+import tensorflow as tf
+from pylib.pc.utils import _flatten_features
+from pylib.pc.layers.utils import _format_output
+
+from pylib.pc import PointCloud
+
+
+class Conv1x1(tf.Module):
+  """ A 1x1 convolution on the point features, i.e. a fully connected layer
+  shared over all points; it is equivalent to a `tf.keras.layers.Conv1D` with
+  kernel size 1 and is implemented as a single feature matrix multiplication.
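+
+  A short usage sketch (illustrative; `features` is assumed to be a
+  `[N, 8]` `float` `Tensor` defined on `point_cloud`):
+
+    conv1x1 = Conv1x1(num_features_in=8, num_features_out=16)
+    out_features = conv1x1(features, point_cloud)  # shape [N, 16]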
+
+  Note: This uses less memory than a point cloud convolution layer with a 1x1
+    neighborhood, but might be slower for large feature dimensions.
+
+  Args:
+    num_features_in: An `int` `C_in`, the number of input features.
+    num_features_out: An `int` `C_out`, the number of output features.
+    name: A `string` with the name of the module.
+  """
+
+  def __init__(self, num_features_in, num_features_out, name=None):
+
+    super().__init__(name=name)
+
+    if name is not None:
+      weights_name = name + "/weights"
+      bias_name = name + "/bias"
+    else:
+      weights_name = "Conv1x1/weights"
+      bias_name = "Conv1x1/bias"
+
+    std_dev = tf.math.sqrt(2.0 / float(num_features_in))
+    weights_init_obj = tf.initializers.TruncatedNormal(stddev=std_dev)
+    self._weights_tf = tf.Variable(
+        weights_init_obj(
+            shape=[num_features_in, num_features_out],
+            dtype=tf.float32),
+        trainable=True,
+        name=weights_name)
+
+    bias_init_obj = tf.initializers.zeros()
+    self._bias_tf = tf.Variable(
+        bias_init_obj(
+            shape=[1, num_features_out],
+            dtype=tf.float32),
+        trainable=True,
+        name=bias_name)
+
+  def __call__(self,
+               features,
+               point_cloud,
+               return_sorted=False,
+               return_padded=False):
+    """ Computes the 1x1 convolution on a point cloud.
+
+    Note:
+      In the following, `A1` to `An` are optional batch dimensions.
+      `C_in` is the number of input features.
+      `C_out` is the number of output features.
+
+    Args:
+      features: A `float` `Tensor` of shape `[N_in, C_in]` or
+        `[A1, ..., An, V, C_in]`.
+      point_cloud: A `PointCloud` instance, on which the features are
+        defined.
+      return_sorted: A `bool`, if `True` the output tensor is sorted
+        according to the batch_ids. (optional)
+      return_padded: A `bool`, if `True` the output tensor is sorted and
+        zero padded. (optional)
+
+    Returns:
+      A `float` `Tensor` of shape
+        `[N_out, C_out]`, if `return_padded` is `False`
+      or
+        `[A1, ..., An, V_out, C_out]`, if `return_padded` is `True`.
+
+    """
+    features = tf.cast(tf.convert_to_tensor(value=features),
+                       dtype=tf.float32)
+    features = _flatten_features(features, point_cloud)
+    features = tf.matmul(features, self._weights_tf) + self._bias_tf
+    return _format_output(features,
+                          point_cloud,
+                          return_sorted,
+                          return_padded)
diff --git a/tensorflow_graphics/projects/point_convolutions/pylib/pc/layers/Pooling.py b/tensorflow_graphics/projects/point_convolutions/pylib/pc/layers/Pooling.py
new file mode 100644
index 000000000..77fcb8e98
--- /dev/null
+++ b/tensorflow_graphics/projects/point_convolutions/pylib/pc/layers/Pooling.py
@@ -0,0 +1,246 @@
+# Copyright 2020 The TensorFlow Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Classes for point cloud spatial pooling operations"""
+
+import tensorflow as tf
+from pylib.pc.utils import _flatten_features
+from pylib.pc.layers.utils import _format_output
+
+from pylib.pc import PointCloud
+from pylib.pc import Grid
+from pylib.pc import Neighborhood
+
+
+class GlobalMaxPooling:
+  """ Global max pooling on a point cloud.
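+
+  A minimal usage sketch (illustrative):
+
+    pooled = GlobalMaxPooling()(features, point_cloud)  # shape [B, C]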
+
+  """
+
+  def __call__(self,
+               features,
+               point_cloud: PointCloud,
+               return_padded=False,
+               name=None):
+    """ Performs a global max pooling on a point cloud.
+
+    Note:
+      In the following, `A1` to `An` are optional batch dimensions.
+
+    Args:
+      features: A tensor of shape `[N, C]` or `[A1, ..., An, V, C]`.
+      point_cloud: A `PointCloud` instance.
+      return_padded: A `bool`, if `True` reshapes the output to match the
+        batch shape of `point_cloud`.
+
+    Returns:
+      A tensor of same type as `features` and of shape
+        `[B, C]`, if not `return_padded`
+      or
+        `[A1, ..., An, C]`, if `return_padded`
+
+    """
+    features = tf.convert_to_tensor(value=features)
+    features = _flatten_features(features, point_cloud)
+    features = tf.math.unsorted_segment_max(
+        features,
+        segment_ids=point_cloud._batch_ids,
+        num_segments=point_cloud._batch_size)
+    if return_padded:
+      shape = tf.concat((point_cloud._batch_shape, [-1]), axis=0)
+      features = tf.reshape(features, shape)
+    return features
+
+
+class GlobalAveragePooling:
+  """ Global average pooling on a point cloud.
+  """
+
+  def __call__(self,
+               features,
+               point_cloud: PointCloud,
+               return_padded=False,
+               name=None):
+    """ Performs a global average pooling on a point cloud.
+
+    Note:
+      In the following, `A1` to `An` are optional batch dimensions.
+
+    Args:
+      features: A tensor of shape `[N, C]` or `[A1, ..., An, V, C]`.
+      point_cloud: A `PointCloud` instance.
+      return_padded: A `bool`, if `True` reshapes the output to match the
+        batch shape of `point_cloud`.
+
+    Returns:
+      A tensor of same type as `features` and of shape
+        `[B, C]`, if not `return_padded`
+      or
+        `[A1, ..., An, C]`, if `return_padded`
+
+    """
+    features = tf.convert_to_tensor(value=features)
+    features = _flatten_features(features, point_cloud)
+    features = tf.math.unsorted_segment_mean(
+        features,
+        segment_ids=point_cloud._batch_ids,
+        num_segments=point_cloud._batch_size)
+    if return_padded:
+      shape = tf.concat((point_cloud._batch_shape, [-1]), axis=0)
+      features = tf.reshape(features, shape)
+    return features
+
+
+class _LocalPointPooling:
+  """ Local point pooling between two point clouds.
+  """
+
+  def __call__(self,
+               pool_op,
+               features,
+               point_cloud_in: PointCloud,
+               point_cloud_out: PointCloud,
+               pooling_radius,
+               return_sorted=False,
+               return_padded=False,
+               name=None,
+               default_name="custom pooling"):
+    """ Computes a local pooling between two point clouds specified by
+    `pool_op`.
+
+    Note:
+      In the following, `A1` to `An` are optional batch dimensions.
+
+    Args:
+      pool_op: A segment pooling function of type
+        `tf.math.unsorted_segment_*`.
+      features: A `float` `Tensor` of shape `[N_in, C]` or
+        `[A1, ..., An, V_in, C]`.
+      point_cloud_in: A `PointCloud` instance on which the features are
+        defined.
+      point_cloud_out: A `PointCloud` instance, on which the output features
+        are defined.
+      pooling_radius: A `float` or a `float` `Tensor` of shape `[D]`.
+      return_sorted: A `bool`, if `True` the output tensor is sorted
+        according to the sorted batch ids of `point_cloud_out`.
+      return_padded: A `bool`, if `True` the output tensor is sorted and
+        zero padded.
+
+    Returns:
+      A `float` `Tensor` of shape
+        `[N_out, C]`, if `return_padded` is `False`
+      or
+        `[A1, ..., An, V_out, C]`, if `return_padded` is `True`.
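+
+      This class is not meant to be called directly; `MaxPooling` and
+      `AveragePooling` below pass the matching `tf.math.unsorted_segment_*`
+      op as `pool_op`, e.g. (illustrative):
+
+        out = MaxPooling()(features, pc_in, pc_out, pooling_radius=0.2)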
+
+    """
+    features = tf.convert_to_tensor(value=features)
+    features = _flatten_features(features, point_cloud_in)
+    pooling_radius = tf.convert_to_tensor(
+        value=pooling_radius, dtype=tf.float32)
+    # Reshape to rank 1 so scalar radii are handled as well.
+    pooling_radius = tf.reshape(pooling_radius, [-1])
+    if pooling_radius.shape[0] == 1:
+      pooling_radius = tf.repeat(pooling_radius, point_cloud_in._dimension)
+
+    # Compute the grid.
+    grid_in = Grid(point_cloud_in, pooling_radius)
+
+    # Compute the neighborhood keys.
+    neigh = Neighborhood(grid_in, pooling_radius, point_cloud_out)
+    features_on_neighbors = tf.gather(
+        features, neigh._original_neigh_ids[:, 0])
+
+    # Pool the features in the neighborhoods.
+    features_out = pool_op(
+        data=features_on_neighbors,
+        segment_ids=neigh._original_neigh_ids[:, 1],
+        num_segments=tf.shape(point_cloud_out._points)[0])
+    return _format_output(features_out,
+                          point_cloud_out,
+                          return_sorted,
+                          return_padded)
+
+
+class MaxPooling(_LocalPointPooling):
+  """ Local max pooling between two point clouds.
+  """
+
+  def __call__(self,
+               features,
+               point_cloud_in: PointCloud,
+               point_cloud_out: PointCloud,
+               pooling_radius,
+               return_sorted=False,
+               return_padded=False,
+               name=None):
+    """ Computes a local max pooling between two point clouds.
+
+    Args:
+      features: A `float` `Tensor` of shape `[N_in, C]` or
+        `[A1, ..., An, V_in, C]`.
+      point_cloud_in: A `PointCloud` instance on which the features are
+        defined.
+      point_cloud_out: A `PointCloud` instance, on which the output features
+        are defined.
+      pooling_radius: A `float` or a `float` `Tensor` of shape `[D]`.
+      return_sorted: A `bool`, if `True` the output tensor is sorted
+        according to the sorted batch ids of `point_cloud_out`.
+      return_padded: A `bool`, if `True` the output tensor is sorted and
+        zero padded.
+
+    Returns:
+      A `float` `Tensor` of shape
+        `[N_out, C]`, if `return_padded` is `False`
+      or
+        `[A1, ..., An, V_out, C]`, if `return_padded` is `True`.
+
+    """
+    return super(MaxPooling, self).__call__(
+        tf.math.unsorted_segment_max,
+        features, point_cloud_in, point_cloud_out, pooling_radius,
+        return_sorted, return_padded, name, default_name="max pooling")
+
+
+class AveragePooling(_LocalPointPooling):
+  """ Local average pooling between two point clouds.
+  """
+
+  def __call__(self,
+               features,
+               point_cloud_in: PointCloud,
+               point_cloud_out: PointCloud,
+               pooling_radius,
+               return_sorted=False,
+               return_padded=False,
+               name=None):
+    """ Computes a local average pooling between two point clouds.
+
+    Args:
+      features: A `float` `Tensor` of shape `[N_in, C]` or
+        `[A1, ..., An, V_in, C]`.
+      point_cloud_in: A `PointCloud` instance on which the features are
+        defined.
+      point_cloud_out: A `PointCloud` instance, on which the output features
+        are defined.
+      pooling_radius: A `float` or a `float` `Tensor` of shape `[D]`.
+      return_sorted: A `bool`, if `True` the output tensor is sorted
+        according to the sorted batch ids of `point_cloud_out`.
+      return_padded: A `bool`, if `True` the output tensor is sorted and
+        zero padded.
+
+    Returns:
+      A `float` `Tensor` of shape
+        `[N_out, C]`, if `return_padded` is `False`
+      or
+        `[A1, ..., An, V_out, C]`, if `return_padded` is `True`.
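+
+      Note: as `tf.math.unsorted_segment_mean` returns zero for empty
+      segments, output points with no neighbors inside `pooling_radius`
+      receive zeros.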
+ + """ + return super(AveragePooling, self).__call__( + tf.math.unsorted_segment_mean, + features, point_cloud_in, point_cloud_out, pooling_radius, + return_sorted, return_padded, name, default_name="average pooling") diff --git a/tensorflow_graphics/projects/point_convolutions/pylib/pc/layers/__init__.py b/tensorflow_graphics/projects/point_convolutions/pylib/pc/layers/__init__.py new file mode 100644 index 000000000..7f23150e0 --- /dev/null +++ b/tensorflow_graphics/projects/point_convolutions/pylib/pc/layers/__init__.py @@ -0,0 +1,27 @@ +# Copyright 2020 The TensorFlow Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Point cloud network layers""" + +from .Pooling import GlobalAveragePooling, GlobalMaxPooling +from .Pooling import MaxPooling, AveragePooling +from .Conv1x1 import Conv1x1 +''' +from .MCConv import MCConv +from .KPConv import KPConv +from .PointConv import PointConv +from .network_blocks import PointResNet, \ + PointResNetBottleNeck, PointResNetSpatialBottleNeck +from .utils import spherical_kernel_points, \ + cube_kernel_points +''' \ No newline at end of file diff --git a/tensorflow_graphics/projects/point_convolutions/pylib/pc/layers/tests/__init__.py b/tensorflow_graphics/projects/point_convolutions/pylib/pc/layers/tests/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tensorflow_graphics/projects/point_convolutions/pylib/pc/layers/tests/pooling_layers_test.py b/tensorflow_graphics/projects/point_convolutions/pylib/pc/layers/tests/pooling_layers_test.py new file mode 100644 index 000000000..5ed7993a1 --- /dev/null +++ b/tensorflow_graphics/projects/point_convolutions/pylib/pc/layers/tests/pooling_layers_test.py @@ -0,0 +1,139 @@ +# Copyright 2020 The TensorFlow Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific +"""Class to test pooling layers""" + +import os +import sys +import numpy as np +import tensorflow as tf +from absl.testing import parameterized +from tensorflow_graphics.util import test_case + +from pylib.pc import PointCloud, Grid, Neighborhood, AABB +from pylib.pc.tests import utils +from pylib.pc.layers import GlobalMaxPooling, GlobalAveragePooling +from pylib.pc.layers import MaxPooling, AveragePooling + + +class PoolingTest(test_case.TestCase): + + @parameterized.parameters( + (1000, 32, 2), + (2000, 16, 2), + (4000, 8, 2), + (1000, 32, 3), + (2000, 16, 3), + (4000, 8, 3), + (4000, 2, 3), + (1000, 32, 4), + (2000, 16, 4), + (4000, 8, 4) + ) + def test_global_pooling(self, num_points, batch_size, dimension): + points, batch_ids = utils._create_random_point_cloud_segmented( + batch_size, num_points * batch_size, dimension=dimension, + equal_sized_batches=True) + features = np.random.rand(batch_size, num_points, dimension) + point_cloud = PointCloud(points, batch_ids) + + # max pooling + with self.subTest(name='max_pooling'): + PoolLayer = GlobalMaxPooling() + pool_tf = PoolLayer(features, point_cloud) + pool_numpy = np.empty([batch_size, dimension]) + features = features.reshape([-1, dimension]) + for i in range(batch_size): + pool_numpy[i] = np.max(features[batch_ids == i], axis=0) + self.assertAllClose(pool_numpy, pool_tf) + point_cloud.set_batch_shape([batch_size // 2, 2]) + padded = PoolLayer(features, point_cloud, return_padded=True) + self.assertTrue(padded.shape.rank > 2) + + # average pooling + with self.subTest(name='average_pooling'): + PoolLayer = GlobalAveragePooling() + pool_tf = PoolLayer(features, point_cloud) + pool_numpy = np.empty([batch_size, dimension]) + for i in range(batch_size): + pool_numpy[i] = np.mean(features[batch_ids == i], axis=0) + self.assertAllClose(pool_numpy, pool_tf) + point_cloud.set_batch_shape([batch_size // 2, 2]) + padded = PoolLayer(features, point_cloud, return_padded=True) + self.assertTrue(padded.shape.rank > 2) + + @parameterized.parameters( + (2000, 200, 16, 0.7, 2), + (4000, 400, 8, np.sqrt(2), 2), + (2000, 200, 16, 0.7, 3), + (4000, 400, 8, np.sqrt(3), 3), + (4000, 100, 2, np.sqrt(3), 3), + (2000, 200, 16, 0.7, 4), + (4000, 400, 8, np.sqrt(4), 4) + ) + def test_local_pooling(self, + num_points, + num_samples, + batch_size, + radius, + dimension): + cell_sizes = np.float32(np.repeat(radius, dimension)) + points, batch_ids = utils._create_random_point_cloud_segmented( + batch_size, num_points, dimension=dimension) + features = np.random.rand(num_points, dimension) + point_cloud = PointCloud(points, batch_ids) + + point_samples, batch_ids_samples = \ + utils._create_random_point_cloud_segmented( + batch_size, num_samples, dimension=dimension) + + point_cloud_samples = PointCloud(point_samples, batch_ids_samples) + + grid = Grid(point_cloud, cell_sizes) + neighborhood = Neighborhood(grid, cell_sizes, point_cloud_samples) + neighbor_ids = neighborhood._original_neigh_ids.numpy() + features_on_neighbors = features[neighbor_ids[:, 0]] + + #max pooling + with self.subTest(name='max_pooling_to_sampled'): + PoolLayer = MaxPooling() + pool_tf = PoolLayer( + features, point_cloud, point_cloud_samples, cell_sizes) + + pool_numpy = np.empty([num_samples, dimension]) + for i in range(num_samples): + pool_numpy[i] = np.max( + features_on_neighbors[neighbor_ids[:, 1] == i], axis=0) + + self.assertAllClose(pool_tf, pool_numpy) + point_cloud.set_batch_shape([batch_size // 2, 2]) + padded = PoolLayer( + features, 
point_cloud, point_cloud_samples, cell_sizes,
+          return_padded=True)
+      self.assertTrue(padded.shape.rank > 2)
+
+    # average pooling
+    with self.subTest(name='average_pooling_to_sampled'):
+      PoolLayer = AveragePooling()
+      pool_tf = PoolLayer(
+          features, point_cloud, point_cloud_samples, cell_sizes)
+
+      pool_numpy = np.empty([num_samples, dimension])
+      for i in range(num_samples):
+        pool_numpy[i] = np.mean(
+            features_on_neighbors[neighbor_ids[:, 1] == i], axis=0)
+
+      self.assertAllClose(pool_tf, pool_numpy)
+
+
+if __name__ == '__main__':
  test_case.main()
diff --git a/tensorflow_graphics/projects/point_convolutions/pylib/pc/layers/utils.py b/tensorflow_graphics/projects/point_convolutions/pylib/pc/layers/utils.py
new file mode 100644
index 000000000..7c2513935
--- /dev/null
+++ b/tensorflow_graphics/projects/point_convolutions/pylib/pc/layers/utils.py
@@ -0,0 +1,245 @@
+# Copyright 2020 The TensorFlow Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Utility methods for point cloud layers."""
+
+import tensorflow as tf
+import numpy as np
+
+tf_pi = tf.convert_to_tensor(np.pi)
+
+
+def _format_output(features, point_cloud, return_sorted, return_padded):
+  """ Method to format and sort the output of a point cloud convolution layer.
+
+  Note:
+    In the following, `A1` to `An` are optional batch dimensions.
+
+  Args:
+    features: A `float` `Tensor` of shape `[N, C]`.
+    point_cloud: A `PointCloud` instance, on which the `features` are defined.
+    return_sorted: A `bool`, if `True` the output tensor is sorted
+      according to the sorted batch ids of `point_cloud`.
+    return_padded: A `bool`, if `True` the output tensor is sorted and
+      zero padded.
+
+  Returns:
+    A `float` `Tensor` of shape
+      `[N, C]`, if `return_padded` is `False`
+    or
+      `[A1, ..., An, V, C]`, if `return_padded` is `True`.
+
+  """
+
+  if return_padded:
+    unflatten = point_cloud.get_unflatten()
+    features = unflatten(features)
+  elif return_sorted:
+    features = tf.gather(features, point_cloud._sorted_indices_batch)
+  return features
+
+
+def random_rotation(points, name=None):
+  """ Method to rotate 3D points randomly.
+
+  Args:
+    points: A `float` `Tensor` of shape `[N, 3]`.
+
+  Returns:
+    A `float` `Tensor` of the same shape as `points`.
+
+  """
+  points = tf.convert_to_tensor(value=points)
+  angles = tf.random.uniform([3], 0, 2 * np.pi)
+  sine = tf.math.sin(angles)
+  cosine = tf.math.cos(angles)
+  Rx = tf.stack(([1.0, 0.0, 0.0],
+                 [0.0, cosine[0], -sine[0]],
+                 [0.0, sine[0], cosine[0]]), axis=1)
+  Ry = tf.stack(([cosine[1], 0, sine[1]],
+                 [0.0, 1.0, 0.0],
+                 [-sine[1], 0.0, cosine[1]]), axis=1)
+  Rz = tf.stack(([cosine[2], -sine[2], 0.0],
+                 [sine[2], cosine[2], 0.0],
+                 [0.0, 0.0, 1.0]), axis=1)
+  R = tf.matmul(tf.matmul(Rx, Ry), Rz)
+  return tf.matmul(points, R)
+
+
+def _hexagon(scale, z_shift):
+  """ Numpy hexagon points in the xy-plane with diameter `scale`
+  at z-position `z_shift`.
+
+  Args:
+    scale: A `float`.
+    z_shift: A `float`.
+
+  Returns:
+    A `np.array` of shape `[6, 3]`.
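+
+    For reference, the six vertices of a regular hexagon with
+    circumradius 1 lie at angles `90 + k * 60` degrees, i.e. at
+    `(0, +-1)` and `(+-sqrt(3)/2, +-1/2)`.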
+
+  """
+  phi = np.sqrt(3) / 2
+  points = [[0, 1, 0],
+            [0, -1, 0],
+            [phi, 0.5, 0],
+            [phi, -0.5, 0],
+            [-phi, 0.5, 0],
+            [-phi, -0.5, 0]]
+  points = np.array(points) * scale
+  points[:, 2] += z_shift
+  return points
+
+
+def _pentagon(scale, z_shift):
+  """ Numpy pentagon points in the xy-plane with diameter `scale`
+  at z-position `z_shift`.
+
+  Args:
+    scale: A `float`.
+    z_shift: A `float`.
+
+  Returns:
+    A `np.array` of shape `[5, 3]`.
+
+  """
+  c1 = (np.sqrt(5) - 1) / 4
+  c2 = (np.sqrt(5) + 1) / 4
+  s1 = np.sqrt(10 + 2 * np.sqrt(5)) / 4
+  s2 = np.sqrt(10 - 2 * np.sqrt(5)) / 4
+  points = [[1, 0, 0],
+            [c1, s1, 0],
+            [c1, -s1, 0],
+            [-c2, s2, 0],
+            [-c2, -s2, 0]]
+  points = np.array(points) * scale
+  points[:, 2] += z_shift
+  return points
+
+
+def square(scale, z_shift):
+  """ Numpy square points in the xy-plane with diameter `scale`
+  at z-position `z_shift`.
+
+  Args:
+    scale: A `float`.
+    z_shift: A `float`.
+
+  Returns:
+    A `np.array` of shape `[4, 3]`.
+
+  """
+  points = [[1, 0, 0],
+            [0, 1, 0],
+            [-1, 0, 0],
+            [0, -1, 0]]
+  points = np.array(points) * scale
+  points[:, 2] += z_shift
+  return points
+
+
+def spherical_kernel_points(num_points, rotate=True, name=None):
+  """ Kernel points in a unit sphere.
+
+  The points are located at positions as described in Appendix B of
+  [KPConv: Flexible and Deformable Convolution for Point Clouds. Thomas et
+  al.,
+  2019](https://arxiv.org/abs/1904.08889).
+
+  Args:
+    num_points: An `int`, the number of kernel points, must be in
+      `[5, 7, 13, 15, 18]`.
+    rotate: A `bool`, if `True` a random rotation is applied to the points.
+
+  Returns:
+    A `float` `Tensor` of shape `[num_points, 3]`.
+
+  """
+
+  if num_points not in [5, 7, 13, 15, 18]:
+    raise ValueError('KPConv currently only supports kernel sizes' + \
+                     ' [5, 7, 13, 15, 18]')
+  if num_points == 5:
+    # Tetrahedron
+    points = tf.Variable([[0, 0, 0],
+                          [0, 0, 1],
+                          [tf.sqrt(8 / 9), 0, -1 / 3],
+                          [-tf.sqrt(2 / 9), tf.sqrt(2 / 3), -1 / 3],
+                          [-tf.sqrt(2 / 9), -tf.sqrt(2 / 3), -1 / 3]],
+                         dtype=tf.float32)
+  elif num_points == 7:
+    # Octahedron
+    points = tf.Variable([[0, 0, 0],
+                          [1, 0, 0],
+                          [-1, 0, 0],
+                          [0, 1, 0],
+                          [0, -1, 0],
+                          [0, 0, 1],
+                          [0, 0, -1]], dtype=tf.float32)
+  elif num_points == 13:
+    # Icosahedron
+    phi = (1 + tf.sqrt(5)) / 2
+    points = tf.Variable([[0, 0, 0],
+                          [0, 1, phi],
+                          [0, 1, -phi],
+                          [0, -1, phi],
+                          [0, -1, -phi],
+                          [1, phi, 0],
+                          [1, -phi, 0],
+                          [-1, phi, 0],
+                          [-1, -phi, 0],
+                          [phi, 0, 1],
+                          [-phi, 0, 1],
+                          [phi, 0, -1],
+                          [-phi, 0, -1]], dtype=tf.float32)
  elif num_points == 15:
+    hex1 = _hexagon(0.5, np.sqrt(3) / 2)
+    hex2 = _hexagon(-0.5, np.sqrt(3) / 2)[:, [1, 0, 2]]  # x and y swapped
+    points = np.concatenate(([[0, 0, 0], [0, 0, 1], [0, 0, -1]], hex1, hex2),
+                            axis=0)
+    points = tf.Variable(points, dtype=tf.float32)
+  elif num_points == 18:
+    penta1 = _pentagon(1 / np.sqrt(2), 0.5)
+    penta2 = -_pentagon(1.0, 0.0)  # flipped in xy
+    penta3 = _pentagon(-1 / np.sqrt(2), 0.5)
+    points = np.concatenate(([[0, 0, 0], [0, 0, 1], [0, 0, -1]],
+                             penta1, penta2, penta3),
+                            axis=0)
+    points = tf.Variable(points, dtype=tf.float32)
+  if rotate:
+    points = random_rotation(points)
+  return points
+
+
+def cube_kernel_points(cbrt_num_points, name):
+  """ Regularly distributed points in a unit cube.
+
+  Args:
+    cbrt_num_points: An `int`, the cubic root of the number of points.
+
+  Returns:
+    A `float` `Tensor` of shape `[cbrt_num_points^3, 3]`.
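+
+    Example (illustrative): `cube_kernel_points(2, None)` returns the
+    eight corners of the unit cube, `cbrt_num_points=3` a 3x3x3 lattice
+    of 27 points.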
+ + """ + x = np.linspace(0, 1, cbrt_num_points) + x, y, z = np.meshgrid(x, x, x) + points = np.stack((x.flatten(), y.flatten(), z.flatten()), axis=1) + return tf.Variable(points, dtype=tf.float32) + + +def _identity(features, *args, **kwargs): + """ Simple identity layer, to be used as placeholder. + + Used to replace projection shortcuts, if not desired. + + """ + return features diff --git a/tensorflow_graphics/projects/point_convolutions/pylib/pc/utils.py b/tensorflow_graphics/projects/point_convolutions/pylib/pc/utils.py index 7c6c3eab3..9c1d1abf3 100644 --- a/tensorflow_graphics/projects/point_convolutions/pylib/pc/utils.py +++ b/tensorflow_graphics/projects/point_convolutions/pylib/pc/utils.py @@ -42,7 +42,7 @@ def check_valid_point_cloud_input(points, sizes, batch_ids): if points.shape[0] != batch_ids.shape[0]: raise AssertionError('Invalid sizes! Sizes of points and batch_ids are' + ' not equal.') -''' + def _flatten_features(features, point_cloud: PointCloud): """ Converts features of shape `[A1, ..., An, C]` to shape `[N, C]`. @@ -63,7 +63,7 @@ def _flatten_features(features, point_cloud: PointCloud): tf.assert_equal(tf.shape(features)[0], tf.shape(point_cloud._points)[0]) tf.assert_equal(tf.rank(features), 2) return features -''' + def cast_to_num_dims(values, num_dims, dtype=tf.float32): """ Converts an input to the specified `dtype` and repeats it `num_dims` From eb1b5d56a39b9518b1252e622b8602e0df528217 Mon Sep 17 00:00:00 2001 From: Michael Schelling Date: Mon, 3 May 2021 14:41:18 +0200 Subject: [PATCH 27/29] added unit tests --- .../pylib/pc/layers/tests/conv1x1_test.py | 47 +++++++++++++++++++ .../pylib/pc/tests/point_cloud_test.py | 28 ++++++++++- 2 files changed, 74 insertions(+), 1 deletion(-) create mode 100644 tensorflow_graphics/projects/point_convolutions/pylib/pc/layers/tests/conv1x1_test.py diff --git a/tensorflow_graphics/projects/point_convolutions/pylib/pc/layers/tests/conv1x1_test.py b/tensorflow_graphics/projects/point_convolutions/pylib/pc/layers/tests/conv1x1_test.py new file mode 100644 index 000000000..6148348dc --- /dev/null +++ b/tensorflow_graphics/projects/point_convolutions/pylib/pc/layers/tests/conv1x1_test.py @@ -0,0 +1,47 @@ +# Copyright 2020 The TensorFlow Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Class to test 1x1 point cloud convolutions"""

+import os
+import sys
+import numpy as np
+import tensorflow as tf
+from absl.testing import parameterized
+from tensorflow_graphics.util import test_case
+
+from pylib.pc import PointCloud, Grid, Neighborhood, AABB
+from pylib.pc.tests import utils
+from pylib.pc.layers import Conv1x1
+
+
+class Conv1x1Test(test_case.TestCase):
+
+  @parameterized.parameters(
+    (1000, 4, [3, 3], 3),
+    (1000, 4, [3, 1], 3),
+    (1000, 4, [1, 3], 3),
+  )
+  def test_conv1x1(self, num_points, batch_size, feature_sizes, dimension):
+    points, batch_ids = utils._create_random_point_cloud_segmented(
+        batch_size, num_points * batch_size, dimension=dimension,
+        equal_sized_batches=True)
+    features = np.random.rand(batch_size, num_points, feature_sizes[0])
+    point_cloud = PointCloud(points, batch_ids)
+
+    conv_layer = Conv1x1(feature_sizes[0], feature_sizes[1])
+    result = conv_layer(features, point_cloud)
+    self.assertTrue(result.shape[-1] == feature_sizes[1])
+
+
+if __name__ == '__main__':
+  test_case.main()
diff --git a/tensorflow_graphics/projects/point_convolutions/pylib/pc/tests/point_cloud_test.py b/tensorflow_graphics/projects/point_convolutions/pylib/pc/tests/point_cloud_test.py
index e71d27ff3..fd7bb62c0 100644
--- a/tensorflow_graphics/projects/point_convolutions/pylib/pc/tests/point_cloud_test.py
+++ b/tensorflow_graphics/projects/point_convolutions/pylib/pc/tests/point_cloud_test.py
@@ -19,7 +19,7 @@
 from absl.testing import parameterized
 from tensorflow_graphics.util import test_case
 
-from pylib.pc import PointCloud
+from pylib.pc import PointCloud, PointHierarchy
 from pylib.pc.tests import utils
 
 
@@ -86,6 +86,13 @@ def test_construction_methods(self, max_num_points, batch_size, batch_shape):
     self.assertAllEqual(points_from_ids, points_from_sizes)
     self.assertAllEqual(points_from_sizes, points_from_padded)
 
+    points_from_padded = pc_from_padded.get_points(0)
+    points_from_ids = pc_from_ids.get_points(0)
+    points_from_sizes = pc_from_sizes.get_points(0)
+    self.assertAllEqual(points_from_padded, points_from_ids)
+    self.assertAllEqual(points_from_ids, points_from_sizes)
+    self.assertAllEqual(points_from_sizes, points_from_padded)
+
   @parameterized.parameters(
     (1000, ['Invalid input!
Point tensor is of dimension 1 \ @@ -104,6 +111,25 @@ def test_exceptions_raised_at_construction(self, num_points, msgs): with self.assertRaisesRegexp(AssertionError, msgs[2]): _ = PointCloud(points, batch_ids[1:]) + @parameterized.parameters( + ([32], 100, 3), + ([5, 2], 100, 2), + ([2, 3, 4], 100, 4) + ) + def test_point_hierarchy(self, batch_shape, num_points, dimension): + batch_size = np.prod(batch_shape) + points, sizes = utils._create_random_point_cloud_padded( + num_points, batch_shape, dimension=dimension) + point_cloud = PointCloud(points, sizes=sizes) + point_hierarchy = PointHierarchy(point_cloud, [[0.1]]) + points_retrieved = point_hierarchy.get_points(0) + self.assertTrue(len(points_retrieved) == 2) + sizes_retrieved = point_hierarchy.get_sizes() + self.assertTrue(len(sizes_retrieved) == 2) + self.assertAllEqual(sizes_retrieved[0], sizes) + nbhs = point_hierarchy.get_neighborhood() + self.assertTrue(len(nbhs) == 1) + if __name__ == '__main__': test_case.main() From 479e8cd03f84c462445a822be2b9a18dddefd0b6 Mon Sep 17 00:00:00 2001 From: Michael Schelling Date: Thu, 20 May 2021 15:44:18 +0200 Subject: [PATCH 28/29] MCConv initial --- .../point_convolutions/pylib/pc/__init__.py | 4 +- .../pylib/pc/custom_ops/__init__.py | 4 +- .../pylib/pc/custom_ops/custom_ops_tf.py | 3 +- .../pc/custom_ops/tests/basis_proj_tf_test.py | 177 +++++++++++ .../pylib/pc/layers/MCConv.py | 278 ++++++++++++++++++ .../pylib/pc/layers/__init__.py | 5 +- .../pylib/pc/layers/tests/basis_proj_test.py | 180 ++++++++++++ .../pc/layers/tests/monte_carlo_conv_test.py | 223 ++++++++++++++ .../pylib/pc/layers/utils.py | 9 +- 9 files changed, 867 insertions(+), 16 deletions(-) create mode 100644 tensorflow_graphics/projects/point_convolutions/pylib/pc/custom_ops/tests/basis_proj_tf_test.py create mode 100644 tensorflow_graphics/projects/point_convolutions/pylib/pc/layers/MCConv.py create mode 100644 tensorflow_graphics/projects/point_convolutions/pylib/pc/layers/tests/basis_proj_test.py create mode 100644 tensorflow_graphics/projects/point_convolutions/pylib/pc/layers/tests/monte_carlo_conv_test.py diff --git a/tensorflow_graphics/projects/point_convolutions/pylib/pc/__init__.py b/tensorflow_graphics/projects/point_convolutions/pylib/pc/__init__.py index 5960b5c0d..c499758e3 100755 --- a/tensorflow_graphics/projects/point_convolutions/pylib/pc/__init__.py +++ b/tensorflow_graphics/projects/point_convolutions/pylib/pc/__init__.py @@ -22,7 +22,7 @@ from .sampling import sample from .PointHierarchy import PointHierarchy -''' + from pylib.pc import layers -''' + from pylib.pc import custom_ops diff --git a/tensorflow_graphics/projects/point_convolutions/pylib/pc/custom_ops/__init__.py b/tensorflow_graphics/projects/point_convolutions/pylib/pc/custom_ops/__init__.py index 17be8cb2e..cd2aee431 100644 --- a/tensorflow_graphics/projects/point_convolutions/pylib/pc/custom_ops/__init__.py +++ b/tensorflow_graphics/projects/point_convolutions/pylib/pc/custom_ops/__init__.py @@ -24,14 +24,14 @@ CUSTOM = 0 if CUSTOM: - # from .custom_ops_wrapper import basis_proj + from .custom_ops_wrapper import basis_proj from .custom_ops_wrapper import build_grid_ds from .custom_ops_wrapper import compute_keys from .custom_ops_wrapper import compute_pdf from .custom_ops_wrapper import find_neighbors from .custom_ops_wrapper import sampling else: - # from .custom_ops_tf import basis_proj_tf as basis_proj + from .custom_ops_tf import basis_proj_tf as basis_proj from .custom_ops_tf import build_grid_ds_tf as build_grid_ds from .custom_ops_tf 
import compute_keys_tf as compute_keys from .custom_ops_tf import compute_pdf_tf as compute_pdf diff --git a/tensorflow_graphics/projects/point_convolutions/pylib/pc/custom_ops/custom_ops_tf.py b/tensorflow_graphics/projects/point_convolutions/pylib/pc/custom_ops/custom_ops_tf.py index d992fa45e..e7032fedf 100644 --- a/tensorflow_graphics/projects/point_convolutions/pylib/pc/custom_ops/custom_ops_tf.py +++ b/tensorflow_graphics/projects/point_convolutions/pylib/pc/custom_ops/custom_ops_tf.py @@ -362,7 +362,7 @@ def compute_pdf_tf(neighborhood, bandwidth, mode, name=None): tf.reduce_prod(bandwidth) return pdf -''' + def basis_proj_tf(neigh_basis, features, neighborhood, name=None): """ Method to aggregate the features*basis for different neighborhoods. @@ -388,4 +388,3 @@ def basis_proj_tf(neigh_basis, features, neighborhood, name=None): weighted_latent_per_center = tf.math.unsorted_segment_sum( weighted_features_per_nb, neighborhood._neighbors[:, 1], num_nbh) return weighted_latent_per_center -''' \ No newline at end of file diff --git a/tensorflow_graphics/projects/point_convolutions/pylib/pc/custom_ops/tests/basis_proj_tf_test.py b/tensorflow_graphics/projects/point_convolutions/pylib/pc/custom_ops/tests/basis_proj_tf_test.py new file mode 100644 index 000000000..93c4cda9b --- /dev/null +++ b/tensorflow_graphics/projects/point_convolutions/pylib/pc/custom_ops/tests/basis_proj_tf_test.py @@ -0,0 +1,177 @@ +# Copyright 2020 The TensorFlow Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific +"""Class to test basis projection tensorflow implementation""" + +import os +import sys +import numpy as np +import tensorflow as tf +from absl.testing import parameterized +from tensorflow_graphics.util import test_case + +from pylib.pc import PointCloud, Grid, Neighborhood, AABB +from pylib.pc.tests import utils +from pylib.pc.layers import MCConv +from pylib.pc.custom_ops.custom_ops_tf import basis_proj_tf + + +class BasisProjTFTest(test_case.TestCase): + + @parameterized.parameters( + (2000, 200, [3, 3], 16, 0.7, 4, 2), + (2000, 200, [1, 3], 16, 0.7, 8, 3), + (4000, 400, [3, 3], 8, 0.7, 8, 3), + (2000, 200, [3, 3], 16, 0.7, 8, 4), + ) + def test_basis_proj(self, + num_points, + num_samples, + num_features, + batch_size, + radius, + hidden_size, + dimension): + cell_sizes = np.float32(np.repeat(radius, dimension)) + points, batch_ids = utils._create_random_point_cloud_segmented( + batch_size, num_points, dimension=dimension) + features = np.random.rand(num_points, num_features[0]) + point_cloud = PointCloud(points, batch_ids) + + point_samples, batch_ids_samples = \ + utils._create_random_point_cloud_segmented( + batch_size, num_samples, dimension=dimension) + + point_cloud_samples = PointCloud(point_samples, batch_ids_samples) + grid = Grid(point_cloud, cell_sizes) + neighborhood = Neighborhood(grid, cell_sizes, point_cloud_samples) + nb_ids = neighborhood._original_neigh_ids + # tf + conv_layer = MCConv( + num_features[0], num_features[1], dimension, 1, [hidden_size]) + + basis_weights_tf = tf.reshape(conv_layer._weights_tf[0], + [dimension, hidden_size]) + basis_biases_tf = tf.reshape(conv_layer._bias_tf[0], [1, hidden_size]) + + neigh_point_coords = points[nb_ids[:, 0]] + center_point_coords = point_samples[nb_ids[:, 1]] + kernel_input = (neigh_point_coords - center_point_coords) / radius + basis_neighs = \ + tf.matmul(kernel_input.astype(np.float32), basis_weights_tf) + \ + basis_biases_tf + basis_neighs = tf.nn.relu(basis_neighs) + + weighted_latent_per_sample_tf = basis_proj_tf(basis_neighs, + features, + neighborhood) + + # numpy + neighbor_ids = neighborhood._original_neigh_ids.numpy() + nb_ranges = neighborhood._samples_neigh_ranges.numpy() + # extract variables + hidden_weights = basis_weights_tf.numpy() + hidden_biases = basis_biases_tf.numpy() + + features_on_neighbors = features[neighbor_ids[:, 0]] + # compute first layer of kernel MLP + point_diff = (points[neighbor_ids[:, 0]] -\ + point_samples[neighbor_ids[:, 1]])\ + / np.expand_dims(cell_sizes, 0) + + latent_per_nb = np.dot(point_diff, hidden_weights) + hidden_biases + + latent_relu_per_nb = np.maximum(latent_per_nb, 0) + # Monte-Carlo integration after first layer + # weighting with pdf + weighted_features_per_nb = np.expand_dims(features_on_neighbors, 2) * \ + np.expand_dims(latent_relu_per_nb, 1) + nb_ranges = np.concatenate(([0], nb_ranges), axis=0) + # sum (integration) + weighted_latent_per_sample = \ + np.zeros([num_samples, num_features[0], hidden_size]) + for i in range(num_samples): + weighted_latent_per_sample[i] = \ + np.sum(weighted_features_per_nb[nb_ranges[i]:nb_ranges[i + 1]], + axis=0) + + self.assertAllClose(weighted_latent_per_sample_tf, + weighted_latent_per_sample, atol=1e-3) + + @parameterized.parameters( + (8, 4, [8, 8], 2, np.sqrt(3) * 1.25, 8, 3) + ) + def test_basis_proj_jacobian(self, + num_points, + num_samples, + num_features, + batch_size, + radius, + hidden_size, + dimension): + cell_sizes = np.float32(np.repeat(radius, dimension)) + points, batch_ids 
= utils._create_random_point_cloud_segmented( + batch_size, num_points, dimension=dimension) + features = np.random.rand(num_points, num_features[0]) + point_cloud = PointCloud(points, batch_ids) + + point_samples, batch_ids_samples = \ + utils._create_random_point_cloud_segmented( + batch_size, num_samples, dimension=dimension) + + point_cloud_samples = PointCloud(point_samples, batch_ids_samples) + grid = Grid(point_cloud, cell_sizes) + neighborhood = Neighborhood(grid, cell_sizes, point_cloud_samples) + nb_ids = neighborhood._original_neigh_ids + # tf + conv_layer = MCConv( + num_features[0], num_features[1], dimension, 1, [hidden_size]) + + neigh_point_coords = points[nb_ids[:, 0].numpy()] + center_point_coords = point_samples[nb_ids[:, 1].numpy()] + kernel_input = (neigh_point_coords - center_point_coords) / radius + + basis_weights_tf = tf.reshape(conv_layer._weights_tf[0], + [dimension, hidden_size]) + basis_biases_tf = tf.reshape(conv_layer._bias_tf[0], [1, hidden_size]) + + basis_neighs = \ + tf.matmul(kernel_input.astype(np.float32), basis_weights_tf) +\ + basis_biases_tf + basis_neighs = tf.nn.leaky_relu(basis_neighs) + + _, _, counts = tf.unique_with_counts(neighborhood._neighbors[:, 1]) + max_num_nb = tf.reduce_max(counts).numpy() + + with self.subTest(name='features'): + def basis_proj_features(features_in): + return basis_proj_tf(basis_neighs, + features_in, + neighborhood) / (max_num_nb) + + self.assert_jacobian_is_correct_fn( + basis_proj_features, [np.float32(features)], atol=1e-4, delta=1e-3) + + with self.subTest(name='neigh_basis'): + def basis_proj_basis_neighs(basis_neighs_in): + return basis_proj_tf(basis_neighs_in, + features, + neighborhood) / (max_num_nb) + + self.assert_jacobian_is_correct_fn( + basis_proj_basis_neighs, + [np.float32(basis_neighs)], + atol=1e-4, delta=1e-3) + + +if __name__ == '__main___': + test_case.main() diff --git a/tensorflow_graphics/projects/point_convolutions/pylib/pc/layers/MCConv.py b/tensorflow_graphics/projects/point_convolutions/pylib/pc/layers/MCConv.py new file mode 100644 index 000000000..f21059a8d --- /dev/null +++ b/tensorflow_graphics/projects/point_convolutions/pylib/pc/layers/MCConv.py @@ -0,0 +1,278 @@ +# Copyright 2020 The TensorFlow Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Classes to Monte-Carlo point cloud convolutions""" + +import tensorflow as tf +from pylib.pc.utils import _flatten_features + +from pylib.pc import PointCloud +from pylib.pc import Grid +from pylib.pc import Neighborhood +from pylib.pc import KDEMode + +from pylib.pc.custom_ops import basis_proj +from pylib.pc.layers.utils import _format_output + +non_linearity_types = {'relu': tf.nn.relu, + 'lrelu': tf.nn.leaky_relu, + 'leakyrelu': tf.nn.leaky_relu, + 'leaky_relu': tf.nn.leaky_relu, + 'elu': tf.nn.elu} + + +class MCConv(tf.Module): + """ Monte-Carlo convolution for point clouds. + + Based on the paper [Monte Carlo Convolution for Learning on Non-Uniformly + Sampled Point Clouds. 
Hermosilla et al., 2018]
+  (https://arxiv.org/abs/1806.01759).
+  Uses multiple MLPs as convolution kernels.
+
+  Args:
+    num_features_in: An `int`, `C_in`, the number of features per input point.
+    num_features_out: An `int`, `C_out`, the number of features to compute.
+    num_dims: An `int`, the input dimension to the kernel MLP. Should be the
+      dimensionality of the point cloud.
+    num_mlps: An `int`, number of MLPs used to compute the output features.
+      Warning: num_features_out should be divisible by num_mlps.
+    mlp_size: An `int` `list`, the number of hidden neurons per layer of the
+      kernel MLP, one entry per layer, defaults to `[8]`. (optional)
+    non_linearity_type: A `string`, specifies the type of the activation
+      function used inside the kernel MLP.
+      Possible: `'ReLU', 'lReLU', 'ELU'`, defaults to leaky ReLU. (optional)
+    initializer_weights: A `tf.initializer` for the kernel MLP weights,
+      default `TruncatedNormal`. (optional)
+    initializer_biases: A `tf.initializer` for the kernel MLP biases,
+      default: `zeros`. (optional)
+
+  """
+
+  def __init__(self,
+               num_features_in,
+               num_features_out,
+               num_dims,
+               num_mlps=4,
+               mlp_size=[8],
+               non_linearity_type='leaky_relu',
+               initializer_weights=None,
+               initializer_biases=None,
+               name=None):
+
+    super().__init__(name=name)
+
+    self._num_features_in = num_features_in
+    self._num_features_out = num_features_out
+    self._num_mlps = num_mlps
+    self._mlp_size = mlp_size
+    self._num_dims = num_dims
+    self._non_linearity_type = non_linearity_type
+
+    if num_features_out % num_mlps != 0:
+      raise ValueError(
+          "The number of output features must be divisible by the number" +
+          " of kernel MLPs")
+
+    if name is None:
+      self._name = 'MCConv'
+    else:
+      self._name = name
+
+    # initialize variables
+    if initializer_weights is None:
+      initializer_weights = tf.initializers.TruncatedNormal
+    if initializer_biases is None:
+      initializer_biases = tf.initializers.zeros
+
+    self._weights_tf = []
+    self._bias_tf = []
+    prev_num_input = self._num_dims
+    for cur_layer_iter, cur_layer in enumerate(self._mlp_size):
+
+      if cur_layer_iter:
+        std_dev = tf.math.sqrt(1.0 / float(prev_num_input))
+      else:
+        std_dev = tf.math.sqrt(2.0 / float(prev_num_input))
+
+      weights_init_obj = initializer_weights(stddev=std_dev)
+      self._weights_tf.append(tf.Variable(
+          weights_init_obj(
+              shape=[self._num_mlps, prev_num_input, cur_layer],
+              dtype=tf.float32),
+          trainable=True,
+          name=self._name + "/weights_" + str(cur_layer_iter)))
+
+      bias_init_obj = initializer_biases()
+      self._bias_tf.append(tf.Variable(
+          bias_init_obj(shape=[self._num_mlps, 1, cur_layer],
+                        dtype=tf.float32),
+          trainable=True,
+          name=self._name + "/bias_" + str(cur_layer_iter)))
+      prev_num_input = cur_layer
+
+    std_dev = tf.math.sqrt(2.0 / \
+                           float(cur_layer * self._num_features_in))
+
+    weights_init_obj = initializer_weights(stddev=std_dev)
+    self._final_weights_tf = tf.Variable(
+        weights_init_obj(
+            shape=[
+                self._num_mlps,
+                cur_layer * self._num_features_in,
+                self._num_features_out // self._num_mlps],
+            dtype=tf.float32),
+        trainable=True,
+        name=self._name + "/final_weights_" + str(cur_layer_iter))
+
+  def _monte_carlo_conv(self,
+                        kernel_inputs,
+                        neighborhood,
+                        pdf,
+                        features,
+                        non_linearity_type='leaky_relu'):
+    """ Method to compute a Monte-Carlo integrated convolution using multiple
+    MLPs as implicit convolution kernel functions.
+
+    Args:
+      kernel_inputs: A `float` `Tensor` of shape `[M, L]`, the input to the
+        kernel MLP.
+      neighborhood: A `Neighborhood` instance.
+ pdf: A `float` `Tensor` of shape `[M]`, the point densities. + features: A `float` `Tensor` of shape `[N, C1]`, the input features. + non_linearity_type: An `string`, specifies the type of the activation + function used inside the kernel MLP. + Possible: `'ReLU', 'leaky_ReLU', 'ELU'`, defaults to leaky ReLU. + (optional) + + Returns: + A `float` `Tensor` of shape `[N,C2]`, the output features. + + """ + + # Compute the hidden layer MLP + cur_inputs = tf.tile(tf.reshape(kernel_inputs, [1, -1, self._num_dims]), + [self._num_mlps, 1, 1]) + for cur_layer_iter in range(len(self._weights_tf)): + cur_inputs = tf.matmul(cur_inputs, self._weights_tf[cur_layer_iter]) + \ + self._bias_tf[cur_layer_iter] + cur_inputs = non_linearity_types[non_linearity_type.lower()](cur_inputs) + cur_inputs = tf.reshape(tf.transpose(cur_inputs, [1, 0, 2]), + [-1, self._mlp_size[-1] * self._num_mlps]) \ + / tf.reshape(pdf, [-1, 1]) + + # Compute the projection to the samples. + weighted_features = basis_proj( + cur_inputs, + features, + neighborhood) + + # Reshape features + weighted_features = tf.transpose(tf.reshape(weighted_features, + [-1, self._num_features_in, + self._num_mlps, + self._mlp_size[-1]]), + [2, 0, 1, 3]) + + #Compute convolution - hidden layer to output (linear) + convolution_result = tf.matmul( + tf.reshape( + weighted_features, + [self._num_mlps, -1, self._num_features_in * self._mlp_size[-1]]), + self._final_weights_tf) + + return tf.reshape(tf.transpose(convolution_result, [1, 0, 2]), + [-1, self._num_features_out]) + + def __call__(self, + features, + point_cloud_in: PointCloud, + point_cloud_out: PointCloud, + radius, + neighborhood=None, + bandwidth=0.2, + return_sorted=False, + return_padded=False, + name=None): + """ Computes the Monte-Carlo Convolution between two point clouds. + + Note: + In the following, `A1` to `An` are optional batch dimensions. + `C_in` is the number of input features. + `C_out` is the number of output features. + + Args: + features: A `float` `Tensor` of shape `[N_in, C_in]` or + `[A1, ..., An,V, C_in]`. + point_cloud_in: A 'PointCloud' instance, on which the features are + defined. + point_cloud_out: A `PointCloud` instance, on which the output + features are defined. + radius: A `float`, the convolution radius. + neighborhood: A `Neighborhood` instance, defining the neighborhood + with centers from `point_cloud_out` and neighbors in `point_cloud_in`. + If `None` it is computed internally. (optional) + bandwidth: A `float`, the bandwidth used in the kernel density + estimation on the input point cloud. (optional) + return_sorted: A `boolean`, if `True` the output tensor is sorted + according to the batch_ids. (optional) + return_padded: A `bool`, if 'True' the output tensor is sorted and + zero padded. (optional) + + Returns: + A `float` `Tensor` of shape + `[N_out, C_out]`, if `return_padded` is `False` + or + `[A1, ..., An, V_out, C_out]`, if `return_padded` is `True`. + + """ + + features = tf.cast(tf.convert_to_tensor(value=features), + dtype=tf.float32) + + tf.assert_equal( + tf.shape(features)[-1], + self._num_features_in) + + features = _flatten_features(features, point_cloud_in) + + #Create the radii tensor. + radius = tf.reshape(tf.convert_to_tensor(value=radius, dtype=tf.float32), [1, 1]) + radii_tensor = tf.repeat(radius, self._num_dims) + #Create the bandwidth tensor. 
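+    # (The scalar bandwidth is expanded to one value per spatial dimension;
+    # it parameterizes the kernel density estimate queried through
+    # `neigh.get_pdf` below.)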
+ bwTensor = tf.repeat(bandwidth, self._num_dims) + + if neighborhood is None: + #Compute the grid + grid = Grid(point_cloud_in, radii_tensor) + #Compute the neighborhoods + neigh = Neighborhood(grid, radii_tensor, point_cloud_out) + else: + neigh = neighborhood + pdf = neigh.get_pdf(bandwidth=bwTensor, mode=KDEMode.constant) + + #Compute kernel inputs. + neigh_point_coords = tf.gather( + point_cloud_in._points, neigh._original_neigh_ids[:, 0]) + center_point_coords = tf.gather( + point_cloud_out._points, neigh._original_neigh_ids[:, 1]) + points_diff = (neigh_point_coords - center_point_coords) / \ + tf.reshape(radii_tensor, [1, self._num_dims]) + + #Compute Monte-Carlo convolution + convolution_result = self._monte_carlo_conv( + points_diff, neigh, pdf, features, self._non_linearity_type) + + return _format_output(convolution_result, + point_cloud_out, + return_sorted, + return_padded) diff --git a/tensorflow_graphics/projects/point_convolutions/pylib/pc/layers/__init__.py b/tensorflow_graphics/projects/point_convolutions/pylib/pc/layers/__init__.py index 7f23150e0..e21220129 100644 --- a/tensorflow_graphics/projects/point_convolutions/pylib/pc/layers/__init__.py +++ b/tensorflow_graphics/projects/point_convolutions/pylib/pc/layers/__init__.py @@ -16,12 +16,11 @@ from .Pooling import GlobalAveragePooling, GlobalMaxPooling from .Pooling import MaxPooling, AveragePooling from .Conv1x1 import Conv1x1 -''' + from .MCConv import MCConv +''' from .KPConv import KPConv from .PointConv import PointConv -from .network_blocks import PointResNet, \ - PointResNetBottleNeck, PointResNetSpatialBottleNeck from .utils import spherical_kernel_points, \ cube_kernel_points ''' \ No newline at end of file diff --git a/tensorflow_graphics/projects/point_convolutions/pylib/pc/layers/tests/basis_proj_test.py b/tensorflow_graphics/projects/point_convolutions/pylib/pc/layers/tests/basis_proj_test.py new file mode 100644 index 000000000..07e6e68b6 --- /dev/null +++ b/tensorflow_graphics/projects/point_convolutions/pylib/pc/layers/tests/basis_proj_test.py @@ -0,0 +1,180 @@ +# Copyright 2020 The TensorFlow Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific +"""Class to test basis projection kernel""" + +import os +import sys +import numpy as np +import tensorflow as tf +from absl.testing import parameterized +from tensorflow_graphics.util import test_case + +from pylib.pc import PointCloud, Grid, Neighborhood, AABB +from pylib.pc.tests import utils +from pylib.pc.layers import MCConv +from pylib.pc.custom_ops import basis_proj + + +class BasisProjTest(test_case.TestCase): + + @parameterized.parameters( + (200, 20, [3, 3], 16, 0.7, 8, 2), + (400, 40, [3, 3], 8, np.sqrt(2), 8, 2), + (200, 20, [1, 3], 16, 0.7, 8, 3), + (400, 40, [3, 3], 8, 0.7, 8, 3), + (400, 10, [3, 1], 1, np.sqrt(3), 16, 3), + (200, 20, [3, 3], 16, 0.7, 8, 4), + (400, 40, [1, 3], 8, np.sqrt(4), 32, 4) + ) + def test_basis_proj(self, + num_points, + num_samples, + num_features, + batch_size, + radius, + hidden_size, + dimension): + cell_sizes = np.float32(np.repeat(radius, dimension)) + points, batch_ids = utils._create_random_point_cloud_segmented( + batch_size, num_points, dimension=dimension) + features = np.random.rand(num_points, num_features[0]) + point_cloud = PointCloud(points, batch_ids) + + point_samples, batch_ids_samples = \ + utils._create_random_point_cloud_segmented( + batch_size, num_samples, dimension=dimension) + + point_cloud_samples = PointCloud(point_samples, batch_ids_samples) + grid = Grid(point_cloud, cell_sizes) + neighborhood = Neighborhood(grid, cell_sizes, point_cloud_samples) + nb_ids = neighborhood._original_neigh_ids + # tf + conv_layer = MCConv( + num_features[0], num_features[1], dimension, 1, [hidden_size]) + + basis_weights_tf = tf.reshape(conv_layer._weights_tf[0], + [dimension, hidden_size]) + basis_biases_tf = tf.reshape(conv_layer._bias_tf[0], [1, hidden_size]) + + neigh_point_coords = points[nb_ids[:, 0]] + center_point_coords = point_samples[nb_ids[:, 1]] + kernel_input = (neigh_point_coords - center_point_coords) / radius + basis_neighs = \ + tf.matmul(kernel_input.astype(np.float32), basis_weights_tf) + \ + basis_biases_tf + basis_neighs = tf.nn.relu(basis_neighs) + + weighted_latent_per_sample_tf = basis_proj(basis_neighs, + features, + neighborhood) + + # numpy + neighbor_ids = neighborhood._original_neigh_ids.numpy() + nb_ranges = neighborhood._samples_neigh_ranges.numpy() + # extract variables + hidden_weights = basis_weights_tf.numpy() + hidden_biases = basis_biases_tf.numpy() + + features_on_neighbors = features[neighbor_ids[:, 0]] + # compute first layer of kernel MLP + point_diff = (points[neighbor_ids[:, 0]] -\ + point_samples[neighbor_ids[:, 1]])\ + / np.expand_dims(cell_sizes, 0) + + latent_per_nb = np.dot(point_diff, hidden_weights) + hidden_biases + + latent_relu_per_nb = np.maximum(latent_per_nb, 0) + # Monte-Carlo integration after first layer + # weighting with pdf + weighted_features_per_nb = np.expand_dims(features_on_neighbors, 2) * \ + np.expand_dims(latent_relu_per_nb, 1) + nb_ranges = np.concatenate(([0], nb_ranges), axis=0) + # sum (integration) + weighted_latent_per_sample = \ + np.zeros([num_samples, num_features[0], hidden_size]) + for i in range(num_samples): + weighted_latent_per_sample[i] = \ + np.sum(weighted_features_per_nb[nb_ranges[i]:nb_ranges[i + 1]], + axis=0) + + self.assertAllClose(weighted_latent_per_sample_tf, + weighted_latent_per_sample) + + @parameterized.parameters( + (8, 4, [8, 8], 2, np.sqrt(3) * 1.25, 8, 3) + ) + def test_basis_proj_jacobian(self, + num_points, + num_samples, + num_features, + batch_size, + radius, + hidden_size, + dimension): + 
cell_sizes = np.float32(np.repeat(radius, dimension)) + points, batch_ids = utils._create_random_point_cloud_segmented( + batch_size, num_points, dimension=dimension) + features = np.random.rand(num_points, num_features[0]) + point_cloud = PointCloud(points, batch_ids) + + point_samples, batch_ids_samples = \ + utils._create_random_point_cloud_segmented( + batch_size, num_samples, dimension=dimension) + + point_cloud_samples = PointCloud(point_samples, batch_ids_samples) + grid = Grid(point_cloud, cell_sizes) + neighborhood = Neighborhood(grid, cell_sizes, point_cloud_samples) + nb_ids = neighborhood._original_neigh_ids + # tf + conv_layer = MCConv( + num_features[0], num_features[1], dimension, 1, [hidden_size]) + + neigh_point_coords = points[nb_ids[:, 0].numpy()] + center_point_coords = point_samples[nb_ids[:, 1].numpy()] + kernel_input = (neigh_point_coords - center_point_coords) / radius + + basis_weights_tf = tf.reshape(conv_layer._weights_tf[0], + [dimension, hidden_size]) + basis_biases_tf = tf.reshape(conv_layer._bias_tf[0], [1, hidden_size]) + + basis_neighs = \ + tf.matmul(kernel_input.astype(np.float32), basis_weights_tf) +\ + basis_biases_tf + basis_neighs = tf.nn.leaky_relu(basis_neighs) + + _, _, counts = tf.unique_with_counts(neighborhood._neighbors[:, 1]) + max_num_nb = tf.reduce_max(counts).numpy() + + with self.subTest(name='features'): + def basis_proj_features(features_in): + return basis_proj(basis_neighs, + features_in, + neighborhood) / (max_num_nb) + + self.assert_jacobian_is_correct_fn( + basis_proj_features, [np.float32(features)], atol=1e-4, delta=1e-3) + + with self.subTest(name='neigh_basis'): + def basis_proj_basis_neighs(basis_neighs_in): + return basis_proj(basis_neighs_in, + features, + neighborhood) / (max_num_nb) + + self.assert_jacobian_is_correct_fn( + basis_proj_basis_neighs, + [np.float32(basis_neighs)], + atol=1e-4, delta=1e-3) + + +if __name__ == '__main___': + test_case.main() diff --git a/tensorflow_graphics/projects/point_convolutions/pylib/pc/layers/tests/monte_carlo_conv_test.py b/tensorflow_graphics/projects/point_convolutions/pylib/pc/layers/tests/monte_carlo_conv_test.py new file mode 100644 index 000000000..ec2e2555b --- /dev/null +++ b/tensorflow_graphics/projects/point_convolutions/pylib/pc/layers/tests/monte_carlo_conv_test.py @@ -0,0 +1,223 @@ +# Copyright 2020 The TensorFlow Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific +"""Class to test Monte Carlo convolutions""" + +import os +import sys +import numpy as np +import tensorflow as tf +from absl.testing import parameterized +from tensorflow_graphics.util import test_case + +from pylib.pc import PointCloud, Grid, Neighborhood, KDEMode, AABB +from pylib.pc.tests import utils +from pylib.pc.layers import MCConv + + +class MCConvTest(test_case.TestCase): + + @parameterized.parameters( + (2000, 200, [3, 3], 16, 0.7, 8, 2), + (4000, 400, [3, 3], 8, np.sqrt(2), 8, 2), + (2000, 200, [1, 3], 16, 0.7, 8, 3), + (4000, 400, [3, 3], 8, 0.7, 8, 3), + (4000, 100, [3, 1], 1, np.sqrt(3), 16, 3), + (2000, 200, [3, 3], 16, 0.7, 8, 4), + (4000, 400, [1, 3], 8, np.sqrt(4), 32, 4) + ) + def test_convolution(self, + num_points, + num_samples, + num_features, + batch_size, + radius, + hidden_size, + dimension): + cell_sizes = np.float32(np.repeat(radius, dimension)) + points, batch_ids = utils._create_random_point_cloud_segmented( + batch_size, num_points, dimension=dimension) + features = np.random.rand(num_points, num_features[0]) + point_cloud = PointCloud(points, batch_ids) + + point_samples, batch_ids_samples = \ + utils._create_random_point_cloud_segmented( + batch_size, num_samples, dimension=dimension) + + point_cloud_samples = PointCloud(point_samples, batch_ids_samples) + grid = Grid(point_cloud, cell_sizes) + neighborhood = Neighborhood(grid, cell_sizes, point_cloud_samples) + # tf + conv_layer = MCConv( + num_features[0], num_features[1], dimension, 1, [hidden_size], + non_linearity_type='relu') + conv_result_tf = conv_layer( + features, point_cloud, point_cloud_samples, radius, neighborhood) + + # numpy + pdf = neighborhood.get_pdf().numpy() + neighbor_ids = neighborhood._original_neigh_ids.numpy() + nb_ranges = neighborhood._samples_neigh_ranges.numpy() + + # extract variables + hidden_weights = \ + tf.reshape(conv_layer._weights_tf[0], [dimension, hidden_size]).numpy() + hidden_biases = \ + tf.reshape(conv_layer._bias_tf[0], [1, hidden_size]).numpy() + weights = \ + tf.reshape(conv_layer._final_weights_tf, + [num_features[0] * hidden_size, num_features[1]]).numpy() + + features_on_neighbors = features[neighbor_ids[:, 0]] + # compute first layer of kernel MLP + point_diff = (points[neighbor_ids[:, 0]] -\ + point_samples[neighbor_ids[:, 1]])\ + / np.expand_dims(cell_sizes, 0) + + latent_per_nb = np.dot(point_diff, hidden_weights) + hidden_biases + + latent_relu_per_nb = np.maximum(latent_per_nb, 0) + + # Monte-Carlo integration after first layer + # weighting with pdf + weighted_features_per_nb = np.expand_dims(features_on_neighbors, 2) * \ + np.expand_dims(latent_relu_per_nb, 1) / \ + np.expand_dims(pdf, [1, 2]) + nb_ranges = np.concatenate(([0], nb_ranges), axis=0) + # sum (integration) + weighted_latent_per_sample = \ + np.zeros([num_samples, num_features[0], hidden_size]) + for i in range(num_samples): + weighted_latent_per_sample[i] = \ + np.sum(weighted_features_per_nb[nb_ranges[i]:nb_ranges[i + 1]], + axis=0) + # second layer of MLP (linear) + weighted_latent_per_sample = np.reshape(weighted_latent_per_sample, + [num_samples, -1]) + conv_result_np = np.matmul(weighted_latent_per_sample, weights) + + self.assertAllClose(conv_result_tf, conv_result_np) + + @parameterized.parameters( + (8, 4, [8, 8], 2, np.sqrt(3) * 1.25, 8, 3) + ) + def test_conv_jacobian_params(self, + num_points, + num_samples, + num_features, + batch_size, + radius, + hidden_size, + dimension): + cell_sizes = np.float32(np.repeat(radius, dimension)) + 
+
+  @parameterized.parameters(
+    (8, 4, [8, 8], 2, np.sqrt(3) * 1.25, 8, 3)
+  )
+  def test_conv_jacobian_params(self,
+                                num_points,
+                                num_samples,
+                                num_features,
+                                batch_size,
+                                radius,
+                                hidden_size,
+                                dimension):
+    cell_sizes = np.float32(np.repeat(radius, dimension))
+    points, batch_ids = utils._create_random_point_cloud_segmented(
+        batch_size, num_points, dimension=dimension)
+    point_cloud = PointCloud(points, batch_ids)
+    point_samples, batch_ids_samples = \
+        utils._create_random_point_cloud_segmented(
+            batch_size, num_samples, dimension=dimension)
+
+    point_cloud_samples = PointCloud(point_samples, batch_ids_samples)
+    grid = Grid(point_cloud, cell_sizes)
+    neighborhood = Neighborhood(grid, cell_sizes, point_cloud_samples)
+    conv_layer = MCConv(
+        num_features[0], num_features[1], dimension, 1, [hidden_size])
+
+    features = np.random.rand(num_points, num_features[0])
+
+    with self.subTest(name='features'):
+      def conv_features(features_in):
+        conv_result = conv_layer(
+            features_in, point_cloud, point_cloud_samples, radius, neighborhood)
+        return conv_result
+
+      self.assert_jacobian_is_correct_fn(
+          conv_features, [features], atol=1e-4, delta=1e-3)
+
+    with self.subTest(name='params_basis_axis_proj'):
+      def conv_basis(weights_tf_in):
+        conv_layer._weights_tf[0] = weights_tf_in
+        conv_result = conv_layer(
+            features, point_cloud, point_cloud_samples, radius, neighborhood)
+        return conv_result
+
+      weights_tf = conv_layer._weights_tf[0]
+      self.assert_jacobian_is_correct_fn(
+          conv_basis, [weights_tf], atol=1e-4, delta=1e-3)
+
+    with self.subTest(name='params_basis_bias_proj'):
+      def conv_basis(bias_tf_in):
+        conv_layer._bias_tf[0] = bias_tf_in
+        conv_result = conv_layer(
+            features, point_cloud, point_cloud_samples, radius, neighborhood)
+        return conv_result
+
+      bias_tf = conv_layer._bias_tf[0]
+      self.assert_jacobian_is_correct_fn(
+          conv_basis, [bias_tf], atol=1e-4, delta=1e-4)
+
+    with self.subTest(name='params_second_layer'):
+      def conv_weights(weights_in):
+        conv_layer._final_weights_tf = weights_in
+        conv_result = conv_layer(
+            features, point_cloud, point_cloud_samples, radius, neighborhood)
+        return conv_result
+
+      weights = conv_layer._final_weights_tf
+      self.assert_jacobian_is_correct_fn(
+          conv_weights, [weights], atol=1e-4, delta=1e-3)
+
+  @parameterized.parameters(
+    (8, 4, [8, 8], 2, np.sqrt(3) * 1.25, 8, 3),
+  )
+  def test_conv_jacobian_points(self,
+                                num_points,
+                                num_samples,
+                                num_features,
+                                batch_size,
+                                radius,
+                                hidden_size,
+                                dimension):
+    cell_sizes = np.float32(np.repeat(radius, dimension))
+    points, batch_ids = utils._create_random_point_cloud_segmented(
+        batch_size, num_points, dimension=dimension)
+    features = np.random.rand(num_points, num_features[0])
+
+    point_samples, batch_ids_samples = \
+        utils._create_random_point_cloud_segmented(
+            batch_size, num_samples, dimension=dimension)
+
+    point_cloud_samples = PointCloud(point_samples, batch_ids_samples)
+    point_cloud = PointCloud(points, batch_ids)
+    grid = Grid(point_cloud, cell_sizes)
+    neighborhood = Neighborhood(grid, cell_sizes, point_cloud_samples)
+    neighborhood.compute_pdf()
+
+    conv_layer = MCConv(
+        num_features[0], num_features[1], dimension, 1, [hidden_size], 'elu')
+
+    def conv_points(points_in):
+      point_cloud._points = points_in
+      neighborhood._grid._sorted_points = \
+          tf.gather(points_in, grid._sorted_indices)
+
+      conv_result = conv_layer(
+          features, point_cloud, point_cloud_samples, radius, neighborhood)
+
+      return conv_result
+
+    self.assert_jacobian_is_correct_fn(
+        conv_points, [np.float32(points)], atol=1e-4, delta=1e-3)
+
+
+if __name__ == '__main__':
+  test_case.main()
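Both jacobian tests above follow one pattern: the closure handed to `assert_jacobian_is_correct_fn` reuses the prebuilt layer, grid, and neighborhood and swaps in only the tensor under test, so the finite differences probe one input at a time. For the point coordinates this also means regathering the grid's sorted copy; a condensed sketch of that closure (names as in the test, shown only to highlight the two in-place swaps):

    def conv_points(points_in):
      point_cloud._points = points_in  # raw coordinates under test
      neighborhood._grid._sorted_points = tf.gather(  # keep the grid in sync
          points_in, grid._sorted_indices)
      return conv_layer(features, point_cloud, point_cloud_samples,
                        radius, neighborhood)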
diff --git a/tensorflow_graphics/projects/point_convolutions/pylib/pc/layers/utils.py b/tensorflow_graphics/projects/point_convolutions/pylib/pc/layers/utils.py
index 7c2513935..14e0e26fa 100644
--- a/tensorflow_graphics/projects/point_convolutions/pylib/pc/layers/utils.py
+++ b/tensorflow_graphics/projects/point_convolutions/pylib/pc/layers/utils.py
@@ -48,6 +48,7 @@ def _format_output(features, point_cloud, return_sorted, return_padded):
     features = tf.gather(features, point_cloud._sorted_indices_batch)
   return features
 
+'''
 def random_rotation(points, name=None):
   """ Method to rotate 3D points randomly.
@@ -235,11 +236,5 @@ def cube_kernel_points(cbrt_num_points, name):
   points = np.stack((x.flatten(), y.flatten(), z.flatten()), axis=1)
   return tf.Variable(points, dtype=tf.float32)
 
+'''
-
-def _identity(features, *args, **kwargs):
-  """ Simple identity layer, to be used as placeholder.
-
-  Used to replace projection shortcuts, if not desired.
-
-  """
-  return features

From 46d7c3edf0721f7656829079463c7f0501fa020f Mon Sep 17 00:00:00 2001
From: Michael Schelling
Date: Thu, 20 May 2021 15:50:47 +0200
Subject: [PATCH 29/29] init PointConv

---
 .../pylib/pc/layers/PointConv.py              | 281 ++++++++++++++++++
 .../pylib/pc/layers/__init__.py               |   5 +-
 .../pylib/pc/layers/tests/point_conv_test.py  | 259 ++++++++++++++++
 3 files changed, 543 insertions(+), 2 deletions(-)
 create mode 100644 tensorflow_graphics/projects/point_convolutions/pylib/pc/layers/PointConv.py
 create mode 100644 tensorflow_graphics/projects/point_convolutions/pylib/pc/layers/tests/point_conv_test.py

diff --git a/tensorflow_graphics/projects/point_convolutions/pylib/pc/layers/PointConv.py b/tensorflow_graphics/projects/point_convolutions/pylib/pc/layers/PointConv.py
new file mode 100644
index 000000000..1a9fa9452
--- /dev/null
+++ b/tensorflow_graphics/projects/point_convolutions/pylib/pc/layers/PointConv.py
@@ -0,0 +1,281 @@
+# Copyright 2020 The TensorFlow Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Classes for PointConv point cloud convolutions"""
+
+import tensorflow as tf
+from pylib.pc.utils import _flatten_features
+
+from pylib.pc import PointCloud
+from pylib.pc import Grid
+from pylib.pc import Neighborhood
+from pylib.pc import KDEMode
+
+from pylib.pc.custom_ops import basis_proj
+from pylib.pc.layers.utils import _format_output
+
+non_linearity_types = {'relu': tf.nn.relu,
+                       'lrelu': tf.nn.leaky_relu,
+                       'leakyrelu': tf.nn.leaky_relu,
+                       'leaky_relu': tf.nn.leaky_relu,
+                       'elu': tf.nn.elu}
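+# A minimal lookup sketch for the table above (keys are matched
+# case-insensitively, since `_point_conv` lowercases the requested name):
+#   activation = non_linearity_types['lrelu']  # tf.nn.leaky_relu
+#   basis_neighs = activation(basis_neighs)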
+
+
+class PointConv(tf.Module):
+  """ PointConv convolution for point clouds.
+
+  Based on the paper [PointConv: Deep Convolutional Networks on 3D Point
+  Clouds. Wu et al., 2019](https://arxiv.org/abs/1811.07246).
+  Uses a single MLP with one hidden layer as convolution kernel.
+
+  Args:
+    num_features_in: An `int`, C_in, the number of features per input point.
+    num_features_out: An `int`, C_out, the number of features to compute.
+    num_dims: An `int`, the input dimension to the kernel MLP. Should be the
+      dimensionality of the point cloud.
+    size_hidden: An `int`, the number of neurons in the hidden layer of the
+      kernel MLP, must be in `[8, 16, 32]`, defaults to `32`. (optional)
+    non_linearity_type: A `string`, specifies the type of the activation
+      function used inside the kernel MLP.
+      Possible: `'ReLU', 'lReLU', 'ELU'`, defaults to ReLU. (optional)
+    initializer_weights: A `tf.initializer` for the kernel MLP weights,
+      defaults to `GlorotNormal`. (optional)
+    initializer_biases: A `tf.initializer` for the kernel MLP biases,
+      defaults to `zeros`. (optional)
+
+  """
+
+  def __init__(self,
+               num_features_in,
+               num_features_out,
+               num_dims,
+               size_hidden=32,
+               non_linearity_type='relu',
+               initializer_weights=None,
+               initializer_biases=None,
+               name=None):
+
+    super().__init__(name=name)
+
+    self._num_features_in = num_features_in
+    self._num_features_out = num_features_out
+    self._size_hidden = size_hidden
+    self._num_dims = num_dims
+    self._non_linearity_type = non_linearity_type
+
+    if name is None:
+      self._name = 'PointConv'
+    else:
+      self._name = name
+
+    # initialize variables
+    if initializer_weights is None:
+      initializer_weights = tf.initializers.GlorotNormal
+    if initializer_biases is None:
+      initializer_biases = tf.initializers.zeros
+
+    # Hidden layer of the kernel.
+    weights_init_obj = initializer_weights()
+    self._basis_axis_tf = tf.Variable(
+        weights_init_obj(
+            shape=[self._num_dims, self._size_hidden],
+            dtype=tf.float32),
+        trainable=True,
+        name=self._name + "/hidden_vectors")
+
+    bias_init_obj = initializer_biases()
+    self._basis_bias_tf = tf.Variable(
+        bias_init_obj(
+            shape=[1, self._size_hidden],
+            dtype=tf.float32),
+        trainable=True,
+        name=self._name + "/hidden_bias")
+
+    # Convolution weights.
+    self._weights = tf.Variable(
+        weights_init_obj(
+            shape=[
+                self._size_hidden * self._num_features_in,
+                self._num_features_out],
+            dtype=tf.float32),
+        trainable=True,
+        name=self._name + "/conv_weights")
+
+    # Weights of the non-linear transform of the pdf.
+    self._weights_pdf = \
+        [tf.Variable(
+            weights_init_obj(
+                shape=[1, 16],
+                dtype=tf.float32),
+            trainable=True,
+            name=self._name + "/pdf_weights_1"),
+         tf.Variable(
+            weights_init_obj(
+                shape=[16, 1],
+                dtype=tf.float32),
+            trainable=True,
+            name=self._name + "/pdf_weights_2")]
+
+    self._biases_pdf = \
+        [tf.Variable(
+            bias_init_obj(
+                shape=[1, 16],
+                dtype=tf.float32),
+            trainable=True,
+            name=self._name + "/pdf_biases_1"),
+         tf.Variable(
+            bias_init_obj(
+                shape=[1, 1],
+                dtype=tf.float32),
+            trainable=True,
+            name=self._name + "/pdf_biases_2")]
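+  # Note: `_weights_pdf` / `_biases_pdf` above form a small 1 -> 16 -> 1 MLP
+  # that maps the estimated point density to a learned weight; this is
+  # PointConv's stand-in for MCConv's direct division of the features by the
+  # pdf (cf. the numpy reference in monte_carlo_conv_test.py).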
+
+  def _point_conv(self,
+                  kernel_inputs,
+                  neighborhood,
+                  pdf,
+                  features,
+                  non_linearity_type='relu'):
+    """ Method to compute a PointConv convolution using a single
+    MLP with one hidden layer as implicit convolution kernel function.
+
+    Args:
+      kernel_inputs: A `float` `Tensor` of shape `[M, L]`, the input to the
+        kernel MLP.
+      neighborhood: A `Neighborhood` instance.
+      pdf: A `float` `Tensor` of shape `[M]`, the point densities.
+      features: A `float` `Tensor` of shape `[N, C1]`, the input features.
+      non_linearity_type: A `string`, specifies the type of the activation
+        function used inside the kernel MLP.
+        Possible: `'ReLU', 'lReLU', 'ELU'`, defaults to ReLU. (optional)
+
+    Returns:
+      A `float` `Tensor` of shape `[N, C2]`, the output features.
+
+    """
+
+    # Compute the hidden layer MLP
+    basis_neighs = tf.matmul(kernel_inputs, self._basis_axis_tf) + \
+        self._basis_bias_tf
+    basis_neighs = \
+        non_linearity_types[non_linearity_type.lower()](basis_neighs)
+
+    # Normalize the pdf
+    max_pdf = tf.math.unsorted_segment_max(
+        pdf,
+        neighborhood._original_neigh_ids[:, 1],
+        tf.shape(neighborhood._samples_neigh_ranges)[0])
+    neigh_max_pdfs = tf.gather(max_pdf, neighborhood._original_neigh_ids[:, 1])
+    cur_pdf = pdf / neigh_max_pdfs
+    cur_pdf = tf.reshape(cur_pdf, [-1, 1])
+
+    # Non-linear transform pdf
+    cur_pdf = tf.nn.relu(tf.matmul(cur_pdf, self._weights_pdf[0]) +
+                         self._biases_pdf[0])
+    cur_pdf = tf.matmul(cur_pdf, self._weights_pdf[1]) + self._biases_pdf[1]
+
+    # Scale features
+    basis_neighs = basis_neighs / cur_pdf
+
+    # Compute the projection to the samples.
+    weighted_features = basis_proj(
+        basis_neighs,
+        features,
+        neighborhood)
+
+    # Compute convolution - hidden layer to output (linear)
+    convolution_result = tf.matmul(
+        tf.reshape(weighted_features,
+                   [-1, self._num_features_in * self._size_hidden]),
+        self._weights)
+
+    return convolution_result
+
+  def __call__(self,
+               features,
+               point_cloud_in: PointCloud,
+               point_cloud_out: PointCloud,
+               radius,
+               neighborhood=None,
+               bandwidth=0.2,
+               return_sorted=False,
+               return_padded=False,
+               name=None):
+    """ Computes the PointConv convolution between two point clouds.
+
+    Note:
+      In the following, `A1` to `An` are optional batch dimensions.
+      `C_in` is the number of input features.
+      `C_out` is the number of output features.
+
+    Args:
+      features: A `float` `Tensor` of shape `[N_in, C_in]` or
+        `[A1, ..., An, V, C_in]`.
+      point_cloud_in: A `PointCloud` instance, on which the features are
+        defined.
+      point_cloud_out: A `PointCloud` instance, on which the output
+        features are defined.
+      radius: A `float`, the convolution radius.
+      neighborhood: A `Neighborhood` instance, defining the neighborhood
+        with centers from `point_cloud_out` and neighbors in `point_cloud_in`.
+        If `None` it is computed internally. (optional)
+      bandwidth: A `float`, the bandwidth used in the kernel density
+        estimation on the input point cloud. (optional)
+      return_sorted: A `boolean`, if `True` the output tensor is sorted
+        according to the batch_ids. (optional)
+      return_padded: A `bool`, if `True` the output tensor is sorted and
+        zero padded. (optional)
+
+    Returns:
+      A `float` `Tensor` of shape
+        `[N_out, C_out]`, if `return_padded` is `False`
+      or
+        `[A1, ..., An, V_out, C_out]`, if `return_padded` is `True`.
+
+    """
+
+    features = tf.cast(tf.convert_to_tensor(value=features),
+                       dtype=tf.float32)
+    features = _flatten_features(features, point_cloud_in)
+
+    # Create the radii tensor.
+    radius = tf.reshape(
+        tf.convert_to_tensor(value=radius, dtype=tf.float32), [1, 1])
+    radii_tensor = tf.repeat(radius, self._num_dims)
+
+    # Create the bandwidth tensor.
+    bandwidth_tensor = tf.repeat(bandwidth, self._num_dims)
+
+    if neighborhood is None:
+      # Compute the grid
+      grid = Grid(point_cloud_in, radii_tensor)
+      # Compute the neighborhoods
+      neigh = Neighborhood(grid, radii_tensor, point_cloud_out)
+    else:
+      neigh = neighborhood
+    pdf = neigh.get_pdf(bandwidth=bandwidth_tensor, mode=KDEMode.constant)
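+    # Note: the kernel inputs computed below are raw point differences;
+    # unlike MCConv they are not divided by the radius, and density
+    # compensation is handled by the learned pdf transform in `_point_conv`.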
+
+    # Compute kernel inputs.
+    neigh_point_coords = tf.gather(
+        point_cloud_in._points, neigh._original_neigh_ids[:, 0])
+    center_point_coords = tf.gather(
+        point_cloud_out._points, neigh._original_neigh_ids[:, 1])
+    points_diff = (neigh_point_coords - center_point_coords)
+
+    # Compute PointConv convolution
+    convolution_result = self._point_conv(
+        points_diff, neigh, pdf, features, self._non_linearity_type)
+
+    return _format_output(convolution_result,
+                          point_cloud_out,
+                          return_sorted,
+                          return_padded)
diff --git a/tensorflow_graphics/projects/point_convolutions/pylib/pc/layers/__init__.py b/tensorflow_graphics/projects/point_convolutions/pylib/pc/layers/__init__.py
index e21220129..bf7c59200 100644
--- a/tensorflow_graphics/projects/point_convolutions/pylib/pc/layers/__init__.py
+++ b/tensorflow_graphics/projects/point_convolutions/pylib/pc/layers/__init__.py
@@ -18,9 +18,10 @@
 from .Conv1x1 import Conv1x1
 from .MCConv import MCConv
-'''
-from .KPConv import KPConv
+
+#from .KPConv import KPConv
 from .PointConv import PointConv
+'''
 from .utils import spherical_kernel_points, \
   cube_kernel_points
 '''
\ No newline at end of file
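With the export in place, the layer can be used on its own; a minimal usage sketch, assuming the `pylib.pc` API shown in the tests (sizes and variable names here are illustrative only):

    import numpy as np
    from pylib.pc import PointCloud
    from pylib.pc.layers import PointConv

    points = np.random.rand(1000, 3).astype(np.float32)  # one batch of points
    batch_ids = np.zeros(1000, dtype=np.int32)
    samples = points[::4]                                # output positions
    batch_ids_samples = batch_ids[::4]

    layer = PointConv(num_features_in=8, num_features_out=16, num_dims=3)
    features = np.random.rand(1000, 8).astype(np.float32)
    # the neighborhood is computed internally from the radius when omitted
    out = layer(features,
                PointCloud(points, batch_ids),
                PointCloud(samples, batch_ids_samples),
                radius=0.1)  # expected shape: [250, 16]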
diff --git a/tensorflow_graphics/projects/point_convolutions/pylib/pc/layers/tests/point_conv_test.py b/tensorflow_graphics/projects/point_convolutions/pylib/pc/layers/tests/point_conv_test.py
new file mode 100644
index 000000000..91b1ea720
--- /dev/null
+++ b/tensorflow_graphics/projects/point_convolutions/pylib/pc/layers/tests/point_conv_test.py
@@ -0,0 +1,259 @@
+# Copyright 2020 The TensorFlow Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Class to test PointConv convolutions"""
+
+import os
+import sys
+import numpy as np
+import tensorflow as tf
+from absl.testing import parameterized
+from tensorflow_graphics.util import test_case
+
+from pylib.pc import PointCloud, Grid, Neighborhood, KDEMode, AABB
+from pylib.pc.tests import utils
+from pylib.pc.layers import PointConv
+
+
+class PointConvTest(test_case.TestCase):
+
+  @parameterized.parameters(
+    (2000, 200, [3, 3], 16, 0.7, 8, 2),
+    (4000, 400, [3, 3], 8, np.sqrt(2), 8, 2),
+    (2000, 200, [1, 3], 16, 0.7, 8, 3),
+    (4000, 400, [3, 3], 8, 0.7, 8, 3),
+    (4000, 100, [3, 1], 1, np.sqrt(3), 16, 3),
+    (2000, 200, [3, 3], 16, 0.7, 8, 4),
+    (4000, 400, [1, 3], 8, np.sqrt(4), 32, 4)
+  )
+  def test_convolution(self,
+                       num_points,
+                       num_samples,
+                       num_features,
+                       batch_size,
+                       radius,
+                       hidden_size,
+                       dimension):
+    cell_sizes = np.float32(np.repeat(radius, dimension))
+    points, batch_ids = utils._create_random_point_cloud_segmented(
+        batch_size, num_points, dimension=dimension)
+    features = np.random.rand(num_points, num_features[0])
+    point_cloud = PointCloud(points, batch_ids)
+
+    point_samples, batch_ids_samples = \
+        utils._create_random_point_cloud_segmented(
+            batch_size, num_samples, dimension=dimension)
+
+    point_cloud_samples = PointCloud(point_samples, batch_ids_samples)
+    grid = Grid(point_cloud, cell_sizes)
+    neighborhood = Neighborhood(grid, cell_sizes, point_cloud_samples)
+    pdf = neighborhood.get_pdf()
+    # tf
+    conv_layer = PointConv(
+        num_features[0], num_features[1], dimension, hidden_size,
+        non_linearity_type='relu')
+    conv_result_tf = conv_layer(
+        features, point_cloud, point_cloud_samples, radius, neighborhood)
+
+    # numpy
+    neighbor_ids = neighborhood._original_neigh_ids.numpy()
+    nb_ranges = neighborhood._samples_neigh_ranges.numpy()
+    max_pdf = tf.math.unsorted_segment_max(
+        pdf,
+        neighborhood._original_neigh_ids[:, 1],
+        tf.shape(neighborhood._samples_neigh_ranges)[0])
+    neigh_max_pdfs = tf.gather(max_pdf, neighborhood._original_neigh_ids[:, 1])
+    pdf = pdf.numpy() / neigh_max_pdfs.numpy()
+
+    # extract variables
+    hidden_weights = conv_layer._basis_axis_tf.numpy()
+    hidden_biases = conv_layer._basis_bias_tf.numpy()
+    pdf_weights = conv_layer._weights_pdf
+    pdf_biases = conv_layer._biases_pdf
+    weights = conv_layer._weights.numpy()
+
+    features_on_neighbors = features[neighbor_ids[:, 0]]
+    # compute first layer of kernel MLP
+    point_diff = (points[neighbor_ids[:, 0]] -
+                  point_samples[neighbor_ids[:, 1]])
+
+    latent_per_nb = np.dot(point_diff, hidden_weights) + hidden_biases
+    latent_relu_per_nb = np.maximum(latent_per_nb, 0)
+
+    # non-linear transform pdf with 2 layer MLP with ReLU
+    cur_pdf = np.dot(pdf.reshape([-1, 1]), pdf_weights[0].numpy()) + \
+        pdf_biases[0].numpy()
+    cur_pdf = np.maximum(cur_pdf, 0)
+    cur_pdf = np.dot(cur_pdf, pdf_weights[1].numpy()) + pdf_biases[1].numpy()
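+    # The three lines above replicate the layer's learned inverse-density
+    # weighting: the per-neighbor pdf (normalized by its per-sample maximum)
+    # is passed through the 1 -> 16 -> 1 ReLU MLP stored in `_weights_pdf`
+    # and `_biases_pdf`.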
+
+    # weighting with pdf
+    latent_relu_per_nb /= cur_pdf
+
+    # Monte-Carlo integration after first layer
+    weighted_features_per_nb = np.expand_dims(features_on_neighbors, 2) * \
+        np.expand_dims(latent_relu_per_nb, 1)
+    nb_ranges = np.concatenate(([0], nb_ranges), axis=0)
+    # sum (integration)
+    weighted_latent_per_sample = \
+        np.zeros([num_samples, num_features[0], hidden_size])
+    for i in range(num_samples):
+      weighted_latent_per_sample[i] = \
+          np.sum(weighted_features_per_nb[nb_ranges[i]:nb_ranges[i + 1]],
+                 axis=0)
+    # second layer of MLP (linear)
+    weighted_latent_per_sample = np.reshape(weighted_latent_per_sample,
+                                            [num_samples, -1])
+    conv_result_np = np.matmul(weighted_latent_per_sample, weights)
+
+    self.assertAllClose(conv_result_tf, conv_result_np, atol=1e-3, rtol=1e-3)
+
+  @parameterized.parameters(
+    (8, 4, [8, 8], 2, np.sqrt(3) * 1.25, 8, 3)
+  )
+  def test_conv_jacobian_params(self,
+                                num_points,
+                                num_samples,
+                                num_features,
+                                batch_size,
+                                radius,
+                                hidden_size,
+                                dimension):
+    cell_sizes = np.float32(np.repeat(radius, dimension))
+    points, batch_ids = utils._create_random_point_cloud_segmented(
+        batch_size, num_points, dimension=dimension)
+    point_cloud = PointCloud(points, batch_ids)
+    point_samples, batch_ids_samples = \
+        utils._create_random_point_cloud_segmented(
+            batch_size, num_samples, dimension=dimension)
+
+    point_cloud_samples = PointCloud(point_samples, batch_ids_samples)
+    grid = Grid(point_cloud, cell_sizes)
+    neighborhood = Neighborhood(grid, cell_sizes, point_cloud_samples)
+    conv_layer = PointConv(
+        num_features[0], num_features[1], dimension, hidden_size)
+
+    features = np.random.rand(num_points, num_features[0])
+
+    with self.subTest(name='features'):
+      def conv_features(features_in):
+        conv_result = conv_layer(
+            features_in, point_cloud, point_cloud_samples, radius, neighborhood)
+        return conv_result
+
+      self.assert_jacobian_is_correct_fn(
+          conv_features, [features], atol=1e-3, delta=1e-3)
+
+    with self.subTest(name='params_basis_axis_proj'):
+      def conv_basis(weights_tf_in):
+        conv_layer._basis_axis_tf = weights_tf_in
+        conv_result = conv_layer(
+            features, point_cloud, point_cloud_samples, radius, neighborhood)
+        return conv_result
+
+      weights_tf = conv_layer._basis_axis_tf
+      self.assert_jacobian_is_correct_fn(
+          conv_basis, [weights_tf], atol=1e-2, delta=1e-3)
+
+    with self.subTest(name='params_basis_bias_proj'):
+      def conv_basis(bias_tf_in):
+        conv_layer._basis_bias_tf = bias_tf_in
+        conv_result = conv_layer(
+            features, point_cloud, point_cloud_samples, radius, neighborhood)
+        return conv_result
+
+      bias_tf = conv_layer._basis_bias_tf
+      self.assert_jacobian_is_correct_fn(
+          conv_basis, [bias_tf], atol=1e-2, delta=1e-4)
+
+    with self.subTest(name='params_pdf_transform'):
+      def conv_pdf(bias_pdf_tf_in1,
+                   bias_pdf_tf_in2,
+                   weights_pdf_tf_in1,
+                   weights_pdf_tf_in2):
+        conv_layer._biases_pdf = [bias_pdf_tf_in1, bias_pdf_tf_in2]
+        conv_layer._weights_pdf = [weights_pdf_tf_in1, weights_pdf_tf_in2]
+        conv_result = conv_layer(
+            features, point_cloud, point_cloud_samples, radius, neighborhood)
+        return conv_result
+
+      bias_pdf_tf = conv_layer._biases_pdf
+      weights_pdf_tf = conv_layer._weights_pdf
+      self.assert_jacobian_is_correct_fn(
+          conv_pdf,
+          [bias_pdf_tf[0], bias_pdf_tf[1],
+           weights_pdf_tf[0], weights_pdf_tf[1]],
+          atol=5e-3, delta=1e-3)
+
+    with self.subTest(name='params_second_layer'):
+      def conv_weights(weights_in):
+        conv_layer._weights = weights_in
+        conv_result = conv_layer(
+            features, point_cloud, point_cloud_samples, radius, neighborhood)
+        return conv_result
+
+      weights = conv_layer._weights
+      self.assert_jacobian_is_correct_fn(
+          conv_weights, [weights], atol=1e-2, delta=1e-3)
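+  # The commented-out parameter tuples below are kept deliberately: they
+  # trigger the known neighbor-id corruption on dimension 2 noted in the
+  # TODO and can be re-enabled once it is fixed.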
+
+  @parameterized.parameters(
+    # neighbor ids are currently corrupted on dimension 2: todo fix
+    # (2000, 200, 16, 0.7, 2),
+    # (4000, 400, 8, np.sqrt(2), 2),
+    (8, 4, [8, 8], 2, np.sqrt(3) * 1.25, 8, 3),
+    # (4000, 400, [1, 1], 8, np.sqrt(3), 8, 3),
+    # (4000, 100, [2, 4], 1, np.sqrt(3), 8, 3),
+    # (2000, 200, [4, 2], 16, 0.7, 8, 4),
+    # (4000, 400, [2, 2], 8, np.sqrt(4), 8, 4)
+  )
+  def test_conv_jacobian_points(self,
+                                num_points,
+                                num_samples,
+                                num_features,
+                                batch_size,
+                                radius,
+                                hidden_size,
+                                dimension):
+    cell_sizes = np.float32(np.repeat(radius, dimension))
+    points, batch_ids = utils._create_random_point_cloud_segmented(
+        batch_size, num_points, dimension=dimension)
+    features = np.random.rand(num_points, num_features[0])
+
+    point_samples, batch_ids_samples = \
+        utils._create_random_point_cloud_segmented(
+            batch_size, num_samples, dimension=dimension)
+
+    point_cloud_samples = PointCloud(point_samples, batch_ids_samples)
+    point_cloud = PointCloud(points, batch_ids)
+    grid = Grid(point_cloud, cell_sizes)
+    neighborhood = Neighborhood(grid, cell_sizes, point_cloud_samples)
+    neighborhood.compute_pdf()
+
+    conv_layer = PointConv(
+        num_features[0], num_features[1], dimension, hidden_size, 'elu')
+
+    def conv_points(points_in):
+      point_cloud._points = points_in
+      neighborhood._grid._sorted_points = \
+          tf.gather(points_in, grid._sorted_indices)
+
+      conv_result = conv_layer(
+          features, point_cloud, point_cloud_samples, radius, neighborhood)
+
+      return conv_result
+
+    self.assert_jacobian_is_correct_fn(
+        conv_points, [np.float32(points)], atol=5e-3, delta=1e-2)
+
+
+if __name__ == '__main__':
+  test_case.main()
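Taken together, the two test files document the one substantive difference between the layers' per-neighbor preprocessing; a side-by-side sketch (all names here are illustrative, and `pdf_mlp` stands for the `_weights_pdf`/`_biases_pdf` transform):

    # MCConv: offsets scaled into the unit ball, features divided by the pdf.
    mc_kernel_input = (neigh_coords - center_coords) / radius
    mc_scale = 1.0 / pdf
    # PointConv: raw offsets; the (max-normalized) pdf runs through a learned
    # MLP and rescales the hidden activations instead.
    pc_kernel_input = neigh_coords - center_coords
    pc_scale = 1.0 / pdf_mlp(pdf / max_pdf_per_sample)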