diff --git a/Changes.md b/Changes.md index 68ba3b4f524..b0cf6e0ddc9 100644 --- a/Changes.md +++ b/Changes.md @@ -13,6 +13,8 @@ Features - `staticComponent` : A single character string for the component to use as the static component for the color field. The other two components in the "RGB", "HSV" and "TMI" triplets will be controllable in the widget. - `colorFieldVisible` : A boolean indicating if the color field should be visible or not. - Added a menu item to the color chooser settings to save the UI configuration for the inline color chooser and the dialogue color chooser as a startup script to persist the configuration across Gaffer restarts. +- MeshToLevelSet : Added `destination` plug, allowing multiple input meshes to be merged into a single level set at an arbitrary location. +- LevelSetToMesh : Added `destination` plug, allowing multiple input level sets to be merged into a single mesh at an arbitrary location. Improvements ------------ @@ -31,6 +33,11 @@ Breaking Changes ---------------- - Cycles : Removed custom handling of unnormalized lights. We now rely on Cycles' inbuilt behaviour which results in a brightness difference for unnormalized point, spot and disk lights. +- MeshToLevelSet : Objects which are not meshes are now converted to an empty VDB grid, instead of being left unchanged. +- LevelSetToMesh : + - Objects which are not level sets are now converted to an empty mesh, instead of being left unchanged. + - Removed the `adjustBounds` plug. In the rare case where it is important to recompute slightly tighter bounds, one workaround is to use ShufflePrimitiveVariables to shuffle from "P" to "P" with `adjustBounds` checked. + - Removed support for grid types other than `FloatGrid`. If other types are required, please request them. 
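The `adjustBounds` workaround mentioned above might look something like the following sketch - a minimal Gaffer Python snippet, assuming the standard `ShufflePrimitiveVariables` and `PathFilter` APIs, with illustrative node and path names:

```python
import IECore
import Gaffer
import GafferScene
import GafferVDB

levelSetToMesh = GafferVDB.LevelSetToMesh()

# Hypothetical path - point the filter at whichever locations need exact bounds.
meshFilter = GafferScene.PathFilter()
meshFilter["paths"].setValue( IECore.StringVectorData( [ "/vdb" ] ) )

# Shuffling "P" onto itself leaves the geometry unchanged, but because
# ShufflePrimitiveVariables has an `adjustBounds` plug, enabling it makes the
# node recompute a tight bound for the converted mesh.
recomputeBounds = GafferScene.ShufflePrimitiveVariables()
recomputeBounds["in"].setInput( levelSetToMesh["out"] )
recomputeBounds["filter"].setInput( meshFilter["out"] )
recomputeBounds["shuffles"].addChild( Gaffer.ShufflePlug( "P", "P" ) )
recomputeBounds["adjustBounds"].setValue( True )
```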
1.5.0.0a2 (relative to 1.5.0.0a1) ========= diff --git a/include/GafferScene/MergeCurves.h b/include/GafferScene/MergeCurves.h index 02209dc6257..a90a6cd2f45 100644 --- a/include/GafferScene/MergeCurves.h +++ b/include/GafferScene/MergeCurves.h @@ -52,7 +52,7 @@ class GAFFERSCENE_API MergeCurves : public MergeObjects protected : - IECore::ConstObjectPtr mergeObjects( const std::vector< std::pair< IECore::ConstObjectPtr, Imath::M44f > > &sources, const Gaffer::Context *context ) const override; + IECore::ConstObjectPtr computeMergedObject( const std::vector< std::pair< IECore::ConstObjectPtr, Imath::M44f > > &sources, const Gaffer::Context *context ) const override; private : diff --git a/include/GafferScene/MergeMeshes.h b/include/GafferScene/MergeMeshes.h index 64549e83623..bdc41eee204 100644 --- a/include/GafferScene/MergeMeshes.h +++ b/include/GafferScene/MergeMeshes.h @@ -52,7 +52,7 @@ class GAFFERSCENE_API MergeMeshes : public MergeObjects protected : - IECore::ConstObjectPtr mergeObjects( const std::vector< std::pair< IECore::ConstObjectPtr, Imath::M44f > > &sources, const Gaffer::Context *context ) const override; + IECore::ConstObjectPtr computeMergedObject( const std::vector< std::pair< IECore::ConstObjectPtr, Imath::M44f > > &sources, const Gaffer::Context *context ) const override; private : diff --git a/include/GafferScene/MergeObjects.h b/include/GafferScene/MergeObjects.h index f94814bfd9e..dfeff229594 100644 --- a/include/GafferScene/MergeObjects.h +++ b/include/GafferScene/MergeObjects.h @@ -107,15 +107,30 @@ class GAFFERSCENE_API MergeObjects : public FilteredSceneProcessor Gaffer::ValuePlug::CachePolicy computeCachePolicy( const Gaffer::ValuePlug *output ) const override; - /// @name Actual object merge function + /// If there are any additional plugs that affect the merge, this should be implemented + /// to call the base class, and then return true for those extra plugs. + virtual bool affectsMergedObject( const Gaffer::Plug *input ) const; + + /// If there are any additional plugs that affect the merge, this should be implemented + /// to call the base class, and then add those plugs to the hash. + virtual void hashMergedObject( + const ScenePath &path, const Gaffer::Context *context, IECore::MurmurHash &h + ) const; + + /// Actual object merge function /// This must be implemented by derived classes. It receives a vector of pairs of objects /// and the transform that maps each object into the shared space of the output location. /// - virtual IECore::ConstObjectPtr mergeObjects( + virtual IECore::ConstObjectPtr computeMergedObject( const std::vector< std::pair< IECore::ConstObjectPtr, Imath::M44f > > &sources, const Gaffer::Context *context ) const = 0; + + + + + // \todo - should we offer alternate ways to merge bounds? Can we think of any use cases for this? 
//virtual Imath::Box3f mergeBounds( const std::vector< ScenePath > &sourcePaths, const Gaffer::Context *context ) const; diff --git a/include/GafferScene/MergePoints.h b/include/GafferScene/MergePoints.h index 95c6ab52bae..14f4a472925 100644 --- a/include/GafferScene/MergePoints.h +++ b/include/GafferScene/MergePoints.h @@ -52,7 +52,7 @@ class GAFFERSCENE_API MergePoints : public MergeObjects protected : - IECore::ConstObjectPtr mergeObjects( const std::vector< std::pair< IECore::ConstObjectPtr, Imath::M44f > > &sources, const Gaffer::Context *context ) const override; + IECore::ConstObjectPtr computeMergedObject( const std::vector< std::pair< IECore::ConstObjectPtr, Imath::M44f > > &sources, const Gaffer::Context *context ) const override; private : diff --git a/include/GafferVDB/LevelSetToMesh.h b/include/GafferVDB/LevelSetToMesh.h index 1aaff1eae44..1d76c914c79 100644 --- a/include/GafferVDB/LevelSetToMesh.h +++ b/include/GafferVDB/LevelSetToMesh.h @@ -39,7 +39,7 @@ #include "GafferVDB/Export.h" #include "GafferVDB/TypeIds.h" -#include "GafferScene/Deformer.h" +#include "GafferScene/MergeObjects.h" #include "Gaffer/NumericPlug.h" #include "Gaffer/StringPlug.h" @@ -47,7 +47,7 @@ namespace GafferVDB { -class GAFFERVDB_API LevelSetToMesh : public GafferScene::Deformer +class GAFFERVDB_API LevelSetToMesh : public GafferScene::MergeObjects { public : @@ -55,7 +55,7 @@ class GAFFERVDB_API LevelSetToMesh : public GafferScene::Deformer explicit LevelSetToMesh( const std::string &name=defaultName() ); ~LevelSetToMesh() override; - GAFFER_NODE_DECLARE_TYPE( GafferVDB::LevelSetToMesh, LevelSetToMeshTypeId, GafferScene::Deformer ); + GAFFER_NODE_DECLARE_TYPE( GafferVDB::LevelSetToMesh, LevelSetToMeshTypeId, GafferScene::MergeObjects ); Gaffer::StringPlug *gridPlug(); const Gaffer::StringPlug *gridPlug() const; @@ -68,10 +68,13 @@ class GAFFERVDB_API LevelSetToMesh : public GafferScene::Deformer protected : - bool affectsProcessedObject( const Gaffer::Plug *input ) const override; - void hashProcessedObject( const ScenePath &path, const Gaffer::Context *context, IECore::MurmurHash &h ) const override; - IECore::ConstObjectPtr computeProcessedObject( const ScenePath &path, const Gaffer::Context *context, const IECore::Object *inputObject ) const override; - Gaffer::ValuePlug::CachePolicy processedObjectComputeCachePolicy() const override; + bool affectsMergedObject( const Gaffer::Plug *input ) const override; + + void hashMergedObject( + const ScenePath &path, const Gaffer::Context *context, IECore::MurmurHash &h + ) const override; + + IECore::ConstObjectPtr computeMergedObject( const std::vector< std::pair< IECore::ConstObjectPtr, Imath::M44f > > &sources, const Gaffer::Context *context ) const override; private : diff --git a/include/GafferVDB/MeshToLevelSet.h b/include/GafferVDB/MeshToLevelSet.h index cecb8c3848a..fc6f1b2f2ac 100644 --- a/include/GafferVDB/MeshToLevelSet.h +++ b/include/GafferVDB/MeshToLevelSet.h @@ -39,7 +39,7 @@ #include "GafferVDB/Export.h" #include "GafferVDB/TypeIds.h" -#include "GafferScene/ObjectProcessor.h" +#include "GafferScene/MergeObjects.h" #include "Gaffer/NumericPlug.h" @@ -51,7 +51,7 @@ class StringPlug; namespace GafferVDB { -class GAFFERVDB_API MeshToLevelSet : public GafferScene::ObjectProcessor +class GAFFERVDB_API MeshToLevelSet : public GafferScene::MergeObjects { public : @@ -59,7 +59,7 @@ class GAFFERVDB_API MeshToLevelSet : public GafferScene::ObjectProcessor explicit MeshToLevelSet( const std::string &name=defaultName() ); ~MeshToLevelSet() override; - 
GAFFER_NODE_DECLARE_TYPE( GafferVDB::MeshToLevelSet, MeshToLevelSetTypeId, GafferScene::ObjectProcessor ); + GAFFER_NODE_DECLARE_TYPE( GafferVDB::MeshToLevelSet, MeshToLevelSetTypeId, GafferScene::MergeObjects ); Gaffer::StringPlug *gridPlug(); const Gaffer::StringPlug *gridPlug() const; @@ -75,10 +75,13 @@ class GAFFERVDB_API MeshToLevelSet : public GafferScene::ObjectProcessor protected : - bool affectsProcessedObject( const Gaffer::Plug *plug ) const override; - void hashProcessedObject( const ScenePath &path, const Gaffer::Context *context, IECore::MurmurHash &h ) const override; - IECore::ConstObjectPtr computeProcessedObject( const ScenePath &path, const Gaffer::Context *context, const IECore::Object *inputObject ) const override; - Gaffer::ValuePlug::CachePolicy processedObjectComputeCachePolicy() const override; + bool affectsMergedObject( const Gaffer::Plug *input ) const override; + + void hashMergedObject( + const ScenePath &path, const Gaffer::Context *context, IECore::MurmurHash &h + ) const override; + + IECore::ConstObjectPtr computeMergedObject( const std::vector< std::pair< IECore::ConstObjectPtr, Imath::M44f > > &sources, const Gaffer::Context *context ) const override; private : diff --git a/python/GafferSceneTest/IECoreScenePreviewTest/PrimitiveAlgoTest.py b/python/GafferSceneTest/IECoreScenePreviewTest/PrimitiveAlgoTest.py index 20d4825194c..a35c125b2dd 100644 --- a/python/GafferSceneTest/IECoreScenePreviewTest/PrimitiveAlgoTest.py +++ b/python/GafferSceneTest/IECoreScenePreviewTest/PrimitiveAlgoTest.py @@ -706,6 +706,25 @@ def testMergeFewPerf( self ) : with GafferTest.TestRunner.PerformanceScope() : PrimitiveAlgo.mergePrimitives( meshes ) + @GafferTest.TestRunner.PerformanceTestMethod() + def testSingleMeshPerf( self ) : + + # Calling mergePrimitives with a single source should use transformPrimitives + # and not pay the cost of accumulating topology + + mesh = IECoreScene.MeshPrimitive.createPlane( + imath.Box2f( imath.V2f( -2 ), imath.V2f( 2 ) ), + divisions = imath.V2i( 2000, 2000 ) + ) + + m = imath.M44f() + m.setTranslation( imath.V3f( 0, 1, 0 ) ) + + meshes = [ ( mesh, m ) ] + + with GafferTest.TestRunner.PerformanceScope() : + PrimitiveAlgo.mergePrimitives( meshes ) + if __name__ == "__main__": unittest.main() diff --git a/python/GafferSceneTest/SceneTestCase.py b/python/GafferSceneTest/SceneTestCase.py index 71dbc0a4ff1..1f06cb9298b 100644 --- a/python/GafferSceneTest/SceneTestCase.py +++ b/python/GafferSceneTest/SceneTestCase.py @@ -373,7 +373,7 @@ def assertParallelGetValueComputesObjectOnce( self, scene, path ) : c["scene:path"] = GafferScene.ScenePlug.stringToPath( path ) GafferTest.parallelGetValue( scene["object"], 100 ) - if isinstance( scene.node(), GafferScene.ObjectProcessor ) : + if isinstance( scene.node(), GafferScene.ObjectProcessor ) or isinstance( scene.node(), GafferScene.MergeObjects ) : self.assertEqual( pm.plugStatistics( scene.node()["__processedObject"] ).computeCount, 1 ) elif isinstance( scene.node(), GafferScene.ObjectSource ) : self.assertEqual( pm.plugStatistics( scene.node()["__source"] ).computeCount, 1 ) diff --git a/python/GafferVDBTest/LevelSetToMeshTest.py b/python/GafferVDBTest/LevelSetToMeshTest.py index ce59b2b6a12..367ee05f1e2 100644 --- a/python/GafferVDBTest/LevelSetToMeshTest.py +++ b/python/GafferVDBTest/LevelSetToMeshTest.py @@ -34,6 +34,7 @@ # ########################################################################## +import imath import pathlib import IECore @@ -68,7 +69,7 @@ def testCanConvertLevelSetToMesh( self ) 
: mesh = levelSetToMesh["out"].object( "sphere" ) self.assertTrue( isinstance( mesh, IECoreScene.MeshPrimitive) ) - def testChangingIsoValueUpdatesBounds ( self ) : + def testChangingIsoValueDoesntUpdateBounds ( self ) : sphere = GafferScene.Sphere() sphere["radius"].setValue( 5 ) @@ -84,27 +85,8 @@ def testChangingIsoValueUpdatesBounds ( self ) : levelSetToMesh["in"].setInput( meshToLevelSet["out"] ) self.assertSceneValid( levelSetToMesh["out"] ) - self.assertEqual( levelSetToMesh["adjustBounds"].getValue(), False ) self.assertEqual( levelSetToMesh["out"].bound( "/sphere" ), levelSetToMesh["in"].bound( "/sphere" ) ) - levelSetToMesh["adjustBounds"].setValue( True ) - self.assertSceneValid( levelSetToMesh["out"] ) - self.assertEqual( - levelSetToMesh["out"].bound( "/sphere" ), - levelSetToMesh["out"].object( "/sphere" ).bound() - ) - bound = levelSetToMesh["out"].bound( "/sphere" ) - - levelSetToMesh["isoValue"].setValue( -0.5 ) # Shrinks the output mesh - - self.assertSceneValid( levelSetToMesh["out"] ) - self.assertEqual( - levelSetToMesh["out"].bound( "/sphere" ), - levelSetToMesh["out"].object( "/sphere" ).bound() - ) - self.assertTrue( bound.intersects( levelSetToMesh["out"].bound( "/sphere" ).min() ) ) - self.assertTrue( bound.intersects( levelSetToMesh["out"].bound( "/sphere" ).max() ) ) - def testIncreasingAdapativityDecreasesPolyCount( self ) : sphere = GafferScene.Sphere() @@ -141,3 +123,60 @@ def testParallelGetValueComputesObjectOnce( self ) : levelSetToMesh["grid"].setValue( "ls_sphere" ) self.assertParallelGetValueComputesObjectOnce( levelSetToMesh["out"], "/vdb" ) + + def testMerging( self ): + + # Quick test of merging a ring of spheres into a torus. + # This test checks the number of faces produced, which is a quick way to check that we're + # getting the right amount of overlap, but relies on the specific algorithm used by OpenVDB. + # A future update to OpenVDB might cause this test to fail and require updating, but it would + # be easy enough to validate these results and update the numbers if that happens. ( An OpenVDB + # update that changes this algorithm seems possible, but unlikely ).
+ + sphere = GafferScene.Sphere( "sphere" ) + + duplicate = GafferScene.Duplicate( "duplicate" ) + duplicate["in"].setInput( sphere["out"] ) + duplicate["copies"].setValue( 11 ) + duplicate["transform"]["rotate"].setValue( imath.V3f( 0, 30, 0 ) ) + duplicate["transform"]["pivot"].setValue( imath.V3f( -2, 0, 0 ) ) + self.setFilter( duplicate, path='/sphere' ) + + freezeTransform = GafferScene.FreezeTransform( "freezeTransform" ) + freezeTransform["enabled"].setValue( False ) + freezeTransform["in"].setInput( duplicate["out"] ) + self.setFilter( freezeTransform, path='/*' ) + + meshToLevelSet = GafferVDB.MeshToLevelSet( "meshToLevelSet" ) + meshToLevelSet["in"].setInput( freezeTransform["out"] ) + self.setFilter( meshToLevelSet, path='/*' ) + + levelSetToMesh = GafferVDB.LevelSetToMesh( "levelSetToMesh" ) + levelSetToMesh["in"].setInput( meshToLevelSet["out"] ) + self.setFilter( levelSetToMesh, path='/*' ) + + # We're not yet merging, so the spheres all get converted to the same mesh + self.assertEqual( levelSetToMesh["out"].object( "/sphere" ).numFaces(), 1854 ) + self.assertEqual( levelSetToMesh["out"].object( "/sphere6" ).numFaces(), 1854 ) + + # Merge into a big donut + levelSetToMesh["destination"].setValue( '/merged' ) + + unfrozen = levelSetToMesh["out"].object( "/merged" ) + self.assertEqual( unfrozen.numFaces(), 11336 ) + + # Now try freezing the transform to make sure we get matching results + freezeTransform["enabled"].setValue( True ) + + # The individual spheres are now each a bit different + levelSetToMesh["destination"].setValue( '${scene:path}' ) + self.assertEqual( levelSetToMesh["out"].object( "/sphere" ).numFaces(), 1854 ) + self.assertEqual( levelSetToMesh["out"].object( "/sphere5" ).numFaces(), 1842 ) + levelSetToMesh["destination"].setValue( '/merged' ) + + # The combined mesh is very slightly different ( only because of resampling error + # when rotating the grids ). The result is different, but not enough to change the + # face count. + frozen = levelSetToMesh["out"].object( "/merged" ) + self.assertNotEqual( frozen["P"], unfrozen["P"] ) + self.assertEqual( frozen.numFaces(), 11336 ) diff --git a/python/GafferVDBTest/MeshToLevelSetTest.py b/python/GafferVDBTest/MeshToLevelSetTest.py index 90b13a65883..499b80e459a 100644 --- a/python/GafferVDBTest/MeshToLevelSetTest.py +++ b/python/GafferVDBTest/MeshToLevelSetTest.py @@ -34,6 +34,7 @@ # ########################################################################## +import imath import time import IECore @@ -233,3 +234,60 @@ def testRecursionViaIntermediateQuery( self ) : # we'll get deadlock. 
meshToLevelSet2["out"].object( "/cube" ) + + def testMerging( self ): + + # Create two non-overlapping spheres + sphere = GafferScene.Sphere() + sphere["radius"].setValue( 1.0 ) + + sphere2 = GafferScene.Sphere() + sphere2["name"].setValue( "sphere2" ) + sphere2["radius"].setValue( 1.0 ) + sphere2["transform"]["translate"]["x"].setValue( 5 ) + + freezeTransform = GafferScene.FreezeTransform() + freezeTransform["in"].setInput( sphere2["out"] ) + self.setFilter( freezeTransform, '/sphere2' ) + + parent = GafferScene.Parent() + parent["parent"].setValue( "/" ) + parent["in"].setInput( sphere["out"] ) + parent["children"][0].setInput( freezeTransform["out"] ) + + + meshToLevelSet = GafferVDB.MeshToLevelSet() + meshToLevelSet["in"].setInput( parent["out"] ) + self.setFilter( meshToLevelSet, '/*' ) + + voxelCountA = meshToLevelSet["out"].object( "/sphere" ).findGrid( "surface" ).activeVoxelCount() + voxelCountB = meshToLevelSet["out"].object( "/sphere2" ).findGrid( "surface" ).activeVoxelCount() + + # Maybe this could change if OpenVDB's algorithm changes, but I would expect it to be constant + # unless something weird changes, so might as well check the actual numbers + self.assertEqual( voxelCountA, 7712 ) + self.assertEqual( voxelCountB, 7712 ) + + meshToLevelSet["destination"].setValue( "/merged" ) + + # If we write both locations to the same destination, they get merged + self.assertEqual( + meshToLevelSet["out"].object( "/merged" ).findGrid( "surface" ).activeVoxelCount(), + voxelCountA + voxelCountB + ) + + @GafferTest.TestRunner.PerformanceTestMethod() + def testBasicPerf( self ): + sphere = GafferScene.Sphere() + sphere["radius"].setValue( 2.0 ) + sphere["divisions"].setValue( imath.V2i( 1000, 1000 ) ) + + meshToLevelSet = GafferVDB.MeshToLevelSet() + self.setFilter( meshToLevelSet, '/sphere' ) + meshToLevelSet["voxelSize"].setValue( 0.05 ) + meshToLevelSet["in"].setInput( sphere["out"] ) + + meshToLevelSet["in"].object( "/sphere" ) + + with GafferTest.TestRunner.PerformanceScope() : + meshToLevelSet["out"].object( "/sphere" ) diff --git a/python/GafferVDBUI/LevelSetToMeshUI.py b/python/GafferVDBUI/LevelSetToMeshUI.py index 4ed15b3d803..b833130ddb4 100644 --- a/python/GafferVDBUI/LevelSetToMeshUI.py +++ b/python/GafferVDBUI/LevelSetToMeshUI.py @@ -43,6 +43,34 @@ """Converts a level set VDB object to a mesh primitive .""", plugs={ + "filter" : [ + "description", + """ + The filter used to choose the VDBs to be converted. Source locations are + pruned from the output scene, unless they are reused as part of a destination location + (or a separate source scene is connected). + """ + ], + "source" : [ + "description", + """ + An optional alternate scene to provide the VDBs to be converted. When connected : + + - The `filter` chooses locations to be merged from the `source` scene rather than the `in` scene. + - Source locations are not pruned from the output scene. + """ + ], + "destination" : [ + "description", + """ + By default, VDBs will be replaced with a mesh in place, using the destination `${scene:path}`. + The destination can be modified to change where the outputs are placed. If multiple filtered locations + have the same destination, the VDBs will be merged into one mesh. + + The destination location will be created if it doesn't exist already. If the name overlaps + with an existing location that isn't filtered, the name will get a suffix.
+ """, + ], 'grid' : [ 'description', """ diff --git a/python/GafferVDBUI/MeshToLevelSetUI.py b/python/GafferVDBUI/MeshToLevelSetUI.py index 7aca418b456..67ce5968e1d 100644 --- a/python/GafferVDBUI/MeshToLevelSetUI.py +++ b/python/GafferVDBUI/MeshToLevelSetUI.py @@ -42,6 +42,34 @@ 'description', """Converts a mesh primitive to a level set VDB object.""", plugs={ + "filter" : [ + "description", + """ + The filter used to choose the meshes to be converted. Source locations are + pruned from the output scene, unless they are reused as part of a destination location + (or a separate source scene is connected). + """ + ], + "source" : [ + "description", + """ + An optional alternate scene to provide the meshes to be converted. When connected : + + - The `filter` chooses locations to be merged from the `source` scene rather than then `in` scene. + - Source locations are not pruned from the output scene. + """ + ], + "destination" : [ + "description", + """ + By default, meshes will be replaced with a level set in place, using the destination `${scene:path}`. + The destination can be modified to change where the outputs are placed. If multiple filtered locations + have the same destination, the meshes will be merged into one level set. + + The destination location will be created if it doesn't exist already. If the name overlaps + with an existing location that isn't filtered, the name will get a suffix. + """, + ], 'grid' : [ 'description', """ diff --git a/src/GafferScene/IECoreScenePreview/PrimitiveAlgo.cpp b/src/GafferScene/IECoreScenePreview/PrimitiveAlgo.cpp index a25f830f459..e670e216445 100644 --- a/src/GafferScene/IECoreScenePreview/PrimitiveAlgo.cpp +++ b/src/GafferScene/IECoreScenePreview/PrimitiveAlgo.cpp @@ -583,7 +583,6 @@ class MergePrimitivesMeshResult // This must be called after all calls to copyFromSource void finalize() { - result->setTopologyUnchecked( m_resultVerticesPerFaceData, m_resultVertexIdsData, m_numVertices, result->interpolation() ); if( m_resultCornerIdsData ) @@ -964,6 +963,14 @@ IECoreScene::PrimitivePtr mergePrimitivesInternal( const IECore::Canceller *canceller ) { + if( primitives.size() == 1 ) + { + // If we have a single input, we just need to transform it + IECoreScene::PrimitivePtr result = primitives[0].first->copy(); + PrimitiveAlgo::transformPrimitive( *result, primitives[0].second, canceller ); + return result; + } + IECoreScene::TypeId resultTypeId = (IECoreScene::TypeId)ResultStruct::PrimitiveType::staticTypeId(); // Data we need to store for each primvar we output diff --git a/src/GafferScene/MergeCurves.cpp b/src/GafferScene/MergeCurves.cpp index 7a703cfefd9..bb706f58150 100644 --- a/src/GafferScene/MergeCurves.cpp +++ b/src/GafferScene/MergeCurves.cpp @@ -63,7 +63,7 @@ MergeCurves::~MergeCurves() { } -IECore::ConstObjectPtr MergeCurves::mergeObjects( const std::vector< std::pair< IECore::ConstObjectPtr, Imath::M44f > > &sources, const Gaffer::Context *context ) const +IECore::ConstObjectPtr MergeCurves::computeMergedObject( const std::vector< std::pair< IECore::ConstObjectPtr, Imath::M44f > > &sources, const Gaffer::Context *context ) const { std::vector< std::pair< const IECoreScene::Primitive *, Imath::M44f > > curves; diff --git a/src/GafferScene/MergeMeshes.cpp b/src/GafferScene/MergeMeshes.cpp index decc88dc6c1..7dc1d2bf1ce 100644 --- a/src/GafferScene/MergeMeshes.cpp +++ b/src/GafferScene/MergeMeshes.cpp @@ -63,7 +63,7 @@ MergeMeshes::~MergeMeshes() { } -IECore::ConstObjectPtr MergeMeshes::mergeObjects( const std::vector< std::pair< 
IECore::ConstObjectPtr, Imath::M44f > > &sources, const Gaffer::Context *context ) const +IECore::ConstObjectPtr MergeMeshes::computeMergedObject( const std::vector< std::pair< IECore::ConstObjectPtr, Imath::M44f > > &sources, const Gaffer::Context *context ) const { std::vector< std::pair< const IECoreScene::Primitive *, Imath::M44f > > meshes; diff --git a/src/GafferScene/MergeObjects.cpp b/src/GafferScene/MergeObjects.cpp index d1b0e14c0aa..4eafd8e44d8 100644 --- a/src/GafferScene/MergeObjects.cpp +++ b/src/GafferScene/MergeObjects.cpp @@ -626,6 +626,89 @@ M44f relativeTransform( return fromSource * toDest; } +IECore::MurmurHash g_invalidTransformHash = [](){ + IECore::MurmurHash r; + r.append( -1 ); + return r; +}(); + +IECore::MurmurHash relativeTransformHash( + const ScenePlug::ScenePath &sourcePath, const ScenePlug::ScenePath &destPath, + const ScenePlug *sourceScene, const ScenePlug *destScene, + ScenePlug::PathScope &pathScope, ScenePlug::ScenePath &matchingPrefix, IECore::MurmurHash &toDestHash +) +{ + unsigned int matchingLength = std::min( sourcePath.size(), destPath.size() ); + if( sourceScene != destScene ) + { + // In theory, we could do something more accurate even when the scenes are different, but + // we couldn't skip evaluating the transforms in the prefix just because the names match. + // We would need a separate code path where we evaluate the transforms at each level, but + // don't multiply them onto the matrices if they are identical ( to avoid accumulating error ). + // For now, do the simple thing, because preserving existing transforms is most important when + // working in place. + matchingLength = 0; + } + else + { + for( unsigned int i = 0; i < matchingLength; i++ ) + { + if( sourcePath[ i ] != destPath[ i ] ) + { + matchingLength = i; + break; + } + } + } + + if( matchingPrefix.size() != matchingLength || toDestHash == g_invalidTransformHash ) + { + ScenePlug::ScenePath &curPath = matchingPrefix; + + if( curPath.size() > matchingLength ) + { + curPath.resize( matchingLength ); + } + + curPath.reserve( destPath.size() ); + while( curPath.size() < destPath.size() ) + { + curPath.push_back( destPath[ curPath.size() ] ); + } + + toDestHash = IECore::MurmurHash(); + while( curPath.size() > matchingLength ) + { + pathScope.setPath( &curPath ); + if( destScene->existsPlug()->getValue() ) + { + destScene->transformPlug()->hash( toDestHash ); + } + curPath.pop_back(); + } + } + + ScenePlug::ScenePath &curPath = matchingPrefix; + + curPath.reserve( sourcePath.size() ); + while( curPath.size() < sourcePath.size() ) + { + curPath.push_back( sourcePath[ curPath.size() ] ); + } + + IECore::MurmurHash r; + while( curPath.size() > matchingLength ) + { + pathScope.setPath( &curPath ); + sourceScene->transformPlug()->hash( r ); + curPath.pop_back(); + } + + r.append( toDestHash ); + + return r; +} + // The filter value used for pruning the existing scene - if the sourcePlug is connected, then no pruning occurs. 
IECore::PathMatcher::Result pruneFilterValue( const GafferScene::ScenePlug *inPlug, const GafferScene::FilterPlug *filterPlug, const GafferScene::ScenePlug *sourcePlug, const Gaffer::Context *context ) { @@ -776,7 +859,8 @@ void MergeObjects::affects( const Plug *input, AffectedPlugsContainer &outputs ) input == inPlug()->objectPlug() || input == inPlug()->transformPlug() || input == sourcePlug()->objectPlug() || - input == sourcePlug()->transformPlug() + input == sourcePlug()->transformPlug() || + affectsMergedObject( input ) ) { outputs.push_back( processedObjectPlug() ); @@ -857,8 +941,6 @@ void MergeObjects::hash( const Gaffer::ValuePlug *output, const Gaffer::Context throw IECore::Exception( "__processedObject should only be hashed from hashObject, which checks for a matching tree location first" ); } - h.append( outPlug()->fullTransformHash( path ) ); - const ScenePlug *effectiveSource = effectiveSourcePlug(); const ThreadState &threadState = ThreadState::current(); @@ -867,21 +949,27 @@ void MergeObjects::hash( const Gaffer::ValuePlug *output, const Gaffer::Context const IECore::MurmurHash reduction = tbb::parallel_deterministic_reduce( tbb::blocked_range( 0, sourcePaths->size() ), IECore::MurmurHash(), - [&] ( const tbb::blocked_range &range, const MurmurHash &hash ) { + [&] ( const tbb::blocked_range &range, const MurmurHash &hash ) + { + ScenePlug::ScenePath matchingPrefix; + IECore::MurmurHash toDestHash = g_invalidTransformHash; ScenePlug::PathScope pathScope( threadState ); IECore::MurmurHash result = hash; for( size_t i = range.begin(); i != range.end(); ++i ) { pathScope.setPath( &((*sourcePaths)[i]) ); - result.append( effectiveSource->fullTransformHash( (*sourcePaths)[i] ) ); effectiveSource->objectPlug()->hash( result ); + result.append( relativeTransformHash( + (*sourcePaths)[i], path, effectiveSource, inPlug(), pathScope, + matchingPrefix, toDestHash + ) ); } - return result; + return result; }, - [] ( const MurmurHash &x, const MurmurHash &y ) { - + [] ( const MurmurHash &x, const MurmurHash &y ) + { MurmurHash result = x; result.append( y ); return result; @@ -891,6 +979,7 @@ void MergeObjects::hash( const Gaffer::ValuePlug *output, const Gaffer::Context ); h.append( reduction ); + hashMergedObject( path, context, h ); } } @@ -944,8 +1033,8 @@ void MergeObjects::compute( Gaffer::ValuePlug *output, const Gaffer::Context *co tbb::parallel_for( tbb::blocked_range( 0, sourcePaths->size() ), - [&] ( const tbb::blocked_range &range ) { - + [&] ( const tbb::blocked_range &range ) + { ScenePlug::ScenePath matchingPrefix; M44f toDest( 0.0f ); @@ -965,7 +1054,7 @@ void MergeObjects::compute( Gaffer::ValuePlug *output, const Gaffer::Context *co taskGroupContext ); - static_cast( output )->setValue( mergeObjects( sources, context ) ); + static_cast( output )->setValue( computeMergedObject( sources, context ) ); } else { @@ -1515,3 +1604,12 @@ Gaffer::ValuePlug::CachePolicy MergeObjects::computeCachePolicy( const Gaffer::V } return FilteredSceneProcessor::computeCachePolicy( output ); } + +bool MergeObjects::affectsMergedObject( const Gaffer::Plug *input ) const +{ + return false; +} + +void MergeObjects::hashMergedObject( const ScenePath &path, const Gaffer::Context *context, IECore::MurmurHash &h ) const +{ +} diff --git a/src/GafferScene/MergePoints.cpp b/src/GafferScene/MergePoints.cpp index a7e76c9740a..6bb3c39a634 100644 --- a/src/GafferScene/MergePoints.cpp +++ b/src/GafferScene/MergePoints.cpp @@ -63,7 +63,7 @@ MergePoints::~MergePoints() { } -IECore::ConstObjectPtr 
MergePoints::mergeObjects( const std::vector< std::pair< IECore::ConstObjectPtr, Imath::M44f > > &sources, const Gaffer::Context *context ) const +IECore::ConstObjectPtr MergePoints::computeMergedObject( const std::vector< std::pair< IECore::ConstObjectPtr, Imath::M44f > > &sources, const Gaffer::Context *context ) const { std::vector< std::pair< const IECoreScene::Primitive *, Imath::M44f > > points; diff --git a/src/GafferVDB/LevelSetToMesh.cpp b/src/GafferVDB/LevelSetToMesh.cpp index 6c6b2c94877..dc49ac0d6a7 100644 --- a/src/GafferVDB/LevelSetToMesh.cpp +++ b/src/GafferVDB/LevelSetToMesh.cpp @@ -36,6 +36,8 @@ #include "GafferVDB/LevelSetToMesh.h" +#include "GafferVDB/Interrupter.h" + #include "Gaffer/StringPlug.h" #include "IECoreVDB/VDBObject.h" @@ -43,6 +45,8 @@ #include "IECoreScene/MeshPrimitive.h" #include "openvdb/openvdb.h" +#include "openvdb/tools/Composite.h" +#include "openvdb/tools/GridTransformer.h" #include "openvdb/tools/VolumeToMesh.h" #include "fmt/format.h" @@ -62,49 +66,155 @@ using namespace GafferVDB; namespace { -struct MesherDispatch +template< typename F, typename... Args > +void dispatchForVdbType( const openvdb::GridBase *grid, F &&functor, Args&&... args ) { - MesherDispatch( openvdb::GridBase::ConstPtr grid, openvdb::tools::VolumeToMesh &mesher ) : m_grid( grid ), m_mesher( mesher ) + const std::string &type = grid->type(); + + // Currently, we're just supporting FloatGrid. We could add cases here for + // DoubleGrid, Int32Grid, Int64Grid, and BoolGrid ... the only reason we + // haven't currently is that we're not aware of anyone who needs them, and + // compiling for the other types adds another 60 seconds of compile time for this file, + // and makes this compilation unit too big to compile on Windows without /bigobj. + if( type == openvdb::FloatGrid::gridType() ) + { + return functor( static_cast< const openvdb::FloatGrid * >( grid ), std::forward< Args >( args )...
); + } + else { + throw IECore::Exception( fmt::format( "Incompatible Grid found name: '{}' type: '{}' ", grid->getName(), grid->type() ) ); } +} + +template< typename T> +openvdb::GridBase::ConstPtr mergeGrids( + const std::vector< std::pair< openvdb::GridBase::ConstPtr, Imath::M44f > > &grids, + const openvdb::math::Transform &vdbTransform, + const IECore::Canceller *canceller +) +{ + typename T::ConstPtr resultGrid; + typename T::Ptr editableResultGrid; + + Interrupter interrupter( canceller ); - template - void execute() + static const Imath::M44f identity; + for( const auto &[untypedGrid, transform] : grids ) { - if( typename GridType::ConstPtr t = openvdb::GridBase::constGrid( m_grid ) ) + typename T::ConstPtr grid = openvdb::GridBase::grid< T >( untypedGrid ); + + // We check the grid types match when we put them in the grids list + assert( grid.get() ); + typename T::ConstPtr gridWithTransform; + + bool transformMatches; + if( transform == identity ) { - m_mesher( *t ); + gridWithTransform = grid; + transformMatches = openvdb::tools::ABTransform( grid->transform(), vdbTransform ).isIdentity(); + } + else + { + openvdb::math::Transform::Ptr toSource = grid->transformPtr()->copy(); + toSource->postMult( openvdb::math::Mat4d( + transform[0][0], transform[0][1], transform[0][2], transform[0][3], + transform[1][0], transform[1][1], transform[1][2], transform[1][3], + transform[2][0], transform[2][1], transform[2][2], transform[2][3], + transform[3][0], transform[3][1], transform[3][2], transform[3][3] + ) ); + gridWithTransform = openvdb::GridBase::constGrid< T >( grid->copyGridReplacingTransform( toSource ) ); + + transformMatches = false; } - } - openvdb::GridBase::ConstPtr m_grid; - openvdb::tools::VolumeToMesh &m_mesher; -}; -std::map > meshers = -{ - { openvdb::typeNameAsString(), []( MesherDispatch& dispatch ) { dispatch.execute(); } }, - { openvdb::typeNameAsString(), []( MesherDispatch& dispatch ) { dispatch.execute(); } }, - { openvdb::typeNameAsString(), []( MesherDispatch& dispatch ) { dispatch.execute(); } }, - { openvdb::typeNameAsString(), []( MesherDispatch& dispatch ) { dispatch.execute(); } }, - { openvdb::typeNameAsString(), []( MesherDispatch& dispatch ) { dispatch.execute(); } } -}; + if( transformMatches && !resultGrid ) + { + resultGrid = grid; + } + else if( transformMatches ) + { + if( !editableResultGrid ) + { + editableResultGrid = openvdb::deepCopyTypedGrid( resultGrid ); + resultGrid = editableResultGrid; + } + + openvdb::tools::csgUnion( *editableResultGrid, *openvdb::deepCopyTypedGrid( grid ) ); + + } + else + { + // We need to set up a fresh grid to hold the data transformed into the correct space for + // merging. Currently we use this call which keeps the metadata from the original grid + // We could probably just call T::create() instead to create totally fresh grid, but I'm + // not confident that there isn't metadata that could affect VolumeToMesh + typename T::Ptr destSpaceGrid = grid->copyWithNewTree(); + destSpaceGrid->setTransform( vdbTransform.copy() ); + + + // Our goal here is not actually to produce a "level set" ... the goal is to produce a grid that + // has the maximum of all the source level sets at each point. This actually makes a difference + // when doing a resample to a different scale - if we directly take the source values, the result + // is not a proper level set, because the sources at different scales have different gradients. 
+ // + // The usual way to resample a level set is to use tools::resampleToMatch, which has a special code + // path for level sets that recomputes their distances. However, that would change the values in + // the source grids, so selecting an iso value with the isoValue plug would no longer have the same + // effect. This would mean that if you merged level sets with different scales, the resulting + // meshes would end up in different places than if you converted to meshes before merging. + // + // Instead, we use the helpfully named tools::doResampleToMatch, which does not have a special path + // for level sets, and just takes the iso values from the source grids directly. The resulting grid + // is not technically a proper level set, but it has the right iso values to ensure that the + // conversion to mesh works correctly. It doesn't matter that it doesn't have other properties of + // a level set, because we know we're just feeding this grid to VolumeToMesh and then discarding + // it - it cannot possibly be used for something else where not being a "proper" level set would + // be a problem. ( As an added bonus, doResampleToMatch is about twice as fast as resampleToMatch + // on level sets ). + // + // It would be reasonable to offer a control for the sampler to use here. PointSampler is fastest + // but noticeably blocky. QuadraticSampler gives higher quality results for smooth surfaces but + // is slower, and can make hard edged models look a bit wobbly. BoxSampler is a basic trilinear sampler, + // which seems like a reasonable default. + openvdb::tools::doResampleToMatch< openvdb::tools::BoxSampler >( *gridWithTransform, *destSpaceGrid, interrupter ); + // If we've been cancelled, the interrupter will have stopped + // `doResampleToMatch()` and we'll have a partial result in the grid. + // We need to throw rather than allow this partial result to be + // returned.
+ Canceller::check( canceller ); + + if( !resultGrid ) + { + resultGrid = destSpaceGrid; + } + else + { + if( !editableResultGrid ) + { + editableResultGrid = openvdb::deepCopyTypedGrid( resultGrid ); + resultGrid = editableResultGrid; + } + openvdb::tools::csgUnion( *editableResultGrid, *destSpaceGrid ); + } + } + } + + return resultGrid; +} IECoreScene::MeshPrimitivePtr volumeToMesh( openvdb::GridBase::ConstPtr grid, double isoValue, double adaptivity ) { openvdb::tools::VolumeToMesh mesher( isoValue, adaptivity ); - MesherDispatch dispatch( grid, mesher ); - const auto it = meshers.find( grid->valueType() ); - if( it != meshers.end() ) - { - it->second( dispatch ); - } - else - { - throw IECore::InvalidArgumentException( fmt::format( "Incompatible Grid found name: '{}' type: '{}' ", grid->valueType(), grid->getName() ) ); - } + dispatchForVdbType( + grid.get(), + [ &mesher ]( const auto *typedGrid ) + { + mesher( *typedGrid ); + } + ); // Copy out topology IntVectorDataPtr verticesPerFaceData = new IntVectorData; @@ -162,11 +272,10 @@ IECoreScene::MeshPrimitivePtr volumeToMesh( openvdb::GridBase::ConstPtr grid, do return new MeshPrimitive( verticesPerFaceData, vertexIdsData, "linear", pointsData ); } - } // namespace ////////////////////////////////////////////////////////////////////////// -// VolumeToMesh implementation +// LevelSetToMesh implementation ////////////////////////////////////////////////////////////////////////// GAFFER_NODE_DEFINE_TYPE( LevelSetToMesh ); @@ -174,19 +283,13 @@ GAFFER_NODE_DEFINE_TYPE( LevelSetToMesh ); size_t LevelSetToMesh::g_firstPlugIndex = 0; LevelSetToMesh::LevelSetToMesh( const std::string &name ) - : Deformer( name ) + : MergeObjects( name, "${scene:path}" ) { storeIndexOfNextChild( g_firstPlugIndex ); addChild( new StringPlug( "grid", Plug::In, "surface" ) ); addChild( new FloatPlug( "isoValue", Plug::In, 0.0f ) ); addChild( new FloatPlug( "adaptivity", Plug::In, 0.0f, 0.0f, 1.0f ) ); - - // The output mesh will always be bounded by the input level set, and only - // in rare cases will it be shrunk enough to warrant the cost of computing - // exact bounds. So we default `adjustBounds` to `false`. 
- adjustBoundsPlug()->setValue( false ); - adjustBoundsPlug()->resetDefault(); } LevelSetToMesh::~LevelSetToMesh() @@ -195,12 +298,12 @@ LevelSetToMesh::LevelSetToMesh( const std::string &name ) Gaffer::StringPlug *LevelSetToMesh::gridPlug() { - return getChild<StringPlug>( g_firstPlugIndex ); + return getChild<StringPlug>( g_firstPlugIndex ); } const Gaffer::StringPlug *LevelSetToMesh::gridPlug() const { - return getChild<StringPlug>( g_firstPlugIndex ); + return getChild<StringPlug>( g_firstPlugIndex ); } Gaffer::FloatPlug *LevelSetToMesh::isoValuePlug() @@ -223,44 +326,99 @@ const Gaffer::FloatPlug *LevelSetToMesh::adaptivityPlug() const return getChild<FloatPlug>( g_firstPlugIndex + 2 ); } -bool LevelSetToMesh::affectsProcessedObject( const Gaffer::Plug *input ) const +bool LevelSetToMesh::affectsMergedObject( const Gaffer::Plug *input ) const { return - Deformer::affectsProcessedObject( input ) || + MergeObjects::affectsMergedObject( input ) || input == isoValuePlug() || input == adaptivityPlug() || input == gridPlug() ; } -void LevelSetToMesh::hashProcessedObject( const ScenePath &path, const Gaffer::Context *context, IECore::MurmurHash &h ) const +void LevelSetToMesh::hashMergedObject( const ScenePath &path, const Gaffer::Context *context, IECore::MurmurHash &h ) const { - Deformer::hashProcessedObject( path, context, h ); + MergeObjects::hashMergedObject( path, context, h ); gridPlug()->hash( h ); isoValuePlug()->hash( h ); adaptivityPlug()->hash( h ); } -IECore::ConstObjectPtr LevelSetToMesh::computeProcessedObject( const ScenePath &path, const Gaffer::Context *context, const IECore::Object *inputObject ) const +IECore::ConstObjectPtr LevelSetToMesh::computeMergedObject( const std::vector< std::pair< IECore::ConstObjectPtr, Imath::M44f > > &sources, const Gaffer::Context *context ) const { - const VDBObject *vdbObject = runTimeCast( inputObject ); - if( !vdbObject ) + std::string gridName = gridPlug()->getValue(); + + float smallestVoxel = 0; + + // We need to decide on a resolution for our intermediate grid if we're merging to a different location + // than the source. The voxel size is determined by the VDB's transform, so we take the transform that + // results in the smallest voxels. This has been chosen just as a reasonable heuristic that will likely + // preserve the meaningful detail in the source volume - we don't include the Gaffer location scale in + // this heuristic, so if someone merges two volumes where one of them has been scaled down tiny in Gaffer, + // you'll get the voxel size at a default scale, rather than trying to capture the detail in the scaled + // down volume by resampling the large volume to have a massive number of voxels ( which could be + // very expensive ). If you really want to capture detail across different scales, you might be better + // off converting to mesh without merging, and then merging the meshes.
+ openvdb::math::Transform::ConstPtr mostPreciseIndexing; + std::string gridType; + + std::vector< std::pair< openvdb::GridBase::ConstPtr, Imath::M44f > > grids; + + for( const auto &[object, transform] : sources ) { - return inputObject; - } + const IECoreVDB::VDBObject * v = IECore::runTimeCast< const IECoreVDB::VDBObject >( object.get() ); + if( !v ) + { + // Just skip anything that's not a vdb + continue; + } - openvdb::GridBase::ConstPtr grid = vdbObject->findGrid( gridPlug()->getValue() ); + openvdb::GridBase::ConstPtr grid = v->findGrid( gridPlug()->getValue() ); - if (!grid) + if (!grid) + { + continue; + } + + if( !gridType.size() ) + { + gridType = grid->type(); + } + else if( gridType != grid->type() ) + { + throw IECore::Exception( fmt::format( "Incompatible grid types: '{}' vs '{}' ", gridType, grid->type() ) ); + } + + openvdb::Vec3d voxelSize3 = grid->transform().voxelSize(); + float voxelSize = std::min( voxelSize3[0], std::min( voxelSize3[1], voxelSize3[2] ) ); + + if( !mostPreciseIndexing || voxelSize < smallestVoxel ) + { + smallestVoxel = voxelSize; + mostPreciseIndexing = grid->transformPtr(); + } + + grids.emplace_back( std::make_pair( grid, transform ) ); + } + + if( !grids.size() ) { - return inputObject; + // If there are no grids, return a mesh with no faces. + // There's a question whether NullObject would be more consistent, but this makes it consistent with + // the result you get for a grid with no voxels matching the iso value. + return new IECoreScene::MeshPrimitive(); } - return volumeToMesh( grid, isoValuePlug()->getValue(), adaptivityPlug()->getValue() ); -} + openvdb::GridBase::ConstPtr resultGrid; + dispatchForVdbType( + grids[0].first.get(), + [ &grids, &mostPreciseIndexing, &context, &resultGrid ]( const auto *typedGrid ) + { + using GridType = typename std::remove_const_t< std::remove_pointer_t< decltype( typedGrid ) > >; + resultGrid = mergeGrids< GridType >( grids, *mostPreciseIndexing, context->canceller() ); + } + ); -Gaffer::ValuePlug::CachePolicy LevelSetToMesh::processedObjectComputeCachePolicy() const -{ - return ValuePlug::CachePolicy::TaskCollaboration; + return volumeToMesh( resultGrid, isoValuePlug()->getValue(), adaptivityPlug()->getValue() ); } diff --git a/src/GafferVDB/MeshToLevelSet.cpp b/src/GafferVDB/MeshToLevelSet.cpp index 0d0422bb956..0dce4bf4385 100644 --- a/src/GafferVDB/MeshToLevelSet.cpp +++ b/src/GafferVDB/MeshToLevelSet.cpp @@ -43,6 +43,7 @@ #include "Gaffer/StringPlug.h" #include "IECoreScene/MeshPrimitive.h" +#include "GafferScene/Private/IECoreScenePreview/PrimitiveAlgo.h" #include "openvdb/openvdb.h" #include "openvdb/tools/MeshToVolume.h" @@ -64,16 +65,22 @@ namespace struct CortexMeshAdapter { - - CortexMeshAdapter( const MeshPrimitive *mesh, const openvdb::math::Transform *transform ) + CortexMeshAdapter( const MeshPrimitive *mesh ) : m_numFaces( mesh->numFaces() ), m_numVertices( mesh->variableSize( PrimitiveVariable::Vertex ) ), m_verticesPerFace( mesh->verticesPerFace()->readable() ), - m_vertexIds( mesh->vertexIds()->readable() ), - m_transform( transform ) + m_vertexIds( mesh->vertexIds()->readable() ) { size_t offset = 0; m_faceOffsets.reserve( m_numFaces ); + + // \todo - Preparing this list of face offsets is not an effective way to prepare topology for + // OpenVDB. If we wanted to be optimal, we would probably just convert everything to quads, where + // the 4th vertex is set to openvdb::util::INVALID_IDX if the face is actually a triangle ( this is
If we were going to do this, we would also + // want to process n-gons with > 4 verts somehow to preserve watertightness. Currently, we pass + // n-gons through unchanged, and then VDB discards them, which breaks watertightness and causes + // level set conversion to completely fail on meshes with n-gons. for( vector::const_iterator it = m_verticesPerFace.begin(), eIt = m_verticesPerFace.end(); it != eIt; ++it ) { m_faceOffsets.push_back( offset ); @@ -102,9 +109,8 @@ struct CortexMeshAdapter // Return position pos in local grid index space for polygon n and vertex v void getIndexSpacePoint( size_t polygonIndex, size_t polygonVertexIndex, openvdb::Vec3d &pos ) const { - /// \todo Threaded pretransform in constructor? - const V3f p = (*m_points)[ m_vertexIds[ m_faceOffsets[polygonIndex] + polygonVertexIndex ] ]; - pos = m_transform->worldToIndex( openvdb::math::Vec3s( p.x, p.y, p.z ) ); + const V3f &p = (*m_points)[ m_vertexIds[ m_faceOffsets[polygonIndex] + polygonVertexIndex ] ]; + pos = openvdb::math::Vec3s( p.x, p.y, p.z ); } private : @@ -115,8 +121,6 @@ struct CortexMeshAdapter const vector &m_vertexIds; vector m_faceOffsets; const vector *m_points; - const openvdb::math::Transform *m_transform; - }; } // namespace @@ -130,7 +134,7 @@ GAFFER_NODE_DEFINE_TYPE( MeshToLevelSet ); size_t MeshToLevelSet::g_firstPlugIndex = 0; MeshToLevelSet::MeshToLevelSet( const std::string &name ) - : ObjectProcessor( name ) + : MergeObjects( name, "${scene:path}" ) { storeIndexOfNextChild( g_firstPlugIndex ); @@ -146,12 +150,12 @@ MeshToLevelSet::~MeshToLevelSet() Gaffer::StringPlug *MeshToLevelSet::gridPlug() { - return getChild( g_firstPlugIndex ); + return getChild( g_firstPlugIndex ); } const Gaffer::StringPlug *MeshToLevelSet::gridPlug() const { - return getChild( g_firstPlugIndex ); + return getChild( g_firstPlugIndex ); } FloatPlug *MeshToLevelSet::voxelSizePlug() @@ -184,10 +188,10 @@ const FloatPlug *MeshToLevelSet::interiorBandwidthPlug() const return getChild( g_firstPlugIndex + 3 ); } -bool MeshToLevelSet::affectsProcessedObject( const Gaffer::Plug *input ) const +bool MeshToLevelSet::affectsMergedObject( const Gaffer::Plug *input ) const { return - ObjectProcessor::affectsProcessedObject( input ) || + MergeObjects::affectsMergedObject( input ) || input == gridPlug() || input == voxelSizePlug() || input == exteriorBandwidthPlug() || @@ -195,9 +199,11 @@ bool MeshToLevelSet::affectsProcessedObject( const Gaffer::Plug *input ) const ; } -void MeshToLevelSet::hashProcessedObject( const ScenePath &path, const Gaffer::Context *context, IECore::MurmurHash &h ) const +void MeshToLevelSet::hashMergedObject( + const ScenePath &path, const Gaffer::Context *context, IECore::MurmurHash &h +) const { - ObjectProcessor::hashProcessedObject( path, context, h ); + MergeObjects::hashMergedObject( path, context, h ); gridPlug()->hash( h ); voxelSizePlug()->hash( h ); @@ -205,45 +211,79 @@ void MeshToLevelSet::hashProcessedObject( const ScenePath &path, const Gaffer::C interiorBandwidthPlug()->hash ( h ); } -IECore::ConstObjectPtr MeshToLevelSet::computeProcessedObject( const ScenePath &path, const Gaffer::Context *context, const IECore::Object *inputObject ) const +IECore::ConstObjectPtr MeshToLevelSet::computeMergedObject( const std::vector< std::pair< IECore::ConstObjectPtr, Imath::M44f > > &sources, const Gaffer::Context *context ) const { - const MeshPrimitive *mesh = runTimeCast( inputObject ); - if( !mesh ) + std::vector< IECoreScene::MeshPrimitivePtr > meshStorage; + std::vector< std::pair< const 
IECoreScene::Primitive *, Imath::M44f > > meshes; + + const float voxelSize = voxelSizePlug()->getValue(); + + openvdb::math::Transform::Ptr vdbTransform = openvdb::math::Transform::createLinearTransform( voxelSize ); + Imath::M44f worldToIndex; + worldToIndex.setScale( 1.0f / voxelSize ); + + for( const auto &[object, transform] : sources ) { - return inputObject; + const IECoreScene::MeshPrimitive * m = IECore::runTimeCast< const IECoreScene::MeshPrimitive >( object.get() ); + if( !m ) + { + // Just skip anything that's not a mesh + continue; + } + + // Create a simplified mesh with only basic topology and P - OpenVDB won't use anything else, + // and we don't want to spend time merging primvars or creases that won't be used. + // The copy-on-write mechanism should ensure that we don't actually duplicate this data. + IECoreScene::MeshPrimitivePtr simpleMesh = new IECoreScene::MeshPrimitive(); + simpleMesh->setTopologyUnchecked( + m->verticesPerFace(), m->vertexIds(), m->variableSize( PrimitiveVariable::Interpolation::Vertex ) + ); + simpleMesh->variables["P"] = m->variables.at("P"); + meshStorage.push_back( simpleMesh ); + + meshes.push_back( std::make_pair( simpleMesh.get(), transform * worldToIndex ) ); } - const float voxelSize = voxelSizePlug()->getValue(); - const float exteriorBandwidth = exteriorBandwidthPlug()->getValue(); - const float interiorBandwidth = interiorBandwidthPlug()->getValue(); - - openvdb::math::Transform::Ptr transform = openvdb::math::Transform::createLinearTransform( voxelSize ); - Interrupter interrupter( context->canceller() ); - - openvdb::FloatGrid::Ptr grid = openvdb::tools::meshToVolume( - interrupter, - CortexMeshAdapter( mesh, transform.get() ), - *transform, - exteriorBandwidth, //in voxel units - interiorBandwidth, //in voxel units - 0 //conversionFlags - ); - - // If we've been cancelled, the interrupter will have stopped - // `meshToVolume()` and we'll have a partial result in the grid. - // We need to throw rather than allow this partial result to be - // returned. - Canceller::check( context->canceller() ); + openvdb::FloatGrid::Ptr grid; + if( !meshes.size() ) + { + // None of the filtered sources were actually meshes. We could consider this an exception, + // but I guess the most consistent thing is just to return an empty grid with the correct voxel size. + grid = openvdb::FloatGrid::create(); + grid->setTransform( vdbTransform ); + } + else + { + IECoreScene::MeshPrimitivePtr mergedMesh = IECore::runTimeCast( + IECoreScenePreview::PrimitiveAlgo::mergePrimitives( meshes, context->canceller() ) + ); + assert( mergedMesh ); + + const float exteriorBandwidth = exteriorBandwidthPlug()->getValue(); + const float interiorBandwidth = interiorBandwidthPlug()->getValue(); + + Interrupter interrupter( context->canceller() ); + + grid = openvdb::tools::meshToVolume( + interrupter, + CortexMeshAdapter( mergedMesh.get() ), + *vdbTransform, + exteriorBandwidth, //in voxel units + interiorBandwidth, //in voxel units + 0 //conversionFlags + ); + + // If we've been cancelled, the interrupter will have stopped + // `meshToVolume()` and we'll have a partial result in the grid. + // We need to throw rather than allow this partial result to be + // returned. 
+ Canceller::check( context->canceller() ); + } grid->setName( gridPlug()->getValue() ); - VDBObjectPtr newVDBObject = new VDBObject(); + VDBObjectPtr newVDBObject = new VDBObject(); newVDBObject->insertGrid( grid ); return newVDBObject; } - -Gaffer::ValuePlug::CachePolicy MeshToLevelSet::processedObjectComputeCachePolicy() const -{ - return ValuePlug::CachePolicy::TaskCollaboration; -}
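For context, the merge workflow enabled by the new `destination` plugs looks roughly like this in a script - a minimal sketch loosely based on `MeshToLevelSetTest.testMerging`, with illustrative node names, paths and values rather than anything taken verbatim from the tests:

```python
import IECore
import GafferScene
import GafferVDB

# Two non-overlapping spheres parented under /group.
sphere = GafferScene.Sphere()

sphere2 = GafferScene.Sphere()
sphere2["name"].setValue( "sphere2" )
sphere2["transform"]["translate"]["x"].setValue( 5 )

group = GafferScene.Group()
group["in"][0].setInput( sphere["out"] )
group["in"][1].setInput( sphere2["out"] )

sphereFilter = GafferScene.PathFilter()
sphereFilter["paths"].setValue( IECore.StringVectorData( [ "/group/sphere", "/group/sphere2" ] ) )

meshToLevelSet = GafferVDB.MeshToLevelSet()
meshToLevelSet["in"].setInput( group["out"] )
meshToLevelSet["filter"].setInput( sphereFilter["out"] )

# With the default destination of "${scene:path}", each sphere is converted to
# its own level set in place. Pointing both sources at a single destination
# merges them into one grid at a new location.
meshToLevelSet["destination"].setValue( "/merged" )
merged = meshToLevelSet["out"].object( "/merged" )
```

Leaving `destination` at its default of `${scene:path}` reproduces the previous in-place conversion behaviour, so existing setups should keep working unchanged.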