From 49282e5650247f944b976b5bbcfc480f2e929f68 Mon Sep 17 00:00:00 2001 From: Daniel Dresser Date: Wed, 6 Dec 2023 13:44:02 -0800 Subject: [PATCH] Add DeepSlice, for trimming deep images to particular depths --- include/GafferImage/DeepSlice.h | 113 +++ include/GafferImage/TypeIds.h | 1 + python/GafferImageTest/DeepSliceTest.py | 497 ++++++++++ python/GafferImageTest/__init__.py | 1 + python/GafferImageUI/DeepSliceUI.py | 105 +++ python/GafferImageUI/__init__.py | 1 + src/GafferImage/DeepSlice.cpp | 1047 +++++++++++++++++++++ src/GafferImageModule/DeepNodeBinding.cpp | 2 + startup/gui/menus.py | 1 + 9 files changed, 1768 insertions(+) create mode 100644 include/GafferImage/DeepSlice.h create mode 100644 python/GafferImageTest/DeepSliceTest.py create mode 100644 python/GafferImageUI/DeepSliceUI.py create mode 100644 src/GafferImage/DeepSlice.cpp diff --git a/include/GafferImage/DeepSlice.h b/include/GafferImage/DeepSlice.h new file mode 100644 index 00000000000..d6de2e7469c --- /dev/null +++ b/include/GafferImage/DeepSlice.h @@ -0,0 +1,113 @@ +////////////////////////////////////////////////////////////////////////// +// +// Copyright (c) 2023, Image Engine Design Inc. All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above +// copyright notice, this list of conditions and the following +// disclaimer. +// +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided with +// the distribution. +// +// * Neither the name of John Haddon nor the names of +// any other contributors to this software may be used to endorse or +// promote products derived from this software without specific prior +// written permission. 
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+//////////////////////////////////////////////////////////////////////////
+
+#pragma once
+
+#include "GafferImage/ImageProcessor.h"
+
+#include "Gaffer/CompoundNumericPlug.h"
+#include "Gaffer/NumericPlug.h"
+
+namespace Gaffer
+{
+
+IE_CORE_FORWARDDECLARE( StringPlug )
+
+} // namespace Gaffer
+
+namespace GafferImage
+{
+
+/// Takes a slice out of a deep image, discarding samples outside a
+/// near/far clipping range. Samples spanning a clip depth are split so
+/// the retained fraction composites correctly, meaning a front slice
+/// and a back slice taken at the same depth composite back together to
+/// match the original image. Optionally flattens the result.
+class GAFFERIMAGE_API DeepSlice : public ImageProcessor +{ + public : + + explicit DeepSlice( const std::string &name=defaultName() ); + ~DeepSlice() override; + + GAFFER_NODE_DECLARE_TYPE( GafferImage::DeepSlice, DeepSliceTypeId, ImageProcessor ); + + Gaffer::BoolPlug *nearClipPlug(); + const Gaffer::BoolPlug *nearClipPlug() const; + + Gaffer::FloatPlug *nearClipDepthPlug(); + const Gaffer::FloatPlug *nearClipDepthPlug() const; + + Gaffer::BoolPlug *farClipPlug(); + const Gaffer::BoolPlug *farClipPlug() const; + + Gaffer::FloatPlug *farClipDepthPlug(); + const Gaffer::FloatPlug *farClipDepthPlug() const; + + Gaffer::BoolPlug *flattenPlug(); + const Gaffer::BoolPlug *flattenPlug() const; + + void affects( const Gaffer::Plug *input, AffectedPlugsContainer &outputs ) const override; + + protected : + + void hash( const Gaffer::ValuePlug *output, const Gaffer::Context *context, IECore::MurmurHash &h ) const override; + void compute( Gaffer::ValuePlug *output, const Gaffer::Context *context ) const override; + + void hashChannelData( const GafferImage::ImagePlug *parent, const Gaffer::Context *context, IECore::MurmurHash &h ) const override; + IECore::ConstFloatVectorDataPtr computeChannelData( const std::string &channelName, const Imath::V2i &tileOrigin, const Gaffer::Context *context, const ImagePlug *parent ) const override; + + void hashSampleOffsets( const GafferImage::ImagePlug *parent, const Gaffer::Context *context, IECore::MurmurHash &h ) const override; + IECore::ConstIntVectorDataPtr computeSampleOffsets( const Imath::V2i &tileOrigin, const Gaffer::Context *context, const ImagePlug *parent ) const override; + + void hashDeep( const GafferImage::ImagePlug *parent, const Gaffer::Context *context, IECore::MurmurHash &h ) const override; + bool computeDeep( const Gaffer::Context *context, const ImagePlug *parent ) const override; + + private : + + ImagePlug *tidyInPlug(); + const ImagePlug *tidyInPlug() const; + + Gaffer::CompoundObjectPlug *sliceDataPlug(); + 
const Gaffer::CompoundObjectPlug *sliceDataPlug() const; + + static size_t g_firstPlugIndex; + +}; + +IE_CORE_DECLAREPTR( DeepSlice ) + +} // namespace GafferImage diff --git a/include/GafferImage/TypeIds.h b/include/GafferImage/TypeIds.h index 1136ed5120c..29db0377f68 100644 --- a/include/GafferImage/TypeIds.h +++ b/include/GafferImage/TypeIds.h @@ -128,6 +128,7 @@ enum TypeId DeepHoldoutTypeId = 110833, DeepRecolorTypeId = 110834, SaturationTypeId = 110835, + DeepSliceTypeId = 110836, LastTypeId = 110849 }; diff --git a/python/GafferImageTest/DeepSliceTest.py b/python/GafferImageTest/DeepSliceTest.py new file mode 100644 index 00000000000..9c214da4027 --- /dev/null +++ b/python/GafferImageTest/DeepSliceTest.py @@ -0,0 +1,497 @@ +########################################################################## +# +# Copyright (c) 2023 Image Engine Design Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above +# copyright notice, this list of conditions and the following +# disclaimer. +# +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following +# disclaimer in the documentation and/or other materials provided with +# the distribution. +# +# * Neither the name of John Haddon nor the names of +# any other contributors to this software may be used to endorse or +# promote products derived from this software without specific prior +# written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS +# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, +# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +# PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR +# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +########################################################################## + +import os +import unittest +import imath +import random + +import IECore + +import Gaffer +import GafferImage +import GafferImageTest + +class DeepSliceTest( GafferImageTest.ImageTestCase ) : + + def testBasics( self ) : + + # Set up 3 segments in primary colors with depth ranges 1-2, 3-4, 5-6 + + constantRed = GafferImage.Constant() + constantRed["format"].setValue( GafferImage.Format( 32, 32, 1.000 ) ) + constantRed["color"].setValue( imath.Color4f( 0.5, 0, 0, 0.5 ) ) + + constantGreen = GafferImage.Constant() + constantGreen["format"].setValue( GafferImage.Format( 32, 32, 1.000 ) ) + constantGreen["color"].setValue( imath.Color4f( 0, 0.5, 0, 0.5 ) ) + + constantBlue = GafferImage.Constant() + constantBlue["format"].setValue( GafferImage.Format( 32, 32, 1.000 ) ) + constantBlue["color"].setValue( imath.Color4f( 0, 0, 0.5, 0.5 ) ) + + flatToDeep1 = GafferImage.FlatToDeep() + flatToDeep1["in"].setInput( constantRed["out"] ) + flatToDeep1["depth"].setValue( 1.0 ) + flatToDeep1["zBackMode"].setValue( 1 ) + flatToDeep1["thickness"].setValue( 1.0 ) + + flatToDeep2 = GafferImage.FlatToDeep() + flatToDeep2["in"].setInput( constantGreen["out"] ) + flatToDeep2["depth"].setValue( 3.0 ) + flatToDeep2["zBackMode"].setValue( 1 ) + flatToDeep2["thickness"].setValue( 1.0 ) + + flatToDeep3 = GafferImage.FlatToDeep() + flatToDeep3["in"].setInput( constantBlue["out"] ) + 
flatToDeep3["depth"].setValue( 5.0 ) + flatToDeep3["zBackMode"].setValue( 1 ) + flatToDeep3["thickness"].setValue( 1.0 ) + + deepMerge = GafferImage.DeepMerge() + deepMerge["in"][0].setInput( flatToDeep1["out"] ) + deepMerge["in"][1].setInput( flatToDeep2["out"] ) + deepMerge["in"][2].setInput( flatToDeep3["out"] ) + + deepSlice = GafferImage.DeepSlice() + deepSlice["in"].setInput( constantRed["out"] ) + deepSlice["flatten"].setValue( False ) + + deepSliceFlatten = GafferImage.DeepSlice() + deepSliceFlatten["in"].setInput( constantRed["out"] ) + deepSliceFlatten["flatten"].setValue( True ) + + flatten = GafferImage.DeepToFlat() + flatten["in"].setInput( deepSlice["out"] ) + flatten["depthMode"].setValue( GafferImage.DeepToFlat.DepthMode.Range ) + + sampler = GafferImage.DeepSampler() + sampler["image"].setInput( deepSlice["out"] ) + + # Run a test with specified near and far clips, based on what we expect for the 3 segments we've + # set up. + # The expected Z values are computed based on + # the given values, but the expected alpha and color values are computed based on the given + # expectedWeights, specifying the fraction of each sample that is taken. + # We could compute expectedWeights based some simple computations, but I think it makes it more + # obivous what we're testing to hardcode the expectedWeights for each test. 
+ def sliceTest( nearClip, nearClipDepth, farClip, farClipDepth, expectedWeights ): + deepSlice["nearClip"].setValue( nearClip ) + deepSlice["nearClipDepth"].setValue( nearClipDepth ) + deepSlice["farClip"].setValue( farClip ) + deepSlice["farClipDepth"].setValue( farClipDepth ) + deepSliceFlatten["nearClip"].setValue( nearClip ) + deepSliceFlatten["nearClipDepth"].setValue( nearClipDepth ) + deepSliceFlatten["farClip"].setValue( farClip ) + deepSliceFlatten["farClipDepth"].setValue( farClipDepth ) + + self.assertImagesEqual( deepSliceFlatten["out"], flatten["out"], maxDifference = 1e-7 ) + + pd = sampler["pixelData"].getValue() + + if expectedWeights == [ None, None, None ]: + self.assertEqual( pd, IECore.CompoundData() ) + return + + self.assertEqual( pd["R"], IECore.FloatVectorData( [ 0.5 * w * c for w, c in zip( expectedWeights, [ 1, 0, 0 ] ) if not w is None ] ) ) + self.assertEqual( pd["G"], IECore.FloatVectorData( [ 0.5 * w * c for w, c in zip( expectedWeights, [ 0, 1, 0 ] ) if not w is None ] ) ) + self.assertEqual( pd["B"], IECore.FloatVectorData( [ 0.5 * w * c for w, c in zip( expectedWeights, [ 0, 0, 1 ] ) if not w is None ] ) ) + self.assertEqual( pd["A"], IECore.FloatVectorData( [ 0.5 * w for w in expectedWeights if not w is None] ) ) + self.assertEqual( pd["Z"], IECore.FloatVectorData( [ + max( nearClipDepth, z ) if nearClip else z + for w, z in zip( expectedWeights, [ 1, 3, 5 ] ) if not w is None + ] ) ) + self.assertEqual( pd["ZBack"], IECore.FloatVectorData( [ + min( farClipDepth, z ) if farClip else z + for w, z in zip( expectedWeights, [ 2, 4, 6 ] ) if not w is None + ] ) ) + + with self.assertRaisesRegex( RuntimeError, "DeepSlice requires a Z channel" ) : + sliceTest( False, 0, False, 0, [ 1, None, None ] ) + + # Compute the multiplier we need of a segment with 50% alpha in order to split it in half, + # so that the two halves composite back to the original. 
It's more than 50% because of + # how alpha compositing works - if we used 50%, we would get a total of: + # 0.25 + ( 1 - 0.25 ) * 0.25 == 0.4375. So instead we use this math, which results in a value + # of about 58%, which then accumulates to exactly 0.5 + halfSeg = ( 1 - 0.5 ** 0.5 ) / 0.5 + + + # Test with only one segment hooked up + + deepSlice["in"].setInput( flatToDeep1["out"] ) + deepSliceFlatten["in"].setInput( flatToDeep1["out"] ) + + sliceTest( False, 0, False, 0, [ 1, None, None ] ) + sliceTest( False, 1.5, False, 1.5, [ 1, None, None ] ) + sliceTest( True, 1.5, False, 1.5, [ halfSeg, None, None ] ) + sliceTest( False, 1.5, True, 1.5, [ halfSeg, None, None ] ) + sliceTest( True, 1.5, True, 1.5, [ 0, None, None ] ) + + + # Create a flat image with the first segment in it, but with valid Z and ZBack, + # so we can check we do something reasonable with a flat input + oneSegmentFlat = GafferImage.DeepToFlat() + oneSegmentFlat["in"].setInput( flatToDeep1["out"] ) + oneSegmentFlat["depthMode"].setValue( GafferImage.DeepToFlat.DepthMode.Range ) + + deepSlice["in"].setInput( oneSegmentFlat["out"] ) + deepSliceFlatten["in"].setInput( oneSegmentFlat["out"] ) + + # Results should be identical to the "deep" image produced by flatToDeep + sliceTest( False, 0, False, 0, [ 1, None, None ] ) + sliceTest( False, 1.5, False, 1.5, [ 1, None, None ] ) + sliceTest( True, 1.5, False, 1.5, [ halfSeg, None, None ] ) + sliceTest( False, 1.5, True, 1.5, [ halfSeg, None, None ] ) + sliceTest( True, 1.5, True, 1.5, [ 0, None, None ] ) + + + # Now hook up all 3 segments through the DeepMerge + deepSlice["in"].setInput( deepMerge["out"] ) + deepSliceFlatten["in"].setInput( deepMerge["out"] ) + + # Toggle near/far on and off + sliceTest( False, 1.5, False, 1.5, [ 1, 1, 1 ] ) + sliceTest( True, 1.5, False, 1.5, [ halfSeg, 1, 1 ] ) + sliceTest( False, 1.5, True, 1.5, [ halfSeg, None, None ] ) + sliceTest( True, 1.5, True, 1.5, [ 0, None, None ] ) + + # Test many near clip values + 
sliceTest( True, 0, False, 1.5, [ 1, 1, 1 ] ) + sliceTest( True, 1, False, 1.5, [ 1, 1, 1 ] ) + sliceTest( True, 1.5, False, 1.5, [ halfSeg, 1, 1 ] ) + sliceTest( True, 2, False, 1.5, [ None, 1, 1 ] ) + sliceTest( True, 3, False, 1.5, [ None, 1, 1 ] ) + sliceTest( True, 3.5, False, 1.5, [ None, halfSeg, 1 ] ) + sliceTest( True, 4, False, 1.5, [ None, None, 1 ] ) + sliceTest( True, 5, False, 1.5, [ None, None, 1 ] ) + sliceTest( True, 5.5, False, 1.5, [ None, None, halfSeg ] ) + sliceTest( True, 6, False, 1.5, [ None, None, None ] ) + sliceTest( True, 7, False, 1.5, [ None, None, None ] ) + + # Test many far clip values + sliceTest( False, 1.5, True, 0, [ None, None, None ] ) + sliceTest( False, 1.5, True, 1, [ None, None, None ] ) + sliceTest( False, 1.5, True, 1.0000001, [ 1.6525917e-7, None, None ] ) + sliceTest( False, 1.5, True, 1.5, [ halfSeg, None, None ] ) + sliceTest( False, 1.5, True, 2, [ 1, None, None ] ) + sliceTest( False, 1.5, True, 3, [ 1, None, None ] ) + sliceTest( False, 1.5, True, 3.0000002, [ 1, 3.305183e-7, None ] ) + sliceTest( False, 1.5, True, 3.5, [ 1, halfSeg, None ] ) + sliceTest( False, 1.5, True, 4, [ 1, 1, None ] ) + sliceTest( False, 1.5, True, 5, [ 1, 1, None ] ) + sliceTest( False, 1.5, True, 5.0000004, [ 1, 1, 6.6103655e-07 ] ) + sliceTest( False, 1.5, True, 5.5, [ 1, 1, halfSeg ] ) + sliceTest( False, 1.5, True, 6, [ 1, 1, 1 ] ) + + # Handle zero length segments in node, or in tests ( Handle in tests using None ) + # Test both clips here + # Test point samples at 1.25 and 1.75 + sliceTest( True, 3.5, True, 3.5, [ None, 0, None ] ) + sliceTest( True, 1.5, True, 3.5, [ halfSeg, halfSeg, None ] ) + sliceTest( True, 3.5, True, 5.5, [ None, halfSeg, halfSeg ] ) + sliceTest( True, 2, True, 5, [ None, 1, None ] ) + sliceTest( True, 2, True, 5.0000004, [ None, 1, 6.6103655e-07 ] ) + sliceTest( True, 1.5, True, 5.5, [ halfSeg, 1, halfSeg ] ) + sliceTest( True, 1, True, 6, [ 1, 1, 1 ] ) + + # Compare a lot of possible slices to a reference 
implementation using a slower "generate a holdout image + # then DeepHoldout" approach. + def testBruteForce( self ) : + + representativeImagePath = GafferImageTest.ImageTestCase.imagesPath() / "representativeDeepImage.exr" + deepIntPointsPath = GafferImageTest.ImageTestCase.imagesPath() / "deepIntPoints.exr" + deepIntVolumesPath = GafferImageTest.ImageTestCase.imagesPath() / "deepIntVolumes.exr" + deepFloatPointsPath = GafferImageTest.ImageTestCase.imagesPath() / "deepFloatPoints.exr" + deepFloatVolumesPath = GafferImageTest.ImageTestCase.imagesPath() / "deepFloatVolumes.exr" + + representativeImage = GafferImage.ImageReader( "representativeDeep" ) + representativeImage["fileName"].setValue( representativeImagePath ) + intPoints = GafferImage.ImageReader() + intPoints["fileName"].setValue( deepIntPointsPath ) + intVolumes = GafferImage.ImageReader() + intVolumes["fileName"].setValue( deepIntVolumesPath ) + floatPoints = GafferImage.ImageReader() + floatPoints["fileName"].setValue( deepFloatPointsPath ) + floatVolumes = GafferImage.ImageReader() + floatVolumes["fileName"].setValue( deepFloatVolumesPath ) + + allInts = GafferImage.DeepMerge( "allInts" ) + allInts["in"][0].setInput( intPoints["out"] ) + allInts["in"][1].setInput( intVolumes["out"] ) + + allFloats = GafferImage.DeepMerge( "allFloats" ) + allFloats["in"][0].setInput( floatPoints["out"] ) + allFloats["in"][1].setInput( floatVolumes["out"] ) + + allCombined = GafferImage.DeepMerge( "allCombined" ) + allCombined["in"][0].setInput( intPoints["out"] ) + allCombined["in"][1].setInput( intVolumes["out"] ) + allCombined["in"][2].setInput( floatPoints["out"] ) + allCombined["in"][3].setInput( floatVolumes["out"] ) + + testImage = GafferImage.ImagePlug() + + formatQuery = GafferImage.FormatQuery() + formatQuery["image"].setInput( testImage ) + + sliceNear = GafferImage.DeepSlice() + sliceNear["in"].setInput( testImage ) + sliceNear["nearClip"].setValue( False ) + sliceNear["farClip"].setValue( True ) + 
sliceNear["flatten"].setValue( False ) + + flattenedNear = GafferImage.DeepToFlat() + flattenedNear["in"].setInput( sliceNear["out"] ) + flattenedNear["depthMode"].setValue( GafferImage.DeepToFlat.DepthMode.Range ) + + flatSliceNear = GafferImage.DeepSlice() + flatSliceNear["in"].setInput( testImage ) + flatSliceNear["nearClip"].setValue( False ) + flatSliceNear["farClip"].setValue( True ) + flatSliceNear["farClipDepth"].setInput( sliceNear["farClipDepth"] ) + flatSliceNear["flatten"].setValue( True ) + + sliceFar = GafferImage.DeepSlice() + sliceFar["in"].setInput( testImage ) + sliceFar["nearClip"].setValue( True ) + sliceFar["farClip"].setValue( False ) + sliceFar["flatten"].setValue( False ) + + flattenedFar = GafferImage.DeepToFlat() + flattenedFar["in"].setInput( sliceFar["out"] ) + flattenedFar["depthMode"].setValue( GafferImage.DeepToFlat.DepthMode.Range ) + + flatSliceFar = GafferImage.DeepSlice() + flatSliceFar["in"].setInput( testImage ) + flatSliceFar["nearClip"].setValue( True ) + flatSliceFar["nearClipDepth"].setInput( sliceFar["nearClipDepth"] ) + flatSliceFar["farClip"].setValue( False ) + flatSliceFar["flatten"].setValue( True ) + + sliceMiddle = GafferImage.DeepSlice() + sliceMiddle["in"].setInput( testImage ) + sliceMiddle["nearClip"].setValue( True ) + sliceMiddle["farClip"].setValue( True ) + sliceMiddle["flatten"].setValue( False ) + + flattenedMiddle = GafferImage.DeepToFlat() + flattenedMiddle["in"].setInput( sliceMiddle["out"] ) + flattenedMiddle["depthMode"].setValue( GafferImage.DeepToFlat.DepthMode.Range ) + + flatSliceMiddle = GafferImage.DeepSlice() + flatSliceMiddle["in"].setInput( testImage ) + flatSliceMiddle["nearClip"].setValue( True ) + flatSliceMiddle["nearClipDepth"].setInput( sliceMiddle["nearClipDepth"] ) + flatSliceMiddle["farClip"].setValue( True ) + flatSliceMiddle["farClipDepth"].setInput( sliceMiddle["farClipDepth"] ) + flatSliceMiddle["flatten"].setValue( True ) + + flattenedInput = GafferImage.DeepToFlat() + 
flattenedInput["in"].setInput( testImage ) + flattenedInput["depthMode"].setValue( GafferImage.DeepToFlat.DepthMode.None_ ) + + flatSliceNearWithoutDepth = GafferImage.DeleteChannels() + flatSliceNearWithoutDepth["in"].setInput( flatSliceNear["out"] ) + flatSliceNearWithoutDepth["channels"].setValue( "Z ZBack" ) + + flatSliceFarWithoutDepth = GafferImage.DeleteChannels() + flatSliceFarWithoutDepth["in"].setInput( flatSliceFar["out"] ) + flatSliceFarWithoutDepth["channels"].setValue( "Z ZBack" ) + + flatSliceMiddleWithoutDepth = GafferImage.DeleteChannels() + flatSliceMiddleWithoutDepth["in"].setInput( flatSliceMiddle["out"] ) + flatSliceMiddleWithoutDepth["channels"].setValue( "Z ZBack" ) + + nearOverFar = GafferImage.Merge() + nearOverFar["operation"].setValue( GafferImage.Merge.Operation.Over ) + nearOverFar["in"][0].setInput( flatSliceFarWithoutDepth["out"] ) + nearOverFar["in"][1].setInput( flatSliceNearWithoutDepth["out"] ) + + nearOverMiddleOverFar = GafferImage.Merge() + nearOverMiddleOverFar["operation"].setValue( GafferImage.Merge.Operation.Over ) + nearOverMiddleOverFar["in"][0].setInput( flatSliceFarWithoutDepth["out"] ) + nearOverMiddleOverFar["in"][1].setInput( flatSliceMiddleWithoutDepth["out"] ) + nearOverMiddleOverFar["in"][2].setInput( flatSliceNearWithoutDepth["out"] ) + + tidyInput = GafferImage.DeepTidy() + tidyInput["in"].setInput( testImage ) + + sampleCountsInput = GafferImage.DeepSampleCounts() + sampleCountsInput["in"].setInput( tidyInput["out"] ) + + sampleCountsNear = GafferImage.DeepSampleCounts() + sampleCountsNear["in"].setInput( sliceNear["out"] ) + + sampleCountsFar = GafferImage.DeepSampleCounts() + sampleCountsFar["in"].setInput( sliceFar["out"] ) + + sampleCountsMiddle = GafferImage.DeepSampleCounts() + sampleCountsMiddle["in"].setInput( sliceMiddle["out"] ) + + sampleCountsNearFar = GafferImage.Merge() + sampleCountsNearFar["operation"].setValue( GafferImage.Merge.Operation.Add ) + sampleCountsNearFar["in"][0].setInput( 
sampleCountsNear["out"] ) + sampleCountsNearFar["in"][1].setInput( sampleCountsFar["out"] ) + + sampleCountsNearMiddleFar = GafferImage.Merge() + sampleCountsNearMiddleFar["operation"].setValue( GafferImage.Merge.Operation.Add ) + sampleCountsNearMiddleFar["in"][0].setInput( sampleCountsNear["out"] ) + sampleCountsNearMiddleFar["in"][1].setInput( sampleCountsMiddle["out"] ) + sampleCountsNearMiddleFar["in"][2].setInput( sampleCountsFar["out"] ) + + tidyNear = GafferImage.DeepTidy() + tidyNear["in"].setInput( sliceNear["out"] ) + + tidyFar = GafferImage.DeepTidy() + tidyFar["in"].setInput( sliceFar["out"] ) + + tidyMiddle = GafferImage.DeepTidy() + tidyMiddle["in"].setInput( sliceMiddle["out"] ) + + holdoutConstant = GafferImage.Constant() + holdoutConstant["format"].setInput( formatQuery["format"] ) + + holdoutDepth = GafferImage.FlatToDeep() + holdoutDepth["in"].setInput( holdoutConstant["out"] ) + + holdout = GafferImage.DeepHoldout() + holdout["in"].setInput( testImage ) + holdout["holdout"].setInput( holdoutDepth["out"] ) + + holdoutWithoutDepth = GafferImage.DeleteChannels() + holdoutWithoutDepth["in"].setInput( holdout["out"] ) + holdoutWithoutDepth["channels"].setValue( "Z ZBack" ) + + random.seed( 42 ) + + for image, zStart, zEnd in [ + ( allInts["out"], 0, 4 ), + ( allFloats["out"], 0, 4 ), + ( allCombined["out"], 0, 4 ), + ( representativeImage["out"], 4, 11 ), + ]: + testImage.setInput( image ) + + # Since some of our tests have samples at integer depths, test specifically in the neighbourhood + # of integer depths, then test at a bunch of random depths as well + for depth in ( + [ i + o for i in range( zStart, zEnd + 1) for o in [ -5e-7, 0, 5e-7 ] ] + + [ random.uniform( zStart, zEnd ) for i in range( 20 ) ] + ): + with self.subTest( mode = "Near/Far", name = image.node().getName(), depth = depth ) : + sliceNear["farClipDepth"].setValue( depth ) + sliceFar["nearClipDepth"].setValue( depth ) + + # The output from DeepSlice should always be tidy, which we 
can validate by making + # sure tidying has no effect + self.assertImagesEqual( tidyNear["out"], sliceNear["out"] ) + self.assertImagesEqual( tidyFar["out"], sliceFar["out"] ) + + # Check that the flat output from DeepSlice matches with what we get by flattening + # the deep output + self.assertImagesEqual( flattenedNear["out"], flatSliceNear["out"], maxDifference = 1e-6 ) + self.assertImagesEqual( flattenedFar["out"], flatSliceFar["out"], maxDifference = 1e-6 ) + + + # Check that we match with passing an image containing a constant depth into DeepHoldout + holdoutDepth["depth"].setValue( depth ) + try: + self.assertImagesEqual( flatSliceNearWithoutDepth["out"], holdoutWithoutDepth["out"], maxDifference = 3e-5 ) + except: + # We handle point samples exactly at the threshold a little bit differently than + # this DeepHoldout approach - the holdout is doing a DeepMerge with a black image with + # a point sample at a fixed depth at each pixel, so the fraction of a point sample + # exactly at the cutoff depth that comes through depends on the EXR logic for merging + # samples ( since the cutoff image is opaque, you get 50% of an opaque sample, or 0% of + # a non-opaque sample ). + # + # Our logic is different: we exclude all samples at the cutoff for farClipDepth, + # and we include samples at the cutoff for nearClipDepth. This ensures that using + # two DeepSlices to split an image at a particular depth, and then re-compositing it, + # gives you something that matches the original. + # + # Because of this difference, in order to get the tests passing, we just shift the + # holdout depth slightly nearer in order to get a matching result from the holdout. 
+ holdoutDepth["depth"].setValue( depth - 5e-7 ) + self.assertImagesEqual( flatSliceNearWithoutDepth["out"], holdoutWithoutDepth["out"], maxDifference = 3e-5 ) + + # Check that using DeepSlice to take everything before a depth, and using DeepSlice to + # take everything after a depth, results in 2 images that composite together to match + # the original + self.assertImagesEqual( nearOverFar["out"], flattenedInput["out"], maxDifference = 4e-6 ) + + # Check that sample counts of the two slices are reasonable. The sum should be no less than + # the original sample counts, and no more than 1 greater ( since if a sample is split by + # the depth, it will appear in both ) + self.assertImagesEqual( + sampleCountsNearFar["out"], sampleCountsInput["out"], + maxDifferenceGreater = 1, maxDifferenceLess = 0 + ) + + # Run a few more tests when we're taking a middle slice by clipping both near and far + for a, b in ( ( random.uniform( zStart, zEnd ), random.uniform( zStart, zEnd ) ) for i in range( 20 ) ): + with self.subTest( mode = "Middle", name = image.node().getName(), depth = depth ) : + nearDepth = min( a, b ) + farDepth = max( a, b ) + sliceNear["farClipDepth"].setValue( nearDepth ) + sliceMiddle["nearClipDepth"].setValue( nearDepth ) + sliceMiddle["farClipDepth"].setValue( farDepth ) + sliceFar["nearClipDepth"].setValue( farDepth ) + + # Check that the flat output from DeepSlice matches with what we get by flattening + # the deep output + self.assertImagesEqual( flattenedMiddle["out"], flatSliceMiddle["out"], maxDifference = 1e-6 ) + + # The output from DeepSlice should always be tidy, which we can validate by making + # sure tidying has no effect + self.assertImagesEqual( tidyMiddle["out"], sliceMiddle["out"] ) + + # Check that compositing the middle slice with the part before and after it gives + # us the original + self.assertImagesEqual( nearOverMiddleOverFar["out"], flattenedInput["out"], maxDifference = 4e-6 ) + + # Check that sample counts of the three slices are 
reasonable. The sum should be no less than + # the original sample counts, and no more than 2 greater ( since both clipping depths + # could split a sample ) + self.assertImagesEqual( + sampleCountsNearMiddleFar["out"], sampleCountsInput["out"], + maxDifferenceGreater = 2, maxDifferenceLess = 0 + ) + +if __name__ == "__main__": + unittest.main() + diff --git a/python/GafferImageTest/__init__.py b/python/GafferImageTest/__init__.py index f29266ae61d..251799019c0 100644 --- a/python/GafferImageTest/__init__.py +++ b/python/GafferImageTest/__init__.py @@ -113,6 +113,7 @@ from .OpenColorIOAlgoTest import OpenColorIOAlgoTest from .OpenColorIOContextTest import OpenColorIOContextTest from .OpenColorIOConfigPlugTest import OpenColorIOConfigPlugTest +from .DeepSliceTest import DeepSliceTest if __name__ == "__main__": diff --git a/python/GafferImageUI/DeepSliceUI.py b/python/GafferImageUI/DeepSliceUI.py new file mode 100644 index 00000000000..a52750722c8 --- /dev/null +++ b/python/GafferImageUI/DeepSliceUI.py @@ -0,0 +1,105 @@ +########################################################################## +# +# Copyright (c) 2023, Image Engine Design Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above +# copyright notice, this list of conditions and the following +# disclaimer. +# +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following +# disclaimer in the documentation and/or other materials provided with +# the distribution. +# +# * Neither the name of John Haddon nor the names of +# any other contributors to this software may be used to endorse or +# promote products derived from this software without specific prior +# written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS +# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, +# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR +# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +########################################################################## + +import Gaffer +import GafferImage + +Gaffer.Metadata.registerNode( + + GafferImage.DeepSlice, + + "description", + """ + Takes a slice out of a deep image by discarding everything outside of a clipping range. + Optionally also flattens the image. The range is half open, including point samples exactly + at the near clip, but excluding point samples exactly at the far clip. This means that if you split + an image into a front and back with two DeepSlices, they will composite back together to match the + original. + """, + + "layout:activator:nearClip", lambda node : node["nearClip"].getValue(), + "layout:activator:farClip", lambda node : node["farClip"].getValue(), + + plugs = { + + "nearClip" : [ + + "description", + """ + Remove everything with Z less than the near clip depth. + """, + + ], + "nearClipDepth" : [ + + "description", + """ + The depth for the near clip. + """, + "label", "", + "layout:accessory", True, + "layout:activator", "nearClip", + + ], + "farClip" : [ + + "description", + """ + Remove everything with Z greater than or equal to the far clip depth. 
+ """, + + ], + "farClipDepth" : [ + + "description", + """ + The depth for the far clip. + """, + "label", "", + "layout:accessory", True, + "layout:activator", "farClip", + ], + "flatten" : [ + + "description", + """ + Output a flat image, instead of output a deep image with any samples within the range. + """, + + ], + } + +) diff --git a/python/GafferImageUI/__init__.py b/python/GafferImageUI/__init__.py index ce291b1adfc..4f5c7fa91bb 100644 --- a/python/GafferImageUI/__init__.py +++ b/python/GafferImageUI/__init__.py @@ -121,5 +121,6 @@ from . import LookTransformUI from . import OpenColorIOContextUI from . import OpenColorIOConfigPlugUI +from . import DeepSliceUI __import__( "IECore" ).loadConfig( "GAFFER_STARTUP_PATHS", subdirectory = "GafferImageUI" ) diff --git a/src/GafferImage/DeepSlice.cpp b/src/GafferImage/DeepSlice.cpp new file mode 100644 index 00000000000..adbd7c136f8 --- /dev/null +++ b/src/GafferImage/DeepSlice.cpp @@ -0,0 +1,1047 @@ +////////////////////////////////////////////////////////////////////////// +// +// Copyright (c) 2023, Image Engine Design Inc. All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above +// copyright notice, this list of conditions and the following +// disclaimer. +// +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided with +// the distribution. +// +// * Neither the name of Image Engine Design nor the names of +// any other contributors to this software may be used to endorse or +// promote products derived from this software without specific prior +// written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS +// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, +// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR +// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +////////////////////////////////////////////////////////////////////////// + +#include "GafferImage/DeepSlice.h" + +#include "GafferImage/DeepState.h" +#include "GafferImage/ImageAlgo.h" + +#include "Gaffer/Context.h" +#include "Gaffer/StringPlug.h" + +using namespace Imath; +using namespace IECore; +using namespace Gaffer; +using namespace GafferImage; + +////////////////////////////////////////////////////////////////////////// +// Utilities +////////////////////////////////////////////////////////////////////////// + +namespace +{ + +// \todo - might be nice to move this a central algo header, and share this math with DeepState, +// though precision is important there, so we'd probably have to modify the API to return the +// directly computed alpha and the sample multiplier separately, and maybe that's not worth it. 
+float sampleMultiplier( float alpha, float fraction ) +{ + if( alpha <= 0.0f ) + { + // If alpha is zero, then EXR says that this represents a fully transparent incandescent + // volume, and the contribution is linear in the fraction of the sample that we take + // ( the exponential shape comes from visibility blocking in fog causing later + // contributions to contribute less than the start of the curve, which doesn't + // happen without visibility blocking ). + return fraction; + } + else if( alpha == 1.0f ) + { + // If the alpha is 1, then this represents a fully opaque volume, which requires infinite density. + // It reaches an opacity of 1 immediately with no curve. + return 1.0f; + } + else if( fraction == 1.0f || fraction == 0.0f ) + { + // For these two values, the equation below will evaluate to simply the value of "fraction" + // in the limit, regardless of the value of alpha ( as long as it isn't one of the special + // values checked above ). + // + // The fraction == 0.0 case is not currently used ( because we exclude samples which we are + // taking 0% of when computing start/end for the sample range ), but is included for + // completeness. + return fraction; + } + else + { + // Use the numerically reliable math from "Interpreting OpenEXR Deep Pixels" to find the + // alpha after taking the fraction of the segment, and then divide by the original alpha + // to find the weighting factor we need to multiply this sample by. 
+ return -expm1( fraction * log1p( -alpha ) ) / alpha; + } +} + +const IECore::InternedString g_accumCountsName = "accumCounts"; +const IECore::InternedString g_inputIndicesName = "inputIndices"; +const IECore::InternedString g_firstWeightsName = "firstWeights"; +const IECore::InternedString g_lastWeightsName = "lastWeights"; + +} // namespace + +////////////////////////////////////////////////////////////////////////// +// DeepSlice +////////////////////////////////////////////////////////////////////////// + +GAFFER_NODE_DEFINE_TYPE( DeepSlice ); + +size_t DeepSlice::g_firstPlugIndex = 0; + +DeepSlice::DeepSlice( const std::string &name ) + : ImageProcessor( name ) +{ + storeIndexOfNextChild( g_firstPlugIndex ); + addChild( new BoolPlug( "nearClip", Plug::In, false ) ); + addChild( new FloatPlug( "nearClipDepth", Plug::In, 0.0f ) ); + addChild( new BoolPlug( "farClip", Plug::In, true ) ); + addChild( new FloatPlug( "farClipDepth", Plug::In, 1.0f ) ); + addChild( new BoolPlug( "flatten", Plug::In, true ) ); + + addChild( new ImagePlug( "__tidyIn", Plug::In, Plug::Default & ~Plug::Serialisable ) ); + + // The "sliceData" contains all the information about which samples to take that depends on Z/ZBack. + // See compute() for more description. + addChild( new CompoundObjectPlug( "__sliceData", Gaffer::Plug::Out, new IECore::CompoundObject ) ); + + // We don't ever want to change these, so we make pass-through connections. + outPlug()->viewNamesPlug()->setInput( inPlug()->viewNamesPlug() ); + outPlug()->formatPlug()->setInput( inPlug()->formatPlug() ); + outPlug()->metadataPlug()->setInput( inPlug()->metadataPlug() ); + outPlug()->channelNamesPlug()->setInput( inPlug()->channelNamesPlug() ); + + // We tidy the input image before we process it, because this means we can just process each sample + // in order ( and is quite cheap if the image is already tidy ). 
+ DeepStatePtr tidy = new DeepState( "__tidy" ); + addChild( tidy ); + tidy->inPlug()->setInput( inPlug() ); + tidyInPlug()->setInput( tidy->outPlug() ); + + outPlug()->viewNamesPlug()->setInput( inPlug()->viewNamesPlug() ); + outPlug()->channelNamesPlug()->setInput( inPlug()->channelNamesPlug() ); + outPlug()->dataWindowPlug()->setInput( inPlug()->dataWindowPlug() ); + outPlug()->formatPlug()->setInput( inPlug()->formatPlug() ); + outPlug()->metadataPlug()->setInput( inPlug()->metadataPlug() ); +} + +DeepSlice::~DeepSlice() +{ +} + +Gaffer::BoolPlug *DeepSlice::nearClipPlug() +{ + return getChild( g_firstPlugIndex ); +} + +const Gaffer::BoolPlug *DeepSlice::nearClipPlug() const +{ + return getChild( g_firstPlugIndex ); +} + +Gaffer::FloatPlug *DeepSlice::nearClipDepthPlug() +{ + return getChild( g_firstPlugIndex + 1 ); +} + +const Gaffer::FloatPlug *DeepSlice::nearClipDepthPlug() const +{ + return getChild( g_firstPlugIndex + 1 ); +} + +Gaffer::BoolPlug *DeepSlice::farClipPlug() +{ + return getChild( g_firstPlugIndex + 2 ); +} + +const Gaffer::BoolPlug *DeepSlice::farClipPlug() const +{ + return getChild( g_firstPlugIndex + 2 ); +} + +Gaffer::FloatPlug *DeepSlice::farClipDepthPlug() +{ + return getChild( g_firstPlugIndex + 3 ); +} + +const Gaffer::FloatPlug *DeepSlice::farClipDepthPlug() const +{ + return getChild( g_firstPlugIndex + 3 ); +} + +Gaffer::BoolPlug *DeepSlice::flattenPlug() +{ + return getChild( g_firstPlugIndex + 4 ); +} + +const Gaffer::BoolPlug *DeepSlice::flattenPlug() const +{ + return getChild( g_firstPlugIndex + 4 ); +} + +GafferImage::ImagePlug *DeepSlice::tidyInPlug() +{ + return getChild( g_firstPlugIndex + 5 ); +} + +const GafferImage::ImagePlug *DeepSlice::tidyInPlug() const +{ + return getChild( g_firstPlugIndex + 5 ); +} + +CompoundObjectPlug *DeepSlice::sliceDataPlug() +{ + return getChild( g_firstPlugIndex + 6 ); +} + +const CompoundObjectPlug *DeepSlice::sliceDataPlug() const +{ + return getChild( g_firstPlugIndex + 6 ); +} + +void 
DeepSlice::affects( const Gaffer::Plug *input, AffectedPlugsContainer &outputs ) const +{ + ImageProcessor::affects( input, outputs ); + + if( + input == inPlug()->deepPlug() || + input == flattenPlug() + ) + { + outputs.push_back( outPlug()->deepPlug() ); + } + + if( + input == nearClipPlug() || + input == nearClipDepthPlug() || + input == farClipPlug() || + input == farClipDepthPlug() || + input == inPlug()->deepPlug() || + input == tidyInPlug()->channelDataPlug() || + input == tidyInPlug()->sampleOffsetsPlug() + ) + { + outputs.push_back( sliceDataPlug() ); + } + + if( + input == nearClipPlug() || + input == nearClipDepthPlug() || + input == farClipPlug() || + input == farClipDepthPlug() || + input == flattenPlug() || + input == inPlug()->deepPlug() || + input == tidyInPlug()->channelDataPlug() || + input == sliceDataPlug() + ) + { + outputs.push_back( outPlug()->channelDataPlug() ); + } + + if( + input == sliceDataPlug() + ) + { + outputs.push_back( outPlug()->sampleOffsetsPlug() ); + } +} + +void DeepSlice::hash( const Gaffer::ValuePlug *output, const Gaffer::Context *context, IECore::MurmurHash &h ) const +{ + ImageProcessor::hash( output, context, h ); + + if( output != sliceDataPlug() ) + { + return; + } + + ConstStringVectorDataPtr channelNamesData; + + { + ImagePlug::GlobalScope s( context ); + inPlug()->deepPlug()->hash( h ); + nearClipPlug()->hash( h ); + nearClipDepthPlug()->hash( h ); + farClipPlug()->hash( h ); + farClipDepthPlug()->hash( h ); + channelNamesData = inPlug()->channelNamesPlug()->getValue(); + } + + const std::vector &channelNames = channelNamesData->readable(); + + inPlug()->sampleOffsetsPlug()->hash( h ); + + { + ImagePlug::ChannelDataScope s( context ); + if( ImageAlgo::channelExists( channelNames, ImageAlgo::channelNameA ) ) + { + s.setChannelName( &ImageAlgo::channelNameA ); + inPlug()->channelDataPlug()->hash( h ); + } + else + { + h.append( false ); + } + + if( ImageAlgo::channelExists( channelNames, ImageAlgo::channelNameZ ) ) + 
{ + s.setChannelName( &ImageAlgo::channelNameZ ); + inPlug()->channelDataPlug()->hash( h ); + } + else + { + h.append( false ); + } + + if( ImageAlgo::channelExists( channelNames, ImageAlgo::channelNameZBack ) ) + { + s.setChannelName( &ImageAlgo::channelNameZBack ); + inPlug()->channelDataPlug()->hash( h ); + } + else + { + h.append( false ); + } + } +} + +void DeepSlice::compute( Gaffer::ValuePlug *output, const Gaffer::Context *context ) const +{ + ImageProcessor::compute( output, context ); + + if( output != sliceDataPlug() ) + { + return; + } + + // sliceData is a CompoundObject with up to 4 members, storing the following things + // + // "accumCounts" : a running sum of the number of samples contributing to each pixel. + // When outputting a deep image, this will be the sampleOffsets of the output. + // When outputting a flat image, this is used to know which samples to sum. + // "inputIndices" : an int vector with the sample index where we start taking samples for each pixel + // "firstWeights" : a float for each pixel with a multiplier for the first sample for each pixel + // ( included when nearClip is on ) + // "lastWeights" : a float for each pixel with a multiplier for the last sample for each pixel + // ( included when farClip is on ) + // + // ( Note that any sample that is not first or last cannot intersect a clip plane, so we always take 100% ) + + // In order to compute this, we first need to get the control parameters, and the Z, ZBack, and A channel data + + bool deep; + bool nearClip; + float nearClipDepth; + bool farClip; + float farClipDepth; + + ConstStringVectorDataPtr channelNamesData; + + { + ImagePlug::GlobalScope s( context ); + deep = inPlug()->deepPlug()->getValue(); + nearClip = nearClipPlug()->getValue(); + nearClipDepth = nearClipDepthPlug()->getValue(); + farClip = farClipPlug()->getValue(); + farClipDepth = farClipDepthPlug()->getValue(); + channelNamesData = inPlug()->channelNamesPlug()->getValue(); + } + + const std::vector 
&channelNames = channelNamesData->readable(); + + ConstIntVectorDataPtr sampleOffsetsData; + if( deep ) + { + sampleOffsetsData = tidyInPlug()->sampleOffsetsPlug()->getValue(); + } + else + { + sampleOffsetsData = ImagePlug::flatTileSampleOffsets(); + } + + ConstFloatVectorDataPtr aData; + ConstFloatVectorDataPtr zData; + ConstFloatVectorDataPtr zBackData; + + { + ImagePlug::ChannelDataScope s( context ); + if( ImageAlgo::channelExists( channelNames, ImageAlgo::channelNameA ) ) + { + s.setChannelName( &ImageAlgo::channelNameA ); + aData = tidyInPlug()->channelDataPlug()->getValue(); + } + else + { + // We can produce legitimate results without an alpha channel by treating the alpha + // as zero, but we don't have a reliable and efficient way to get a buffer of zeros + // guaranteed to be large enough, so I guess we just have to have a special case for + // null a later in this function. + } + + if( ImageAlgo::channelExists( channelNames, ImageAlgo::channelNameZ ) ) + { + s.setChannelName( &ImageAlgo::channelNameZ ); + zData = tidyInPlug()->channelDataPlug()->getValue(); + } + else + { + throw IECore::Exception( "DeepSlice requires a Z channel" ); + } + + if( ImageAlgo::channelExists( channelNames, ImageAlgo::channelNameZBack ) ) + { + s.setChannelName( &ImageAlgo::channelNameZBack ); + zBackData = tidyInPlug()->channelDataPlug()->getValue(); + } + else + { + zBackData = zData; + } + } + + const std::vector &sampleOffsets = sampleOffsetsData->readable(); + const float *a = aData ? 
&aData->readable()[0] : nullptr; + const std::vector &z = zData->readable(); + const std::vector &zBack = zBackData->readable(); + + // Allocate outputs + + IntVectorDataPtr accumCountsData; + int *accumCounts = nullptr; + IntVectorDataPtr inputIndicesData; + int *inputIndices = nullptr; + FloatVectorDataPtr firstWeightsData; + float *firstWeights = nullptr; + FloatVectorDataPtr lastWeightsData; + float *lastWeights = nullptr; + + accumCountsData = new IntVectorData(); + accumCountsData->writable().resize( ImagePlug::tilePixels() ); + accumCounts = &accumCountsData->writable()[0]; + inputIndicesData = new IntVectorData(); + inputIndicesData->writable().resize( ImagePlug::tilePixels() ); + inputIndices = &inputIndicesData->writable()[0]; + + if( nearClip ) + { + firstWeightsData = new FloatVectorData(); + firstWeightsData->writable().resize( ImagePlug::tilePixels() ); + firstWeights = &firstWeightsData->writable()[0]; + } + + if( farClip ) + { + lastWeightsData = new FloatVectorData(); + lastWeightsData->writable().resize( ImagePlug::tilePixels() ); + lastWeights = &lastWeightsData->writable()[0]; + } + + // Now we're ready to actually process all the samples + + int prevOffset = 0; + int accumCount = 0; + + for( int i = 0; i < ImagePlug::tilePixels(); i++ ) + { + // Figure out the start and end of the range of samples to consider for each pixel. + // + // This is where we implement the logic that includes samples exactly at nearClipDepth, + // but exclude samples exactly at farClipDepth. + // + // We need to include point samples at the threshold on one side and not the other so that you + // can split on a chosen depth and then composite the two slices back together. 
+ // + // The choice to keep samples at the the near clip was made to avoid a specific weird special + // case: if we kept samples at the far clip, then it would really make sense to include a + // volume sample with an alpha of 1 starting at the far clip, since an alpha of 1 means it reaches + // full opacity immediately at the start of the volume range. However, if we include this sample, + // it would become a point sample, once the zBack is reduced to the far clip. The problem with this + // is that there could already be a point sample at this depth, before the volume sample. Outputting + // two point samples at the same depth would violate tidyness, and produce unexpected results because + // the two point samples wouldn't be combined in the right order. The only real solution would be + // adding a special case to combine the two source samples into one output point sample, but this + // would add some annoying complexity, since other than this, each output sample corresponds to + // exactly one input sample. + // + // Solution: discard point samples at the far clip, and keep point samples at the near clip instead. + // There is hypothetically the same problem with the near clip being exactly equal to the zBack value + // of a volume sample with an alpha of 1 ... but in order for this to happen, we're looking at something + // behind a sample with an alpha of 1, which isn't very meaningful anyway. Under these circumstances, + // I'm OK with simply discarding a volume sample when we are taking 0% of it, even if its alpha is 1, + // which keeps the code simpler. 
+
+		int offset = sampleOffsets[i];
+		int start = prevOffset;
+		if( nearClip )
+		{
+			// Increment start to omit any samples that are before the near clip, but don't skip a point
+			// sample exactly at the near clip ( if zBack is exactly on the clip, we only skip if it's
+			// a volume sample with z < zBack )
+			while( start < offset && ( zBack[start] < nearClipDepth || ( zBack[start] == nearClipDepth && z[start] < nearClipDepth ) ) )
+			{
+				start++;
+			}
+		}
+
+		int end = offset;
+		if( farClip )
+		{
+			end = start;
+
+			// Increment end to include any samples that are strictly before the far clip. Point samples exactly
+			// at the far clip are omitted.
+			while( end < offset && z[end] < farClipDepth )
+			{
+				end++;
+			}
+		}
+
+		accumCount += end - start;
+		accumCounts[i] = accumCount;
+		inputIndices[i] = start;
+
+		// Now set the weights for what fractions of the input samples to take.
+		//
+		// Note: you'll see a bunch of cases skipping these calculations when zBack == z.
+		// That's a point sample, which we either take or don't, you can't slice a fraction
+		// of a sample with no size.
+		float firstWeight = 1.0f;
+		float lastWeight = 1.0f;
+		if( end - start == 0 )
+		{
+			// If there are no samples, no need to worry about the values of the multipliers
+		}
+		else if( nearClip && farClip && end - start == 1 && zBack[start] > z[start] )
+		{
+			// Weird special case: if there is exactly one sample, then there's the possibility
+			// that both the start and end of the sample could be clipped. We put the combined
+			// weight representing both clips into firstWeight, and leave lastWeight set to 1.0.
+
+			float usedFraction =
+				( std::min( zBack[start], farClipDepth ) - std::max( z[start], nearClipDepth ) ) /
+				( zBack[start] - z[start] );
+
+			firstWeight = sampleMultiplier( a ?
a[start] : 0.0f, usedFraction ); + } + else + { + if( nearClip && zBack[start] > z[start] ) + { + float usedFraction = + ( zBack[start] - std::max( z[start], nearClipDepth ) ) / + ( zBack[start] - z[start] ); + + firstWeight = sampleMultiplier( a ? a[start] : 0.0f, usedFraction ); + } + + if( farClip && zBack[end - 1] > z[end - 1] ) + { + float usedFraction = + ( std::min( zBack[end - 1], farClipDepth ) - z[end - 1] ) / + ( zBack[end - 1] - z[end - 1] ); + + lastWeight = sampleMultiplier( a ? a[end - 1] : 0.0f, usedFraction ); + } + } + + if( firstWeights ) + { + firstWeights[i] = firstWeight; + } + + if( lastWeights ) + { + lastWeights[i] = lastWeight; + } + + prevOffset = offset; + } + + // Fill the result CompoundObject + + CompoundObjectPtr result = new CompoundObject; + result->members()[ g_accumCountsName ] = std::move( accumCountsData ); + result->members()[ g_inputIndicesName ] = std::move( inputIndicesData ); + if( firstWeightsData ) + { + result->members()[ g_firstWeightsName ] = std::move( firstWeightsData ); + } + if( lastWeightsData ) + { + result->members()[ g_lastWeightsName ] = std::move( lastWeightsData ); + } + static_cast( output )->setValue( result ); +} + +void DeepSlice::hashChannelData( const GafferImage::ImagePlug *parent, const Gaffer::Context *context, IECore::MurmurHash &h ) const +{ + ImageProcessor::hashChannelData( parent, context, h ); + + tidyInPlug()->channelDataPlug()->hash( h ); + + bool deep; + bool flatten; + + { + ImagePlug::GlobalScope s( context ); + flatten = flattenPlug()->getValue(); + deep = inPlug()->deepPlug()->getValue(); + nearClipPlug()->hash( h ); + nearClipDepthPlug()->hash( h ); + farClipPlug()->hash( h ); + farClipDepthPlug()->hash( h ); + } + + h.append( deep ); + h.append( flatten ); + + { + ImagePlug::ChannelDataScope scope( context ); + scope.remove( ImagePlug::channelNameContextName ); + sliceDataPlug()->hash( h ); + + const std::string &channelName = context->get( ImagePlug::channelNameContextName ); + + 
if( + flatten && deep && + channelName != ImageAlgo::channelNameA && + channelName != ImageAlgo::channelNameZ && + channelName != ImageAlgo::channelNameZBack + ) + { + ConstStringVectorDataPtr channelNamesData = inPlug()->channelNames(); + const std::vector &channelNames = channelNamesData->readable(); + if( ImageAlgo::channelExists( channelNames, ImageAlgo::channelNameA ) ) + { + scope.setChannelName( &ImageAlgo::channelNameA ); + tidyInPlug()->channelDataPlug()->hash( h ); + } + else + { + h.append( false ); + } + } + } +} + + +IECore::ConstFloatVectorDataPtr DeepSlice::computeChannelData( const std::string &channelName, const Imath::V2i &tileOrigin, const Gaffer::Context *context, const ImagePlug *parent ) const +{ + ConstFloatVectorDataPtr channelData = tidyInPlug()->channelDataPlug()->getValue(); + const std::vector &channel = channelData->readable(); + + bool deep; + bool flatten; + bool nearClip; + float nearClipDepth; + bool farClip; + float farClipDepth; + + { + ImagePlug::GlobalScope s( context ); + deep = inPlug()->deepPlug()->getValue(); + flatten = flattenPlug()->getValue(); + nearClip = nearClipPlug()->getValue(); + nearClipDepth = nearClipDepthPlug()->getValue(); + farClip = farClipPlug()->getValue(); + farClipDepth = farClipDepthPlug()->getValue(); + } + + if( !deep ) + { + // If the input is flat, we always make a flat output + flatten = true; + } + + ConstCompoundObjectPtr sliceData; + ConstFloatVectorDataPtr alphaData; + + { + ImagePlug::ChannelDataScope scope( context ); + scope.remove( ImagePlug::channelNameContextName ); + sliceData = sliceDataPlug()->getValue(); + + if( + flatten && deep && + channelName != ImageAlgo::channelNameA && + channelName != ImageAlgo::channelNameZ && + channelName != ImageAlgo::channelNameZBack + ) + { + // If the input is deep, and we're flattening, then we need to take into account the alpha's + // of samples in front of us when compositing this channel. 
( If we're not flattening, + // then this compositing happens later, and if we're not deep, then nothing can come in front ). + ConstStringVectorDataPtr channelNamesData = inPlug()->channelNames(); + const std::vector &channelNames = channelNamesData->readable(); + if( ImageAlgo::channelExists( channelNames, ImageAlgo::channelNameA ) ) + { + scope.setChannelName( &ImageAlgo::channelNameA ); + alphaData = tidyInPlug()->channelDataPlug()->getValue(); + } + } + } + + const int *accumCounts = nullptr; + const int *inputIndices = nullptr; + const float *firstWeights = nullptr; + const float *lastWeights = nullptr; + + if( const IntVectorData *accumCountsData = sliceData->member( g_accumCountsName ) ) + { + accumCounts = &accumCountsData->readable()[0]; + } + if( const IntVectorData *inputIndicesData = sliceData->member( g_inputIndicesName ) ) + { + inputIndices = &inputIndicesData->readable()[0]; + } + if( const FloatVectorData *firstWeightsData = sliceData->member( g_firstWeightsName ) ) + { + firstWeights = &firstWeightsData->readable()[0]; + } + if( const FloatVectorData *lastWeightsData = sliceData->member( g_lastWeightsName ) ) + { + lastWeights = &lastWeightsData->readable()[0]; + } + + FloatVectorDataPtr resultData = new FloatVectorData; + std::vector &result = resultData->writable(); + if( flatten ) + { + result.reserve( ImagePlug::tilePixels() ); + } + else + { + result.reserve( accumCounts[ ImagePlug::tilePixels() - 1 ] ); + } + + if( channelName == ImageAlgo::channelNameZ ) + { + // Special case for Z - instead of using the weights from sliceDataPlug(), we just apply the + // nearClipDepth here. 
+
+		int prevAccumCount = 0;
+		for( int i = 0; i < ImagePlug::tilePixels(); i++ )
+		{
+			int count = 1;
+			if( deep )
+			{
+				count = accumCounts[i] - prevAccumCount;
+				prevAccumCount = accumCounts[i];
+
+				if( count == 0 )
+				{
+					if( flatten )
+					{
+						result.push_back( 0.0f );
+					}
+					continue;
+				}
+
+				if( flatten )
+				{
+					count = 1;
+				}
+			}
+
+			int inputIndex = inputIndices[i];
+
+			int curIndex = inputIndex;
+			if( nearClip )
+			{
+				result.push_back( std::max( channel[inputIndex], nearClipDepth ) );
+				curIndex++;
+			}
+
+			for( ; curIndex < inputIndex + count; curIndex++ )
+			{
+				result.push_back( channel[curIndex] );
+			}
+		}
+	}
+	else if( channelName == ImageAlgo::channelNameZBack )
+	{
+		// Special case for ZBack - instead of using the weights from sliceDataPlug(), we just apply the
+		// farClipDepth here.
+		int prevAccumCount = 0;
+		for( int i = 0; i < ImagePlug::tilePixels(); i++ )
+		{
+			int inputIndex = inputIndices[i];
+
+			int count = 1;
+
+			if( deep )
+			{
+				count = accumCounts[i] - prevAccumCount;
+				prevAccumCount = accumCounts[i];
+
+				if( count == 0 )
+				{
+					if( flatten )
+					{
+						result.push_back( 0.0f );
+					}
+					continue;
+				}
+
+				if( !flatten )
+				{
+					for( int curIndex = inputIndex; curIndex < inputIndex + count - 1; curIndex++ )
+					{
+						result.push_back( channel[curIndex] );
+					}
+				}
+
+			}
+
+			if( farClip )
+			{
+				result.push_back( std::min( channel[inputIndex + count - 1], farClipDepth ) );
+			}
+			else
+			{
+				result.push_back( channel[inputIndex + count - 1] );
+			}
+		}
+	}
+	else if( flatten && channelName == ImageAlgo::channelNameA )
+	{
+		// Flattening alpha is a pretty common case, and offers a significant simplification over any other
+		// channel when flattening: whenever we flatten, we need to include the occlusion from the alpha of
+		// other samples, so we need both the channel and the alpha - but in the case of alpha, we only need
+		// one channel. 
+ int prevAccumCount = 0; + for( int i = 0; i < ImagePlug::tilePixels(); i++ ) + { + int inputIndex = inputIndices[i]; + int curIndex = inputIndex; + + int count = accumCounts[i] - prevAccumCount; + prevAccumCount = accumCounts[i]; + + if( count == 0 ) + { + result.push_back( 0.0f ); + continue; + } + + float accumAlpha = 0; + + // If nearClip is set, multiply the first sample by the provided weight, and increment the current + // output index. + if( nearClip ) + { + accumAlpha = channel[inputIndex] * firstWeights[i]; + curIndex++; + } + + // Process all the samples that weren't output yet, except for the last sample + for( ; curIndex < inputIndex + count - 1; curIndex++ ) + { + accumAlpha += channel[curIndex] * ( 1 - accumAlpha ); + } + + // This conditional only fails when there was a single deep sample, and it was output by the near + // clip ( we build the weights so that the firstWeight will include the far clip as well in this case ) + if( curIndex < inputIndex + count ) + { + // Process the last sample + if( farClip ) + { + accumAlpha += channel[curIndex] * lastWeights[i] * ( 1 - accumAlpha ); + } + else + { + accumAlpha += channel[curIndex] * ( 1 - accumAlpha ); + } + } + result.push_back( accumAlpha ); + } + } + else if( flatten ) + { + // Now the more complex general case, where we have both an alpha and a separate channel + const float* alpha = nullptr; + if( alphaData ) + { + alpha = &alphaData->readable()[0]; + } + + int prevAccumCount = 0; + for( int i = 0; i < ImagePlug::tilePixels(); i++ ) + { + int inputIndex = inputIndices[i]; + int curIndex = inputIndex; + + int count = accumCounts[i] - prevAccumCount; + prevAccumCount = accumCounts[i]; + + if( count == 0 ) + { + result.push_back( 0.0f ); + continue; + } + + float accumAlpha = 0; + float accumChannel = 0; + + // If nearClip is set, multiply the first sample by the provided weight, and increment the current + // output index. 
+ if( nearClip ) + { + accumChannel = channel[inputIndex] * firstWeights[i]; + if( alpha ) + { + accumAlpha = alpha[inputIndex] * firstWeights[i]; + } + curIndex++; + } + + // Process all the samples that weren't output yet, except for the last sample + for( ; curIndex < inputIndex + count - 1; curIndex++ ) + { + accumChannel += channel[curIndex] * ( 1 - accumAlpha ); + if( alpha ) + { + accumAlpha += alpha[curIndex] * ( 1 - accumAlpha ); + } + } + + // This conditional only fails when there was a single deep sample, and it was output by the near + // clip ( we build the weights so that the firstWeight will include the far clip as well in this case ) + if( curIndex < inputIndex + count ) + { + // Process the last sample + if( farClip ) + { + accumChannel += channel[curIndex] * lastWeights[i] * ( 1 - accumAlpha ); + // We don't care about updating accumAlpha, because we have no more samples to alpha- + // composite + } + else + { + accumChannel += channel[curIndex] * ( 1 - accumAlpha ); + } + } + + result.push_back( accumChannel ); + } + } + else + { + // Finally, if we're not flattening, then we don't account for alpha occlusion yet. It's basically the + // same as above, but simpler, and we output separate samples instead of accumulating. 
+ int prevAccumCount = 0; + for( int i = 0; i < ImagePlug::tilePixels(); i++ ) + { + int count = 1; + int inputIndex = inputIndices[i]; + int curIndex = inputIndex; + if( deep ) + { + count = accumCounts[i] - prevAccumCount; + prevAccumCount = accumCounts[i]; + if( count == 0 ) + { + continue; + } + + if( nearClip ) + { + result.push_back( channel[inputIndex] * firstWeights[i] ); + curIndex++; + } + } + + for( ; curIndex < inputIndex + count - 1; curIndex++ ) + { + result.push_back( channel[curIndex] ); + } + + // This conditional only fails when there was a single deep sample, and it was output by the near + // clip ( we build the weights so that the firstWeight will include the far clip as well in this case ) + if( curIndex < inputIndex + count ) + { + if( farClip ) + { + result.push_back( channel[inputIndex + count - 1] * lastWeights[i] ); + } + else + { + result.push_back( channel[inputIndex + count - 1] ); + } + } + } + } + + return resultData; +} + +void DeepSlice::hashSampleOffsets( const GafferImage::ImagePlug *parent, const Gaffer::Context *context, IECore::MurmurHash &h ) const +{ + ImageProcessor::hashSampleOffsets( parent, context, h ); + + { + ImagePlug::GlobalScope s( context ); + if( flattenPlug()->getValue() || !inPlug()->deepPlug()->getValue() ) + { + h = ImagePlug::flatTileSampleOffsets()->Object::hash(); + return; + } + } + + sliceDataPlug()->hash( h ); +} + +IECore::ConstIntVectorDataPtr DeepSlice::computeSampleOffsets( const Imath::V2i &tileOrigin, const Gaffer::Context *context, const ImagePlug *parent ) const +{ + + { + ImagePlug::GlobalScope s( context ); + if( flattenPlug()->getValue() || !inPlug()->deepPlug()->getValue() ) + { + return ImagePlug::flatTileSampleOffsets(); + } + } + + // The accumulated counts in the sliceData are the same thing as the sampleOffsets. 
The only reason the + // name is different is because it doesn't really make sense to call them "sampleOffsets" when the output + // is flat ( in that case, we use the same data for a different purpose ). + ConstCompoundObjectPtr sliceData = sliceDataPlug()->getValue(); + return sliceData->member( g_accumCountsName ); +} + +void DeepSlice::hashDeep( const GafferImage::ImagePlug *parent, const Gaffer::Context *context, IECore::MurmurHash &h ) const +{ + inPlug()->deepPlug()->hash( h ); + flattenPlug()->hash( h ); +} + +bool DeepSlice::computeDeep( const Gaffer::Context *context, const ImagePlug *parent ) const +{ + bool deep = inPlug()->deepPlug()->getValue(); + if( flattenPlug()->getValue() ) + { + deep = false; + } + return deep; +} + diff --git a/src/GafferImageModule/DeepNodeBinding.cpp b/src/GafferImageModule/DeepNodeBinding.cpp index 1d5dca7f1bf..b7510f970cd 100644 --- a/src/GafferImageModule/DeepNodeBinding.cpp +++ b/src/GafferImageModule/DeepNodeBinding.cpp @@ -45,6 +45,7 @@ #include "GafferImage/DeepSampler.h" #include "GafferImage/DeepHoldout.h" #include "GafferImage/DeepRecolor.h" +#include "GafferImage/DeepSlice.h" #include "DeepNodeBinding.h" @@ -92,4 +93,5 @@ void GafferImageModule::bindDeepNodes() DependencyNodeClass(); DependencyNodeClass(); DependencyNodeClass(); + DependencyNodeClass(); } diff --git a/startup/gui/menus.py b/startup/gui/menus.py index db003944913..33b24d7c01e 100644 --- a/startup/gui/menus.py +++ b/startup/gui/menus.py @@ -402,6 +402,7 @@ def __lightCreator( nodeName, shaderName, shape ) : nodeMenu.append( "/Image/Deep/Deep Sampler", GafferImage.DeepSampler, searchText = "DeepSampler" ) nodeMenu.append( "/Image/Deep/Deep Holdout", GafferImage.DeepHoldout, searchText = "DeepHoldout" ) nodeMenu.append( "/Image/Deep/Deep Recolor", GafferImage.DeepRecolor, searchText = "DeepRecolor" ) +nodeMenu.append( "/Image/Deep/Deep Slice", GafferImage.DeepSlice, searchText = "DeepSlice" ) nodeMenu.append( "/Image/MultiView/Create Views", 
GafferImage.CreateViews, searchText = "CreateViews", postCreator = GafferImageUI.CreateViewsUI.postCreate ) nodeMenu.append( "/Image/MultiView/Select View", GafferImage.SelectView, searchText = "SelectView" ) nodeMenu.append( "/Image/MultiView/Delete Views", GafferImage.DeleteViews, searchText = "DeleteViews" )