diff --git a/Changes.md b/Changes.md
index 7efa875689..057dd721ac 100644
--- a/Changes.md
+++ b/Changes.md
@@ -14,6 +14,7 @@ Improvements
 - Blue : Proxy
 - Red : Guide
 - Catalogue : Added a handle for controlling the relative sizes of the listing and image property widgets.
+- RenderPassEditor, LightEditor : Improved update performance for certain graph configurations, by optimising `SceneAlgo::history()` (#5199).
 
 Fixes
 -----
@@ -26,6 +27,7 @@ Fixes
 - Fixed bug which allowed locked Catalogues to be edited.
 - Fixed NodeEditor update when the first image is added or the last image is removed.
 - NameWidget : Fixed bug which allowed plugs on locked nodes to be renamed.
+- ValuePlug : Fixed the plug passed to `Monitor::forceMonitoring()`. Previously `Process::destinationPlug()` was being passed instead of `Process::plug()`.
 
 API
 ---
diff --git a/python/GafferSceneTest/SceneAlgoTest.py b/python/GafferSceneTest/SceneAlgoTest.py
index 86df510ada..c1c29248d4 100644
--- a/python/GafferSceneTest/SceneAlgoTest.py
+++ b/python/GafferSceneTest/SceneAlgoTest.py
@@ -1946,6 +1946,46 @@ def assertNoCanceller( history ) :
 
 		assertNoCanceller( history )
 
+	@GafferTest.TestRunner.PerformanceTestMethod()
+	def testHistoryDiamondPerformance( self ) :
+
+		# A series of diamonds where every iteration reads the output of the
+		# previous iteration twice and then feeds into the next iteration.
+		#
+		#    o
+		#   / \
+		#  o   o
+		#   \ /
+		#    o
+		#   / \
+		#  o   o
+		#   \ /
+		#    o
+		#   / \
+		#   ...
+		#   \ /
+		#    o
+		#
+		# Without caching, this leads to an explosion of paths through the
+		# graph, and can lead to poor performance in `history()`.
+
+		plane = GafferScene.Plane()
+
+		loop = Gaffer.Loop()
+		loop.setup( plane["out"] )
+		loop["in"].setInput( plane["out"] )
+
+		copyOptions = GafferScene.CopyOptions()
+		copyOptions["in"].setInput( loop["previous"] )
+		copyOptions["source"].setInput( loop["previous"] )
+		copyOptions["options"].setValue( "*" )
+
+		loop["next"].setInput( copyOptions["out"] )
+		loop["iterations"].setValue( 20 )
+
+		with GafferTest.TestRunner.PerformanceScope() :
+			GafferScene.SceneAlgo.history( loop["out"]["globals"] )
+
 	def testLinkingQueries( self ) :
 
 		# Everything linked to `defaultLights` via the default value for the attribute.
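A note on what the new test exercises: `SceneAlgo.history()` returns a `History` item whose `scene`, `context` and `predecessors` members describe the upstream evaluations that produced a value. The sketch below walks that structure; `walkHistory` is a hypothetical helper, not part of the Gaffer API. Without the deduplication made in `SceneAlgo.cpp` below, each diamond in the node graph doubles the number of paths captured, so a 20-iteration chain like the test's would produce on the order of 2^20 history paths rather than a single linear one.

```python
import GafferScene

def walkHistory( history, depth = 0 ) :

	# Each `History` item records the scene plug and the context it was
	# evaluated in, plus the upstream items it was derived from.
	print( "  " * depth, history.scene.fullName() )
	for predecessor in history.predecessors :
		walkHistory( predecessor, depth + 1 )

plane = GafferScene.Plane()
history = GafferScene.SceneAlgo.history( plane["out"]["globals"] )
walkHistory( history )
```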
diff --git a/src/Gaffer/ValuePlug.cpp b/src/Gaffer/ValuePlug.cpp
index 5e5c224877..fe036e133e 100644
--- a/src/Gaffer/ValuePlug.cpp
+++ b/src/Gaffer/ValuePlug.cpp
@@ -241,7 +241,7 @@ class ValuePlug::HashProcess : public Process
 
 			// Then get our hash. We do this using this `acquireHash()` functor so that
 			// we can repeat the process for `Checked` mode.
-			const bool forceMonitoring = Process::forceMonitoring( threadState, plug, staticType );
+			const bool forceMonitoring = Process::forceMonitoring( threadState, p, staticType );
 
 			auto acquireHash = [&]( const HashCacheKey &cacheKey )
 			{
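The one-character fix above is about which plug is reported to monitors. When a plug's value is forwarded through an input connection, `Process::plug()` is the source plug that actually computes, while `Process::destinationPlug()` is the plug on which evaluation was originally requested. `forceMonitoring()` is now queried with the former, matching the plug that `processStarted()` subsequently reports. The sketch below illustrates the distinction from Python using `GafferTest.AddNode`; the exact statistics printed are illustrative rather than guaranteed output.

```python
import Gaffer
import GafferTest

a = GafferTest.AddNode()
b = GafferTest.AddNode()

# `b["op1"]` takes its value from `a["sum"]`, so evaluating it runs a
# process whose `plug()` is `a["sum"]` and whose `destinationPlug()`
# is `b["op1"]`.
b["op1"].setInput( a["sum"] )

with Gaffer.PerformanceMonitor() as monitor :
	b["op1"].getValue()

# Statistics are recorded against the plug that did the computing.
print( monitor.plugStatistics( a["sum"] ).computeCount )
```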
diff --git a/src/GafferScene/SceneAlgo.cpp b/src/GafferScene/SceneAlgo.cpp
index 24e30a4401..fdf0eeef5e 100644
--- a/src/GafferScene/SceneAlgo.cpp
+++ b/src/GafferScene/SceneAlgo.cpp
@@ -437,8 +437,6 @@ class CapturingMonitor : public Monitor
 		{
 			const Plug *p = process->plug();
 
-			CapturedProcess::Ptr capturedProcess;
-			ProcessOrScope entry;
 			if( !shouldCapture( p ) )
 			{
 				// Parents may spawn other processes in support of the requested plug. This is one
@@ -450,39 +448,44 @@ class CapturingMonitor : public Monitor
 				// order of the stack is preserved - if this happens out of order, the stack will be
 				// corrupted, and weird crashes will happen. But as long as it is created in
 				// processStarted, and released in processFinished, everything should line up.
-				entry = std::make_unique<Monitor::Scope>( this, false );
-			}
-			else
-			{
-				capturedProcess = std::make_unique<CapturedProcess>();
-				capturedProcess->type = process->type();
-				capturedProcess->plug = p;
-				capturedProcess->destinationPlug = process->destinationPlug();
-				capturedProcess->context = new Context( *process->context(), /* omitCanceller = */ true );
-				entry = capturedProcess.get();
+				Mutex::scoped_lock lock( m_mutex );
+				m_processMap[process] = std::make_unique<Monitor::Scope>( this, false );
+				return;
 			}
 
+			// Capture this process.
+
+			CapturedProcess::Ptr capturedProcess = std::make_unique<CapturedProcess>();
+			capturedProcess->type = process->type();
+			capturedProcess->plug = p;
+			capturedProcess->destinationPlug = process->destinationPlug();
+			capturedProcess->context = new Context( *process->context(), /* omitCanceller = */ true );
+
 			Mutex::scoped_lock lock( m_mutex );
-			m_processMap[process] = std::move( entry );
+			m_processMap[process] = capturedProcess.get();
 
-			if( capturedProcess )
+			ProcessMap::const_iterator it = m_processMap.find( process->parent() );
+			if( it != m_processMap.end() )
 			{
-				ProcessMap::const_iterator it = m_processMap.find( process->parent() );
-				if( it != m_processMap.end() )
+				CapturedProcess * const * parent = boost::get< CapturedProcess* >( &it->second );
+				if( parent && *parent )
 				{
-					CapturedProcess * const * parent = boost::get< CapturedProcess* >( &it->second );
-					if( parent && *parent )
-					{
-						(*parent)->children.push_back( std::move( capturedProcess ) );
-					}
-				}
-				else
-				{
-					// Either `process->parent()` was null, or was started
-					// before we were made active via `Monitor::Scope`.
-					m_rootProcesses.push_back( std::move( capturedProcess ) );
+					(*parent)->children.push_back( std::move( capturedProcess ) );
 				}
 			}
+			else
+			{
+				// Either `process->parent()` was null, or was started
+				// before we were made active via `Monitor::Scope`.
+				m_rootProcesses.push_back( std::move( capturedProcess ) );
+			}
+
+			// Remember that we've monitored this process so that we don't force
+			// its monitoring again.
+
+			IECore::MurmurHash h = process->context()->hash();
+			h.append( reinterpret_cast<uintptr_t>( p ) );
+			m_monitoredSet.insert( h );
 		}
 
 		void processFinished( const Process *process ) override
@@ -498,12 +501,22 @@ class CapturingMonitor : public Monitor
 
 		bool forceMonitoring( const Plug *plug, const IECore::InternedString &processType ) override
 		{
-			if( processType == g_hashProcessType && shouldCapture( plug ) )
+			if( processType != g_hashProcessType || !shouldCapture( plug ) )
 			{
-				return true;
+				return false;
 			}
-			return false;
+
+			// Don't force the monitoring of a process we've monitored already. This does
+			// mean we throw away diamond dependencies in the process graph, but it is essential
+			// for performance in some cases - see `testHistoryDiamondPerformance()` for example.
+			/// \todo Potentially we could use the hash to find the previously captured process,
+			/// and instance that into our process graph. This would require clients of `History`
+			/// to be updated to handle such topologies efficiently by tracking previously visited
+			/// items. It may also be of fairly low value, since typically our goal is to find the
+			/// first relevant path through the graph to present to the user.
+			IECore::MurmurHash h = Context::current()->hash();
+			h.append( reinterpret_cast<uintptr_t>( plug ) );
+			return !m_monitoredSet.count( h );
 		}
 
 	private :
@@ -518,15 +531,18 @@ class CapturingMonitor : public Monitor
 		}
 
 		using Mutex = tbb::spin_mutex;
-
-		Mutex m_mutex;
-
 		using ProcessOrScope = boost::variant<CapturedProcess *, std::unique_ptr<Monitor::Scope>>;
 		using ProcessMap = boost::unordered_map<const Process *, ProcessOrScope>;
 
+		const IECore::InternedString m_scenePlugChildName;
+
+		// Protects `m_processMap` and `m_rootProcesses`.
+		/// \todo Perhaps they should be concurrent containers instead?
+		Mutex m_mutex;
 		ProcessMap m_processMap;
 		CapturedProcess::PtrVector m_rootProcesses;
-		IECore::InternedString m_scenePlugChildName;
+
+		tbb::concurrent_unordered_set<IECore::MurmurHash> m_monitoredSet;
 
 };
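Overall, `CapturingMonitor` now deduplicates capture on a key built from the current context's hash plus the plug's address: `processStarted()` records the key in `m_monitoredSet`, and `forceMonitoring()` returns `false` for any key already present. The set is a `tbb::concurrent_unordered_set` so that `forceMonitoring()` can query it without taking the spin mutex that guards `m_processMap`. Below is a rough Python analogue of the key; `monitoringKey` is a hypothetical helper, and `id()` stands in for the plug address used in the C++.

```python
import Gaffer

def monitoringKey( plug, context ) :

	# The context hash, extended with the identity of the plug. Two
	# processes that share a key would capture identical subtrees, so
	# only the first occurrence needs to be monitored.
	h = context.hash()
	h.append( id( plug ) )  # stand-in for the C++ plug address
	return h

plug = Gaffer.IntPlug()
context = Gaffer.Context()
print( monitoringKey( plug, context ) )
```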