diff --git a/src/quo-vadis-pthread.cc b/src/quo-vadis-pthread.cc index 99543cd..60bd7e9 100644 --- a/src/quo-vadis-pthread.cc +++ b/src/quo-vadis-pthread.cc @@ -129,7 +129,7 @@ qv_pthread_scopes_free( return QV_ERR_INVLD_ARG; } try { - qv_scope_s::thdel(&scopes, nscopes); + qv_scope_s::thdestroy(&scopes, nscopes); return QV_SUCCESS; } qvi_catch_and_return(); diff --git a/src/quo-vadis.cc b/src/quo-vadis.cc index 708b4b1..0cea6fd 100644 --- a/src/quo-vadis.cc +++ b/src/quo-vadis.cc @@ -82,7 +82,7 @@ qv_scope_free( return QV_ERR_INVLD_ARG; } try { - qv_scope_s::del(&scope); + qv_scope_s::destroy(&scope); return QV_SUCCESS; } qvi_catch_and_return(); @@ -98,7 +98,7 @@ qv_scope_nobjs( return QV_ERR_INVLD_ARG; } try { - *nobjs = scope->nobjects(obj); + *nobjs = scope->hwpool_nobjects(obj); return QV_SUCCESS; } qvi_catch_and_return(); @@ -142,7 +142,7 @@ qv_scope_barrier( return QV_ERR_INVLD_ARG; } try { - return scope->barrier(); + return scope->group_barrier(); } qvi_catch_and_return(); } diff --git a/src/qvi-hwpool.cc b/src/qvi-hwpool.cc index 05156cf..517f179 100644 --- a/src/qvi-hwpool.cc +++ b/src/qvi-hwpool.cc @@ -13,116 +13,10 @@ * Hardware Resource Pool */ -// TODOs -// * Resource reference counting. -// * Need to deal with resource unavailability. -// * Split and attach devices properly. -// * Have bitmap scratch pad that is initialized once, then destroyed? This -// approach may be an nice allocation optimization, but in heavily threaded -// code may be a bottleneck. - - -// Notes: -// * Does it make sense attempting resource exclusivity? Why not just let the -// users get what they ask for and hope that the abstractions that we provide do -// a good enough job most of the time. Making the user deal with resource -// exhaustion and retries (which will eventually be the case with -// QV_RES_UNAVAILABLE) is error prone and often frustrating. -// -// * Reference Counting: we should probably still implement a rudimentary -// reference counting system, but perhaps not for enforcing resource -// exclusivity. Rather we could use this information to guide a collection of -// resource allocators that would use resource availability for their pool -// management strategies. - -// A Straightforward Reference Counting Approach: Maintain an array of integers -// with length number of cpuset bits. As each resource (bitmap) is obtained, -// increment the internal counter of each corresponding position. When a -// resource is released, decrement in a similar way. If a location in the array -// is zero, then the resource is not in use. For devices, we can take a similar -// approach using the device IDs instead of the bit positions. - #include "qvi-hwpool.h" #include "qvi-bbuff-rmi.h" #include "qvi-utils.h" -#if 0 -/** - * - */ -static int -cpus_available( - hwloc_const_cpuset_t which, - hwloc_const_cpuset_t from, - bool *avail -) { - // TODO(skg) Cache storage for calculation? 
- hwloc_cpuset_t tcpus = nullptr; - int rc = qvi_hwloc_bitmap_calloc(&tcpus); - if (rc != QV_SUCCESS) return rc; - - int hrc = hwloc_bitmap_and(tcpus, which, from); - if (hrc != 0) { - rc = QV_ERR_HWLOC; - } - if (rc == QV_SUCCESS) { - *avail = cpusets_equal(tcpus, which); - } - else { - *avail = false; - } - qvi_hwloc_bitmap_free(&tcpus); - return rc; -} -#endif - -/** - * Example: - * obcpuset 0110 0101 - * request 1000 1010 - * obcpuset' 1110 1111 - */ -#if 0 -static int -pool_obtain_cpus_by_cpuset( - qvi_hwpool_s *pool, - hwloc_const_cpuset_t request -) { -#if 0 - int hwrc = hwloc_bitmap_or( - pool->obcpuset, - pool->obcpuset, - request - ); - return (hwrc == 0 ? QV_SUCCESS : QV_ERR_HWLOC); -#endif - QVI_UNUSED(pool); - QVI_UNUSED(request); - return QV_SUCCESS; -} -#endif - -/** - * Example: - * obcpuset 0110 0101 - * release 0100 0100 - * obcpuset' 0010 0001 - */ -#if 0 -static int -pool_release_cpus_by_cpuset( - qvi_hwpool_s *pool, - hwloc_const_cpuset_t release -) { - int hwrc = hwloc_bitmap_andnot( - pool->obcpuset, - pool->obcpuset, - release - ); - return (hwrc == 0 ? QV_SUCCESS : QV_ERR_HWLOC); -} -#endif - qv_scope_create_hints_t qvi_hwpool_res_s::hints(void) { @@ -130,15 +24,15 @@ qvi_hwpool_res_s::hints(void) } qvi_hwloc_bitmap_s & -qvi_hwpool_cpu_s::cpuset(void) +qvi_hwpool_res_s::affinity(void) { - return m_cpuset; + return m_affinity; } const qvi_hwloc_bitmap_s & -qvi_hwpool_cpu_s::cpuset(void) const +qvi_hwpool_res_s::affinity(void) const { - return m_cpuset; + return m_affinity; } int @@ -149,7 +43,7 @@ qvi_hwpool_cpu_s::packinto( const int rc = qvi_bbuff_rmi_pack_item(buff, m_hints); if (qvi_unlikely(rc != QV_SUCCESS)) return rc; // Pack cpuset. - return qvi_bbuff_rmi_pack_item(buff, m_cpuset); + return qvi_bbuff_rmi_pack_item(buff, m_affinity); } int @@ -168,7 +62,7 @@ qvi_hwpool_cpu_s::unpack( buffpos += bw; // Unpack bitmap. rc = qvi_bbuff_rmi_unpack_item( - cpu.m_cpuset, buffpos, &bw + cpu.m_affinity, buffpos, &bw ); if (qvi_unlikely(rc != QV_SUCCESS)) goto out; total_bw += bw; @@ -209,7 +103,7 @@ int qvi_hwpool_dev_s::id( qv_device_id_type_t format, char **result -) { +) const { int rc = QV_SUCCESS, nw = 0; switch (format) { case (QV_DEVICE_ID_UUID): @@ -233,12 +127,6 @@ qvi_hwpool_dev_s::id( return rc; } -const qvi_hwloc_bitmap_s & -qvi_hwpool_dev_s::affinity(void) const -{ - return m_affinity; -} - int qvi_hwpool_dev_s::packinto( qvi_bbuff_t *buff @@ -327,7 +215,7 @@ qvi_hwpool_s::add_devices_with_affinity( for (const auto devt : qvi_hwloc_supported_devices()) { qvi_hwloc_dev_list_t devs; rc = qvi_hwloc_get_devices_in_bitmap( - hwloc, devt, m_cpu.cpuset(), devs + hwloc, devt, m_cpu.affinity(), devs ); if (qvi_unlikely(rc != QV_SUCCESS)) return rc; for (const auto &dev : devs) { @@ -339,7 +227,7 @@ qvi_hwpool_s::add_devices_with_affinity( } int -qvi_hwpool_s::new_hwpool( +qvi_hwpool_s::create( qvi_hwloc_t *hwloc, hwloc_const_cpuset_t cpuset, qvi_hwpool_s **opool @@ -362,7 +250,7 @@ qvi_hwpool_s::initialize( qvi_hwloc_t *hwloc, hwloc_const_bitmap_t cpuset ) { - const int rc = m_cpu.cpuset().set(cpuset); + const int rc = m_cpu.affinity().set(cpuset); if (qvi_unlikely(rc != QV_SUCCESS)) return rc; // Add devices with affinity to the hardware pool. 
return add_devices_with_affinity(hwloc); @@ -371,7 +259,7 @@ qvi_hwpool_s::initialize( const qvi_hwloc_bitmap_s & qvi_hwpool_s::cpuset(void) const { - return m_cpu.cpuset(); + return m_cpu.affinity(); } const qvi_hwpool_devs_t & @@ -388,7 +276,7 @@ qvi_hwpool_s::nobjects( ) { if (qvi_hwloc_obj_type_is_host_resource(obj_type)) { return qvi_hwloc_get_nobjs_in_cpuset( - hwloc, obj_type, m_cpu.cpuset().cdata(), result + hwloc, obj_type, m_cpu.affinity().cdata(), result ); } *result = m_devs.count(obj_type); diff --git a/src/qvi-hwpool.h b/src/qvi-hwpool.h index 76d0111..5e9c608 100644 --- a/src/qvi-hwpool.h +++ b/src/qvi-hwpool.h @@ -26,33 +26,29 @@ struct qvi_hwpool_res_s { protected: /** Resource hint flags. */ qv_scope_create_hints_t m_hints = QV_SCOPE_CREATE_HINT_NONE; + /** The resource's affinity encoded as a bitmap. */ + qvi_hwloc_bitmap_s m_affinity; public: /** Returns the resource's create hints. */ qv_scope_create_hints_t hints(void); -}; - -/** - * Defines a hardware pool CPU. A CPU here may have multiple - * processing units (PUs), which are defined in the CPU's cpuset. - */ -struct qvi_hwpool_cpu_s : qvi_hwpool_res_s { -private: - /** The cpuset of the CPU's PUs. */ - qvi_hwloc_bitmap_s m_cpuset; -public: /** - * Returns a reference to the - * CPU's resources encoded by a bitmap. + * Returns a reference to the resource's affinity encoded by a bitmap. */ qvi_hwloc_bitmap_s & - cpuset(void); + affinity(void); /** - * Returns a const reference to the - * CPU's resources encoded by a bitmap. + * Returns a const reference to the resource's affinity encoded by a bitmap. */ const qvi_hwloc_bitmap_s & - cpuset(void) const; + affinity(void) const; +}; + +/** + * Defines a hardware pool CPU. A CPU here may have multiple + * processing units (PUs), which are defined as the CPU's affinity. + */ +struct qvi_hwpool_cpu_s : qvi_hwpool_res_s { /** Packs the instance into the provided buffer. */ int packinto( @@ -109,13 +105,7 @@ struct qvi_hwpool_dev_s : qvi_hwpool_res_s { id( qv_device_id_type_t format, char **result - ); - /** - * Returns a const reference to the - * device's affinity encoded by a bitmap. - */ - const qvi_hwloc_bitmap_s & - affinity(void) const; + ) const; /** Packs the instance into the provided buffer. */ int packinto( @@ -157,7 +147,7 @@ struct qvi_hwpool_s { * on the affinity encoded in the provided cpuset. */ static int - new_hwpool( + create( qvi_hwloc_t *hwloc, hwloc_const_cpuset_t cpuset, qvi_hwpool_s **opool diff --git a/src/qvi-hwsplit.cc b/src/qvi-hwsplit.cc index 33a003b..9eba903 100644 --- a/src/qvi-hwsplit.cc +++ b/src/qvi-hwsplit.cc @@ -18,6 +18,113 @@ #include "qvi-task.h" // IWYU pragma: keep #include "qvi-scope.h" // IWYU pragma: keep +#if 0 +/** + * + */ +static int +cpus_available( + hwloc_const_cpuset_t which, + hwloc_const_cpuset_t from, + bool *avail +) { + // TODO(skg) Cache storage for calculation? 
+ hwloc_cpuset_t tcpus = nullptr; + int rc = qvi_hwloc_bitmap_calloc(&tcpus); + if (rc != QV_SUCCESS) return rc; + + int hrc = hwloc_bitmap_and(tcpus, which, from); + if (hrc != 0) { + rc = QV_ERR_HWLOC; + } + if (rc == QV_SUCCESS) { + *avail = cpusets_equal(tcpus, which); + } + else { + *avail = false; + } + qvi_hwloc_bitmap_free(&tcpus); + return rc; +} +#endif + +/** + * Example: + * obcpuset 0110 0101 + * request 1000 1010 + * obcpuset' 1110 1111 + */ +#if 0 +static int +pool_obtain_cpus_by_cpuset( + qvi_hwpool_s *pool, + hwloc_const_cpuset_t request +) { +#if 0 + int hwrc = hwloc_bitmap_or( + pool->obcpuset, + pool->obcpuset, + request + ); + return (hwrc == 0 ? QV_SUCCESS : QV_ERR_HWLOC); +#endif + QVI_UNUSED(pool); + QVI_UNUSED(request); + return QV_SUCCESS; +} +#endif + +/** + * Example: + * obcpuset 0110 0101 + * release 0100 0100 + * obcpuset' 0010 0001 + */ +#if 0 +static int +pool_release_cpus_by_cpuset( + qvi_hwpool_s *pool, + hwloc_const_cpuset_t release +) { + int hwrc = hwloc_bitmap_andnot( + pool->obcpuset, + pool->obcpuset, + release + ); + return (hwrc == 0 ? QV_SUCCESS : QV_ERR_HWLOC); +} +#endif + +// TODOs +// * Resource reference counting. +// * Need to deal with resource unavailability. +// * Split and attach devices properly. +// * Have bitmap scratch pad that is initialized once, then destroyed? This +// approach may be an nice allocation optimization, but in heavily threaded +// code may be a bottleneck. +// TODO(skg) Use distance API for device affinity. +// TODO(skg) Add RMI to acquire/release resources. + +// Notes: +// * Does it make sense attempting resource exclusivity? Why not just let the +// users get what they ask for and hope that the abstractions that we provide do +// a good enough job most of the time. Making the user deal with resource +// exhaustion and retries (which will eventually be the case with +// QV_RES_UNAVAILABLE) is error prone and often frustrating. +// +// * Reference Counting: we should probably still implement a rudimentary +// reference counting system, but perhaps not for enforcing resource +// exclusivity. Rather we could use this information to guide a collection of +// resource allocators that would use resource availability for their pool +// management strategies. + +// A Straightforward Reference Counting Approach: Maintain an array of integers +// with length number of cpuset bits. As each resource (bitmap) is obtained, +// increment the internal counter of each corresponding position. When a +// resource is released, decrement in a similar way. If a location in the array +// is zero, then the resource is not in use. For devices, we can take a similar +// approach using the device IDs instead of the bit positions. + /** Maintains a mapping between IDs to device information. */ using id2devs_t = std::multimap; @@ -140,6 +247,7 @@ qvi_hwsplit_s::affinity_preserving_policy(void) const return qvi_map_spread; } } + /** Releases all devices contained in the provided split aggregate. */ int qvi_hwsplit_s::release_devices(void) @@ -151,6 +259,7 @@ qvi_hwsplit_s::release_devices(void) } return rc; } + /** * Straightforward user-defined device splitting. */ @@ -214,6 +323,7 @@ qvi_hwsplit_s::split_devices_user_defined(void) } return rc; } + /** * Affinity preserving device splitting. */ @@ -287,6 +397,7 @@ apply_cpuset_mapping( } return rc; } + /** * User-defined split. */ @@ -352,6 +463,7 @@ qvi_hwsplit_s::split_affinity_preserving(void) // Finally, split the devices. 
return split_devices_affinity_preserving(); } + /** * Takes a vector of colors and clamps their values to [0, ndc) * in place, where ndc is the number of distinct numbers found in values. diff --git a/src/qvi-rmi.cc b/src/qvi-rmi.cc index f55ac10..bebb58c 100644 --- a/src/qvi-rmi.cc +++ b/src/qvi-rmi.cc @@ -644,7 +644,7 @@ get_intrinsic_scope_user( qvi_hwpool_s **hwpool ) { // TODO(skg) Is the cpuset the best way to do this? - return qvi_hwpool_s::new_hwpool( + return qvi_hwpool_s::create( server->config.hwloc, qvi_hwloc_topo_get_cpuset(server->config.hwloc), hwpool @@ -663,7 +663,7 @@ get_intrinsic_scope_proc( ); if (rc != QV_SUCCESS) goto out; - rc = qvi_hwpool_s::new_hwpool( + rc = qvi_hwpool_s::create( server->config.hwloc, cpuset, hwpool ); out: diff --git a/src/qvi-scope.cc b/src/qvi-scope.cc index 9dc5520..65a7ae0 100644 --- a/src/qvi-scope.cc +++ b/src/qvi-scope.cc @@ -14,9 +14,6 @@ * @file qvi-scope.cc */ -// TODO(skg) Use distance API for device affinity. -// TODO(skg) Add RMI to acquire/release resources. - #include "qvi-scope.h" #include "qvi-rmi.h" #include "qvi-task.h" @@ -37,21 +34,21 @@ qv_scope_s::~qv_scope_s(void) } void -qv_scope_s::del( +qv_scope_s::destroy( qv_scope_t **scope ) { qvi_delete(scope); } void -qv_scope_s::thdel( +qv_scope_s::thdestroy( qv_scope_t ***kscopes, uint_t k ) { if (!kscopes) return; qv_scope_t **ikscopes = *kscopes; for (uint_t i = 0; i < k; ++i) { - qv_scope_s::del(&ikscopes[i]); + qv_scope_s::destroy(&ikscopes[i]); } delete[] ikscopes; *kscopes = nullptr; @@ -85,7 +82,7 @@ qv_scope_s::make_intrinsic( // Create and initialize the scope. rc = scope_new(group, hwpool, scope); if (qvi_unlikely(rc != QV_SUCCESS)) { - qv_scope_s::del(scope); + qv_scope_s::destroy(scope); } return rc; } @@ -136,7 +133,7 @@ qv_scope_s::create( qv_scope_t *ichild = nullptr; rc = scope_new(group, hwpool, &ichild); if (rc != QV_SUCCESS) { - qv_scope_s::del(&ichild); + qv_scope_s::destroy(&ichild); } *child = ichild; return rc; @@ -167,7 +164,7 @@ qv_scope_s::group_rank(void) const } int -qv_scope_s::nobjects( +qv_scope_s::hwpool_nobjects( qv_hw_obj_type_t obj ) const { int result = 0; @@ -199,7 +196,7 @@ qv_scope_s::device_id( } int -qv_scope_s::barrier(void) +qv_scope_s::group_barrier(void) { return m_group->barrier(); } @@ -260,7 +257,7 @@ qv_scope_s::split( if (rc != QV_SUCCESS) { qvi_delete(&hwpool); qvi_delete(&group); - qv_scope_s::del(&ichild); + qv_scope_s::destroy(&ichild); } *child = ichild; return rc; @@ -272,7 +269,7 @@ qv_scope_s::split_at( int color, qv_scope_t **child ) { - return split(nobjects(type), color, type, child); + return split(hwpool_nobjects(type), color, type, child); } int @@ -338,7 +335,7 @@ qv_scope_s::thsplit( ithchildren[i] = child; } if (rc != QV_SUCCESS) { - qv_scope_s::thdel(&ithchildren, k); + qv_scope_s::thdestroy(&ithchildren, k); } else { // Subtract one to account for the parent's @@ -356,7 +353,7 @@ qv_scope_s::thsplit_at( uint_t k, qv_scope_t ***kchildren ) { - return thsplit(nobjects(type), kgroup_ids, k, type, kchildren); + return thsplit(hwpool_nobjects(type), kgroup_ids, k, type, kchildren); } /* diff --git a/src/qvi-scope.h b/src/qvi-scope.h index 4843e40..4ac03af 100644 --- a/src/qvi-scope.h +++ b/src/qvi-scope.h @@ -35,19 +35,6 @@ struct qv_scope_s { qvi_group_t *group, qvi_hwpool_s *hwpool ); - /** Destructor */ - ~qv_scope_s(void); - /** Destroys a scope. */ - static void - del( - qv_scope_t **scope - ); - /** Frees scope resources and container created by thsplit*. 
*/ - static void - thdel( - qv_scope_t ***kscopes, - uint_t k - ); /** Takes the provided group and creates a new intrinsic scope from it. */ static int make_intrinsic( @@ -66,6 +53,19 @@ struct qv_scope_s { qv_scope_create_hints_t hints, qv_scope_t **child ); + /** Destructor */ + ~qv_scope_s(void); + /** Destroys a scope. */ + static void + destroy( + qv_scope_t **scope + ); + /** Destroys scopes created by thsplit*. */ + static void + thdestroy( + qv_scope_t ***kscopes, + uint_t k + ); /** Returns a pointer to the scope's underlying group. */ qvi_group_t * group(void) const; @@ -78,9 +78,12 @@ struct qv_scope_s { /** Returns the caller's group rank in the provided scope. */ int group_rank(void) const; + /** Performs a barrier across all members in the scope. */ + int + group_barrier(void); /** Returns the number of hardware objects in the provided scope. */ int - nobjects( + hwpool_nobjects( qv_hw_obj_type_t obj ) const; /** @@ -94,9 +97,6 @@ struct qv_scope_s { qv_device_id_type_t format, char **result ) const; - /** Performs a scope-level barrier. */ - int - barrier(void); int bind_push(void);
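Below is a minimal sketch of the per-bit reference-counting idea described in the comment block this patch relocates into qvi-hwsplit.cc ("Maintain an array of integers with length number of cpuset bits..."). The RefCounts type and its methods are hypothetical and are not part of this change or of quo-vadis; only the hwloc bitmap calls are existing hwloc API. It keeps one counter per PU (one per cpuset bit position), increments on obtain, decrements on release, and treats a zero counter as "not in use", as the comment proposes; devices would get the same treatment keyed on device IDs instead of bit positions.

    // Hypothetical sketch only; not part of this patch.
    #include <hwloc.h>
    #include <vector>

    struct RefCounts {
        // One counter per PU, i.e., per cpuset bit position.
        std::vector<int> counts;

        explicit RefCounts(unsigned npus) : counts(npus, 0) { }

        // A resource (bitmap) was obtained: bump the counter of every set bit.
        void obtain(hwloc_const_bitmap_t cpuset) {
            unsigned id;
            hwloc_bitmap_foreach_begin(id, cpuset) {
                if (id < counts.size()) counts[id] += 1;
            } hwloc_bitmap_foreach_end();
        }

        // A resource was released: decrement in the same way.
        void release(hwloc_const_bitmap_t cpuset) {
            unsigned id;
            hwloc_bitmap_foreach_begin(id, cpuset) {
                if (id < counts.size() && counts[id] > 0) counts[id] -= 1;
            } hwloc_bitmap_foreach_end();
        }

        // A PU whose counter is zero is not in use.
        bool in_use(unsigned id) const {
            return id < counts.size() && counts[id] != 0;
        }
    };

As the relocated notes themselves suggest, counters like these would not enforce resource exclusivity; they would only supply availability information to the pool-management and allocation strategies mentioned in the TODOs.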