Fix S/R adaptors of more parallel algorithms
This commit introduces a new set of fixes for the
S/R versions of several more parallel algorithms and
reworks some previous, buggy fixes. Additionally,
new unit test cases have been added for some of the
algorithms to cover edge cases, such as empty ranges,
that could become pitfalls for the S/R adaptations
(the recurring guard pattern is sketched below).
This led to the discovery of some general bugs in the
algorithms, which were fixed as flybys:
* `find_first_of` (+ regression test case)
* `ends_with`
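
The guard pattern referenced above recurs throughout the hunks: classic execution policies may return early on an empty range, while policies carrying a scheduler executor fall through so that the algorithm body handles the empty input itself. The snippet below is a self-contained toy, not HPX code: `toy_policy` and `count_elements` are made-up stand-ins, and the real trait checked in the diffs is `hpx::execution_policy_has_scheduler_executor_v<ExPolicy>`.

    #include <cstddef>
    #include <iostream>
    #include <vector>

    // Hypothetical stand-in for an execution policy; the real trait checked in
    // the hunks is hpx::execution_policy_has_scheduler_executor_v<ExPolicy>.
    template <bool HasSchedulerExecutor>
    struct toy_policy
    {
        static constexpr bool has_scheduler_executor = HasSchedulerExecutor;
    };

    // Mirrors the recurring guard: non-S/R policies may bail out early on an
    // empty range, S/R policies fall through and the algorithm body has to
    // tolerate a zero-length input instead.
    template <typename Policy, typename Iter>
    std::size_t count_elements(Policy, Iter first, Iter last)
    {
        if constexpr (!Policy::has_scheduler_executor)
        {
            if (first == last)
                return 0;    // early exit, as in the classic code path
        }

        std::size_t n = 0;
        for (; first != last; ++first)
            ++n;
        return n;
    }

    int main()
    {
        std::vector<int> const empty;
        std::cout << count_elements(
                         toy_policy<false>{}, empty.begin(), empty.end())
                  << '\n';    // 0, via the early exit
        std::cout << count_elements(
                         toy_policy<true>{}, empty.begin(), empty.end())
                  << '\n';    // 0, via the fall-through path
    }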
zhekemist committed Jul 29, 2024
1 parent f9da244 commit 79ba095
Showing 42 changed files with 1,290 additions and 433 deletions.
@@ -276,27 +276,34 @@ namespace hpx::parallel {
                 util::detail::algorithm_result<ExPolicy, FwdIter2>;
             using difference_type =
                 typename std::iterator_traits<FwdIter1>::difference_type;

-            constexpr bool scheduler_policy =
+            constexpr bool has_scheduler_policy =
                 hpx::execution_policy_has_scheduler_executor_v<ExPolicy>;

-            if constexpr (!scheduler_policy)
+            FwdIter1 prev = first;
+            difference_type count;
+
+            if (first == last)
             {
-                if (first == last)
+                if constexpr (!has_scheduler_policy)
                 {
                     return result::get(HPX_MOVE(dest));
                 }
+                else
+                {
+                    count = static_cast<difference_type>(0);
+                }
             }
+            else
+            {
+                count = detail::distance(first, last) - 1;
-
-            difference_type count = detail::distance(first, last) - 1;
-
-            FwdIter1 prev = first;
-            hpx::traits::proxy_value_t<
-                typename std::iterator_traits<FwdIter1>::value_type>
-                tmp = *first++;
-            *dest++ = HPX_MOVE(tmp);
+                hpx::traits::proxy_value_t<
+                    typename std::iterator_traits<FwdIter1>::value_type>
+                    tmp = *first++;
+                *dest++ = HPX_MOVE(tmp);
+            }

-            if constexpr (!scheduler_policy)
+            if constexpr (!has_scheduler_policy)
             {
                 if (count == 0)
                 {
@@ -182,10 +182,10 @@ namespace hpx::parallel {
             using result =
                 util::detail::algorithm_result<ExPolicy, FwdIter>;

-            constexpr bool is_scheduler_policy =
+            constexpr bool has_scheduler_executor =
                 hpx::execution_policy_has_scheduler_executor_v<ExPolicy>;

-            if constexpr (!is_scheduler_policy)
+            if constexpr (!has_scheduler_executor)
             {
                 if (first == last)
                 {
@@ -236,6 +236,13 @@ namespace hpx::parallel {
                 return HPX_MOVE(first);
             };

+            if constexpr (has_scheduler_executor)
+            {
+                // underflow prevention for the upcoming call
+                if (count == 0)
+                    ++count;
+            }
+
             using partitioner_type =
                 util::partitioner<policy_type, FwdIter, void>;
             return partitioner_type::call_with_index(
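
The new `++count` guard above is commented as underflow prevention; presumably the following `call_with_index` invocation subtracts one from `count`, which wraps around if `count` is an unsigned zero. A standalone illustration of that wrap-around (not HPX code):

    #include <cstddef>
    #include <iostream>

    int main()
    {
        std::size_t count = 0;

        // Unsigned arithmetic wraps: 0 - 1 yields the largest representable
        // value instead of -1, so a later "count - 1" would explode the range.
        std::cout << count - 1 << '\n';    // 18446744073709551615 on 64-bit
    }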
@@ -377,10 +377,10 @@ namespace hpx::parallel {
             Sent last, F&& op, Proj&& proj)
         {
             using result = util::detail::algorithm_result<ExPolicy, bool>;
-            constexpr bool is_scheduler_policy =
+            constexpr bool has_scheduler_executor =
                 hpx::execution_policy_has_scheduler_executor_v<ExPolicy>;

-            if constexpr (!is_scheduler_policy)
+            if constexpr (!has_scheduler_executor)
             {
                 if (first == last)
                 {
@@ -389,12 +389,15 @@
             }

             using policy_type = std::decay_t<ExPolicy>;
+            using intermediate_result_t =
+                std::conditional_t<has_scheduler_executor, char, bool>;

             util::cancellation_token<> tok;
             auto f1 = [op = HPX_FORWARD(F, op), tok,
                           proj = HPX_FORWARD(Proj, proj)](
                           FwdIter part_begin,
-                          std::size_t part_count) mutable -> bool {
+                          std::size_t part_count) mutable
+                -> intermediate_result_t {
                 detail::sequential_find_if<policy_type>(part_begin,
                     part_count, tok, HPX_FORWARD(F, op),
                     HPX_FORWARD(Proj, proj));
@@ -411,9 +414,11 @@
                     hpx::util::end(results);
             };

-            return util::partitioner<policy_type, bool>::call(
-                HPX_FORWARD(decltype(policy), policy), first,
-                detail::distance(first, last), HPX_MOVE(f1), HPX_MOVE(f2));
+            return util::partitioner<policy_type, bool,
+                intermediate_result_t>::call(HPX_FORWARD(decltype(policy),
+                                                 policy),
+                first, detail::distance(first, last), HPX_MOVE(f1),
+                HPX_MOVE(f2));
         }
     };
     /// \endcond
@@ -447,10 +452,10 @@
             Sent last, F&& op, Proj&& proj)
         {
             using result = util::detail::algorithm_result<ExPolicy, bool>;
-            constexpr bool is_scheduler_policy =
+            constexpr bool has_scheduler_executor =
                 hpx::execution_policy_has_scheduler_executor_v<ExPolicy>;

-            if constexpr (!is_scheduler_policy)
+            if constexpr (!has_scheduler_executor)
             {
                 if (first == last)
                 {
@@ -459,12 +464,15 @@
             }

             using policy_type = std::decay_t<ExPolicy>;
+            using intermediate_result_t =
+                std::conditional_t<has_scheduler_executor, char, bool>;

             util::cancellation_token<> tok;
             auto f1 = [op = HPX_FORWARD(F, op), tok,
                           proj = HPX_FORWARD(Proj, proj)](
                           FwdIter part_begin,
-                          std::size_t part_count) mutable -> bool {
+                          std::size_t part_count) mutable
+                -> intermediate_result_t {
                 detail::sequential_find_if<policy_type>(part_begin,
                     part_count, tok, HPX_FORWARD(F, op),
                     HPX_FORWARD(Proj, proj));
@@ -481,9 +489,11 @@
                     hpx::util::end(results);
             };

-            return util::partitioner<policy_type, bool>::call(
-                HPX_FORWARD(decltype(policy), policy), first,
-                detail::distance(first, last), HPX_MOVE(f1), HPX_MOVE(f2));
+            return util::partitioner<policy_type, bool,
+                intermediate_result_t>::call(HPX_FORWARD(decltype(policy),
+                                                 policy),
+                first, detail::distance(first, last), HPX_MOVE(f1),
+                HPX_MOVE(f2));
         }
     };
     /// \endcond
@@ -516,10 +526,10 @@
             Sent last, F&& op, Proj&& proj)
         {
             using result = util::detail::algorithm_result<ExPolicy, bool>;
-            constexpr bool is_scheduler_policy =
+            constexpr bool has_scheduler_executor =
                 hpx::execution_policy_has_scheduler_executor_v<ExPolicy>;

-            if constexpr (!is_scheduler_policy)
+            if constexpr (!has_scheduler_executor)
             {
                 if (first == last)
                 {
@@ -528,12 +538,15 @@
             }

             using policy_type = std::decay_t<ExPolicy>;
+            using intermediate_result_t =
+                std::conditional_t<has_scheduler_executor, char, bool>;

             util::cancellation_token<> tok;
             auto f1 = [op = HPX_FORWARD(F, op), tok,
                           proj = HPX_FORWARD(Proj, proj)](
                           FwdIter part_begin,
-                          std::size_t part_count) mutable -> bool {
+                          std::size_t part_count) mutable
+                -> intermediate_result_t {
                 detail::sequential_find_if_not<policy_type>(part_begin,
                     part_count, tok, HPX_FORWARD(F, op),
                     HPX_FORWARD(Proj, proj));
@@ -550,9 +563,11 @@
                     hpx::util::end(results);
             };

-            return util::partitioner<policy_type, bool>::call(
-                HPX_FORWARD(decltype(policy), policy), first,
-                detail::distance(first, last), HPX_MOVE(f1), HPX_MOVE(f2));
+            return util::partitioner<policy_type, bool,
+                intermediate_result_t>::call(HPX_FORWARD(decltype(policy),
+                                                 policy),
+                first, detail::distance(first, last), HPX_MOVE(f1),
+                HPX_MOVE(f2));
         }
     };
     /// \endcond
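
The new `intermediate_result_t` switches the partitioner's partial-result type from `bool` to `char` whenever a scheduler executor is involved. This most likely sidesteps the packed `std::vector<bool>` specialization, assuming the partial results end up in a `std::vector` of that type; the snippet below only illustrates why `bool` elements are awkward there and is not HPX code:

    #include <type_traits>
    #include <vector>

    int main()
    {
        std::vector<bool> packed(4);
        std::vector<char> plain(4);

        // vector<bool> hands out proxy objects instead of real references, so
        // generic code expecting bool& (or a contiguous buffer) breaks on it.
        static_assert(!std::is_same_v<decltype(packed[0]), bool&>,
            "packed specialization yields a proxy, not a reference");
        static_assert(std::is_same_v<decltype(plain[0]), char&>,
            "char elements give plain references");

        // silence unused-variable warnings
        (void) packed;
        (void) plain;
    }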
4 changes: 2 additions & 2 deletions libs/core/algorithms/include/hpx/parallel/algorithms/copy.hpp
@@ -721,12 +721,12 @@ namespace hpx {
                               hpx::traits::is_output_iterator_v<FwdIter2>),
                 "Requires at least forward iterator or sequential execution.");

-            constexpr bool is_scheduler_policy =
+            constexpr bool has_scheduler_executor =
                 hpx::execution_policy_has_scheduler_executor_v<ExPolicy>;

             if (hpx::parallel::detail::is_negative(count))
             {
-                if constexpr (is_scheduler_policy)
+                if constexpr (has_scheduler_executor)
                 {
                     count = static_cast<Size>(0);
                 }
@@ -325,10 +325,10 @@ namespace hpx::parallel {
         static decltype(auto) parallel(ExPolicy&& policy, IterB first,
             IterE last, T const& value, Proj&& proj)
         {
-            constexpr bool is_scheduler_policy =
+            constexpr bool has_scheduler_executor =
                 hpx::execution_policy_has_scheduler_executor_v<ExPolicy>;

-            if constexpr (!is_scheduler_policy)
+            if constexpr (!has_scheduler_executor)
             {
                 if (first == last)
                 {
@@ -390,10 +390,10 @@ namespace hpx::parallel {
         static decltype(auto) parallel(ExPolicy&& policy, IterB first,
             IterE last, Pred&& op, Proj&& proj)
         {
-            constexpr bool is_scheduler_policy =
+            constexpr bool has_scheduler_executor =
                 hpx::execution_policy_has_scheduler_executor_v<ExPolicy>;

-            if constexpr (!is_scheduler_policy)
+            if constexpr (!has_scheduler_executor)
             {
                 if (first == last)
                 {
11 changes: 7 additions & 4 deletions libs/core/algorithms/include/hpx/parallel/algorithms/destroy.hpp
@@ -193,8 +193,10 @@ namespace hpx::parallel {
         decltype(auto) parallel_sequential_destroy_n(
             ExPolicy&& policy, Iter first, std::size_t count)
         {
-            if constexpr (!hpx::execution_policy_has_scheduler_executor_v<
-                              ExPolicy>)
+            constexpr bool has_scheduler_executor =
+                hpx::execution_policy_has_scheduler_executor_v<ExPolicy>;
+
+            if constexpr (!has_scheduler_executor)
             {
                 if (count == 0)
                 {
@@ -344,11 +346,12 @@ namespace hpx {
         {
             static_assert(hpx::traits::is_forward_iterator_v<FwdIter>,
                 "Requires at least forward iterator.");
+            constexpr bool has_scheduler_executor =
+                hpx::execution_policy_has_scheduler_executor_v<ExPolicy>;

             if (hpx::parallel::detail::is_negative(count))
             {
-                if constexpr (hpx::execution_policy_has_scheduler_executor_v<
-                                  ExPolicy>)
+                if constexpr (has_scheduler_executor)
                 {
                     count = static_cast<Size>(0);
                 }
42 changes: 23 additions & 19 deletions libs/core/algorithms/include/hpx/parallel/algorithms/ends_with.hpp
@@ -147,17 +147,18 @@ namespace hpx::parallel {
             Sent1 last1, Iter2 first2, Sent2 last2, Pred&& pred,
             Proj1&& proj1, Proj2&& proj2)
         {
-            auto const drop = detail::distance(first1, last1) -
-                detail::distance(first2, last2);
+            auto distance1 = detail::distance(first1, last1);
+            auto distance2 = detail::distance(first2, last2);

-            if (drop < 0)
+            if (distance1 < distance2)
                 return false;

+            std::advance(first1, distance1 - distance2);
+
             return hpx::parallel::detail::equal_binary().call(
-                hpx::execution::seq, std::next(HPX_MOVE(first1), drop),
-                HPX_MOVE(last1), HPX_MOVE(first2), HPX_MOVE(last2),
-                HPX_FORWARD(Pred, pred), HPX_FORWARD(Proj1, proj1),
-                HPX_FORWARD(Proj2, proj2));
+                hpx::execution::seq, HPX_MOVE(first1), HPX_MOVE(last1),
+                HPX_MOVE(first2), HPX_MOVE(last2), HPX_FORWARD(Pred, pred),
+                HPX_FORWARD(Proj1, proj1), HPX_FORWARD(Proj2, proj2));
         }

         template <typename ExPolicy, typename FwdIter1, typename Sent1,
@@ -167,17 +168,18 @@
             Sent1 last1, FwdIter2 first2, Sent2 last2, Pred&& pred,
             Proj1&& proj1, Proj2&& proj2)
         {
-            auto const drop = detail::distance(first1, last1) -
-                detail::distance(first2, last2);
+            constexpr bool has_scheduler_executor =
+                hpx::execution_policy_has_scheduler_executor_v<ExPolicy>;
+
+            auto distance1 = detail::distance(first1, last1);
+            auto distance2 = detail::distance(first2, last2);
+            auto diff = distance1 - distance2;

-            if (drop < 0)
+            if (distance1 < distance2)
             {
-                if constexpr (hpx::
-                                  execution_policy_has_scheduler_executor_v<
-                                      ExPolicy>)
+                if constexpr (has_scheduler_executor)
                 {
-                    return hpx::execution::experimental::transfer_just(
-                        policy.executor().sched(), false);
+                    diff = 0;
                 }
                 else
                 {
@@ -186,11 +188,13 @@
                 }
             }

+            std::advance(first1, diff);
+
             return hpx::parallel::detail::equal_binary().call(
-                HPX_FORWARD(ExPolicy, policy),
-                std::next(HPX_MOVE(first1), drop), HPX_MOVE(last1),
-                HPX_MOVE(first2), HPX_MOVE(last2), HPX_FORWARD(Pred, pred),
-                HPX_FORWARD(Proj1, proj1), HPX_FORWARD(Proj2, proj2));
+                HPX_FORWARD(ExPolicy, policy), HPX_MOVE(first1),
+                HPX_MOVE(last1), HPX_MOVE(first2), HPX_MOVE(last2),
+                HPX_FORWARD(Pred, pred), HPX_FORWARD(Proj1, proj1),
+                HPX_FORWARD(Proj2, proj2));
         }
     };
     /// \endcond
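
The `ends_with` flyby replaces the `drop < 0` check with a direct `distance1 < distance2` comparison and, for scheduler-executor policies, clamps `diff` to zero instead of returning a separately built sender. One plausible reading of the original bug, assuming the computed distances can have an unsigned type, is that the subtraction wraps and the `< 0` test never fires; the values below are made up and the snippet is not HPX code:

    #include <cstddef>
    #include <iostream>

    int main()
    {
        // Hypothetical lengths: a 3-element haystack and a 5-element suffix.
        std::size_t distance1 = 3;
        std::size_t distance2 = 5;

        auto drop = distance1 - distance2;    // wraps to a huge positive value

        std::cout << (drop < 0) << '\n';                 // 0: old test never fires
        std::cout << (distance1 < distance2) << '\n';    // 1: the fixed test
    }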