diff --git a/include/coro/concepts/executor.hpp b/include/coro/concepts/executor.hpp
index c6c9129..ecb009c 100644
--- a/include/coro/concepts/executor.hpp
+++ b/include/coro/concepts/executor.hpp
@@ -22,6 +22,9 @@ concept executor = requires(type t, std::coroutine_handle<> c)
     { t.schedule() } -> coro::concepts::awaiter;
     { t.yield() } -> coro::concepts::awaiter;
     { t.resume(c) } -> std::same_as<bool>;
+    { t.size() } -> std::same_as<std::size_t>;
+    { t.empty() } -> std::same_as<bool>;
+    { t.shutdown() } -> std::same_as<void>;
 };
 
 #ifdef LIBCORO_FEATURE_NETWORKING
diff --git a/include/coro/task_container.hpp b/include/coro/task_container.hpp
index e45d9ac..3fbba7a 100644
--- a/include/coro/task_container.hpp
+++ b/include/coro/task_container.hpp
@@ -10,6 +10,7 @@
 #include <list>
 #include <memory>
 #include <mutex>
+#include <queue>
 #include <vector>
 
 namespace coro
@@ -78,25 +79,25 @@ class task_container
     {
         m_size.fetch_add(1, std::memory_order::relaxed);
 
-        std::unique_lock lk{m_mutex};
-
-        if (cleanup == garbage_collect_t::yes)
+        std::size_t index{};
         {
-            gc_internal();
-        }
+            std::unique_lock lk{m_mutex};
 
-        // Only grow if completely full and attempting to add more.
-        if (m_free_task_indices.empty())
-        {
-            grow();
-        }
+            if (cleanup == garbage_collect_t::yes)
+            {
+                gc_internal();
+            }
 
-        // Reserve a free task index
-        std::size_t index = m_free_task_indices.front();
-        m_free_task_indices.pop();
+            // Only grow if completely full and attempting to add more.
+            if (m_free_task_indices.empty())
+            {
+                grow();
+            }
 
-        // We've reserved the slot, we can release the lock.
-        lk.unlock();
+            // Reserve a free task index
+            index = m_free_task_indices.front();
+            m_free_task_indices.pop();
+        }
 
         // Store the task inside a cleanup task for self deletion.
         m_tasks[index] = make_cleanup_task(std::move(user_task), index);
@@ -106,7 +107,7 @@ class task_container
     }
 
     /**
-     * Garbage collects any tasks that are marked as deleted. This frees up space to be re-used by 
+     * Garbage collects any tasks that are marked as deleted. This frees up space to be re-used by
      * the task container for newly stored tasks.
      * @return The number of tasks that were deleted.
      */
@@ -182,7 +183,7 @@ class task_container
                 pos++;
                 continue;
             }
-            // Destroy the cleanup task and the user task.
+            // Destroy the cleanup task.
             m_tasks[*pos].destroy();
             // Put the deleted position at the end of the free indexes list.
             m_free_task_indices.emplace(*pos);
@@ -231,9 +232,12 @@ class task_container
             std::cerr << "coro::task_container user_task had unhandle exception, not derived from std::exception.\n";
         }
 
+        // Destroy the user task since it is complete. This is important to do so outside the lock
+        // since the user could schedule a new task from the destructor (tls::client does this internally)
+        // causing a deadlock.
+        user_task.destroy();
+
         {
-            // This scope is required around this lock otherwise if this task on destruction schedules a new task it
-            // can cause a deadlock, notably tls::client schedules a task to cleanup tls resources.
             std::scoped_lock lk{m_mutex};
             m_tasks_to_delete.emplace_back(index);
         }