From 294f722096fb2926852045a1e0b96fb91f89e447 Mon Sep 17 00:00:00 2001 From: Bruno Nicoletti Date: Wed, 14 Feb 2024 22:56:36 +0000 Subject: [PATCH 01/24] Added missing header '' when compiling with clang and c++23 (#254) --- include/coro/sync_wait.hpp | 1 + 1 file changed, 1 insertion(+) diff --git a/include/coro/sync_wait.hpp b/include/coro/sync_wait.hpp index 333d187b..5a20def4 100644 --- a/include/coro/sync_wait.hpp +++ b/include/coro/sync_wait.hpp @@ -3,6 +3,7 @@ #include "coro/attribute.hpp" #include "coro/concepts/awaitable.hpp" +#include #include #include #include From 042b387ad9086b2d91523f9c62dda24644c2d47c Mon Sep 17 00:00:00 2001 From: Josh Baldwin Date: Wed, 14 Feb 2024 17:43:46 -0700 Subject: [PATCH 02/24] CI explicitly run CMAKE_CXX_STANDARD=20|23 (#257) CI Added explicit 20/23 standards for: * ci-fedora * ci-macos * ci-ubuntu * ci-windows Did not add 23 builds for * ci-opensuse * ci_emscripten Closes #256 --- .github/workflows/ci-fedora.yml | 6 +++++- .github/workflows/ci-macos.yml | 17 ++++++++++++----- .github/workflows/ci-opensuse.yml | 2 ++ .github/workflows/ci-ubuntu.yml | 6 ++++++ .github/workflows/ci-windows.yml | 3 ++- 5 files changed, 27 insertions(+), 7 deletions(-) diff --git a/.github/workflows/ci-fedora.yml b/.github/workflows/ci-fedora.yml index 58ce6d63..9aea9a03 100644 --- a/.github/workflows/ci-fedora.yml +++ b/.github/workflows/ci-fedora.yml @@ -9,8 +9,10 @@ jobs: strategy: fail-fast: false matrix: - fedora_version: [32, 33, 34, 35, 36, 37, 38, 39, 40] + fedora_version: [37, 38, 39, 40] + cxx_standard: [20, 23] libcoro_feature_networking: [ {enabled: ON, tls: ON} ] + libcoro_build_shared_libs: [OFF] container: image: fedora:${{ matrix.fedora_version }} steps: @@ -36,8 +38,10 @@ jobs: -DCMAKE_BUILD_TYPE=Release \ -DCMAKE_C_COMPILER=gcc \ -DCMAKE_CXX_COMPILER=g++ \ + -DCMAKE_CXX_STANDARD=${{ matrix.cxx_standard }} \ -DLIBCORO_FEATURE_NETWORKING=${{ matrix.libcoro_feature_networking.enabled }} \ -DLIBCORO_FEATURE_TLS=${{ matrix.libcoro_feature_networking.tls }} \ + -DLIBCORO_BUILD_SHARED_LIBS=${{ matrix.libcoro_build_shared_libs }} \ .. cmake --build . --config Release - name: Test diff --git a/.github/workflows/ci-macos.yml b/.github/workflows/ci-macos.yml index 7daec91c..379786d9 100644 --- a/.github/workflows/ci-macos.yml +++ b/.github/workflows/ci-macos.yml @@ -8,11 +8,16 @@ jobs: runs-on: macos-latest strategy: fail-fast: false + matrix: + clang_version: [17] + cxx_standard: [20, 23] + libcoro_feature_networking: [ {enabled: OFF, tls: OFF} ] + libcoro_build_shared_libs: [OFF, ON] steps: - name: Install Dependencies run: | brew update - brew install llvm@17 + brew install llvm@${{ matrix.clang_version }} brew install ninja - name: Checkout uses: actions/checkout@v4 @@ -25,10 +30,12 @@ jobs: cmake \ -GNinja \ -DCMAKE_BUILD_TYPE=Release \ - -DCMAKE_C_COMPILER=/usr/local/opt/llvm/bin//clang-17 \ - -DCMAKE_CXX_COMPILER=/usr/local/opt/llvm/bin//clang-17 \ - -DLIBCORO_FEATURE_NETWORKING=OFF \ - -DLIBCORO_FEATURE_TLS=ON \ + -DCMAKE_C_COMPILER=/usr/local/opt/llvm/bin//clang-${{ matrix.clang_version }} \ + -DCMAKE_CXX_COMPILER=/usr/local/opt/llvm/bin//clang-${{ matrix.clang_version }} \ + -DCMAKE_CXX_STANDARD=${{ matrix.cxx_standard }} \ + -DLIBCORO_FEATURE_NETWORKING=${{ matrix.libcoro_feature_networking.enabled }} \ + -DLIBCORO_FEATURE_TLS=${{ matrix.libcoro_feature_networking.tls }} \ + -DLIBCORO_BUILD_SHARED_LIBS=${{ matrix.libcoro_build_shared_libs }} \ .. cmake --build . 
--config Release - name: Test diff --git a/.github/workflows/ci-opensuse.yml b/.github/workflows/ci-opensuse.yml index cd335c5f..1748f16a 100644 --- a/.github/workflows/ci-opensuse.yml +++ b/.github/workflows/ci-opensuse.yml @@ -9,6 +9,7 @@ jobs: strategy: matrix: gplusplus_version: [10] + cxx_standard: [20] libcoro_feature_networking: [ {enabled: ON, tls: ON} ] container: image: opensuse/leap:15.2 @@ -37,6 +38,7 @@ jobs: -DCMAKE_BUILD_TYPE=Release \ -DCMAKE_C_COMPILER=gcc-${{ matrix.gplusplus_version }} \ -DCMAKE_CXX_COMPILER=g++-${{ matrix.gplusplus_version }} \ + -DCMAKE_CXX_STANDARD=${{ matrix.cxx_standard }} \ -DLIBCORO_FEATURE_NETWORKING=${{ matrix.libcoro_feature_networking.enabled }} \ -DLIBCORO_FEATURE_TLS=${{ matrix.libcoro_feature_networking.tls }} \ .. diff --git a/.github/workflows/ci-ubuntu.yml b/.github/workflows/ci-ubuntu.yml index 1c0aad12..d62a1901 100644 --- a/.github/workflows/ci-ubuntu.yml +++ b/.github/workflows/ci-ubuntu.yml @@ -9,6 +9,7 @@ jobs: strategy: matrix: gplusplus_version: [10] + cxx_standard: [20] libcoro_feature_networking: [ {enabled: ON, tls: ON}, {enabled: ON, tls: OFF}, {enabled: OFF, tls: OFF} ] libcoro_build_shared_libs: [OFF, ON] container: @@ -42,6 +43,7 @@ jobs: -DCMAKE_BUILD_TYPE=Release \ -DCMAKE_C_COMPILER=gcc-${{ matrix.gplusplus_version }} \ -DCMAKE_CXX_COMPILER=g++-${{ matrix.gplusplus_version }} \ + -DCMAKE_CXX_STANDARD=${{ matrix.cxx_standard }} \ -DLIBCORO_FEATURE_NETWORKING=${{ matrix.libcoro_feature_networking.enabled }} \ -DLIBCORO_FEATURE_TLS=${{ matrix.libcoro_feature_networking.tls }} \ -DLIBCORO_BUILD_SHARED_LIBS=${{ matrix.libcoro_build_shared_libs }} \ @@ -57,6 +59,7 @@ jobs: strategy: matrix: gplusplus_version: [11, 12, 13] + cxx_standard: [20, 23] libcoro_feature_networking: [ {enabled: ON, tls: ON}] container: image: ubuntu:22.04 @@ -89,6 +92,7 @@ jobs: -DCMAKE_BUILD_TYPE=Release \ -DCMAKE_C_COMPILER=gcc-${{ matrix.gplusplus_version }} \ -DCMAKE_CXX_COMPILER=g++-${{ matrix.gplusplus_version }} \ + -DCMAKE_CXX_STANDARD=${{ matrix.cxx_standard }} \ -DLIBCORO_FEATURE_NETWORKING=${{ matrix.libcoro_feature_networking.enabled }} \ -DLIBCORO_FEATURE_TLS=${{ matrix.libcoro_feature_networking.tls }} \ .. @@ -103,6 +107,7 @@ jobs: strategy: matrix: clang_version: [16, 17] + cxx_standard: [20, 23] libcoro_feature_networking: [ {enabled: ON, tls: ON}] container: image: ubuntu:22.04 @@ -139,6 +144,7 @@ jobs: -DCMAKE_BUILD_TYPE=Release \ -DCMAKE_C_COMPILER=clang-${{ matrix.clang_version }} \ -DCMAKE_CXX_COMPILER=clang++-${{ matrix.clang_version }} \ + -DCMAKE_CXX_STANDARD=${{ matrix.cxx_standard }} \ -DLIBCORO_FEATURE_NETWORKING=${{ matrix.libcoro_feature_networking.enabled }} \ -DLIBCORO_FEATURE_TLS=${{ matrix.libcoro_feature_networking.tls }} \ .. diff --git a/.github/workflows/ci-windows.yml b/.github/workflows/ci-windows.yml index 8ca8fa06..bbb41b6a 100644 --- a/.github/workflows/ci-windows.yml +++ b/.github/workflows/ci-windows.yml @@ -8,6 +8,7 @@ jobs: runs-on: windows-latest strategy: matrix: + cxx_standard: [20, 23] libcoro_build_shared_libs: [OFF, ON] steps: - name: Checkout @@ -18,7 +19,7 @@ jobs: run: | mkdir Release cd Release - cmake .. -DLIBCORO_BUILD_SHARED_LIBS=${{ matrix.libcoro_build_shared_libs }} + cmake -DCMAKE_CXX_STANDARD=${{ matrix.cxx_standard }} -DLIBCORO_BUILD_SHARED_LIBS=${{ matrix.libcoro_build_shared_libs }} .. cmake --build . 
--config Release - name: Test run: | From 18d96ceb531200225d1bcb64ccefd1b34fef7440 Mon Sep 17 00:00:00 2001 From: Uilian Ries Date: Wed, 21 Feb 2024 22:37:52 +0100 Subject: [PATCH 03/24] Initial try to include Conan in the CI (#259) Signed-off-by: Uilian Ries --- .github/workflows/ci-conan.yml | 70 ++++++++++++++++++++++++++++++++++ 1 file changed, 70 insertions(+) create mode 100644 .github/workflows/ci-conan.yml diff --git a/.github/workflows/ci-conan.yml b/.github/workflows/ci-conan.yml new file mode 100644 index 00000000..accb7e91 --- /dev/null +++ b/.github/workflows/ci-conan.yml @@ -0,0 +1,70 @@ +name: ci-conan + +on: [pull_request, workflow_dispatch] + +jobs: + ci-conan-gcc10: + name: ci-conan-g++-11-shared-${{ matrix.shared }}-build-type-${{ matrix.build_type }} + runs-on: ubuntu-latest + strategy: + matrix: + shared: ["False", "True"] + build_type: ["Release", "Debug"] + container: + image: ubuntu:20.04 + env: + TZ: America/New_York + DEBIAN_FRONTEND: noninteractive + steps: + - name: Install System Dependencies + run: | + apt-get update + apt-get -y upgrade + apt install -y build-essential software-properties-common + add-apt-repository ppa:ubuntu-toolchain-r/test + apt-get install -y \ + cmake \ + git \ + ninja-build \ + g++-11 \ + wget + wget -q -O /tmp/conan.tar.gz https://github.com/conan-io/conan/releases/download/2.0.17/conan-linux-64.tar.gz + tar -xvf /tmp/conan.tar.gz -C /usr/bin + - name: Detect Conan profile + run: | + update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-11 100 + update-alternatives --install /usr/bin/c++ c++ /usr/bin/g++-11 100 + update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-11 100 + update-alternatives --install /usr/bin/cc cc /usr/bin/gcc-11 100 + conan profile detect + - name: Checkout + uses: actions/checkout@v4 + - name: Install Conan Dependencies + run: | + conan install -r conancenter \ + --requires=openssl/3.2.0 \ + --requires=c-ares/1.22.1 \ + --requires=tl-expected/1.1.0 \ + -g CMakeToolchain \ + -g CMakeDeps \ + -of "${GITHUB_WORKSPACE}/build/conan" \ + --build=missing \ + -s build_type=${{ matrix.build_type }} \ + -s compiler.cppstd=20 \ + -o "*/*:shared=${{ matrix.shared }}" + - name: Build + run: | + cmake -S . 
-B "${GITHUB_WORKSPACE}/build" \ + -GNinja \ + -DCMAKE_CXX_STANDARD=20 \ + -DCMAKE_TOOLCHAIN_FILE=build/conan/conan_toolchain.cmake \ + -DCMAKE_BUILD_TYPE=${{ matrix.build_type }} \ + -DLIBCORO_EXTERNAL_DEPENDENCIES=ON \ + -DLIBCORO_FEATURE_NETWORKING=ON \ + -DLIBCORO_FEATURE_TLS=ON \ + -DLIBCORO_BUILD_SHARED_LIBS=${{ matrix.shared }} + cmake --build "${GITHUB_WORKSPACE}/build" + - name: Test + run: | + cd build + ctest -VVqw From 3e3f94830448476a62af2d42787c31228101f1d0 Mon Sep 17 00:00:00 2001 From: mscppppppppp <102710916+mscppppppppp@users.noreply.github.com> Date: Sun, 17 Mar 2024 01:15:11 +0800 Subject: [PATCH 04/24] Update when_all.hpp to fix msvc build waring (#260) --- include/coro/when_all.hpp | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) diff --git a/include/coro/when_all.hpp b/include/coro/when_all.hpp index 6c0ef13e..c7a71fad 100644 --- a/include/coro/when_all.hpp +++ b/include/coro/when_all.hpp @@ -9,6 +9,7 @@ #include #include #include +#include namespace coro { @@ -288,7 +289,7 @@ class when_all_task_promise coroutine_handle_type::from_promise(*this).resume(); } - auto return_value() & -> return_type& + auto result() & -> return_type& { if (m_exception_ptr) { @@ -297,7 +298,7 @@ class when_all_task_promise return *m_return_value; } - auto return_value() && -> return_type&& + auto result() && -> return_type&& { if (m_exception_ptr) { @@ -306,6 +307,14 @@ class when_all_task_promise return std::forward(*m_return_value); } + auto return_void() noexcept -> void + { + // We should have either suspended at co_yield point or + // an exception was thrown before running off the end of + // the coroutine. + assert(false); + } + private: when_all_latch* m_latch{nullptr}; std::exception_ptr m_exception_ptr; @@ -401,7 +410,7 @@ class when_all_task } else { - return m_coroutine.promise().return_value(); + return m_coroutine.promise().result(); } } @@ -414,7 +423,7 @@ class when_all_task } else { - return m_coroutine.promise().return_value(); + return m_coroutine.promise().result(); } } @@ -427,7 +436,7 @@ class when_all_task } else { - return m_coroutine.promise().return_value(); + return m_coroutine.promise().result(); } } From 3e8a735801a8ed9837c3e431a43b153d461b5b85 Mon Sep 17 00:00:00 2001 From: Josh Baldwin Date: Thu, 23 May 2024 20:36:44 -0600 Subject: [PATCH 05/24] Pin macos ci to 12 (#267) macos-latest is m1 and is building but crashing --- .github/workflows/ci-macos.yml | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/.github/workflows/ci-macos.yml b/.github/workflows/ci-macos.yml index 379786d9..30662e56 100644 --- a/.github/workflows/ci-macos.yml +++ b/.github/workflows/ci-macos.yml @@ -4,8 +4,8 @@ on: [pull_request, workflow_dispatch] jobs: macos: - name: macos-latest - runs-on: macos-latest + name: macos-12 + runs-on: macos-12 strategy: fail-fast: false matrix: @@ -25,13 +25,14 @@ jobs: submodules: recursive - name: Release run: | + brew --prefix llvm@17 mkdir Release cd Release cmake \ -GNinja \ -DCMAKE_BUILD_TYPE=Release \ - -DCMAKE_C_COMPILER=/usr/local/opt/llvm/bin//clang-${{ matrix.clang_version }} \ - -DCMAKE_CXX_COMPILER=/usr/local/opt/llvm/bin//clang-${{ matrix.clang_version }} \ + -DCMAKE_C_COMPILER=$(brew --prefix llvm@${{ matrix.clang_version }})/bin/clang-${{ matrix.clang_version }} \ + -DCMAKE_CXX_COMPILER=$(brew --prefix llvm@${{ matrix.clang_version }})/bin/clang-${{ matrix.clang_version }} \ -DCMAKE_CXX_STANDARD=${{ matrix.cxx_standard }} \ -DLIBCORO_FEATURE_NETWORKING=${{ 
matrix.libcoro_feature_networking.enabled }} \ -DLIBCORO_FEATURE_TLS=${{ matrix.libcoro_feature_networking.tls }} \ From d4a55159f9a61509dd30668672b5374147912bb0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marius=20Miku=C4=8Dionis?= Date: Fri, 31 May 2024 03:59:47 +0200 Subject: [PATCH 06/24] Fix unintended shallow copying when used with std::views::take (issue 261) (#263) * Fix for issue 261 Enables `view` concept on `generator` so that lvalue references cannot be accepts by `std::views:take`, which in turn would lead to erronious behavior. * Added a test case to check std::ranges:view concept, just like std::generator --- include/coro/generator.hpp | 2 +- test/test_generator.cpp | 34 ++++++++++++++++++++++++++++++++++ 2 files changed, 35 insertions(+), 1 deletion(-) diff --git a/include/coro/generator.hpp b/include/coro/generator.hpp index 5fba1d0e..a15305ad 100644 --- a/include/coro/generator.hpp +++ b/include/coro/generator.hpp @@ -118,7 +118,7 @@ class generator_iterator } // namespace detail template -class generator +class generator : public std::ranges::view_base { public: using promise_type = detail::generator_promise; diff --git a/test/test_generator.cpp b/test/test_generator.cpp index 0b869814..49163a3f 100644 --- a/test/test_generator.cpp +++ b/test/test_generator.cpp @@ -39,3 +39,37 @@ TEST_CASE("generator infinite incrementing integer yield", "[generator]") } } } + +TEST_CASE("generator satisfies view concept for compatibility with std::views::take") +{ + auto counter = size_t{0}; + auto natural = [n = counter]() mutable -> coro::generator { + while (true) + co_yield ++n; + }; + auto nat = natural(); + static_assert(std::ranges::view, "does not satisfy view concept"); + SECTION("Count the items") + { + for (auto&& n : natural() | std::views::take(5)) + { + ++counter; + REQUIRE(n == counter); + } + REQUIRE(counter == 5); + } + SECTION("Not supported when std::ranges::view is satisfied, see issue 261") + { + /// the following may fail to compile to prevent loss of items in the std::views:take: + /* + for (auto&& n : nat | std::views::take(3)) { + ++counter; + REQUIRE(n == counter); // expect 1, 2, 3 + } + for (auto&& n : nat | std::views::take(3)) { + ++counter; + REQUIRE(n == counter); // expect 4, 5, 6 (4 may get lost if view is not enabled) + } + */ + } +} From 5697678d79e479cf318ab0f3fd9e246871b6e345 Mon Sep 17 00:00:00 2001 From: Josh Baldwin Date: Fri, 5 Jul 2024 12:29:31 -0600 Subject: [PATCH 07/24] coro::thread_pool high cpu usage when tasks < threads (#265) * coro::thread_pool high cpu usage when tasks < threads The check for m_size > 0 was keeping threads awake in a spin state until all tasks completed. This correctl now uses m_queue.size() behind the lock to correctly only wake up threads on the condition variable when tasks are waiting to be processed. * Fix deadlock with task_container and tls::client with the client's destructor scheduling a tls cleanup task, the task_container's lock was being locked twice when the cleanup task was being destroyed. 
Closes #262 * Adjust when task_container's user_task is deleted It is now deleted inline in make_user_task so any destructors that get invoked that possibly schedule more coroutines do not cause a deadlock * io_scheduler is now std::enable_shared_from_this --- README.md | 17 +- examples/coro_http_200_ok_server.cpp | 2 +- examples/coro_io_scheduler.cpp | 2 +- examples/coro_latch.cpp | 13 +- examples/coro_task_container.cpp | 2 +- examples/coro_tcp_echo_server.cpp | 2 +- include/coro/concepts/executor.hpp | 5 +- include/coro/io_scheduler.hpp | 38 ++- include/coro/task_container.hpp | 68 +++-- include/coro/thread_pool.hpp | 21 +- src/io_scheduler.cpp | 31 ++- src/thread_pool.cpp | 62 +++-- test/bench.cpp | 41 +-- test/net/test_dns_resolver.cpp | 2 +- test/net/test_tcp_server.cpp | 4 +- test/net/test_tls_server.cpp | 2 +- test/net/test_udp_peers.cpp | 4 +- test/test_io_scheduler.cpp | 386 +++++++++++++-------------- test/test_shared_mutex.cpp | 2 +- test/test_thread_pool.cpp | 32 +++ 20 files changed, 419 insertions(+), 317 deletions(-) diff --git a/README.md b/README.md index 96a183b0..d6719952 100644 --- a/README.md +++ b/README.md @@ -362,10 +362,12 @@ int main() // Complete worker tasks faster on a thread pool, using the io_scheduler version so the worker // tasks can yield for a specific amount of time to mimic difficult work. The pool is only // setup with a single thread to showcase yield_for(). - coro::io_scheduler tp{coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}}; + auto tp = coro::io_scheduler::make_shared( + coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}); // This task will wait until the given latch setters have completed. - auto make_latch_task = [](coro::latch& l) -> coro::task { + auto make_latch_task = [](coro::latch& l) -> coro::task + { // It seems like the dependent worker tasks could be created here, but in that case it would // be superior to simply do: `co_await coro::when_all(tasks);` // It is also important to note that the last dependent task will resume the waiting latch @@ -381,14 +383,15 @@ int main() // This task does 'work' and counts down on the latch when completed. The final child task to // complete will end up resuming the latch task when the latch's count reaches zero. - auto make_worker_task = [](coro::io_scheduler& tp, coro::latch& l, int64_t i) -> coro::task { + auto make_worker_task = [](std::shared_ptr& tp, coro::latch& l, int64_t i) -> coro::task + { // Schedule the worker task onto the thread pool. - co_await tp.schedule(); + co_await tp->schedule(); std::cout << "worker task " << i << " is working...\n"; // Do some expensive calculations, yield to mimic work...! Its also important to never use // std::this_thread::sleep_for() within the context of coroutines, it will block the thread // and other tasks that are ready to execute will be blocked. - co_await tp.yield_for(std::chrono::milliseconds{i * 20}); + co_await tp->yield_for(std::chrono::milliseconds{i * 20}); std::cout << "worker task " << i << " is done, counting down on the latch\n"; l.count_down(); co_return; @@ -846,7 +849,7 @@ The example provided here shows an i/o scheduler that spins up a basic `coro::ne int main() { - auto scheduler = std::make_shared(coro::io_scheduler::options{ + auto scheduler = coro::io_scheduler::make_shared(coro::io_scheduler::options{ // The scheduler will spawn a dedicated event processing thread. 
This is the default, but // it is possible to use 'manual' and call 'process_events()' to drive the scheduler yourself. .thread_strategy = coro::io_scheduler::thread_strategy_t::spawn, @@ -1017,7 +1020,7 @@ All tasks that are stored within a `coro::task_container` must have a `void` ret int main() { - auto scheduler = std::make_shared( + auto scheduler = coro::io_scheduler::make_shared( coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}); auto make_server_task = [&]() -> coro::task diff --git a/examples/coro_http_200_ok_server.cpp b/examples/coro_http_200_ok_server.cpp index 7b783f5f..e33b98cf 100644 --- a/examples/coro_http_200_ok_server.cpp +++ b/examples/coro_http_200_ok_server.cpp @@ -67,7 +67,7 @@ Connection: keep-alive std::vector> workers{}; for (size_t i = 0; i < std::thread::hardware_concurrency(); ++i) { - auto scheduler = std::make_shared(coro::io_scheduler::options{ + auto scheduler = coro::io_scheduler::make_shared(coro::io_scheduler::options{ .execution_strategy = coro::io_scheduler::execution_strategy_t::process_tasks_inline}); workers.push_back(make_http_200_ok_server(scheduler)); diff --git a/examples/coro_io_scheduler.cpp b/examples/coro_io_scheduler.cpp index 8fc2d291..3c0dec5a 100644 --- a/examples/coro_io_scheduler.cpp +++ b/examples/coro_io_scheduler.cpp @@ -3,7 +3,7 @@ int main() { - auto scheduler = std::make_shared(coro::io_scheduler::options{ + auto scheduler = coro::io_scheduler::make_shared(coro::io_scheduler::options{ // The scheduler will spawn a dedicated event processing thread. This is the default, but // it is possible to use 'manual' and call 'process_events()' to drive the scheduler yourself. .thread_strategy = coro::io_scheduler::thread_strategy_t::spawn, diff --git a/examples/coro_latch.cpp b/examples/coro_latch.cpp index 01fc899a..75777ccc 100644 --- a/examples/coro_latch.cpp +++ b/examples/coro_latch.cpp @@ -6,10 +6,12 @@ int main() // Complete worker tasks faster on a thread pool, using the io_scheduler version so the worker // tasks can yield for a specific amount of time to mimic difficult work. The pool is only // setup with a single thread to showcase yield_for(). - coro::io_scheduler tp{coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}}; + auto tp = coro::io_scheduler::make_shared( + coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}); // This task will wait until the given latch setters have completed. - auto make_latch_task = [](coro::latch& l) -> coro::task { + auto make_latch_task = [](coro::latch& l) -> coro::task + { // It seems like the dependent worker tasks could be created here, but in that case it would // be superior to simply do: `co_await coro::when_all(tasks);` // It is also important to note that the last dependent task will resume the waiting latch @@ -25,14 +27,15 @@ int main() // This task does 'work' and counts down on the latch when completed. The final child task to // complete will end up resuming the latch task when the latch's count reaches zero. - auto make_worker_task = [](coro::io_scheduler& tp, coro::latch& l, int64_t i) -> coro::task { + auto make_worker_task = [](std::shared_ptr& tp, coro::latch& l, int64_t i) -> coro::task + { // Schedule the worker task onto the thread pool. - co_await tp.schedule(); + co_await tp->schedule(); std::cout << "worker task " << i << " is working...\n"; // Do some expensive calculations, yield to mimic work...! 
Its also important to never use // std::this_thread::sleep_for() within the context of coroutines, it will block the thread // and other tasks that are ready to execute will be blocked. - co_await tp.yield_for(std::chrono::milliseconds{i * 20}); + co_await tp->yield_for(std::chrono::milliseconds{i * 20}); std::cout << "worker task " << i << " is done, counting down on the latch\n"; l.count_down(); co_return; diff --git a/examples/coro_task_container.cpp b/examples/coro_task_container.cpp index 31a64ddc..7ae29c55 100644 --- a/examples/coro_task_container.cpp +++ b/examples/coro_task_container.cpp @@ -3,7 +3,7 @@ int main() { - auto scheduler = std::make_shared( + auto scheduler = coro::io_scheduler::make_shared( coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}); auto make_server_task = [&]() -> coro::task diff --git a/examples/coro_tcp_echo_server.cpp b/examples/coro_tcp_echo_server.cpp index a318176a..dba150d0 100644 --- a/examples/coro_tcp_echo_server.cpp +++ b/examples/coro_tcp_echo_server.cpp @@ -61,7 +61,7 @@ auto main() -> int std::vector> workers{}; for (size_t i = 0; i < std::thread::hardware_concurrency(); ++i) { - auto scheduler = std::make_shared(coro::io_scheduler::options{ + auto scheduler = coro::io_scheduler::make_shared(coro::io_scheduler::options{ .execution_strategy = coro::io_scheduler::execution_strategy_t::process_tasks_inline}); workers.push_back(make_tcp_echo_server(scheduler)); diff --git a/include/coro/concepts/executor.hpp b/include/coro/concepts/executor.hpp index dc8394f9..ecb009cf 100644 --- a/include/coro/concepts/executor.hpp +++ b/include/coro/concepts/executor.hpp @@ -21,7 +21,10 @@ concept executor = requires(type t, std::coroutine_handle<> c) { { t.schedule() } -> coro::concepts::awaiter; { t.yield() } -> coro::concepts::awaiter; - { t.resume(c) } -> std::same_as; + { t.resume(c) } -> std::same_as; + { t.size() } -> std::same_as; + { t.empty() } -> std::same_as; + { t.shutdown() } -> std::same_as; }; #ifdef LIBCORO_FEATURE_NETWORKING diff --git a/include/coro/io_scheduler.hpp b/include/coro/io_scheduler.hpp index 1c1f46d7..db48b7c7 100644 --- a/include/coro/io_scheduler.hpp +++ b/include/coro/io_scheduler.hpp @@ -21,10 +21,15 @@ namespace coro { -class io_scheduler +class io_scheduler : public std::enable_shared_from_this { using timed_events = detail::poll_info::timed_events; + struct private_constructor + { + private_constructor() = default; + }; + public: class schedule_operation; friend schedule_operation; @@ -69,7 +74,18 @@ class io_scheduler const execution_strategy_t execution_strategy{execution_strategy_t::process_tasks_on_thread_pool}; }; - explicit io_scheduler( + /** + * @see io_scheduler::make_shared + */ + explicit io_scheduler(options&& opts, private_constructor); + + /** + * @brief Creates an io_scheduler. + * + * @param opts + * @return std::shared_ptr + */ + static auto make_shared( options opts = options{ .thread_strategy = thread_strategy_t::spawn, .on_io_thread_start_functor = nullptr, @@ -79,7 +95,7 @@ class io_scheduler ((std::thread::hardware_concurrency() > 1) ? 
(std::thread::hardware_concurrency() - 1) : 1), .on_thread_start_functor = nullptr, .on_thread_stop_functor = nullptr}, - .execution_strategy = execution_strategy_t::process_tasks_on_thread_pool}); + .execution_strategy = execution_strategy_t::process_tasks_on_thread_pool}) -> std::shared_ptr; io_scheduler(const io_scheduler&) = delete; io_scheduler(io_scheduler&&) = delete; @@ -229,8 +245,18 @@ class io_scheduler * Resumes execution of a direct coroutine handle on this io scheduler. * @param handle The coroutine handle to resume execution. */ - auto resume(std::coroutine_handle<> handle) -> void + auto resume(std::coroutine_handle<> handle) -> bool { + if (handle == nullptr) + { + return false; + } + + if (m_shutdown_requested.load(std::memory_order::acquire)) + { + return false; + } + if (m_opts.execution_strategy == execution_strategy_t::process_tasks_inline) { { @@ -245,10 +271,12 @@ class io_scheduler eventfd_t value{1}; eventfd_write(m_schedule_fd, value); } + + return true; } else { - m_thread_pool->resume(handle); + return m_thread_pool->resume(handle); } } diff --git a/include/coro/task_container.hpp b/include/coro/task_container.hpp index 6d0497b8..373ea900 100644 --- a/include/coro/task_container.hpp +++ b/include/coro/task_container.hpp @@ -36,8 +36,7 @@ class task_container task_container( std::shared_ptr e, const options opts = options{.reserve_size = 8, .growth_factor = 2}) : m_growth_factor(opts.growth_factor), - m_executor(std::move(e)), - m_executor_ptr(m_executor.get()) + m_executor(std::move(e)) { if (m_executor == nullptr) { @@ -78,22 +77,25 @@ class task_container { m_size.fetch_add(1, std::memory_order::relaxed); - std::scoped_lock lk{m_mutex}; - - if (cleanup == garbage_collect_t::yes) + std::size_t index{}; { - gc_internal(); - } + std::unique_lock lk{m_mutex}; - // Only grow if completely full and attempting to add more. - if (m_free_task_indices.empty()) - { - grow(); - } + if (cleanup == garbage_collect_t::yes) + { + gc_internal(); + } + + // Only grow if completely full and attempting to add more. + if (m_free_task_indices.empty()) + { + grow(); + } - // Reserve a free task index - std::size_t index = m_free_task_indices.front(); - m_free_task_indices.pop(); + // Reserve a free task index + index = m_free_task_indices.front(); + m_free_task_indices.pop(); + } // Store the task inside a cleanup task for self deletion. m_tasks[index] = make_cleanup_task(std::move(user_task), index); @@ -103,7 +105,7 @@ class task_container } /** - * Garbage collects any tasks that are marked as deleted. This frees up space to be re-used by + * Garbage collects any tasks that are marked as deleted. This frees up space to be re-used by * the task container for newly stored tasks. * @return The number of tasks that were deleted. */ @@ -144,7 +146,7 @@ class task_container while (!empty()) { garbage_collect(); - co_await m_executor_ptr->yield(); + co_await m_executor->yield(); } } @@ -170,7 +172,7 @@ class task_container auto gc_internal() -> std::size_t { std::size_t deleted{0}; - auto pos = std::begin(m_tasks_to_delete); + auto pos = std::begin(m_tasks_to_delete); while (pos != std::end(m_tasks_to_delete)) { // Skip tasks that are still running or have yet to start. @@ -179,7 +181,7 @@ class task_container pos++; continue; } - // Destroy the cleanup task and the user task. + // Destroy the cleanup task. m_tasks[*pos].destroy(); // Put the deleted position at the end of the free indexes list. 
 m_free_task_indices.emplace(*pos);
@@ -207,7 +209,7 @@ class task_container
 auto make_cleanup_task(task user_task, std::size_t index) -> coro::task
 {
 // Immediately move the task onto the executor.
- co_await m_executor_ptr->schedule();
+ co_await m_executor->schedule();
 try
 {
@@ -228,8 +230,16 @@ class task_container
 std::cerr << "coro::task_container user_task had unhandle exception, not derived from std::exception.\n";
 }
- std::scoped_lock lk{m_mutex};
- m_tasks_to_delete.emplace_back(index);
+ // Destroy the user task since it is complete. This is important to do so outside the lock
+ // since the user could schedule a new task from the destructor (tls::client does this internally)
+ // causing a deadlock.
+ user_task.destroy();
+
+ {
+ std::scoped_lock lk{m_mutex};
+ m_tasks_to_delete.emplace_back(index);
+ }
+
 co_return;
 }
@@ -248,20 +258,6 @@ class task_container
 double m_growth_factor{};
 /// The executor to schedule tasks that have just started.
 std::shared_ptr m_executor{nullptr};
- /// This is used internally since io_scheduler cannot pass itself in as a shared_ptr.
- executor_type* m_executor_ptr{nullptr};
-
- /**
- * Special constructor for internal types to create their embeded task containers.
- */
-
- friend io_scheduler;
- task_container(executor_type& e, const options opts = options{.reserve_size = 8, .growth_factor = 2})
- : m_growth_factor(opts.growth_factor),
- m_executor_ptr(&e)
- {
- init(opts.reserve_size);
- }
 auto init(std::size_t reserve_size) -> void
 {
diff --git a/include/coro/thread_pool.hpp b/include/coro/thread_pool.hpp
index 56d81893..c8e8c0bf 100644
--- a/include/coro/thread_pool.hpp
+++ b/include/coro/thread_pool.hpp
@@ -134,15 +134,17 @@ class thread_pool
 /**
 * Schedules any coroutine handle that is ready to be resumed.
 * @param handle The coroutine handle to schedule.
+ * @return True if the coroutine is resumed, false if it's a nullptr.
 */
- auto resume(std::coroutine_handle<> handle) noexcept -> void;
+ auto resume(std::coroutine_handle<> handle) noexcept -> bool;
 /**
 * Schedules the set of coroutine handles that are ready to be resumed.
 * @param handles The coroutine handles to schedule.
+ * @return uint64_t The number of tasks resumed, if any were null they are discarded.
*/ template> range_type> - auto resume(const range_type& handles) noexcept -> void + auto resume(const range_type& handles) noexcept -> uint64_t { m_size.fetch_add(std::size(handles), std::memory_order::release); @@ -168,7 +170,20 @@ class thread_pool m_size.fetch_sub(null_handles, std::memory_order::release); } - m_wait_cv.notify_one(); + uint64_t total = std::size(handles) - null_handles; + if (total >= m_threads.size()) + { + m_wait_cv.notify_all(); + } + else + { + for (uint64_t i = 0; i < total; ++i) + { + m_wait_cv.notify_one(); + } + } + + return total; } /** diff --git a/src/io_scheduler.cpp b/src/io_scheduler.cpp index e639bd82..7b7f3b9a 100644 --- a/src/io_scheduler.cpp +++ b/src/io_scheduler.cpp @@ -14,36 +14,47 @@ using namespace std::chrono_literals; namespace coro { -io_scheduler::io_scheduler(options opts) - : m_opts(std::move(opts)), +io_scheduler::io_scheduler(options&& opts, private_constructor) + : m_opts(opts), m_epoll_fd(epoll_create1(EPOLL_CLOEXEC)), m_shutdown_fd(eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK)), m_timer_fd(timerfd_create(CLOCK_MONOTONIC, TFD_NONBLOCK | TFD_CLOEXEC)), - m_schedule_fd(eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK)), - m_owned_tasks(new coro::task_container(*this)) + m_schedule_fd(eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK)) + +{ +} + +auto io_scheduler::make_shared(options opts) -> std::shared_ptr { + auto s = std::make_shared(std::move(opts), private_constructor{}); + + // std::enable_shared_from_this cannot be used until the object is fully created. + s->m_owned_tasks = new coro::task_container(s->shared_from_this()); + if (opts.execution_strategy == execution_strategy_t::process_tasks_on_thread_pool) { - m_thread_pool = std::make_unique(std::move(m_opts.pool)); + s->m_thread_pool = std::make_unique(std::move(s->m_opts.pool)); } epoll_event e{}; e.events = EPOLLIN; e.data.ptr = const_cast(m_shutdown_ptr); - epoll_ctl(m_epoll_fd, EPOLL_CTL_ADD, m_shutdown_fd, &e); + epoll_ctl(s->m_epoll_fd, EPOLL_CTL_ADD, s->m_shutdown_fd, &e); e.data.ptr = const_cast(m_timer_ptr); - epoll_ctl(m_epoll_fd, EPOLL_CTL_ADD, m_timer_fd, &e); + epoll_ctl(s->m_epoll_fd, EPOLL_CTL_ADD, s->m_timer_fd, &e); e.data.ptr = const_cast(m_schedule_ptr); - epoll_ctl(m_epoll_fd, EPOLL_CTL_ADD, m_schedule_fd, &e); + epoll_ctl(s->m_epoll_fd, EPOLL_CTL_ADD, s->m_schedule_fd, &e); - if (m_opts.thread_strategy == thread_strategy_t::spawn) + if (s->m_opts.thread_strategy == thread_strategy_t::spawn) { - m_io_thread = std::thread([this]() { process_events_dedicated_thread(); }); + s->m_io_thread = std::thread([s]() { s->process_events_dedicated_thread(); }); } // else manual mode, the user must call process_events. 
+ + return s; } io_scheduler::~io_scheduler() diff --git a/src/thread_pool.cpp b/src/thread_pool.cpp index 07d37f19..3c33b0fe 100644 --- a/src/thread_pool.cpp +++ b/src/thread_pool.cpp @@ -44,15 +44,21 @@ auto thread_pool::schedule() -> operation throw std::runtime_error("coro::thread_pool is shutting down, unable to schedule new tasks."); } -auto thread_pool::resume(std::coroutine_handle<> handle) noexcept -> void +auto thread_pool::resume(std::coroutine_handle<> handle) noexcept -> bool { if (handle == nullptr) { - return; + return false; + } + + if (m_shutdown_requested.load(std::memory_order::acquire)) + { + return false; } m_size.fetch_add(1, std::memory_order::release); schedule_impl(handle); + return true; } auto thread_pool::shutdown() noexcept -> void @@ -84,29 +90,44 @@ auto thread_pool::executor(std::size_t idx) -> void m_opts.on_thread_start_functor(idx); } - // Process until shutdown is requested and the total number of tasks reaches zero. - while (!m_shutdown_requested.load(std::memory_order::acquire) || m_size.load(std::memory_order::acquire) > 0) + // Process until shutdown is requested. + while (!m_shutdown_requested.load(std::memory_order::acquire)) { std::unique_lock lk{m_wait_mutex}; - m_wait_cv.wait( - lk, - [&] { - return m_size.load(std::memory_order::acquire) > 0 || - m_shutdown_requested.load(std::memory_order::acquire); - }); - // Process this batch until the queue is empty. - while (!m_queue.empty()) + m_wait_cv.wait(lk, [&]() { return !m_queue.empty() || m_shutdown_requested.load(std::memory_order::acquire); }); + + if (m_queue.empty()) { - auto handle = m_queue.front(); - m_queue.pop_front(); + continue; + } + + auto handle = m_queue.front(); + m_queue.pop_front(); + lk.unlock(); - // Release the lock while executing the coroutine. - lk.unlock(); - handle.resume(); + // Release the lock while executing the coroutine. + handle.resume(); + m_size.fetch_sub(1, std::memory_order::release); + } - m_size.fetch_sub(1, std::memory_order::release); - lk.lock(); + // Process until there are no ready tasks left. + while (m_size.load(std::memory_order::acquire) > 0) + { + std::unique_lock lk{m_wait_mutex}; + // m_size will only drop to zero once all executing coroutines are finished + // but the queue could be empty for threads that finished early. + if (m_queue.empty()) + { + break; } + + auto handle = m_queue.front(); + m_queue.pop_front(); + lk.unlock(); + + // Release the lock while executing the coroutine. 
+ handle.resume(); + m_size.fetch_sub(1, std::memory_order::release); } if (m_opts.on_thread_stop_functor != nullptr) @@ -125,9 +146,8 @@ auto thread_pool::schedule_impl(std::coroutine_handle<> handle) noexcept -> void { std::scoped_lock lk{m_wait_mutex}; m_queue.emplace_back(handle); + m_wait_cv.notify_one(); } - - m_wait_cv.notify_one(); } } // namespace coro diff --git a/test/bench.cpp b/test/bench.cpp index 9cd7196f..f102fe26 100644 --- a/test/bench.cpp +++ b/test/bench.cpp @@ -237,7 +237,8 @@ TEST_CASE("benchmark counter task scheduler{1} yield", "[benchmark]") constexpr std::size_t iterations = default_iterations; constexpr std::size_t ops = iterations * 2; // the external resume is still a resume op - coro::io_scheduler s{coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}}; + auto s = coro::io_scheduler::make_shared( + coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}); std::atomic counter{0}; std::vector> tasks{}; @@ -245,8 +246,8 @@ TEST_CASE("benchmark counter task scheduler{1} yield", "[benchmark]") auto make_task = [&]() -> coro::task { - co_await s.schedule(); - co_await s.yield(); + co_await s->schedule(); + co_await s->yield(); counter.fetch_add(1, std::memory_order::relaxed); co_return; }; @@ -262,7 +263,7 @@ TEST_CASE("benchmark counter task scheduler{1} yield", "[benchmark]") auto stop = sc::now(); print_stats("benchmark counter task scheduler{1} yield", ops, start, stop); - REQUIRE(s.empty()); + REQUIRE(s->empty()); REQUIRE(counter == iterations); } @@ -271,7 +272,8 @@ TEST_CASE("benchmark counter task scheduler{1} yield_for", "[benchmark]") constexpr std::size_t iterations = default_iterations; constexpr std::size_t ops = iterations * 2; // the external resume is still a resume op - coro::io_scheduler s{coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}}; + auto s = coro::io_scheduler::make_shared( + coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}); std::atomic counter{0}; std::vector> tasks{}; @@ -279,8 +281,8 @@ TEST_CASE("benchmark counter task scheduler{1} yield_for", "[benchmark]") auto make_task = [&]() -> coro::task { - co_await s.schedule(); - co_await s.yield_for(std::chrono::milliseconds{1}); + co_await s->schedule(); + co_await s->yield_for(std::chrono::milliseconds{1}); counter.fetch_add(1, std::memory_order::relaxed); co_return; }; @@ -296,7 +298,7 @@ TEST_CASE("benchmark counter task scheduler{1} yield_for", "[benchmark]") auto stop = sc::now(); print_stats("benchmark counter task scheduler{1} yield", ops, start, stop); - REQUIRE(s.empty()); + REQUIRE(s->empty()); REQUIRE(counter == iterations); } @@ -305,7 +307,8 @@ TEST_CASE("benchmark counter task scheduler await event from another coroutine", constexpr std::size_t iterations = default_iterations; constexpr std::size_t ops = iterations * 3; // two tasks + event resume - coro::io_scheduler s{coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}}; + auto s = coro::io_scheduler::make_shared( + coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}); std::vector> events{}; events.reserve(iterations); @@ -321,7 +324,7 @@ TEST_CASE("benchmark counter task scheduler await event from another coroutine", auto wait_func = [&](std::size_t index) -> coro::task { - co_await s.schedule(); + co_await s->schedule(); co_await *events[index]; counter.fetch_add(1, std::memory_order::relaxed); co_return; @@ -329,7 +332,7 @@ 
TEST_CASE("benchmark counter task scheduler await event from another coroutine", auto resume_func = [&](std::size_t index) -> coro::task { - co_await s.schedule(); + co_await s->schedule(); events[index]->set(); co_return; }; @@ -349,11 +352,11 @@ TEST_CASE("benchmark counter task scheduler await event from another coroutine", REQUIRE(counter == iterations); // valgrind workaround - while (!s.empty()) + while (!s->empty()) { std::this_thread::sleep_for(std::chrono::milliseconds{1}); } - REQUIRE(s.empty()); + REQUIRE(s->empty()); } #ifdef LIBCORO_FEATURE_NETWORKING @@ -399,7 +402,7 @@ TEST_CASE("benchmark tcp::server echo server thread pool", "[benchmark]") co_return; }; - auto server_scheduler = std::make_shared(coro::io_scheduler::options{ + auto server_scheduler = coro::io_scheduler::make_shared(coro::io_scheduler::options{ .pool = coro::thread_pool::options{}, .execution_strategy = coro::io_scheduler::execution_strategy_t::process_tasks_on_thread_pool}); auto make_server_task = [&]() -> coro::task @@ -433,7 +436,7 @@ TEST_CASE("benchmark tcp::server echo server thread pool", "[benchmark]") std::mutex g_histogram_mutex; std::map g_histogram; - auto client_scheduler = std::make_shared(coro::io_scheduler::options{ + auto client_scheduler = coro::io_scheduler::make_shared(coro::io_scheduler::options{ .pool = coro::thread_pool::options{}, .execution_strategy = coro::io_scheduler::execution_strategy_t::process_tasks_on_thread_pool}); auto make_client_task = [&]() -> coro::task @@ -538,7 +541,7 @@ TEST_CASE("benchmark tcp::server echo server inline", "[benchmark]") struct server { uint64_t id; - std::shared_ptr scheduler{std::make_shared( + std::shared_ptr scheduler{coro::io_scheduler::make_shared( coro::io_scheduler::options{.execution_strategy = estrat::process_tasks_inline})}; uint64_t live_clients{0}; coro::event wait_for_clients{}; @@ -546,7 +549,7 @@ TEST_CASE("benchmark tcp::server echo server inline", "[benchmark]") struct client { - std::shared_ptr scheduler{std::make_shared( + std::shared_ptr scheduler{coro::io_scheduler::make_shared( coro::io_scheduler::options{.execution_strategy = estrat::process_tasks_inline})}; std::vector> tasks{}; }; @@ -785,7 +788,7 @@ TEST_CASE("benchmark tls::server echo server thread pool", "[benchmark]") co_return; }; - auto server_scheduler = std::make_shared(coro::io_scheduler::options{ + auto server_scheduler = coro::io_scheduler::make_shared(coro::io_scheduler::options{ .pool = coro::thread_pool::options{}, .execution_strategy = coro::io_scheduler::execution_strategy_t::process_tasks_on_thread_pool}); auto make_server_task = [&]() -> coro::task @@ -823,7 +826,7 @@ TEST_CASE("benchmark tls::server echo server thread pool", "[benchmark]") coro::mutex histogram_mutex; std::map g_histogram; - auto client_scheduler = std::make_shared(coro::io_scheduler::options{ + auto client_scheduler = coro::io_scheduler::make_shared(coro::io_scheduler::options{ .pool = coro::thread_pool::options{}, .execution_strategy = coro::io_scheduler::execution_strategy_t::process_tasks_on_thread_pool}); auto make_client_task = [&](coro::mutex& histogram_mutex) -> coro::task diff --git a/test/net/test_dns_resolver.cpp b/test/net/test_dns_resolver.cpp index 071d5dc6..4c69fc70 100644 --- a/test/net/test_dns_resolver.cpp +++ b/test/net/test_dns_resolver.cpp @@ -8,7 +8,7 @@ TEST_CASE("dns_resolver basic", "[dns]") { - auto scheduler = std::make_shared( + auto scheduler = coro::io_scheduler::make_shared( coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 
1}}); coro::net::dns::resolver dns_resolver{scheduler, std::chrono::milliseconds{5000}}; diff --git a/test/net/test_tcp_server.cpp b/test/net/test_tcp_server.cpp index 33ebebf7..09966482 100644 --- a/test/net/test_tcp_server.cpp +++ b/test/net/test_tcp_server.cpp @@ -11,7 +11,7 @@ TEST_CASE("tcp_server ping server", "[tcp_server]") const std::string client_msg{"Hello from client"}; const std::string server_msg{"Reply from server!"}; - auto scheduler = std::make_shared( + auto scheduler = coro::io_scheduler::make_shared( coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}); auto make_client_task = [&]() -> coro::task @@ -91,7 +91,7 @@ TEST_CASE("tcp_server concurrent polling on the same socket", "[tcp_server]") // Issue 224: This test duplicates a client and issues two different poll operations per coroutine. using namespace std::chrono_literals; - auto scheduler = std::make_shared(coro::io_scheduler::options{ + auto scheduler = coro::io_scheduler::make_shared(coro::io_scheduler::options{ .execution_strategy = coro::io_scheduler::execution_strategy_t::process_tasks_inline}); auto make_read_task = [](coro::net::tcp::client client) -> coro::task diff --git a/test/net/test_tls_server.cpp b/test/net/test_tls_server.cpp index 1ddebab4..230d4d43 100644 --- a/test/net/test_tls_server.cpp +++ b/test/net/test_tls_server.cpp @@ -9,7 +9,7 @@ TEST_CASE("tls_server hello world server", "[tls_server]") { - auto scheduler = std::make_shared( + auto scheduler = coro::io_scheduler::make_shared( coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}); std::string client_msg = "Hello world from TLS client!"; diff --git a/test/net/test_udp_peers.cpp b/test/net/test_udp_peers.cpp index 4d96fe72..bfa6b7dc 100644 --- a/test/net/test_udp_peers.cpp +++ b/test/net/test_udp_peers.cpp @@ -8,7 +8,7 @@ TEST_CASE("udp one way") { const std::string msg{"aaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbcccccccccccccccccc"}; - auto scheduler = std::make_shared( + auto scheduler = coro::io_scheduler::make_shared( coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}); auto make_send_task = [&]() -> coro::task @@ -54,7 +54,7 @@ TEST_CASE("udp echo peers") const std::string peer1_msg{"Hello from peer1!"}; const std::string peer2_msg{"Hello from peer2!!"}; - auto scheduler = std::make_shared( + auto scheduler = coro::io_scheduler::make_shared( coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}); auto make_peer_task = [&scheduler]( diff --git a/test/test_io_scheduler.cpp b/test/test_io_scheduler.cpp index 012dc4b6..9e5d73cd 100644 --- a/test/test_io_scheduler.cpp +++ b/test/test_io_scheduler.cpp @@ -18,20 +18,21 @@ using namespace std::chrono_literals; TEST_CASE("io_scheduler schedule single task", "[io_scheduler]") { - coro::io_scheduler s{coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}}; + auto s = coro::io_scheduler::make_shared( + coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}); auto make_task = [&]() -> coro::task { - co_await s.schedule(); + co_await s->schedule(); co_return 42; }; auto value = coro::sync_wait(make_task()); REQUIRE(value == 42); - std::cerr << "io_scheduler.size() before shutdown = " << s.size() << "\n"; - s.shutdown(); - std::cerr << "io_scheduler.size() after shutdown = " << s.size() << "\n"; - REQUIRE(s.empty()); + std::cerr << "io_scheduler.size() before shutdown = " << s->size() << "\n"; + s->shutdown(); + std::cerr << 
"io_scheduler.size() after shutdown = " << s->size() << "\n"; + REQUIRE(s->empty()); } TEST_CASE("io_scheduler submit mutiple tasks", "[io_scheduler]") @@ -40,11 +41,11 @@ TEST_CASE("io_scheduler submit mutiple tasks", "[io_scheduler]") std::atomic counter{0}; std::vector> tasks{}; tasks.reserve(n); - coro::io_scheduler s{}; + auto s = coro::io_scheduler::make_shared(); auto make_task = [&]() -> coro::task { - co_await s.schedule(); + co_await s->schedule(); counter++; co_return; }; @@ -57,16 +58,17 @@ TEST_CASE("io_scheduler submit mutiple tasks", "[io_scheduler]") REQUIRE(counter == n); - std::cerr << "io_scheduler.size() before shutdown = " << s.size() << "\n"; - s.shutdown(); - std::cerr << "io_scheduler.size() after shutdown = " << s.size() << "\n"; - REQUIRE(s.empty()); + std::cerr << "io_scheduler.size() before shutdown = " << s->size() << "\n"; + s->shutdown(); + std::cerr << "io_scheduler.size() after shutdown = " << s->size() << "\n"; + REQUIRE(s->empty()); } TEST_CASE("io_scheduler task with multiple events", "[io_scheduler]") { std::atomic counter{0}; - coro::io_scheduler s{coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}}; + auto s = coro::io_scheduler::make_shared( + coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}); coro::event e1; coro::event e2; @@ -74,7 +76,7 @@ TEST_CASE("io_scheduler task with multiple events", "[io_scheduler]") auto make_wait_task = [&]() -> coro::task { - co_await s.schedule(); + co_await s->schedule(); co_await e1; counter++; co_await e2; @@ -86,7 +88,7 @@ TEST_CASE("io_scheduler task with multiple events", "[io_scheduler]") auto make_set_task = [&](coro::event& e) -> coro::task { - co_await s.schedule(); + co_await s->schedule(); e.set(); }; @@ -94,28 +96,29 @@ TEST_CASE("io_scheduler task with multiple events", "[io_scheduler]") REQUIRE(counter == 3); - std::cerr << "io_scheduler.size() before shutdown = " << s.size() << "\n"; - s.shutdown(); - std::cerr << "io_scheduler.size() after shutdown = " << s.size() << "\n"; - REQUIRE(s.empty()); + std::cerr << "io_scheduler.size() before shutdown = " << s->size() << "\n"; + s->shutdown(); + std::cerr << "io_scheduler.size() after shutdown = " << s->size() << "\n"; + REQUIRE(s->empty()); } TEST_CASE("io_scheduler task with read poll", "[io_scheduler]") { - auto trigger_fd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK); - coro::io_scheduler s{coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}}; + auto trigger_fd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK); + auto s = coro::io_scheduler::make_shared( + coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}); auto make_poll_read_task = [&]() -> coro::task { - co_await s.schedule(); - auto status = co_await s.poll(trigger_fd, coro::poll_op::read); + co_await s->schedule(); + auto status = co_await s->poll(trigger_fd, coro::poll_op::read); REQUIRE(status == coro::poll_status::event); co_return; }; auto make_poll_write_task = [&]() -> coro::task { - co_await s.schedule(); + co_await s->schedule(); uint64_t value{42}; auto unused = write(trigger_fd, &value, sizeof(value)); (void)unused; @@ -124,30 +127,31 @@ TEST_CASE("io_scheduler task with read poll", "[io_scheduler]") coro::sync_wait(coro::when_all(make_poll_read_task(), make_poll_write_task())); - std::cerr << "io_scheduler.size() before shutdown = " << s.size() << "\n"; - s.shutdown(); - std::cerr << "io_scheduler.size() after shutdown = " << s.size() << "\n"; - REQUIRE(s.empty()); + 
std::cerr << "io_scheduler.size() before shutdown = " << s->size() << "\n"; + s->shutdown(); + std::cerr << "io_scheduler.size() after shutdown = " << s->size() << "\n"; + REQUIRE(s->empty()); close(trigger_fd); } TEST_CASE("io_scheduler task with read poll with timeout", "[io_scheduler]") { - auto trigger_fd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK); - coro::io_scheduler s{coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}}; + auto trigger_fd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK); + auto s = coro::io_scheduler::make_shared( + coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}); auto make_poll_read_task = [&]() -> coro::task { - co_await s.schedule(); + co_await s->schedule(); // Poll with a timeout but don't timeout. - auto status = co_await s.poll(trigger_fd, coro::poll_op::read, 50ms); + auto status = co_await s->poll(trigger_fd, coro::poll_op::read, 50ms); REQUIRE(status == coro::poll_status::event); co_return; }; auto make_poll_write_task = [&]() -> coro::task { - co_await s.schedule(); + co_await s->schedule(); uint64_t value{42}; auto unused = write(trigger_fd, &value, sizeof(value)); (void)unused; @@ -156,73 +160,49 @@ TEST_CASE("io_scheduler task with read poll with timeout", "[io_scheduler]") coro::sync_wait(coro::when_all(make_poll_read_task(), make_poll_write_task())); - std::cerr << "io_scheduler.size() before shutdown = " << s.size() << "\n"; - s.shutdown(); - std::cerr << "io_scheduler.size() after shutdown = " << s.size() << "\n"; - REQUIRE(s.empty()); + std::cerr << "io_scheduler.size() before shutdown = " << s->size() << "\n"; + s->shutdown(); + std::cerr << "io_scheduler.size() after shutdown = " << s->size() << "\n"; + REQUIRE(s->empty()); close(trigger_fd); } TEST_CASE("io_scheduler task with read poll timeout", "[io_scheduler]") { - auto trigger_fd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK); - coro::io_scheduler s{coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}}; + auto trigger_fd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK); + auto s = coro::io_scheduler::make_shared( + coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}); auto make_task = [&]() -> coro::task { - co_await s.schedule(); + co_await s->schedule(); // Poll with a timeout and timeout. - auto status = co_await s.poll(trigger_fd, coro::poll_op::read, 10ms); + auto status = co_await s->poll(trigger_fd, coro::poll_op::read, 10ms); REQUIRE(status == coro::poll_status::timeout); co_return; }; coro::sync_wait(make_task()); - std::cerr << "io_scheduler.size() before shutdown = " << s.size() << "\n"; - s.shutdown(); - std::cerr << "io_scheduler.size() after shutdown = " << s.size() << "\n"; - REQUIRE(s.empty()); + std::cerr << "io_scheduler.size() before shutdown = " << s->size() << "\n"; + s->shutdown(); + std::cerr << "io_scheduler.size() after shutdown = " << s->size() << "\n"; + REQUIRE(s->empty()); close(trigger_fd); } -// TODO: This probably requires a TCP socket? 
-// TEST_CASE("io_scheduler task with read poll closed socket", "[io_scheduler]") -// { -// auto trigger_fd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK); -// coro::io_scheduler s{coro::io_scheduler::options{.pool = coro::thread_pool::options { .thread_count = 1 }}}; - -// auto make_poll_task = [&]() -> coro::task { -// co_await s.schedule(); -// auto status = co_await s.poll(trigger_fd, coro::poll_op::read, 1000ms); -// REQUIRE(status == coro::poll_status::closed); -// co_return; -// }; - -// auto make_close_task = [&]() -> coro::task { -// co_await s.schedule(); -// std::this_thread::sleep_for(100ms); -// // shutdown(trigger_fd, SHUT_RDWR); -// close(trigger_fd); -// co_return; -// }; - -// coro::sync_wait(coro::when_all(make_poll_task(), make_close_task())); - -// s.shutdown(); -// REQUIRE(s.empty()); -// } - TEST_CASE("io_scheduler separate thread resume", "[io_scheduler]") { - coro::io_scheduler s1{coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}}; - coro::io_scheduler s2{coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}}; + auto s1 = coro::io_scheduler::make_shared( + coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}); + auto s2 = coro::io_scheduler::make_shared( + coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}); coro::event e{}; auto make_s1_task = [&]() -> coro::task { - co_await s1.schedule(); + co_await s1->schedule(); auto tid = std::this_thread::get_id(); co_await e; @@ -234,7 +214,7 @@ TEST_CASE("io_scheduler separate thread resume", "[io_scheduler]") auto make_s2_task = [&]() -> coro::task { - co_await s2.schedule(); + co_await s2->schedule(); // Wait a bit to be sure the wait on 'e' in the other scheduler is done first. std::this_thread::sleep_for(10ms); e.set(); @@ -243,19 +223,20 @@ TEST_CASE("io_scheduler separate thread resume", "[io_scheduler]") coro::sync_wait(coro::when_all(make_s1_task(), make_s2_task())); - s1.shutdown(); - REQUIRE(s1.empty()); - s2.shutdown(); - REQUIRE(s2.empty()); + s1->shutdown(); + REQUIRE(s1->empty()); + s2->shutdown(); + REQUIRE(s2->empty()); } TEST_CASE("io_scheduler separate thread resume spawned thread", "[io_scheduler]") { - coro::io_scheduler s{coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}}; + auto s = coro::io_scheduler::make_shared( + coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}); auto make_task = [&]() -> coro::task { - co_await s.schedule(); + co_await s->schedule(); coro::event e{}; auto tid = std::this_thread::get_id(); @@ -269,7 +250,7 @@ TEST_CASE("io_scheduler separate thread resume spawned thread", "[io_scheduler]" { // mimic some expensive computation // Resume the coroutine back onto the scheduler, not this background thread. 
- e.set(s); + e.set(*s); }); third_party_thread.detach(); @@ -280,14 +261,15 @@ TEST_CASE("io_scheduler separate thread resume spawned thread", "[io_scheduler]" coro::sync_wait(make_task()); - s.shutdown(); - REQUIRE(s.empty()); + s->shutdown(); + REQUIRE(s->empty()); } TEST_CASE("io_scheduler separate thread resume with return", "[io_scheduler]") { constexpr uint64_t expected_value{1337}; - coro::io_scheduler s{coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}}; + auto s = coro::io_scheduler::make_shared( + coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}); coro::event start_service{}; coro::event service_done{}; @@ -302,7 +284,7 @@ TEST_CASE("io_scheduler separate thread resume with return", "[io_scheduler]") } output = expected_value; - service_done.set(s); + service_done.set(*s); }}; auto third_party_service = [&](int multiplier) -> coro::task @@ -314,7 +296,7 @@ TEST_CASE("io_scheduler separate thread resume with return", "[io_scheduler]") auto make_task = [&]() -> coro::task { - co_await s.schedule(); + co_await s->schedule(); int multiplier{5}; uint64_t value = co_await third_party_service(multiplier); @@ -324,26 +306,27 @@ TEST_CASE("io_scheduler separate thread resume with return", "[io_scheduler]") coro::sync_wait(make_task()); service.join(); - std::cerr << "io_scheduler.size() before shutdown = " << s.size() << "\n"; - s.shutdown(); - std::cerr << "io_scheduler.size() after shutdown = " << s.size() << "\n"; - REQUIRE(s.empty()); + std::cerr << "io_scheduler.size() before shutdown = " << s->size() << "\n"; + s->shutdown(); + std::cerr << "io_scheduler.size() after shutdown = " << s->size() << "\n"; + REQUIRE(s->empty()); } TEST_CASE("io_scheduler with basic task", "[io_scheduler]") { constexpr std::size_t expected_value{5}; - coro::io_scheduler s{coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}}; + auto s = coro::io_scheduler::make_shared( + coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}); auto add_data = [&](uint64_t val) -> coro::task { - co_await s.schedule(); + co_await s->schedule(); co_return val; }; auto func = [&]() -> coro::task { - co_await s.schedule(); + co_await s->schedule(); auto output_tasks = co_await coro::when_all(add_data(1), add_data(1), add_data(1), add_data(1), add_data(1)); @@ -357,10 +340,10 @@ TEST_CASE("io_scheduler with basic task", "[io_scheduler]") REQUIRE(counter == expected_value); - std::cerr << "io_scheduler.size() before shutdown = " << s.size() << "\n"; - s.shutdown(); - std::cerr << "io_scheduler.size() after shutdown = " << s.size() << "\n"; - REQUIRE(s.empty()); + std::cerr << "io_scheduler.size() before shutdown = " << s->size() << "\n"; + s->shutdown(); + std::cerr << "io_scheduler.size() after shutdown = " << s->size() << "\n"; + REQUIRE(s->empty()); } TEST_CASE("io_scheduler scheduler_after", "[io_scheduler]") @@ -379,38 +362,38 @@ TEST_CASE("io_scheduler scheduler_after", "[io_scheduler]") }; { - coro::io_scheduler s{coro::io_scheduler::options{ - .pool = coro::thread_pool::options{ - .thread_count = 1, .on_thread_start_functor = [&](std::size_t) { tid = std::this_thread::get_id(); }}}}; - auto start = std::chrono::steady_clock::now(); - coro::sync_wait(func(s, 0ms)); + auto s = coro::io_scheduler::make_shared(coro::io_scheduler::options{ + .pool = coro::thread_pool::options{ + .thread_count = 1, .on_thread_start_functor = [&](std::size_t) { tid = std::this_thread::get_id(); }}}); + auto start = 
std::chrono::steady_clock::now(); + coro::sync_wait(func(*s, 0ms)); auto stop = std::chrono::steady_clock::now(); auto duration = std::chrono::duration_cast(stop - start); REQUIRE(counter == 1); REQUIRE(duration < wait_for); - std::cerr << "io_scheduler.size() before shutdown = " << s.size() << "\n"; - s.shutdown(); - std::cerr << "io_scheduler.size() after shutdown = " << s.size() << "\n"; - REQUIRE(s.empty()); + std::cerr << "io_scheduler.size() before shutdown = " << s->size() << "\n"; + s->shutdown(); + std::cerr << "io_scheduler.size() after shutdown = " << s->size() << "\n"; + REQUIRE(s->empty()); } { - coro::io_scheduler s{coro::io_scheduler::options{ + auto s = coro::io_scheduler::make_shared(coro::io_scheduler::options{ .pool = coro::thread_pool::options{ - .thread_count = 1, .on_thread_start_functor = [&](std::size_t) { tid = std::this_thread::get_id(); }}}}; + .thread_count = 1, .on_thread_start_functor = [&](std::size_t) { tid = std::this_thread::get_id(); }}}); auto start = std::chrono::steady_clock::now(); - coro::sync_wait(func(s, wait_for)); + coro::sync_wait(func(*s, wait_for)); auto stop = std::chrono::steady_clock::now(); auto duration = std::chrono::duration_cast(stop - start); REQUIRE(counter == 2); REQUIRE(duration >= wait_for); - std::cerr << "io_scheduler.size() before shutdown = " << s.size() << "\n"; - s.shutdown(); - std::cerr << "io_scheduler.size() after shutdown = " << s.size() << "\n"; - REQUIRE(s.empty()); + std::cerr << "io_scheduler.size() before shutdown = " << s->size() << "\n"; + s->shutdown(); + std::cerr << "io_scheduler.size() after shutdown = " << s->size() << "\n"; + REQUIRE(s->empty()); } } @@ -422,13 +405,13 @@ TEST_CASE("io_scheduler schedule_at", "[io_scheduler]") std::atomic counter{0}; std::thread::id tid; - coro::io_scheduler s{coro::io_scheduler::options{ + auto s = coro::io_scheduler::make_shared(coro::io_scheduler::options{ .pool = coro::thread_pool::options{ - .thread_count = 1, .on_thread_start_functor = [&](std::size_t) { tid = std::this_thread::get_id(); }}}}; + .thread_count = 1, .on_thread_start_functor = [&](std::size_t) { tid = std::this_thread::get_id(); }}}); auto func = [&](std::chrono::steady_clock::time_point time) -> coro::task { - co_await s.schedule_at(time); + co_await s->schedule_at(time); ++counter; REQUIRE(tid == std::this_thread::get_id()); co_return; @@ -467,55 +450,57 @@ TEST_CASE("io_scheduler schedule_at", "[io_scheduler]") TEST_CASE("io_scheduler yield", "[io_scheduler]") { - std::thread::id tid; - coro::io_scheduler s{coro::io_scheduler::options{ - .pool = coro::thread_pool::options{ - .thread_count = 1, .on_thread_start_functor = [&](std::size_t) { tid = std::this_thread::get_id(); }}}}; + std::thread::id tid; + auto s = coro::io_scheduler::make_shared(coro::io_scheduler::options{ + .pool = coro::thread_pool::options{ + .thread_count = 1, .on_thread_start_functor = [&](std::size_t) { tid = std::this_thread::get_id(); }}}); auto func = [&]() -> coro::task { REQUIRE(tid != std::this_thread::get_id()); - co_await s.schedule(); + co_await s->schedule(); REQUIRE(tid == std::this_thread::get_id()); - co_await s.yield(); // this is really a thread pool function but /shrug + co_await s->yield(); // this is really a thread pool function but /shrug REQUIRE(tid == std::this_thread::get_id()); co_return; }; coro::sync_wait(func()); - std::cerr << "io_scheduler.size() before shutdown = " << s.size() << "\n"; - s.shutdown(); - std::cerr << "io_scheduler.size() after shutdown = " << s.size() << "\n"; - REQUIRE(s.empty()); + 
std::cerr << "io_scheduler.size() before shutdown = " << s->size() << "\n"; + s->shutdown(); + std::cerr << "io_scheduler.size() after shutdown = " << s->size() << "\n"; + REQUIRE(s->empty()); } TEST_CASE("io_scheduler yield_for", "[io_scheduler]") { - coro::io_scheduler s{coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}}; + auto s = coro::io_scheduler::make_shared( + coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}); const std::chrono::milliseconds wait_for{50}; auto make_task = [&]() -> coro::task { - co_await s.schedule(); + co_await s->schedule(); auto start = std::chrono::steady_clock::now(); - co_await s.yield_for(wait_for); + co_await s->yield_for(wait_for); co_return std::chrono::duration_cast(std::chrono::steady_clock::now() - start); }; auto duration = coro::sync_wait(make_task()); REQUIRE(duration >= wait_for); - std::cerr << "io_scheduler.size() before shutdown = " << s.size() << "\n"; - s.shutdown(); - std::cerr << "io_scheduler.size() after shutdown = " << s.size() << "\n"; - REQUIRE(s.empty()); + std::cerr << "io_scheduler.size() before shutdown = " << s->size() << "\n"; + s->shutdown(); + std::cerr << "io_scheduler.size() after shutdown = " << s->size() << "\n"; + REQUIRE(s->empty()); } TEST_CASE("io_scheduler yield_until", "[io_scheduler]") { - coro::io_scheduler s{coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}}; + auto s = coro::io_scheduler::make_shared( + coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}); // Because yield_until() takes its own time internally the wait_for might be off by a bit. const std::chrono::milliseconds epsilon{3}; @@ -523,26 +508,26 @@ TEST_CASE("io_scheduler yield_until", "[io_scheduler]") auto make_task = [&]() -> coro::task { - co_await s.schedule(); + co_await s->schedule(); auto start = std::chrono::steady_clock::now(); - co_await s.yield_until(start + wait_for); + co_await s->yield_until(start + wait_for); co_return std::chrono::duration_cast(std::chrono::steady_clock::now() - start); }; auto duration = coro::sync_wait(make_task()); REQUIRE(duration >= (wait_for - epsilon)); - std::cerr << "io_scheduler.size() before shutdown = " << s.size() << "\n"; - s.shutdown(); - std::cerr << "io_scheduler.size() after shutdown = " << s.size() << "\n"; - REQUIRE(s.empty()); + std::cerr << "io_scheduler.size() before shutdown = " << s->size() << "\n"; + s->shutdown(); + std::cerr << "io_scheduler.size() after shutdown = " << s->size() << "\n"; + REQUIRE(s->empty()); } TEST_CASE("io_scheduler multipler event waiters", "[io_scheduler]") { const constexpr std::size_t total{10}; coro::event e{}; - coro::io_scheduler s{}; + auto s = coro::io_scheduler::make_shared(); auto func = [&]() -> coro::task { @@ -552,7 +537,7 @@ TEST_CASE("io_scheduler multipler event waiters", "[io_scheduler]") auto spawn = [&]() -> coro::task { - co_await s.schedule(); + co_await s->schedule(); std::vector> tasks; for (size_t i = 0; i < total; ++i) { @@ -571,30 +556,31 @@ TEST_CASE("io_scheduler multipler event waiters", "[io_scheduler]") auto release = [&]() -> coro::task { - co_await s.schedule_after(10ms); - e.set(s); + co_await s->schedule_after(10ms); + e.set(*s); }; coro::sync_wait(coro::when_all(spawn(), release())); - std::cerr << "io_scheduler.size() before shutdown = " << s.size() << "\n"; - s.shutdown(); - std::cerr << "io_scheduler.size() after shutdown = " << s.size() << "\n"; - REQUIRE(s.empty()); + std::cerr << "io_scheduler.size() 
before shutdown = " << s->size() << "\n"; + s->shutdown(); + std::cerr << "io_scheduler.size() after shutdown = " << s->size() << "\n"; + REQUIRE(s->empty()); } TEST_CASE("io_scheduler self generating coroutine (stack overflow check)", "[io_scheduler]") { const constexpr std::size_t total{1'000'000}; uint64_t counter{0}; - coro::io_scheduler s{coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}}; + auto s = coro::io_scheduler::make_shared( + coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}); std::vector> tasks; tasks.reserve(total); auto func = [&](auto f) -> coro::task { - co_await s.schedule(); + co_await s->schedule(); ++counter; if (counter % total == 0) @@ -619,44 +605,44 @@ TEST_CASE("io_scheduler self generating coroutine (stack overflow check)", "[io_ REQUIRE(tasks.size() == total - 1); - std::cerr << "io_scheduler.size() before shutdown = " << s.size() << "\n"; - s.shutdown(); - std::cerr << "io_scheduler.size() after shutdown = " << s.size() << "\n"; - REQUIRE(s.empty()); + std::cerr << "io_scheduler.size() before shutdown = " << s->size() << "\n"; + s->shutdown(); + std::cerr << "io_scheduler.size() after shutdown = " << s->size() << "\n"; + REQUIRE(s->empty()); } TEST_CASE("io_scheduler manual process events thread pool", "[io_scheduler]") { - auto trigger_fd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK); - coro::io_scheduler s{coro::io_scheduler::options{ - .thread_strategy = coro::io_scheduler::thread_strategy_t::manual, - .pool = coro::thread_pool::options{ - .thread_count = 1, - }}}; + auto trigger_fd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK); + auto s = coro::io_scheduler::make_shared(coro::io_scheduler::options{ + .thread_strategy = coro::io_scheduler::thread_strategy_t::manual, + .pool = coro::thread_pool::options{ + .thread_count = 1, + }}); std::atomic polling{false}; auto make_poll_read_task = [&]() -> coro::task { - std::cerr << "poll task start s.size() == " << s.size() << "\n"; - co_await s.schedule(); + std::cerr << "poll task start s.size() == " << s->size() << "\n"; + co_await s->schedule(); polling = true; - std::cerr << "poll task polling s.size() == " << s.size() << "\n"; - auto status = co_await s.poll(trigger_fd, coro::poll_op::read); + std::cerr << "poll task polling s.size() == " << s->size() << "\n"; + auto status = co_await s->poll(trigger_fd, coro::poll_op::read); REQUIRE(status == coro::poll_status::event); - std::cerr << "poll task exiting s.size() == " << s.size() << "\n"; + std::cerr << "poll task exiting s.size() == " << s->size() << "\n"; co_return; }; auto make_poll_write_task = [&]() -> coro::task { - std::cerr << "write task start s.size() == " << s.size() << "\n"; - co_await s.schedule(); + std::cerr << "write task start s.size() == " << s->size() << "\n"; + co_await s->schedule(); uint64_t value{42}; - std::cerr << "write task writing s.size() == " << s.size() << "\n"; + std::cerr << "write task writing s.size() == " << s->size() << "\n"; auto unused = write(trigger_fd, &value, sizeof(value)); (void)unused; - std::cerr << "write task exiting s.size() == " << s.size() << "\n"; + std::cerr << "write task exiting s.size() == " << s->size() << "\n"; co_return; }; @@ -671,43 +657,43 @@ TEST_CASE("io_scheduler manual process events thread pool", "[io_scheduler]") write_task.resume(); - while (s.process_events(100ms) > 0) + while (s->process_events(100ms) > 0) ; - std::cerr << "io_scheduler.size() before shutdown = " << s.size() << "\n"; - s.shutdown(); - std::cerr << "io_scheduler.size() 
after shutdown = " << s.size() << "\n"; - REQUIRE(s.empty()); + std::cerr << "io_scheduler.size() before shutdown = " << s->size() << "\n"; + s->shutdown(); + std::cerr << "io_scheduler.size() after shutdown = " << s->size() << "\n"; + REQUIRE(s->empty()); close(trigger_fd); } TEST_CASE("io_scheduler manual process events inline", "[io_scheduler]") { - auto trigger_fd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK); - coro::io_scheduler s{coro::io_scheduler::options{ - .thread_strategy = coro::io_scheduler::thread_strategy_t::manual, - .execution_strategy = coro::io_scheduler::execution_strategy_t::process_tasks_inline}}; + auto trigger_fd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK); + auto s = coro::io_scheduler::make_shared(coro::io_scheduler::options{ + .thread_strategy = coro::io_scheduler::thread_strategy_t::manual, + .execution_strategy = coro::io_scheduler::execution_strategy_t::process_tasks_inline}); auto make_poll_read_task = [&]() -> coro::task { - std::cerr << "poll task start s.size() == " << s.size() << "\n"; - co_await s.schedule(); - std::cerr << "poll task polling s.size() == " << s.size() << "\n"; - auto status = co_await s.poll(trigger_fd, coro::poll_op::read); + std::cerr << "poll task start s.size() == " << s->size() << "\n"; + co_await s->schedule(); + std::cerr << "poll task polling s.size() == " << s->size() << "\n"; + auto status = co_await s->poll(trigger_fd, coro::poll_op::read); REQUIRE(status == coro::poll_status::event); - std::cerr << "poll task exiting s.size() == " << s.size() << "\n"; + std::cerr << "poll task exiting s.size() == " << s->size() << "\n"; co_return; }; auto make_poll_write_task = [&]() -> coro::task { - std::cerr << "write task start s.size() == " << s.size() << "\n"; - co_await s.schedule(); + std::cerr << "write task start s.size() == " << s->size() << "\n"; + co_await s->schedule(); uint64_t value{42}; - std::cerr << "write task writing s.size() == " << s.size() << "\n"; + std::cerr << "write task writing s.size() == " << s->size() << "\n"; auto unused = write(trigger_fd, &value, sizeof(value)); (void)unused; - std::cerr << "write task exiting s.size() == " << s.size() << "\n"; + std::cerr << "write task exiting s.size() == " << s->size() << "\n"; co_return; }; @@ -721,7 +707,7 @@ TEST_CASE("io_scheduler manual process events inline", "[io_scheduler]") // Now process them to completion. 
while (true) { - auto remaining = s.process_events(100ms); + auto remaining = s->process_events(100ms); std::cerr << "remaining " << remaining << "\n"; if (remaining == 0) { @@ -729,20 +715,21 @@ TEST_CASE("io_scheduler manual process events inline", "[io_scheduler]") } }; - std::cerr << "io_scheduler.size() before shutdown = " << s.size() << "\n"; - s.shutdown(); - std::cerr << "io_scheduler.size() after shutdown = " << s.size() << "\n"; - REQUIRE(s.empty()); + std::cerr << "io_scheduler.size() before shutdown = " << s->size() << "\n"; + s->shutdown(); + std::cerr << "io_scheduler.size() after shutdown = " << s->size() << "\n"; + REQUIRE(s->empty()); close(trigger_fd); } TEST_CASE("io_scheduler task throws", "[io_scheduler]") { - coro::io_scheduler s{coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}}; + auto s = coro::io_scheduler::make_shared( + coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}); auto func = [&]() -> coro::task { - co_await s.schedule(); + co_await s->schedule(); throw std::runtime_error{"I always throw."}; co_return 42; }; @@ -752,13 +739,14 @@ TEST_CASE("io_scheduler task throws", "[io_scheduler]") TEST_CASE("io_scheduler task throws after resume", "[io_scheduler]") { - coro::io_scheduler s{coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}}; + auto s = coro::io_scheduler::make_shared( + coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}); auto make_thrower = [&]() -> coro::task { - co_await s.schedule(); + co_await s->schedule(); std::cerr << "Throwing task is doing some work...\n"; - co_await s.yield(); + co_await s->yield(); throw std::runtime_error{"I always throw."}; co_return true; }; diff --git a/test/test_shared_mutex.cpp b/test/test_shared_mutex.cpp index af2167a0..8f9b64be 100644 --- a/test/test_shared_mutex.cpp +++ b/test/test_shared_mutex.cpp @@ -83,7 +83,7 @@ TEST_CASE("mutex single waiter not locked shared", "[shared_mutex]") #ifdef LIBCORO_FEATURE_NETWORKING TEST_CASE("mutex many shared and exclusive waiters interleaved", "[shared_mutex]") { - auto tp = std::make_shared( + auto tp = coro::io_scheduler::make_shared( coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 8}}); coro::shared_mutex m{tp}; diff --git a/test/test_thread_pool.cpp b/test/test_thread_pool.cpp index 434a0902..d0ebf2b1 100644 --- a/test/test_thread_pool.cpp +++ b/test/test_thread_pool.cpp @@ -199,4 +199,36 @@ TEST_CASE("thread_pool event jump threads", "[thread_pool]") }; coro::sync_wait(coro::when_all(make_tp1_task(), make_tp2_task())); +} + +TEST_CASE("thread_pool high cpu usage when threadcount is greater than the number of tasks", "[thread_pool]") +{ + // https://github.com/jbaldwin/libcoro/issues/262 + // This test doesn't really trigger any error conditions but was reported via + // an issue that the thread_pool threads not doing work would spin on the CPU + // if there were less tasks running than threads in the pool. + // This was due to using m_size instead of m_queue.size() causing the threads + // that had no work to go into a spin trying to acquire work. 
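+    //
+    // A simplified model of that difference (member names here follow the
+    // wording above and are illustrative; the real pool internals may
+    // differ):
+    //
+    //   // buggy: m_size also counts tasks currently running on other
+    //   // threads, so an idle worker believes work exists and spins:
+    //   while (m_size.load() > 0) { /* pop from an empty queue, repeat */ }
+    //
+    //   // fixed: block on the queue itself until work actually arrives:
+    //   m_cv.wait(lk, [&] { return !m_queue.empty() || m_shutdown; });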
+ + auto sleep_for_task = [](std::chrono::seconds duration) -> coro::task + { + std::this_thread::sleep_for(duration); + co_return duration.count(); + }; + + auto wait_for_task = [&](coro::thread_pool& pool, std::chrono::seconds delay) -> coro::task<> + { + co_await pool.schedule(); + for (int i = 0; i < 5; ++i) + { + co_await sleep_for_task(delay); + std::cout << std::chrono::system_clock::now().time_since_epoch().count() << " wait for " << delay.count() + << "seconds\n"; + } + co_return; + }; + + coro::thread_pool pool{coro::thread_pool::options{.thread_count = 3}}; + coro::sync_wait( + coro::when_all(wait_for_task(pool, std::chrono::seconds{1}), wait_for_task(pool, std::chrono::seconds{3}))); } \ No newline at end of file From b698069b75206584bd2191188008177a3bdef7ed Mon Sep 17 00:00:00 2001 From: Josh Baldwin Date: Fri, 2 Aug 2024 11:31:31 -0600 Subject: [PATCH 08/24] Upgrade supported opensuse/leap:15.6 (#275) 15.2 is having issues with nodejs and glibc /__e/node20/bin/node: /lib64/libm.so.6: version `GLIBC_2.27' not found (required by /__e/node20/bin/node Closes #274 --- .githooks/readme-template.md | 2 +- .github/workflows/ci-opensuse.yml | 2 +- README.md | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.githooks/readme-template.md b/.githooks/readme-template.md index 3f6d796a..673fe343 100644 --- a/.githooks/readme-template.md +++ b/.githooks/readme-template.md @@ -433,7 +433,7 @@ Transfer/sec: 18.33MB * ubuntu:20.04, 22.04 * fedora:32-40 - * openSUSE/leap:15.2 + * openSUSE/leap:15.6 * Windows 2022 * Emscripten 3.1.45 * MacOS 12 diff --git a/.github/workflows/ci-opensuse.yml b/.github/workflows/ci-opensuse.yml index 1748f16a..7ae079ea 100644 --- a/.github/workflows/ci-opensuse.yml +++ b/.github/workflows/ci-opensuse.yml @@ -12,7 +12,7 @@ jobs: cxx_standard: [20] libcoro_feature_networking: [ {enabled: ON, tls: ON} ] container: - image: opensuse/leap:15.2 + image: opensuse/leap:15.6 steps: - name: zypper run: | diff --git a/README.md b/README.md index d6719952..4db94af6 100644 --- a/README.md +++ b/README.md @@ -1222,7 +1222,7 @@ Transfer/sec: 18.33MB * ubuntu:20.04, 22.04 * fedora:32-40 - * openSUSE/leap:15.2 + * openSUSE/leap:15.6 * Windows 2022 * Emscripten 3.1.45 * MacOS 12 From ee02dc8bd526ec741750f1bd8c4f327a145434f7 Mon Sep 17 00:00:00 2001 From: Josh Baldwin Date: Fri, 2 Aug 2024 11:51:42 -0600 Subject: [PATCH 09/24] Use lock for sync_wait completion (#272) * Use lock for sync_wait completion * release/acquire memory ordering has a race condition * also reproduced on seq_cst * requiring a lock around the std::condition_variable to properly and always wake up the waiting sync_wait thread, this is necessary for correctness over speed Closes #270 --- include/coro/sync_wait.hpp | 2 +- src/sync_wait.cpp | 9 +++++--- test/test_sync_wait.cpp | 46 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 53 insertions(+), 4 deletions(-) diff --git a/include/coro/sync_wait.hpp b/include/coro/sync_wait.hpp index 5a20def4..b403e953 100644 --- a/include/coro/sync_wait.hpp +++ b/include/coro/sync_wait.hpp @@ -328,7 +328,7 @@ auto sync_wait(awaitable_type&& a) -> decltype(auto) // For non-trivial types (or possibly types that don't fit in a register) // the compiler will end up calling the ~return_type() when the promise // is destructed at the end of sync_wait(). 
This causes the return_type - // object to also be destructed causingn the final return/move from + // object to also be destructed causing the final return/move from // sync_wait() to be a 'use after free' bug. To work around this the result // must be moved off the promise object before the promise is destructed. // Other solutions could be heap allocating the return_type but that has diff --git a/src/sync_wait.cpp b/src/sync_wait.cpp index e610ad25..509aff1c 100644 --- a/src/sync_wait.cpp +++ b/src/sync_wait.cpp @@ -8,19 +8,22 @@ sync_wait_event::sync_wait_event(bool initially_set) : m_set(initially_set) auto sync_wait_event::set() noexcept -> void { - m_set.exchange(true, std::memory_order::release); + // issue-270 100~ task's on a thread_pool within sync_wait(when_all(tasks)) can cause a deadlock/hang if using + // release/acquire or even seq_cst. + m_set.exchange(true, std::memory_order::seq_cst); + std::unique_lock lk{m_mutex}; m_cv.notify_all(); } auto sync_wait_event::reset() noexcept -> void { - m_set.exchange(false, std::memory_order::release); + m_set.exchange(false, std::memory_order::seq_cst); } auto sync_wait_event::wait() noexcept -> void { std::unique_lock lk{m_mutex}; - m_cv.wait(lk, [this] { return m_set.load(std::memory_order::acquire); }); + m_cv.wait(lk, [this] { return m_set.load(std::memory_order::seq_cst); }); } } // namespace coro::detail diff --git a/test/test_sync_wait.cpp b/test/test_sync_wait.cpp index 109956da..2099d2c1 100644 --- a/test/test_sync_wait.cpp +++ b/test/test_sync_wait.cpp @@ -3,6 +3,8 @@ #include #include +#include +#include TEST_CASE("sync_wait simple integer return", "[sync_wait]") { @@ -62,3 +64,47 @@ TEST_CASE("sync_wait task that throws", "[sync_wait]") REQUIRE_THROWS(coro::sync_wait(f())); } + +TEST_CASE("sync_wait very rarely hangs issue-270", "[sync_wait]") +{ + coro::thread_pool tp{}; + + const int ITERATIONS = 100; + + std::unordered_set data{}; + data.reserve(ITERATIONS); + + std::random_device dev; + std::mt19937 rng(dev()); + std::uniform_int_distribution dist(0, ITERATIONS); + + for (int i = 0; i < ITERATIONS; ++i) + { + data.insert(dist(rng)); + } + + std::atomic count{0}; + + auto make_task = [&](int i) -> coro::task + { + co_await tp.schedule(); + + if (data.find(i) != data.end()) + { + count.fetch_add(1); + } + + co_return; + }; + + std::vector> tasks{}; + tasks.reserve(ITERATIONS); + for (int i = 0; i < ITERATIONS; ++i) + { + tasks.emplace_back(make_task(i)); + } + + coro::sync_wait(coro::when_all(std::move(tasks))); + + REQUIRE(count > 0); +} From e6cc25b7ebb93abc105ae605600507f03c1c9890 Mon Sep 17 00:00:00 2001 From: Josh Baldwin Date: Fri, 15 Nov 2024 08:22:47 -0700 Subject: [PATCH 10/24] Upgrade emscripten build (#282) Currently failing due to nodejs dependencies: Setting up EMSDK environment (suppress these messages with EMSDK_QUIET=1) Adding directories to PATH: PATH += /__w/libcoro/libcoro/emsdk PATH += /__w/libcoro/libcoro/emsdk/upstream/emscripten PATH += /__w/libcoro/libcoro/emsdk/node/20.18.0_64bit/bin Setting environment variables: PATH = /__w/libcoro/libcoro/emsdk:/__w/libcoro/libcoro/emsdk/upstream/emscripten:/__w/libcoro/libcoro/emsdk/node/20.18.0_64bit/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin EMSDK = /__w/libcoro/libcoro/emsdk EMSDK_NODE = /__w/libcoro/libcoro/emsdk/node/20.18.0_64bit/bin/node node: bad option: --experimental-wasm-eh --- .github/workflows/ci-emscripten.yml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci-emscripten.yml 
b/.github/workflows/ci-emscripten.yml index 6b98565f..7b09a79f 100644 --- a/.github/workflows/ci-emscripten.yml +++ b/.github/workflows/ci-emscripten.yml @@ -21,7 +21,8 @@ jobs: apt-get install -y \ cmake \ git \ - ninja-build + ninja-build \ + nodejs - name: Checkout uses: actions/checkout@v4 with: @@ -50,4 +51,4 @@ jobs: cd emsdk . ./emsdk_env.sh cd ../Release - node --experimental-wasm-eh ./test/libcoro_test.js + node --experimental-wasm-threads --experimental-wasm-bulk-memory ./test/libcoro_test.js From 86740cc7e6e0821ff426415295be8ac429e93884 Mon Sep 17 00:00:00 2001 From: Josh Baldwin Date: Fri, 15 Nov 2024 08:42:43 -0700 Subject: [PATCH 11/24] Add test for when_all throwing on individual tasks. (#281) The original behavior for coro::when_all was to capture the T result or an exception and then when the user iterates all of the completed tasks the task.return_value() would either return the result or re-throw the exception for that specific task. --- test/test_when_all.cpp | 35 +++++++++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/test/test_when_all.cpp b/test/test_when_all.cpp index 6ead3221..9a5caea4 100644 --- a/test/test_when_all.cpp +++ b/test/test_when_all.cpp @@ -170,3 +170,38 @@ TEST_CASE("when_all use std::ranges::view", "[when_all]") auto result = coro::sync_wait(make_runner_task()); REQUIRE(result == (1 + 2 + 3)); } + +TEST_CASE("when_all each task throws", "[when_all]") +{ + coro::thread_pool tp{}; + + auto make_task = [&](uint64_t i) -> coro::task + { + co_await tp.schedule(); + if (i % 2 == 0) + { + throw std::runtime_error{std::to_string(i)}; + } + co_return i; + }; + + std::vector> tasks; + for (auto i = 1; i <= 4; ++i) + { + tasks.emplace_back(make_task(i)); + } + + auto output_tasks = coro::sync_wait(coro::when_all(std::move(tasks))); + for (auto i = 1; i <= 4; ++i) + { + auto& task = output_tasks.at(i - 1); + if (i % 2 == 0) + { + REQUIRE_THROWS(task.return_value()); + } + else + { + REQUIRE((int)task.return_value() == i); + } + } +} From 26de94ded492938c8f235d78e7030c9257fdea09 Mon Sep 17 00:00:00 2001 From: Josh Baldwin Date: Wed, 29 Jan 2025 12:22:32 -0700 Subject: [PATCH 12/24] Disable mac ci builds (#290) I don't have a mac to test on and I've been unable to get the CI jobs to work.. debugging the github actions isn't straightforward. These jobs will be disabled until someone can provide a working CI job script. 
Closes #289 --- .github/workflows/ci-macos.yml | 86 +++++++++++++++++----------------- 1 file changed, 43 insertions(+), 43 deletions(-) diff --git a/.github/workflows/ci-macos.yml b/.github/workflows/ci-macos.yml index 30662e56..003b1c7b 100644 --- a/.github/workflows/ci-macos.yml +++ b/.github/workflows/ci-macos.yml @@ -1,45 +1,45 @@ -name: ci-macos +# name: ci-macos -on: [pull_request, workflow_dispatch] +# on: [pull_request, workflow_dispatch] -jobs: - macos: - name: macos-12 - runs-on: macos-12 - strategy: - fail-fast: false - matrix: - clang_version: [17] - cxx_standard: [20, 23] - libcoro_feature_networking: [ {enabled: OFF, tls: OFF} ] - libcoro_build_shared_libs: [OFF, ON] - steps: - - name: Install Dependencies - run: | - brew update - brew install llvm@${{ matrix.clang_version }} - brew install ninja - - name: Checkout - uses: actions/checkout@v4 - with: - submodules: recursive - - name: Release - run: | - brew --prefix llvm@17 - mkdir Release - cd Release - cmake \ - -GNinja \ - -DCMAKE_BUILD_TYPE=Release \ - -DCMAKE_C_COMPILER=$(brew --prefix llvm@${{ matrix.clang_version }})/bin/clang-${{ matrix.clang_version }} \ - -DCMAKE_CXX_COMPILER=$(brew --prefix llvm@${{ matrix.clang_version }})/bin/clang-${{ matrix.clang_version }} \ - -DCMAKE_CXX_STANDARD=${{ matrix.cxx_standard }} \ - -DLIBCORO_FEATURE_NETWORKING=${{ matrix.libcoro_feature_networking.enabled }} \ - -DLIBCORO_FEATURE_TLS=${{ matrix.libcoro_feature_networking.tls }} \ - -DLIBCORO_BUILD_SHARED_LIBS=${{ matrix.libcoro_build_shared_libs }} \ - .. - cmake --build . --config Release - - name: Test - run: | - cd Release - ctest --build-config Release -VV +# jobs: +# macos: +# name: macos-12 +# runs-on: macos-12 +# strategy: +# fail-fast: false +# matrix: +# clang_version: [17] +# cxx_standard: [20, 23] +# libcoro_feature_networking: [ {enabled: OFF, tls: OFF} ] +# libcoro_build_shared_libs: [OFF, ON] +# steps: +# - name: Install Dependencies +# run: | +# brew update +# brew install llvm@${{ matrix.clang_version }} +# brew install ninja +# - name: Checkout +# uses: actions/checkout@v4 +# with: +# submodules: recursive +# - name: Release +# run: | +# brew --prefix llvm@17 +# mkdir Release +# cd Release +# cmake \ +# -GNinja \ +# -DCMAKE_BUILD_TYPE=Release \ +# -DCMAKE_C_COMPILER=$(brew --prefix llvm@${{ matrix.clang_version }})/bin/clang-${{ matrix.clang_version }} \ +# -DCMAKE_CXX_COMPILER=$(brew --prefix llvm@${{ matrix.clang_version }})/bin/clang-${{ matrix.clang_version }} \ +# -DCMAKE_CXX_STANDARD=${{ matrix.cxx_standard }} \ +# -DLIBCORO_FEATURE_NETWORKING=${{ matrix.libcoro_feature_networking.enabled }} \ +# -DLIBCORO_FEATURE_TLS=${{ matrix.libcoro_feature_networking.tls }} \ +# -DLIBCORO_BUILD_SHARED_LIBS=${{ matrix.libcoro_build_shared_libs }} \ +# .. +# cmake --build . --config Release +# - name: Test +# run: | +# cd Release +# ctest --build-config Release -VV From 49333e720ec48ea7a9080ef9da97c4c9a436a177 Mon Sep 17 00:00:00 2001 From: Josh Baldwin Date: Thu, 30 Jan 2025 08:02:47 -0700 Subject: [PATCH 13/24] coro::task_container gc fix not completing coroutines (#288) * coro::task_container gc fix not completing coroutines The coro::task_container::gc_internal function was deleting coroutines when marked as .done(), however there is some mechanism that rarely would cause the user_task coroutine to not actually execute. 
I'm still not sure exactly why this is the case but: 1) Simply disabling gc_internal() would stop the problem 2) Running gc_internal() and moving the coro::task to a 'dead' list still caused the issue. With these in mind I spent time re-reading the specification on the final_suspend and determined that coro::task_container should be a thing atomic counter to track the submitted coroutines and have them self delete. The self deletion is now done via a coro::detail::task_self_destroying coroutine type that takes advantage of the promise's final_suspend() not suspending. The spec states that if this doesn't suspend then the coroutine will call destroy() on itself. Closes #287 --- CMakeLists.txt | 1 + examples/coro_task_container.cpp | 2 +- include/coro/detail/task_self_deleting.hpp | 66 +++++++ include/coro/io_scheduler.hpp | 14 +- include/coro/task_container.hpp | 200 ++------------------- include/coro/thread_pool.hpp | 2 +- src/detail/task_self_deleting.cpp | 98 ++++++++++ src/io_scheduler.cpp | 10 +- src/thread_pool.cpp | 18 +- test/bench.cpp | 11 ++ test/test_io_scheduler.cpp | 26 ++- test/test_thread_pool.cpp | 28 ++- 12 files changed, 265 insertions(+), 211 deletions(-) create mode 100644 include/coro/detail/task_self_deleting.hpp create mode 100644 src/detail/task_self_deleting.cpp diff --git a/CMakeLists.txt b/CMakeLists.txt index 226fd6c8..0cc35d3f 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -76,6 +76,7 @@ set(LIBCORO_SOURCE_FILES include/coro/concepts/promise.hpp include/coro/concepts/range_of.hpp + include/coro/detail/task_self_deleting.hpp / src/detail/task_self_deleting.cpp include/coro/detail/void_value.hpp include/coro/attribute.hpp diff --git a/examples/coro_task_container.cpp b/examples/coro_task_container.cpp index 7ae29c55..800d1ae4 100644 --- a/examples/coro_task_container.cpp +++ b/examples/coro_task_container.cpp @@ -55,7 +55,7 @@ int main() tc.start(serve_client(std::move(client))); // Wait for all clients to complete before shutting down the tcp::server. - co_await tc.garbage_collect_and_yield_until_empty(); + co_await tc.yield_until_empty(); co_return; }; diff --git a/include/coro/detail/task_self_deleting.hpp b/include/coro/detail/task_self_deleting.hpp new file mode 100644 index 00000000..4c8ead9b --- /dev/null +++ b/include/coro/detail/task_self_deleting.hpp @@ -0,0 +1,66 @@ +#pragma once + +#include +#include +#include + +namespace coro::detail +{ + +class task_self_deleting; + +class promise_self_deleting +{ +public: + promise_self_deleting(); + ~promise_self_deleting(); + + promise_self_deleting(const promise_self_deleting&) = delete; + promise_self_deleting(promise_self_deleting&&); + auto operator=(const promise_self_deleting&) -> promise_self_deleting& = delete; + auto operator=(promise_self_deleting&&) -> promise_self_deleting&; + + auto get_return_object() -> task_self_deleting; + auto initial_suspend() -> std::suspend_always; + auto final_suspend() noexcept -> std::suspend_never; + auto return_void() noexcept -> void; + auto unhandled_exception() -> void; + + auto task_container_size(std::atomic& task_container_size) -> void; +private: + /** + * The coro::task_container m_size member to decrement upon the coroutine completing. + */ + std::atomic* m_task_container_size{nullptr}; +}; + +/** + * This task will self delete upon completing. This is useful for usecase that the lifetime of the + * coroutine cannot be determined and it needs to 'self' delete. 
This is achieved by returning + * std::suspend_never from the promise::final_suspend which then based on the spec tells the + * coroutine to delete itself. This means any classes that use this task cannot have owning + * pointers or relationships to this class and must not use it past its completion. + * + * This class is currently only used by coro::task_container and will decrement its + * m_size internal count when the coroutine completes. + */ +class task_self_deleting +{ +public: + using promise_type = promise_self_deleting; + + explicit task_self_deleting(promise_self_deleting& promise); + ~task_self_deleting(); + + task_self_deleting(const task_self_deleting&) = delete; + task_self_deleting(task_self_deleting&&); + auto operator=(const task_self_deleting&) -> task_self_deleting& = delete; + auto operator=(task_self_deleting&&) -> task_self_deleting&; + + auto promise() -> promise_self_deleting& { return *m_promise; } + auto handle() -> std::coroutine_handle { return std::coroutine_handle::from_promise(*m_promise); } +private: + promise_self_deleting* m_promise{nullptr}; +}; + +} // namespace coro::detail diff --git a/include/coro/io_scheduler.hpp b/include/coro/io_scheduler.hpp index db48b7c7..6f9ca7ad 100644 --- a/include/coro/io_scheduler.hpp +++ b/include/coro/io_scheduler.hpp @@ -177,8 +177,10 @@ class io_scheduler : public std::enable_shared_from_this * longer have control over the scheduled task. * @param task The task to execute on this io_scheduler. It's lifetime ownership will be transferred * to this io_scheduler. + * @return True if the task was succesfully scheduled onto the io_scheduler. This can fail if the task + * is already completed or does not contain a valid coroutine anymore. */ - auto schedule(coro::task&& task) -> void; + auto schedule(coro::task&& task) -> bool; /** * Schedules the current task to run after the given amount of time has elapsed. @@ -247,7 +249,7 @@ class io_scheduler : public std::enable_shared_from_this */ auto resume(std::coroutine_handle<> handle) -> bool { - if (handle == nullptr) + if (handle == nullptr || handle.done()) { return false; } @@ -259,6 +261,7 @@ class io_scheduler : public std::enable_shared_from_this if (m_opts.execution_strategy == execution_strategy_t::process_tasks_inline) { + m_size.fetch_add(1, std::memory_order::release); { std::scoped_lock lk{m_scheduled_tasks_mutex}; m_scheduled_tasks.emplace_back(handle); @@ -309,13 +312,6 @@ class io_scheduler : public std::enable_shared_from_this */ auto shutdown() noexcept -> void; - /** - * Scans for completed coroutines and destroys them freeing up resources. This is also done on starting - * new tasks but this allows the user to cleanup resources manually. One usage might be making sure fds - * are cleaned up as soon as possible. - */ - auto garbage_collect() noexcept -> void; - private: /// The configuration options. options m_opts; diff --git a/include/coro/task_container.hpp b/include/coro/task_container.hpp index 373ea900..5af5c460 100644 --- a/include/coro/task_container.hpp +++ b/include/coro/task_container.hpp @@ -2,6 +2,7 @@ #include "coro/attribute.hpp" #include "coro/concepts/executor.hpp" +#include "coro/detail/task_self_deleting.hpp" #include "coro/task.hpp" #include @@ -9,6 +10,7 @@ #include #include #include +#include #include #include @@ -16,34 +18,24 @@ namespace coro { class io_scheduler; + template class task_container { public: - struct options - { - /// The number of task spots to reserve space for upon creating the container. 
- std::size_t reserve_size{8}; - /// The growth factor for task space in the container when capacity is full. - double growth_factor{2}; - }; - /** * @param e Tasks started in the container are scheduled onto this executor. For tasks created * from a coro::io_scheduler, this would usually be that coro::io_scheduler instance. * @param opts Task container options. */ task_container( - std::shared_ptr e, const options opts = options{.reserve_size = 8, .growth_factor = 2}) - : m_growth_factor(opts.growth_factor), - m_executor(std::move(e)) + std::shared_ptr e) + : m_executor(std::move(e)) { if (m_executor == nullptr) { throw std::runtime_error{"task_container cannot have a nullptr executor"}; } - - init(opts.reserve_size); } task_container(const task_container&) = delete; task_container(task_container&&) = delete; @@ -54,86 +46,37 @@ class task_container // This will hang the current thread.. but if tasks are not complete thats also pretty bad. while (!empty()) { - garbage_collect(); + // Sleep a bit so the cpu doesn't totally churn. + std::this_thread::sleep_for(std::chrono::milliseconds{10}); } } - enum class garbage_collect_t - { - /// Execute garbage collection. - yes, - /// Do not execute garbage collection. - no - }; - /** * Stores a user task and starts its execution on the container's thread pool. * @param user_task The scheduled user's task to store in this task container and start its execution. - * @param cleanup Should the task container run garbage collect at the beginning of this store - * call? Calling at regular intervals will reduce memory usage of completed - * tasks and allow for the task container to re-use allocated space. + * @return True if the task was succesfully started into the task container. This can fail if the task + * is already completed or does not contain a valid coroutine anymore. */ - auto start(coro::task&& user_task, garbage_collect_t cleanup = garbage_collect_t::yes) -> void + auto start(coro::task&& user_task) -> bool { m_size.fetch_add(1, std::memory_order::relaxed); - std::size_t index{}; - { - std::unique_lock lk{m_mutex}; - - if (cleanup == garbage_collect_t::yes) - { - gc_internal(); - } - - // Only grow if completely full and attempting to add more. - if (m_free_task_indices.empty()) - { - grow(); - } - - // Reserve a free task index - index = m_free_task_indices.front(); - m_free_task_indices.pop(); - } - - // Store the task inside a cleanup task for self deletion. - m_tasks[index] = make_cleanup_task(std::move(user_task), index); - - // Start executing from the cleanup task to schedule the user's task onto the thread pool. - m_tasks[index].resume(); - } - - /** - * Garbage collects any tasks that are marked as deleted. This frees up space to be re-used by - * the task container for newly stored tasks. - * @return The number of tasks that were deleted. - */ - auto garbage_collect() -> std::size_t __ATTRIBUTE__(used) - { - std::scoped_lock lk{m_mutex}; - return gc_internal(); + auto task = make_self_deleting_task(std::move(user_task)); + // Hook the promise to decrement the size upon its self deletion of the coroutine frame. + task.promise().task_container_size(m_size); + return m_executor->resume(task.handle()); } /** * @return The number of active tasks in the container. */ - auto size() const -> std::size_t { return m_size.load(std::memory_order::relaxed); } + auto size() const -> std::size_t { return m_size.load(std::memory_order::acquire); } /** * @return True if there are no active tasks in the container. 
*/ auto empty() const -> bool { return size() == 0; } - /** - * @return The capacity of this task manager before it will need to grow in size. - */ - auto capacity() const -> std::size_t - { - std::atomic_thread_fence(std::memory_order::acquire); - return m_tasks.size(); - } - /** * Will continue to garbage collect and yield until all tasks are complete. This method can be * co_await'ed to make it easier to wait for the task container to have all its tasks complete. @@ -141,132 +84,25 @@ class task_container * This does not shut down the task container, but can be used when shutting down, or if your * logic requires all the tasks contained within to complete, it is similar to coro::latch. */ - auto garbage_collect_and_yield_until_empty() -> coro::task + auto yield_until_empty() -> coro::task { while (!empty()) { - garbage_collect(); co_await m_executor->yield(); } } private: - /** - * Grows each task container by the growth factor. - * @return The position of the free index after growing. - */ - auto grow() -> void + auto make_self_deleting_task(task user_task) -> detail::task_self_deleting { - // Save an index at the current last item. - std::size_t new_size = m_tasks.size() * m_growth_factor; - for (std::size_t i = m_tasks.size(); i < new_size; ++i) - { - m_free_task_indices.emplace(i); - } - m_tasks.resize(new_size); - } - - /** - * Internal GC call, expects the public function to lock. - */ - auto gc_internal() -> std::size_t - { - std::size_t deleted{0}; - auto pos = std::begin(m_tasks_to_delete); - while (pos != std::end(m_tasks_to_delete)) - { - // Skip tasks that are still running or have yet to start. - if (!m_tasks[*pos].is_ready()) - { - pos++; - continue; - } - // Destroy the cleanup task. - m_tasks[*pos].destroy(); - // Put the deleted position at the end of the free indexes list. - m_free_task_indices.emplace(*pos); - // Remove index from tasks to delete - m_tasks_to_delete.erase(pos++); - // Indicate a task was deleted. - ++deleted; - } - m_size.fetch_sub(deleted, std::memory_order::relaxed); - return deleted; - } - - /** - * Encapsulate the users tasks in a cleanup task which marks itself for deletion upon - * completion. Simply co_await the users task until its completed and then mark the given - * position within the task manager as being deletable. The scheduler's next iteration - * in its event loop will then free that position up to be re-used. - * - * This function will also unconditionally catch all unhandled exceptions by the user's - * task to prevent the scheduler from throwing exceptions. - * @param user_task The user's task. - * @param index The index where the task data will be stored in the task manager. - * @return The user's task wrapped in a self cleanup task. - */ - auto make_cleanup_task(task user_task, std::size_t index) -> coro::task - { - // Immediately move the task onto the executor. - co_await m_executor->schedule(); - - try - { - // Await the users task to complete. - co_await user_task; - } - catch (const std::exception& e) - { - // TODO: what would be a good way to report this to the user...? Catching here is required - // since the co_await will unwrap the unhandled exception on the task. - // The user's task should ideally be wrapped in a catch all and handle it themselves, but - // that cannot be guaranteed. - std::cerr << "coro::task_container user_task had an unhandled exception e.what()= " << e.what() << "\n"; - } - catch (...) 
- { - // don't crash if they throw something that isn't derived from std::exception - std::cerr << "coro::task_container user_task had unhandle exception, not derived from std::exception.\n"; - } - - // Destroy the user task since it is complete. This is important to do so outside the lock - // since the user could schedule a new task from the destructor (tls::client does this interanlly) - // causing a deadlock. - user_task.destroy(); - - { - std::scoped_lock lk{m_mutex}; - m_tasks_to_delete.emplace_back(index); - } - + co_await user_task; co_return; } - /// Mutex for safely mutating the task containers across threads, expected usage is within - /// thread pools for indeterminate lifetime requests. - std::mutex m_mutex{}; /// The number of alive tasks. std::atomic m_size{}; - /// Maintains the lifetime of the tasks until they are completed. - std::vector> m_tasks{}; - /// The full set of free indicies into `m_tasks`. - std::queue m_free_task_indices{}; - /// The set of tasks that have completed and need to be deleted. - std::list m_tasks_to_delete{}; - /// The amount to grow the containers by when all spaces are taken. - double m_growth_factor{}; /// The executor to schedule tasks that have just started. std::shared_ptr m_executor{nullptr}; - - auto init(std::size_t reserve_size) -> void - { - m_tasks.resize(reserve_size); - for (std::size_t i = 0; i < reserve_size; ++i) - { - m_free_task_indices.emplace(i); - } - } }; } // namespace coro diff --git a/include/coro/thread_pool.hpp b/include/coro/thread_pool.hpp index c8e8c0bf..dbc663d5 100644 --- a/include/coro/thread_pool.hpp +++ b/include/coro/thread_pool.hpp @@ -134,7 +134,7 @@ class thread_pool /** * Schedules any coroutine handle that is ready to be resumed. * @param handle The coroutine handle to schedule. - * @return True if the coroutine is resumed, false if its a nullptr. + * @return True if the coroutine is resumed, false if its a nullptr or the coroutine is already done. */ auto resume(std::coroutine_handle<> handle) noexcept -> bool; diff --git a/src/detail/task_self_deleting.cpp b/src/detail/task_self_deleting.cpp new file mode 100644 index 00000000..9b6cb5aa --- /dev/null +++ b/src/detail/task_self_deleting.cpp @@ -0,0 +1,98 @@ +#include "coro/detail/task_self_deleting.hpp" + +#include + +namespace coro::detail +{ + +promise_self_deleting::promise_self_deleting() +{ + (void)m_task_container_size; // make codacy happy +} + +promise_self_deleting::~promise_self_deleting() +{ + +} + +promise_self_deleting::promise_self_deleting(promise_self_deleting&& other) + : m_task_container_size(std::exchange(other.m_task_container_size, nullptr)) +{ + +} + +auto promise_self_deleting::operator=(promise_self_deleting&& other) -> promise_self_deleting& +{ + if (std::addressof(other) != nullptr) + { + m_task_container_size = std::exchange(other.m_task_container_size, nullptr); + } + + return *this; +} + +auto promise_self_deleting::get_return_object() -> task_self_deleting +{ + return task_self_deleting{*this}; +} + +auto promise_self_deleting::initial_suspend() -> std::suspend_always +{ + return std::suspend_always{}; +} + +auto promise_self_deleting::final_suspend() noexcept -> std::suspend_never +{ + // Notify the task_container that this coroutine has completed. + if (m_task_container_size != nullptr) + { + m_task_container_size->fetch_sub(1); + } + + // By not suspending this lets the coroutine destroy itself. 
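+    // (C++20 coroutine semantics: when the awaiter returned from
+    // final_suspend() does not suspend, execution runs off the end of the
+    // coroutine and the coroutine state is destroyed as if destroy() had
+    // been called on its handle, which is why nothing may own or touch
+    // this task after it completes.)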
+ return std::suspend_never{}; +} + +auto promise_self_deleting::return_void() noexcept -> void +{ + // no-op +} + +auto promise_self_deleting::unhandled_exception() -> void +{ + // The user cannot access the promise anyways, ignore the exception. +} + +auto promise_self_deleting::task_container_size(std::atomic& task_container_size) -> void +{ + m_task_container_size = &task_container_size; +} + +task_self_deleting::task_self_deleting(promise_self_deleting& promise) + : m_promise(&promise) +{ + +} + +task_self_deleting::~task_self_deleting() +{ + +} + +task_self_deleting::task_self_deleting(task_self_deleting&& other) + : m_promise(other.m_promise) +{ + +} + +auto task_self_deleting::operator=(task_self_deleting&& other) -> task_self_deleting& +{ + if (std::addressof(other) != this) + { + m_promise = other.m_promise; + } + + return *this; +} + +} // namespace coro::detail diff --git a/src/io_scheduler.cpp b/src/io_scheduler.cpp index 7b7f3b9a..7c1cb370 100644 --- a/src/io_scheduler.cpp +++ b/src/io_scheduler.cpp @@ -95,10 +95,10 @@ auto io_scheduler::process_events(std::chrono::milliseconds timeout) -> std::siz return size(); } -auto io_scheduler::schedule(coro::task&& task) -> void +auto io_scheduler::schedule(coro::task&& task) -> bool { auto* ptr = static_cast*>(m_owned_tasks); - ptr->start(std::move(task)); + return ptr->start(std::move(task)); } auto io_scheduler::schedule_after(std::chrono::milliseconds amount) -> coro::task @@ -219,12 +219,6 @@ auto io_scheduler::shutdown() noexcept -> void } } -auto io_scheduler::garbage_collect() noexcept -> void -{ - auto* ptr = static_cast*>(m_owned_tasks); - ptr->garbage_collect(); -} - auto io_scheduler::process_events_manual(std::chrono::milliseconds timeout) -> void { bool expected{false}; diff --git a/src/thread_pool.cpp b/src/thread_pool.cpp index 3c33b0fe..1e6dbe57 100644 --- a/src/thread_pool.cpp +++ b/src/thread_pool.cpp @@ -1,7 +1,5 @@ #include "coro/thread_pool.hpp" -#include - namespace coro { thread_pool::operation::operation(thread_pool& tp) noexcept : m_thread_pool(tp) @@ -35,28 +33,32 @@ thread_pool::~thread_pool() auto thread_pool::schedule() -> operation { + m_size.fetch_add(1, std::memory_order::release); if (!m_shutdown_requested.load(std::memory_order::acquire)) { - m_size.fetch_add(1, std::memory_order::release); return operation{*this}; } - - throw std::runtime_error("coro::thread_pool is shutting down, unable to schedule new tasks."); + else + { + m_size.fetch_sub(1, std::memory_order::release); + throw std::runtime_error("coro::thread_pool is shutting down, unable to schedule new tasks."); + } } auto thread_pool::resume(std::coroutine_handle<> handle) noexcept -> bool { - if (handle == nullptr) + if (handle == nullptr || handle.done()) { return false; } + m_size.fetch_add(1, std::memory_order::release); if (m_shutdown_requested.load(std::memory_order::acquire)) { + m_size.fetch_sub(1, std::memory_order::release); return false; } - m_size.fetch_add(1, std::memory_order::release); schedule_impl(handle); return true; } @@ -138,7 +140,7 @@ auto thread_pool::executor(std::size_t idx) -> void auto thread_pool::schedule_impl(std::coroutine_handle<> handle) noexcept -> void { - if (handle == nullptr) + if (handle == nullptr || handle.done()) { return; } diff --git a/test/bench.cpp b/test/bench.cpp index f102fe26..7c85a74d 100644 --- a/test/bench.cpp +++ b/test/bench.cpp @@ -430,6 +430,7 @@ TEST_CASE("benchmark tcp::server echo server thread pool", "[benchmark]") std::cerr << "server co_await wait_for_clients\n"; co_await 
wait_for_clients; + std::cerr << "server co_return\n"; co_return; }; @@ -580,8 +581,10 @@ TEST_CASE("benchmark tcp::server echo server inline", "[benchmark]") } s.live_clients--; + std::cerr << "s.live_clients=" << s.live_clients << std::endl; if (s.live_clients == 0) { + std::cerr << "s.wait_for_clients.set()" << std::endl; s.wait_for_clients.set(); } co_return; @@ -611,7 +614,9 @@ TEST_CASE("benchmark tcp::server echo server inline", "[benchmark]") } } + std::cerr << "co_await s.wait_for_clients\n"; co_await s.wait_for_clients; + std::cerr << "make_server_task co_return\n"; co_return; }; @@ -671,8 +676,11 @@ TEST_CASE("benchmark tcp::server echo server inline", "[benchmark]") { server s{}; s.id = server_id++; + std::cerr << "coro::sync_wait(make_server_task(s));\n"; coro::sync_wait(make_server_task(s)); + std::cerr << "server.scheduler->shutdown()\n"; s.scheduler->shutdown(); + std::cerr << "server thread exiting\n"; }}); } @@ -695,8 +703,11 @@ TEST_CASE("benchmark tcp::server echo server inline", "[benchmark]") { c.tasks.emplace_back(make_client_task(c)); } + std::cerr << "coro::sync_wait(coro::when_all(std::move(c.tasks)));\n"; coro::sync_wait(coro::when_all(std::move(c.tasks))); + std::cerr << "client.scheduler->shutdown()\n"; c.scheduler->shutdown(); + std::cerr << "client thread exiting\n"; }}); } diff --git a/test/test_io_scheduler.cpp b/test/test_io_scheduler.cpp index 9e5d73cd..ff519bdf 100644 --- a/test/test_io_scheduler.cpp +++ b/test/test_io_scheduler.cpp @@ -752,4 +752,28 @@ TEST_CASE("io_scheduler task throws after resume", "[io_scheduler]") }; REQUIRE_THROWS(coro::sync_wait(make_thrower())); -} \ No newline at end of file +} + +TEST_CASE("issue-287", "[io_scheduler]") +{ + const int ITERATIONS = 200000; + + std::atomic g_count = 0; + auto scheduler = coro::io_scheduler::make_shared( + coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}); + + auto task = [](std::atomic& count) -> coro::task { + count++; + co_return; + }; + + for (int i = 0; i < ITERATIONS; ++i) + { + REQUIRE(scheduler->schedule(task(g_count))); + } + + scheduler->shutdown(); + + std::cerr << "g_count = \t" << g_count.load() << std::endl; + REQUIRE(g_count.load() == ITERATIONS); +} diff --git a/test/test_thread_pool.cpp b/test/test_thread_pool.cpp index d0ebf2b1..cf72be44 100644 --- a/test/test_thread_pool.cpp +++ b/test/test_thread_pool.cpp @@ -231,4 +231,30 @@ TEST_CASE("thread_pool high cpu usage when threadcount is greater than the numbe coro::thread_pool pool{coro::thread_pool::options{.thread_count = 3}}; coro::sync_wait( coro::when_all(wait_for_task(pool, std::chrono::seconds{1}), wait_for_task(pool, std::chrono::seconds{3}))); -} \ No newline at end of file +} + +TEST_CASE("issue-287", "[thread_pool]") +{ + const int ITERATIONS = 200000; + + std::atomic g_count = 0; + auto thread_pool = std::make_shared( + coro::thread_pool::options{.thread_count = 1} + ); + auto task_container = coro::task_container{thread_pool}; + + auto task = [](std::atomic& count) -> coro::task { + count++; + co_return; + }; + + for (int i = 0; i < ITERATIONS; ++i) + { + REQUIRE(task_container.start(task(g_count))); + } + + thread_pool->shutdown(); + + std::cerr << "g_count = \t" << g_count.load() << std::endl; + REQUIRE(g_count.load() == ITERATIONS); +} From cd4405c90d8dec4a44ffe73b6f996be4330be9b8 Mon Sep 17 00:00:00 2001 From: Josh Baldwin Date: Fri, 31 Jan 2025 11:56:41 -0700 Subject: [PATCH 14/24] Remove lambda captures (#294) * Remove lambda captures for coroutines Core guidlines point out 
that captures are only safe up until the first suspension point since the lambda object destructs at that point, so its recommended to remove them entirely. Closes #285 --- .githooks/readme-template.md | 13 +- README.md | 170 ++++++----- examples/coro_event.cpp | 6 +- examples/coro_io_scheduler.cpp | 6 +- examples/coro_mutex.cpp | 6 +- examples/coro_ring_buffer.cpp | 10 +- examples/coro_semaphore.cpp | 5 +- examples/coro_shared_mutex.cpp | 19 +- examples/coro_sync_wait.cpp | 9 +- examples/coro_task.cpp | 8 +- examples/coro_task_container.cpp | 6 +- examples/coro_thread_pool.cpp | 70 ++--- examples/coro_when_all.cpp | 8 +- include/coro/detail/task_self_deleting.hpp | 7 +- include/coro/task_container.hpp | 7 +- include/coro/when_all.hpp | 2 +- src/detail/task_self_deleting.cpp | 11 +- src/mutex.cpp | 2 +- test/bench.cpp | 339 +++++++++++---------- test/net/test_dns_resolver.cpp | 6 +- test/net/test_tcp_server.cpp | 27 +- test/net/test_tls_server.cpp | 15 +- test/net/test_udp_peers.cpp | 22 +- test/test_event.cpp | 54 ++-- test/test_generator.cpp | 13 +- test/test_io_scheduler.cpp | 181 ++++++----- test/test_latch.cpp | 20 +- test/test_mutex.cpp | 23 +- test/test_ring_buffer.cpp | 39 +-- test/test_semaphore.cpp | 37 +-- test/test_shared_mutex.cpp | 58 ++-- test/test_sync_wait.cpp | 22 +- test/test_task.cpp | 12 +- test/test_thread_pool.cpp | 27 +- test/test_when_all.cpp | 37 ++- 35 files changed, 711 insertions(+), 586 deletions(-) diff --git a/.githooks/readme-template.md b/.githooks/readme-template.md index 673fe343..4a638069 100644 --- a/.githooks/readme-template.md +++ b/.githooks/readme-template.md @@ -40,7 +40,7 @@ - coro::net::tls::client (OpenSSL) - coro::net::tls::server (OpenSSL) - coro::net::udp::peer -* +* * [Requirements](#requirements) * [Build Instructions](#build-instructions) * [Contributing](#contributing) @@ -51,6 +51,11 @@ ### A note on co_await and threads Its important to note with coroutines that _any_ `co_await` has the potential to switch the underyling thread that is executing the currently executing coroutine if the scheduler used has more than 1 thread. In general this shouldn't affect the way any user of the library would write code except for `thread_local`. Usage of `thread_local` should be extremely careful and _never_ used across any `co_await` boundary do to thread switching and work stealing on libcoro's schedulers. The only way this is safe is by using a `coro::thread_pool` with 1 thread or an inline `io_scheduler` which also only has 1 thread. +### A note on lambda captures (do not use them!) +[C++ Core Guidelines - CP.51: Do no use capturing lambdas that are coroutines](https://isocpp.github.io/CppCoreGuidelines/CppCoreGuidelines#Rcoro-capture) + +The recommendation is to not use lambda captures and instead pass any data into the coroutine via its function arguments to guarantee the argument lifetimes. Lambda captures will be destroyed at the coroutines first suspension point so if they are used past that point it will result in a use after free bug. + ### sync_wait The `sync_wait` construct is meant to be used outside of a coroutine context to block the calling thread until the coroutine has completed. The coroutine can be executed on the calling thread or scheduled on one of libcoro's schedulers. 
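The failure mode this new section warns about is worth seeing concretely. A minimal sketch of the bug and the recommended fix (illustrative only, not part of the patch):

```cpp
#include <coro/coro.hpp>

int main()
{
    coro::thread_pool tp{coro::thread_pool::options{.thread_count = 1}};

    int value{42};

    // BROKEN: the temporary lambda object owns the captures, and it is
    // destroyed at the end of this full expression while the coroutine is
    // still suspended, so touching `value` after the first co_await is a
    // use-after-free.
    auto broken = [&value, &tp]() -> coro::task<int>
    {
        co_await tp.schedule(); // first suspension point
        co_return value;        // capture is dangling by the time this runs
    }();

    // SAFE: arguments are bound into the coroutine frame, so they live
    // exactly as long as the coroutine does.
    auto safe = [](coro::thread_pool& tp, int value) -> coro::task<int>
    {
        co_await tp.schedule();
        co_return value;
    };

    auto result = coro::sync_wait(safe(tp, 42));
    (void)broken;
    return result == 42 ? 0 : 1;
}
```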
@@ -60,7 +65,7 @@ ${EXAMPLE_CORO_SYNC_WAIT} Expected output: ```bash -$ ./examples/coro_sync_wait +$ ./examples/coro_sync_wait Inline Result = 10 Offload Result = 20 ``` @@ -74,7 +79,7 @@ ${EXAMPLE_CORO_WHEN_ALL} Expected output: ```bash -$ ./examples/coro_when_all +$ ./examples/coro_when_all 2 4 6 @@ -520,7 +525,7 @@ If you open a PR for a bugfix or new feature please include tests to verify that File bug reports, feature requests and questions using [GitHub libcoro Issues](https://github.com/jbaldwin/libcoro/issues) -Copyright © 2020-2024 Josh Baldwin +Copyright © 2020-2025 Josh Baldwin [badge.language]: https://img.shields.io/badge/language-C%2B%2B20-yellow.svg [badge.license]: https://img.shields.io/badge/license-Apache--2.0-blue diff --git a/README.md b/README.md index 4db94af6..ce447edd 100644 --- a/README.md +++ b/README.md @@ -40,7 +40,7 @@ - coro::net::tls::client (OpenSSL) - coro::net::tls::server (OpenSSL) - coro::net::udp::peer -* +* * [Requirements](#requirements) * [Build Instructions](#build-instructions) * [Contributing](#contributing) @@ -51,6 +51,11 @@ ### A note on co_await and threads Its important to note with coroutines that _any_ `co_await` has the potential to switch the underyling thread that is executing the currently executing coroutine if the scheduler used has more than 1 thread. In general this shouldn't affect the way any user of the library would write code except for `thread_local`. Usage of `thread_local` should be extremely careful and _never_ used across any `co_await` boundary do to thread switching and work stealing on libcoro's schedulers. The only way this is safe is by using a `coro::thread_pool` with 1 thread or an inline `io_scheduler` which also only has 1 thread. +### A note on lambda captures (do not use them!) +[C++ Core Guidelines - CP.51: Do no use capturing lambdas that are coroutines](https://isocpp.github.io/CppCoreGuidelines/CppCoreGuidelines#Rcoro-capture) + +The recommendation is to not use lambda captures and instead pass any data into the coroutine via its function arguments to guarantee the argument lifetimes. Lambda captures will be destroyed at the coroutines first suspension point so if they are used past that point it will result in a use after free bug. + ### sync_wait The `sync_wait` construct is meant to be used outside of a coroutine context to block the calling thread until the coroutine has completed. The coroutine can be executed on the calling thread or scheduled on one of libcoro's schedulers. @@ -71,11 +76,12 @@ int main() std::cout << "Inline Result = " << result << "\n"; // We'll make a 1 thread coro::thread_pool to demonstrate offloading the task's - // execution to another thread. We'll capture the thread pool in the lambda, - // note that you will need to guarantee the thread pool outlives the coroutine. + // execution to another thread. We'll pass the thread pool as a parameter so + // the task can be scheduled. + // Note that you will need to guarantee the thread pool outlives the coroutine. coro::thread_pool tp{coro::thread_pool::options{.thread_count = 1}}; - auto make_task_offload = [&tp](uint64_t x) -> coro::task + auto make_task_offload = [](coro::thread_pool& tp, uint64_t x) -> coro::task { co_await tp.schedule(); // Schedules execution on the thread pool. co_return x + x; // This will execute on the thread pool. @@ -83,14 +89,14 @@ int main() // This will still block the calling thread, but it will now offload to the // coro::thread_pool since the coroutine task is immediately scheduled. 
- result = coro::sync_wait(make_task_offload(10)); + result = coro::sync_wait(make_task_offload(tp, 10)); std::cout << "Offload Result = " << result << "\n"; } ``` Expected output: ```bash -$ ./examples/coro_sync_wait +$ ./examples/coro_sync_wait Inline Result = 10 Offload Result = 20 ``` @@ -107,7 +113,7 @@ int main() // Create a thread pool to execute all the tasks in parallel. coro::thread_pool tp{coro::thread_pool::options{.thread_count = 4}}; // Create the task we want to invoke multiple times and execute in parallel on the thread pool. - auto twice = [&](uint64_t x) -> coro::task + auto twice = [](coro::thread_pool& tp, uint64_t x) -> coro::task { co_await tp.schedule(); // Schedule onto the thread pool. co_return x + x; // Executed on the thread pool. @@ -117,7 +123,7 @@ int main() std::vector> tasks{}; for (std::size_t i = 0; i < 5; ++i) { - tasks.emplace_back(twice(i + 1)); + tasks.emplace_back(twice(tp, i + 1)); } // Synchronously wait on this thread for the thread pool to finish executing all the tasks in parallel. @@ -136,7 +142,7 @@ int main() } // Use var args instead of a container as input to coro::when_all. - auto square = [&](double x) -> coro::task + auto square = [](coro::thread_pool& tp, double x) -> coro::task { co_await tp.schedule(); co_return x* x; @@ -144,7 +150,7 @@ int main() // Var args allows you to pass in tasks with different return types and returns // the result as a std::tuple. - auto tuple_results = coro::sync_wait(coro::when_all(square(1.1), twice(10))); + auto tuple_results = coro::sync_wait(coro::when_all(square(tp, 1.1), twice(tp, 10))); auto first = std::get<0>(tuple_results).return_value(); auto second = std::get<1>(tuple_results).return_value(); @@ -155,7 +161,7 @@ int main() Expected output: ```bash -$ ./examples/coro_when_all +$ ./examples/coro_when_all 2 4 6 @@ -174,13 +180,13 @@ The `coro::task` is the main coroutine building block within `libcoro`. Use int main() { - // Task that takes a value and doubles it. - auto double_task = [](uint64_t x) -> coro::task { co_return x * 2; }; - // Create a task that awaits the doubling of its given value and // then returns the result after adding 5. - auto double_and_add_5_task = [&](uint64_t input) -> coro::task + auto double_and_add_5_task = [](uint64_t input) -> coro::task { + // Task that takes a value and doubles it. + auto double_task = [](uint64_t x) -> coro::task { co_return x * 2; }; + auto doubled = co_await double_task(input); co_return doubled + 5; }; @@ -318,7 +324,8 @@ int main() coro::event e; // These tasks will wait until the given event has been set before advancing. - auto make_wait_task = [](const coro::event& e, uint64_t i) -> coro::task { + auto make_wait_task = [](const coro::event& e, uint64_t i) -> coro::task + { std::cout << "task " << i << " is waiting on the event...\n"; co_await e; std::cout << "task " << i << " event triggered, now resuming.\n"; @@ -326,7 +333,8 @@ int main() }; // This task will trigger the event allowing all waiting tasks to proceed. 
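A condensed sketch of the container form of `coro::when_all` may help here; it is not part of the patch and assumes each element of the returned container exposes `return_value()`, mirroring the tuple form used with var args.

```cpp
#include <coro/coro.hpp>
#include <iostream>
#include <vector>

int main()
{
    coro::thread_pool tp{coro::thread_pool::options{.thread_count = 4}};

    auto twice = [](coro::thread_pool& tp, uint64_t x) -> coro::task<uint64_t>
    {
        co_await tp.schedule();
        co_return x + x;
    };

    std::vector<coro::task<uint64_t>> tasks{};
    for (uint64_t i = 1; i <= 5; ++i)
    {
        tasks.emplace_back(twice(tp, i));
    }

    // One result object per task; each is assumed to expose return_value().
    auto results = coro::sync_wait(coro::when_all(std::move(tasks)));

    uint64_t sum{0};
    for (auto& r : results)
    {
        sum += r.return_value();
    }
    std::cout << "sum = " << sum << "\n"; // 2 + 4 + 6 + 8 + 10 = 30
}
```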
- auto make_set_task = [](coro::event& e) -> coro::task { + auto make_set_task = [](coro::event& e) -> coro::task + { std::cout << "set task is triggering the event\n"; e.set(); co_return; @@ -447,7 +455,9 @@ int main() std::vector output{}; coro::mutex mutex; - auto make_critical_section_task = [&](uint64_t i) -> coro::task { + auto make_critical_section_task = + [](coro::thread_pool& tp, coro::mutex& mutex, std::vector& output, uint64_t i) -> coro::task + { co_await tp.schedule(); // To acquire a mutex lock co_await its lock() function. Upon acquiring the lock the // lock() function returns a coro::scoped_lock that holds the mutex and automatically @@ -464,7 +474,7 @@ int main() tasks.reserve(num_tasks); for (size_t i = 1; i <= num_tasks; ++i) { - tasks.emplace_back(make_critical_section_task(i)); + tasks.emplace_back(make_critical_section_task(tp, mutex, output, i)); } coro::sync_wait(coro::when_all(std::move(tasks))); @@ -504,10 +514,13 @@ int main() // to also show the interleaving of coroutines acquiring the shared lock in shared and // exclusive mode as they resume and suspend in a linear manner. Ideally the thread pool // executor would have more than 1 thread to resume all shared waiters in parallel. - auto tp = std::make_shared(coro::thread_pool::options{.thread_count = 1}); - coro::shared_mutex mutex{tp}; + auto tp = std::make_shared(coro::thread_pool::options{.thread_count = 1}); + coro::shared_mutex mutex{tp}; - auto make_shared_task = [&](uint64_t i) -> coro::task { + auto make_shared_task = [](std::shared_ptr tp, + coro::shared_mutex& mutex, + uint64_t i) -> coro::task + { co_await tp->schedule(); { std::cerr << "shared task " << i << " lock_shared()\n"; @@ -521,7 +534,9 @@ int main() co_return; }; - auto make_exclusive_task = [&]() -> coro::task { + auto make_exclusive_task = [](std::shared_ptr tp, + coro::shared_mutex& mutex) -> coro::task + { co_await tp->schedule(); std::cerr << "exclusive task lock()\n"; @@ -537,14 +552,14 @@ int main() std::vector> tasks{}; for (size_t i = 1; i <= num_tasks; ++i) { - tasks.emplace_back(make_shared_task(i)); + tasks.emplace_back(make_shared_task(tp, mutex, i)); } // Create an exclusive task. - tasks.emplace_back(make_exclusive_task()); + tasks.emplace_back(make_exclusive_task(tp, mutex)); // Create 3 more shared tasks that will be blocked until the exclusive task completes. 
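The critical-section pattern above reduces to a few lines; the following sketch assumes `lock()` returns a `coro::scoped_lock` as described, with all shared state passed in by reference rather than captured.

```cpp
#include <coro/coro.hpp>
#include <iostream>
#include <vector>

int main()
{
    coro::thread_pool     tp{coro::thread_pool::options{.thread_count = 4}};
    coro::mutex           mutex;
    std::vector<uint64_t> output{};

    auto guarded_push = [](coro::thread_pool&     tp,
                           coro::mutex&           mutex,
                           std::vector<uint64_t>& output,
                           uint64_t               i) -> coro::task<void>
    {
        co_await tp.schedule();
        // The scoped lock releases the mutex automatically when it leaves scope.
        auto lock = co_await mutex.lock();
        output.push_back(i);
        co_return;
    };

    std::vector<coro::task<void>> tasks{};
    for (uint64_t i = 1; i <= 8; ++i)
    {
        tasks.emplace_back(guarded_push(tp, mutex, output, i));
    }
    coro::sync_wait(coro::when_all(std::move(tasks)));

    std::cout << "pushed " << output.size() << " values\n"; // always 8
}
```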
for (size_t i = num_tasks + 1; i <= num_tasks * 2; ++i) { - tasks.emplace_back(make_shared_task(i)); + tasks.emplace_back(make_shared_task(tp, mutex, i)); } coro::sync_wait(coro::when_all(std::move(tasks))); @@ -591,7 +606,8 @@ int main() coro::thread_pool tp{coro::thread_pool::options{.thread_count = 8}}; coro::semaphore semaphore{2}; - auto make_rate_limited_task = [&](uint64_t task_num) -> coro::task + auto make_rate_limited_task = + [](coro::thread_pool& tp, coro::semaphore& semaphore, uint64_t task_num) -> coro::task { co_await tp.schedule(); @@ -614,7 +630,7 @@ int main() std::vector> tasks{}; for (size_t i = 1; i <= num_tasks; ++i) { - tasks.emplace_back(make_rate_limited_task(i)); + tasks.emplace_back(make_rate_limited_task(tp, semaphore, i)); } coro::sync_wait(coro::when_all(std::move(tasks))); @@ -644,7 +660,8 @@ int main() std::vector> tasks{}; - auto make_producer_task = [&]() -> coro::task + auto make_producer_task = + [](coro::thread_pool& tp, coro::ring_buffer& rb, coro::mutex& m) -> coro::task { co_await tp.schedule(); @@ -669,7 +686,8 @@ int main() co_return; }; - auto make_consumer_task = [&](size_t id) -> coro::task + auto make_consumer_task = + [](coro::thread_pool& tp, coro::ring_buffer& rb, coro::mutex& m, size_t id) -> coro::task { co_await tp.schedule(); @@ -698,10 +716,10 @@ int main() // Create N consumers for (size_t i = 0; i < consumers; ++i) { - tasks.emplace_back(make_consumer_task(i)); + tasks.emplace_back(make_consumer_task(tp, rb, m, i)); } // Create 1 producer. - tasks.emplace_back(make_producer_task()); + tasks.emplace_back(make_producer_task(tp, rb, m)); // Wait for all the values to be produced and consumed through the ring buffer. coro::sync_wait(coro::when_all(std::move(tasks))); @@ -737,53 +755,53 @@ int main() // Upon starting each worker thread an optional lambda callback with the worker's // index can be called to make thread changes, perhaps priority or change the thread's // name. - .on_thread_start_functor = [](std::size_t worker_idx) -> void { - std::cout << "thread pool worker " << worker_idx << " is starting up.\n"; - }, + .on_thread_start_functor = [](std::size_t worker_idx) -> void + { std::cout << "thread pool worker " << worker_idx << " is starting up.\n"; }, // Upon stopping each worker thread an optional lambda callback with the worker's // index can b called. - .on_thread_stop_functor = [](std::size_t worker_idx) -> void { - std::cout << "thread pool worker " << worker_idx << " is shutting down.\n"; - }}}; - - auto offload_task = [&](uint64_t child_idx) -> coro::task { - // Start by scheduling this offload worker task onto the thread pool. - co_await tp.schedule(); - // Now any code below this schedule() line will be executed on one of the thread pools - // worker threads. - - // Mimic some expensive task that should be run on a background thread... - std::random_device rd; - std::mt19937 gen{rd()}; - std::uniform_int_distribution<> d{0, 1}; + .on_thread_stop_functor = [](std::size_t worker_idx) -> void + { std::cout << "thread pool worker " << worker_idx << " is shutting down.\n"; }}}; - size_t calculation{0}; - for (size_t i = 0; i < 1'000'000; ++i) + auto primary_task = [](coro::thread_pool& tp) -> coro::task + { + auto offload_task = [](coro::thread_pool& tp, uint64_t child_idx) -> coro::task { - calculation += d(gen); - - // Lets be nice and yield() to let other coroutines on the thread pool have some cpu - // time. 
This isn't necessary but is illustrated to show how tasks can cooperatively - // yield control at certain points of execution. Its important to never call the - // std::this_thread::sleep_for() within the context of a coroutine, that will block - // and other coroutines which are ready for execution from starting, always use yield() - // or within the context of a coro::io_scheduler you can use yield_for(amount). - if (i == 500'000) + // Start by scheduling this offload worker task onto the thread pool. + co_await tp.schedule(); + // Now any code below this schedule() line will be executed on one of the thread pools + // worker threads. + + // Mimic some expensive task that should be run on a background thread... + std::random_device rd; + std::mt19937 gen{rd()}; + std::uniform_int_distribution<> d{0, 1}; + + size_t calculation{0}; + for (size_t i = 0; i < 1'000'000; ++i) { - std::cout << "Task " << child_idx << " is yielding()\n"; - co_await tp.yield(); + calculation += d(gen); + + // Lets be nice and yield() to let other coroutines on the thread pool have some cpu + // time. This isn't necessary but is illustrated to show how tasks can cooperatively + // yield control at certain points of execution. Its important to never call the + // std::this_thread::sleep_for() within the context of a coroutine, that will block + // and other coroutines which are ready for execution from starting, always use yield() + // or within the context of a coro::io_scheduler you can use yield_for(amount). + if (i == 500'000) + { + std::cout << "Task " << child_idx << " is yielding()\n"; + co_await tp.yield(); + } } - } - co_return calculation; - }; + co_return calculation; + }; - auto primary_task = [&]() -> coro::task { const size_t num_children{10}; std::vector> child_tasks{}; child_tasks.reserve(num_children); for (size_t i = 0; i < num_children; ++i) { - child_tasks.emplace_back(offload_task(i)); + child_tasks.emplace_back(offload_task(tp, i)); } // Wait for the thread pool workers to process all child tasks. @@ -798,7 +816,7 @@ int main() co_return calculation; }; - auto result = coro::sync_wait(primary_task()); + auto result = coro::sync_wait(primary_task(tp)); std::cout << "calculated thread pool result = " << result << "\n"; } ``` @@ -872,7 +890,7 @@ int main() }, .execution_strategy = coro::io_scheduler::execution_strategy_t::process_tasks_on_thread_pool}); - auto make_server_task = [&]() -> coro::task + auto make_server_task = [](std::shared_ptr scheduler) -> coro::task { // Start by creating a tcp server, we'll do this before putting it into the scheduler so // it is immediately available for the client to connect since this will create a socket, @@ -960,7 +978,7 @@ int main() co_return; }; - auto make_client_task = [&]() -> coro::task + auto make_client_task = [](std::shared_ptr scheduler) -> coro::task { // Immediately schedule onto the scheduler. co_await scheduler->schedule(); @@ -992,7 +1010,7 @@ int main() }; // Create and wait for the server and client tasks to complete. - coro::sync_wait(coro::when_all(make_server_task(), make_client_task())); + coro::sync_wait(coro::when_all(make_server_task(scheduler), make_client_task(scheduler))); } ``` @@ -1023,7 +1041,7 @@ int main() auto scheduler = coro::io_scheduler::make_shared( coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}); - auto make_server_task = [&]() -> coro::task + auto make_server_task = [](std::shared_ptr scheduler) -> coro::task { // This is the task that will handle processing a client's requests. 
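The yield behaviour described in those comments can be isolated into a tiny sketch (again not part of the patch, using only `schedule()` and `yield()` as shown above):

```cpp
#include <coro/coro.hpp>
#include <iostream>

int main()
{
    coro::thread_pool tp{coro::thread_pool::options{.thread_count = 1}};

    auto cooperative = [](coro::thread_pool& tp) -> coro::task<uint64_t>
    {
        co_await tp.schedule();
        uint64_t acc{0};
        for (uint64_t i = 0; i < 4; ++i)
        {
            acc += i;
            // Re-enqueue so other ready coroutines get CPU time; never use
            // std::this_thread::sleep_for() inside a coroutine, since that
            // blocks the worker thread itself.
            co_await tp.yield();
        }
        co_return acc;
    };

    std::cout << coro::sync_wait(cooperative(tp)) << "\n"; // prints 6
}
```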
auto serve_client = [](coro::net::tcp::client client) -> coro::task @@ -1072,11 +1090,11 @@ int main() tc.start(serve_client(std::move(client))); // Wait for all clients to complete before shutting down the tcp::server. - co_await tc.garbage_collect_and_yield_until_empty(); + co_await tc.yield_until_empty(); co_return; }; - auto make_client_task = [&](size_t request_count) -> coro::task + auto make_client_task = [](std::shared_ptr scheduler, size_t request_count) -> coro::task { co_await scheduler->schedule(); coro::net::tcp::client client{scheduler}; @@ -1104,7 +1122,7 @@ int main() co_return; // Upon exiting the tcp::client will close its connection to the server. }; - coro::sync_wait(coro::when_all(make_server_task(), make_client_task(5))); + coro::sync_wait(coro::when_all(make_server_task(scheduler), make_client_task(scheduler, 5))); } ``` @@ -1309,7 +1327,7 @@ If you open a PR for a bugfix or new feature please include tests to verify that File bug reports, feature requests and questions using [GitHub libcoro Issues](https://github.com/jbaldwin/libcoro/issues) -Copyright © 2020-2024 Josh Baldwin +Copyright © 2020-2025 Josh Baldwin [badge.language]: https://img.shields.io/badge/language-C%2B%2B20-yellow.svg [badge.license]: https://img.shields.io/badge/license-Apache--2.0-blue diff --git a/examples/coro_event.cpp b/examples/coro_event.cpp index 4469294f..09a9840d 100644 --- a/examples/coro_event.cpp +++ b/examples/coro_event.cpp @@ -6,7 +6,8 @@ int main() coro::event e; // These tasks will wait until the given event has been set before advancing. - auto make_wait_task = [](const coro::event& e, uint64_t i) -> coro::task { + auto make_wait_task = [](const coro::event& e, uint64_t i) -> coro::task + { std::cout << "task " << i << " is waiting on the event...\n"; co_await e; std::cout << "task " << i << " event triggered, now resuming.\n"; @@ -14,7 +15,8 @@ int main() }; // This task will trigger the event allowing all waiting tasks to proceed. - auto make_set_task = [](coro::event& e) -> coro::task { + auto make_set_task = [](coro::event& e) -> coro::task + { std::cout << "set task is triggering the event\n"; e.set(); co_return; diff --git a/examples/coro_io_scheduler.cpp b/examples/coro_io_scheduler.cpp index 3c0dec5a..d8ae8527 100644 --- a/examples/coro_io_scheduler.cpp +++ b/examples/coro_io_scheduler.cpp @@ -26,7 +26,7 @@ int main() }, .execution_strategy = coro::io_scheduler::execution_strategy_t::process_tasks_on_thread_pool}); - auto make_server_task = [&]() -> coro::task + auto make_server_task = [](std::shared_ptr scheduler) -> coro::task { // Start by creating a tcp server, we'll do this before putting it into the scheduler so // it is immediately available for the client to connect since this will create a socket, @@ -114,7 +114,7 @@ int main() co_return; }; - auto make_client_task = [&]() -> coro::task + auto make_client_task = [](std::shared_ptr scheduler) -> coro::task { // Immediately schedule onto the scheduler. co_await scheduler->schedule(); @@ -146,5 +146,5 @@ int main() }; // Create and wait for the server and client tasks to complete. 
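Since this patch also renames `garbage_collect_and_yield_until_empty()` to `yield_until_empty()`, a minimal `coro::task_container` sketch may help; it assumes the container can be constructed from the shared scheduler and that `start()` resumes tasks on that executor, as the example above relies on.

```cpp
#include <coro/coro.hpp>
#include <iostream>
#include <memory>

int main()
{
    auto scheduler = coro::io_scheduler::make_shared(
        coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}});

    auto owner = [](std::shared_ptr<coro::io_scheduler> scheduler) -> coro::task<void>
    {
        co_await scheduler->schedule();

        // Helper task defined inside the coroutine body, per the capture rule.
        auto background = [](uint64_t i) -> coro::task<void>
        {
            std::cout << "background task " << i << " ran\n";
            co_return;
        };

        coro::task_container tc{scheduler};
        for (uint64_t i = 0; i < 3; ++i)
        {
            tc.start(background(i));
        }

        // Renamed in this patch from garbage_collect_and_yield_until_empty().
        co_await tc.yield_until_empty();
        co_return;
    };

    coro::sync_wait(owner(scheduler));
}
```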
- coro::sync_wait(coro::when_all(make_server_task(), make_client_task())); + coro::sync_wait(coro::when_all(make_server_task(scheduler), make_client_task(scheduler))); } diff --git a/examples/coro_mutex.cpp b/examples/coro_mutex.cpp index ebfde63c..80283c70 100644 --- a/examples/coro_mutex.cpp +++ b/examples/coro_mutex.cpp @@ -7,7 +7,9 @@ int main() std::vector output{}; coro::mutex mutex; - auto make_critical_section_task = [&](uint64_t i) -> coro::task { + auto make_critical_section_task = + [](coro::thread_pool& tp, coro::mutex& mutex, std::vector& output, uint64_t i) -> coro::task + { co_await tp.schedule(); // To acquire a mutex lock co_await its lock() function. Upon acquiring the lock the // lock() function returns a coro::scoped_lock that holds the mutex and automatically @@ -24,7 +26,7 @@ int main() tasks.reserve(num_tasks); for (size_t i = 1; i <= num_tasks; ++i) { - tasks.emplace_back(make_critical_section_task(i)); + tasks.emplace_back(make_critical_section_task(tp, mutex, output, i)); } coro::sync_wait(coro::when_all(std::move(tasks))); diff --git a/examples/coro_ring_buffer.cpp b/examples/coro_ring_buffer.cpp index 8ea2c9c6..7957ba52 100644 --- a/examples/coro_ring_buffer.cpp +++ b/examples/coro_ring_buffer.cpp @@ -11,7 +11,8 @@ int main() std::vector> tasks{}; - auto make_producer_task = [&]() -> coro::task + auto make_producer_task = + [](coro::thread_pool& tp, coro::ring_buffer& rb, coro::mutex& m) -> coro::task { co_await tp.schedule(); @@ -36,7 +37,8 @@ int main() co_return; }; - auto make_consumer_task = [&](size_t id) -> coro::task + auto make_consumer_task = + [](coro::thread_pool& tp, coro::ring_buffer& rb, coro::mutex& m, size_t id) -> coro::task { co_await tp.schedule(); @@ -65,10 +67,10 @@ int main() // Create N consumers for (size_t i = 0; i < consumers; ++i) { - tasks.emplace_back(make_consumer_task(i)); + tasks.emplace_back(make_consumer_task(tp, rb, m, i)); } // Create 1 producer. - tasks.emplace_back(make_producer_task()); + tasks.emplace_back(make_producer_task(tp, rb, m)); // Wait for all the values to be produced and consumed through the ring buffer. coro::sync_wait(coro::when_all(std::move(tasks))); diff --git a/examples/coro_semaphore.cpp b/examples/coro_semaphore.cpp index 01852824..13005dfa 100644 --- a/examples/coro_semaphore.cpp +++ b/examples/coro_semaphore.cpp @@ -7,7 +7,8 @@ int main() coro::thread_pool tp{coro::thread_pool::options{.thread_count = 8}}; coro::semaphore semaphore{2}; - auto make_rate_limited_task = [&](uint64_t task_num) -> coro::task + auto make_rate_limited_task = + [](coro::thread_pool& tp, coro::semaphore& semaphore, uint64_t task_num) -> coro::task { co_await tp.schedule(); @@ -30,7 +31,7 @@ int main() std::vector> tasks{}; for (size_t i = 1; i <= num_tasks; ++i) { - tasks.emplace_back(make_rate_limited_task(i)); + tasks.emplace_back(make_rate_limited_task(tp, semaphore, i)); } coro::sync_wait(coro::when_all(std::move(tasks))); diff --git a/examples/coro_shared_mutex.cpp b/examples/coro_shared_mutex.cpp index e3eda661..1d95d4df 100644 --- a/examples/coro_shared_mutex.cpp +++ b/examples/coro_shared_mutex.cpp @@ -8,10 +8,13 @@ int main() // to also show the interleaving of coroutines acquiring the shared lock in shared and // exclusive mode as they resume and suspend in a linear manner. Ideally the thread pool // executor would have more than 1 thread to resume all shared waiters in parallel. 
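A stripped-down version of the semaphore idea follows; the `acquire()`/`release()` pair is assumed from the rate-limited example in this patch, and any value returned by `acquire()` is deliberately ignored in this sketch.

```cpp
#include <coro/coro.hpp>
#include <iostream>
#include <vector>

int main()
{
    coro::thread_pool tp{coro::thread_pool::options{.thread_count = 4}};
    coro::semaphore   semaphore{2}; // at most two tasks inside the section at once

    auto limited = [](coro::thread_pool& tp, coro::semaphore& semaphore, uint64_t i) -> coro::task<void>
    {
        co_await tp.schedule();
        co_await semaphore.acquire(); // assumed API: waits for one of the two permits
        std::cout << "task " << i << " inside the rate limited section\n";
        semaphore.release();
        co_return;
    };

    std::vector<coro::task<void>> tasks{};
    for (uint64_t i = 1; i <= 8; ++i)
    {
        tasks.emplace_back(limited(tp, semaphore, i));
    }
    coro::sync_wait(coro::when_all(std::move(tasks)));
}
```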
- auto tp = std::make_shared(coro::thread_pool::options{.thread_count = 1}); - coro::shared_mutex mutex{tp}; + auto tp = std::make_shared(coro::thread_pool::options{.thread_count = 1}); + coro::shared_mutex mutex{tp}; - auto make_shared_task = [&](uint64_t i) -> coro::task { + auto make_shared_task = [](std::shared_ptr tp, + coro::shared_mutex& mutex, + uint64_t i) -> coro::task + { co_await tp->schedule(); { std::cerr << "shared task " << i << " lock_shared()\n"; @@ -25,7 +28,9 @@ int main() co_return; }; - auto make_exclusive_task = [&]() -> coro::task { + auto make_exclusive_task = [](std::shared_ptr tp, + coro::shared_mutex& mutex) -> coro::task + { co_await tp->schedule(); std::cerr << "exclusive task lock()\n"; @@ -41,14 +46,14 @@ int main() std::vector> tasks{}; for (size_t i = 1; i <= num_tasks; ++i) { - tasks.emplace_back(make_shared_task(i)); + tasks.emplace_back(make_shared_task(tp, mutex, i)); } // Create an exclusive task. - tasks.emplace_back(make_exclusive_task()); + tasks.emplace_back(make_exclusive_task(tp, mutex)); // Create 3 more shared tasks that will be blocked until the exclusive task completes. for (size_t i = num_tasks + 1; i <= num_tasks * 2; ++i) { - tasks.emplace_back(make_shared_task(i)); + tasks.emplace_back(make_shared_task(tp, mutex, i)); } coro::sync_wait(coro::when_all(std::move(tasks))); diff --git a/examples/coro_sync_wait.cpp b/examples/coro_sync_wait.cpp index 0629f81c..4364afe4 100644 --- a/examples/coro_sync_wait.cpp +++ b/examples/coro_sync_wait.cpp @@ -14,11 +14,12 @@ int main() std::cout << "Inline Result = " << result << "\n"; // We'll make a 1 thread coro::thread_pool to demonstrate offloading the task's - // execution to another thread. We'll capture the thread pool in the lambda, - // note that you will need to guarantee the thread pool outlives the coroutine. + // execution to another thread. We'll pass the thread pool as a parameter so + // the task can be scheduled. + // Note that you will need to guarantee the thread pool outlives the coroutine. coro::thread_pool tp{coro::thread_pool::options{.thread_count = 1}}; - auto make_task_offload = [&tp](uint64_t x) -> coro::task + auto make_task_offload = [](coro::thread_pool& tp, uint64_t x) -> coro::task { co_await tp.schedule(); // Schedules execution on the thread pool. co_return x + x; // This will execute on the thread pool. @@ -26,6 +27,6 @@ int main() // This will still block the calling thread, but it will now offload to the // coro::thread_pool since the coroutine task is immediately scheduled. - result = coro::sync_wait(make_task_offload(10)); + result = coro::sync_wait(make_task_offload(tp, 10)); std::cout << "Offload Result = " << result << "\n"; } diff --git a/examples/coro_task.cpp b/examples/coro_task.cpp index 891c6ed4..c9cfbf16 100644 --- a/examples/coro_task.cpp +++ b/examples/coro_task.cpp @@ -3,13 +3,13 @@ int main() { - // Task that takes a value and doubles it. - auto double_task = [](uint64_t x) -> coro::task { co_return x * 2; }; - // Create a task that awaits the doubling of its given value and // then returns the result after adding 5. - auto double_and_add_5_task = [&](uint64_t input) -> coro::task + auto double_and_add_5_task = [](uint64_t input) -> coro::task { + // Task that takes a value and doubles it. 
+ auto double_task = [](uint64_t x) -> coro::task { co_return x * 2; }; + auto doubled = co_await double_task(input); co_return doubled + 5; }; diff --git a/examples/coro_task_container.cpp b/examples/coro_task_container.cpp index 800d1ae4..66682a44 100644 --- a/examples/coro_task_container.cpp +++ b/examples/coro_task_container.cpp @@ -6,7 +6,7 @@ int main() auto scheduler = coro::io_scheduler::make_shared( coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}); - auto make_server_task = [&]() -> coro::task + auto make_server_task = [](std::shared_ptr scheduler) -> coro::task { // This is the task that will handle processing a client's requests. auto serve_client = [](coro::net::tcp::client client) -> coro::task @@ -59,7 +59,7 @@ int main() co_return; }; - auto make_client_task = [&](size_t request_count) -> coro::task + auto make_client_task = [](std::shared_ptr scheduler, size_t request_count) -> coro::task { co_await scheduler->schedule(); coro::net::tcp::client client{scheduler}; @@ -87,5 +87,5 @@ int main() co_return; // Upon exiting the tcp::client will close its connection to the server. }; - coro::sync_wait(coro::when_all(make_server_task(), make_client_task(5))); + coro::sync_wait(coro::when_all(make_server_task(scheduler), make_client_task(scheduler, 5))); } diff --git a/examples/coro_thread_pool.cpp b/examples/coro_thread_pool.cpp index 398ff48b..8e9330f6 100644 --- a/examples/coro_thread_pool.cpp +++ b/examples/coro_thread_pool.cpp @@ -12,53 +12,53 @@ int main() // Upon starting each worker thread an optional lambda callback with the worker's // index can be called to make thread changes, perhaps priority or change the thread's // name. - .on_thread_start_functor = [](std::size_t worker_idx) -> void { - std::cout << "thread pool worker " << worker_idx << " is starting up.\n"; - }, + .on_thread_start_functor = [](std::size_t worker_idx) -> void + { std::cout << "thread pool worker " << worker_idx << " is starting up.\n"; }, // Upon stopping each worker thread an optional lambda callback with the worker's // index can b called. - .on_thread_stop_functor = [](std::size_t worker_idx) -> void { - std::cout << "thread pool worker " << worker_idx << " is shutting down.\n"; - }}}; + .on_thread_stop_functor = [](std::size_t worker_idx) -> void + { std::cout << "thread pool worker " << worker_idx << " is shutting down.\n"; }}}; - auto offload_task = [&](uint64_t child_idx) -> coro::task { - // Start by scheduling this offload worker task onto the thread pool. - co_await tp.schedule(); - // Now any code below this schedule() line will be executed on one of the thread pools - // worker threads. - - // Mimic some expensive task that should be run on a background thread... - std::random_device rd; - std::mt19937 gen{rd()}; - std::uniform_int_distribution<> d{0, 1}; - - size_t calculation{0}; - for (size_t i = 0; i < 1'000'000; ++i) + auto primary_task = [](coro::thread_pool& tp) -> coro::task + { + auto offload_task = [](coro::thread_pool& tp, uint64_t child_idx) -> coro::task { - calculation += d(gen); + // Start by scheduling this offload worker task onto the thread pool. + co_await tp.schedule(); + // Now any code below this schedule() line will be executed on one of the thread pools + // worker threads. + + // Mimic some expensive task that should be run on a background thread... 
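The coro_task.cpp change above moves the helper task inside the coroutine body; in isolation the pattern looks like this sketch (not part of the patch):

```cpp
#include <coro/coro.hpp>
#include <iostream>

int main()
{
    auto double_and_add_5 = [](uint64_t input) -> coro::task<uint64_t>
    {
        // The inner lambda is a local in the outer coroutine's frame, so it
        // survives suspension; a lambda capture on the outer lambda would not.
        auto double_task = [](uint64_t x) -> coro::task<uint64_t> { co_return x * 2; };

        auto doubled = co_await double_task(input);
        co_return doubled + 5;
    };

    std::cout << coro::sync_wait(double_and_add_5(2)) << "\n"; // prints 9
}
```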
+ std::random_device rd; + std::mt19937 gen{rd()}; + std::uniform_int_distribution<> d{0, 1}; - // Lets be nice and yield() to let other coroutines on the thread pool have some cpu - // time. This isn't necessary but is illustrated to show how tasks can cooperatively - // yield control at certain points of execution. Its important to never call the - // std::this_thread::sleep_for() within the context of a coroutine, that will block - // and other coroutines which are ready for execution from starting, always use yield() - // or within the context of a coro::io_scheduler you can use yield_for(amount). - if (i == 500'000) + size_t calculation{0}; + for (size_t i = 0; i < 1'000'000; ++i) { - std::cout << "Task " << child_idx << " is yielding()\n"; - co_await tp.yield(); + calculation += d(gen); + + // Lets be nice and yield() to let other coroutines on the thread pool have some cpu + // time. This isn't necessary but is illustrated to show how tasks can cooperatively + // yield control at certain points of execution. Its important to never call the + // std::this_thread::sleep_for() within the context of a coroutine, that will block + // and other coroutines which are ready for execution from starting, always use yield() + // or within the context of a coro::io_scheduler you can use yield_for(amount). + if (i == 500'000) + { + std::cout << "Task " << child_idx << " is yielding()\n"; + co_await tp.yield(); + } } - } - co_return calculation; - }; + co_return calculation; + }; - auto primary_task = [&]() -> coro::task { const size_t num_children{10}; std::vector> child_tasks{}; child_tasks.reserve(num_children); for (size_t i = 0; i < num_children; ++i) { - child_tasks.emplace_back(offload_task(i)); + child_tasks.emplace_back(offload_task(tp, i)); } // Wait for the thread pool workers to process all child tasks. @@ -73,6 +73,6 @@ int main() co_return calculation; }; - auto result = coro::sync_wait(primary_task()); + auto result = coro::sync_wait(primary_task(tp)); std::cout << "calculated thread pool result = " << result << "\n"; } diff --git a/examples/coro_when_all.cpp b/examples/coro_when_all.cpp index 223ca455..99eeaa48 100644 --- a/examples/coro_when_all.cpp +++ b/examples/coro_when_all.cpp @@ -6,7 +6,7 @@ int main() // Create a thread pool to execute all the tasks in parallel. coro::thread_pool tp{coro::thread_pool::options{.thread_count = 4}}; // Create the task we want to invoke multiple times and execute in parallel on the thread pool. - auto twice = [&](uint64_t x) -> coro::task + auto twice = [](coro::thread_pool& tp, uint64_t x) -> coro::task { co_await tp.schedule(); // Schedule onto the thread pool. co_return x + x; // Executed on the thread pool. @@ -16,7 +16,7 @@ int main() std::vector> tasks{}; for (std::size_t i = 0; i < 5; ++i) { - tasks.emplace_back(twice(i + 1)); + tasks.emplace_back(twice(tp, i + 1)); } // Synchronously wait on this thread for the thread pool to finish executing all the tasks in parallel. @@ -35,7 +35,7 @@ int main() } // Use var args instead of a container as input to coro::when_all. - auto square = [&](double x) -> coro::task + auto square = [](coro::thread_pool& tp, double x) -> coro::task { co_await tp.schedule(); co_return x* x; @@ -43,7 +43,7 @@ int main() // Var args allows you to pass in tasks with different return types and returns // the result as a std::tuple. 
- auto tuple_results = coro::sync_wait(coro::when_all(square(1.1), twice(10))); + auto tuple_results = coro::sync_wait(coro::when_all(square(tp, 1.1), twice(tp, 10))); auto first = std::get<0>(tuple_results).return_value(); auto second = std::get<1>(tuple_results).return_value(); diff --git a/include/coro/detail/task_self_deleting.hpp b/include/coro/detail/task_self_deleting.hpp index 4c8ead9b..eec1217d 100644 --- a/include/coro/detail/task_self_deleting.hpp +++ b/include/coro/detail/task_self_deleting.hpp @@ -27,6 +27,7 @@ class promise_self_deleting auto unhandled_exception() -> void; auto task_container_size(std::atomic& task_container_size) -> void; + private: /** * The coro::task_container m_size member to decrement upon the coroutine completing. @@ -58,7 +59,11 @@ class task_self_deleting auto operator=(task_self_deleting&&) -> task_self_deleting&; auto promise() -> promise_self_deleting& { return *m_promise; } - auto handle() -> std::coroutine_handle { return std::coroutine_handle::from_promise(*m_promise); } + auto handle() -> std::coroutine_handle + { + return std::coroutine_handle::from_promise(*m_promise); + } + private: promise_self_deleting* m_promise{nullptr}; }; diff --git a/include/coro/task_container.hpp b/include/coro/task_container.hpp index 5af5c460..abd64d9a 100644 --- a/include/coro/task_container.hpp +++ b/include/coro/task_container.hpp @@ -10,15 +10,14 @@ #include #include #include -#include #include +#include #include namespace coro { class io_scheduler; - template class task_container { @@ -28,9 +27,7 @@ class task_container * from a coro::io_scheduler, this would usually be that coro::io_scheduler instance. * @param opts Task container options. */ - task_container( - std::shared_ptr e) - : m_executor(std::move(e)) + task_container(std::shared_ptr e) : m_executor(std::move(e)) { if (m_executor == nullptr) { diff --git a/include/coro/when_all.hpp b/include/coro/when_all.hpp index c7a71fad..93a26a76 100644 --- a/include/coro/when_all.hpp +++ b/include/coro/when_all.hpp @@ -5,11 +5,11 @@ #include "coro/detail/void_value.hpp" #include +#include #include #include #include #include -#include namespace coro { diff --git a/src/detail/task_self_deleting.cpp b/src/detail/task_self_deleting.cpp index 9b6cb5aa..4f45a613 100644 --- a/src/detail/task_self_deleting.cpp +++ b/src/detail/task_self_deleting.cpp @@ -12,13 +12,11 @@ promise_self_deleting::promise_self_deleting() promise_self_deleting::~promise_self_deleting() { - } promise_self_deleting::promise_self_deleting(promise_self_deleting&& other) : m_task_container_size(std::exchange(other.m_task_container_size, nullptr)) { - } auto promise_self_deleting::operator=(promise_self_deleting&& other) -> promise_self_deleting& @@ -68,21 +66,16 @@ auto promise_self_deleting::task_container_size(std::atomic& task_c m_task_container_size = &task_container_size; } -task_self_deleting::task_self_deleting(promise_self_deleting& promise) - : m_promise(&promise) +task_self_deleting::task_self_deleting(promise_self_deleting& promise) : m_promise(&promise) { - } task_self_deleting::~task_self_deleting() { - } -task_self_deleting::task_self_deleting(task_self_deleting&& other) - : m_promise(other.m_promise) +task_self_deleting::task_self_deleting(task_self_deleting&& other) : m_promise(other.m_promise) { - } auto task_self_deleting::operator=(task_self_deleting&& other) -> task_self_deleting& diff --git a/src/mutex.cpp b/src/mutex.cpp index 61f94ac7..b836929c 100644 --- a/src/mutex.cpp +++ b/src/mutex.cpp @@ -34,7 +34,7 @@ auto 
mutex::lock_operation::await_ready() const noexcept -> bool auto mutex::lock_operation::await_suspend(std::coroutine_handle<> awaiting_coroutine) noexcept -> bool { m_awaiting_coroutine = awaiting_coroutine; - void* current = m_mutex.m_state.load(std::memory_order::acquire); + void* current = m_mutex.m_state.load(std::memory_order::acquire); void* new_value; const void* unlocked_value = m_mutex.unlocked_value(); diff --git a/test/bench.cpp b/test/bench.cpp index 7c85a74d..51468ffa 100644 --- a/test/bench.cpp +++ b/test/bench.cpp @@ -31,7 +31,7 @@ TEST_CASE("benchmark counter func direct call", "[benchmark]") { constexpr std::size_t iterations = default_iterations; std::atomic counter{0}; - auto func = [&]() -> void + auto func = [](std::atomic& counter) -> void { counter.fetch_add(1, std::memory_order::relaxed); return; @@ -41,7 +41,7 @@ TEST_CASE("benchmark counter func direct call", "[benchmark]") for (std::size_t i = 0; i < iterations; ++i) { - func(); + func(counter); } print_stats("benchmark counter func direct call", iterations, start, sc::now()); @@ -244,7 +244,7 @@ TEST_CASE("benchmark counter task scheduler{1} yield", "[benchmark]") std::vector> tasks{}; tasks.reserve(iterations); - auto make_task = [&]() -> coro::task + auto make_task = [](std::shared_ptr s, std::atomic& counter) -> coro::task { co_await s->schedule(); co_await s->yield(); @@ -256,7 +256,7 @@ TEST_CASE("benchmark counter task scheduler{1} yield", "[benchmark]") for (std::size_t i = 0; i < iterations; ++i) { - tasks.emplace_back(make_task()); + tasks.emplace_back(make_task(s, counter)); } coro::sync_wait(coro::when_all(std::move(tasks))); @@ -279,7 +279,7 @@ TEST_CASE("benchmark counter task scheduler{1} yield_for", "[benchmark]") std::vector> tasks{}; tasks.reserve(iterations); - auto make_task = [&]() -> coro::task + auto make_task = [](std::shared_ptr s, std::atomic& counter) -> coro::task { co_await s->schedule(); co_await s->yield_for(std::chrono::milliseconds{1}); @@ -291,7 +291,7 @@ TEST_CASE("benchmark counter task scheduler{1} yield_for", "[benchmark]") for (std::size_t i = 0; i < iterations; ++i) { - tasks.emplace_back(make_task()); + tasks.emplace_back(make_task(s, counter)); } coro::sync_wait(coro::when_all(std::move(tasks))); @@ -322,7 +322,10 @@ TEST_CASE("benchmark counter task scheduler await event from another coroutine", std::atomic counter{0}; - auto wait_func = [&](std::size_t index) -> coro::task + auto wait_func = [](std::shared_ptr s, + std::vector>& events, + std::atomic& counter, + std::size_t index) -> coro::task { co_await s->schedule(); co_await *events[index]; @@ -330,7 +333,9 @@ TEST_CASE("benchmark counter task scheduler await event from another coroutine", co_return; }; - auto resume_func = [&](std::size_t index) -> coro::task + auto resume_func = [](std::shared_ptr s, + std::vector>& events, + std::size_t index) -> coro::task { co_await s->schedule(); events[index]->set(); @@ -341,8 +346,8 @@ TEST_CASE("benchmark counter task scheduler await event from another coroutine", for (std::size_t i = 0; i < iterations; ++i) { - tasks.emplace_back(wait_func(i)); - tasks.emplace_back(resume_func(i)); + tasks.emplace_back(wait_func(s, events, counter, i)); + tasks.emplace_back(resume_func(s, events, i)); } coro::sync_wait(coro::when_all(std::move(tasks))); @@ -372,41 +377,44 @@ TEST_CASE("benchmark tcp::server echo server thread pool", "[benchmark]") std::atomic accepted{0}; std::atomic clients_completed{0}; - auto make_on_connection_task = [&](coro::net::tcp::client client, coro::latch& 
wait_for_clients) -> coro::task + auto server_scheduler = coro::io_scheduler::make_shared(coro::io_scheduler::options{ + .pool = coro::thread_pool::options{}, + .execution_strategy = coro::io_scheduler::execution_strategy_t::process_tasks_on_thread_pool}); + auto make_server_task = [](std::shared_ptr server_scheduler, + std::atomic& listening, + std::atomic& accepted) -> coro::task { - std::string in(64, '\0'); - - // Echo the messages until the socket is closed. - while (true) + auto make_on_connection_task = [](coro::net::tcp::client client, + coro::latch& wait_for_clients) -> coro::task { - auto pstatus = co_await client.poll(coro::poll_op::read); - REQUIRE(pstatus == coro::poll_status::event); + std::string in(64, '\0'); - auto [rstatus, rspan] = client.recv(in); - if (rstatus == coro::net::recv_status::closed) + // Echo the messages until the socket is closed. + while (true) { - REQUIRE(rspan.empty()); - break; - } - REQUIRE(rstatus == coro::net::recv_status::ok); + auto pstatus = co_await client.poll(coro::poll_op::read); + REQUIRE(pstatus == coro::poll_status::event); + + auto [rstatus, rspan] = client.recv(in); + if (rstatus == coro::net::recv_status::closed) + { + REQUIRE(rspan.empty()); + break; + } + REQUIRE(rstatus == coro::net::recv_status::ok); - in.resize(rspan.size()); + in.resize(rspan.size()); - auto [sstatus, remaining] = client.send(in); - REQUIRE(sstatus == coro::net::send_status::ok); - REQUIRE(remaining.empty()); - } + auto [sstatus, remaining] = client.send(in); + REQUIRE(sstatus == coro::net::send_status::ok); + REQUIRE(remaining.empty()); + } - wait_for_clients.count_down(); - std::cerr << "wait_for_clients.count_down(1) -> " << wait_for_clients.remaining() << "\n"; - co_return; - }; + wait_for_clients.count_down(); + std::cerr << "wait_for_clients.count_down(1) -> " << wait_for_clients.remaining() << "\n"; + co_return; + }; - auto server_scheduler = coro::io_scheduler::make_shared(coro::io_scheduler::options{ - .pool = coro::thread_pool::options{}, - .execution_strategy = coro::io_scheduler::execution_strategy_t::process_tasks_on_thread_pool}); - auto make_server_task = [&]() -> coro::task - { co_await server_scheduler->schedule(); coro::latch wait_for_clients{connections}; @@ -440,7 +448,11 @@ TEST_CASE("benchmark tcp::server echo server thread pool", "[benchmark]") auto client_scheduler = coro::io_scheduler::make_shared(coro::io_scheduler::options{ .pool = coro::thread_pool::options{}, .execution_strategy = coro::io_scheduler::execution_strategy_t::process_tasks_on_thread_pool}); - auto make_client_task = [&]() -> coro::task + auto make_client_task = [](std::shared_ptr client_scheduler, + const std::string& msg, + std::atomic& clients_completed, + std::map& g_histogram, + std::mutex& g_histogram_mutex) -> coro::task { co_await client_scheduler->schedule(); std::map histogram; @@ -486,7 +498,8 @@ TEST_CASE("benchmark tcp::server echo server thread pool", "[benchmark]") auto start = sc::now(); // Create the server to accept incoming tcp connections. - auto server_thread = std::thread{[&]() { coro::sync_wait(make_server_task()); }}; + auto server_thread = + std::thread{[&]() { coro::sync_wait(make_server_task(server_scheduler, listening, accepted)); }}; // The server can take a small bit of time to start up, if we don't wait for it to notify then // the first few connections can easily fail to connect causing this test to fail. 
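The benchmark's shutdown handshake is built on `coro::latch` with `count_down()` and `remaining()`; a minimal sketch of that primitive, assuming the latch itself is awaitable as it is used here:

```cpp
#include <coro/coro.hpp>
#include <iostream>

int main()
{
    coro::thread_pool tp{coro::thread_pool::options{.thread_count = 2}};
    coro::latch       latch{3};

    auto worker = [](coro::thread_pool& tp, coro::latch& latch, uint64_t i) -> coro::task<void>
    {
        co_await tp.schedule();
        std::cout << "worker " << i << " counting down\n";
        latch.count_down();
        co_return;
    };

    auto waiter = [](coro::thread_pool& tp, coro::latch& latch) -> coro::task<void>
    {
        co_await tp.schedule();
        co_await latch; // resumes only after all three workers count down
        std::cout << "all workers finished\n";
        co_return;
    };

    coro::sync_wait(coro::when_all(
        waiter(tp, latch), worker(tp, latch, 1), worker(tp, latch, 2), worker(tp, latch, 3)));
}
```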
@@ -495,15 +508,17 @@ TEST_CASE("benchmark tcp::server echo server thread pool", "[benchmark]") std::this_thread::sleep_for(std::chrono::milliseconds{1}); } - auto client_thread = std::thread{[&]() - { - std::vector> tasks{}; - for (size_t i = 0; i < connections; ++i) - { - tasks.emplace_back(make_client_task()); - } - coro::sync_wait(coro::when_all(std::move(tasks))); - }}; + auto client_thread = + std::thread{[&]() + { + std::vector> tasks{}; + for (size_t i = 0; i < connections; ++i) + { + tasks.emplace_back(make_client_task( + client_scheduler, msg, clients_completed, g_histogram, g_histogram_mutex)); + } + coro::sync_wait(coro::when_all(std::move(tasks))); + }}; std::cerr << "joining client thread...\n"; client_thread.join(); @@ -555,43 +570,44 @@ TEST_CASE("benchmark tcp::server echo server inline", "[benchmark]") std::vector> tasks{}; }; - auto make_on_connection_task = [&](server& s, coro::net::tcp::client client) -> coro::task + auto make_server_task = + [](server& s, std::atomic& listening, std::atomic& accepted) -> coro::task { - std::string in(64, '\0'); - - // Echo the messages until the socket is closed. - while (true) + auto make_on_connection_task = [](server& s, coro::net::tcp::client client) -> coro::task { - auto pstatus = co_await client.poll(coro::poll_op::read); - REQUIRE(pstatus == coro::poll_status::event); + std::string in(64, '\0'); - auto [rstatus, rspan] = client.recv(in); - if (rstatus == coro::net::recv_status::closed) + // Echo the messages until the socket is closed. + while (true) { - REQUIRE(rspan.empty()); - break; - } - REQUIRE(rstatus == coro::net::recv_status::ok); + auto pstatus = co_await client.poll(coro::poll_op::read); + REQUIRE(pstatus == coro::poll_status::event); - in.resize(rspan.size()); + auto [rstatus, rspan] = client.recv(in); + if (rstatus == coro::net::recv_status::closed) + { + REQUIRE(rspan.empty()); + break; + } + REQUIRE(rstatus == coro::net::recv_status::ok); - auto [sstatus, remaining] = client.send(in); - REQUIRE(sstatus == coro::net::send_status::ok); - REQUIRE(remaining.empty()); - } + in.resize(rspan.size()); - s.live_clients--; - std::cerr << "s.live_clients=" << s.live_clients << std::endl; - if (s.live_clients == 0) - { - std::cerr << "s.wait_for_clients.set()" << std::endl; - s.wait_for_clients.set(); - } - co_return; - }; + auto [sstatus, remaining] = client.send(in); + REQUIRE(sstatus == coro::net::send_status::ok); + REQUIRE(remaining.empty()); + } + + s.live_clients--; + std::cerr << "s.live_clients=" << s.live_clients << std::endl; + if (s.live_clients == 0) + { + std::cerr << "s.wait_for_clients.set()" << std::endl; + s.wait_for_clients.set(); + } + co_return; + }; - auto make_server_task = [&](server& s) -> coro::task - { co_await s.scheduler->schedule(); coro::net::tcp::server server{s.scheduler}; @@ -623,7 +639,11 @@ TEST_CASE("benchmark tcp::server echo server inline", "[benchmark]") std::mutex g_histogram_mutex; std::map g_histogram; - auto make_client_task = [&](client& c) -> coro::task + auto make_client_task = [](client& c, + const std::string& msg, + std::atomic& clients_completed, + std::map& g_histogram, + std::mutex& g_histogram_mutex) -> coro::task { co_await c.scheduler->schedule(); std::map histogram; @@ -677,7 +697,7 @@ TEST_CASE("benchmark tcp::server echo server inline", "[benchmark]") server s{}; s.id = server_id++; std::cerr << "coro::sync_wait(make_server_task(s));\n"; - coro::sync_wait(make_server_task(s)); + coro::sync_wait(make_server_task(s, listening, accepted)); std::cerr << 
"server.scheduler->shutdown()\n"; s.scheduler->shutdown(); std::cerr << "server thread exiting\n"; @@ -696,19 +716,20 @@ TEST_CASE("benchmark tcp::server echo server inline", "[benchmark]") std::vector clients{}; for (size_t i = 0; i < client_count; ++i) { - client_threads.emplace_back(std::thread{[&]() - { - client c{}; - for (size_t i = 0; i < connections / client_count; ++i) - { - c.tasks.emplace_back(make_client_task(c)); - } - std::cerr << "coro::sync_wait(coro::when_all(std::move(c.tasks)));\n"; - coro::sync_wait(coro::when_all(std::move(c.tasks))); - std::cerr << "client.scheduler->shutdown()\n"; - c.scheduler->shutdown(); - std::cerr << "client thread exiting\n"; - }}); + client_threads.emplace_back(std::thread{ + [&]() + { + client c{}; + for (size_t i = 0; i < connections / client_count; ++i) + { + c.tasks.emplace_back(make_client_task(c, msg, clients_completed, g_histogram, g_histogram_mutex)); + } + std::cerr << "coro::sync_wait(coro::when_all(std::move(c.tasks)));\n"; + coro::sync_wait(coro::when_all(std::move(c.tasks))); + std::cerr << "client.scheduler->shutdown()\n"; + c.scheduler->shutdown(); + std::cerr << "client thread exiting\n"; + }}); } for (auto& ct : client_threads) @@ -743,67 +764,70 @@ TEST_CASE("benchmark tls::server echo server thread pool", "[benchmark]") std::atomic accepted{0}; std::atomic clients_completed{0}; - auto make_on_connection_task = [&](coro::net::tls::client client, coro::latch& wait_for_clients) -> coro::task + auto server_scheduler = coro::io_scheduler::make_shared(coro::io_scheduler::options{ + .pool = coro::thread_pool::options{}, + .execution_strategy = coro::io_scheduler::execution_strategy_t::process_tasks_on_thread_pool}); + auto make_server_task = [](std::shared_ptr server_scheduler, + std::atomic& listening, + std::atomic& accepted) -> coro::task { - std::string in(64, '\0'); - - auto closed = false; - // Echo the messages until the socket is closed. - while (!closed) + auto make_on_connection_task = [](coro::net::tls::client client, + coro::latch& wait_for_clients) -> coro::task { - auto [rstatus, rspan] = co_await client.recv(in); - // std::cerr << "SERVER CONNECTION: rstatus =" << coro::net::tls::to_string(rstatus) << "\n"; - std::string_view data{}; - switch (rstatus) + std::string in(64, '\0'); + + auto closed = false; + // Echo the messages until the socket is closed. 
+ while (!closed) { - case coro::net::tls::recv_status::ok: - REQUIRE(rstatus == coro::net::tls::recv_status::ok); - data = std::string_view{rspan.begin(), rspan.end()}; - // std::cerr << "SERVER CONNECTION: recv() -> " << data << "\n"; - break; - case coro::net::tls::recv_status::closed: - // std::cerr << "SERVER CONNECTION: closed\n"; - REQUIRE(rspan.empty()); - closed = true; - break; - case coro::net::tls::recv_status::want_read: + auto [rstatus, rspan] = co_await client.recv(in); + // std::cerr << "SERVER CONNECTION: rstatus =" << coro::net::tls::to_string(rstatus) << "\n"; + std::string_view data{}; + switch (rstatus) { - // std::cerr << "SERVER CONNECTION: want_read\n"; + case coro::net::tls::recv_status::ok: + REQUIRE(rstatus == coro::net::tls::recv_status::ok); + data = std::string_view{rspan.begin(), rspan.end()}; + // std::cerr << "SERVER CONNECTION: recv() -> " << data << "\n"; + break; + case coro::net::tls::recv_status::closed: + // std::cerr << "SERVER CONNECTION: closed\n"; + REQUIRE(rspan.empty()); + closed = true; + break; + case coro::net::tls::recv_status::want_read: + { + // std::cerr << "SERVER CONNECTION: want_read\n"; + } + continue; + case coro::net::tls::recv_status::want_write: + { + // std::cerr << "SERVER CONNECTION: want_write\n"; + } + continue; + default: + // std::cerr << "SERVER CONNECTION: error (closing)\n"; + closed = true; + break; } - continue; - case coro::net::tls::recv_status::want_write: + + if (closed) { - // std::cerr << "SERVER CONNECTION: want_write\n"; - } - continue; - default: - // std::cerr << "SERVER CONNECTION: error (closing)\n"; - closed = true; break; - } + } - if (closed) - { - break; + // std::cerr << "SERVER CONNECTION: client.send()\n"; + auto [sstatus, remaining] = co_await client.send(data); + REQUIRE(sstatus == coro::net::tls::send_status::ok); + REQUIRE(remaining.empty()); + // std::cerr << "SERVER CONNECTION: send() -> " << data << "\n"; } - // std::cerr << "SERVER CONNECTION: client.send()\n"; - auto [sstatus, remaining] = co_await client.send(data); - REQUIRE(sstatus == coro::net::tls::send_status::ok); - REQUIRE(remaining.empty()); - // std::cerr << "SERVER CONNECTION: send() -> " << data << "\n"; - } - - wait_for_clients.count_down(); - // std::cerr << "SERVER CONNECTION: wait_for_clients.count_down(1) -> " << wait_for_clients.remaining() << "\n"; - co_return; - }; - - auto server_scheduler = coro::io_scheduler::make_shared(coro::io_scheduler::options{ - .pool = coro::thread_pool::options{}, - .execution_strategy = coro::io_scheduler::execution_strategy_t::process_tasks_on_thread_pool}); - auto make_server_task = [&]() -> coro::task - { + wait_for_clients.count_down(); + // std::cerr << "SERVER CONNECTION: wait_for_clients.count_down(1) -> " << wait_for_clients.remaining() << + // "\n"; + co_return; + }; co_await server_scheduler->schedule(); coro::latch wait_for_clients{connections}; @@ -840,7 +864,11 @@ TEST_CASE("benchmark tls::server echo server thread pool", "[benchmark]") auto client_scheduler = coro::io_scheduler::make_shared(coro::io_scheduler::options{ .pool = coro::thread_pool::options{}, .execution_strategy = coro::io_scheduler::execution_strategy_t::process_tasks_on_thread_pool}); - auto make_client_task = [&](coro::mutex& histogram_mutex) -> coro::task + auto make_client_task = [](std::shared_ptr client_scheduler, + const std::string& msg, + std::atomic& clients_completed, + std::map& g_histogram, + coro::mutex& histogram_mutex) -> coro::task { co_await client_scheduler->schedule(); { @@ -929,7 +957,8 @@ 
TEST_CASE("benchmark tls::server echo server thread pool", "[benchmark]") auto start = sc::now(); // Create the server to accept incoming tcp connections. - auto server_thread = std::thread{[&]() { coro::sync_wait(make_server_task()); }}; + auto server_thread = + std::thread{[&]() { coro::sync_wait(make_server_task(server_scheduler, listening, accepted)); }}; // The server can take a small bit of time to start up, if we don't wait for it to notify then // the first few connections can easily fail to connect causing this test to fail. @@ -938,15 +967,17 @@ TEST_CASE("benchmark tls::server echo server thread pool", "[benchmark]") std::this_thread::sleep_for(std::chrono::milliseconds{1}); } - auto client_thread = std::thread{[&]() - { - std::vector> tasks{}; - for (size_t i = 0; i < connections; ++i) - { - tasks.emplace_back(make_client_task(histogram_mutex)); - } - coro::sync_wait(coro::when_all(std::move(tasks))); - }}; + auto client_thread = + std::thread{[&]() + { + std::vector> tasks{}; + for (size_t i = 0; i < connections; ++i) + { + tasks.emplace_back(make_client_task( + client_scheduler, msg, clients_completed, g_histogram, histogram_mutex)); + } + coro::sync_wait(coro::when_all(std::move(tasks))); + }}; std::cerr << "joining client thread...\n"; client_thread.join(); diff --git a/test/net/test_dns_resolver.cpp b/test/net/test_dns_resolver.cpp index 4c69fc70..9e2cd135 100644 --- a/test/net/test_dns_resolver.cpp +++ b/test/net/test_dns_resolver.cpp @@ -12,7 +12,9 @@ TEST_CASE("dns_resolver basic", "[dns]") coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}); coro::net::dns::resolver dns_resolver{scheduler, std::chrono::milliseconds{5000}}; - auto make_host_by_name_task = [&](coro::net::hostname hn) -> coro::task + auto make_host_by_name_task = [](std::shared_ptr& scheduler, + coro::net::dns::resolver& dns_resolver, + coro::net::hostname hn) -> coro::task { co_await scheduler->schedule(); auto result_ptr = co_await std::move(dns_resolver.host_by_name(hn)); @@ -28,7 +30,7 @@ TEST_CASE("dns_resolver basic", "[dns]") co_return; }; - coro::sync_wait(make_host_by_name_task(coro::net::hostname{"www.example.com"})); + coro::sync_wait(make_host_by_name_task(scheduler, dns_resolver, coro::net::hostname{"www.example.com"})); std::cerr << "io_scheduler.size() before shutdown = " << scheduler->size() << "\n"; scheduler->shutdown(); diff --git a/test/net/test_tcp_server.cpp b/test/net/test_tcp_server.cpp index 09966482..2391d1e8 100644 --- a/test/net/test_tcp_server.cpp +++ b/test/net/test_tcp_server.cpp @@ -14,7 +14,9 @@ TEST_CASE("tcp_server ping server", "[tcp_server]") auto scheduler = coro::io_scheduler::make_shared( coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}); - auto make_client_task = [&]() -> coro::task + auto make_client_task = [](std::shared_ptr& scheduler, + const std::string& client_msg, + const std::string& server_msg) -> coro::task { co_await scheduler->schedule(); coro::net::tcp::client client{scheduler}; @@ -47,7 +49,9 @@ TEST_CASE("tcp_server ping server", "[tcp_server]") co_return; }; - auto make_server_task = [&]() -> coro::task + auto make_server_task = [](std::shared_ptr& scheduler, + const std::string& client_msg, + const std::string& server_msg) -> coro::task { co_await scheduler->schedule(); coro::net::tcp::server server{scheduler}; @@ -83,7 +87,8 @@ TEST_CASE("tcp_server ping server", "[tcp_server]") co_return; }; - coro::sync_wait(coro::when_all(make_server_task(), make_client_task())); + 
coro::sync_wait(coro::when_all( + make_server_task(scheduler, client_msg, server_msg), make_client_task(scheduler, client_msg, server_msg))); } TEST_CASE("tcp_server concurrent polling on the same socket", "[tcp_server]") @@ -94,14 +99,14 @@ TEST_CASE("tcp_server concurrent polling on the same socket", "[tcp_server]") auto scheduler = coro::io_scheduler::make_shared(coro::io_scheduler::options{ .execution_strategy = coro::io_scheduler::execution_strategy_t::process_tasks_inline}); - auto make_read_task = [](coro::net::tcp::client client) -> coro::task + auto make_server_task = [](std::shared_ptr& scheduler) -> coro::task { - co_await client.poll(coro::poll_op::read, 2s); - co_return; - }; + auto make_read_task = [](coro::net::tcp::client client) -> coro::task + { + co_await client.poll(coro::poll_op::read, 2s); + co_return; + }; - auto make_server_task = [&]() -> coro::task - { co_await scheduler->schedule(); coro::net::tcp::server server{scheduler}; @@ -139,7 +144,7 @@ TEST_CASE("tcp_server concurrent polling on the same socket", "[tcp_server]") co_return data; }; - auto make_client_task = [&]() -> coro::task + auto make_client_task = [](std::shared_ptr& scheduler) -> coro::task { co_await scheduler->schedule(); coro::net::tcp::client client{scheduler}; @@ -162,7 +167,7 @@ TEST_CASE("tcp_server concurrent polling on the same socket", "[tcp_server]") co_return response; }; - auto result = coro::sync_wait(coro::when_all(make_server_task(), make_client_task())); + auto result = coro::sync_wait(coro::when_all(make_server_task(scheduler), make_client_task(scheduler))); auto request = std::move(std::get<0>(result).return_value()); auto response = std::move(std::get<1>(result).return_value()); diff --git a/test/net/test_tls_server.cpp b/test/net/test_tls_server.cpp index 230d4d43..ecd3cf8b 100644 --- a/test/net/test_tls_server.cpp +++ b/test/net/test_tls_server.cpp @@ -12,10 +12,12 @@ TEST_CASE("tls_server hello world server", "[tls_server]") auto scheduler = coro::io_scheduler::make_shared( coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}); - std::string client_msg = "Hello world from TLS client!"; - std::string server_msg = "Hello world from TLS server!!"; + const std::string client_msg = "Hello world from TLS client!"; + const std::string server_msg = "Hello world from TLS server!!"; - auto make_client_task = [&]() -> coro::task + auto make_client_task = [](std::shared_ptr scheduler, + const std::string& client_msg, + const std::string& server_msg) -> coro::task { co_await scheduler->schedule(); @@ -48,7 +50,9 @@ TEST_CASE("tls_server hello world server", "[tls_server]") co_return; }; - auto make_server_task = [&]() -> coro::task + auto make_server_task = [](std::shared_ptr scheduler, + const std::string& client_msg, + const std::string& server_msg) -> coro::task { co_await scheduler->schedule(); @@ -84,7 +88,8 @@ TEST_CASE("tls_server hello world server", "[tls_server]") co_return; }; - coro::sync_wait(coro::when_all(make_server_task(), make_client_task())); + coro::sync_wait(coro::when_all( + make_server_task(scheduler, client_msg, server_msg), make_client_task(scheduler, client_msg, server_msg))); } #endif // LIBCORO_FEATURE_TLS diff --git a/test/net/test_udp_peers.cpp b/test/net/test_udp_peers.cpp index bfa6b7dc..a79ddb5b 100644 --- a/test/net/test_udp_peers.cpp +++ b/test/net/test_udp_peers.cpp @@ -11,7 +11,7 @@ TEST_CASE("udp one way") auto scheduler = coro::io_scheduler::make_shared( coro::io_scheduler::options{.pool = 
coro::thread_pool::options{.thread_count = 1}}); - auto make_send_task = [&]() -> coro::task + auto make_send_task = [](std::shared_ptr scheduler, const std::string& msg) -> coro::task { co_await scheduler->schedule(); coro::net::udp::peer peer{scheduler}; @@ -24,7 +24,7 @@ TEST_CASE("udp one way") co_return; }; - auto make_recv_task = [&]() -> coro::task + auto make_recv_task = [](std::shared_ptr scheduler, const std::string& msg) -> coro::task { co_await scheduler->schedule(); coro::net::udp::peer::info self_info{.address = coro::net::ip_address::from_string("0.0.0.0")}; @@ -46,7 +46,7 @@ TEST_CASE("udp one way") co_return; }; - coro::sync_wait(coro::when_all(make_recv_task(), make_send_task())); + coro::sync_wait(coro::when_all(make_recv_task(scheduler, msg), make_send_task(scheduler, msg))); } TEST_CASE("udp echo peers") @@ -57,12 +57,12 @@ TEST_CASE("udp echo peers") auto scheduler = coro::io_scheduler::make_shared( coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}); - auto make_peer_task = [&scheduler]( - uint16_t my_port, - uint16_t peer_port, - bool send_first, - const std::string my_msg, - const std::string peer_msg) -> coro::task + auto make_peer_task = [](std::shared_ptr scheduler, + uint16_t my_port, + uint16_t peer_port, + bool send_first, + const std::string my_msg, + const std::string peer_msg) -> coro::task { co_await scheduler->schedule(); coro::net::udp::peer::info my_info{.address = coro::net::ip_address::from_string("0.0.0.0"), .port = my_port}; @@ -118,8 +118,8 @@ TEST_CASE("udp echo peers") }; coro::sync_wait(coro::when_all( - make_peer_task(8081, 8080, false, peer2_msg, peer1_msg), - make_peer_task(8080, 8081, true, peer1_msg, peer2_msg))); + make_peer_task(scheduler, 8081, 8080, false, peer2_msg, peer1_msg), + make_peer_task(scheduler, 8080, 8081, true, peer1_msg, peer2_msg))); } #endif // LIBCORO_FEATURE_NETWORKING diff --git a/test/test_event.cpp b/test/test_event.cpp index a0cfe489..c33af39f 100644 --- a/test/test_event.cpp +++ b/test/test_event.cpp @@ -9,13 +9,13 @@ TEST_CASE("event single awaiter", "[event]") { coro::event e{}; - auto func = [&]() -> coro::task + auto func = [](coro::event& e) -> coro::task { co_await e; co_return 42; }; - auto task = func(); + auto task = func(e); task.resume(); REQUIRE_FALSE(task.is_ready()); @@ -105,7 +105,8 @@ TEST_CASE("event fifo", "[event]") std::atomic counter{0}; - auto make_waiter = [&](uint64_t value) -> coro::task + auto make_waiter = + [](coro::thread_pool& tp, coro::event& e, std::atomic& counter, uint64_t value) -> coro::task { co_await tp.schedule(); co_await e; @@ -116,7 +117,7 @@ TEST_CASE("event fifo", "[event]") co_return; }; - auto make_setter = [&]() -> coro::task + auto make_setter = [](coro::thread_pool& tp, coro::event& e, std::atomic& counter) -> coro::task { co_await tp.schedule(); REQUIRE(counter == 0); @@ -124,8 +125,13 @@ TEST_CASE("event fifo", "[event]") co_return; }; - coro::sync_wait( - coro::when_all(make_waiter(1), make_waiter(2), make_waiter(3), make_waiter(4), make_waiter(5), make_setter())); + coro::sync_wait(coro::when_all( + make_waiter(tp, e, counter, 1), + make_waiter(tp, e, counter, 2), + make_waiter(tp, e, counter, 3), + make_waiter(tp, e, counter, 4), + make_waiter(tp, e, counter, 5), + make_setter(tp, e, counter))); REQUIRE(counter == 5); } @@ -139,7 +145,7 @@ TEST_CASE("event fifo none", "[event]") std::atomic counter{0}; - auto make_setter = [&]() -> coro::task + auto make_setter = [](coro::thread_pool& tp, coro::event& e, std::atomic& counter) -> 
coro::task { co_await tp.schedule(); REQUIRE(counter == 0); @@ -147,7 +153,7 @@ TEST_CASE("event fifo none", "[event]") co_return; }; - coro::sync_wait(coro::when_all(make_setter())); + coro::sync_wait(coro::when_all(make_setter(tp, e, counter))); REQUIRE(counter == 0); } @@ -161,7 +167,8 @@ TEST_CASE("event fifo single", "[event]") std::atomic counter{0}; - auto make_waiter = [&](uint64_t value) -> coro::task + auto make_waiter = + [](coro::thread_pool& tp, coro::event& e, std::atomic& counter, uint64_t value) -> coro::task { co_await tp.schedule(); co_await e; @@ -172,7 +179,7 @@ TEST_CASE("event fifo single", "[event]") co_return; }; - auto make_setter = [&]() -> coro::task + auto make_setter = [](coro::thread_pool& tp, coro::event& e, std::atomic& counter) -> coro::task { co_await tp.schedule(); REQUIRE(counter == 0); @@ -180,7 +187,7 @@ TEST_CASE("event fifo single", "[event]") co_return; }; - coro::sync_wait(coro::when_all(make_waiter(1), make_setter())); + coro::sync_wait(coro::when_all(make_waiter(tp, e, counter, 1), make_setter(tp, e, counter))); REQUIRE(counter == 1); } @@ -194,7 +201,8 @@ TEST_CASE("event fifo executor", "[event]") std::atomic counter{0}; - auto make_waiter = [&](uint64_t value) -> coro::task + auto make_waiter = + [](coro::thread_pool& tp, coro::event& e, std::atomic& counter, uint64_t value) -> coro::task { co_await tp.schedule(); co_await e; @@ -205,7 +213,7 @@ TEST_CASE("event fifo executor", "[event]") co_return; }; - auto make_setter = [&]() -> coro::task + auto make_setter = [](coro::thread_pool& tp, coro::event& e, std::atomic& counter) -> coro::task { co_await tp.schedule(); REQUIRE(counter == 0); @@ -213,8 +221,13 @@ TEST_CASE("event fifo executor", "[event]") co_return; }; - coro::sync_wait( - coro::when_all(make_waiter(1), make_waiter(2), make_waiter(3), make_waiter(4), make_waiter(5), make_setter())); + coro::sync_wait(coro::when_all( + make_waiter(tp, e, counter, 1), + make_waiter(tp, e, counter, 2), + make_waiter(tp, e, counter, 3), + make_waiter(tp, e, counter, 4), + make_waiter(tp, e, counter, 5), + make_setter(tp, e, counter))); REQUIRE(counter == 5); } @@ -228,7 +241,7 @@ TEST_CASE("event fifo none executor", "[event]") std::atomic counter{0}; - auto make_setter = [&]() -> coro::task + auto make_setter = [](coro::thread_pool& tp, coro::event& e, std::atomic& counter) -> coro::task { co_await tp.schedule(); REQUIRE(counter == 0); @@ -236,7 +249,7 @@ TEST_CASE("event fifo none executor", "[event]") co_return; }; - coro::sync_wait(coro::when_all(make_setter())); + coro::sync_wait(coro::when_all(make_setter(tp, e, counter))); REQUIRE(counter == 0); } @@ -250,7 +263,8 @@ TEST_CASE("event fifo single executor", "[event]") std::atomic counter{0}; - auto make_waiter = [&](uint64_t value) -> coro::task + auto make_waiter = + [](coro::thread_pool& tp, coro::event& e, std::atomic& counter, uint64_t value) -> coro::task { co_await tp.schedule(); co_await e; @@ -261,7 +275,7 @@ TEST_CASE("event fifo single executor", "[event]") co_return; }; - auto make_setter = [&]() -> coro::task + auto make_setter = [](coro::thread_pool& tp, coro::event& e, std::atomic& counter) -> coro::task { co_await tp.schedule(); REQUIRE(counter == 0); @@ -269,7 +283,7 @@ TEST_CASE("event fifo single executor", "[event]") co_return; }; - coro::sync_wait(coro::when_all(make_waiter(1), make_setter())); + coro::sync_wait(coro::when_all(make_waiter(tp, e, counter, 1), make_setter(tp, e, counter))); REQUIRE(counter == 1); } diff --git a/test/test_generator.cpp b/test/test_generator.cpp 
index 49163a3f..878231e0 100644 --- a/test/test_generator.cpp +++ b/test/test_generator.cpp @@ -4,10 +4,10 @@ TEST_CASE("generator single yield", "[generator]") { - std::string msg{"Hello World Generator!"}; - auto func = [&]() -> coro::generator { co_yield msg; }; + const std::string msg{"Hello World Generator!"}; + auto func = [](const std::string& msg) -> coro::generator { co_yield std::string{msg}; }; - for (const auto& v : func()) + for (const auto& v : func(msg)) { REQUIRE(v == msg); } @@ -43,15 +43,16 @@ TEST_CASE("generator infinite incrementing integer yield", "[generator]") TEST_CASE("generator satisfies view concept for compatibility with std::views::take") { auto counter = size_t{0}; - auto natural = [n = counter]() mutable -> coro::generator { + auto natural = [](size_t n) mutable -> coro::generator + { while (true) co_yield ++n; }; - auto nat = natural(); + auto nat = natural(counter); static_assert(std::ranges::view, "does not satisfy view concept"); SECTION("Count the items") { - for (auto&& n : natural() | std::views::take(5)) + for (auto&& n : natural(counter) | std::views::take(5)) { ++counter; REQUIRE(n == counter); diff --git a/test/test_io_scheduler.cpp b/test/test_io_scheduler.cpp index ff519bdf..e1c015b1 100644 --- a/test/test_io_scheduler.cpp +++ b/test/test_io_scheduler.cpp @@ -21,13 +21,13 @@ TEST_CASE("io_scheduler schedule single task", "[io_scheduler]") auto s = coro::io_scheduler::make_shared( coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}); - auto make_task = [&]() -> coro::task + auto make_task = [](std::shared_ptr s) -> coro::task { co_await s->schedule(); co_return 42; }; - auto value = coro::sync_wait(make_task()); + auto value = coro::sync_wait(make_task(s)); REQUIRE(value == 42); std::cerr << "io_scheduler.size() before shutdown = " << s->size() << "\n"; s->shutdown(); @@ -43,7 +43,7 @@ TEST_CASE("io_scheduler submit mutiple tasks", "[io_scheduler]") tasks.reserve(n); auto s = coro::io_scheduler::make_shared(); - auto make_task = [&]() -> coro::task + auto make_task = [](std::shared_ptr s, std::atomic& counter) -> coro::task { co_await s->schedule(); counter++; @@ -51,7 +51,7 @@ TEST_CASE("io_scheduler submit mutiple tasks", "[io_scheduler]") }; for (std::size_t i = 0; i < n; ++i) { - tasks.emplace_back(make_task()); + tasks.emplace_back(make_task(s, counter)); } coro::sync_wait(coro::when_all(std::move(tasks))); @@ -74,7 +74,11 @@ TEST_CASE("io_scheduler task with multiple events", "[io_scheduler]") coro::event e2; coro::event e3; - auto make_wait_task = [&]() -> coro::task + auto make_wait_task = [](std::shared_ptr s, + std::atomic& counter, + coro::event& e1, + coro::event& e2, + coro::event& e3) -> coro::task { co_await s->schedule(); co_await e1; @@ -86,13 +90,14 @@ TEST_CASE("io_scheduler task with multiple events", "[io_scheduler]") co_return; }; - auto make_set_task = [&](coro::event& e) -> coro::task + auto make_set_task = [](std::shared_ptr s, coro::event& e) -> coro::task { co_await s->schedule(); e.set(); }; - coro::sync_wait(coro::when_all(make_wait_task(), make_set_task(e1), make_set_task(e2), make_set_task(e3))); + coro::sync_wait(coro::when_all( + make_wait_task(s, counter, e1, e2, e3), make_set_task(s, e1), make_set_task(s, e2), make_set_task(s, e3))); REQUIRE(counter == 3); @@ -108,7 +113,7 @@ TEST_CASE("io_scheduler task with read poll", "[io_scheduler]") auto s = coro::io_scheduler::make_shared( coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}); - auto 
make_poll_read_task = [&]() -> coro::task + auto make_poll_read_task = [](std::shared_ptr s, int trigger_fd) -> coro::task { co_await s->schedule(); auto status = co_await s->poll(trigger_fd, coro::poll_op::read); @@ -116,7 +121,7 @@ TEST_CASE("io_scheduler task with read poll", "[io_scheduler]") co_return; }; - auto make_poll_write_task = [&]() -> coro::task + auto make_poll_write_task = [](std::shared_ptr s, int trigger_fd) -> coro::task { co_await s->schedule(); uint64_t value{42}; @@ -125,7 +130,7 @@ TEST_CASE("io_scheduler task with read poll", "[io_scheduler]") co_return; }; - coro::sync_wait(coro::when_all(make_poll_read_task(), make_poll_write_task())); + coro::sync_wait(coro::when_all(make_poll_read_task(s, trigger_fd), make_poll_write_task(s, trigger_fd))); std::cerr << "io_scheduler.size() before shutdown = " << s->size() << "\n"; s->shutdown(); @@ -140,7 +145,7 @@ TEST_CASE("io_scheduler task with read poll with timeout", "[io_scheduler]") auto s = coro::io_scheduler::make_shared( coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}); - auto make_poll_read_task = [&]() -> coro::task + auto make_poll_read_task = [](std::shared_ptr s, int trigger_fd) -> coro::task { co_await s->schedule(); // Poll with a timeout but don't timeout. @@ -149,7 +154,7 @@ TEST_CASE("io_scheduler task with read poll with timeout", "[io_scheduler]") co_return; }; - auto make_poll_write_task = [&]() -> coro::task + auto make_poll_write_task = [](std::shared_ptr s, int trigger_fd) -> coro::task { co_await s->schedule(); uint64_t value{42}; @@ -158,7 +163,7 @@ TEST_CASE("io_scheduler task with read poll with timeout", "[io_scheduler]") co_return; }; - coro::sync_wait(coro::when_all(make_poll_read_task(), make_poll_write_task())); + coro::sync_wait(coro::when_all(make_poll_read_task(s, trigger_fd), make_poll_write_task(s, trigger_fd))); std::cerr << "io_scheduler.size() before shutdown = " << s->size() << "\n"; s->shutdown(); @@ -173,7 +178,7 @@ TEST_CASE("io_scheduler task with read poll timeout", "[io_scheduler]") auto s = coro::io_scheduler::make_shared( coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}); - auto make_task = [&]() -> coro::task + auto make_task = [](std::shared_ptr s, int trigger_fd) -> coro::task { co_await s->schedule(); // Poll with a timeout and timeout. @@ -182,7 +187,7 @@ TEST_CASE("io_scheduler task with read poll timeout", "[io_scheduler]") co_return; }; - coro::sync_wait(make_task()); + coro::sync_wait(make_task(s, trigger_fd)); std::cerr << "io_scheduler.size() before shutdown = " << s->size() << "\n"; s->shutdown(); @@ -200,7 +205,7 @@ TEST_CASE("io_scheduler separate thread resume", "[io_scheduler]") coro::event e{}; - auto make_s1_task = [&]() -> coro::task + auto make_s1_task = [](std::shared_ptr s1, coro::event& e) -> coro::task { co_await s1->schedule(); auto tid = std::this_thread::get_id(); @@ -212,7 +217,7 @@ TEST_CASE("io_scheduler separate thread resume", "[io_scheduler]") co_return; }; - auto make_s2_task = [&]() -> coro::task + auto make_s2_task = [](std::shared_ptr s2, coro::event& e) -> coro::task { co_await s2->schedule(); // Wait a bit to be sure the wait on 'e' in the other scheduler is done first. 
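Every hunk in these test diffs applies the same refactor: coroutine lambdas stop capturing their state with `[&]` and instead receive it through parameters. The reason is a C++ coroutine lifetime rule: the coroutine frame stores only a pointer to the lambda's closure object, so if the closure is destroyed while the coroutine is still suspended, all of its by-reference captures dangle; function parameters, in contrast, are copied (or, for reference parameters, bound) into the frame itself and live as long as the coroutine. A minimal standalone sketch of the two patterns, assuming the libcoro headers are available; the `main()` wrapper is illustrative and not part of any patch:

```C++
#include <coro/coro.hpp>

#include <atomic>
#include <cstdint>

int main()
{
    coro::thread_pool tp{coro::thread_pool::options{.thread_count = 1}};
    std::atomic<uint64_t> counter{0};

    // Unsafe pattern being removed: with [&], the captured state lives in the
    // closure object and the coroutine frame only points at it, so the
    // captures dangle if the closure dies while the coroutine is suspended.
    // auto bad = [&]() -> coro::task<void> { co_await tp.schedule(); ++counter; co_return; };

    // Safe pattern used throughout these hunks: pass the state as parameters,
    // which are stored in the coroutine frame for the coroutine's lifetime.
    auto good = [](coro::thread_pool& tp, std::atomic<uint64_t>& counter) -> coro::task<void>
    {
        co_await tp.schedule();
        ++counter;
        co_return;
    };

    coro::sync_wait(good(tp, counter));
    return counter.load() == 1 ? 0 : 1;
}
```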
@@ -221,7 +226,7 @@ TEST_CASE("io_scheduler separate thread resume", "[io_scheduler]") co_return; }; - coro::sync_wait(coro::when_all(make_s1_task(), make_s2_task())); + coro::sync_wait(coro::when_all(make_s1_task(s1, e), make_s2_task(s2, e))); s1->shutdown(); REQUIRE(s1->empty()); @@ -234,7 +239,7 @@ TEST_CASE("io_scheduler separate thread resume spawned thread", "[io_scheduler]" auto s = coro::io_scheduler::make_shared( coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}); - auto make_task = [&]() -> coro::task + auto make_task = [](std::shared_ptr s) -> coro::task { co_await s->schedule(); coro::event e{}; @@ -259,7 +264,7 @@ TEST_CASE("io_scheduler separate thread resume spawned thread", "[io_scheduler]" REQUIRE(tid == std::this_thread::get_id()); }; - coro::sync_wait(make_task()); + coro::sync_wait(make_task(s)); s->shutdown(); REQUIRE(s->empty()); @@ -287,23 +292,29 @@ TEST_CASE("io_scheduler separate thread resume with return", "[io_scheduler]") service_done.set(*s); }}; - auto third_party_service = [&](int multiplier) -> coro::task + auto make_task = [](std::shared_ptr s, + coro::event& start_service, + coro::event& service_done, + std::atomic& output) -> coro::task { - start_service.set(); - co_await service_done; - co_return output* multiplier; - }; + auto third_party_service = [](coro::event& start_service, + coro::event& service_done, + int multiplier, + std::atomic& output) -> coro::task + { + start_service.set(); + co_await service_done; + co_return output* multiplier; + }; - auto make_task = [&]() -> coro::task - { co_await s->schedule(); int multiplier{5}; - uint64_t value = co_await third_party_service(multiplier); + uint64_t value = co_await third_party_service(start_service, service_done, multiplier, output); REQUIRE(value == (expected_value * multiplier)); }; - coro::sync_wait(make_task()); + coro::sync_wait(make_task(s, start_service, service_done, output)); service.join(); std::cerr << "io_scheduler.size() before shutdown = " << s->size() << "\n"; @@ -318,17 +329,18 @@ TEST_CASE("io_scheduler with basic task", "[io_scheduler]") auto s = coro::io_scheduler::make_shared( coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}); - auto add_data = [&](uint64_t val) -> coro::task + auto func = [](std::shared_ptr s) -> coro::task { - co_await s->schedule(); - co_return val; - }; + auto add_data = [](std::shared_ptr s, uint64_t val) -> coro::task + { + co_await s->schedule(); + co_return val; + }; - auto func = [&]() -> coro::task - { co_await s->schedule(); - auto output_tasks = co_await coro::when_all(add_data(1), add_data(1), add_data(1), add_data(1), add_data(1)); + auto output_tasks = + co_await coro::when_all(add_data(s, 1), add_data(s, 1), add_data(s, 1), add_data(s, 1), add_data(s, 1)); int counter{0}; std::apply([&counter](auto&&... 
tasks) -> void { ((counter += tasks.return_value()), ...); }, output_tasks); @@ -336,7 +348,7 @@ TEST_CASE("io_scheduler with basic task", "[io_scheduler]") co_return counter; }; - auto counter = coro::sync_wait(func()); + auto counter = coro::sync_wait(func(s)); REQUIRE(counter == expected_value); @@ -352,7 +364,10 @@ TEST_CASE("io_scheduler scheduler_after", "[io_scheduler]") std::atomic counter{0}; std::thread::id tid; - auto func = [&](coro::io_scheduler& s, std::chrono::milliseconds amount) -> coro::task + auto func = [](coro::io_scheduler& s, + std::chrono::milliseconds amount, + std::atomic& counter, + std::thread::id& tid) -> coro::task { co_await s.schedule_after(amount); ++counter; @@ -366,7 +381,7 @@ TEST_CASE("io_scheduler scheduler_after", "[io_scheduler]") .pool = coro::thread_pool::options{ .thread_count = 1, .on_thread_start_functor = [&](std::size_t) { tid = std::this_thread::get_id(); }}}); auto start = std::chrono::steady_clock::now(); - coro::sync_wait(func(*s, 0ms)); + coro::sync_wait(func(*s, 0ms, counter, tid)); auto stop = std::chrono::steady_clock::now(); auto duration = std::chrono::duration_cast(stop - start); @@ -384,7 +399,7 @@ TEST_CASE("io_scheduler scheduler_after", "[io_scheduler]") .thread_count = 1, .on_thread_start_functor = [&](std::size_t) { tid = std::this_thread::get_id(); }}}); auto start = std::chrono::steady_clock::now(); - coro::sync_wait(func(*s, wait_for)); + coro::sync_wait(func(*s, wait_for, counter, tid)); auto stop = std::chrono::steady_clock::now(); auto duration = std::chrono::duration_cast(stop - start); @@ -409,7 +424,10 @@ TEST_CASE("io_scheduler schedule_at", "[io_scheduler]") .pool = coro::thread_pool::options{ .thread_count = 1, .on_thread_start_functor = [&](std::size_t) { tid = std::this_thread::get_id(); }}}); - auto func = [&](std::chrono::steady_clock::time_point time) -> coro::task + auto func = [](std::shared_ptr s, + std::atomic& counter, + std::chrono::steady_clock::time_point time, + std::thread::id& tid) -> coro::task { co_await s->schedule_at(time); ++counter; @@ -419,7 +437,7 @@ TEST_CASE("io_scheduler schedule_at", "[io_scheduler]") { auto start = std::chrono::steady_clock::now(); - coro::sync_wait(func(std::chrono::steady_clock::now() + wait_for)); + coro::sync_wait(func(s, counter, std::chrono::steady_clock::now() + wait_for, tid)); auto stop = std::chrono::steady_clock::now(); auto duration = std::chrono::duration_cast(stop - start); @@ -429,7 +447,7 @@ TEST_CASE("io_scheduler schedule_at", "[io_scheduler]") { auto start = std::chrono::steady_clock::now(); - coro::sync_wait(func(std::chrono::steady_clock::now())); + coro::sync_wait(func(s, counter, std::chrono::steady_clock::now(), tid)); auto stop = std::chrono::steady_clock::now(); auto duration = std::chrono::duration_cast(stop - start); @@ -439,7 +457,7 @@ TEST_CASE("io_scheduler schedule_at", "[io_scheduler]") { auto start = std::chrono::steady_clock::now(); - coro::sync_wait(func(std::chrono::steady_clock::now() - 1s)); + coro::sync_wait(func(s, counter, std::chrono::steady_clock::now() - 1s, tid)); auto stop = std::chrono::steady_clock::now(); auto duration = std::chrono::duration_cast(stop - start); @@ -455,7 +473,7 @@ TEST_CASE("io_scheduler yield", "[io_scheduler]") .pool = coro::thread_pool::options{ .thread_count = 1, .on_thread_start_functor = [&](std::size_t) { tid = std::this_thread::get_id(); }}}); - auto func = [&]() -> coro::task + auto func = [](std::shared_ptr s, std::thread::id& tid) -> coro::task { REQUIRE(tid != std::this_thread::get_id()); 
co_await s->schedule(); @@ -465,7 +483,7 @@ TEST_CASE("io_scheduler yield", "[io_scheduler]") co_return; }; - coro::sync_wait(func()); + coro::sync_wait(func(s, tid)); std::cerr << "io_scheduler.size() before shutdown = " << s->size() << "\n"; s->shutdown(); @@ -480,7 +498,8 @@ TEST_CASE("io_scheduler yield_for", "[io_scheduler]") const std::chrono::milliseconds wait_for{50}; - auto make_task = [&]() -> coro::task + auto make_task = [](std::shared_ptr s, + std::chrono::milliseconds wait_for) -> coro::task { co_await s->schedule(); auto start = std::chrono::steady_clock::now(); @@ -488,7 +507,7 @@ TEST_CASE("io_scheduler yield_for", "[io_scheduler]") co_return std::chrono::duration_cast(std::chrono::steady_clock::now() - start); }; - auto duration = coro::sync_wait(make_task()); + auto duration = coro::sync_wait(make_task(s, wait_for)); REQUIRE(duration >= wait_for); std::cerr << "io_scheduler.size() before shutdown = " << s->size() << "\n"; @@ -506,7 +525,8 @@ TEST_CASE("io_scheduler yield_until", "[io_scheduler]") const std::chrono::milliseconds epsilon{3}; const std::chrono::milliseconds wait_for{50}; - auto make_task = [&]() -> coro::task + auto make_task = [](std::shared_ptr s, + std::chrono::milliseconds wait_for) -> coro::task { co_await s->schedule(); auto start = std::chrono::steady_clock::now(); @@ -514,7 +534,7 @@ TEST_CASE("io_scheduler yield_until", "[io_scheduler]") co_return std::chrono::duration_cast(std::chrono::steady_clock::now() - start); }; - auto duration = coro::sync_wait(make_task()); + auto duration = coro::sync_wait(make_task(s, wait_for)); REQUIRE(duration >= (wait_for - epsilon)); std::cerr << "io_scheduler.size() before shutdown = " << s->size() << "\n"; @@ -529,19 +549,19 @@ TEST_CASE("io_scheduler multipler event waiters", "[io_scheduler]") coro::event e{}; auto s = coro::io_scheduler::make_shared(); - auto func = [&]() -> coro::task + auto spawn = [](std::shared_ptr s, coro::event& e) -> coro::task { - co_await e; - co_return 1; - }; + auto func = [](coro::event& e) -> coro::task + { + co_await e; + co_return 1; + }; - auto spawn = [&]() -> coro::task - { co_await s->schedule(); std::vector> tasks; for (size_t i = 0; i < total; ++i) { - tasks.emplace_back(func()); + tasks.emplace_back(func(e)); } auto results = co_await coro::when_all(std::move(tasks)); @@ -554,13 +574,13 @@ TEST_CASE("io_scheduler multipler event waiters", "[io_scheduler]") REQUIRE(counter == total); }; - auto release = [&]() -> coro::task + auto release = [](std::shared_ptr s, coro::event& e) -> coro::task { co_await s->schedule_after(10ms); e.set(*s); }; - coro::sync_wait(coro::when_all(spawn(), release())); + coro::sync_wait(coro::when_all(spawn(s, e), release(s, e))); std::cerr << "io_scheduler.size() before shutdown = " << s->size() << "\n"; s->shutdown(); @@ -578,7 +598,10 @@ TEST_CASE("io_scheduler self generating coroutine (stack overflow check)", "[io_ std::vector> tasks; tasks.reserve(total); - auto func = [&](auto f) -> coro::task + auto func = [](std::shared_ptr& s, + uint64_t& counter, + auto f, + std::vector>& tasks) -> coro::task { co_await s->schedule(); ++counter; @@ -591,12 +614,12 @@ TEST_CASE("io_scheduler self generating coroutine (stack overflow check)", "[io_ // co_await f(f) _will_ stack overflow since each coroutine links to its parent, by storing // each new invocation into the vector they are not linked, but we can make sure the scheduler // doesn't choke on this many tasks being scheduled. 
- tasks.emplace_back(f(f)); + tasks.emplace_back(f(s, counter, f, tasks)); tasks.back().resume(); co_return; }; - coro::sync_wait(func(func)); + coro::sync_wait(func(s, counter, func, tasks)); while (tasks.size() < total - 1) { @@ -622,7 +645,8 @@ TEST_CASE("io_scheduler manual process events thread pool", "[io_scheduler]") std::atomic polling{false}; - auto make_poll_read_task = [&]() -> coro::task + auto make_poll_read_task = + [](std::shared_ptr s, std::atomic& polling, int trigger_fd) -> coro::task { std::cerr << "poll task start s.size() == " << s->size() << "\n"; co_await s->schedule(); @@ -634,7 +658,7 @@ TEST_CASE("io_scheduler manual process events thread pool", "[io_scheduler]") co_return; }; - auto make_poll_write_task = [&]() -> coro::task + auto make_poll_write_task = [](std::shared_ptr s, int trigger_fd) -> coro::task { std::cerr << "write task start s.size() == " << s->size() << "\n"; co_await s->schedule(); @@ -646,8 +670,8 @@ TEST_CASE("io_scheduler manual process events thread pool", "[io_scheduler]") co_return; }; - auto poll_task = make_poll_read_task(); - auto write_task = make_poll_write_task(); + auto poll_task = make_poll_read_task(s, polling, trigger_fd); + auto write_task = make_poll_write_task(s, trigger_fd); poll_task.resume(); // get to co_await s.poll(); while (!polling) @@ -674,7 +698,7 @@ TEST_CASE("io_scheduler manual process events inline", "[io_scheduler]") .thread_strategy = coro::io_scheduler::thread_strategy_t::manual, .execution_strategy = coro::io_scheduler::execution_strategy_t::process_tasks_inline}); - auto make_poll_read_task = [&]() -> coro::task + auto make_poll_read_task = [](std::shared_ptr s, int trigger_fd) -> coro::task { std::cerr << "poll task start s.size() == " << s->size() << "\n"; co_await s->schedule(); @@ -685,7 +709,7 @@ TEST_CASE("io_scheduler manual process events inline", "[io_scheduler]") co_return; }; - auto make_poll_write_task = [&]() -> coro::task + auto make_poll_write_task = [](std::shared_ptr s, int trigger_fd) -> coro::task { std::cerr << "write task start s.size() == " << s->size() << "\n"; co_await s->schedule(); @@ -697,8 +721,8 @@ TEST_CASE("io_scheduler manual process events inline", "[io_scheduler]") co_return; }; - auto poll_task = make_poll_read_task(); - auto write_task = make_poll_write_task(); + auto poll_task = make_poll_read_task(s, trigger_fd); + auto write_task = make_poll_write_task(s, trigger_fd); // Start the tasks by scheduling them into the io scheduler. 
poll_task.resume(); @@ -727,14 +751,14 @@ TEST_CASE("io_scheduler task throws", "[io_scheduler]") auto s = coro::io_scheduler::make_shared( coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}); - auto func = [&]() -> coro::task + auto func = [](std::shared_ptr s) -> coro::task { co_await s->schedule(); throw std::runtime_error{"I always throw."}; co_return 42; }; - REQUIRE_THROWS(coro::sync_wait(func())); + REQUIRE_THROWS(coro::sync_wait(func(s))); } TEST_CASE("io_scheduler task throws after resume", "[io_scheduler]") @@ -742,7 +766,7 @@ TEST_CASE("io_scheduler task throws after resume", "[io_scheduler]") auto s = coro::io_scheduler::make_shared( coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}); - auto make_thrower = [&]() -> coro::task + auto make_thrower = [](std::shared_ptr s) -> coro::task { co_await s->schedule(); std::cerr << "Throwing task is doing some work...\n"; @@ -751,18 +775,19 @@ TEST_CASE("io_scheduler task throws after resume", "[io_scheduler]") co_return true; }; - REQUIRE_THROWS(coro::sync_wait(make_thrower())); + REQUIRE_THROWS(coro::sync_wait(make_thrower(s))); } TEST_CASE("issue-287", "[io_scheduler]") { const int ITERATIONS = 200000; - std::atomic g_count = 0; - auto scheduler = coro::io_scheduler::make_shared( - coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}); + std::atomic g_count = 0; + auto scheduler = coro::io_scheduler::make_shared( + coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}); - auto task = [](std::atomic& count) -> coro::task { + auto task = [](std::atomic& count) -> coro::task + { count++; co_return; }; diff --git a/test/test_latch.cpp b/test/test_latch.cpp index f3949d09..071b1d46 100644 --- a/test/test_latch.cpp +++ b/test/test_latch.cpp @@ -9,13 +9,13 @@ TEST_CASE("latch count=0", "[latch]") { coro::latch l{0}; - auto make_task = [&]() -> coro::task + auto make_task = [](coro::latch& l) -> coro::task { co_await l; co_return 42; }; - auto task = make_task(); + auto task = make_task(l); task.resume(); REQUIRE(task.is_ready()); // The latch never waits due to zero count. 
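The latch hunks here follow the same parameter-passing refactor. For reference, a short sketch of the `coro::latch` behavior these tests exercise, assuming libcoro and its `count_down()` member as used by the `count_down=N` test cases; the `main()` harness is illustrative only:

```C++
#include <coro/coro.hpp>

#include <cassert>

int main()
{
    coro::latch l{2};

    auto make_task = [](coro::latch& l) -> coro::task<void>
    {
        co_await l; // suspends until the latch count reaches zero
        co_return;
    };

    auto task = make_task(l);
    task.resume();            // run until the task suspends on the latch
    assert(!task.is_ready()); // two count_down() calls are still required

    l.count_down();           // remaining == 1, the waiter stays suspended
    assert(!task.is_ready());

    l.count_down();           // remaining == 0, the waiter resumes here
    assert(task.is_ready());
    return 0;
}
```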
@@ -26,14 +26,14 @@ TEST_CASE("latch count=1", "[latch]") { coro::latch l{1}; - auto make_task = [&]() -> coro::task + auto make_task = [](coro::latch& l) -> coro::task { auto workers = l.remaining(); co_await l; co_return workers; }; - auto task = make_task(); + auto task = make_task(l); task.resume(); REQUIRE_FALSE(task.is_ready()); @@ -47,14 +47,14 @@ TEST_CASE("latch count=1 count_down=5", "[latch]") { coro::latch l{1}; - auto make_task = [&]() -> coro::task + auto make_task = [](coro::latch& l) -> coro::task { auto workers = l.remaining(); co_await l; co_return workers; }; - auto task = make_task(); + auto task = make_task(l); task.resume(); REQUIRE_FALSE(task.is_ready()); @@ -68,14 +68,14 @@ TEST_CASE("latch count=5 count_down=1 x5", "[latch]") { coro::latch l{5}; - auto make_task = [&]() -> coro::task + auto make_task = [](coro::latch& l) -> coro::task { auto workers = l.remaining(); co_await l; co_return workers; }; - auto task = make_task(); + auto task = make_task(l); task.resume(); REQUIRE_FALSE(task.is_ready()); @@ -97,14 +97,14 @@ TEST_CASE("latch count=5 count_down=5", "[latch]") { coro::latch l{5}; - auto make_task = [&]() -> coro::task + auto make_task = [](coro::latch& l) -> coro::task { auto workers = l.remaining(); co_await l; co_return workers; }; - auto task = make_task(); + auto task = make_task(l); task.resume(); REQUIRE_FALSE(task.is_ready()); diff --git a/test/test_mutex.cpp b/test/test_mutex.cpp index e8be36e7..03835eee 100644 --- a/test/test_mutex.cpp +++ b/test/test_mutex.cpp @@ -12,7 +12,7 @@ TEST_CASE("mutex single waiter not locked", "[mutex]") coro::mutex m; - auto make_emplace_task = [&](coro::mutex& m) -> coro::task + auto make_emplace_task = [](coro::mutex& m, std::vector& output) -> coro::task { std::cerr << "Acquiring lock\n"; { @@ -31,7 +31,7 @@ TEST_CASE("mutex single waiter not locked", "[mutex]") co_return; }; - coro::sync_wait(make_emplace_task(m)); + coro::sync_wait(make_emplace_task(m, output)); REQUIRE(m.try_lock()); m.unlock(); @@ -50,7 +50,8 @@ TEST_CASE("mutex many waiters until event", "[mutex]") coro::mutex m; // acquires and holds the lock until the event is triggered coro::event e; // triggers the blocking thread to release the lock - auto make_task = [&](uint64_t id) -> coro::task + auto make_task = + [](coro::thread_pool& tp, coro::mutex& m, std::atomic& value, uint64_t id) -> coro::task { co_await tp.schedule(); std::cerr << "id = " << id << " waiting to acquire the lock\n"; @@ -65,7 +66,7 @@ TEST_CASE("mutex many waiters until event", "[mutex]") co_return; }; - auto make_block_task = [&]() -> coro::task + auto make_block_task = [](coro::thread_pool& tp, coro::mutex& m, coro::event& e) -> coro::task { co_await tp.schedule(); std::cerr << "block task acquiring lock\n"; @@ -76,7 +77,7 @@ TEST_CASE("mutex many waiters until event", "[mutex]") co_return; }; - auto make_set_task = [&]() -> coro::task + auto make_set_task = [](coro::thread_pool& tp, coro::event& e) -> coro::task { co_await tp.schedule(); std::cerr << "set task setting event\n"; @@ -85,15 +86,15 @@ TEST_CASE("mutex many waiters until event", "[mutex]") }; // Grab mutex so all threads block. - tasks.emplace_back(make_block_task()); + tasks.emplace_back(make_block_task(tp, m, e)); // Create N tasks that attempt to lock the mutex. 
for (uint64_t i = 1; i <= 4; ++i) { - tasks.emplace_back(make_task(i)); + tasks.emplace_back(make_task(tp, m, value, i)); } - tasks.emplace_back(make_set_task()); + tasks.emplace_back(make_set_task(tp, e)); coro::sync_wait(coro::when_all(std::move(tasks))); @@ -104,7 +105,7 @@ TEST_CASE("mutex scoped_lock unlock prior to scope exit", "[mutex]") { coro::mutex m; - auto make_task = [&]() -> coro::task + auto make_task = [](coro::mutex& m) -> coro::task { { auto lk = co_await m.lock(); @@ -115,5 +116,5 @@ TEST_CASE("mutex scoped_lock unlock prior to scope exit", "[mutex]") co_return; }; - coro::sync_wait(make_task()); -} \ No newline at end of file + coro::sync_wait(make_task(m)); +} diff --git a/test/test_ring_buffer.cpp b/test/test_ring_buffer.cpp index 39fe6ebb..374ffd5c 100644 --- a/test/test_ring_buffer.cpp +++ b/test/test_ring_buffer.cpp @@ -12,7 +12,7 @@ TEST_CASE("ring_buffer single element", "[ring_buffer]") std::vector output{}; - auto make_producer_task = [&]() -> coro::task + auto make_producer_task = [](coro::ring_buffer& rb, size_t iterations) -> coro::task { for (size_t i = 1; i <= iterations; ++i) { @@ -22,7 +22,8 @@ TEST_CASE("ring_buffer single element", "[ring_buffer]") co_return; }; - auto make_consumer_task = [&]() -> coro::task + auto make_consumer_task = + [](coro::ring_buffer& rb, size_t iterations, std::vector& output) -> coro::task { for (size_t i = 1; i <= iterations; ++i) { @@ -35,7 +36,7 @@ TEST_CASE("ring_buffer single element", "[ring_buffer]") co_return; }; - coro::sync_wait(coro::when_all(make_producer_task(), make_consumer_task())); + coro::sync_wait(coro::when_all(make_producer_task(rb, iterations), make_consumer_task(rb, iterations, output))); for (size_t i = 1; i <= iterations; ++i) { @@ -54,7 +55,7 @@ TEST_CASE("ring_buffer many elements many producers many consumers", "[ring_buff coro::thread_pool tp{coro::thread_pool::options{.thread_count = 4}}; coro::ring_buffer rb{}; - auto make_producer_task = [&]() -> coro::task + auto make_producer_task = [](coro::thread_pool& tp, coro::ring_buffer& rb) -> coro::task { co_await tp.schedule(); auto to_produce = iterations / producers; @@ -75,7 +76,7 @@ TEST_CASE("ring_buffer many elements many producers many consumers", "[ring_buff co_return; }; - auto make_consumer_task = [&]() -> coro::task + auto make_consumer_task = [](coro::thread_pool& tp, coro::ring_buffer& rb) -> coro::task { co_await tp.schedule(); @@ -101,11 +102,11 @@ TEST_CASE("ring_buffer many elements many producers many consumers", "[ring_buff for (size_t i = 0; i < consumers; ++i) { - tasks.emplace_back(make_consumer_task()); + tasks.emplace_back(make_consumer_task(tp, rb)); } for (size_t i = 0; i < producers; ++i) { - tasks.emplace_back(make_producer_task()); + tasks.emplace_back(make_producer_task(tp, rb)); } coro::sync_wait(coro::when_all(std::move(tasks))); @@ -125,7 +126,7 @@ TEST_CASE("ring_buffer producer consumer separate threads", "[ring_buffer]") coro::thread_pool producer_tp{coro::thread_pool::options{.thread_count = 1}}; coro::thread_pool consumer_tp{coro::thread_pool::options{.thread_count = 1}}; - auto make_producer_task = [&]() -> coro::task + auto make_producer_task = [](coro::thread_pool& producer_tp, coro::ring_buffer& rb) -> coro::task { for (size_t i = 0; i < iterations; ++i) { @@ -145,7 +146,7 @@ TEST_CASE("ring_buffer producer consumer separate threads", "[ring_buffer]") co_return; }; - auto make_consumer_task = [&]() -> coro::task + auto make_consumer_task = [](coro::thread_pool& consumer_tp, coro::ring_buffer& rb) -> 
coro::task { while (true) { @@ -166,8 +167,8 @@ TEST_CASE("ring_buffer producer consumer separate threads", "[ring_buffer]") }; std::vector> tasks{}; - tasks.emplace_back(make_producer_task()); - tasks.emplace_back(make_consumer_task()); + tasks.emplace_back(make_producer_task(producer_tp, rb)); + tasks.emplace_back(make_consumer_task(consumer_tp, rb)); coro::sync_wait(coro::when_all(std::move(tasks))); @@ -255,7 +256,7 @@ TEST_CASE("ring_buffer issue-242 default constructed complex objects on consume" coro::ring_buffer buffer; - const auto produce = [&buffer]() -> coro::task + const auto produce = [](coro::ring_buffer& buffer) -> coro::task { std::cerr << "enter produce coroutine\n"; example data{}; @@ -270,7 +271,7 @@ TEST_CASE("ring_buffer issue-242 default constructed complex objects on consume" co_return; }; - coro::sync_wait(produce()); + coro::sync_wait(produce(buffer)); std::cerr << "enter sync_wait\n"; auto result = coro::sync_wait(buffer.consume()); std::cerr << "exit sync_wait\n"; @@ -315,7 +316,7 @@ TEST_CASE("ring_buffer issue-242 default constructed complex objects on consume coro::ring_buffer buffer; - const auto produce = [&buffer]() -> coro::task + const auto produce = [](coro::ring_buffer& buffer) -> coro::task { example data{}; data.msg = {message{.id = 1, .text = "Hello World!"}}; @@ -327,7 +328,7 @@ TEST_CASE("ring_buffer issue-242 default constructed complex objects on consume co_return; }; - const auto consume = [&buffer]() -> coro::task + const auto consume = [](coro::ring_buffer& buffer) -> coro::task { auto result = co_await buffer.consume(); REQUIRE(result.has_value()); @@ -336,8 +337,8 @@ TEST_CASE("ring_buffer issue-242 default constructed complex objects on consume co_return std::move(data); }; - coro::sync_wait(produce()); - auto data = coro::sync_wait(consume()); + coro::sync_wait(produce(buffer)); + auto data = coro::sync_wait(consume(buffer)); REQUIRE(data.msg.has_value()); REQUIRE(data.msg.value().id == 1); @@ -351,13 +352,13 @@ TEST_CASE("ring_buffer issue-242 basic type", "[ring_buffer]") { coro::ring_buffer buffer; - const auto foo = [&buffer]() -> coro::task + const auto foo = [](coro::ring_buffer& buffer) -> coro::task { co_await buffer.produce(1); co_return; }; - coro::sync_wait(foo()); + coro::sync_wait(foo(buffer)); auto result = coro::sync_wait(buffer.consume()); REQUIRE(result); diff --git a/test/test_semaphore.cpp b/test/test_semaphore.cpp index 10945b60..c2a6d8ca 100644 --- a/test/test_semaphore.cpp +++ b/test/test_semaphore.cpp @@ -3,8 +3,8 @@ #include #include -#include #include +#include #include TEST_CASE("semaphore binary", "[semaphore]") @@ -13,7 +13,7 @@ TEST_CASE("semaphore binary", "[semaphore]") coro::semaphore s{1}; - auto make_emplace_task = [&](coro::semaphore& s) -> coro::task + auto make_emplace_task = [](coro::semaphore& s, std::vector& output) -> coro::task { std::cerr << "Acquiring semaphore\n"; co_await s.acquire(); @@ -32,7 +32,7 @@ TEST_CASE("semaphore binary", "[semaphore]") co_return; }; - coro::sync_wait(make_emplace_task(s)); + coro::sync_wait(make_emplace_task(s, output)); REQUIRE(s.value() == 1); REQUIRE(s.try_acquire()); @@ -52,7 +52,7 @@ TEST_CASE("semaphore binary many waiters until event", "[semaphore]") coro::semaphore s{1}; // acquires and holds the semaphore until the event is triggered coro::event e; // triggers the blocking thread to release the semaphore - auto make_task = [&](uint64_t id) -> coro::task + auto make_task = [](coro::semaphore& s, std::atomic& value, uint64_t id) -> coro::task { std::cerr << 
"id = " << id << " waiting to acquire the semaphore\n"; co_await s.acquire(); @@ -67,7 +67,7 @@ TEST_CASE("semaphore binary many waiters until event", "[semaphore]") co_return; }; - auto make_block_task = [&]() -> coro::task + auto make_block_task = [](coro::semaphore& s, coro::event& e) -> coro::task { std::cerr << "block task acquiring lock\n"; co_await s.acquire(); @@ -79,22 +79,22 @@ TEST_CASE("semaphore binary many waiters until event", "[semaphore]") co_return; }; - auto make_set_task = [&]() -> coro::task + auto make_set_task = [](coro::event& e) -> coro::task { std::cerr << "set task setting event\n"; e.set(); co_return; }; - tasks.emplace_back(make_block_task()); + tasks.emplace_back(make_block_task(s, e)); // Create N tasks that attempt to acquire the semaphore. for (uint64_t i = 1; i <= 4; ++i) { - tasks.emplace_back(make_task(i)); + tasks.emplace_back(make_task(s, value, i)); } - tasks.emplace_back(make_set_task()); + tasks.emplace_back(make_set_task(e)); coro::sync_wait(coro::when_all(std::move(tasks))); @@ -113,7 +113,8 @@ TEST_CASE("semaphore ringbuffer", "[semaphore]") coro::semaphore s{2, 2}; - auto make_consumer_task = [&](uint64_t id) -> coro::task + auto make_consumer_task = + [](coro::thread_pool& tp, coro::semaphore& s, std::atomic& value, uint64_t id) -> coro::task { co_await tp.schedule(); @@ -138,7 +139,7 @@ TEST_CASE("semaphore ringbuffer", "[semaphore]") co_return; }; - auto make_producer_task = [&]() -> coro::task + auto make_producer_task = [](coro::thread_pool& tp, coro::semaphore& s) -> coro::task { co_await tp.schedule(); @@ -158,8 +159,8 @@ TEST_CASE("semaphore ringbuffer", "[semaphore]") co_return; }; - tasks.emplace_back(make_producer_task()); - tasks.emplace_back(make_consumer_task(1)); + tasks.emplace_back(make_producer_task(tp, s)); + tasks.emplace_back(make_consumer_task(tp, s, value, 1)); coro::sync_wait(coro::when_all(std::move(tasks))); @@ -178,7 +179,8 @@ TEST_CASE("semaphore ringbuffer many producers and consumers", "[semaphore]") coro::thread_pool tp{}; // let er rip - auto make_consumer_task = [&](uint64_t id) -> coro::task + auto make_consumer_task = + [](coro::thread_pool& tp, coro::semaphore& s, std::atomic& value, uint64_t id) -> coro::task { co_await tp.schedule(); @@ -199,7 +201,8 @@ TEST_CASE("semaphore ringbuffer many producers and consumers", "[semaphore]") co_return; }; - auto make_producer_task = [&](uint64_t id) -> coro::task + auto make_producer_task = + [](coro::thread_pool& tp, coro::semaphore& s, std::atomic& value, uint64_t id) -> coro::task { co_await tp.schedule(); @@ -223,11 +226,11 @@ TEST_CASE("semaphore ringbuffer many producers and consumers", "[semaphore]") std::vector> tasks{}; for (size_t i = 0; i < consumers; ++i) { - tasks.emplace_back(make_consumer_task(i)); + tasks.emplace_back(make_consumer_task(tp, s, value, i)); } for (size_t i = 0; i < producers; ++i) { - tasks.emplace_back(make_producer_task(i)); + tasks.emplace_back(make_producer_task(tp, s, value, i)); } coro::sync_wait(coro::when_all(std::move(tasks))); diff --git a/test/test_shared_mutex.cpp b/test/test_shared_mutex.cpp index 8f9b64be..f9d87e4f 100644 --- a/test/test_shared_mutex.cpp +++ b/test/test_shared_mutex.cpp @@ -12,7 +12,8 @@ TEST_CASE("mutex single waiter not locked exclusive", "[shared_mutex]") coro::shared_mutex m{tp}; - auto make_emplace_task = [&](coro::shared_mutex& m) -> coro::task + auto make_emplace_task = [](coro::shared_mutex& m, + std::vector& output) -> coro::task { std::cerr << "Acquiring lock exclusive\n"; { @@ -31,7 +32,7 @@ 
TEST_CASE("mutex single waiter not locked exclusive", "[shared_mutex]") co_return; }; - coro::sync_wait(make_emplace_task(m)); + coro::sync_wait(make_emplace_task(m, output)); REQUIRE(m.try_lock()); m.unlock(); @@ -47,7 +48,8 @@ TEST_CASE("mutex single waiter not locked shared", "[shared_mutex]") coro::shared_mutex m{tp}; - auto make_emplace_task = [&](coro::shared_mutex& m) -> coro::task + auto make_emplace_task = [](coro::shared_mutex& m, + std::vector& values) -> coro::task { std::cerr << "Acquiring lock shared\n"; { @@ -71,7 +73,7 @@ TEST_CASE("mutex single waiter not locked shared", "[shared_mutex]") co_return; }; - coro::sync_wait(make_emplace_task(m)); + coro::sync_wait(make_emplace_task(m, values)); REQUIRE(m.try_lock_shared()); m.unlock_shared(); @@ -83,34 +85,25 @@ TEST_CASE("mutex single waiter not locked shared", "[shared_mutex]") #ifdef LIBCORO_FEATURE_NETWORKING TEST_CASE("mutex many shared and exclusive waiters interleaved", "[shared_mutex]") { - auto tp = coro::io_scheduler::make_shared( + auto s = coro::io_scheduler::make_shared( coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 8}}); - coro::shared_mutex m{tp}; + coro::shared_mutex m{s}; std::atomic read_value{false}; - auto make_shared_task = [&]() -> coro::task - { - co_await tp->schedule(); - std::cerr << "make_shared_task shared lock acquiring\n"; - auto scoped_lock = co_await m.lock_shared(); - std::cerr << "make_shared_task shared lock acquired\n"; - bool value = read_value.load(std::memory_order::acquire); - std::cerr << "make_shared_task shared lock releasing on thread_id = " << std::this_thread::get_id() << "\n"; - co_return value; - }; - - auto make_exclusive_task = [&]() -> coro::task + auto make_exclusive_task = [](std::shared_ptr& s, + coro::shared_mutex& m, + std::atomic& read_value) -> coro::task { // Let some readers get through. 
- co_await tp->yield_for(std::chrono::milliseconds{50}); + co_await s->yield_for(std::chrono::milliseconds{50}); { std::cerr << "make_shared_task exclusive lock acquiring\n"; auto scoped_lock = co_await m.lock(); std::cerr << "make_shared_task exclusive lock acquired\n"; // Stack readers on the mutex - co_await tp->yield_for(std::chrono::milliseconds{50}); + co_await s->yield_for(std::chrono::milliseconds{50}); read_value.exchange(true, std::memory_order::release); std::cerr << "make_shared_task exclusive lock releasing\n"; } @@ -118,19 +111,34 @@ TEST_CASE("mutex many shared and exclusive waiters interleaved", "[shared_mutex] co_return; }; - auto make_shared_tasks_task = [&]() -> coro::task + auto make_shared_tasks_task = [](std::shared_ptr& s, + coro::shared_mutex& m, + std::atomic& read_value) -> coro::task { - co_await tp->schedule(); + auto make_shared_task = [](std::shared_ptr& s, + coro::shared_mutex& m, + std::atomic& read_value) -> coro::task + { + co_await s->schedule(); + std::cerr << "make_shared_task shared lock acquiring\n"; + auto scoped_lock = co_await m.lock_shared(); + std::cerr << "make_shared_task shared lock acquired\n"; + bool value = read_value.load(std::memory_order::acquire); + std::cerr << "make_shared_task shared lock releasing on thread_id = " << std::this_thread::get_id() << "\n"; + co_return value; + }; + + co_await s->schedule(); std::vector> shared_tasks{}; bool stop{false}; while (!stop) { - shared_tasks.emplace_back(make_shared_task()); + shared_tasks.emplace_back(make_shared_task(s, m, read_value)); shared_tasks.back().resume(); - co_await tp->yield_for(std::chrono::milliseconds{1}); + co_await s->yield_for(std::chrono::milliseconds{1}); for (const auto& st : shared_tasks) { @@ -162,6 +170,6 @@ TEST_CASE("mutex many shared and exclusive waiters interleaved", "[shared_mutex] co_return; }; - coro::sync_wait(coro::when_all(make_shared_tasks_task(), make_exclusive_task())); + coro::sync_wait(coro::when_all(make_shared_tasks_task(s, m, read_value), make_exclusive_task(s, m, read_value))); } #endif // #ifdef LIBCORO_FEATURE_NETWORKING diff --git a/test/test_sync_wait.cpp b/test/test_sync_wait.cpp index 2099d2c1..1a7bcbab 100644 --- a/test/test_sync_wait.cpp +++ b/test/test_sync_wait.cpp @@ -18,26 +18,25 @@ TEST_CASE("sync_wait void", "[sync_wait]") { std::string output; - auto func = [&]() -> coro::task + auto func = [](std::string& output) -> coro::task { output = "hello from sync_wait\n"; co_return; }; - coro::sync_wait(func()); + coro::sync_wait(func(output)); REQUIRE(output == "hello from sync_wait\n"); } TEST_CASE("sync_wait task co_await single", "[sync_wait]") { - auto answer = []() -> coro::task - { - std::cerr << "\tThinking deep thoughts...\n"; - co_return 42; - }; - - auto await_answer = [&]() -> coro::task + auto await_answer = []() -> coro::task { + auto answer = []() -> coro::task + { + std::cerr << "\tThinking deep thoughts...\n"; + co_return 42; + }; std::cerr << "\tStarting to wait for answer.\n"; auto a = answer(); std::cerr << "\tGot the coroutine, getting the value.\n"; @@ -85,7 +84,8 @@ TEST_CASE("sync_wait very rarely hangs issue-270", "[sync_wait]") std::atomic count{0}; - auto make_task = [&](int i) -> coro::task + auto make_task = + [](coro::thread_pool& tp, std::unordered_set& data, std::atomic& count, int i) -> coro::task { co_await tp.schedule(); @@ -101,7 +101,7 @@ TEST_CASE("sync_wait very rarely hangs issue-270", "[sync_wait]") tasks.reserve(ITERATIONS); for (int i = 0; i < ITERATIONS; ++i) { - tasks.emplace_back(make_task(i)); + 
tasks.emplace_back(make_task(tp, data, count, i)); } coro::sync_wait(coro::when_all(std::move(tasks))); diff --git a/test/test_task.cpp b/test/test_task.cpp index 299e154f..5e0597ce 100644 --- a/test/test_task.cpp +++ b/test/test_task.cpp @@ -179,13 +179,13 @@ TEST_CASE("task multiple suspends return integer", "[task]") TEST_CASE("task resume from promise to coroutine handles of different types", "[task]") { - auto task1 = [&]() -> coro::task + auto task1 = []() -> coro::task { std::cerr << "Task ran\n"; co_return 42; }(); - auto task2 = [&]() -> coro::task + auto task2 = []() -> coro::task { std::cerr << "Task 2 ran\n"; co_return; @@ -323,8 +323,8 @@ TEST_CASE("task supports instantiation with rvalue reference", "[task]") // reference is supported. int i = 42; - auto make_task = [&i]() -> coro::task { co_return std::move(i); }; - int ret = coro::sync_wait(make_task()); + auto make_task = [](int& i) -> coro::task { co_return std::move(i); }; + int ret = coro::sync_wait(make_task(i)); REQUIRE(ret == 42); } @@ -416,10 +416,10 @@ TEST_CASE("task supports instantiation with non assignable type", "[task]") REQUIRE(move_copy_construct_only::move_count == 2); REQUIRE(move_copy_construct_only::copy_count == 1); - auto make_tuple_task = [&i]() -> coro::task> { + auto make_tuple_task = [](int i) -> coro::task> { co_return {i, i * 2}; }; - auto tuple_ret = coro::sync_wait(make_tuple_task()); + auto tuple_ret = coro::sync_wait(make_tuple_task(i)); REQUIRE(std::get<0>(tuple_ret) == 42); REQUIRE(std::get<1>(tuple_ret) == 84); diff --git a/test/test_thread_pool.cpp b/test/test_thread_pool.cpp index cf72be44..692d5303 100644 --- a/test/test_thread_pool.cpp +++ b/test/test_thread_pool.cpp @@ -175,7 +175,7 @@ TEST_CASE("thread_pool event jump threads", "[thread_pool]") coro::event e{}; - auto make_tp1_task = [&]() -> coro::task + auto make_tp1_task = [](coro::thread_pool& tp1, coro::event& e) -> coro::task { co_await tp1.schedule(); auto before_thread_id = std::this_thread::get_id(); @@ -189,7 +189,7 @@ TEST_CASE("thread_pool event jump threads", "[thread_pool]") co_return; }; - auto make_tp2_task = [&]() -> coro::task + auto make_tp2_task = [](coro::thread_pool& tp2, coro::event& e) -> coro::task { co_await tp2.schedule(); std::this_thread::sleep_for(std::chrono::milliseconds{10}); @@ -198,7 +198,7 @@ TEST_CASE("thread_pool event jump threads", "[thread_pool]") co_return; }; - coro::sync_wait(coro::when_all(make_tp1_task(), make_tp2_task())); + coro::sync_wait(coro::when_all(make_tp1_task(tp1, e), make_tp2_task(tp2, e))); } TEST_CASE("thread_pool high cpu usage when threadcount is greater than the number of tasks", "[thread_pool]") @@ -210,14 +210,14 @@ TEST_CASE("thread_pool high cpu usage when threadcount is greater than the numbe // This was due to using m_size instead of m_queue.size() causing the threads // that had no work to go into a spin trying to acquire work. 
- auto sleep_for_task = [](std::chrono::seconds duration) -> coro::task + auto wait_for_task = [](coro::thread_pool& pool, std::chrono::seconds delay) -> coro::task<> { - std::this_thread::sleep_for(duration); - co_return duration.count(); - }; + auto sleep_for_task = [](std::chrono::seconds duration) -> coro::task + { + std::this_thread::sleep_for(duration); + co_return duration.count(); + }; - auto wait_for_task = [&](coro::thread_pool& pool, std::chrono::seconds delay) -> coro::task<> - { co_await pool.schedule(); for (int i = 0; i < 5; ++i) { @@ -238,12 +238,11 @@ TEST_CASE("issue-287", "[thread_pool]") const int ITERATIONS = 200000; std::atomic g_count = 0; - auto thread_pool = std::make_shared( - coro::thread_pool::options{.thread_count = 1} - ); - auto task_container = coro::task_container{thread_pool}; + auto thread_pool = std::make_shared(coro::thread_pool::options{.thread_count = 1}); + auto task_container = coro::task_container{thread_pool}; - auto task = [](std::atomic& count) -> coro::task { + auto task = [](std::atomic& count) -> coro::task + { count++; co_return; }; diff --git a/test/test_when_all.cpp b/test/test_when_all.cpp index 9a5caea4..96faf2fe 100644 --- a/test/test_when_all.cpp +++ b/test/test_when_all.cpp @@ -112,18 +112,18 @@ TEST_CASE("when_all multple task withs list container", "[when_all]") TEST_CASE("when_all inside coroutine", "[when_all]") { coro::thread_pool tp{}; - auto make_task = [&](uint64_t amount) -> coro::task + auto make_task = [](coro::thread_pool& tp, uint64_t amount) -> coro::task { co_await tp.schedule(); co_return amount; }; - auto runner_task = [&]() -> coro::task + auto runner_task = [](coro::thread_pool& tp, auto make_task) -> coro::task { std::list> tasks; - tasks.emplace_back(make_task(1)); - tasks.emplace_back(make_task(2)); - tasks.emplace_back(make_task(3)); + tasks.emplace_back(make_task(tp, 1)); + tasks.emplace_back(make_task(tp, 2)); + tasks.emplace_back(make_task(tp, 3)); auto output_tasks = co_await coro::when_all(std::move(tasks)); @@ -135,7 +135,7 @@ TEST_CASE("when_all inside coroutine", "[when_all]") co_return result; }; - auto result = coro::sync_wait(runner_task()); + auto result = coro::sync_wait(runner_task(tp, make_task)); REQUIRE(result == (1 + 2 + 3)); } @@ -144,18 +144,17 @@ TEST_CASE("when_all use std::ranges::view", "[when_all]") { coro::thread_pool tp{}; - auto make_task = [&](uint64_t amount) -> coro::task - { - co_await tp.schedule(); - co_return amount; - }; - - auto make_runner_task = [&]() -> coro::task + auto make_runner_task = [](coro::thread_pool& tp) -> coro::task { + auto make_task = [](coro::thread_pool& tp, uint64_t amount) -> coro::task + { + co_await tp.schedule(); + co_return amount; + }; std::vector> tasks; - tasks.emplace_back(make_task(1)); - tasks.emplace_back(make_task(2)); - tasks.emplace_back(make_task(3)); + tasks.emplace_back(make_task(tp, 1)); + tasks.emplace_back(make_task(tp, 2)); + tasks.emplace_back(make_task(tp, 3)); auto output_tasks = co_await coro::when_all(std::ranges::views::all(tasks)); @@ -167,7 +166,7 @@ TEST_CASE("when_all use std::ranges::view", "[when_all]") co_return result; }; - auto result = coro::sync_wait(make_runner_task()); + auto result = coro::sync_wait(make_runner_task(tp)); REQUIRE(result == (1 + 2 + 3)); } @@ -175,7 +174,7 @@ TEST_CASE("when_all each task throws", "[when_all]") { coro::thread_pool tp{}; - auto make_task = [&](uint64_t i) -> coro::task + auto make_task = [](coro::thread_pool& tp, uint64_t i) -> coro::task { co_await tp.schedule(); if (i % 2 == 0) @@ 
-188,7 +187,7 @@ TEST_CASE("when_all each task throws", "[when_all]")
     std::vector<coro::task<uint64_t>> tasks;
     for (auto i = 1; i <= 4; ++i)
     {
-        tasks.emplace_back(make_task(i));
+        tasks.emplace_back(make_task(tp, i));
     }

     auto output_tasks = coro::sync_wait(coro::when_all(std::move(tasks)));

From fec677923b530cd8469e7e04d8949445b01f84ce Mon Sep 17 00:00:00 2001
From: Josh Baldwin
Date: Sun, 2 Feb 2025 09:36:13 -0700
Subject: [PATCH 15/24] Add executor.schedule(coro::task) -> bool (#295)

* Add executor.schedule(coro::task) -> bool

Completely removes coro::task_container; it isn't necessary as both
executors already have m_size counters to track the orphaned
task_self_deleting frames.

* added executor concept spawn(task)

* added thread_pool and io_scheduler schedule(task) -> task; this cannot
  be templated on the concept since the return type is dynamic, as far
  as I know how to do it

Closes #292
---
 .githooks/pre-commit                       |   4 -
 .githooks/readme-template.md               |  43 +++----
 CMakeLists.txt                             |   1 -
 README.md                                  | 133 +++------------------
 examples/CMakeLists.txt                    |   4 -
 examples/coro_http_200_ok_server.cpp       |   2 +-
 examples/coro_task_container.cpp           |  91 --------------
 examples/coro_tcp_echo_server.cpp          |   2 +-
 include/coro/concepts/executor.hpp         |  26 ++--
 include/coro/coro.hpp                      |   1 -
 include/coro/detail/task_self_deleting.hpp |  13 +-
 include/coro/event.hpp                     |   2 +-
 include/coro/io_scheduler.hpp              |  32 +++--
 include/coro/latch.hpp                     |   5 +-
 include/coro/net/dns/resolver.hpp          |   8 +-
 include/coro/task_container.hpp            | 105 ----------------
 include/coro/thread_pool.hpp               |  33 +++--
 src/detail/task_self_deleting.cpp          |  19 +--
 src/io_scheduler.cpp                       |  18 +--
 src/thread_pool.cpp                        |   9 ++
 test/bench.cpp                             |   6 +-
 test/net/test_tcp_server.cpp               |   2 +-
 test/test_io_scheduler.cpp                 |  27 ++++-
 test/test_ring_buffer.cpp                  |   1 +
 test/test_shared_mutex.cpp                 |   1 +
 test/test_thread_pool.cpp                  |  72 ++++++-----
 26 files changed, 198 insertions(+), 462 deletions(-)
 delete mode 100644 examples/coro_task_container.cpp
 delete mode 100644 include/coro/task_container.hpp

diff --git a/.githooks/pre-commit b/.githooks/pre-commit
index d3cdb754..c6ef205b 100755
--- a/.githooks/pre-commit
+++ b/.githooks/pre-commit
@@ -62,10 +62,6 @@ template_contents=$(cat 'README.md')
 example_contents=$(cat 'examples/coro_io_scheduler.cpp')
 echo "${template_contents/\$\{EXAMPLE_CORO_IO_SCHEDULER_CPP\}/$example_contents}" > README.md

-template_contents=$(cat 'README.md')
-example_contents=$(cat 'examples/coro_task_container.cpp')
-echo "${template_contents/\$\{EXAMPLE_CORO_TASK_CONTAINER_CPP\}/$example_contents}" > README.md
-
 template_contents=$(cat 'README.md')
 example_contents=$(cat 'examples/coro_semaphore.cpp')
 echo "${template_contents/\$\{EXAMPLE_CORO_SEMAPHORE_CPP\}/$example_contents}" > README.md

diff --git a/.githooks/readme-template.md b/.githooks/readme-template.md
index 4a638069..2246dbbf 100644
--- a/.githooks/readme-template.md
+++ b/.githooks/readme-template.md
@@ -30,7 +30,6 @@
 - Can use `coro::thread_pool` for latency sensitive or long lived tasks.
 - Can use inline task processing for thread per core or short lived tasks.
 - Currently uses an epoll driver, only supported on linux.
-  - [coro::task_container](#task_container) for dynamic task lifetimes
 * Coroutine Networking
   - coro::net::dns::resolver for async dns
     - Uses libc-ares
@@ -252,7 +251,12 @@ consumer 3 shutting down, stop signal received
 ```

 ### thread_pool
-`coro::thread_pool` is a statically sized pool of worker threads to execute scheduled coroutines from a FIFO queue. To schedule a coroutine on a thread pool the pool's `schedule()` function should be `co_awaited` to transfer the execution from the current thread to a thread pool worker thread. Its important to note that scheduling will first place the coroutine into the FIFO queue and will be picked up by the first available thread in the pool, e.g. there could be a delay if there is a lot of work queued up.
+`coro::thread_pool` is a statically sized pool of worker threads to execute scheduled coroutines from a FIFO queue. One way to schedule a coroutine on a thread pool is to use the pool's `schedule()` function, which should be `co_awaited` inside the coroutine to transfer the execution from the current thread to a thread pool worker thread. It's important to note that scheduling will first place the coroutine into the FIFO queue and it will be picked up by the first available thread in the pool, e.g. there could be a delay if there is a lot of work queued up.
+
+#### Ways to schedule tasks onto a `coro::thread_pool`
+* `coro::thread_pool::schedule()` Use `co_await` on this method inside a coroutine to transfer the task's execution to the `coro::thread_pool`.
+* `coro::thread_pool::spawn(coro::task<void>)` Spawns the task to be detached and owned by the `coro::thread_pool`; use this if you want to fire and forget the task, the `coro::thread_pool` will maintain the task's lifetime.
+* `coro::thread_pool::schedule(coro::task<T> task) -> coro::task<T>` schedules the task on the `coro::thread_pool` and then returns the result in a task that must be awaited. This is useful if you want to schedule work on the `coro::thread_pool` and wait for the result.

 ```C++
 ${EXAMPLE_CORO_THREAD_POOL_CPP}
 ```
@@ -283,7 +287,7 @@ thread pool worker 0 is shutting down.
 ```

 ### io_scheduler
-`coro::io_scheduler` is a i/o event scheduler that can use two methods of task processing:
+`coro::io_scheduler` is an i/o event scheduler execution context that can use two methods of task processing:

 * A background `coro::thread_pool`
 * Inline task processing on the `coro::io_scheduler`'s event loop

@@ -294,7 +298,15 @@ Using the inline processing strategy will have the event loop i/o thread process

 The `coro::io_scheduler` can use a dedicated spawned thread for processing events that are ready or it can be maually driven via its `process_events()` function for integration into existing event loops. By default i/o schedulers will spawn a dedicated event thread and use a thread pool to process tasks.

-Before getting to an example there are two methods of scheduling work onto an i/o scheduler, the first is by having the caller maintain the lifetime of the task being scheduled and the second is by moving or transfering owership of the task into the i/o scheduler. The first can allow for return values but requires the caller to manage the lifetime of the coroutine while the second requires the return type of the task to be void but allows for variable or unknown task lifetimes. Transferring task lifetime to the scheduler can be useful, e.g. for a network request.
+#### Ways to schedule tasks onto a `coro::io_scheduler`
+* `coro::io_scheduler::schedule()` Use `co_await` on this method inside a coroutine to transfer the task's execution to the `coro::io_scheduler`.
+* `coro::io_scheduler::spawn(coro::task<void>)` Spawns the task to be detached and owned by the `coro::io_scheduler`; use this if you want to fire and forget the task, the `coro::io_scheduler` will maintain the task's lifetime.
+* `coro::io_scheduler::schedule(coro::task<return_type> task) -> coro::task<return_type>` Schedules the task on the `coro::io_scheduler` and then returns the result in a task that must be awaited. This is useful if you want to schedule work on the `coro::io_scheduler` and wait for the result.
+* `coro::io_scheduler::schedule_after(std::chrono::milliseconds amount)` Schedules the current task to be rescheduled after the specified amount of time has passed.
+* `coro::io_scheduler::schedule_at(std::chrono::steady_clock::time_point time)` Schedules the current task to be rescheduled at the specified time point.
+* `coro::io_scheduler::yield()` Yields execution of the current task and resumes it after other tasks have had a chance to execute. This effectively places the task at the back of the queue of waiting tasks.
+* `coro::io_scheduler::yield_for(std::chrono::milliseconds amount)` Yields for the given amount of time and then reschedules the task. The task yields for at least this much time, since it is placed in the waiting execution queue and might take additional time to start executing again.
+* `coro::io_scheduler::yield_until(std::chrono::steady_clock::time_point time)` Yields execution until the given time point.
 
 The example provided here shows an i/o scheduler that spins up a basic `coro::net::tcp::server` and a `coro::net::tcp::client` that will connect to each other and then send a request and a response.
 
@@ -315,29 +327,6 @@ io_scheduler::thread_pool worker 1 stopping
 io_scheduler::process event thread stop
 ```
 
-### task_container
-`coro::task_container` is a special container type that will maintain the lifetime of tasks that do not have a known lifetime. This is extremely useful for tasks that hold open connections to clients and possibly process multiple requests from that client before shutting down. The task doesn't know how long it will be alive but at some point in the future it will complete and need to have its resources cleaned up. The `coro::task_container` does this by wrapping the users task into anothe coroutine task that will mark itself for deletion upon completing within the parent task container. The task container should then run garbage collection periodically, or by default when a new task is added, to prune completed tasks from the container.
-
-All tasks that are stored within a `coro::task_container` must have a `void` return type since their result cannot be accessed due to the task's lifetime being indeterminate.
-
-```C++
-${EXAMPLE_CORO_TASK_CONTAINER_CPP}
-```
-
-```bash
-$ ./examples/coro_task_container
-server: Hello from client 1
-client: Hello from server 1
-server: Hello from client 2
-client: Hello from server 2
-server: Hello from client 3
-client: Hello from server 3
-server: Hello from client 4
-client: Hello from server 4
-server: Hello from client 5
-client: Hello from server 5
-```
-
 ### tcp_echo_server
 See [examples/coro_tcp_echo_server.cpp](./examples/coro_tcp_echo_server.cpp) for a basic TCP echo server implementation. You can use tools like `ab` to benchmark against this echo server. 
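As a quick illustration of the two new executor entry points added by this patch, here is a minimal, self-contained sketch; the helper names below are illustrative only, and the same `spawn`/`schedule(task)` pair exists on both `coro::thread_pool` and `coro::io_scheduler`:

```C++
#include <coro/coro.hpp>

#include <atomic>
#include <iostream>

int main()
{
    coro::thread_pool tp{coro::thread_pool::options{.thread_count = 2}};
    std::atomic<uint64_t> counter{0};

    // Illustrative fire-and-forget work; the pool owns its lifetime once spawned.
    auto make_background_task = [](std::atomic<uint64_t>& c) -> coro::task<void>
    {
        c.fetch_add(1, std::memory_order::release);
        co_return;
    };

    // Illustrative work whose result the caller wants back.
    auto make_compute_task = [](uint64_t x) -> coro::task<uint64_t> { co_return x * 2; };

    // spawn(): detached, tracked by the pool until the coroutine frame completes.
    tp.spawn(make_background_task(counter));

    // schedule(task): returns an awaitable task; sync_wait drives it to completion here.
    auto result = coro::sync_wait(tp.schedule(make_compute_task(21)));

    // shutdown() joins the workers; the spawned task finishes before it returns.
    tp.shutdown();

    std::cout << "counter = " << counter.load() << ", result = " << result << "\n";
}
```

In short, `spawn` trades away the result in exchange for a detached lifetime, while `schedule(task)` keeps the result but requires the caller to await it.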
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 0cc35d3f..7b200f67 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -90,7 +90,6 @@ set(LIBCORO_SOURCE_FILES
 include/coro/shared_mutex.hpp
 include/coro/sync_wait.hpp src/sync_wait.cpp
 include/coro/task.hpp
- include/coro/task_container.hpp
 include/coro/thread_pool.hpp src/thread_pool.cpp
 include/coro/time.hpp
 include/coro/when_all.hpp
diff --git a/README.md b/README.md
index ce447edd..6eefa6e1 100644
--- a/README.md
+++ b/README.md
@@ -30,7 +30,6 @@
 - Can use `coro::thread_pool` for latency sensitive or long lived tasks.
 - Can use inline task processing for thread per core or short lived tasks.
 - Currently uses an epoll driver, only supported on linux.
- - [coro::task_container](#task_container) for dynamic task lifetimes
 * Coroutine Networking
 - coro::net::dns::resolver for async dns
 - Uses libc-ares
@@ -738,7 +737,12 @@ consumer 3 shutting down, stop signal received
 ```
 
 ### thread_pool
-`coro::thread_pool` is a statically sized pool of worker threads to execute scheduled coroutines from a FIFO queue. To schedule a coroutine on a thread pool the pool's `schedule()` function should be `co_awaited` to transfer the execution from the current thread to a thread pool worker thread. Its important to note that scheduling will first place the coroutine into the FIFO queue and will be picked up by the first available thread in the pool, e.g. there could be a delay if there is a lot of work queued up.
+`coro::thread_pool` is a statically sized pool of worker threads to execute scheduled coroutines from a FIFO queue. One way to schedule a coroutine on a thread pool is to use the pool's `schedule()` function, which should be `co_awaited` inside the coroutine to transfer execution from the current thread to a thread pool worker thread. It's important to note that scheduling will first place the coroutine into the FIFO queue and it will be picked up by the first available thread in the pool, e.g. there could be a delay if there is a lot of work queued up.
+
+#### Ways to schedule tasks onto a `coro::thread_pool`
+* `coro::thread_pool::schedule()` Use `co_await` on this method inside a coroutine to transfer the task's execution to the `coro::thread_pool`.
+* `coro::thread_pool::spawn(coro::task<void>)` Spawns the task to be detached and owned by the `coro::thread_pool`; use this if you want to fire and forget the task, and the `coro::thread_pool` will maintain the task's lifetime.
+* `coro::thread_pool::schedule(coro::task<return_type> task) -> coro::task<return_type>` Schedules the task on the `coro::thread_pool` and then returns the result in a task that must be awaited. This is useful if you want to schedule work on the `coro::thread_pool` and wait for the result.
 
 ```C++
 #include <coro/coro.hpp>
@@ -846,7 +850,7 @@ thread pool worker 0 is shutting down.
 ```
 
 ### io_scheduler
-`coro::io_scheduler` is a i/o event scheduler that can use two methods of task processing:
+`coro::io_scheduler` is an i/o event scheduler execution context that can use two methods of task processing:
 
 * A background `coro::thread_pool`
 * Inline task processing on the `coro::io_scheduler`'s event loop
@@ -857,7 +861,15 @@ Using the inline processing strategy will have the event loop i/o thread process
 
 The `coro::io_scheduler` can use a dedicated spawned thread for processing events that are ready, or it can be manually driven via its `process_events()` function for integration into existing event loops. By default i/o schedulers will spawn a dedicated event thread and use a thread pool to process tasks. 
-Before getting to an example there are two methods of scheduling work onto an i/o scheduler, the first is by having the caller maintain the lifetime of the task being scheduled and the second is by moving or transfering owership of the task into the i/o scheduler. The first can allow for return values but requires the caller to manage the lifetime of the coroutine while the second requires the return type of the task to be void but allows for variable or unknown task lifetimes. Transferring task lifetime to the scheduler can be useful, e.g. for a network request.
+#### Ways to schedule tasks onto a `coro::io_scheduler`
+* `coro::io_scheduler::schedule()` Use `co_await` on this method inside a coroutine to transfer the task's execution to the `coro::io_scheduler`.
+* `coro::io_scheduler::spawn(coro::task<void>)` Spawns the task to be detached and owned by the `coro::io_scheduler`; use this if you want to fire and forget the task, and the `coro::io_scheduler` will maintain the task's lifetime.
+* `coro::io_scheduler::schedule(coro::task<return_type> task) -> coro::task<return_type>` Schedules the task on the `coro::io_scheduler` and then returns the result in a task that must be awaited. This is useful if you want to schedule work on the `coro::io_scheduler` and wait for the result.
+* `coro::io_scheduler::schedule_after(std::chrono::milliseconds amount)` Schedules the current task to be rescheduled after the specified amount of time has passed.
+* `coro::io_scheduler::schedule_at(std::chrono::steady_clock::time_point time)` Schedules the current task to be rescheduled at the specified time point.
+* `coro::io_scheduler::yield()` Yields execution of the current task and resumes it after other tasks have had a chance to execute. This effectively places the task at the back of the queue of waiting tasks.
+* `coro::io_scheduler::yield_for(std::chrono::milliseconds amount)` Yields for the given amount of time and then reschedules the task. The task yields for at least this much time, since it is placed in the waiting execution queue and might take additional time to start executing again.
+* `coro::io_scheduler::yield_until(std::chrono::steady_clock::time_point time)` Yields execution until the given time point.
 
 The example provided here shows an i/o scheduler that spins up a basic `coro::net::tcp::server` and a `coro::net::tcp::client` that will connect to each other and then send a request and a response.
 
@@ -1027,119 +1039,6 @@ io_scheduler::thread_pool worker 1 stopping
 io_scheduler::process event thread stop
 ```
 
-### task_container
-`coro::task_container` is a special container type that will maintain the lifetime of tasks that do not have a known lifetime. This is extremely useful for tasks that hold open connections to clients and possibly process multiple requests from that client before shutting down. The task doesn't know how long it will be alive but at some point in the future it will complete and need to have its resources cleaned up. The `coro::task_container` does this by wrapping the users task into anothe coroutine task that will mark itself for deletion upon completing within the parent task container. The task container should then run garbage collection periodically, or by default when a new task is added, to prune completed tasks from the container.
-
-All tasks that are stored within a `coro::task_container` must have a `void` return type since their result cannot be accessed due to the task's lifetime being indeterminate. 
- -```C++ -#include -#include - -int main() -{ - auto scheduler = coro::io_scheduler::make_shared( - coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}); - - auto make_server_task = [](std::shared_ptr scheduler) -> coro::task - { - // This is the task that will handle processing a client's requests. - auto serve_client = [](coro::net::tcp::client client) -> coro::task - { - size_t requests{1}; - - while (true) - { - // Continue to accept more requests until the client closes the connection. - co_await client.poll(coro::poll_op::read); - - std::string request(64, '\0'); - auto [recv_status, recv_bytes] = client.recv(request); - if (recv_status == coro::net::recv_status::closed) - { - break; - } - - request.resize(recv_bytes.size()); - std::cout << "server: " << request << "\n"; - - // Make sure the client socket can be written to. - co_await client.poll(coro::poll_op::write); - - auto response = "Hello from server " + std::to_string(requests); - client.send(response); - - ++requests; - } - - co_return; - }; - - // Spin up the tcp::server and schedule it onto the io_scheduler. - coro::net::tcp::server server{scheduler}; - co_await scheduler->schedule(); - - // All incoming connections will be stored into the task container until they are completed. - coro::task_container tc{scheduler}; - - // Wait for an incoming connection and accept it, this example will only use 1 connection. - co_await server.poll(); - auto client = server.accept(); - // Store the task that will serve the client into the container and immediately begin executing it - // on the task container's thread pool, which is the same as the scheduler. - tc.start(serve_client(std::move(client))); - - // Wait for all clients to complete before shutting down the tcp::server. - co_await tc.yield_until_empty(); - co_return; - }; - - auto make_client_task = [](std::shared_ptr scheduler, size_t request_count) -> coro::task - { - co_await scheduler->schedule(); - coro::net::tcp::client client{scheduler}; - - co_await client.connect(); - - // Send N requests on the same connection and wait for the server response to each one. - for (size_t i = 1; i <= request_count; ++i) - { - // Make sure the client socket can be written to. - co_await client.poll(coro::poll_op::write); - - // Send the request data. - auto request = "Hello from client " + std::to_string(i); - client.send(request); - - co_await client.poll(coro::poll_op::read); - std::string response(64, '\0'); - auto [recv_status, recv_bytes] = client.recv(response); - response.resize(recv_bytes.size()); - - std::cout << "client: " << response << "\n"; - } - - co_return; // Upon exiting the tcp::client will close its connection to the server. - }; - - coro::sync_wait(coro::when_all(make_server_task(scheduler), make_client_task(scheduler, 5))); -} -``` - -```bash -$ ./examples/coro_task_container -server: Hello from client 1 -client: Hello from server 1 -server: Hello from client 2 -client: Hello from server 2 -server: Hello from client 3 -client: Hello from server 3 -server: Hello from client 4 -client: Hello from server 4 -server: Hello from client 5 -client: Hello from server 5 -``` - ### tcp_echo_server See [examples/coro_tcp_echo_erver.cpp](./examples/coro_tcp_echo_server.cpp) for a basic TCP echo server implementation. You can use tools like `ab` to benchmark against this echo server. 
diff --git a/examples/CMakeLists.txt b/examples/CMakeLists.txt index 41d9c499..2abd9b19 100644 --- a/examples/CMakeLists.txt +++ b/examples/CMakeLists.txt @@ -58,10 +58,6 @@ target_link_libraries(coro_when_all PUBLIC libcoro) target_compile_options(coro_when_all PUBLIC ${LIBCORO_EXAMPLE_OPTIONS}) if(LIBCORO_FEATURE_NETWORKING) - add_executable(coro_task_container coro_task_container.cpp) - target_link_libraries(coro_task_container PUBLIC libcoro) - target_compile_options(coro_task_container PUBLIC ${LIBCORO_EXAMPLE_OPTIONS}) - add_executable(coro_io_scheduler coro_io_scheduler.cpp) target_link_libraries(coro_io_scheduler PUBLIC libcoro) target_compile_options(coro_io_scheduler PUBLIC ${LIBCORO_EXAMPLE_OPTIONS}) diff --git a/examples/coro_http_200_ok_server.cpp b/examples/coro_http_200_ok_server.cpp index e33b98cf..dd18b84e 100644 --- a/examples/coro_http_200_ok_server.cpp +++ b/examples/coro_http_200_ok_server.cpp @@ -49,7 +49,7 @@ Connection: keep-alive auto client = server.accept(); if (client.socket().is_valid()) { - scheduler->schedule(make_on_connection_task(std::move(client))); + scheduler->spawn(make_on_connection_task(std::move(client))); } // else report error or something if the socket was invalid or could not be accepted. } break; diff --git a/examples/coro_task_container.cpp b/examples/coro_task_container.cpp deleted file mode 100644 index 66682a44..00000000 --- a/examples/coro_task_container.cpp +++ /dev/null @@ -1,91 +0,0 @@ -#include -#include - -int main() -{ - auto scheduler = coro::io_scheduler::make_shared( - coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}); - - auto make_server_task = [](std::shared_ptr scheduler) -> coro::task - { - // This is the task that will handle processing a client's requests. - auto serve_client = [](coro::net::tcp::client client) -> coro::task - { - size_t requests{1}; - - while (true) - { - // Continue to accept more requests until the client closes the connection. - co_await client.poll(coro::poll_op::read); - - std::string request(64, '\0'); - auto [recv_status, recv_bytes] = client.recv(request); - if (recv_status == coro::net::recv_status::closed) - { - break; - } - - request.resize(recv_bytes.size()); - std::cout << "server: " << request << "\n"; - - // Make sure the client socket can be written to. - co_await client.poll(coro::poll_op::write); - - auto response = "Hello from server " + std::to_string(requests); - client.send(response); - - ++requests; - } - - co_return; - }; - - // Spin up the tcp::server and schedule it onto the io_scheduler. - coro::net::tcp::server server{scheduler}; - co_await scheduler->schedule(); - - // All incoming connections will be stored into the task container until they are completed. - coro::task_container tc{scheduler}; - - // Wait for an incoming connection and accept it, this example will only use 1 connection. - co_await server.poll(); - auto client = server.accept(); - // Store the task that will serve the client into the container and immediately begin executing it - // on the task container's thread pool, which is the same as the scheduler. - tc.start(serve_client(std::move(client))); - - // Wait for all clients to complete before shutting down the tcp::server. 
- co_await tc.yield_until_empty(); - co_return; - }; - - auto make_client_task = [](std::shared_ptr scheduler, size_t request_count) -> coro::task - { - co_await scheduler->schedule(); - coro::net::tcp::client client{scheduler}; - - co_await client.connect(); - - // Send N requests on the same connection and wait for the server response to each one. - for (size_t i = 1; i <= request_count; ++i) - { - // Make sure the client socket can be written to. - co_await client.poll(coro::poll_op::write); - - // Send the request data. - auto request = "Hello from client " + std::to_string(i); - client.send(request); - - co_await client.poll(coro::poll_op::read); - std::string response(64, '\0'); - auto [recv_status, recv_bytes] = client.recv(response); - response.resize(recv_bytes.size()); - - std::cout << "client: " << response << "\n"; - } - - co_return; // Upon exiting the tcp::client will close its connection to the server. - }; - - coro::sync_wait(coro::when_all(make_server_task(scheduler), make_client_task(scheduler, 5))); -} diff --git a/examples/coro_tcp_echo_server.cpp b/examples/coro_tcp_echo_server.cpp index dba150d0..efd09e0f 100644 --- a/examples/coro_tcp_echo_server.cpp +++ b/examples/coro_tcp_echo_server.cpp @@ -43,7 +43,7 @@ auto main() -> int auto client = server.accept(); if (client.socket().is_valid()) { - scheduler->schedule(make_on_connection_task(std::move(client))); + scheduler->spawn(make_on_connection_task(std::move(client))); } // else report error or something if the socket was invalid or could not be accepted. } break; diff --git a/include/coro/concepts/executor.hpp b/include/coro/concepts/executor.hpp index ecb009cf..15d2f68a 100644 --- a/include/coro/concepts/executor.hpp +++ b/include/coro/concepts/executor.hpp @@ -2,36 +2,38 @@ #include "coro/concepts/awaitable.hpp" #include "coro/fd.hpp" +#include "coro/task.hpp" #ifdef LIBCORO_FEATURE_NETWORKING #include "coro/poll.hpp" - #include "coro/task.hpp" #endif // #ifdef LIBCORO_FEATURE_NETWORKING #include #include #include +#include namespace coro::concepts { // clang-format off -template -concept executor = requires(type t, std::coroutine_handle<> c) +template +concept executor = requires(executor_type e, std::coroutine_handle<> c) { - { t.schedule() } -> coro::concepts::awaiter; - { t.yield() } -> coro::concepts::awaiter; - { t.resume(c) } -> std::same_as; - { t.size() } -> std::same_as; - { t.empty() } -> std::same_as; - { t.shutdown() } -> std::same_as; + { e.schedule() } -> coro::concepts::awaiter; + { e.spawn(std::declval>()) } -> std::same_as; + { e.yield() } -> coro::concepts::awaiter; + { e.resume(c) } -> std::same_as; + { e.size() } -> std::same_as; + { e.empty() } -> std::same_as; + { e.shutdown() } -> std::same_as; }; #ifdef LIBCORO_FEATURE_NETWORKING -template -concept io_exceutor = executor and requires(type t, std::coroutine_handle<> c, fd_t fd, coro::poll_op op, std::chrono::milliseconds timeout) +template +concept io_exceutor = executor and requires(executor_type e, std::coroutine_handle<> c, fd_t fd, coro::poll_op op, std::chrono::milliseconds timeout) { - { t.poll(fd, op, timeout) } -> std::same_as>; + { e.poll(fd, op, timeout) } -> std::same_as>; }; #endif // #ifdef LIBCORO_FEATURE_NETWORKING diff --git a/include/coro/coro.hpp b/include/coro/coro.hpp index f44b72ca..0984a23b 100644 --- a/include/coro/coro.hpp +++ b/include/coro/coro.hpp @@ -38,7 +38,6 @@ #include "coro/shared_mutex.hpp" #include "coro/sync_wait.hpp" #include "coro/task.hpp" -#include "coro/task_container.hpp" #include 
"coro/thread_pool.hpp" #include "coro/time.hpp" #include "coro/when_all.hpp" diff --git a/include/coro/detail/task_self_deleting.hpp b/include/coro/detail/task_self_deleting.hpp index eec1217d..571365ca 100644 --- a/include/coro/detail/task_self_deleting.hpp +++ b/include/coro/detail/task_self_deleting.hpp @@ -1,5 +1,7 @@ #pragma once +#include "coro/task.hpp" + #include #include #include @@ -26,13 +28,13 @@ class promise_self_deleting auto return_void() noexcept -> void; auto unhandled_exception() -> void; - auto task_container_size(std::atomic& task_container_size) -> void; + auto executor_size(std::atomic& task_container_size) -> void; private: /** - * The coro::task_container m_size member to decrement upon the coroutine completing. + * The executor m_size member to decrement upon the coroutine completing. */ - std::atomic* m_task_container_size{nullptr}; + std::atomic* m_executor_size{nullptr}; }; /** @@ -68,4 +70,9 @@ class task_self_deleting promise_self_deleting* m_promise{nullptr}; }; +/** + * Turns a coro::task into a self deleting task (detached). + */ +auto make_task_self_deleting(coro::task user_task) -> task_self_deleting; + } // namespace coro::detail diff --git a/include/coro/event.hpp b/include/coro/event.hpp index 4fe370bf..dd6da889 100644 --- a/include/coro/event.hpp +++ b/include/coro/event.hpp @@ -86,7 +86,7 @@ class event /** * @return True if this event is currently in the set state. */ - auto is_set() const noexcept -> bool { return m_state.load(std::memory_order_acquire) == this; } + auto is_set() const noexcept -> bool { return m_state.load(std::memory_order::acquire) == this; } /** * Sets this event and resumes all awaiters. Note that all waiters will be resumed onto this diff --git a/include/coro/io_scheduler.hpp b/include/coro/io_scheduler.hpp index 6f9ca7ad..4cdae2d3 100644 --- a/include/coro/io_scheduler.hpp +++ b/include/coro/io_scheduler.hpp @@ -3,7 +3,6 @@ #include "coro/detail/poll_info.hpp" #include "coro/fd.hpp" #include "coro/poll.hpp" -#include "coro/task_container.hpp" #include "coro/thread_pool.hpp" #ifdef LIBCORO_FEATURE_NETWORKING @@ -172,15 +171,29 @@ class io_scheduler : public std::enable_shared_from_this auto schedule() -> schedule_operation { return schedule_operation{*this}; } /** - * Schedules a task onto the io_scheduler and moves ownership of the task to the io_scheduler. - * Only void return type tasks can be scheduled in this manner since the task submitter will no - * longer have control over the scheduled task. + * Spawns a task into the io_scheduler and moves ownership of the task to the io_scheduler. + * Only void return type tasks can be spawned in this manner since the task submitter will no + * longer have control over the spawned task, it is effectively detached. * @param task The task to execute on this io_scheduler. It's lifetime ownership will be transferred * to this io_scheduler. - * @return True if the task was succesfully scheduled onto the io_scheduler. This can fail if the task + * @return True if the task was succesfully spawned onto the io_scheduler. This can fail if the task * is already completed or does not contain a valid coroutine anymore. */ - auto schedule(coro::task&& task) -> bool; + auto spawn(coro::task&& task) -> bool; + + /** + * Schedules a task on the io_scheduler and returns another task that must be awaited on for completion. + * This can be done via co_await in a coroutine context or coro::sync_wait() outside of coroutine context. + * @tparam return_type The return value of the task. 
+ * @param task The task to schedule on the io_scheduler. + * @return The task to await for the input task to complete. + */ + template + [[nodiscard]] auto schedule(coro::task task) -> coro::task + { + co_await schedule(); + co_return co_await task; + } /** * Schedules the current task to run after the given amount of time has elapsed. @@ -352,13 +365,6 @@ class io_scheduler : public std::enable_shared_from_this std::mutex m_scheduled_tasks_mutex{}; std::vector> m_scheduled_tasks{}; - /// Tasks that have their ownership passed into the scheduler. This is a bit strange for now - /// but the concept doesn't pass since io_scheduler isn't fully defined yet. - /// The type is coro::task_container* - /// Do not inline any functions that use this in the io_scheduler header, it can cause the linker - /// to complain about "defined in discarded section" because it gets defined multiple times - void* m_owned_tasks{nullptr}; - static constexpr const int m_shutdown_object{0}; static constexpr const void* m_shutdown_ptr = &m_shutdown_object; diff --git a/include/coro/latch.hpp b/include/coro/latch.hpp index 706ce847..d62f56b3 100644 --- a/include/coro/latch.hpp +++ b/include/coro/latch.hpp @@ -59,11 +59,12 @@ class latch * @param tp The thread pool to schedule the task that is waiting on the latch on. * @param n The number of tasks to complete towards the latch, defaults to 1. */ - auto count_down(coro::thread_pool& tp, std::int64_t n = 1) noexcept -> void + template + auto count_down(executor_type& executor, std::int64_t n = 1) noexcept -> void { if (m_count.fetch_sub(n, std::memory_order::acq_rel) <= n) { - m_event.set(tp); + m_event.set(executor); } } diff --git a/include/coro/net/dns/resolver.hpp b/include/coro/net/dns/resolver.hpp index f81a0d95..aa6941ac 100644 --- a/include/coro/net/dns/resolver.hpp +++ b/include/coro/net/dns/resolver.hpp @@ -7,7 +7,6 @@ #include "coro/net/ip_address.hpp" #include "coro/poll.hpp" #include "coro/task.hpp" -#include "coro/task_container.hpp" #include #include @@ -82,8 +81,7 @@ class resolver public: explicit resolver(std::shared_ptr executor, std::chrono::milliseconds timeout) : m_executor(std::move(executor)), - m_timeout(timeout), - m_task_container(m_executor) + m_timeout(timeout) { if (m_executor == nullptr) { @@ -166,8 +164,6 @@ class resolver /// are not setup when ares_poll() is called. std::unordered_set m_active_sockets{}; - task_container m_task_container; - auto ares_poll() -> void { std::array ares_sockets{}; @@ -211,7 +207,7 @@ class resolver // If this socket is not currently actively polling, start polling! if (m_active_sockets.emplace(fd).second) { - m_task_container.start(make_poll_task(fd, poll_ops[i])); + m_executor->spawn(make_poll_task(fd, poll_ops[i])); } } } diff --git a/include/coro/task_container.hpp b/include/coro/task_container.hpp deleted file mode 100644 index abd64d9a..00000000 --- a/include/coro/task_container.hpp +++ /dev/null @@ -1,105 +0,0 @@ -#pragma once - -#include "coro/attribute.hpp" -#include "coro/concepts/executor.hpp" -#include "coro/detail/task_self_deleting.hpp" -#include "coro/task.hpp" - -#include -#include -#include -#include -#include -#include -#include -#include - -namespace coro -{ -class io_scheduler; - -template -class task_container -{ -public: - /** - * @param e Tasks started in the container are scheduled onto this executor. For tasks created - * from a coro::io_scheduler, this would usually be that coro::io_scheduler instance. - * @param opts Task container options. 
- */ - task_container(std::shared_ptr e) : m_executor(std::move(e)) - { - if (m_executor == nullptr) - { - throw std::runtime_error{"task_container cannot have a nullptr executor"}; - } - } - task_container(const task_container&) = delete; - task_container(task_container&&) = delete; - auto operator=(const task_container&) -> task_container& = delete; - auto operator=(task_container&&) -> task_container& = delete; - ~task_container() - { - // This will hang the current thread.. but if tasks are not complete thats also pretty bad. - while (!empty()) - { - // Sleep a bit so the cpu doesn't totally churn. - std::this_thread::sleep_for(std::chrono::milliseconds{10}); - } - } - - /** - * Stores a user task and starts its execution on the container's thread pool. - * @param user_task The scheduled user's task to store in this task container and start its execution. - * @return True if the task was succesfully started into the task container. This can fail if the task - * is already completed or does not contain a valid coroutine anymore. - */ - auto start(coro::task&& user_task) -> bool - { - m_size.fetch_add(1, std::memory_order::relaxed); - - auto task = make_self_deleting_task(std::move(user_task)); - // Hook the promise to decrement the size upon its self deletion of the coroutine frame. - task.promise().task_container_size(m_size); - return m_executor->resume(task.handle()); - } - - /** - * @return The number of active tasks in the container. - */ - auto size() const -> std::size_t { return m_size.load(std::memory_order::acquire); } - - /** - * @return True if there are no active tasks in the container. - */ - auto empty() const -> bool { return size() == 0; } - - /** - * Will continue to garbage collect and yield until all tasks are complete. This method can be - * co_await'ed to make it easier to wait for the task container to have all its tasks complete. - * - * This does not shut down the task container, but can be used when shutting down, or if your - * logic requires all the tasks contained within to complete, it is similar to coro::latch. - */ - auto yield_until_empty() -> coro::task - { - while (!empty()) - { - co_await m_executor->yield(); - } - } - -private: - auto make_self_deleting_task(task user_task) -> detail::task_self_deleting - { - co_await user_task; - co_return; - } - - /// The number of alive tasks. - std::atomic m_size{}; - /// The executor to schedule tasks that have just started. - std::shared_ptr m_executor{nullptr}; -}; - -} // namespace coro diff --git a/include/coro/thread_pool.hpp b/include/coro/thread_pool.hpp index dbc663d5..77113930 100644 --- a/include/coro/thread_pool.hpp +++ b/include/coro/thread_pool.hpp @@ -1,7 +1,6 @@ #pragma once #include "coro/concepts/range_of.hpp" -#include "coro/event.hpp" #include "coro/task.hpp" #include @@ -110,25 +109,24 @@ class thread_pool [[nodiscard]] auto schedule() -> operation; /** - * @throw std::runtime_error If the thread pool is `shutdown()` scheduling new tasks is not permitted. - * @param f The function to execute on the thread pool. - * @param args The arguments to call the functor with. - * @return A task that wraps the given functor to be executed on the thread pool. + * Spawns the given task to be run on this thread pool, the task is detached from the user. + * @param task The task to spawn onto the thread pool. + * @return True if the task has been spawned onto this thread pool. 
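+ * Note: the spawned task is tracked via the pool's m_size counter, so it is
+ * included in size() and completes before shutdown() returns.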
+ */ + auto spawn(coro::task&& task) noexcept -> bool; + + /** + * Schedules a task on the thread pool and returns another task that must be awaited on for completion. + * This can be done via co_await in a coroutine context or coro::sync_wait() outside of coroutine context. + * @tparam return_type The return value of the task. + * @param task The task to schedule on the thread pool. + * @return The task to await for the input task to complete. */ - template - [[nodiscard]] auto schedule(functor&& f, arguments... args) -> task(args)...))> + template + [[nodiscard]] auto schedule(coro::task task) -> coro::task { co_await schedule(); - - if constexpr (std::is_same_v(args)...))>) - { - f(std::forward(args)...); - co_return; - } - else - { - co_return f(std::forward(args)...); - } + co_return co_await task; } /** @@ -236,6 +234,7 @@ class thread_pool std::condition_variable_any m_wait_cv; /// FIFO queue of tasks waiting to be executed. std::deque> m_queue; + /** * Each background thread runs from this function. * @param idx The executor's idx for internal data structure accesses. diff --git a/src/detail/task_self_deleting.cpp b/src/detail/task_self_deleting.cpp index 4f45a613..c86155ab 100644 --- a/src/detail/task_self_deleting.cpp +++ b/src/detail/task_self_deleting.cpp @@ -7,7 +7,6 @@ namespace coro::detail promise_self_deleting::promise_self_deleting() { - (void)m_task_container_size; // make codacy happy } promise_self_deleting::~promise_self_deleting() @@ -15,7 +14,7 @@ promise_self_deleting::~promise_self_deleting() } promise_self_deleting::promise_self_deleting(promise_self_deleting&& other) - : m_task_container_size(std::exchange(other.m_task_container_size, nullptr)) + : m_executor_size(std::exchange(other.m_executor_size, nullptr)) { } @@ -23,7 +22,7 @@ auto promise_self_deleting::operator=(promise_self_deleting&& other) -> promise_ { if (std::addressof(other) != nullptr) { - m_task_container_size = std::exchange(other.m_task_container_size, nullptr); + m_executor_size = std::exchange(other.m_executor_size, nullptr); } return *this; @@ -42,9 +41,9 @@ auto promise_self_deleting::initial_suspend() -> std::suspend_always auto promise_self_deleting::final_suspend() noexcept -> std::suspend_never { // Notify the task_container that this coroutine has completed. - if (m_task_container_size != nullptr) + if (m_executor_size != nullptr) { - m_task_container_size->fetch_sub(1); + m_executor_size->fetch_sub(1, std::memory_order::release); } // By not suspending this lets the coroutine destroy itself. @@ -61,9 +60,9 @@ auto promise_self_deleting::unhandled_exception() -> void // The user cannot access the promise anyways, ignore the exception. 
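 // A self-deleting task is detached, so there is no awaiting coroutine to
 // rethrow this exception into.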
} -auto promise_self_deleting::task_container_size(std::atomic& task_container_size) -> void +auto promise_self_deleting::executor_size(std::atomic& executor_size) -> void { - m_task_container_size = &task_container_size; + m_executor_size = &executor_size; } task_self_deleting::task_self_deleting(promise_self_deleting& promise) : m_promise(&promise) @@ -88,4 +87,10 @@ auto task_self_deleting::operator=(task_self_deleting&& other) -> task_self_dele return *this; } +auto make_task_self_deleting(coro::task user_task) -> task_self_deleting +{ + co_await user_task; + co_return; +} + } // namespace coro::detail diff --git a/src/io_scheduler.cpp b/src/io_scheduler.cpp index 7c1cb370..78475b69 100644 --- a/src/io_scheduler.cpp +++ b/src/io_scheduler.cpp @@ -1,4 +1,5 @@ #include "coro/io_scheduler.hpp" +#include "coro/detail/task_self_deleting.hpp" #include #include @@ -28,9 +29,6 @@ auto io_scheduler::make_shared(options opts) -> std::shared_ptr { auto s = std::make_shared(std::move(opts), private_constructor{}); - // std::enable_shared_from_this cannot be used until the object is fully created. - s->m_owned_tasks = new coro::task_container(s->shared_from_this()); - if (opts.execution_strategy == execution_strategy_t::process_tasks_on_thread_pool) { s->m_thread_pool = std::make_unique(std::move(s->m_opts.pool)); @@ -81,12 +79,6 @@ io_scheduler::~io_scheduler() close(m_schedule_fd); m_schedule_fd = -1; } - - if (m_owned_tasks != nullptr) - { - delete static_cast*>(m_owned_tasks); - m_owned_tasks = nullptr; - } } auto io_scheduler::process_events(std::chrono::milliseconds timeout) -> std::size_t @@ -95,10 +87,12 @@ auto io_scheduler::process_events(std::chrono::milliseconds timeout) -> std::siz return size(); } -auto io_scheduler::schedule(coro::task&& task) -> bool +auto io_scheduler::spawn(coro::task&& task) -> bool { - auto* ptr = static_cast*>(m_owned_tasks); - return ptr->start(std::move(task)); + m_size.fetch_add(1, std::memory_order::release); + auto owned_task = detail::make_task_self_deleting(std::move(task)); + owned_task.promise().executor_size(m_size); + return resume(owned_task.handle()); } auto io_scheduler::schedule_after(std::chrono::milliseconds amount) -> coro::task diff --git a/src/thread_pool.cpp b/src/thread_pool.cpp index 1e6dbe57..f15b62cf 100644 --- a/src/thread_pool.cpp +++ b/src/thread_pool.cpp @@ -1,4 +1,5 @@ #include "coro/thread_pool.hpp" +#include "coro/detail/task_self_deleting.hpp" namespace coro { @@ -45,6 +46,14 @@ auto thread_pool::schedule() -> operation } } +auto thread_pool::spawn(coro::task&& task) noexcept -> bool +{ + m_size.fetch_add(1, std::memory_order::release); + auto wrapper_task = detail::make_task_self_deleting(std::move(task)); + wrapper_task.promise().executor_size(m_size); + return resume(wrapper_task.handle()); +} + auto thread_pool::resume(std::coroutine_handle<> handle) noexcept -> bool { if (handle == nullptr || handle.done()) diff --git a/test/bench.cpp b/test/bench.cpp index 51468ffa..c77c5036 100644 --- a/test/bench.cpp +++ b/test/bench.cpp @@ -431,7 +431,7 @@ TEST_CASE("benchmark tcp::server echo server thread pool", "[benchmark]") if (c.socket().is_valid()) { accepted.fetch_add(1, std::memory_order::release); - server_scheduler->schedule(make_on_connection_task(std::move(c), wait_for_clients)); + server_scheduler->spawn(make_on_connection_task(std::move(c), wait_for_clients)); } } } @@ -625,7 +625,7 @@ TEST_CASE("benchmark tcp::server echo server inline", "[benchmark]") accepted.fetch_add(1, std::memory_order::release); s.live_clients++; 
- s.scheduler->schedule(make_on_connection_task(s, std::move(c))); + s.scheduler->spawn(make_on_connection_task(s, std::move(c))); } } } @@ -847,7 +847,7 @@ TEST_CASE("benchmark tls::server echo server thread pool", "[benchmark]") if (c.socket().is_valid()) { accepted.fetch_add(1, std::memory_order::release); - server_scheduler->schedule(make_on_connection_task(std::move(c), wait_for_clients)); + server_scheduler->spawn(make_on_connection_task(std::move(c), wait_for_clients)); } } } diff --git a/test/net/test_tcp_server.cpp b/test/net/test_tcp_server.cpp index 2391d1e8..44b3097e 100644 --- a/test/net/test_tcp_server.cpp +++ b/test/net/test_tcp_server.cpp @@ -119,7 +119,7 @@ TEST_CASE("tcp_server concurrent polling on the same socket", "[tcp_server]") // make a copy so we can poll twice at the same time in different coroutines auto write_client = read_client; - scheduler->schedule(make_read_task(std::move(read_client))); + scheduler->spawn(make_read_task(std::move(read_client))); // Make sure the read op has completed. co_await scheduler->yield_for(500ms); diff --git a/test/test_io_scheduler.cpp b/test/test_io_scheduler.cpp index e1c015b1..e4248f3e 100644 --- a/test/test_io_scheduler.cpp +++ b/test/test_io_scheduler.cpp @@ -778,8 +778,10 @@ TEST_CASE("io_scheduler task throws after resume", "[io_scheduler]") REQUIRE_THROWS(coro::sync_wait(make_thrower(s))); } -TEST_CASE("issue-287", "[io_scheduler]") +TEST_CASE("coro::io_scheduler::spawn", "[io_scheduler]") { + // issue-287 + const int ITERATIONS = 200000; std::atomic g_count = 0; @@ -794,7 +796,7 @@ TEST_CASE("issue-287", "[io_scheduler]") for (int i = 0; i < ITERATIONS; ++i) { - REQUIRE(scheduler->schedule(task(g_count))); + REQUIRE(scheduler->spawn(task(g_count))); } scheduler->shutdown(); @@ -802,3 +804,24 @@ TEST_CASE("issue-287", "[io_scheduler]") std::cerr << "g_count = \t" << g_count.load() << std::endl; REQUIRE(g_count.load() == ITERATIONS); } + +TEST_CASE("io_scheduler::schedule(task)", "[thread_pool]") +{ + auto scheduler = coro::io_scheduler::make_shared( + coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}); + uint64_t counter{0}; + std::thread::id coroutine_tid; + + auto make_task = [](uint64_t value, std::thread::id& coroutine_id) -> coro::task + { + coroutine_id = std::this_thread::get_id(); + co_return value; + }; + + auto main_tid = std::this_thread::get_id(); + + counter += coro::sync_wait(scheduler->schedule(make_task(53, coroutine_tid))); + + REQUIRE(counter == 53); + REQUIRE(main_tid != coroutine_tid); +} diff --git a/test/test_ring_buffer.cpp b/test/test_ring_buffer.cpp index 374ffd5c..d9ed98d3 100644 --- a/test/test_ring_buffer.cpp +++ b/test/test_ring_buffer.cpp @@ -3,6 +3,7 @@ #include #include +#include #include TEST_CASE("ring_buffer single element", "[ring_buffer]") diff --git a/test/test_shared_mutex.cpp b/test/test_shared_mutex.cpp index f9d87e4f..2a13cb2c 100644 --- a/test/test_shared_mutex.cpp +++ b/test/test_shared_mutex.cpp @@ -3,6 +3,7 @@ #include #include +#include #include TEST_CASE("mutex single waiter not locked exclusive", "[shared_mutex]") diff --git a/test/test_thread_pool.cpp b/test/test_thread_pool.cpp index 692d5303..1ec8da54 100644 --- a/test/test_thread_pool.cpp +++ b/test/test_thread_pool.cpp @@ -137,35 +137,6 @@ TEST_CASE("thread_pool shutdown", "[thread_pool]") REQUIRE(coro::sync_wait(f(tp)) == true); } -TEST_CASE("thread_pool schedule functor", "[thread_pool]") -{ - coro::thread_pool tp{coro::thread_pool::options{1}}; - - auto f = []() -> uint64_t { return 
1; };
-
- auto result = coro::sync_wait(tp.schedule(f));
- REQUIRE(result == 1);
-
- tp.shutdown();
-
- REQUIRE_THROWS(coro::sync_wait(tp.schedule(f)));
-}
-
-TEST_CASE("thread_pool schedule functor return_type = void", "[thread_pool]")
-{
- coro::thread_pool tp{coro::thread_pool::options{1}};
-
- std::atomic<uint64_t> counter{0};
- auto f = [](std::atomic<uint64_t>& c) -> void { c++; };
-
- coro::sync_wait(tp.schedule(f, std::ref(counter)));
- REQUIRE(counter == 1);
-
- tp.shutdown();
-
- REQUIRE_THROWS(coro::sync_wait(tp.schedule(f, std::ref(counter))));
-}
-
 TEST_CASE("thread_pool event jump threads", "[thread_pool]")
 {
 // This test verifies that the thread that sets the event ends up executing every waiter on the event
@@ -239,7 +210,6 @@ TEST_CASE("issue-287", "[thread_pool]")
 std::atomic<uint64_t> g_count = 0;
 auto thread_pool = std::make_shared<coro::thread_pool>(coro::thread_pool::options{.thread_count = 1});
- auto task_container = coro::task_container{thread_pool};
 
 auto task = [](std::atomic<uint64_t>& count) -> coro::task<void>
 {
@@ -249,7 +219,7 @@ TEST_CASE("issue-287", "[thread_pool]")
 
 for (int i = 0; i < ITERATIONS; ++i)
 {
- REQUIRE(task_container.start(task(g_count)));
+ REQUIRE(thread_pool->spawn(task(g_count)));
 }
 
 thread_pool->shutdown();
@@ -257,3 +227,43 @@ TEST_CASE("issue-287", "[thread_pool]")
 std::cerr << "g_count = \t" << g_count.load() << std::endl;
 REQUIRE(g_count.load() == ITERATIONS);
 }
+
+TEST_CASE("thread_pool::spawn", "[thread_pool]")
+{
+ coro::thread_pool tp{coro::thread_pool::options{.thread_count = 2}};
+ std::atomic<uint64_t> counter{0};
+
+ auto make_task = [](std::atomic<uint64_t>& counter, uint64_t amount) -> coro::task<void>
+ {
+ counter += amount;
+ co_return;
+ };
+
+ REQUIRE(tp.spawn(make_task(counter, 1)));
+ REQUIRE(tp.spawn(make_task(counter, 2)));
+ REQUIRE(tp.spawn(make_task(counter, 3)));
+
+ tp.shutdown();
+
+ REQUIRE(counter == 6);
+}
+
+TEST_CASE("thread_pool::schedule(task)", "[thread_pool]")
+{
+ coro::thread_pool tp{coro::thread_pool::options{.thread_count = 1}};
+ uint64_t counter{0};
+ std::thread::id coroutine_tid;
+
+ auto make_task = [](uint64_t value, std::thread::id& coroutine_id) -> coro::task<uint64_t>
+ {
+ coroutine_id = std::this_thread::get_id();
+ co_return value;
+ };
+
+ auto main_tid = std::this_thread::get_id();
+
+ counter += coro::sync_wait(tp.schedule(make_task(53, coroutine_tid)));
+
+ REQUIRE(counter == 53);
+ REQUIRE(main_tid != coroutine_tid);
+}

From da10d37bbfba1cfc27a398f2b02970545903b1cb Mon Sep 17 00:00:00 2001
From: Josh Baldwin
Date: Mon, 17 Feb 2025 09:46:30 -0700
Subject: [PATCH 16/24] coro::when_any (#298)

Adds a new construct that will return the first task's result upon its
completion. All other task results will be discarded/detached/orphaned.

There are currently two ways to invoke when_any. The first takes a
std::stop_token that will signal to the other tasks that a task has
already completed; the user must check for the stop token requesting a
stop, it is not automatic.

The other way is fire and forget: all tasks will be required to complete,
but only the first task's result will be used. This method isn't
particularly recommended, but the API is available for the case where a
stop token isn't required.

EMSCRIPTEN does not support std::stop_source|token, so this new feature
is currently disabled on that platform; I do not want to shim it in. 
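A minimal usage sketch of the two invocation styles described above (the
make_task helper is illustrative, not part of this change):

```C++
#include <coro/coro.hpp>

#include <stop_token>
#include <vector>

int main()
{
    auto make_task = [](uint64_t amount) -> coro::task<uint64_t> { co_return amount; };

    // Fire and forget: every task still runs to completion, but only the
    // first result is consumed.
    std::vector<coro::task<uint64_t>> tasks{};
    tasks.emplace_back(make_task(1));
    tasks.emplace_back(make_task(2));
    auto result = coro::sync_wait(coro::when_any(std::move(tasks)));

    // With cancellation: when_any requests a stop once the first task completes,
    // but each task must poll its stop_token for the request to have any effect.
    std::stop_source ss{};
    std::vector<coro::task<uint64_t>> cancellable{};
    cancellable.emplace_back(make_task(3));
    cancellable.emplace_back(make_task(4));
    auto first = coro::sync_wait(coro::when_any(std::move(ss), std::move(cancellable)));

    return (result == 1 && first == 3) ? 0 : 1;
}
```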
Closes #279 --- .githooks/pre-commit | 4 ++ .githooks/readme-template.md | 18 ++++- CMakeLists.txt | 1 + README.md | 65 +++++++++++++++++- examples/CMakeLists.txt | 6 ++ examples/coro_when_any.cpp | 48 +++++++++++++ include/coro/coro.hpp | 1 + include/coro/when_all.hpp | 4 +- include/coro/when_any.hpp | 122 +++++++++++++++++++++++++++++++++ test/CMakeLists.txt | 6 ++ test/test_when_any.cpp | 128 +++++++++++++++++++++++++++++++++++ 11 files changed, 399 insertions(+), 4 deletions(-) create mode 100644 examples/coro_when_any.cpp create mode 100644 include/coro/when_any.hpp create mode 100644 test/test_when_any.cpp diff --git a/.githooks/pre-commit b/.githooks/pre-commit index c6ef205b..888ec379 100755 --- a/.githooks/pre-commit +++ b/.githooks/pre-commit @@ -82,4 +82,8 @@ template_contents=$(cat 'README.md') example_contents=$(cat 'examples/coro_when_all.cpp') echo "${template_contents/\$\{EXAMPLE_CORO_WHEN_ALL\}/$example_contents}" > README.md +template_contents=$(cat 'README.md') +example_contents=$(cat 'examples/coro_when_any.cpp') +echo "${template_contents/\$\{EXAMPLE_CORO_WHEN_ANY\}/$example_contents}" > README.md + git add README.md diff --git a/.githooks/readme-template.md b/.githooks/readme-template.md index 2246dbbf..8a2bb39a 100644 --- a/.githooks/readme-template.md +++ b/.githooks/readme-template.md @@ -16,6 +16,7 @@ * Higher level coroutine constructs - [coro::sync_wait(awaitable)](#sync_wait) - [coro::when_all(awaitable...) -> awaitable](#when_all) + - [coro::when_any(awaitable...) -> awaitable](#when_any) - [coro::task](#task) - [coro::generator](#generator) - [coro::event](#event) @@ -70,7 +71,7 @@ Offload Result = 20 ``` ### when_all -The `when_all` construct can be used within coroutines to await a set of tasks, or it can be used outside coroutinne context in conjunction with `sync_wait` to await multiple tasks. Each task passed into `when_all` will initially be executed serially by the calling thread so it is recommended to offload the tasks onto a scheduler like `coro::thread_pool` or `coro::io_scheduler` so they can execute in parallel. +The `when_all` construct can be used within coroutines to await a set of tasks, or it can be used outside coroutine context in conjunction with `sync_wait` to await multiple tasks. Each task passed into `when_all` will initially be executed serially by the calling thread so it is recommended to offload the tasks onto an executor like `coro::thread_pool` or `coro::io_scheduler` so they can execute in parallel. ```C++ ${EXAMPLE_CORO_WHEN_ALL} @@ -87,6 +88,21 @@ $ ./examples/coro_when_all first: 1.21 second: 20 ``` +### when_any +The `when_any` construct can be used within coroutines to await a set of tasks and only return the result of the first task that completes. This can also be used outside of a coroutine context in conjunction with `sync_wait` to await the first result. Each task passed into `when_any` will initially be executed serially by the calling thread so it is recommended to offload the tasks onto an executor like `coro::thread_pool` or `coro::io_scheduler` so they can execute in parallel. + +```C++ +${EXAMPLE_CORO_WHEN_ANY} +``` + +Expected output: +```bash +$ ./examples/coro_when_any +result = 1 +result = -1 +``` + + ### task The `coro::task` is the main coroutine building block within `libcoro`. Use task to create your coroutines and `co_await` or `co_yield` tasks within tasks to perform asynchronous operations, lazily evaluation or even spreading work out across a `coro::thread_pool`. 
Tasks are lightweight and only begin execution upon awaiting them. diff --git a/CMakeLists.txt b/CMakeLists.txt index 7b200f67..bdf9df8c 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -93,6 +93,7 @@ set(LIBCORO_SOURCE_FILES include/coro/thread_pool.hpp src/thread_pool.cpp include/coro/time.hpp include/coro/when_all.hpp + include/coro/when_any.hpp ) if(LIBCORO_FEATURE_NETWORKING) diff --git a/README.md b/README.md index 6eefa6e1..3d988823 100644 --- a/README.md +++ b/README.md @@ -16,6 +16,7 @@ * Higher level coroutine constructs - [coro::sync_wait(awaitable)](#sync_wait) - [coro::when_all(awaitable...) -> awaitable](#when_all) + - [coro::when_any(awaitable...) -> awaitable](#when_any) - [coro::task](#task) - [coro::generator](#generator) - [coro::event](#event) @@ -101,7 +102,7 @@ Offload Result = 20 ``` ### when_all -The `when_all` construct can be used within coroutines to await a set of tasks, or it can be used outside coroutinne context in conjunction with `sync_wait` to await multiple tasks. Each task passed into `when_all` will initially be executed serially by the calling thread so it is recommended to offload the tasks onto a scheduler like `coro::thread_pool` or `coro::io_scheduler` so they can execute in parallel. +The `when_all` construct can be used within coroutines to await a set of tasks, or it can be used outside coroutine context in conjunction with `sync_wait` to await multiple tasks. Each task passed into `when_all` will initially be executed serially by the calling thread so it is recommended to offload the tasks onto an executor like `coro::thread_pool` or `coro::io_scheduler` so they can execute in parallel. ```C++ #include @@ -169,6 +170,68 @@ $ ./examples/coro_when_all first: 1.21 second: 20 ``` +### when_any +The `when_any` construct can be used within coroutines to await a set of tasks and only return the result of the first task that completes. This can also be used outside of a coroutine context in conjunction with `sync_wait` to await the first result. Each task passed into `when_any` will initially be executed serially by the calling thread so it is recommended to offload the tasks onto an executor like `coro::thread_pool` or `coro::io_scheduler` so they can execute in parallel. + +```C++ +#include +#include + +int main() +{ + // Create a scheduler to execute all tasks in parallel and also so we can + // suspend a task to act like a timeout event. + auto scheduler = coro::io_scheduler::make_shared(); + + // This task will behave like a long running task and will produce a valid result. + auto make_long_running_task = [](std::shared_ptr scheduler, + std::chrono::milliseconds execution_time) -> coro::task + { + // Schedule the task to execute in parallel. + co_await scheduler->schedule(); + // Fake doing some work... + co_await scheduler->yield_for(execution_time); + // Return the result. + co_return 1; + }; + + auto make_timeout_task = [](std::shared_ptr scheduler) -> coro::task + { + // Schedule a timer to be fired so we know the task timed out. + co_await scheduler->schedule_after(std::chrono::milliseconds{100}); + co_return -1; + }; + + // Example showing the long running task completing first. + { + std::vector> tasks{}; + tasks.emplace_back(make_long_running_task(scheduler, std::chrono::milliseconds{50})); + tasks.emplace_back(make_timeout_task(scheduler)); + + auto result = coro::sync_wait(coro::when_any(std::move(tasks))); + std::cout << "result = " << result << "\n"; + } + + // Example showing the long running task timing out. 
+ { + std::vector> tasks{}; + tasks.emplace_back(make_long_running_task(scheduler, std::chrono::milliseconds{500})); + tasks.emplace_back(make_timeout_task(scheduler)); + + auto result = coro::sync_wait(coro::when_any(std::move(tasks))); + std::cout << "result = " << result << "\n"; + } +} +``` + +Expected output: +```bash +$ ./examples/coro_when_any +result = 1 +result = -1 +``` + + ### task The `coro::task` is the main coroutine building block within `libcoro`. Use task to create your coroutines and `co_await` or `co_yield` tasks within tasks to perform asynchronous operations, lazily evaluation or even spreading work out across a `coro::thread_pool`. Tasks are lightweight and only begin execution upon awaiting them. diff --git a/examples/CMakeLists.txt b/examples/CMakeLists.txt index 2abd9b19..03502791 100644 --- a/examples/CMakeLists.txt +++ b/examples/CMakeLists.txt @@ -69,4 +69,10 @@ if(LIBCORO_FEATURE_NETWORKING) add_executable(coro_http_200_ok_server coro_http_200_ok_server.cpp) target_link_libraries(coro_http_200_ok_server PUBLIC libcoro) target_compile_options(coro_http_200_ok_server PUBLIC ${LIBCORO_EXAMPLE_OPTIONS}) + + if(NOT EMSCRIPTEN) + add_executable(coro_when_any coro_when_any.cpp) + target_link_libraries(coro_when_any PUBLIC libcoro) + target_compile_options(coro_when_any PUBLIC ${LIBCORO_EXAMPLE_OPTIONS}) + endif() endif() diff --git a/examples/coro_when_any.cpp b/examples/coro_when_any.cpp new file mode 100644 index 00000000..7c2c32f6 --- /dev/null +++ b/examples/coro_when_any.cpp @@ -0,0 +1,48 @@ +#include +#include + +int main() +{ + // Create a scheduler to execute all tasks in parallel and also so we can + // suspend a task to act like a timeout event. + auto scheduler = coro::io_scheduler::make_shared(); + + // This task will behave like a long running task and will produce a valid result. + auto make_long_running_task = [](std::shared_ptr scheduler, + std::chrono::milliseconds execution_time) -> coro::task + { + // Schedule the task to execute in parallel. + co_await scheduler->schedule(); + // Fake doing some work... + co_await scheduler->yield_for(execution_time); + // Return the result. + co_return 1; + }; + + auto make_timeout_task = [](std::shared_ptr scheduler) -> coro::task + { + // Schedule a timer to be fired so we know the task timed out. + co_await scheduler->schedule_after(std::chrono::milliseconds{100}); + co_return -1; + }; + + // Example showing the long running task completing first. + { + std::vector> tasks{}; + tasks.emplace_back(make_long_running_task(scheduler, std::chrono::milliseconds{50})); + tasks.emplace_back(make_timeout_task(scheduler)); + + auto result = coro::sync_wait(coro::when_any(std::move(tasks))); + std::cout << "result = " << result << "\n"; + } + + // Example showing the long running task timing out. 
+ { + std::vector> tasks{}; + tasks.emplace_back(make_long_running_task(scheduler, std::chrono::milliseconds{500})); + tasks.emplace_back(make_timeout_task(scheduler)); + + auto result = coro::sync_wait(coro::when_any(std::move(tasks))); + std::cout << "result = " << result << "\n"; + } +} diff --git a/include/coro/coro.hpp b/include/coro/coro.hpp index 0984a23b..82dc75d1 100644 --- a/include/coro/coro.hpp +++ b/include/coro/coro.hpp @@ -41,3 +41,4 @@ #include "coro/thread_pool.hpp" #include "coro/time.hpp" #include "coro/when_all.hpp" +#include "coro/when_any.hpp" diff --git a/include/coro/when_all.hpp b/include/coro/when_all.hpp index 93a26a76..5861db7a 100644 --- a/include/coro/when_all.hpp +++ b/include/coro/when_all.hpp @@ -180,12 +180,12 @@ class when_all_ready_awaitable when_all_ready_awaitable(when_all_ready_awaitable&& other) noexcept( std::is_nothrow_move_constructible_v) : m_latch(std::move(other.m_latch)), - m_tasks(std::move(m_tasks)) + m_tasks(std::move(other.m_tasks)) { } auto operator=(const when_all_ready_awaitable&) -> when_all_ready_awaitable& = delete; - auto operator=(when_all_ready_awaitable&) -> when_all_ready_awaitable& = delete; + auto operator=(when_all_ready_awaitable&&) -> when_all_ready_awaitable& = delete; auto operator co_await() & noexcept { diff --git a/include/coro/when_any.hpp b/include/coro/when_any.hpp new file mode 100644 index 00000000..cd35c032 --- /dev/null +++ b/include/coro/when_any.hpp @@ -0,0 +1,122 @@ +#pragma once + +// EMSCRIPTEN does not currently support std::jthread or std::stop_source|token. +#ifndef EMSCRIPTEN + + #include "coro/concepts/awaitable.hpp" + #include "coro/detail/task_self_deleting.hpp" + #include "coro/event.hpp" + #include "coro/mutex.hpp" + #include "coro/task.hpp" + + #include + #include + #include + #include + #include + #include + +namespace coro +{ + +namespace detail +{ + +template +static auto make_when_any_task( + awaitable a, + coro::mutex& m, + std::atomic& return_value_set, + coro::event& notify, + std::optional& return_value) -> coro::task +{ + auto result = co_await static_cast(a); + co_await m.lock(); + // Its important to only touch return_value and notify once since their lifetimes will be destroyed + // after being set ane notified the first time. + if (return_value_set.load(std::memory_order::acquire) == false) + { + return_value_set.store(true, std::memory_order::release); + return_value = std::move(result); + notify.set(); + } + + co_return; +} + +template< + std::ranges::range range_type, + concepts::awaitable awaitable_type = std::ranges::range_value_t, + typename return_type = typename concepts::awaitable_traits::awaiter_return_type, + typename return_type_base = std::remove_reference_t> +static auto make_when_any_controller_task( + range_type awaitables, coro::event& notify, std::optional& return_value) + -> coro::detail::task_self_deleting +{ + // These must live for as long as the longest running when_any task since each task tries to see + // if it was the first to complete. Only the very first task to complete will set the return_value + // and notify. + coro::mutex m{}; + std::atomic return_value_set{false}; + + // This detatched task will maintain the lifetime of all the when_any tasks. 
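+ // The controller's frame owns the mutex, the flag, and this vector; being
+ // self-deleting, it is freed only after when_all below has finished every task.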
+ std::vector> tasks{}; + + if constexpr (std::ranges::sized_range) + { + tasks.reserve(std::size(awaitables)); + } + + for (auto&& a : awaitables) + { + tasks.emplace_back(make_when_any_task( + std::move(a), m, return_value_set, notify, return_value)); + } + + co_await coro::when_all(std::move(tasks)); + co_return; +} + +} // namespace detail + +template< + std::ranges::range range_type, + concepts::awaitable awaitable_type = std::ranges::range_value_t, + typename return_type = typename concepts::awaitable_traits::awaiter_return_type, + typename return_type_base = std::remove_reference_t> +[[nodiscard]] auto when_any(std::stop_source stop_source, range_type awaitables) -> coro::task +{ + // Using an std::optional to prevent the need to default construct the type on the stack. + std::optional return_value{std::nullopt}; + coro::event notify{}; + + auto controller_task = + detail::make_when_any_controller_task(std::forward(awaitables), notify, return_value); + controller_task.handle().resume(); + + co_await notify; + stop_source.request_stop(); + co_return std::move(return_value.value()); +} + +template< + std::ranges::range range_type, + concepts::awaitable awaitable_type = std::ranges::range_value_t, + typename return_type = typename concepts::awaitable_traits::awaiter_return_type, + typename return_type_base = std::remove_reference_t> +[[nodiscard]] auto when_any(range_type awaitables) -> coro::task +{ + std::optional return_value{std::nullopt}; + coro::event notify{}; + + auto controller_task = + detail::make_when_any_controller_task(std::forward(awaitables), notify, return_value); + controller_task.handle().resume(); + + co_await notify; + co_return std::move(return_value.value()); +} + +} // namespace coro + +#endif // EMSCRIPTEN diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt index 45a96f13..854270d7 100644 --- a/test/CMakeLists.txt +++ b/test/CMakeLists.txt @@ -17,6 +17,12 @@ set(LIBCORO_TEST_SOURCE_FILES catch_amalgamated.cpp ) +if(NOT EMSCRIPTEN) + list(APPEND LIBCORO_TEST_SOURCE_FILES + test_when_any.cpp + ) +endif() + if(LIBCORO_FEATURE_NETWORKING) list(APPEND LIBCORO_TEST_SOURCE_FILES net/test_ip_address.cpp diff --git a/test/test_when_any.cpp b/test/test_when_any.cpp new file mode 100644 index 00000000..c3a9248b --- /dev/null +++ b/test/test_when_any.cpp @@ -0,0 +1,128 @@ +#include "catch_amalgamated.hpp" + +#include +#include +#include + +TEST_CASE("when_any two tasks", "[when_any]") +{ + auto make_task = [](uint64_t amount) -> coro::task { co_return amount; }; + + std::vector> tasks{}; + tasks.emplace_back(make_task(1)); + tasks.emplace_back(make_task(2)); + + auto result = coro::sync_wait(coro::when_any(std::move(tasks))); + REQUIRE(result == 1); +} + +#ifdef LIBCORO_FEATURE_NETWORKING + +TEST_CASE("when_any two tasks one long running", "[when_any]") +{ + auto s = coro::io_scheduler::make_shared( + coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}); + + auto make_task = [](std::shared_ptr s, uint64_t amount) -> coro::task + { + co_await s->schedule(); + if (amount == 1) + { + co_await s->yield_for(std::chrono::milliseconds{100}); + } + co_return amount; + }; + + std::vector> tasks{}; + tasks.emplace_back(make_task(s, 1)); + tasks.emplace_back(make_task(s, 2)); + + auto result = coro::sync_wait(coro::when_any(std::move(tasks))); + REQUIRE(result == 2); + + std::this_thread::sleep_for(std::chrono::milliseconds{250}); +} + +TEST_CASE("when_any two tasks one long running with cancellation", "[when_any]") +{ + std::stop_source 
stop_source{}; + auto s = coro::io_scheduler::make_shared( + coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}); + + auto make_task = + [](std::shared_ptr s, std::stop_token stop_token, uint64_t amount) -> coro::task + { + co_await s->schedule(); + try + { + if (amount == 1) + { + std::cerr << "yielding with amount=" << amount << "\n"; + co_await s->yield_for(std::chrono::milliseconds{100}); + if (stop_token.stop_requested()) + { + std::cerr << "throwing\n"; + throw std::runtime_error{"task was cancelled"}; + } + else + { + std::cerr << "not throwing\n"; + } + } + } + catch (const std::exception& e) + { + REQUIRE(amount == 1); + REQUIRE(e.what() == std::string{"task was cancelled"}); + } + co_return amount; + }; + + std::vector> tasks{}; + tasks.emplace_back(make_task(s, stop_source.get_token(), 1)); + tasks.emplace_back(make_task(s, stop_source.get_token(), 2)); + + auto result = coro::sync_wait(coro::when_any(std::move(stop_source), std::move(tasks))); + REQUIRE(result == 2); + + std::this_thread::sleep_for(std::chrono::milliseconds{250}); +} + +TEST_CASE("when_any timeout", "[when_any]") +{ + auto scheduler = coro::io_scheduler::make_shared(); + + auto make_long_running_task = [](std::shared_ptr scheduler, + std::chrono::milliseconds execution_time) -> coro::task + { + co_await scheduler->schedule(); + co_await scheduler->yield_for(execution_time); + co_return 1; + }; + + auto make_timeout_task = [](std::shared_ptr scheduler) -> coro::task + { + co_await scheduler->schedule_after(std::chrono::milliseconds{100}); + co_return -1; + }; + + { + std::vector> tasks{}; + tasks.emplace_back(make_long_running_task(scheduler, std::chrono::milliseconds{50})); + tasks.emplace_back(make_timeout_task(scheduler)); + + auto result = coro::sync_wait(coro::when_any(std::move(tasks))); + REQUIRE(result == 1); + } + + { + std::vector> tasks{}; + tasks.emplace_back(make_long_running_task(scheduler, std::chrono::milliseconds{500})); + tasks.emplace_back(make_timeout_task(scheduler)); + + auto result = coro::sync_wait(coro::when_any(std::move(tasks))); + REQUIRE(result == -1); + } +} + +#endif From 8658123dc47c82d0af787f07f4b1078aa8309dea Mon Sep 17 00:00:00 2001 From: Josh Baldwin Date: Mon, 17 Feb 2025 16:35:21 -0700 Subject: [PATCH 17/24] coro::when_any support tuples (different return types!) (#301) when_any now supports taking a parameter pack of tasks that can all return unique types, the most useful use of this right now is by using when_any to have a task and a timeout. To facilitate this usage coro::io_scheduler has a new schedule function: schedule(stop_token, task, timeout) Closes #300 --- .githooks/readme-template.md | 1 + README.md | 1 + include/coro/io_scheduler.hpp | 89 ++++++++++++++++++++++++- include/coro/when_any.hpp | 70 ++++++++++++++++++++ src/net/tls/client.cpp | 2 +- test/test_when_any.cpp | 121 +++++++++++++++++++++++++++++++++- 6 files changed, 281 insertions(+), 3 deletions(-) diff --git a/.githooks/readme-template.md b/.githooks/readme-template.md index 8a2bb39a..be91f4db 100644 --- a/.githooks/readme-template.md +++ b/.githooks/readme-template.md @@ -318,6 +318,7 @@ The `coro::io_scheduler` can use a dedicated spawned thread for processing event * `coro::io_scheduler::schedule()` Use `co_await` on this method inside a coroutine to transfer the tasks execution to the `coro::io_scheduler`. 
 * `coro::io_scheduler::spawn(coro::task<void>)` Spawns the task to be detached and owned by the `coro::io_scheduler`; use this if you want to fire and forget the task, and the `coro::io_scheduler` will maintain the task's lifetime.
 * `coro::io_scheduler::schedule(coro::task<return_type> task) -> coro::task<return_type>` schedules the task on the `coro::io_scheduler` and then returns the result in a task that must be awaited. This is useful if you want to schedule work on the `coro::io_scheduler` and wait for the result.
+* `coro::io_scheduler::schedule(std::stop_source st, coro::task<return_type> task, std::chrono::duration<rep, period> timeout) -> coro::expected<return_type, timeout_status>` schedules the task on the `coro::io_scheduler` and then returns the result in a task that must be awaited. That task will either return the completed task's value if it completes before the timeout, or a return value denoting that the task timed out. If the task times out, `std::stop_source.request_stop()` will be invoked so the task can check for it and stop executing. This must be done by the user; the `coro::io_scheduler` cannot stop the execution of the task, but through the `std::stop_source` it can signal to the task that it should stop executing.
 * `coro::io_scheduler::schedule_after(std::chrono::milliseconds amount)` schedules the current task to be rescheduled after a specified amount of time has passed.
 * `coro::io_scheduler::schedule_at(std::chrono::steady_clock::time_point time)` schedules the current task to be rescheduled at the specified timepoint.
diff --git a/README.md b/README.md
index 3d988823..4f69a9bf 100644
--- a/README.md
+++ b/README.md
@@ -928,6 +928,7 @@ The `coro::io_scheduler` can use a dedicated spawned thread for processing event
 * `coro::io_scheduler::schedule()` Use `co_await` on this method inside a coroutine to transfer the task's execution to the `coro::io_scheduler`.
 * `coro::io_scheduler::spawn(coro::task<void>)` Spawns the task to be detached and owned by the `coro::io_scheduler`; use this if you want to fire and forget the task, and the `coro::io_scheduler` will maintain the task's lifetime.
 * `coro::io_scheduler::schedule(coro::task<return_type> task) -> coro::task<return_type>` schedules the task on the `coro::io_scheduler` and then returns the result in a task that must be awaited. This is useful if you want to schedule work on the `coro::io_scheduler` and wait for the result.
+* `coro::io_scheduler::schedule(std::stop_source st, coro::task<return_type> task, std::chrono::duration<rep, period> timeout) -> coro::expected<return_type, timeout_status>` schedules the task on the `coro::io_scheduler` and then returns the result in a task that must be awaited. That task will either return the completed task's value if it completes before the timeout, or a return value denoting that the task timed out. If the task times out, `std::stop_source.request_stop()` will be invoked so the task can check for it and stop executing. This must be done by the user; the `coro::io_scheduler` cannot stop the execution of the task, but through the `std::stop_source` it can signal to the task that it should stop executing.
 * `coro::io_scheduler::schedule_after(std::chrono::milliseconds amount)` schedules the current task to be rescheduled after a specified amount of time has passed.
 * `coro::io_scheduler::schedule_at(std::chrono::steady_clock::time_point time)` schedules the current task to be rescheduled at the specified timepoint.
* `coro::io_scheduler::yield()` will yield execution of the current task and resume after other tasks have had a chance to execute. This effectively places the task at the back of the queue of waiting tasks. diff --git a/include/coro/io_scheduler.hpp b/include/coro/io_scheduler.hpp index 4cdae2d3..3b531480 100644 --- a/include/coro/io_scheduler.hpp +++ b/include/coro/io_scheduler.hpp @@ -1,6 +1,7 @@ #pragma once #include "coro/detail/poll_info.hpp" +#include "coro/expected.hpp" #include "coro/fd.hpp" #include "coro/poll.hpp" #include "coro/thread_pool.hpp" @@ -20,6 +21,12 @@ namespace coro { +enum timeout_status +{ + no_timeout, + timeout, +}; + class io_scheduler : public std::enable_shared_from_this { using timed_events = detail::poll_info::timed_events; @@ -195,6 +202,77 @@ class io_scheduler : public std::enable_shared_from_this co_return co_await task; } + /** + * Schedules a task on the io_scheduler that must complete within the given timeout. + * NOTE: This version of schedule does *NOT* cancel the given task, it will continue executing even if it times out. + * It is absolutely recommended to use the version of this schedule() function that takes an std::stop_token + * and have the scheduled task check to see if its been cancelled due to timeout to not waste resources. + * @tparam return_type The return value of the task. + * @param task The task to schedule on the io_scheduler with the given timeout. + * @param timeout How long should this task be given to complete before it times out? + * @return The task to await for the input task to complete. + */ + template + [[nodiscard]] auto schedule(coro::task task, std::chrono::duration timeout) + -> coro::task> + { + using namespace std::chrono_literals; + + // If negative or 0 timeout, just schedule the task as normal. + auto timeout_ms = std::max(std::chrono::duration_cast(timeout), 0ms); + if (timeout_ms == 0ms) + { + co_return coro::expected(co_await schedule(std::move(task))); + } + + auto result = co_await when_any(std::move(task), make_timeout_task(timeout_ms)); + if (!std::holds_alternative(result)) + { + co_return coro::expected(std::move(std::get<0>(result))); + } + else + { + co_return coro::unexpected(std::move(std::get<1>(result))); + } + } + +#ifndef EMSCRIPTEN + /** + * Schedules a task on the io_scheduler that must complete within the given timeout. + * NOTE: This version of the task will have the stop_source.request_stop() be called if the timeout triggers. + * It is up to you to check in the scheduled task if the stop has been requested to actually stop executing + * the task. + * @tparam return_type The return value of the task. + * @param task The task to schedule on the io_scheduler with the given timeout. + * @param timeout How long should this task be given to complete before it times out? + * @return The task to await for the input task to complete. + */ + template + [[nodiscard]] auto + schedule(std::stop_source stop_source, coro::task task, std::chrono::duration timeout) + -> coro::task> + { + using namespace std::chrono_literals; + + // If negative or 0 timeout, just schedule the task as normal. 
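+        // std::max clamps a negative timeout up to 0ms; a 0ms timeout falls through
+        // to a plain schedule() below with no timer and no when_any race.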
+ auto timeout_ms = std::max(std::chrono::duration_cast(timeout), 0ms); + if (timeout_ms == 0ms) + { + co_return coro::expected(co_await schedule(std::move(task))); + } + + auto result = co_await when_any(std::move(stop_source), std::move(task), make_timeout_task(timeout_ms)); + if (!std::holds_alternative(result)) + { + co_return coro::expected(std::move(std::get<0>(result))); + } + else + { + co_return coro::unexpected(std::move(std::get<1>(result))); + } + } +#endif + /** * Schedules the current task to run after the given amount of time has elapsed. * @param amount The amount of time to wait before resuming execution of this task. @@ -212,7 +290,10 @@ class io_scheduler : public std::enable_shared_from_this /** * Yields the current task to the end of the queue of waiting tasks. */ - [[nodiscard]] auto yield() -> schedule_operation { return schedule_operation{*this}; }; + [[nodiscard]] auto yield() -> schedule_operation + { + return schedule_operation{*this}; + }; /** * Yields the current task for the given amount of time. @@ -386,6 +467,12 @@ class io_scheduler : public std::enable_shared_from_this auto add_timer_token(time_point tp, detail::poll_info& pi) -> timed_events::iterator; auto remove_timer_token(timed_events::iterator pos) -> void; auto update_timeout(time_point now) -> void; + + auto make_timeout_task(std::chrono::milliseconds timeout) -> coro::task + { + co_await schedule_after(timeout); + co_return timeout_status::timeout; + } }; } // namespace coro diff --git a/include/coro/when_any.hpp b/include/coro/when_any.hpp index cd35c032..a6c61a8c 100644 --- a/include/coro/when_any.hpp +++ b/include/coro/when_any.hpp @@ -6,6 +6,7 @@ #include "coro/concepts/awaitable.hpp" #include "coro/detail/task_self_deleting.hpp" #include "coro/event.hpp" + #include "coro/expected.hpp" #include "coro/mutex.hpp" #include "coro/task.hpp" @@ -22,6 +23,38 @@ namespace coro namespace detail { +template +auto make_when_any_tuple_task( + coro::mutex& m, + std::atomic& return_value_set, + coro::event& notify, + std::optional& return_value, + awaitable a) -> coro::task +{ + auto result = co_await static_cast(a); + auto scoped_lock = co_await m.lock(); + if (return_value_set.load(std::memory_order::acquire) == false) + { + return_value_set.store(true, std::memory_order::release); + return_value = std::move(result); + notify.set(); + } + + co_return; +} + +template +[[nodiscard]] auto make_when_any_tuple_controller_task( + coro::event& notify, std::optional& return_value, awaitable_type... awaitables) + -> coro::detail::task_self_deleting +{ + coro::mutex m{}; + std::atomic return_value_set{false}; + + co_await when_all(make_when_any_tuple_task(m, return_value_set, notify, return_value, std::move(awaitables))...); + co_return; +} + template static auto make_when_any_task( awaitable a, @@ -79,6 +112,43 @@ static auto make_when_any_controller_task( } // namespace detail +template +[[nodiscard]] auto when_any(std::stop_source stop_source, awaitable_type... awaitables) -> coro::task< + std::variant::awaiter_return_type>...>> +{ + using return_type = std::variant< + std::remove_reference_t::awaiter_return_type>...>; + + std::optional return_value{std::nullopt}; + coro::event notify{}; + + auto controller_task = + detail::make_when_any_tuple_controller_task(notify, return_value, std::forward(awaitables)...); + controller_task.handle().resume(); + + co_await notify; + stop_source.request_stop(); + co_return std::move(return_value.value()); +} + +template +[[nodiscard]] auto when_any(awaitable_type... 
awaitables) -> coro::task< + std::variant::awaiter_return_type>...>> +{ + using return_type = std::variant< + std::remove_reference_t::awaiter_return_type>...>; + + std::optional return_value{std::nullopt}; + coro::event notify{}; + + auto controller_task = + detail::make_when_any_tuple_controller_task(notify, return_value, std::forward(awaitables)...); + controller_task.handle().resume(); + + co_await notify; + co_return std::move(return_value.value()); +} + template< std::ranges::range range_type, concepts::awaitable awaitable_type = std::ranges::range_value_t, diff --git a/src/net/tls/client.cpp b/src/net/tls/client.cpp index ba5388de..1c67fdf7 100644 --- a/src/net/tls/client.cpp +++ b/src/net/tls/client.cpp @@ -58,7 +58,7 @@ client::~client() if (m_tls_info.m_tls_ptr != nullptr && !m_tls_info.m_tls_error) { // Should the shutdown timeout be configurable? - m_io_scheduler->schedule(tls_shutdown_and_free( + m_io_scheduler->spawn(tls_shutdown_and_free( m_io_scheduler, std::move(m_socket), std::move(m_tls_info.m_tls_ptr), std::chrono::seconds{30})); } } diff --git a/test/test_when_any.cpp b/test/test_when_any.cpp index c3a9248b..ffeb3f0f 100644 --- a/test/test_when_any.cpp +++ b/test/test_when_any.cpp @@ -1,5 +1,6 @@ #include "catch_amalgamated.hpp" +#include #include #include #include @@ -90,7 +91,8 @@ TEST_CASE("when_any two tasks one long running with cancellation", "[when_any]") TEST_CASE("when_any timeout", "[when_any]") { - auto scheduler = coro::io_scheduler::make_shared(); + auto scheduler = coro::io_scheduler::make_shared( + coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 2}}); auto make_long_running_task = [](std::shared_ptr scheduler, std::chrono::milliseconds execution_time) -> coro::task @@ -125,4 +127,121 @@ TEST_CASE("when_any timeout", "[when_any]") } } +TEST_CASE("when_any io_scheduler::schedule(task, timeout)", "[when_any]") +{ + auto scheduler = coro::io_scheduler::make_shared( + coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 2}}); + + auto make_task = [](std::shared_ptr scheduler, + std::chrono::milliseconds execution_time) -> coro::task + { + co_await scheduler->yield_for(execution_time); + co_return 1; + }; + + { + auto result = coro::sync_wait( + scheduler->schedule(make_task(scheduler, std::chrono::milliseconds{10}), std::chrono::milliseconds{50})); + REQUIRE(result.has_value()); + REQUIRE(result.value() == 1); + } + + { + auto result = coro::sync_wait( + scheduler->schedule(make_task(scheduler, std::chrono::milliseconds{50}), std::chrono::milliseconds{10})); + REQUIRE_FALSE(result.has_value()); + REQUIRE(result.error() == coro::timeout_status::timeout); + } +} + + #ifndef EMSCRIPTEN +TEST_CASE("when_any io_scheduler::schedule(task, timeout stop_token)", "[when_any]") +{ + auto scheduler = coro::io_scheduler::make_shared( + coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 2}}); + + auto make_task = [](std::shared_ptr scheduler, + std::chrono::milliseconds execution_time, + std::stop_token stop_token) -> coro::task + { + co_await scheduler->yield_for(execution_time); + if (stop_token.stop_requested()) + { + co_return -1; + } + co_return 1; + }; + + { + std::stop_source stop_source{}; + auto result = coro::sync_wait(scheduler->schedule( + std::move(stop_source), + make_task(scheduler, std::chrono::milliseconds{10}, stop_source.get_token()), + std::chrono::milliseconds{50})); + REQUIRE(result.has_value()); + REQUIRE(result.value() == 1); + } + + { + std::stop_source 
stop_source{}; + auto result = coro::sync_wait(scheduler->schedule( + std::move(stop_source), + make_task(scheduler, std::chrono::milliseconds{50}, stop_source.get_token()), + std::chrono::milliseconds{10})); + REQUIRE_FALSE(result.has_value()); + REQUIRE(result.error() == coro::timeout_status::timeout); + } +} + #endif + +TEST_CASE("when_any tuple multiple", "[when_any]") +{ + using namespace std::chrono_literals; + + auto scheduler = coro::io_scheduler::make_shared( + coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 4}}); + + auto make_task1 = [](std::shared_ptr scheduler, + std::chrono::milliseconds execution_time) -> coro::task + { + co_await scheduler->schedule_after(execution_time); + co_return 1; + }; + + auto make_task2 = [](std::shared_ptr scheduler, + std::chrono::milliseconds execution_time) -> coro::task + { + co_await scheduler->schedule_after(execution_time); + co_return 3.14; + }; + + auto make_task3 = [](std::shared_ptr scheduler, + std::chrono::milliseconds execution_time) -> coro::task + { + co_await scheduler->schedule_after(execution_time); + co_return std::string{"hello world"}; + }; + + { + auto result = coro::sync_wait( + coro::when_any(make_task1(scheduler, 10ms), make_task2(scheduler, 50ms), make_task3(scheduler, 50ms))); + REQUIRE(result.index() == 0); + REQUIRE(std::get<0>(result) == 1); + } + + { + auto result = coro::sync_wait( + coro::when_any(make_task1(scheduler, 50ms), make_task2(scheduler, 10ms), make_task3(scheduler, 50ms))); + REQUIRE(result.index() == 1); + REQUIRE(std::get<1>(result) == 3.14); + } + + { + auto result = coro::sync_wait( + coro::when_any(make_task1(scheduler, 50ms), make_task2(scheduler, 50ms), make_task3(scheduler, 10ms))); + REQUIRE(result.index() == 2); + REQUIRE(std::get<2>(result) == "hello world"); + } +} + #endif From 5ace33797f4815266648aba077654c5b68baab2b Mon Sep 17 00:00:00 2001 From: Josh Baldwin Date: Tue, 18 Feb 2025 15:28:24 -0700 Subject: [PATCH 18/24] sync_wait() remove extra move constructor call (#302) * sync_wait() remove extra move constructor call User @baderouaich identified that libcoro's sync_wait on a complex object would invoke an extra move constructor that other coroutine libraries did not. This change now correctly forwards the return_value into the promise if the object is move constructible instead of double moving. 
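As a rough sketch of the difference (using a hypothetical move-counting `Probe` type, not code from this patch): taking the return value by value move-constructs the parameter and then moves it again into the promise's storage, while taking it by rvalue reference binds the `co_return` operand directly, leaving a single move.

```C++
#include <iostream>
#include <utility>

// Hypothetical move-counting type, for illustration only.
struct Probe
{
    int moves{0};
    Probe() = default;
    Probe(Probe&& other) noexcept : moves(other.moves + 1) {}
    Probe& operator=(Probe&& other) noexcept
    {
        moves = other.moves + 1;
        return *this;
    }
};

struct promise_by_value
{
    Probe storage{};
    // Old shape: `p` is move-constructed from the caller's value (move #1),
    // then moved again into storage (move #2).
    void return_value(Probe p) { storage = std::move(p); }
};

struct promise_by_rvalue
{
    Probe storage{};
    // New shape: binds the caller's value directly, so only the move into
    // storage happens (move #1).
    void return_value(Probe&& p) { storage = std::move(p); }
};

int main()
{
    promise_by_value  a{};
    promise_by_rvalue b{};
    Probe p1{}, p2{};
    a.return_value(std::move(p1));
    b.return_value(std::move(p2));
    std::cout << "by value:      " << a.storage.moves << " moves\n"; // 2
    std::cout << "by rvalue ref: " << b.storage.moves << " moves\n"; // 1
}
```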
Closes #286

* fix windows build issues
---
 include/coro/task.hpp     |  2 +-
 test/test_sync_wait.cpp   | 81 +++++++++++++++++++++++++++++++++++++++
 test/test_thread_pool.cpp |  2 +-
 3 files changed, 83 insertions(+), 2 deletions(-)

diff --git a/include/coro/task.hpp b/include/coro/task.hpp
index d2cd6968..6a736a1c 100644
--- a/include/coro/task.hpp
+++ b/include/coro/task.hpp
@@ -102,7 +102,7 @@ struct promise final : public promise_base
         }
     }

-    auto return_value(stored_type value) -> void requires(not return_type_is_reference)
+    auto return_value(stored_type&& value) -> void requires(not return_type_is_reference)
     {
         if constexpr (std::is_move_constructible_v<stored_type>)
         {
diff --git a/test/test_sync_wait.cpp b/test/test_sync_wait.cpp
index 1a7bcbab..ee469535 100644
--- a/test/test_sync_wait.cpp
+++ b/test/test_sync_wait.cpp
@@ -108,3 +108,84 @@ TEST_CASE("sync_wait very rarely hangs issue-270", "[sync_wait]")

     REQUIRE(count > 0);
 }
+
+struct Foo
+{
+    static std::atomic<uint64_t> m_copies;
+    static std::atomic<uint64_t> m_moves;
+    int v;
+    Foo() { std::cerr << "Foo::Foo()" << std::endl; }
+    Foo(const Foo& other) : v(other.v)
+    {
+        std::cerr << "Foo::Foo(const Foo&)" << std::endl;
+        m_copies.fetch_add(1);
+    }
+    Foo(Foo&& other) : v(std::exchange(other.v, 0))
+    {
+        std::cerr << "Foo::Foo(Foo&&)" << std::endl;
+        m_moves.fetch_add(1);
+    }
+
+    auto operator=(const Foo& other) -> Foo&
+    {
+        std::cerr << "Foo::operator=(const Foo&) -> Foo&" << std::endl;
+        m_copies.fetch_add(1);
+        if (std::addressof(other) != this)
+        {
+            this->v = other.v;
+        }
+        return *this;
+    }
+    auto operator=(Foo&& other) -> Foo&
+    {
+        std::cerr << "Foo::operator=(Foo&&) -> Foo&" << std::endl;
+        m_moves.fetch_add(1);
+        if (std::addressof(other) != this)
+        {
+            this->v = std::exchange(other.v, 0);
+        }
+        return *this;
+    }
+
+    ~Foo()
+    {
+        std::cerr << "Foo::~Foo()"
+                  << "v=" << this->v << std::endl;
+    }
+};
+
+std::atomic<uint64_t> Foo::m_copies = std::atomic<uint64_t>{0};
+std::atomic<uint64_t> Foo::m_moves  = std::atomic<uint64_t>{0};
+
+TEST_CASE("issue-286", "[sync_wait]")
+{
+    /**
+     * The expected output from this should be the following as of writing this test.
+     * https://github.com/jbaldwin/libcoro/issues/286 user @baderouaich reported
+     * that libcoro, compared to other coroutine libraries' sync_wait equivalent, had
+     * an extra move.
+ * + * Foo::Foo() + * co_return foo; + * Foo::Foo(Foo &&) + * Foo::~Foo()v=0 + * Foo::Foo(Foo &&) + * Foo::~Foo()v=0 + * 1337 + * Foo::~Foo()v=1337 + */ + + auto getFoo = []() -> coro::task + { + Foo foo{}; + foo.v = 1337; + std::cerr << "co_return foo;" << std::endl; + co_return foo; + }; + + auto foo = coro::sync_wait(getFoo()); + std::cerr << foo.v << std::endl; + REQUIRE(foo.v == 1337); + REQUIRE(foo.m_copies == 0); + REQUIRE(foo.m_moves == 2); +} diff --git a/test/test_thread_pool.cpp b/test/test_thread_pool.cpp index 1ec8da54..448eb120 100644 --- a/test/test_thread_pool.cpp +++ b/test/test_thread_pool.cpp @@ -183,7 +183,7 @@ TEST_CASE("thread_pool high cpu usage when threadcount is greater than the numbe auto wait_for_task = [](coro::thread_pool& pool, std::chrono::seconds delay) -> coro::task<> { - auto sleep_for_task = [](std::chrono::seconds duration) -> coro::task + auto sleep_for_task = [](std::chrono::seconds duration) -> coro::task { std::this_thread::sleep_for(duration); co_return duration.count(); From eef5815cb12801b1b1ff0d1e66f4993825abd481 Mon Sep 17 00:00:00 2001 From: lyc <30356570@qq.com> Date: Sat, 22 Feb 2025 02:20:12 +0800 Subject: [PATCH 19/24] std::shared_ptr passed by value instead of by reference (#303) --- README.md | 2 +- examples/coro_latch.cpp | 2 +- test/net/test_dns_resolver.cpp | 2 +- test/net/test_tcp_server.cpp | 12 ++++++------ test/test_io_scheduler.cpp | 8 ++++---- test/test_shared_mutex.cpp | 6 +++--- 6 files changed, 16 insertions(+), 16 deletions(-) diff --git a/README.md b/README.md index 4f69a9bf..cdbae81e 100644 --- a/README.md +++ b/README.md @@ -453,7 +453,7 @@ int main() // This task does 'work' and counts down on the latch when completed. The final child task to // complete will end up resuming the latch task when the latch's count reaches zero. - auto make_worker_task = [](std::shared_ptr& tp, coro::latch& l, int64_t i) -> coro::task + auto make_worker_task = [](std::shared_ptr tp, coro::latch& l, int64_t i) -> coro::task { // Schedule the worker task onto the thread pool. co_await tp->schedule(); diff --git a/examples/coro_latch.cpp b/examples/coro_latch.cpp index 75777ccc..d7fe7724 100644 --- a/examples/coro_latch.cpp +++ b/examples/coro_latch.cpp @@ -27,7 +27,7 @@ int main() // This task does 'work' and counts down on the latch when completed. The final child task to // complete will end up resuming the latch task when the latch's count reaches zero. - auto make_worker_task = [](std::shared_ptr& tp, coro::latch& l, int64_t i) -> coro::task + auto make_worker_task = [](std::shared_ptr tp, coro::latch& l, int64_t i) -> coro::task { // Schedule the worker task onto the thread pool. 
co_await tp->schedule(); diff --git a/test/net/test_dns_resolver.cpp b/test/net/test_dns_resolver.cpp index 9e2cd135..3fac8216 100644 --- a/test/net/test_dns_resolver.cpp +++ b/test/net/test_dns_resolver.cpp @@ -12,7 +12,7 @@ TEST_CASE("dns_resolver basic", "[dns]") coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}); coro::net::dns::resolver dns_resolver{scheduler, std::chrono::milliseconds{5000}}; - auto make_host_by_name_task = [](std::shared_ptr& scheduler, + auto make_host_by_name_task = [](std::shared_ptr scheduler, coro::net::dns::resolver& dns_resolver, coro::net::hostname hn) -> coro::task { diff --git a/test/net/test_tcp_server.cpp b/test/net/test_tcp_server.cpp index 44b3097e..95686af5 100644 --- a/test/net/test_tcp_server.cpp +++ b/test/net/test_tcp_server.cpp @@ -14,9 +14,9 @@ TEST_CASE("tcp_server ping server", "[tcp_server]") auto scheduler = coro::io_scheduler::make_shared( coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}); - auto make_client_task = [](std::shared_ptr& scheduler, - const std::string& client_msg, - const std::string& server_msg) -> coro::task + auto make_client_task = [](std::shared_ptr scheduler, + const std::string& client_msg, + const std::string& server_msg) -> coro::task { co_await scheduler->schedule(); coro::net::tcp::client client{scheduler}; @@ -49,7 +49,7 @@ TEST_CASE("tcp_server ping server", "[tcp_server]") co_return; }; - auto make_server_task = [](std::shared_ptr& scheduler, + auto make_server_task = [](std::shared_ptr scheduler, const std::string& client_msg, const std::string& server_msg) -> coro::task { @@ -99,7 +99,7 @@ TEST_CASE("tcp_server concurrent polling on the same socket", "[tcp_server]") auto scheduler = coro::io_scheduler::make_shared(coro::io_scheduler::options{ .execution_strategy = coro::io_scheduler::execution_strategy_t::process_tasks_inline}); - auto make_server_task = [](std::shared_ptr& scheduler) -> coro::task + auto make_server_task = [](std::shared_ptr scheduler) -> coro::task { auto make_read_task = [](coro::net::tcp::client client) -> coro::task { @@ -144,7 +144,7 @@ TEST_CASE("tcp_server concurrent polling on the same socket", "[tcp_server]") co_return data; }; - auto make_client_task = [](std::shared_ptr& scheduler) -> coro::task + auto make_client_task = [](std::shared_ptr scheduler) -> coro::task { co_await scheduler->schedule(); coro::net::tcp::client client{scheduler}; diff --git a/test/test_io_scheduler.cpp b/test/test_io_scheduler.cpp index e4248f3e..5d9e9ebf 100644 --- a/test/test_io_scheduler.cpp +++ b/test/test_io_scheduler.cpp @@ -598,10 +598,10 @@ TEST_CASE("io_scheduler self generating coroutine (stack overflow check)", "[io_ std::vector> tasks; tasks.reserve(total); - auto func = [](std::shared_ptr& s, - uint64_t& counter, - auto f, - std::vector>& tasks) -> coro::task + auto func = [](std::shared_ptr s, + uint64_t& counter, + auto f, + std::vector>& tasks) -> coro::task { co_await s->schedule(); ++counter; diff --git a/test/test_shared_mutex.cpp b/test/test_shared_mutex.cpp index 2a13cb2c..179d37d6 100644 --- a/test/test_shared_mutex.cpp +++ b/test/test_shared_mutex.cpp @@ -92,7 +92,7 @@ TEST_CASE("mutex many shared and exclusive waiters interleaved", "[shared_mutex] std::atomic read_value{false}; - auto make_exclusive_task = [](std::shared_ptr& s, + auto make_exclusive_task = [](std::shared_ptr s, coro::shared_mutex& m, std::atomic& read_value) -> coro::task { @@ -112,11 +112,11 @@ TEST_CASE("mutex many shared and exclusive waiters 
interleaved", "[shared_mutex] co_return; }; - auto make_shared_tasks_task = [](std::shared_ptr& s, + auto make_shared_tasks_task = [](std::shared_ptr s, coro::shared_mutex& m, std::atomic& read_value) -> coro::task { - auto make_shared_task = [](std::shared_ptr& s, + auto make_shared_task = [](std::shared_ptr s, coro::shared_mutex& m, std::atomic& read_value) -> coro::task { From c298be09610bac7aae047607dd4959def40ebc12 Mon Sep 17 00:00:00 2001 From: Josh Baldwin Date: Tue, 25 Feb 2025 12:46:52 -0700 Subject: [PATCH 20/24] task_self_deleting bugfix in copy assignment operator (#305) It was comparing to `nullptr` not `this`, oops. --- src/detail/task_self_deleting.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/detail/task_self_deleting.cpp b/src/detail/task_self_deleting.cpp index c86155ab..3d4eaf44 100644 --- a/src/detail/task_self_deleting.cpp +++ b/src/detail/task_self_deleting.cpp @@ -20,7 +20,7 @@ promise_self_deleting::promise_self_deleting(promise_self_deleting&& other) auto promise_self_deleting::operator=(promise_self_deleting&& other) -> promise_self_deleting& { - if (std::addressof(other) != nullptr) + if (std::addressof(other) != this) { m_executor_size = std::exchange(other.m_executor_size, nullptr); } From 8fc3e2712f11d3862a2285a7915b4cfdc9d0535d Mon Sep 17 00:00:00 2001 From: Josh Baldwin Date: Wed, 26 Mar 2025 12:32:26 -0600 Subject: [PATCH 21/24] Add support for coro::task for coro::when_any (#310) Adds support for return type `void` for `when_any` when using ranges. Tuple when_any requires the user to specify the return type as `std::monostate` instad of `void` because `std::variant` does not allow or support `void` as a type per the standard. Closes #306 --- include/coro/when_any.hpp | 142 +++++++++++++++++++++++++------------- test/test_when_all.cpp | 22 ++++++ test/test_when_any.cpp | 79 +++++++++++++++++++++ 3 files changed, 194 insertions(+), 49 deletions(-) diff --git a/include/coro/when_any.hpp b/include/coro/when_any.hpp index a6c61a8c..94e001f1 100644 --- a/include/coro/when_any.hpp +++ b/include/coro/when_any.hpp @@ -25,17 +25,13 @@ namespace detail template auto make_when_any_tuple_task( - coro::mutex& m, - std::atomic& return_value_set, - coro::event& notify, - std::optional& return_value, - awaitable a) -> coro::task + std::atomic& first_completed, coro::event& notify, std::optional& return_value, awaitable a) + -> coro::task { - auto result = co_await static_cast(a); - auto scoped_lock = co_await m.lock(); - if (return_value_set.load(std::memory_order::acquire) == false) + auto expected = false; + auto result = co_await static_cast(a); + if (first_completed.compare_exchange_strong(expected, true, std::memory_order::acq_rel, std::memory_order::relaxed)) { - return_value_set.store(true, std::memory_order::release); return_value = std::move(result); notify.set(); } @@ -48,28 +44,30 @@ template coro::event& notify, std::optional& return_value, awaitable_type... 
awaitables) -> coro::detail::task_self_deleting { - coro::mutex m{}; - std::atomic return_value_set{false}; + std::atomic first_completed{false}; + co_await when_all(make_when_any_tuple_task(first_completed, notify, return_value, std::move(awaitables))...); + co_return; +} - co_await when_all(make_when_any_tuple_task(m, return_value_set, notify, return_value, std::move(awaitables))...); +template +static auto make_when_any_task_return_void(awaitable a, coro::event& notify) -> coro::task +{ + co_await static_cast(a); + notify.set(); // This will trigger the controller task to wake up exactly once. co_return; } template static auto make_when_any_task( - awaitable a, - coro::mutex& m, - std::atomic& return_value_set, - coro::event& notify, - std::optional& return_value) -> coro::task + awaitable a, std::atomic& first_completed, coro::event& notify, std::optional& return_value) + -> coro::task { - auto result = co_await static_cast(a); - co_await m.lock(); + auto expected = false; + auto result = co_await static_cast(a); // Its important to only touch return_value and notify once since their lifetimes will be destroyed - // after being set ane notified the first time. - if (return_value_set.load(std::memory_order::acquire) == false) + // after being set and notified the first time. + if (first_completed.compare_exchange_strong(expected, true, std::memory_order::acq_rel, std::memory_order::relaxed)) { - return_value_set.store(true, std::memory_order::release); return_value = std::move(result); notify.set(); } @@ -77,6 +75,26 @@ static auto make_when_any_task( co_return; } +template> +static auto make_when_any_controller_task_return_void(range_type awaitables, coro::event& notify) + -> coro::detail::task_self_deleting +{ + std::vector> tasks{}; + + if constexpr (std::ranges::sized_range) + { + tasks.reserve(std::size(awaitables)); + } + + for (auto&& a : awaitables) + { + tasks.emplace_back(make_when_any_task_return_void(std::move(a), notify)); + } + + co_await coro::when_all(std::move(tasks)); + co_return; +} + template< std::ranges::range range_type, concepts::awaitable awaitable_type = std::ranges::range_value_t, @@ -86,11 +104,10 @@ static auto make_when_any_controller_task( range_type awaitables, coro::event& notify, std::optional& return_value) -> coro::detail::task_self_deleting { - // These must live for as long as the longest running when_any task since each task tries to see + // This must live for as long as the longest running when_any task since each task tries to see // if it was the first to complete. Only the very first task to complete will set the return_value // and notify. - coro::mutex m{}; - std::atomic return_value_set{false}; + std::atomic first_completed{false}; // This detatched task will maintain the lifetime of all the when_any tasks. 
std::vector> tasks{}; @@ -102,8 +119,8 @@ static auto make_when_any_controller_task( for (auto&& a : awaitables) { - tasks.emplace_back(make_when_any_task( - std::move(a), m, return_value_set, notify, return_value)); + tasks.emplace_back( + make_when_any_task(std::move(a), first_completed, notify, return_value)); } co_await coro::when_all(std::move(tasks)); @@ -119,10 +136,9 @@ template using return_type = std::variant< std::remove_reference_t::awaiter_return_type>...>; - std::optional return_value{std::nullopt}; coro::event notify{}; - - auto controller_task = + std::optional return_value{std::nullopt}; + auto controller_task = detail::make_when_any_tuple_controller_task(notify, return_value, std::forward(awaitables)...); controller_task.handle().resume(); @@ -138,10 +154,9 @@ template using return_type = std::variant< std::remove_reference_t::awaiter_return_type>...>; - std::optional return_value{std::nullopt}; coro::event notify{}; - - auto controller_task = + std::optional return_value{std::nullopt}; + auto controller_task = detail::make_when_any_tuple_controller_task(notify, return_value, std::forward(awaitables)...); controller_task.handle().resume(); @@ -156,17 +171,32 @@ template< typename return_type_base = std::remove_reference_t> [[nodiscard]] auto when_any(std::stop_source stop_source, range_type awaitables) -> coro::task { - // Using an std::optional to prevent the need to default construct the type on the stack. - std::optional return_value{std::nullopt}; - coro::event notify{}; + coro::event notify{}; - auto controller_task = - detail::make_when_any_controller_task(std::forward(awaitables), notify, return_value); - controller_task.handle().resume(); + if constexpr (std::is_void_v) + { + auto controller_task = + detail::make_when_any_controller_task_return_void(std::forward(awaitables), notify); + controller_task.handle().resume(); - co_await notify; - stop_source.request_stop(); - co_return std::move(return_value.value()); + co_await notify; + stop_source.request_stop(); + co_return; + } + else + { + // Using an std::optional to prevent the need to default construct the type on the stack. 
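+        // (It also admits return types that are not default constructible; only
+        // the first task to finish ever assigns into it.)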
+ std::optional return_value{std::nullopt}; + + auto controller_task = + detail::make_when_any_controller_task(std::forward(awaitables), notify, return_value); + controller_task.handle().resume(); + + co_await notify; + stop_source.request_stop(); + + co_return std::move(return_value.value()); + } } template< @@ -176,15 +206,29 @@ template< typename return_type_base = std::remove_reference_t> [[nodiscard]] auto when_any(range_type awaitables) -> coro::task { - std::optional return_value{std::nullopt}; - coro::event notify{}; + coro::event notify{}; - auto controller_task = - detail::make_when_any_controller_task(std::forward(awaitables), notify, return_value); - controller_task.handle().resume(); + if constexpr (std::is_void_v) + { + auto controller_task = + detail::make_when_any_controller_task_return_void(std::forward(awaitables), notify); + controller_task.handle().resume(); - co_await notify; - co_return std::move(return_value.value()); + co_await notify; + co_return; + } + else + { + std::optional return_value{std::nullopt}; + + auto controller_task = + detail::make_when_any_controller_task(std::forward(awaitables), notify, return_value); + controller_task.handle().resume(); + + co_await notify; + + co_return std::move(return_value.value()); + } } } // namespace coro diff --git a/test/test_when_all.cpp b/test/test_when_all.cpp index 96faf2fe..a450a836 100644 --- a/test/test_when_all.cpp +++ b/test/test_when_all.cpp @@ -204,3 +204,25 @@ TEST_CASE("when_all each task throws", "[when_all]") } } } + +TEST_CASE("when_all return void", "[when_all]") +{ + coro::thread_pool tp{}; + std::atomic counter{0}; + + auto make_task = [](coro::thread_pool& tp, std::atomic& counter, uint64_t i) -> coro::task + { + co_await tp.schedule(); + counter += i; + co_return; + }; + + std::vector> tasks; + for (auto i = 1; i <= 4; ++i) + { + tasks.emplace_back(make_task(tp, counter, i)); + } + + coro::sync_wait(coro::when_all(std::move(tasks))); + REQUIRE(counter == 1 + 2 + 3 + 4); +} diff --git a/test/test_when_any.cpp b/test/test_when_any.cpp index ffeb3f0f..11e44af5 100644 --- a/test/test_when_any.cpp +++ b/test/test_when_any.cpp @@ -4,6 +4,7 @@ #include #include #include +#include TEST_CASE("when_any two tasks", "[when_any]") { @@ -17,6 +18,84 @@ TEST_CASE("when_any two tasks", "[when_any]") REQUIRE(result == 1); } +TEST_CASE("when_any return void", "[when_any]") +{ + coro::thread_pool tp{}; + std::atomic counter{0}; + + auto make_task = [](coro::thread_pool& tp, std::atomic& counter, uint64_t i) -> coro::task + { + co_await tp.schedule(); + // One thread will win. + uint64_t expected = 0; + counter.compare_exchange_strong(expected, i); + co_return; + }; + + std::vector> tasks; + for (auto i = 1; i <= 4; ++i) + { + tasks.emplace_back(make_task(tp, counter, i)); + } + + coro::sync_wait(coro::when_any(std::move(tasks))); + REQUIRE(counter.load() > 0); +} + +TEST_CASE("when_any tuple return void (monostate)", "[when_any]") +{ + // This test needs to use a mutex to guarantee that the task that sets the counter + // is the first task to complete, otherwise there is a race condition if counter is atomic + // as the other task could complete first (unlikely but happens) and cause the REQUIRE statements + // between what is returned to mismatch from what is executed. 
+ coro::mutex m{}; + coro::thread_pool tp{}; + uint64_t counter{0}; + + auto make_task_return_void = + [](coro::thread_pool& tp, coro::mutex& m, uint64_t& counter, uint64_t i) -> coro::task + { + co_await tp.schedule(); + co_await m.lock(); + if (counter == 0) + { + counter = i; + } + else + { + REQUIRE(counter == 2); + } + co_return std::monostate{}; + }; + + auto make_task = [](coro::thread_pool& tp, coro::mutex& m, uint64_t& counter, uint64_t i) -> coro::task + { + co_await tp.schedule(); + co_await m.lock(); + if (counter == 0) + { + counter = i; + } + else + { + REQUIRE(counter == 1); + } + co_return i; + }; + + auto result = + coro::sync_wait(coro::when_any(make_task_return_void(tp, m, counter, 1), make_task(tp, m, counter, 2))); + if (std::holds_alternative(result)) + { + REQUIRE(counter == 1); + } + else + { + REQUIRE(std::get(result) == 2); + REQUIRE(counter == 2); + } +} + #ifdef LIBCORO_FEATURE_NETWORKING TEST_CASE("when_any two tasks one long running", "[when_any]") From 1b82210b8eb3ca99a33761bc55c7bed1e62b57e1 Mon Sep 17 00:00:00 2001 From: Josh Baldwin Date: Sun, 4 May 2025 14:57:10 -0600 Subject: [PATCH 22/24] Adds coro::queue (#311) * Adds coro::queue * Pushing elements into the queue will either wake a waiter to immediately consume the value, or push into the queue if there are no current waiters. * Popping elements from the queue will either consume the front immediately if there are elements available, or wait for an element to be pushed. This structure differs from `coro::ring_buffer` in that it has an unbounded size if there are not enough consumers for say a spike in traffic. Use `coro::queue::shutdown_notify_waiters_drain()` to shutdown the queue and wake all waiters. Closes #307 * Make all coro::queue functions async --- .githooks/pre-commit | 4 + .githooks/readme-template.md | 38 ++++ CMakeLists.txt | 1 + README.md | 106 ++++++++++ examples/CMakeLists.txt | 4 + examples/coro_queue.cpp | 69 +++++++ include/coro/coro.hpp | 1 + include/coro/queue.hpp | 387 +++++++++++++++++++++++++++++++++++ include/coro/ring_buffer.hpp | 31 ++- test/CMakeLists.txt | 1 + test/test_queue.cpp | 200 ++++++++++++++++++ test/test_ring_buffer.cpp | 17 +- 12 files changed, 838 insertions(+), 21 deletions(-) create mode 100644 examples/coro_queue.cpp create mode 100644 include/coro/queue.hpp create mode 100644 test/test_queue.cpp diff --git a/.githooks/pre-commit b/.githooks/pre-commit index 888ec379..d9e5a2fe 100755 --- a/.githooks/pre-commit +++ b/.githooks/pre-commit @@ -70,6 +70,10 @@ template_contents=$(cat 'README.md') example_contents=$(cat 'examples/coro_ring_buffer.cpp') echo "${template_contents/\$\{EXAMPLE_CORO_RING_BUFFER_CPP\}/$example_contents}" > README.md +template_contents=$(cat 'README.md') +example_contents=$(cat 'examples/coro_queue.cpp') +echo "${template_contents/\$\{EXAMPLE_CORO_QUEUE_CPP\}/$example_contents}" > README.md + template_contents=$(cat 'README.md') example_contents=$(cat 'examples/coro_shared_mutex.cpp') echo "${template_contents/\$\{EXAMPLE_CORO_SHARED_MUTEX_CPP\}/$example_contents}" > README.md diff --git a/.githooks/readme-template.md b/.githooks/readme-template.md index be91f4db..ac1f086f 100644 --- a/.githooks/readme-template.md +++ b/.githooks/readme-template.md @@ -25,6 +25,7 @@ - [coro::shared_mutex](#shared_mutex) - [coro::semaphore](#semaphore) - [coro::ring_buffer](#ring_buffer) + - [coro::queue](#queue) * Schedulers - [coro::thread_pool](#thread_pool) for coroutine cooperative multitasking - [coro::io_scheduler](#io_scheduler) for driving i/o 
events @@ -266,6 +267,43 @@ consumer 2 shutting down, stop signal received consumer 3 shutting down, stop signal received ``` +### queue +The `coro::queue` is thread safe async multi-producer multi-consumer queue. Producing into the queue is not an asynchronous operation, it will either immediately use a consumer that is awaiting on `pop()` to process the element, or if no consumer is available place the element into the queue. All consume waiters on the queue are resumed in a LIFO manner when an element becomes available to consume. + +```C++ +${EXAMPLE_CORO_QUEUE_CPP} +``` + +Expected output: +```bash +$ ./examples/coro_queue +consumed 0 +consumed 1 +consumed 0 +consumed 2 +consumed 3 +consumed 4 +consumed 1 +consumed 0 +consumed 0 +consumed 0 +consumed 1 +consumed 1 +consumed 2 +consumed 2 +consumed 3 +consumed 4 +consumed 3 +consumed 4 +consumed 2 +consumed 3 +consumed 4 +consumed 1 +consumed 2 +consumed 3 +consumed 4 +``` + ### thread_pool `coro::thread_pool` is a statically sized pool of worker threads to execute scheduled coroutines from a FIFO queue. One way to schedule a coroutine on a thread pool is to use the pool's `schedule()` function which should be `co_awaited` inside the coroutine to transfer the execution from the current thread to a thread pool worker thread. Its important to note that scheduling will first place the coroutine into the FIFO queue and will be picked up by the first available thread in the pool, e.g. there could be a delay if there is a lot of work queued up. diff --git a/CMakeLists.txt b/CMakeLists.txt index bdf9df8c..3bffed4d 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -85,6 +85,7 @@ set(LIBCORO_SOURCE_FILES include/coro/generator.hpp include/coro/latch.hpp include/coro/mutex.hpp src/mutex.cpp + include/coro/queue.hpp include/coro/ring_buffer.hpp include/coro/semaphore.hpp src/semaphore.cpp include/coro/shared_mutex.hpp diff --git a/README.md b/README.md index cdbae81e..cf827826 100644 --- a/README.md +++ b/README.md @@ -25,6 +25,7 @@ - [coro::shared_mutex](#shared_mutex) - [coro::semaphore](#semaphore) - [coro::ring_buffer](#ring_buffer) + - [coro::queue](#queue) * Schedulers - [coro::thread_pool](#thread_pool) for coroutine cooperative multitasking - [coro::io_scheduler](#io_scheduler) for driving i/o events @@ -799,6 +800,111 @@ consumer 2 shutting down, stop signal received consumer 3 shutting down, stop signal received ``` +### queue +The `coro::queue` is thread safe async multi-producer multi-consumer queue. Producing into the queue is not an asynchronous operation, it will either immediately use a consumer that is awaiting on `pop()` to process the element, or if no consumer is available place the element into the queue. All consume waiters on the queue are resumed in a LIFO manner when an element becomes available to consume. + +```C++ +#include +#include + +int main() +{ + const size_t iterations = 5; + const size_t producers_count = 5; + const size_t consumers_count = 2; + + coro::thread_pool tp{}; + coro::queue q{}; + coro::latch producers_done{producers_count}; + coro::mutex m{}; /// Just for making the console prints look nice. + + auto make_producer_task = + [iterations](coro::thread_pool& tp, coro::queue& q, coro::latch& pd) -> coro::task + { + co_await tp.schedule(); + + for (size_t i = 0; i < iterations; ++i) + { + co_await q.push(i); + } + + pd.count_down(); // Notify the shutdown task this producer is complete. 
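+        // (count_down() is synchronous; the coro::latch resumes the awaiting
+        // shutdown task once every producer has counted down.)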
+ co_return; + }; + + auto make_shutdown_task = [](coro::thread_pool& tp, coro::queue& q, coro::latch& pd) -> coro::task + { + // This task will wait for all the producers to complete and then for the + // entire queue to be drained before shutting it down. + co_await tp.schedule(); + co_await pd; + co_await q.shutdown_notify_waiters_drain(tp); + co_return; + }; + + auto make_consumer_task = [](coro::thread_pool& tp, coro::queue& q, coro::mutex& m) -> coro::task + { + co_await tp.schedule(); + + while (true) + { + auto expected = co_await q.pop(); + if (!expected) + { + break; // coro::queue is shutting down + } + + auto scoped_lock = co_await m.lock(); // Only used to make the output look nice. + std::cout << "consumed " << *expected << "\n"; + } + }; + + std::vector> tasks{}; + + for (size_t i = 0; i < producers_count; ++i) + { + tasks.push_back(make_producer_task(tp, q, producers_done)); + } + for (size_t i = 0; i < consumers_count; ++i) + { + tasks.push_back(make_consumer_task(tp, q, m)); + } + tasks.push_back(make_shutdown_task(tp, q, producers_done)); + + coro::sync_wait(coro::when_all(std::move(tasks))); +} +``` + +Expected output: +```bash +$ ./examples/coro_queue +consumed 0 +consumed 1 +consumed 0 +consumed 2 +consumed 3 +consumed 4 +consumed 1 +consumed 0 +consumed 0 +consumed 0 +consumed 1 +consumed 1 +consumed 2 +consumed 2 +consumed 3 +consumed 4 +consumed 3 +consumed 4 +consumed 2 +consumed 3 +consumed 4 +consumed 1 +consumed 2 +consumed 3 +consumed 4 +``` + ### thread_pool `coro::thread_pool` is a statically sized pool of worker threads to execute scheduled coroutines from a FIFO queue. One way to schedule a coroutine on a thread pool is to use the pool's `schedule()` function which should be `co_awaited` inside the coroutine to transfer the execution from the current thread to a thread pool worker thread. Its important to note that scheduling will first place the coroutine into the FIFO queue and will be picked up by the first available thread in the pool, e.g. there could be a delay if there is a lot of work queued up. diff --git a/examples/CMakeLists.txt b/examples/CMakeLists.txt index 03502791..7b3b6ec3 100644 --- a/examples/CMakeLists.txt +++ b/examples/CMakeLists.txt @@ -49,6 +49,10 @@ add_executable(coro_shared_mutex coro_shared_mutex.cpp) target_link_libraries(coro_shared_mutex PUBLIC libcoro) target_compile_options(coro_shared_mutex PUBLIC ${LIBCORO_EXAMPLE_OPTIONS}) +add_executable(coro_queue coro_queue.cpp) +target_link_libraries(coro_queue PUBLIC libcoro) +target_compile_options(coro_queue PUBLIC ${LIBCORO_EXAMPLE_OPTIONS}) + add_executable(coro_sync_wait coro_sync_wait.cpp) target_link_libraries(coro_sync_wait PUBLIC libcoro) target_compile_options(coro_sync_wait PUBLIC ${LIBCORO_EXAMPLE_OPTIONS}) diff --git a/examples/coro_queue.cpp b/examples/coro_queue.cpp new file mode 100644 index 00000000..fb54897d --- /dev/null +++ b/examples/coro_queue.cpp @@ -0,0 +1,69 @@ +#include +#include + +int main() +{ + const size_t iterations = 5; + const size_t producers_count = 5; + const size_t consumers_count = 2; + + coro::thread_pool tp{}; + coro::queue q{}; + coro::latch producers_done{producers_count}; + coro::mutex m{}; /// Just for making the console prints look nice. + + auto make_producer_task = + [iterations](coro::thread_pool& tp, coro::queue& q, coro::latch& pd) -> coro::task + { + co_await tp.schedule(); + + for (size_t i = 0; i < iterations; ++i) + { + co_await q.push(i); + } + + pd.count_down(); // Notify the shutdown task this producer is complete. 
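+        // (count_down() is synchronous; the coro::latch resumes the awaiting
+        // shutdown task once every producer has counted down.)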
+ co_return; + }; + + auto make_shutdown_task = [](coro::thread_pool& tp, coro::queue& q, coro::latch& pd) -> coro::task + { + // This task will wait for all the producers to complete and then for the + // entire queue to be drained before shutting it down. + co_await tp.schedule(); + co_await pd; + co_await q.shutdown_notify_waiters_drain(tp); + co_return; + }; + + auto make_consumer_task = [](coro::thread_pool& tp, coro::queue& q, coro::mutex& m) -> coro::task + { + co_await tp.schedule(); + + while (true) + { + auto expected = co_await q.pop(); + if (!expected) + { + break; // coro::queue is shutting down + } + + auto scoped_lock = co_await m.lock(); // Only used to make the output look nice. + std::cout << "consumed " << *expected << "\n"; + } + }; + + std::vector> tasks{}; + + for (size_t i = 0; i < producers_count; ++i) + { + tasks.push_back(make_producer_task(tp, q, producers_done)); + } + for (size_t i = 0; i < consumers_count; ++i) + { + tasks.push_back(make_consumer_task(tp, q, m)); + } + tasks.push_back(make_shutdown_task(tp, q, producers_done)); + + coro::sync_wait(coro::when_all(std::move(tasks))); +} diff --git a/include/coro/coro.hpp b/include/coro/coro.hpp index 82dc75d1..99e53cb7 100644 --- a/include/coro/coro.hpp +++ b/include/coro/coro.hpp @@ -33,6 +33,7 @@ #include "coro/generator.hpp" #include "coro/latch.hpp" #include "coro/mutex.hpp" +#include "coro/queue.hpp" #include "coro/ring_buffer.hpp" #include "coro/semaphore.hpp" #include "coro/shared_mutex.hpp" diff --git a/include/coro/queue.hpp b/include/coro/queue.hpp new file mode 100644 index 00000000..c57e12f3 --- /dev/null +++ b/include/coro/queue.hpp @@ -0,0 +1,387 @@ +#pragma once + +#include "coro/concepts/executor.hpp" +#include "coro/expected.hpp" +#include "coro/sync_wait.hpp" + +#include +#include + +namespace coro +{ + +enum class queue_produce_result +{ + /** + * @brief The item was successfully produced. + */ + produced, + /** + * @brief The queue is shutting down or stopped, no more items are allowed to be produced. + */ + queue_stopped +}; + +enum class queue_consume_result +{ + /** + * @brief The queue has shut down/stopped and the user should stop calling pop(). + */ + queue_stopped +}; + + +/** + * @brief An unbounded queue. If the queue is empty and there are waiters to consume then + * there are no allocations and the coroutine context will simply be passed to the + * waiter. If there are no waiters the item being produced will be placed into the + * queue. + * + * @tparam element_type The type of items being produced and consumed. + */ +template +class queue +{ +public: + struct awaiter + { + explicit awaiter(queue& q) noexcept : m_queue(q) {} + + /** + * @brief Acquires the coro::queue lock. + * + * @return coro::task + */ + auto make_acquire_lock_task() -> coro::task + { + co_return co_await m_queue.m_mutex.lock(); + } + + auto await_ready() noexcept -> bool + { + // This awaiter is ready when it has actually acquired an element or it is shutting down. + if (m_queue.m_stopped.load(std::memory_order::acquire)) + { + return false; + } + + auto lock = coro::sync_wait(make_acquire_lock_task()); + if (!m_queue.empty()) + { + if constexpr (std::is_move_constructible_v) + { + m_element = std::move(m_queue.m_elements.front()); + } + else + { + m_element = m_queue.m_elements.front(); + } + + m_queue.m_elements.pop(); + return true; + } + + return false; + } + + auto await_suspend(std::coroutine_handle<> awaiting_coroutine) noexcept -> bool + { + // Don't suspend if the stop signal has been set. 
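+            // (Returning false from await_suspend resumes the awaiting coroutine
+            // right away; await_resume then reports queue_stopped since no
+            // element was captured.)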
+ if (m_queue.m_stopped.load(std::memory_order::acquire)) + { + return false; + } + + auto lock = coro::sync_wait(make_acquire_lock_task()); + if (!m_queue.empty()) + { + if constexpr (std::is_move_constructible_v) + { + m_element = std::move(m_queue.m_elements.front()); + } + else + { + m_element = m_queue.m_elements.front(); + } + + m_queue.m_elements.pop(); + return false; + } + + // No element is ready, put ourselves on the waiter list and suspend. + this->m_next = m_queue.m_waiters; + m_queue.m_waiters = this; + m_awaiting_coroutine = awaiting_coroutine; + + return true; + } + + [[nodiscard]] auto await_resume() noexcept -> expected + { + if (m_element.has_value()) + { + if constexpr (std::is_move_constructible_v) + { + return std::move(m_element.value()); + } + else + { + return m_element.value(); + } + } + else + { + // If we don't have an item the queue has stopped, the prior functions will have checked the state. + return unexpected(queue_consume_result::queue_stopped); + } + } + + std::optional m_element{std::nullopt}; + queue& m_queue; + std::coroutine_handle<> m_awaiting_coroutine{nullptr}; + /// The next awaiter in line for this queue, nullptr if this is the end. + awaiter* m_next{nullptr}; + }; + + queue() {} + ~queue() {} + + queue(const queue&) = delete; + queue(queue&& other) + { + m_waiters = std::exchange(other.m_waiters, nullptr); + m_mutex = std::move(other.m_mutex); + m_elements = std::move(other.m_elements); + m_shutting_down = std::move(other.m_shutting_down); + m_stopped = std::move(other.m_stopped); + } + + auto operator=(const queue&) -> queue& = delete; + auto operator=(queue&& other) -> queue& + { + if (std::addressof(other) != this) + { + m_waiters = std::exchange(other.m_waiters, nullptr); + m_mutex = std::move(other.m_mutex); + m_elements = std::move(other.m_elements); + m_shutting_down = std::move(other.m_shutting_down); + m_stopped = std::move(other.m_stopped); + } + + return *this; + } + + /** + * @brief Determines if the queue is empty. + * + * @return true If the queue is empty. + * @return false If the queue is not empty. + */ + auto empty() const -> bool { return size() == 0; } + + /** + * @brief Gets the number of elements in the queue. + * + * @return std::size_t The number of elements in the queue. + */ + auto size() const -> std::size_t + { + std::atomic_thread_fence(std::memory_order::acquire); + return m_elements.size(); + } + + /** + * @brief Pushes the element into the queue. If the queue is empty and there are waiters + * then the element will be processed immediately by transfering the coroutine task + * context to the waiter. + * + * @param element The element being produced. + * @return coro::task + */ + auto push(const element_type& element) -> coro::task + { + if (m_shutting_down.load(std::memory_order::acquire)) + { + co_return queue_produce_result::queue_stopped; + } + + // The general idea is to see if anyone is waiting, and if so directly transfer the element + // to that waiter. If there is nobody waiting then move the element into the queue. + auto lock = co_await m_mutex.lock(); + + if (m_waiters != nullptr) + { + awaiter* waiter = m_waiters; + m_waiters = m_waiters->m_next; + lock.unlock(); + + // Transfer the element directly to the awaiter. + waiter->m_element = element; + waiter->m_awaiting_coroutine.resume(); + } + else + { + m_elements.push(element); + } + + co_return queue_produce_result::produced; + } + + /** + * @brief Pushes the element into the queue. 
+     *        then the element will be processed immediately by transferring the coroutine task
+     *        context to the waiter.
+     *
+     * @param element The element being produced.
+     * @return coro::task<queue_produce_result>
+     */
+    auto push(element_type&& element) -> coro::task<queue_produce_result>
+    {
+        if (m_shutting_down.load(std::memory_order::acquire))
+        {
+            co_return queue_produce_result::queue_stopped;
+        }
+
+        auto lock = co_await m_mutex.lock();
+
+        if (m_waiters != nullptr)
+        {
+            awaiter* waiter = m_waiters;
+            m_waiters       = m_waiters->m_next;
+            lock.unlock();
+
+            // Transfer the element directly to the awaiter.
+            waiter->m_element = std::move(element);
+            waiter->m_awaiting_coroutine.resume();
+        }
+        else
+        {
+            m_elements.push(std::move(element));
+        }
+
+        co_return queue_produce_result::produced;
+    }
+
+    /**
+     * @brief Emplaces an element into the queue. Has the same behavior as push if the queue
+     *        is empty and has waiters.
+     *
+     * @param args The element's constructor argument types and values.
+     * @return coro::task<queue_produce_result>
+     */
+    template<typename... args_type>
+    auto emplace(args_type&&... args) -> coro::task<queue_produce_result>
+    {
+        if (m_shutting_down.load(std::memory_order::acquire))
+        {
+            co_return queue_produce_result::queue_stopped;
+        }
+
+        auto lock = co_await m_mutex.lock();
+
+        if (m_waiters != nullptr)
+        {
+            awaiter* waiter = m_waiters;
+            m_waiters       = m_waiters->m_next;
+            lock.unlock();
+
+            waiter->m_element.emplace(std::forward<args_type>(args)...);
+            waiter->m_awaiting_coroutine.resume();
+        }
+        else
+        {
+            m_elements.emplace(std::forward<args_type>(args)...);
+        }
+
+        co_return queue_produce_result::produced;
+    }
+
+    /**
+     * @brief Pops the head element of the queue if available, or waits for one to become available.
+     *
+     * @return awaiter An awaiter that upon co_await returns either an element or the status
+     *         that the queue has shut down.
+     */
+    [[nodiscard]] auto pop() -> awaiter { return awaiter{*this}; }
+
+    /**
+     * @brief Shuts down the queue immediately, discarding any elements that haven't been processed.
+     *
+     * @return coro::task<void>
+     */
+    auto shutdown_notify_waiters() -> coro::task<void>
+    {
+        auto expected = false;
+        if (!m_shutting_down.compare_exchange_strong(expected, true, std::memory_order::acq_rel, std::memory_order::relaxed))
+        {
+            co_return;
+        }
+
+        // Since this isn't draining just let the awaiters know we're stopped.
+        m_stopped.exchange(true, std::memory_order::release);
+
+        auto lock = co_await m_mutex.lock();
+        while (m_waiters != nullptr)
+        {
+            auto* to_resume = m_waiters;
+            m_waiters       = m_waiters->m_next;
+
+            lock.unlock();
+            to_resume->m_awaiting_coroutine.resume();
+            lock = co_await m_mutex.lock();
+        }
+    }
+
+    /**
+     * @brief Shuts down the queue but waits for it to be drained so all elements are processed.
+     *        Will yield on the given executor between checks for the queue being empty so the
+     *        remaining elements can be processed.
+     *
+     * @tparam executor_t The executor type.
+     * @param e The executor to yield this task to while waiting for elements to be processed.
+     * @return coro::task<void>
+     */
+    template<concepts::executor executor_t>
+    auto shutdown_notify_waiters_drain(executor_t& e) -> coro::task<void>
+    {
+        auto expected = false;
+        if (!m_shutting_down.compare_exchange_strong(expected, true, std::memory_order::acq_rel, std::memory_order::relaxed))
+        {
+            co_return;
+        }
+
+        while(!empty())
+        {
+            co_await e.yield();
+        }
+
+        // Now that the queue is drained let all the awaiters know that we're stopped.
+        m_stopped.exchange(true, std::memory_order::release);
+
+        auto lock = co_await m_mutex.lock();
+        while (m_waiters != nullptr)
+        {
+            auto* to_resume = m_waiters;
+            m_waiters       = m_waiters->m_next;
+
+            lock.unlock();
+            to_resume->m_awaiting_coroutine.resume();
+            lock = co_await m_mutex.lock();
+        }
+    }
+
+private:
+    friend awaiter;
+    /// The list of pop() awaiters.
+    awaiter* m_waiters{nullptr};
+    /// Mutex for properly maintaining the queue.
+    coro::mutex m_mutex{};
+    /// The underlying queue data structure.
+    std::queue<element_type> m_elements{};
+    /// Has the shutdown process begun?
+    std::atomic<bool> m_shutting_down{false};
+    /// Has this queue been shut down?
+    std::atomic<bool> m_stopped{false};
+};
+
+} // namespace coro
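For review convenience, here is a minimal sketch of the emplace() path above with a multi-argument element type. The `message` struct and the `main` driver are illustrative assumptions, not part of this patch; only the coro::queue calls come from the header above.

    #include <coro/coro.hpp>

    #include <iostream>
    #include <string>

    struct message
    {
        message(int id, std::string body) : id(id), body(std::move(body)) {}
        int         id;
        std::string body;
    };

    int main()
    {
        coro::queue<message> q{};

        auto produce = [](coro::queue<message>& q) -> coro::task<void>
        {
            // Constructs the message in place; if a consumer is already suspended in
            // pop(), the element is built directly into that waiter's slot instead
            // of going through the underlying std::queue.
            co_await q.emplace(1, "hello");
            co_return;
        };

        auto consume = [](coro::queue<message>& q) -> coro::task<void>
        {
            auto expected = co_await q.pop();
            if (expected) // holds a message unless the queue was stopped
            {
                std::cout << expected->id << " " << expected->body << "\n";
            }
            co_return;
        };

        coro::sync_wait(coro::when_all(produce(q), consume(q)));
        coro::sync_wait(q.shutdown_notify_waiters());
    }

Either ordering works: if the consumer runs first it parks itself on the waiter list and the producer hands the element over directly; if the producer runs first the element sits in the queue until popped.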
diff --git a/include/coro/ring_buffer.hpp b/include/coro/ring_buffer.hpp
index 3e5149dc..16657960 100644
--- a/include/coro/ring_buffer.hpp
+++ b/include/coro/ring_buffer.hpp
@@ -1,6 +1,6 @@
 #pragma once
 
-#include <coro/expected.hpp>
+#include "coro/expected.hpp"
 
 #include 
 #include 
@@ -74,7 +74,6 @@ class ring_buffer
             // Don't suspend if the stop signal has been set.
             if (m_rb.m_stopped.load(std::memory_order::acquire))
             {
-                m_stopped = true;
                 return false;
             }
@@ -89,7 +88,8 @@ class ring_buffer
          */
         auto await_resume() -> rb::produce_result
         {
-            return !m_stopped ? rb::produce_result::produced : rb::produce_result::ring_buffer_stopped;
+            return !m_rb.m_stopped.load(std::memory_order::acquire) ? rb::produce_result::produced
+                                                                    : rb::produce_result::ring_buffer_stopped;
         }
 
     private:
@@ -104,8 +104,6 @@ class ring_buffer
         produce_operation* m_next{nullptr};
         /// The element this produce operation is producing into the ring buffer.
        element m_e;
-        /// Was the operation stopped?
-        bool m_stopped{false};
     };
 
     struct consume_operation
@@ -131,7 +129,6 @@ class ring_buffer
             // Don't suspend if the stop signal has been set.
             if (m_rb.m_stopped.load(std::memory_order::acquire))
             {
-                m_stopped = true;
                 return false;
             }
             m_awaiting_coroutine = awaiting_coroutine;
@@ -145,7 +142,7 @@ class ring_buffer
          */
         auto await_resume() -> expected<element, rb::consume_result>
         {
-            if (m_stopped)
+            if (m_rb.m_stopped.load(std::memory_order::acquire))
             {
                 return unexpected(rb::consume_result::ring_buffer_stopped);
             }
@@ -165,8 +162,6 @@ class ring_buffer
         consume_operation* m_next{nullptr};
         /// The element this consume operation will consume.
         element m_e;
-        /// Was the operation stopped?
-        bool m_stopped{false};
     };
 
     /**
@@ -202,20 +197,19 @@ class ring_buffer
      */
     auto notify_waiters() -> void
     {
-        std::unique_lock lk{m_mutex};
-        // Only wake up waiters once.
-        if (m_stopped.load(std::memory_order::acquire))
+        auto expected = false;
+        if (!m_stopped.compare_exchange_strong(expected, true, std::memory_order::acq_rel, std::memory_order::relaxed))
         {
+            // Only wake up waiters once.
             return;
         }
-        m_stopped.exchange(true, std::memory_order::release);
 
+        std::unique_lock lk{m_mutex};
         while (m_produce_waiters != nullptr)
         {
-            auto* to_resume      = m_produce_waiters;
-            to_resume->m_stopped = true;
-            m_produce_waiters    = m_produce_waiters->m_next;
+            auto* to_resume   = m_produce_waiters;
+            m_produce_waiters = m_produce_waiters->m_next;
 
             lk.unlock();
             to_resume->m_awaiting_coroutine.resume();
@@ -224,9 +218,8 @@ class ring_buffer
 
         while (m_consume_waiters != nullptr)
         {
-            auto* to_resume      = m_consume_waiters;
-            to_resume->m_stopped = true;
-            m_consume_waiters    = m_consume_waiters->m_next;
+            auto* to_resume   = m_consume_waiters;
+            m_consume_waiters = m_consume_waiters->m_next;
 
             lk.unlock();
             to_resume->m_awaiting_coroutine.resume();
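The notify_waiters() rewrite above is the behavioral heart of this hunk: the old load-then-exchange pair left a window where two threads could both observe m_stopped == false and both walk the waiter lists, while the compare-exchange makes exactly one caller the winner. A standalone sketch of the idiom (illustrative, not libcoro code):

    #include <atomic>

    struct stop_flag
    {
        std::atomic<bool> m_stopped{false};

        // Returns true for exactly one caller, even under concurrent invocation;
        // every other caller sees the flag already set and returns false.
        auto try_stop() -> bool
        {
            auto expected = false;
            // acq_rel on success publishes prior writes to whoever later observes the
            // flag; relaxed on failure is sufficient because the loser only returns.
            return m_stopped.compare_exchange_strong(
                expected, true, std::memory_order::acq_rel, std::memory_order::relaxed);
        }
    };

Dropping the per-operation m_stopped bools follows from the same change: once the shared atomic is authoritative, each awaiter can simply read it in await_resume().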
diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt
index 854270d7..3024aded 100644
--- a/test/CMakeLists.txt
+++ b/test/CMakeLists.txt
@@ -7,6 +7,7 @@ set(LIBCORO_TEST_SOURCE_FILES
     test_latch.cpp
     test_mutex.cpp
     test_ring_buffer.cpp
+    test_queue.cpp
     test_semaphore.cpp
     test_shared_mutex.cpp
     test_sync_wait.cpp
diff --git a/test/test_queue.cpp b/test/test_queue.cpp
new file mode 100644
index 00000000..0bd8cde8
--- /dev/null
+++ b/test/test_queue.cpp
@@ -0,0 +1,200 @@
+#include "catch_amalgamated.hpp"
+
+#include <coro/coro.hpp>
+
+TEST_CASE("queue shutdown produce", "[queue]")
+{
+    coro::queue<uint64_t> q{};
+
+    auto make_consumer_task = [](coro::queue<uint64_t>& q) -> coro::task<uint64_t>
+    {
+        auto expected = co_await q.pop();
+        if (!expected)
+        {
+            co_return 0;
+        }
+        co_return std::move(*expected);
+    };
+
+    coro::sync_wait(q.shutdown_notify_waiters());
+    coro::sync_wait(q.push(42));
+
+    auto result = coro::sync_wait(make_consumer_task(q));
+    REQUIRE(result == 0);
+    REQUIRE(q.empty());
+}
+
+TEST_CASE("queue single produce consume", "[queue]")
+{
+    coro::queue<uint64_t> q{};
+
+    auto make_consumer_task = [](coro::queue<uint64_t>& q) -> coro::task<uint64_t>
+    {
+        auto expected = co_await q.pop();
+        if (!expected)
+        {
+            co_return 0;
+        }
+        co_return std::move(*expected);
+    };
+
+    coro::sync_wait(q.push(42));
+
+    auto result = coro::sync_wait(make_consumer_task(q));
+    REQUIRE(result == 42);
+    REQUIRE(q.empty());
+}
+
+TEST_CASE("queue multiple produce and consume", "[queue]")
+{
+    const uint64_t ITERATIONS = 10;
+    coro::queue<uint64_t> q{};
+
+    auto make_consumer_task = [](coro::queue<uint64_t>& q) -> coro::task<uint64_t>
+    {
+        auto expected = co_await q.pop();
+        if (!expected)
+        {
+            co_return 0;
+        }
+        co_return std::move(*expected);
+    };
+
+    std::vector<coro::task<uint64_t>> tasks{};
+    for (uint64_t i = 0; i < ITERATIONS; ++i)
+    {
+        coro::sync_wait(q.push(i));
+        tasks.emplace_back(make_consumer_task(q));
+    }
+
+    auto results = coro::sync_wait(coro::when_all(std::move(tasks)));
+    for (uint64_t i = 0; i < ITERATIONS; ++i)
+    {
+        REQUIRE(results[i].return_value() == i);
+    }
+}
+
+TEST_CASE("queue produce consume direct", "[queue]")
+{
+    const uint64_t ITERATIONS = 10;
+    coro::queue<uint64_t> q{};
+    coro::thread_pool tp{};
+
+    auto make_producer_task = [&ITERATIONS](coro::thread_pool& tp, coro::queue<uint64_t>& q) -> coro::task<uint64_t>
+    {
+        co_await tp.schedule();
+        for (uint64_t i = 0; i < ITERATIONS; ++i)
+        {
+            co_await q.push(i);
+            co_await tp.yield();
+        }
+
+        co_await q.shutdown_notify_waiters_drain(tp);
+
+        co_return 0;
+    };
+
+    auto make_consumer_task = [&ITERATIONS](coro::thread_pool& tp, coro::queue<uint64_t>& q) -> coro::task<uint64_t>
+    {
+        co_await tp.schedule();
+
+        uint64_t sum{0};
+
+        while (true)
+        {
+            auto expected = co_await q.pop();
+            if (!expected)
+            {
+                co_return sum;
+            }
+            sum += *expected;
+        }
+    };
+
+    auto results = coro::sync_wait(coro::when_all(make_consumer_task(tp, q), make_producer_task(tp, q)));
+
+    REQUIRE(std::get<0>(results).return_value() == 45);
+    REQUIRE(std::get<1>(results).return_value() == 0);
+}
+
+TEST_CASE("queue multithreaded produce consume", "[queue]")
+{
+    const uint64_t WORKERS    = 3;
+    const uint64_t ITERATIONS = 100;
+    coro::queue<uint64_t> q{};
+    coro::thread_pool tp{};
+    std::atomic<uint64_t> counter{0};
+    coro::latch wait{WORKERS};
+
+    auto make_producer_task =
+        [&ITERATIONS](coro::thread_pool& tp, coro::queue<uint64_t>& q, coro::latch& w) -> coro::task<void>
+    {
+        co_await tp.schedule();
+        for (uint64_t i = 0; i < ITERATIONS; ++i)
+        {
+            co_await q.push(i);
+            co_await tp.yield();
+        }
+
+        w.count_down();
+        co_return;
+    };
+
+    auto make_shutdown_task = [](coro::thread_pool& tp, coro::queue<uint64_t>& q, coro::latch& w) -> coro::task<void>
+    {
+        // Wait for all producers to complete.
+        co_await w;
+
+        // Wake up all waiters.
+        co_await q.shutdown_notify_waiters_drain(tp);
+    };
+
+    auto make_consumer_task =
+        [&ITERATIONS](
+            coro::thread_pool& tp, coro::queue<uint64_t>& q, std::atomic<uint64_t>& counter) -> coro::task<void>
+    {
+        co_await tp.schedule();
+
+        while (true)
+        {
+            auto expected = co_await q.pop();
+            if (!expected)
+            {
+                co_return;
+            }
+            counter += *expected;
+        }
+    };
+
+    std::vector<coro::task<void>> tasks{};
+    for (uint64_t i = 0; i < WORKERS; ++i)
+    {
+        tasks.emplace_back(make_producer_task(tp, q, wait));
+        tasks.emplace_back(make_consumer_task(tp, q, counter));
+    }
+    tasks.emplace_back(make_shutdown_task(tp, q, wait));
+
+    coro::sync_wait(coro::when_all(std::move(tasks)));
+    REQUIRE(counter == 14850);
+}
+
+TEST_CASE("queue stopped", "[queue]")
+{
+    coro::queue<uint64_t> q{};
+
+    auto make_consumer_task = [](coro::queue<uint64_t>& q) -> coro::task<uint64_t>
+    {
+        auto expected = co_await q.pop();
+        if (!expected)
+        {
+            co_return 0;
+        }
+        co_return std::move(*expected);
+    };
+
+    coro::sync_wait(q.push(42));
+    coro::sync_wait(q.shutdown_notify_waiters());
+
+    auto result = coro::sync_wait(make_consumer_task(q));
+    REQUIRE(result == 0);
+    REQUIRE(q.size() == 1); // The item was not consumed due to shutdown.
+}
diff --git a/test/test_ring_buffer.cpp b/test/test_ring_buffer.cpp
index d9ed98d3..081d860c 100644
--- a/test/test_ring_buffer.cpp
+++ b/test/test_ring_buffer.cpp
@@ -55,8 +55,10 @@ TEST_CASE("ring_buffer many elements many producers many consumers", "[ring_buff
     coro::thread_pool tp{coro::thread_pool::options{.thread_count = 4}};
     coro::ring_buffer rb{};
+    coro::latch wait{producers};
 
-    auto make_producer_task = [](coro::thread_pool& tp, coro::ring_buffer& rb) -> coro::task<void>
+    auto make_producer_task =
+        [](coro::thread_pool& tp, coro::ring_buffer& rb, coro::latch& w) -> coro::task<void>
     {
         co_await tp.schedule();
         auto to_produce = iterations / producers;
 
         for (size_t i = 0; i < to_produce; ++i)
         {
             co_await rb.produce(i);
         }
 
+        w.count_down();
+        co_return;
+    };
+
+    auto make_shutdown_task =
+        [](coro::thread_pool& tp, coro::ring_buffer& rb, coro::latch& w) -> coro::task<void>
+    {
+        co_await tp.schedule();
+        co_await w;
+        // Wait for all the values to be consumed prior to shutting down the ring buffer.
+        while (!rb.empty())
+        {
@@ -107,8 +119,9 @@ TEST_CASE("ring_buffer many elements many producers many consumers", "[ring_buff
     }
     for (size_t i = 0; i < producers; ++i)
     {
-        tasks.emplace_back(make_producer_task(tp, rb));
+        tasks.emplace_back(make_producer_task(tp, rb, wait));
     }
+    tasks.emplace_back(make_shutdown_task(tp, rb, wait));
 
     coro::sync_wait(coro::when_all(std::move(tasks)));

From 131cf5dca43fea117776cea593712c9963cbb592 Mon Sep 17 00:00:00 2001
From: Timo
Date: Sat, 10 May 2025 18:34:57 +0200
Subject: [PATCH 23/24] Update mac os ci workflow to use newer version of llvm
 and mac os (#318)

---
 .github/workflows/ci-macos.yml | 87 +++++++++++++++++-----------------
 include/coro/queue.hpp         | 42 ++++------
 include/coro/sync_wait.hpp     | 16 ++++--
 3 files changed, 63 insertions(+), 82 deletions(-)

diff --git a/.github/workflows/ci-macos.yml b/.github/workflows/ci-macos.yml
index 003b1c7b..84ac7719 100644
--- a/.github/workflows/ci-macos.yml
+++ b/.github/workflows/ci-macos.yml
@@ -1,45 +1,46 @@
-# name: ci-macos
+name: ci-macos
 
-# on: [pull_request, workflow_dispatch]
+on: [pull_request, workflow_dispatch]
 
-# jobs:
-#   macos:
-#     name: macos-12
-#     runs-on: macos-12
-#     strategy:
-#       fail-fast: false
-#       matrix:
-#         clang_version: [17]
-#         cxx_standard: [20, 23]
-#         libcoro_feature_networking: [ {enabled: OFF, tls: OFF} ]
-#         libcoro_build_shared_libs: [OFF, ON]
-#     steps:
-#       - name: Install Dependencies
-#         run: |
-#           brew update
-#           brew install llvm@${{ matrix.clang_version }}
-#           brew install ninja
-#       - name: Checkout
-#         uses: actions/checkout@v4
-#         with:
-#           submodules: recursive
-#       - name: Release
-#         run: |
-#           brew --prefix llvm@17
-#           mkdir Release
-#           cd Release
-#           cmake \
-#             -GNinja \
-#             -DCMAKE_BUILD_TYPE=Release \
-#             -DCMAKE_C_COMPILER=$(brew --prefix llvm@${{ matrix.clang_version }})/bin/clang-${{ matrix.clang_version }} \
-#             -DCMAKE_CXX_COMPILER=$(brew --prefix llvm@${{ matrix.clang_version }})/bin/clang-${{ matrix.clang_version }} \
-#             -DCMAKE_CXX_STANDARD=${{ matrix.cxx_standard }} \
-#             -DLIBCORO_FEATURE_NETWORKING=${{ matrix.libcoro_feature_networking.enabled }} \
-#             -DLIBCORO_FEATURE_TLS=${{ matrix.libcoro_feature_networking.tls }} \
-#             -DLIBCORO_BUILD_SHARED_LIBS=${{ matrix.libcoro_build_shared_libs }} \
-#             ..
-#           cmake --build . --config Release
-#       - name: Test
-#         run: |
-#           cd Release
-#           ctest --build-config Release -VV
+jobs:
+  macos:
+    name: macos-15
+    runs-on: macos-15
+    strategy:
+      fail-fast: false
+      matrix:
+        clang_version: [20]
+        cxx_standard: [20, 23]
+        libcoro_feature_networking: [{ enabled: OFF, tls: OFF }]
+        libcoro_build_shared_libs: [OFF, ON]
+    steps:
+      - name: Install Dependencies
+        run: |
+          brew update
+          brew install llvm@${{ matrix.clang_version }}
+          brew install ninja
+      - name: Checkout
+        uses: actions/checkout@v4
+        with:
+          submodules: recursive
+      - name: Release
+        run: |
+          brew --prefix llvm@${{ matrix.clang_version }}
+          ls $(brew --prefix llvm@${{ matrix.clang_version }})/bin
+          mkdir Release
+          cd Release
+          cmake \
+            -GNinja \
+            -DCMAKE_BUILD_TYPE=Release \
+            -DCMAKE_C_COMPILER=$(brew --prefix llvm@${{ matrix.clang_version }})/bin/clang-${{ matrix.clang_version }} \
+            -DCMAKE_CXX_COMPILER=$(brew --prefix llvm@${{ matrix.clang_version }})/bin/clang-${{ matrix.clang_version }} \
+            -DCMAKE_CXX_STANDARD=${{ matrix.cxx_standard }} \
+            -DLIBCORO_FEATURE_NETWORKING=${{ matrix.libcoro_feature_networking.enabled }} \
+            -DLIBCORO_FEATURE_TLS=${{ matrix.libcoro_feature_networking.tls }} \
+            -DLIBCORO_BUILD_SHARED_LIBS=${{ matrix.libcoro_build_shared_libs }} \
+            ..
+          cmake --build . --config Release
+      - name: Test
+        run: |
+          cd Release
+          ctest --build-config Release -VV
diff --git a/include/coro/queue.hpp b/include/coro/queue.hpp
index c57e12f3..61b7a6d1 100644
--- a/include/coro/queue.hpp
+++ b/include/coro/queue.hpp
@@ -4,7 +4,6 @@
 #include "coro/expected.hpp"
 #include "coro/sync_wait.hpp"
 
-#include <atomic>
 #include <queue>
 
 namespace coro
@@ -30,7 +29,6 @@ enum class queue_consume_result
     queue_stopped
 };
 
-
 /**
  * @brief An unbounded queue. If the queue is empty and there are waiters to consume then
  *        there are no allocations and the coroutine context will simply be passed to the
 *        waiter. If there are no waiters the item being produced will be placed into the
 *        queue.
 *
 * @tparam element_type The type of items being produced and consumed.
 */
 template<typename element_type>
 class queue
 {
 public:
     struct awaiter
     {
         explicit awaiter(queue& q) noexcept : m_queue(q) {}
 
         /**
          * @brief Acquires the coro::queue lock.
          *
          * @return coro::task<coro::scoped_lock>
          */
-        auto make_acquire_lock_task() -> coro::task<coro::scoped_lock>
-        {
-            co_return co_await m_queue.m_mutex.lock();
-        }
+        auto make_acquire_lock_task() -> coro::task<coro::scoped_lock> { co_return co_await m_queue.m_mutex.lock(); }
 
         auto await_ready() noexcept -> bool
         {
@@ -146,30 +141,11 @@ class queue
     queue() {}
     ~queue() {}
 
-    queue(const queue&) = delete;
-    queue(queue&& other)
-    {
-        m_waiters       = std::exchange(other.m_waiters, nullptr);
-        m_mutex         = std::move(other.m_mutex);
-        m_elements      = std::move(other.m_elements);
-        m_shutting_down = std::move(other.m_shutting_down);
-        m_stopped       = std::move(other.m_stopped);
-    }
+    queue(const queue&)  = delete;
+    queue(queue&& other) = delete;
 
-    auto operator=(const queue&) -> queue& = delete;
-    auto operator=(queue&& other) -> queue&
-    {
-        if (std::addressof(other) != this)
-        {
-            m_waiters       = std::exchange(other.m_waiters, nullptr);
-            m_mutex         = std::move(other.m_mutex);
-            m_elements      = std::move(other.m_elements);
-            m_shutting_down = std::move(other.m_shutting_down);
-            m_stopped       = std::move(other.m_stopped);
-        }
-
-        return *this;
-    }
+    auto operator=(const queue&) -> queue&  = delete;
+    auto operator=(queue&& other) -> queue& = delete;
 
     /**
     * @brief Determines if the queue is empty.
@@ -312,7 +288,8 @@ class queue
     auto shutdown_notify_waiters() -> coro::task<void>
     {
         auto expected = false;
-        if (!m_shutting_down.compare_exchange_strong(expected, true, std::memory_order::acq_rel, std::memory_order::relaxed))
+        if (!m_shutting_down.compare_exchange_strong(
+                expected, true, std::memory_order::acq_rel, std::memory_order::relaxed))
         {
             co_return;
         }
@@ -345,12 +322,13 @@ class queue
     auto shutdown_notify_waiters_drain(executor_t& e) -> coro::task<void>
     {
         auto expected = false;
-        if (!m_shutting_down.compare_exchange_strong(expected, true, std::memory_order::acq_rel, std::memory_order::relaxed))
+        if (!m_shutting_down.compare_exchange_strong(
+                expected, true, std::memory_order::acq_rel, std::memory_order::relaxed))
         {
             co_return;
         }
 
-        while(!empty())
+        while (!empty())
         {
             co_await e.yield();
         }
diff --git a/include/coro/sync_wait.hpp b/include/coro/sync_wait.hpp
index b403e953..36c93ea6 100644
--- a/include/coro/sync_wait.hpp
+++ b/include/coro/sync_wait.hpp
@@ -7,6 +7,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 
 namespace coro
@@ -67,9 +68,9 @@ class sync_wait_task_promise : public sync_wait_task_promise_base
     static constexpr bool return_type_is_reference = std::is_reference_v<return_type>;
     using stored_type = std::conditional_t<
-        return_type_is_reference,
-        std::remove_reference_t<return_type>*,
-        std::remove_const_t<return_type>>;
+        return_type_is_reference,
+        std::remove_reference_t<return_type>*,
+        std::remove_const_t<return_type>>;
     using variant_type = std::variant<stored_type, std::exception_ptr>;
 
     sync_wait_task_promise() noexcept = default;
@@ -88,9 +89,9 @@ class sync_wait_task_promise : public sync_wait_task_promise_base
     auto get_return_object() noexcept { return coroutine_type::from_promise(*this); }
 
     template<typename value_type>
-    requires(return_type_is_reference and std::is_constructible_v<return_type, value_type&&>) or
-        (not return_type_is_reference and
-         std::is_constructible_v<stored_type, value_type&&>) auto return_value(value_type&& value) -> void
+        requires(return_type_is_reference and std::is_constructible_v<return_type, value_type&&>) or
+                (not return_type_is_reference and std::is_constructible_v<stored_type, value_type&&>)
+    auto return_value(value_type&& value) -> void
     {
         if constexpr (return_type_is_reference)
         {
@@ -103,7 +104,8 @@ class sync_wait_task_promise : public sync_wait_task_promise_base
         }
     }
 
-    auto return_value(stored_type value) -> void requires(not return_type_is_reference)
+    auto return_value(stored_type value) -> void
+        requires(not return_type_is_reference)
     {
         if constexpr (std::is_move_constructible_v<stored_type>)
         {
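A note on the workflow change above: deriving the toolchain path from $(brew --prefix llvm@...) instead of hard-coding /usr/local/opt/llvm matters because macos-15 runners are Apple-silicon machines, where Homebrew installs under /opt/homebrew. Roughly (version number as in the matrix; output paths are the expected Homebrew defaults):

    # Prints /opt/homebrew/opt/llvm@20 on Apple silicon, /usr/local/opt/llvm@20 on Intel.
    brew --prefix llvm@20
    # The versioned driver the workflow passes to CMAKE_C_COMPILER / CMAKE_CXX_COMPILER:
    $(brew --prefix llvm@20)/bin/clang-20 --version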
 option(LIBCORO_BUILD_TESTS "Build the tests, Default=ON." ON)
 option(LIBCORO_CODE_COVERAGE "Enable code coverage, tests must also be enabled, Default=OFF" OFF)
@@ -40,6 +44,10 @@ endif()
 cmake_dependent_option(LIBCORO_FEATURE_NETWORKING "Include networking features, Default=ON." ON "NOT EMSCRIPTEN; NOT MSVC" OFF)
 cmake_dependent_option(LIBCORO_FEATURE_TLS "Include TLS encryption features, Default=ON." ON "NOT EMSCRIPTEN; NOT MSVC" OFF)
 
+message("${PROJECT_NAME} LIBCORO_ENABLE_ASAN            = ${LIBCORO_ENABLE_ASAN}")
+message("${PROJECT_NAME} LIBCORO_ENABLE_MSAN            = ${LIBCORO_ENABLE_MSAN}")
+message("${PROJECT_NAME} LIBCORO_ENABLE_TSAN            = ${LIBCORO_ENABLE_TSAN}")
+message("${PROJECT_NAME} LIBCORO_ENABLE_USAN            = ${LIBCORO_ENABLE_USAN}")
 message("${PROJECT_NAME} LIBCORO_EXTERNAL_DEPENDENCIES  = ${LIBCORO_EXTERNAL_DEPENDENCIES}")
 message("${PROJECT_NAME} LIBCORO_BUILD_TESTS            = ${LIBCORO_BUILD_TESTS}")
 message("${PROJECT_NAME} LIBCORO_CODE_COVERAGE          = ${LIBCORO_CODE_COVERAGE}")
@@ -146,6 +154,27 @@ endif()
 
 add_library(${PROJECT_NAME} ${LIBCORO_SOURCE_FILES})
 set_target_properties(${PROJECT_NAME} PROPERTIES LINKER_LANGUAGE CXX PREFIX "" VERSION ${PROJECT_VERSION} SOVERSION ${PROJECT_VERSION_MAJOR})
+
+if(LIBCORO_ENABLE_ASAN)
+    add_compile_options(-g -O0 -fno-omit-frame-pointer -fsanitize=address)
+    add_link_options(-fsanitize=address)
+endif()
+
+if(LIBCORO_ENABLE_MSAN)
+    add_compile_options(-g -O0 -fno-omit-frame-pointer -fsanitize=memory)
+    add_link_options(-fsanitize=memory)
+endif()
+
+if(LIBCORO_ENABLE_TSAN)
+    add_compile_options(-g -O0 -fno-omit-frame-pointer -fsanitize=thread)
+    add_link_options(-fsanitize=thread)
+endif()
+
+if(LIBCORO_ENABLE_USAN)
+    add_compile_options(-g -O0 -fno-omit-frame-pointer -fsanitize=undefined)
+    add_link_options(-fsanitize=undefined)
+endif()
+
 target_compile_features(${PROJECT_NAME} PUBLIC cxx_std_20)
 target_include_directories(${PROJECT_NAME} PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/include PUBLIC ${CMAKE_CURRENT_BINARY_DIR}/include)
 generate_export_header(${PROJECT_NAME} BASE_NAME CORO EXPORT_FILE_NAME include/coro/export.hpp)
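The new options compose with the rest of the cache like any other flag; for example, a thread-sanitizer run of the test suite might look like this (directory names are illustrative):

    mkdir build-tsan && cd build-tsan
    cmake -DCMAKE_BUILD_TYPE=Debug -DLIBCORO_ENABLE_TSAN=ON ..
    cmake --build .
    ctest -VV

The sanitizer blocks already inject -g -O0 -fno-omit-frame-pointer, so a Debug build type is optional but keeps the rest of the flags consistent.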