diff --git a/master/eigensolver_2band__to__tridiag_2mc_8h_source.html b/master/eigensolver_2band__to__tridiag_2mc_8h_source.html
index 35c826110a..a754c4c5e3 100644
--- a/master/eigensolver_2band__to__tridiag_2mc_8h_source.html
+++ b/master/eigensolver_2band__to__tridiag_2mc_8h_source.html
@@ -895,7 +895,7 @@
824 ex::when_all_vector(matrix::select(mat_v, common::iterate_range2d(LocalTileIndex{i, i},
825 LocalTileSize{n - i, 1}))) |
826 ex::then([](TileVector&& vector) { return std::make_shared<TileVector>(std::move(vector)); }) |
- 827 ex::split();
+ 827 ex::drop_operation_state() | ex::split();
830 ex::when_all(std::move(sem_sender), ex::just(sem_next, sweep), w_pipeline(), tiles_v) |
@@ -1410,7 +1410,7 @@
1339 if (sweep % b == 0) {
1340 tile_v = panel_v.readwrite(LocalTileIndex{id_block_local, 0}) |
1341 ex::then([](Tile&& tile) { return std::make_shared<Tile>(std::move(tile)); }) |
- 1342 ex::split();
+ 1342 ex::drop_operation_state() | ex::split();
1345 ex::unique_any_sender<SemaphorePtr> sem_sender;
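
Both hunks in this file apply the same pattern: the result is moved into a std::shared_ptr so every consumer of the split() sender can share it, and ex::drop_operation_state() is inserted before ex::split() so the upstream operation state is destroyed as soon as its value has been forwarded, instead of being kept alive by the shared split state. A minimal sketch of the idiom with a plain move-only value standing in for DLA-Future's tiles (a hypothetical example, not library code):

```cpp
#include <memory>
#include <utility>

#include <pika/execution.hpp>
#include <pika/init.hpp>

namespace ex = pika::execution::experimental;
namespace tt = pika::this_thread::experimental;

int main(int argc, char* argv[]) {
  pika::start(argc, argv);

  // A move-only value stands in for DLA-Future's Tile/TileVector.
  auto tile = std::make_unique<int>(42);

  auto shared_tile =
      ex::just(std::move(tile)) |
      // Wrap the move-only value in a shared_ptr so all split() consumers can share it.
      ex::then([](std::unique_ptr<int>&& t) { return std::make_shared<int>(*t); }) |
      // Destroy the upstream operation state (including the moved-from
      // unique_ptr it still stores) as soon as the value has been forwarded,
      // instead of keeping it alive for the lifetime of the split() state.
      ex::drop_operation_state() |
      ex::split();

  // Two independent consumers of the shared value.
  tt::sync_wait(shared_tile | ex::then([](std::shared_ptr<int> t) { return *t; }));
  tt::sync_wait(shared_tile | ex::then([](std::shared_ptr<int> t) { return *t + 1; }));

  pika::finalize();
  return pika::stop();
}
```
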
diff --git a/master/transform_8h_source.html b/master/transform_8h_source.html
index e32cc2c4fd..16bcc75f9b 100644
--- a/master/transform_8h_source.html
+++ b/master/transform_8h_source.html
@@ -121,143 +121,148 @@
51 typename F = void, typename Sender = void,
52 typename = std::enable_if_t<pika::execution::experimental::is_sender_v<Sender>>>
53 [[nodiscard]] decltype(auto) transform(const Policy<B> policy, F&& f, Sender&& sender) {
- 54 using pika::execution::experimental::then;
- 55 using pika::execution::experimental::transfer;
-
- 57 auto scheduler = getBackendScheduler<B>(policy.priority(), policy.stacksize());
- 58 auto transfer_sender = transfer(std::forward<Sender>(sender), std::move(scheduler));
-
-
-
-
- 63 if constexpr (B == Backend::MC) {
- 64 return then(std::move(transfer_sender), ConsumeRvalues{Unwrapping{std::forward<F>(f)}});
-
- 66 else if constexpr (B == Backend::GPU) {
- 67 #if defined(DLAF_WITH_GPU)
- 68 using pika::cuda::experimental::then_with_cublas;
- 69 using pika::cuda::experimental::then_with_cusolver;
- 70 using pika::cuda::experimental::then_with_stream;
-
- 72 if constexpr (Tag == TransformDispatchType::Plain) {
- 73 return then_with_stream(std::move(transfer_sender),
- 74 ConsumeRvalues{Unwrapping{std::forward<F>(f)}});
-
- 76 else if constexpr (Tag == TransformDispatchType::Blas) {
- 77 return then_with_cublas(std::move(transfer_sender), ConsumeRvalues{Unwrapping{std::forward<F>(f)}},
- 78 CUBLAS_POINTER_MODE_HOST);
-
- 80 else if constexpr (Tag == TransformDispatchType::Lapack) {
- 81 return then_with_cusolver(std::move(transfer_sender),
- 82 ConsumeRvalues{Unwrapping{std::forward<F>(f)}});
+ 54 using pika::execution::experimental::drop_operation_state;
+ 55 using pika::execution::experimental::then;
+ 56 using pika::execution::experimental::transfer;
+
+ 58 auto scheduler = getBackendScheduler<B>(policy.priority(), policy.stacksize());
+ 59 auto transfer_sender = transfer(std::forward<Sender>(sender), std::move(scheduler));
+
+
+
+
+ 64 if constexpr (B == Backend::MC) {
+ 65 return then(std::move(transfer_sender), ConsumeRvalues{Unwrapping{std::forward<F>(f)}}) |
+ 66 drop_operation_state();
+
+ 68 else if constexpr (B == Backend::GPU) {
+ 69 #if defined(DLAF_WITH_GPU)
+ 70 using pika::cuda::experimental::then_with_cublas;
+ 71 using pika::cuda::experimental::then_with_cusolver;
+ 72 using pika::cuda::experimental::then_with_stream;
+
+ 74 if constexpr (Tag == TransformDispatchType::Plain) {
+ 75 return then_with_stream(std::move(transfer_sender),
+ 76 ConsumeRvalues{Unwrapping{std::forward<F>(f)}}) |
+ 77 drop_operation_state();
+
+ 79 else if constexpr (Tag == TransformDispatchType::Blas) {
+ 80 return then_with_cublas(std::move(transfer_sender), ConsumeRvalues{Unwrapping{std::forward<F>(f)}},
+ 81 CUBLAS_POINTER_MODE_HOST) |
+ 82 drop_operation_state();
-
-
-
- 87 "Attempting to use transform with a GPU policy, but f is not invocable with a CUDA stream as the last argument or cuBLAS/cuSOLVER handle as the first argument.");
+ 84 else if constexpr (Tag == TransformDispatchType::Lapack) {
+ 85 return then_with_cusolver(std::move(transfer_sender),
+ 86 ConsumeRvalues{Unwrapping{std::forward<F>(f)}}) |
+ 87 drop_operation_state();
-
- 90 DLAF_STATIC_FAIL(Sender, "Attempting to use transform with Backend::GPU but it is disabled");
-
-
-
- 94 DLAF_STATIC_FAIL(Sender, "Unknown backend given to transform");
-
-
-
- 99 template <TransformDispatchType Tag = TransformDispatchType::Plain, Backend B = Backend::MC,
- 100 typename F = void, typename Sender = void,
- 101 typename = std::enable_if_t<pika::execution::experimental::is_sender_v<Sender>>>
- 102 void transformDetach(const Policy<B> policy, F&& f, Sender&& sender) {
- 103 pika::execution::experimental::start_detached(transform<Tag>(policy, std::forward<F>(f),
- 104 std::forward<Sender>(sender)));
-
-
- 110 template <TransformDispatchType Tag, Backend B, typename F, typename... Ts>
- 111 [[nodiscard]] decltype(auto) transformLift(const Policy<B> policy, F&& f, Ts&&... ts) {
- 112 return transform<Tag>(policy, std::forward<F>(f), internal::whenAllLift(std::forward<Ts>(ts)...));
-
-
- 118 template <TransformDispatchType Tag = TransformDispatchType::Plain, Backend B = Backend::MC,
- 119 typename F = void, typename... Ts>
- 120 void transformLiftDetach(const Policy<B> policy, F&& f, Ts&&... ts) {
- 121 pika::execution::experimental::start_detached(transformLift<Tag>(policy, std::forward<F>(f),
- 122 std::forward<Ts>(ts)...));
-
-
- 125 template <TransformDispatchType Tag, Backend B, typename F>
-
-
-
-
-
- 134 template <TransformDispatchType Tag, Backend B, typename F>
-
-
- 137 template <typename F_>
-
-
-
-
-
-
-
- 145 template <typename Sender>
-
- 147 return transform<Tag, B>(pa.policy_, std::move(pa.f_), std::forward<Sender>(sender));
-
-
-
- 151 template <TransformDispatchType Tag, Backend B, typename F>
- 152 auto makePartialTransform(const Policy<B> policy, F&& f) {
-
-
+
+
+
+ 92 "Attempting to use transform with a GPU policy, but f is not invocable with a CUDA stream as the last argument or cuBLAS/cuSOLVER handle as the first argument.");
+
+
+ 95 DLAF_STATIC_FAIL(Sender, "Attempting to use transform with Backend::GPU but it is disabled");
+
+
+
+ 99 DLAF_STATIC_FAIL(Sender, "Unknown backend given to transform");
+
+
+
+ 104 template <TransformDispatchType Tag = TransformDispatchType::Plain, Backend B = Backend::MC,
+ 105 typename F = void, typename Sender = void,
+ 106 typename = std::enable_if_t<pika::execution::experimental::is_sender_v<Sender>>>
+ 107 void transformDetach(const Policy<B> policy, F&& f, Sender&& sender) {
+ 108 pika::execution::experimental::start_detached(transform<Tag>(policy, std::forward<F>(f),
+ 109 std::forward<Sender>(sender)));
+
+
+ 115 template <TransformDispatchType Tag, Backend B, typename F, typename... Ts>
+ 116 [[nodiscard]] decltype(auto) transformLift(const Policy<B> policy, F&& f, Ts&&... ts) {
+ 117 return transform<Tag>(policy, std::forward<F>(f), internal::whenAllLift(std::forward<Ts>(ts)...));
+
+
+ 123 template <TransformDispatchType Tag = TransformDispatchType::Plain, Backend B = Backend::MC,
+ 124 typename F = void, typename... Ts>
+ 125 void transformLiftDetach(const Policy<B> policy, F&& f, Ts&&... ts) {
+ 126 pika::execution::experimental::start_detached(transformLift<Tag>(policy, std::forward<F>(f),
+ 127 std::forward<Ts>(ts)...));
+
+
+ 130 template <TransformDispatchType Tag, Backend B, typename F>
+
+
+
+
+
+ 139 template <TransformDispatchType Tag, Backend B, typename F>
+
+
+ 142 template <typename F_>
+
+
+
+
+
+
+
+ 150 template <typename Sender>
+
+ 152 return transform<Tag, B>(pa.policy_, std::move(pa.f_), std::forward<Sender>(sender));
+
+
- 159 template <TransformDispatchType Tag, Backend B, typename F>
-
-
- 162 template <typename F_>
-
-
-
-
-
-
-
- 170 template <typename Sender>
-
- 172 return pika::execution::experimental::start_detached(
- 173 transform<Tag, B>(pa.policy_, std::move(pa.f_), std::forward<Sender>(sender)));
-
-
-
- 177 template <TransformDispatchType Tag, Backend B, typename F>
- 178 auto makePartialTransformDetach(const Policy<B> policy, F&& f) {
-
-
+ 156 template <TransformDispatchType Tag, Backend B, typename F>
+ 157 auto makePartialTransform(const Policy<B> policy, F&& f) {
+
+
+
+ 164 template <TransformDispatchType Tag, Backend B, typename F>
+
+
+ 167 template <typename F_>
+
+
+
+
+
+
+
+ 175 template <typename Sender>
+
+ 177 return pika::execution::experimental::start_detached(
+ 178 transform<Tag, B>(pa.policy_, std::move(pa.f_), std::forward<Sender>(sender)));
+
+
- 186 template <TransformDispatchType Tag = TransformDispatchType::Plain, Backend B = Backend::MC,
-
- 188 [[nodiscard]] decltype(auto) transform(const Policy<B> policy, F&& f) {
- 189 return makePartialTransform<Tag>(policy, std::forward<F>(f));
-
-
- 196 template <TransformDispatchType Tag = TransformDispatchType::Plain, Backend B = Backend::MC,
-
- 198 [[nodiscard]] decltype(auto) transformDetach(const Policy<B> policy, F&& f) {
- 199 return makePartialTransformDetach<Tag>(policy, std::forward<F>(f));
-
-
-
-
-
+ 182 template <TransformDispatchType Tag, Backend B, typename F>
+ 183 auto makePartialTransformDetach(const Policy<B> policy, F&& f) {
+
+
+
+ 191 template <TransformDispatchType Tag = TransformDispatchType::Plain, Backend B = Backend::MC,
+
+ 193 [[nodiscard]] decltype(auto) transform(const Policy<B> policy, F&& f) {
+ 194 return makePartialTransform<Tag>(policy, std::forward<F>(f));
+
+
+ 201 template <TransformDispatchType Tag = TransformDispatchType::Plain, Backend B = Backend::MC,
+
+ 203 [[nodiscard]] decltype(auto) transformDetach(const Policy<B> policy, F&& f) {
+ 204 return makePartialTransformDetach<Tag>(policy, std::forward<F>(f));
+
+
+
+
+
Definition: consume_rvalues.h:33
-
+
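
Every backend branch of transform() now pipes its continuation through drop_operation_state(), so arguments already consumed by ConsumeRvalues{Unwrapping{...}} are freed once the callable has run rather than when the whole sender chain is torn down. A simplified sketch of the Backend::MC branch (a hypothetical transform_like helper; DLA-Future's real dispatcher also handles priorities, stack sizes, and the GPU backends):

```cpp
#include <utility>

#include <pika/execution.hpp>
#include <pika/init.hpp>

namespace ex = pika::execution::experimental;
namespace tt = pika::this_thread::experimental;

// Hypothetical reduction of transform()'s Backend::MC branch.
template <typename F, typename Sender>
[[nodiscard]] decltype(auto) transform_like(F&& f, Sender&& sender) {
  // Move the work onto a scheduler, run f, then free the operation state eagerly.
  return ex::transfer(std::forward<Sender>(sender), ex::thread_pool_scheduler{}) |
         ex::then(std::forward<F>(f)) |
         ex::drop_operation_state();
}

int main(int argc, char* argv[]) {
  pika::start(argc, argv);

  auto doubled = tt::sync_wait(transform_like([](int x) { return 2 * x; }, ex::just(21)));
  (void) doubled;  // 42

  pika::finalize();
  return pika::stop();
}
```
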
diff --git a/master/transform__mpi_8h_source.html b/master/transform__mpi_8h_source.html
index 8a5f9307d9..c11ad2beb9 100644
--- a/master/transform__mpi_8h_source.html
+++ b/master/transform__mpi_8h_source.html
@@ -82,153 +82,156 @@
12 #include <type_traits>
-
-
-
-
- 18 #include <dlaf/sender/transform.h>
- 19 #include <dlaf/sender/when_all_lift.h>
-
- 21 namespace dlaf::comm::internal {
+ 14 #include <pika/execution.hpp>
+
+
+
+
+
+ 20 #include <dlaf/sender/transform.h>
+ 21 #include <dlaf/sender/when_all_lift.h>
- 26 inline void consumeCommunicatorWrapper(common::Pipeline<Communicator>::Wrapper& comm_wrapper) {
- 27 [[maybe_unused]] auto comm_wrapper_local = std::move(comm_wrapper);
-
-
-
- 32 void consumeCommunicatorWrapper(T&) {}
-
-
-
-
- 47 template <typename... Ts>
- 48 auto operator()(Ts&&... ts)
- 49 -> decltype(std::move(f)(dlaf::common::internal::unwrap(ts)..., std::declval<MPI_Request*>())) {
-
- 51 auto is_request_completed = [&req] {
-
- 53 MPI_Test(&req, &flag, MPI_STATUS_IGNORE);
-
-
-
-
-
-
-
-
-
-
-
-
-
- 67 using result_type = decltype(std::move(f)(dlaf::common::internal::unwrap(ts)..., &req));
- 68 if constexpr (std::is_void_v<result_type>) {
- 69 std::move(f)(dlaf::common::internal::unwrap(ts)..., &req);
- 70 (internal::consumeCommunicatorWrapper(ts), ...);
- 71 pika::util::yield_while(is_request_completed);
-
-
- 74 auto r = std::move(f)(dlaf::common::internal::unwrap(ts)..., &req);
- 75 (internal::consumeCommunicatorWrapper(ts), ...);
- 76 pika::util::yield_while(is_request_completed);
-
-
-
-
-
-
-
-
- 86 template <typename F, typename Sender,
- 87 typename = std::enable_if_t<pika::execution::experimental::is_sender_v<Sender>>>
- 88 [[nodiscard]] decltype(auto) transformMPI(F&& f, Sender&& sender) {
- 89 namespace ex = pika::execution::experimental;
-
- 91 return ex::transfer(std::forward<Sender>(sender),
- 92 ex::with_priority(dlaf::internal::getMPIScheduler(),
- 93 pika::execution::thread_priority::boost)) |
-
-
-
- 98 template <typename F, typename Sender,
- 99 typename = std::enable_if_t<pika::execution::experimental::is_sender_v<Sender>>>
- 100 void transformMPIDetach(F&& f, Sender&& sender) {
- 101 pika::execution::experimental::start_detached(transformMPI(std::forward<F>(f),
- 102 std::forward<Sender>(sender)));
-
-
- 108 template <typename F, typename... Ts>
- 109 [[nodiscard]] decltype(auto) transformMPILift(F&& f, Ts&&... ts) {
- 110 return transformMPI(std::forward<F>(f), dlaf::internal::whenAllLift(std::forward<Ts>(ts)...));
-
-
- 116 template <typename F, typename... Ts>
- 117 void transformMPILiftDetach(F&& f, Ts&&... ts) {
- 118 pika::execution::experimental::start_detached(transformLift(std::forward<F>(f),
- 119 std::forward<Ts>(ts)...));
-
-
- 122 template <typename F>
-
-
-
-
- 130 template <typename F>
-
-
- 133 template <typename F_>
-
-
-
-
-
-
- 140 template <typename Sender>
-
- 142 return transformMPI(std::move(pa.f_), std::forward<Sender>(sender));
-
-
-
- 146 template <typename F>
-
+ 23 namespace dlaf::comm::internal {
+
+ 28 inline void consumeCommunicatorWrapper(common::Pipeline<Communicator>::Wrapper& comm_wrapper) {
+ 29 [[maybe_unused]] auto comm_wrapper_local = std::move(comm_wrapper);
+
+
+
+ 34 void consumeCommunicatorWrapper(T&) {}
+
+
+
+
+ 49 template <typename... Ts>
+ 50 auto operator()(Ts&&... ts)
+ 51 -> decltype(std::move(f)(dlaf::common::internal::unwrap(ts)..., std::declval<MPI_Request*>())) {
+
+ 53 auto is_request_completed = [&req] {
+
+ 55 MPI_Test(&req, &flag, MPI_STATUS_IGNORE);
+
+
+
+
+
+
+
+
+
+
+
+
+
+ 69 using result_type = decltype(std::move(f)(dlaf::common::internal::unwrap(ts)..., &req));
+ 70 if constexpr (std::is_void_v<result_type>) {
+ 71 std::move(f)(dlaf::common::internal::unwrap(ts)..., &req);
+ 72 (internal::consumeCommunicatorWrapper(ts), ...);
+ 73 pika::util::yield_while(is_request_completed);
+
+
+ 76 auto r = std::move(f)(dlaf::common::internal::unwrap(ts)..., &req);
+ 77 (internal::consumeCommunicatorWrapper(ts), ...);
+ 78 pika::util::yield_while(is_request_completed);
+
+
+
+
+
+
+
+
+ 88 template <typename F, typename Sender,
+ 89 typename = std::enable_if_t<pika::execution::experimental::is_sender_v<Sender>>>
+ 90 [[nodiscard]] decltype(auto) transformMPI(F&& f, Sender&& sender) {
+ 91 namespace ex = pika::execution::experimental;
+
+ 93 return ex::transfer(std::forward<Sender>(sender),
+ 94 ex::with_priority(dlaf::internal::getMPIScheduler(),
+ 95 pika::execution::thread_priority::boost)) |
+
+ 97 ex::drop_operation_state();
+
+
+ 101 template <typename F, typename Sender,
+ 102 typename = std::enable_if_t<pika::execution::experimental::is_sender_v<Sender>>>
+ 103 void transformMPIDetach(F&& f, Sender&& sender) {
+ 104 pika::execution::experimental::start_detached(transformMPI(std::forward<F>(f),
+ 105 std::forward<Sender>(sender)));
+
+
+ 111 template <typename F, typename... Ts>
+ 112 [[nodiscard]] decltype(auto) transformMPILift(F&& f, Ts&&... ts) {
+ 113 return transformMPI(std::forward<F>(f), dlaf::internal::whenAllLift(std::forward<Ts>(ts)...));
+
+
+ 119 template <typename F, typename... Ts>
+ 120 void transformMPILiftDetach(F&& f, Ts&&... ts) {
+ 121 pika::execution::experimental::start_detached(transformLift(std::forward<F>(f),
+ 122 std::forward<Ts>(ts)...));
+
+
+ 125 template <typename F>
+
+
+
+
+ 133 template <typename F>
+
+
+ 136 template <typename F_>
+
+
+
+
+
+
+ 143 template <typename Sender>
+
+ 145 return transformMPI(std::move(pa.f_), std::forward<Sender>(sender));
+
+
- 152 template <typename F>
-
-
- 155 template <typename F_>
-
-
-
-
-
-
- 162 template <typename Sender>
-
- 164 return pika::execution::experimental::start_detached(transformMPI(std::move(pa.f_),
- 165 std::forward<Sender>(sender)));
-
-
-
- 169 template <typename F>
-
+ 149 template <typename F>
+
+
+ 155 template <typename F>
+
+
+ 158 template <typename F_>
+
+
+
+
+
+
+ 165 template <typename Sender>
+
+ 167 return pika::execution::experimental::start_detached(transformMPI(std::move(pa.f_),
+ 168 std::forward<Sender>(sender)));
+
+
- 176 template <typename F>
- 177 [[nodiscard]] decltype(auto) transformMPI(F&& f) {
-
-
-
- 185 template <typename F>
- 186 [[nodiscard]] decltype(auto) transformMPIDetach(F&& f) {
- 187 return PartialTransformMPIDetach{std::forward<F>(f)};
-
-
-
-
+ 172 template <typename F>
+
+
+ 179 template <typename F>
+ 180 [[nodiscard]] decltype(auto) transformMPI(F&& f) {
+
+
+
+ 188 template <typename F>
+ 189 [[nodiscard]] decltype(auto) transformMPIDetach(F&& f) {
+ 190 return PartialTransformMPIDetach{std::forward<F>(f)};
+
+
+
+
-Definition: transform_mpi.h:45
-
+Definition: transform_mpi.h:47
+
Definition: consume_rvalues.h:33
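
For context, the MPI wrapper body shown above (listing lines 49-78) follows a poll-and-yield idiom: issue the non-blocking MPI call, release the communicator wrapper right away via consumeCommunicatorWrapper, then yield_while on MPI_Test so the pika worker thread is never blocked inside MPI. A self-contained sketch of that idiom (hypothetical ibcast_and_yield; include paths for yield_while are an assumption, and MPI plus the pika runtime are assumed to be initialized):

```cpp
#include <mpi.h>

#include <pika/execution.hpp>
#include <pika/thread.hpp>  // assumed location of pika::util::yield_while

// Issue a non-blocking broadcast, then yield this pika task until the request
// completes; the worker thread keeps running other tasks in the meantime.
inline void ibcast_and_yield(int& value, MPI_Comm comm) {
  MPI_Request req;
  MPI_Ibcast(&value, 1, MPI_INT, /*root=*/0, comm, &req);
  pika::util::yield_while([&req] {
    int flag = 0;
    MPI_Test(&req, &flag, MPI_STATUS_IGNORE);
    return flag == 0;  // keep yielding while the request is still in flight
  });
}
```
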