diff --git a/cudax/include/cuda/experimental/__memory_resource/pinned_memory_resource.cuh b/cudax/include/cuda/experimental/__memory_resource/pinned_memory_resource.cuh
index 68ccaf2344b..f94d5e77af3 100644
--- a/cudax/include/cuda/experimental/__memory_resource/pinned_memory_resource.cuh
+++ b/cudax/include/cuda/experimental/__memory_resource/pinned_memory_resource.cuh
@@ -4,7 +4,7 @@
 // under the Apache License v2.0 with LLVM Exceptions.
 // See https://llvm.org/LICENSE.txt for license information.
 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-// SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES.
+// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES.
 //
 //===----------------------------------------------------------------------===//
 
@@ -37,11 +37,19 @@
 #include 
 
 //! @file
-//! The \c managed_memory_resource class provides a memory resource that allocates pinned memory.
+//! The \c pinned_memory_resource class provides a memory resource that allocates pinned memory.
 
 namespace cuda::experimental
 {
 
-//! @brief pinned_memory_resource uses `cudaMallocHost` / `cudaFreeHost` for allocation / deallocation.
+//! @rst
+//! .. _cudax-memory-resource-pinned-memory-resource:
+//!
+//! Pinned memory resource
+//! ----------------------
+//!
+//! ``pinned_memory_resource`` uses `cudaMallocHost` / `cudaFreeHost` for allocation / deallocation.
+//!
+//! @endrst
 class pinned_memory_resource
 {
 private:
diff --git a/cudax/include/cuda/experimental/__memory_resource/managed_memory_resource.cuh b/cudax/include/cuda/experimental/__memory_resource/unified_memory_resource.cuh
similarity index 80%
rename from cudax/include/cuda/experimental/__memory_resource/managed_memory_resource.cuh
rename to cudax/include/cuda/experimental/__memory_resource/unified_memory_resource.cuh
index b583450d414..bac50b9e206 100644
--- a/cudax/include/cuda/experimental/__memory_resource/managed_memory_resource.cuh
+++ b/cudax/include/cuda/experimental/__memory_resource/unified_memory_resource.cuh
@@ -4,12 +4,12 @@
 // under the Apache License v2.0 with LLVM Exceptions.
 // See https://llvm.org/LICENSE.txt for license information.
 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-// SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES.
+// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES.
 //
 //===----------------------------------------------------------------------===//
 
-#ifndef _CUDAX__MEMORY_RESOURCE_MANAGED_MEMORY_RESOURCE_CUH
-#define _CUDAX__MEMORY_RESOURCE_MANAGED_MEMORY_RESOURCE_CUH
+#ifndef _CUDAX__MEMORY_RESOURCE_UNIFIED_MEMORY_RESOURCE_CUH
+#define _CUDAX__MEMORY_RESOURCE_UNIFIED_MEMORY_RESOURCE_CUH
 
 #include 
 
@@ -36,12 +36,20 @@
 #include 
 
 //! @file
-//! The \c managed_memory_resource class provides a memory resource that allocates managed memory.
+//! The \c unified_memory_resource class provides a memory resource that allocates unified memory.
 
 namespace cuda::experimental
 {
 
-//! @brief \c managed_memory_resource uses `cudaMallocManaged` / `cudaFree` for allocation / deallocation.
-class managed_memory_resource
+//! @rst
+//! .. _cudax-memory-resource-unified-memory-resource:
+//!
+//! Unified memory resource
+//! -----------------------
+//!
+//! ``unified_memory_resource`` uses `cudaMallocManaged` / `cudaFree` for allocation / deallocation.
+//!
+//! @endrst
+class unified_memory_resource
 {
 private:
   unsigned int __flags_ = cudaMemAttachGlobal;
 
@@ -49,10 +57,10 @@ private:
   static constexpr unsigned int __available_flags = cudaMemAttachGlobal | cudaMemAttachHost;
 
 public:
-  constexpr managed_memory_resource(const unsigned int __flags = cudaMemAttachGlobal) noexcept
+  constexpr unified_memory_resource(const unsigned int __flags = cudaMemAttachGlobal) noexcept
       : __flags_(__flags & __available_flags)
   {
-    _CCCL_ASSERT(__flags_ == __flags, "Unexpected flags passed to managed_memory_resource");
+    _CCCL_ASSERT(__flags_ == __flags, "Unexpected flags passed to unified_memory_resource");
   }
 
   //! @brief Allocate CUDA unified memory of size at least \p __bytes.
@@ -66,7 +74,7 @@ public:
     // We need to ensure that the provided alignment matches the minimal provided alignment
     if (!__is_valid_alignment(__alignment))
     {
-      _CUDA_VSTD::__throw_invalid_argument("Invalid alignment passed to managed_memory_resource::allocate.");
+      _CUDA_VSTD::__throw_invalid_argument("Invalid alignment passed to unified_memory_resource::allocate.");
     }
 
     void* __ptr{nullptr};
@@ -107,8 +115,8 @@ public:
     void* __ptr, const size_t, const size_t __alignment = _CUDA_VMR::default_cuda_malloc_alignment) const noexcept
   {
     // We need to ensure that the provided alignment matches the minimal provided alignment
-    _CCCL_ASSERT(__is_valid_alignment(__alignment), "Invalid alignment passed to managed_memory_resource::deallocate.");
-    _CCCL_ASSERT_CUDA_API(::cudaFree, "managed_memory_resource::deallocate failed", __ptr);
+    _CCCL_ASSERT(__is_valid_alignment(__alignment), "Invalid alignment passed to unified_memory_resource::deallocate.");
+    _CCCL_ASSERT_CUDA_API(::cudaFree, "unified_memory_resource::deallocate failed", __ptr);
     (void) __alignment;
   }
 
@@ -142,18 +150,18 @@ public:
     (void) __stream;
   }
 
-  //! @brief Equality comparison with another \c managed_memory_resource.
-  //! @param __other The other \c managed_memory_resource.
-  //! @return Whether both \c managed_memory_resource were constructed with the same flags.
-  _CCCL_NODISCARD constexpr bool operator==(managed_memory_resource const& __other) const noexcept
+  //! @brief Equality comparison with another \c unified_memory_resource.
+  //! @param __other The other \c unified_memory_resource.
+  //! @return Whether both \c unified_memory_resource were constructed with the same flags.
+  _CCCL_NODISCARD constexpr bool operator==(unified_memory_resource const& __other) const noexcept
   {
     return __flags_ == __other.__flags_;
   }
 
 #if _CCCL_STD_VER <= 2017
-  //! @brief Inequality comparison with another \c managed_memory_resource.
-  //! @param __other The other \c managed_memory_resource.
-  //! @return Whether both \c managed_memory_resource were constructed with different flags.
-  _CCCL_NODISCARD constexpr bool operator!=(managed_memory_resource const& __other) const noexcept
+  //! @brief Inequality comparison with another \c unified_memory_resource.
+  //! @param __other The other \c unified_memory_resource.
+  //! @return Whether both \c unified_memory_resource were constructed with different flags.
+  _CCCL_NODISCARD constexpr bool operator!=(unified_memory_resource const& __other) const noexcept
   {
     return __flags_ != __other.__flags_;
   }
@@ -167,12 +175,12 @@ private:
   {
     if constexpr (has_property<_Resource, device_accessible>)
     {
-      return resource_ref{*const_cast(this)}
+      return resource_ref{*const_cast(this)}
          == __cudax::__as_resource_ref(const_cast<_Resource&>(__rhs));
     }
     else if constexpr (has_property<_Resource, host_accessible>)
     {
-      return resource_ref{*const_cast(this)}
+      return resource_ref{*const_cast(this)}
          == __cudax::__as_resource_ref(const_cast<_Resource&>(__rhs));
     }
     else
@@ -183,50 +191,50 @@ public:
 # if _CCCL_STD_VER >= 2020
-  //! @brief Equality comparison between a \c managed_memory_resource and another resource
+  //! @brief Equality comparison between a \c unified_memory_resource and another resource
   //! @param __rhs The resource to compare to
   //! @return If the underlying types are equality comparable, returns the result of equality comparison of both
   //! resources. Otherwise, returns false.
   template
-    requires _CUDA_VMR::__different_resource
+    requires _CUDA_VMR::__different_resource
   _CCCL_NODISCARD bool operator==(_Resource const& __rhs) const noexcept
   {
     return this->__equal_to(__rhs);
   }
 # else // ^^^ C++20 ^^^ / vvv C++17
   template
-  _CCCL_NODISCARD_FRIEND auto operator==(managed_memory_resource const& __lhs, _Resource const& __rhs) noexcept
-    _CCCL_TRAILING_REQUIRES(bool)(_CUDA_VMR::__different_resource)
+  _CCCL_NODISCARD_FRIEND auto operator==(unified_memory_resource const& __lhs, _Resource const& __rhs) noexcept
+    _CCCL_TRAILING_REQUIRES(bool)(_CUDA_VMR::__different_resource)
   {
     return __lhs.__equal_to(__rhs);
   }
   template
-  _CCCL_NODISCARD_FRIEND auto operator==(_Resource const& __lhs, managed_memory_resource const& __rhs) noexcept
-    _CCCL_TRAILING_REQUIRES(bool)(_CUDA_VMR::__different_resource)
+  _CCCL_NODISCARD_FRIEND auto operator==(_Resource const& __lhs, unified_memory_resource const& __rhs) noexcept
+    _CCCL_TRAILING_REQUIRES(bool)(_CUDA_VMR::__different_resource)
   {
     return __rhs.__equal_to(__lhs);
   }
   template
-  _CCCL_NODISCARD_FRIEND auto operator!=(managed_memory_resource const& __lhs, _Resource const& __rhs) noexcept
-    _CCCL_TRAILING_REQUIRES(bool)(_CUDA_VMR::__different_resource)
+  _CCCL_NODISCARD_FRIEND auto operator!=(unified_memory_resource const& __lhs, _Resource const& __rhs) noexcept
+    _CCCL_TRAILING_REQUIRES(bool)(_CUDA_VMR::__different_resource)
   {
     return !__lhs.__equal_to(__rhs);
   }
   template
-  _CCCL_NODISCARD_FRIEND auto operator!=(_Resource const& __lhs, managed_memory_resource const& __rhs) noexcept
-    _CCCL_TRAILING_REQUIRES(bool)(_CUDA_VMR::__different_resource)
+  _CCCL_NODISCARD_FRIEND auto operator!=(_Resource const& __lhs, unified_memory_resource const& __rhs) noexcept
+    _CCCL_TRAILING_REQUIRES(bool)(_CUDA_VMR::__different_resource)
   {
     return !__rhs.__equal_to(__lhs);
   }
 # endif // _CCCL_STD_VER <= 2017
 
   //! @brief Enables the \c device_accessible property
-  friend constexpr void get_property(managed_memory_resource const&, device_accessible) noexcept {}
+  friend constexpr void get_property(unified_memory_resource const&, device_accessible) noexcept {}
   //! @brief Enables the \c host_accessible property
-  friend constexpr void get_property(managed_memory_resource const&, host_accessible) noexcept {}
+  friend constexpr void get_property(unified_memory_resource const&, host_accessible) noexcept {}
 #endif // _CCCL_DOXYGEN_INVOKED
 
   //! @brief Checks whether the passed in alignment is valid
@@ -236,9 +244,9 @@ public:
         && (_CUDA_VMR::default_cuda_malloc_alignment % __alignment == 0);
   }
 };
-static_assert(_CUDA_VMR::async_resource_with, "");
-static_assert(_CUDA_VMR::async_resource_with, "");
+static_assert(_CUDA_VMR::async_resource_with, "");
+static_assert(_CUDA_VMR::async_resource_with, "");
 
 } // namespace cuda::experimental
 
-#endif //_CUDAX__MEMORY_RESOURCE_MANAGED_MEMORY_RESOURCE_CUH
+#endif //_CUDAX__MEMORY_RESOURCE_UNIFIED_MEMORY_RESOURCE_CUH
diff --git a/cudax/include/cuda/experimental/memory_resource.cuh b/cudax/include/cuda/experimental/memory_resource.cuh
index 16d621da2c5..2a09f665281 100644
--- a/cudax/include/cuda/experimental/memory_resource.cuh
+++ b/cudax/include/cuda/experimental/memory_resource.cuh
@@ -4,7 +4,7 @@
 // under the Apache License v2.0 with LLVM Exceptions.
 // See https://llvm.org/LICENSE.txt for license information.
 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-// SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES.
+// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES.
 //
 //===----------------------------------------------------------------------===//
 
@@ -15,9 +15,9 @@
 #include 
 #include 
 #include 
-#include 
 #include 
 #include 
 #include 
+#include 
 
 #endif // __CUDAX_MEMORY_RESOURCE___
diff --git a/cudax/test/CMakeLists.txt b/cudax/test/CMakeLists.txt
index f63336f7b55..25c68c9868c 100644
--- a/cudax/test/CMakeLists.txt
+++ b/cudax/test/CMakeLists.txt
@@ -114,7 +114,7 @@ foreach(cn_target IN LISTS cudax_TARGETS)
     memory_resource/device_memory_pool.cu
     memory_resource/device_memory_resource.cu
     memory_resource/get_memory_resource.cu
-    memory_resource/managed_memory_resource.cu
+    memory_resource/unified_memory_resource.cu
     memory_resource/pinned_memory_resource.cu
     memory_resource/shared_resource.cu
   )
diff --git a/cudax/test/algorithm/copy.cu b/cudax/test/algorithm/copy.cu
index 60bf54f4892..8508d3460f0 100644
--- a/cudax/test/algorithm/copy.cu
+++ b/cudax/test/algorithm/copy.cu
@@ -4,7 +4,7 @@
 // under the Apache License v2.0 with LLVM Exceptions.
 // See https://llvm.org/LICENSE.txt for license information.
 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-// SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES.
+// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES.
 //
 //===----------------------------------------------------------------------===//
 
@@ -44,14 +44,14 @@ TEST_CASE("1d Copy", "[data_manipulation]")
     }
   }
 
-  SECTION("Host and managed resource")
+  SECTION("Host and unified resource")
   {
-    cudax::managed_memory_resource managed_resource;
+    cudax::unified_memory_resource unified_resource;
     cudax::pinned_memory_resource host_resource;
     {
       cudax::uninitialized_buffer host_buffer(host_resource, buffer_size);
-      cudax::uninitialized_buffer device_buffer(managed_resource, buffer_size);
+      cudax::uninitialized_buffer device_buffer(unified_resource, buffer_size);
 
       cudax::fill_bytes(_stream, host_buffer, fill_byte);
 
@@ -64,7 +64,7 @@ TEST_CASE("1d Copy", "[data_manipulation]")
 
     {
       cudax::uninitialized_buffer not_yet_const_host_buffer(host_resource, buffer_size);
-      cudax::uninitialized_buffer device_buffer(managed_resource, buffer_size);
+      cudax::uninitialized_buffer device_buffer(unified_resource, buffer_size);
       cudax::fill_bytes(_stream, not_yet_const_host_buffer, fill_byte);
 
       const auto& const_host_buffer = not_yet_const_host_buffer;
diff --git a/cudax/test/containers/uninitialized_buffer.cu b/cudax/test/containers/uninitialized_buffer.cu
index 1aa8f467d8a..cc82b7faa47 100644
--- a/cudax/test/containers/uninitialized_buffer.cu
+++ b/cudax/test/containers/uninitialized_buffer.cu
@@ -4,7 +4,7 @@
 // under the Apache License v2.0 with LLVM Exceptions.
 // See https://llvm.org/LICENSE.txt for license information.
 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-// SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES.
+// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES.
 //
 //===----------------------------------------------------------------------===//
 
@@ -123,7 +123,7 @@ TEMPLATE_TEST_CASE(
   {
     static_assert(!cuda::std::is_copy_assignable::value, "");
     {
-      cudax::managed_memory_resource other_resource{};
+      cudax::unified_memory_resource other_resource{};
       uninitialized_buffer input{other_resource, 42};
       uninitialized_buffer buf{resource, 1337};
       const auto* old_ptr = buf.data();
diff --git a/cudax/test/memory_resource/any_resource.cu b/cudax/test/memory_resource/any_resource.cu
index c7aae10fea5..76a6a7399b8 100644
--- a/cudax/test/memory_resource/any_resource.cu
+++ b/cudax/test/memory_resource/any_resource.cu
@@ -4,7 +4,7 @@
 // under the Apache License v2.0 with LLVM Exceptions.
 // See https://llvm.org/LICENSE.txt for license information.
 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-// SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES.
+// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES.
 //
 //===----------------------------------------------------------------------===//
 
@@ -116,10 +116,10 @@ TEMPLATE_TEST_CASE_METHOD(test_fixture, "any_resource", "[container][resource]",
   Counts expected{};
   CHECK(this->counts == expected);
   {
-    cudax::managed_memory_resource managed1{}, managed2{};
-    CHECK(managed1 == managed2);
-    cudax::any_resource mr{managed1};
-    CHECK(mr == managed1);
+    cudax::unified_memory_resource unified1{}, unified2{};
+    CHECK(unified1 == unified2);
+    cudax::any_resource mr{unified1};
+    CHECK(mr == unified1);
   }
   CHECK(this->counts == expected);
 }
diff --git a/cudax/test/memory_resource/managed_memory_resource.cu b/cudax/test/memory_resource/unified_memory_resource.cu
similarity index 77%
rename from cudax/test/memory_resource/managed_memory_resource.cu
rename to cudax/test/memory_resource/unified_memory_resource.cu
index c0a4f66dc62..875c5edc9e6 100644
--- a/cudax/test/memory_resource/managed_memory_resource.cu
+++ b/cudax/test/memory_resource/unified_memory_resource.cu
@@ -4,7 +4,7 @@
 // under the Apache License v2.0 with LLVM Exceptions.
 // See https://llvm.org/LICENSE.txt for license information.
 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-// SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES.
+// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES.
 //
 //===----------------------------------------------------------------------===//
 
@@ -22,15 +22,15 @@
 namespace cudax = cuda::experimental;
 
-using managed_resource = cudax::managed_memory_resource;
-static_assert(!cuda::std::is_trivial::value, "");
-static_assert(!cuda::std::is_trivially_default_constructible::value, "");
-static_assert(cuda::std::is_trivially_copy_constructible::value, "");
-static_assert(cuda::std::is_trivially_move_constructible::value, "");
-static_assert(cuda::std::is_trivially_copy_assignable::value, "");
-static_assert(cuda::std::is_trivially_move_assignable::value, "");
-static_assert(cuda::std::is_trivially_destructible::value, "");
-static_assert(!cuda::std::is_empty::value, "");
+using unified_resource = cudax::unified_memory_resource;
+static_assert(!cuda::std::is_trivial::value, "");
+static_assert(!cuda::std::is_trivially_default_constructible::value, "");
+static_assert(cuda::std::is_trivially_copy_constructible::value, "");
+static_assert(cuda::std::is_trivially_move_constructible::value, "");
+static_assert(cuda::std::is_trivially_copy_assignable::value, "");
+static_assert(cuda::std::is_trivially_move_assignable::value, "");
+static_assert(cuda::std::is_trivially_destructible::value, "");
+static_assert(!cuda::std::is_empty::value, "");
 
 static void ensure_managed_ptr(void* ptr)
 {
@@ -41,24 +41,24 @@ static void ensure_managed_ptr(void* ptr)
   CHECK(attributes.type == cudaMemoryTypeManaged);
 }
 
-TEST_CASE("managed_memory_resource construction", "[memory_resource]")
+TEST_CASE("unified_memory_resource construction", "[memory_resource]")
 {
   SECTION("Default construction")
   {
-    STATIC_REQUIRE(cuda::std::is_default_constructible_v);
+    STATIC_REQUIRE(cuda::std::is_default_constructible_v);
   }
 
   SECTION("Construct with flag")
   {
-    managed_resource defaulted{};
-    managed_resource with_flag{cudaMemAttachHost};
+    unified_resource defaulted{};
+    unified_resource with_flag{cudaMemAttachHost};
     CHECK(defaulted != with_flag);
   }
 }
 
-TEST_CASE("managed_memory_resource allocation", "[memory_resource]")
+TEST_CASE("unified_memory_resource allocation", "[memory_resource]")
 {
-  managed_resource res{};
+  unified_resource res{};
   cudax::stream stream{};
 
   { // allocate / deallocate
@@ -203,29 +203,29 @@ static_assert(cuda::mr::async_resource>, "");
 static_assert(cuda::mr::async_resource>, "");
 
 // test for cccl#2214: https://github.com/NVIDIA/cccl/issues/2214
-struct derived_managed_resource : cudax::managed_memory_resource
+struct derived_unified_resource : cudax::unified_memory_resource
 {
-  using cudax::managed_memory_resource::managed_memory_resource;
+  using cudax::unified_memory_resource::unified_memory_resource;
 };
-static_assert(cuda::mr::resource, "");
+static_assert(cuda::mr::resource, "");
 
-TEST_CASE("managed_memory_resource comparison", "[memory_resource]")
+TEST_CASE("unified_memory_resource comparison", "[memory_resource]")
 {
-  managed_resource first{};
-  { // comparison against a plain managed_memory_resource
-    managed_resource second{};
+  unified_resource first{};
+  { // comparison against a plain unified_memory_resource
+    unified_resource second{};
     CHECK((first == second));
     CHECK(!(first != second));
   }
 
-  { // comparison against a plain managed_memory_resource with a different pool
-    managed_resource second{cudaMemAttachHost};
+  { // comparison against a plain unified_memory_resource with a different pool
+    unified_resource second{cudaMemAttachHost};
     CHECK((first != second));
     CHECK(!(first == second));
  }
 
-  { // comparison against a managed_memory_resource wrapped inside a resource_ref
-    managed_resource second{};
+  { // comparison against a unified_memory_resource wrapped inside a resource_ref
+    unified_resource second{};
     cuda::mr::resource_ref second_ref{second};
 
     CHECK((first == second_ref));
     CHECK(!(first != second_ref));
@@ -233,8 +233,8 @@ TEST_CASE("managed_memory_resource comparison", "[memory_resource]")
     CHECK(!(second_ref != first));
   }
 
-  { // comparison against a managed_memory_resource wrapped inside a async_resource_ref
-    managed_resource second{};
+  { // comparison against a unified_memory_resource wrapped inside a async_resource_ref
+    unified_resource second{};
     cuda::mr::async_resource_ref second_ref{second};
 
     CHECK((first == second_ref));
@@ -243,7 +243,7 @@ TEST_CASE("managed_memory_resource comparison", "[memory_resource]")
     CHECK(!(second_ref != first));
   }
 
-  { // comparison against a different managed_resource through resource_ref
+  { // comparison against a different unified_resource through resource_ref
    resource host_resource{};
    resource device_resource{};
    CHECK(!(first == host_resource));
@@ -257,7 +257,7 @@ TEST_CASE("managed_memory_resource comparison", "[memory_resource]")
    CHECK((device_resource != first));
  }
 
-  { // comparison against a different managed_resource through resource_ref
+  { // comparison against a different unified_resource through resource_ref
    resource host_async_resource{};
    resource device_async_resource{};
    CHECK(!(first == host_async_resource));
diff --git a/docs/cudax/memory_resource.rst b/docs/cudax/memory_resource.rst
index dc3f5a8f82b..11eb8ecfea8 100644
--- a/docs/cudax/memory_resource.rst
+++ b/docs/cudax/memory_resource.rst
@@ -8,24 +8,32 @@ Memory Resources
    :maxdepth: 3
 
    ${repo_docs_api_path}/*any__resource*
+   ${repo_docs_api_path}/*shared__resource*
    ${repo_docs_api_path}/enum*device__memory__pool*
    ${repo_docs_api_path}/struct*memory__pool__properties*
    ${repo_docs_api_path}/class*device__memory__pool*
    ${repo_docs_api_path}/class*device__memory__resource*
-   ${repo_docs_api_path}/*shared__resource*
+   ${repo_docs_api_path}/class*pinned__memory__resource*
+   ${repo_docs_api_path}/class*unified__memory__resource*
 
 The ```` header provides:
 
    -  :ref:`any_resource ` and :ref:`any_async_resource `
      type erased memory resources similar to ``std::any``. In contrast to :ref:`resource_ref ` they
      own the contained resource.
+   -  :ref:`shared_resource ` a type erased reference counted memory resource.
+      In contrast to :ref:`any_resource ` it additionally provides shared ownership
+      semantics.
   -  :ref:`device_memory_resource ` A standard C++ interface for *heterogeneous*,
      *stream-ordered* memory allocation tailored to the needs of CUDA C++ developers. This design builds off of
      the success of the `RAPIDS Memory Manager (RMM) `__ project and evolves the design based on lessons learned.
-   -  :ref:`shared_resource ` a type erased reference counted memory resource.
-      In contrast to :ref:`any_resource ` it additionally provides shared ownership
-      semantics.
+   -  :ref:`pinned_memory_resource `
+      A standard C++ interface for memory allocation of
+      `pinned memory `__.
+   -  :ref:`unified_memory_resource `
+      A standard C++ interface for memory allocation of
+      `unified memory `__.
 
 ```` is not intended to replace RMM, but instead moves the definition of the memory
 allocation interface to a more centralized home in CCCL. RMM will remain as a collection of implementations of
diff --git a/docs/cudax/stf.rst b/docs/cudax/stf.rst
index b4b1005f508..b9b5fc4c77d 100644
--- a/docs/cudax/stf.rst
+++ b/docs/cudax/stf.rst
@@ -327,7 +327,7 @@ high-bandwidth memory used by GPUs).
 CUDASTF refers to such conceptual data as *logical data*, an abstract
 handle for data that may get transparently transferred to or replicated
 over the different places used by CUDASTF tasks. When user code creates a logical data object from
-a user-provided object (e.g. an array of ``double``), they transfer the
+a user-provided object (e.g. an array of ``double``), they transfer the
 ownership of the original data to CUDASTF. As a result, any access to
 the original data should be performed through the logical data
 interface, as CUDASTF may transfer the logical data to a CUDA device
@@ -407,7 +407,7 @@ Write-back policy
 When a logical data object is destroyed, the original data instance is
 updated (unless the logical data was created without a reference value,
-e.g. from a shape). The result is only guaranteed to be available on the
+e.g. from a shape). The result is only guaranteed to be available on the
 corresponding data place when after the ``finalize()`` method was called
 on the context. Likewise, when calling ``finalize()`` a write-back
 mechanism is automatically issued on all logical data associated to the
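
For reference, a minimal usage sketch of the two resources touched by this patch. This is not part of the diff: the include path and class names follow the post-rename spellings introduced above, the ``main``-style driver is illustrative only, and the ``allocate`` / ``deallocate`` calls are assumed from the member functions shown in the hunks.

.. code-block:: cpp

   #include <cuda/experimental/memory_resource.cuh>

   namespace cudax = cuda::experimental;

   int main()
   {
     // Pinned host memory, backed by cudaMallocHost / cudaFreeHost.
     cudax::pinned_memory_resource pinned{};
     void* host_ptr = pinned.allocate(1024);
     pinned.deallocate(host_ptr, 1024);

     // Unified memory, backed by cudaMallocManaged / cudaFree. The constructor
     // accepts cudaMemAttachGlobal (the default) or cudaMemAttachHost.
     cudax::unified_memory_resource unified{cudaMemAttachHost};
     void* ptr = unified.allocate(1024);
     unified.deallocate(ptr, 1024);

     // Resources constructed with different flags compare unequal.
     cudax::unified_memory_resource defaulted{};
     return defaulted != unified ? 0 : 1;
   }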