Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Rename managed_memory_resource to unified_memory_resource #4103

Open
wants to merge 2 commits into
base: main
Choose a base branch
from
Open
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
@@ -4,7 +4,7 @@
// under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES.
// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES.
//
//===----------------------------------------------------------------------===//

@@ -37,11 +37,19 @@
#include <cuda/experimental/__memory_resource/properties.cuh>

//! @file
//! The \c managed_memory_resource class provides a memory resource that allocates pinned memory.
//! The \c pinned_memory_resource class provides a memory resource that allocates pinned memory.
namespace cuda::experimental
{

//! @brief pinned_memory_resource uses `cudaMallocHost` / `cudaFreeHost` for allocation / deallocation.
//! @rst
//! .. _cudax-memory-resource-pinned-memory-resource:
//!
//! Pinned memory resource
//! ----------------------
//!
//! ``pinned_memory_resource`` uses `cudaMallocHost` / `cudaFreeHost` for allocation / deallocation.
//!
//! @endrst
class pinned_memory_resource
{
private:
Original file line number Diff line number Diff line change
@@ -4,12 +4,12 @@
// under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES.
// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES.
//
//===----------------------------------------------------------------------===//

#ifndef _CUDAX__MEMORY_RESOURCE_MANAGED_MEMORY_RESOURCE_CUH
#define _CUDAX__MEMORY_RESOURCE_MANAGED_MEMORY_RESOURCE_CUH
#ifndef _CUDAX__MEMORY_RESOURCE_UNIFIED_MEMORY_RESOURCE_CUH
#define _CUDAX__MEMORY_RESOURCE_UNIFIED_MEMORY_RESOURCE_CUH

#include <cuda/std/detail/__config>

@@ -36,23 +36,31 @@
#include <cuda/experimental/__memory_resource/properties.cuh>

//! @file
//! The \c managed_memory_resource class provides a memory resource that allocates managed memory.
//! The \c unified_memory_resource class provides a memory resource that allocates unified memory.
namespace cuda::experimental
{

//! @brief \c managed_memory_resource uses `cudaMallocManaged` / `cudaFree` for allocation / deallocation.
class managed_memory_resource
//! @rst
//! .. _cudax-memory-resource-unified-memory-resource:
//!
//! Unified memory resource
//! -----------------------
//!
//! ``unified_memory_resource`` uses `cudaMallocManaged` / `cudaFree` for allocation / deallocation.
//!
//! @endrst
class unified_memory_resource
{
private:
unsigned int __flags_ = cudaMemAttachGlobal;

static constexpr unsigned int __available_flags = cudaMemAttachGlobal | cudaMemAttachHost;

public:
constexpr managed_memory_resource(const unsigned int __flags = cudaMemAttachGlobal) noexcept
constexpr unified_memory_resource(const unsigned int __flags = cudaMemAttachGlobal) noexcept
: __flags_(__flags & __available_flags)
{
_CCCL_ASSERT(__flags_ == __flags, "Unexpected flags passed to managed_memory_resource");
_CCCL_ASSERT(__flags_ == __flags, "Unexpected flags passed to unified_memory_resource");
}

//! @brief Allocate CUDA unified memory of size at least \p __bytes.
@@ -66,7 +74,7 @@ public:
// We need to ensure that the provided alignment matches the minimal provided alignment
if (!__is_valid_alignment(__alignment))
{
_CUDA_VSTD::__throw_invalid_argument("Invalid alignment passed to managed_memory_resource::allocate.");
_CUDA_VSTD::__throw_invalid_argument("Invalid alignment passed to unified_memory_resource::allocate.");
}

void* __ptr{nullptr};
@@ -107,8 +115,8 @@ public:
void* __ptr, const size_t, const size_t __alignment = _CUDA_VMR::default_cuda_malloc_alignment) const noexcept
{
// We need to ensure that the provided alignment matches the minimal provided alignment
_CCCL_ASSERT(__is_valid_alignment(__alignment), "Invalid alignment passed to managed_memory_resource::deallocate.");
_CCCL_ASSERT_CUDA_API(::cudaFree, "managed_memory_resource::deallocate failed", __ptr);
_CCCL_ASSERT(__is_valid_alignment(__alignment), "Invalid alignment passed to unified_memory_resource::deallocate.");
_CCCL_ASSERT_CUDA_API(::cudaFree, "unified_memory_resource::deallocate failed", __ptr);
(void) __alignment;
}

@@ -142,18 +150,18 @@ public:
(void) __stream;
}

//! @brief Equality comparison with another \c managed_memory_resource.
//! @param __other The other \c managed_memory_resource.
//! @return Whether both \c managed_memory_resource were constructed with the same flags.
_CCCL_NODISCARD constexpr bool operator==(managed_memory_resource const& __other) const noexcept
//! @brief Equality comparison with another \c unified_memory_resource.
//! @param __other The other \c unified_memory_resource.
//! @return Whether both \c unified_memory_resource were constructed with the same flags.
_CCCL_NODISCARD constexpr bool operator==(unified_memory_resource const& __other) const noexcept
{
return __flags_ == __other.__flags_;
}
#if _CCCL_STD_VER <= 2017
//! @brief Inequality comparison with another \c managed_memory_resource.
//! @param __other The other \c managed_memory_resource.
//! @return Whether both \c managed_memory_resource were constructed with different flags.
_CCCL_NODISCARD constexpr bool operator!=(managed_memory_resource const& __other) const noexcept
//! @brief Inequality comparison with another \c unified_memory_resource.
//! @param __other The other \c unified_memory_resource.
//! @return Whether both \c unified_memory_resource were constructed with different flags.
_CCCL_NODISCARD constexpr bool operator!=(unified_memory_resource const& __other) const noexcept
{
return __flags_ != __other.__flags_;
}
@@ -167,12 +175,12 @@ private:
{
if constexpr (has_property<_Resource, device_accessible>)
{
return resource_ref<device_accessible>{*const_cast<managed_memory_resource*>(this)}
return resource_ref<device_accessible>{*const_cast<unified_memory_resource*>(this)}
== __cudax::__as_resource_ref<device_accessible>(const_cast<_Resource&>(__rhs));
}
else if constexpr (has_property<_Resource, host_accessible>)
{
return resource_ref<host_accessible>{*const_cast<managed_memory_resource*>(this)}
return resource_ref<host_accessible>{*const_cast<unified_memory_resource*>(this)}
== __cudax::__as_resource_ref<host_accessible>(const_cast<_Resource&>(__rhs));
}
else
@@ -183,50 +191,50 @@ private:

public:
# if _CCCL_STD_VER >= 2020
//! @brief Equality comparison between a \c managed_memory_resource and another resource
//! @brief Equality comparison between a \c unified_memory_resource and another resource
//! @param __rhs The resource to compare to
//! @return If the underlying types are equality comparable, returns the result of equality comparison of both
//! resources. Otherwise, returns false.
template <class _Resource>
requires _CUDA_VMR::__different_resource<managed_memory_resource, _Resource>
requires _CUDA_VMR::__different_resource<unified_memory_resource, _Resource>
_CCCL_NODISCARD bool operator==(_Resource const& __rhs) const noexcept
{
return this->__equal_to(__rhs);
}
# else // ^^^ C++20 ^^^ / vvv C++17
template <class _Resource>
_CCCL_NODISCARD_FRIEND auto operator==(managed_memory_resource const& __lhs, _Resource const& __rhs) noexcept
_CCCL_TRAILING_REQUIRES(bool)(_CUDA_VMR::__different_resource<managed_memory_resource, _Resource>)
_CCCL_NODISCARD_FRIEND auto operator==(unified_memory_resource const& __lhs, _Resource const& __rhs) noexcept
_CCCL_TRAILING_REQUIRES(bool)(_CUDA_VMR::__different_resource<unified_memory_resource, _Resource>)
{
return __lhs.__equal_to(__rhs);
}

template <class _Resource>
_CCCL_NODISCARD_FRIEND auto operator==(_Resource const& __lhs, managed_memory_resource const& __rhs) noexcept
_CCCL_TRAILING_REQUIRES(bool)(_CUDA_VMR::__different_resource<managed_memory_resource, _Resource>)
_CCCL_NODISCARD_FRIEND auto operator==(_Resource const& __lhs, unified_memory_resource const& __rhs) noexcept
_CCCL_TRAILING_REQUIRES(bool)(_CUDA_VMR::__different_resource<unified_memory_resource, _Resource>)
{
return __rhs.__equal_to(__lhs);
}

template <class _Resource>
_CCCL_NODISCARD_FRIEND auto operator!=(managed_memory_resource const& __lhs, _Resource const& __rhs) noexcept
_CCCL_TRAILING_REQUIRES(bool)(_CUDA_VMR::__different_resource<managed_memory_resource, _Resource>)
_CCCL_NODISCARD_FRIEND auto operator!=(unified_memory_resource const& __lhs, _Resource const& __rhs) noexcept
_CCCL_TRAILING_REQUIRES(bool)(_CUDA_VMR::__different_resource<unified_memory_resource, _Resource>)
{
return !__lhs.__equal_to(__rhs);
}

template <class _Resource>
_CCCL_NODISCARD_FRIEND auto operator!=(_Resource const& __lhs, managed_memory_resource const& __rhs) noexcept
_CCCL_TRAILING_REQUIRES(bool)(_CUDA_VMR::__different_resource<managed_memory_resource, _Resource>)
_CCCL_NODISCARD_FRIEND auto operator!=(_Resource const& __lhs, unified_memory_resource const& __rhs) noexcept
_CCCL_TRAILING_REQUIRES(bool)(_CUDA_VMR::__different_resource<unified_memory_resource, _Resource>)
{
return !__rhs.__equal_to(__lhs);
}
# endif // _CCCL_STD_VER <= 2017

//! @brief Enables the \c device_accessible property
friend constexpr void get_property(managed_memory_resource const&, device_accessible) noexcept {}
friend constexpr void get_property(unified_memory_resource const&, device_accessible) noexcept {}
//! @brief Enables the \c host_accessible property
friend constexpr void get_property(managed_memory_resource const&, host_accessible) noexcept {}
friend constexpr void get_property(unified_memory_resource const&, host_accessible) noexcept {}
#endif // _CCCL_DOXYGEN_INVOKED

//! @brief Checks whether the passed in alignment is valid
@@ -236,9 +244,9 @@ public:
&& (_CUDA_VMR::default_cuda_malloc_alignment % __alignment == 0);
}
};
static_assert(_CUDA_VMR::async_resource_with<managed_memory_resource, device_accessible>, "");
static_assert(_CUDA_VMR::async_resource_with<managed_memory_resource, host_accessible>, "");
static_assert(_CUDA_VMR::async_resource_with<unified_memory_resource, device_accessible>, "");
static_assert(_CUDA_VMR::async_resource_with<unified_memory_resource, host_accessible>, "");

} // namespace cuda::experimental

#endif //_CUDAX__MEMORY_RESOURCE_MANAGED_MEMORY_RESOURCE_CUH
#endif //_CUDAX__MEMORY_RESOURCE_UNIFIED_MEMORY_RESOURCE_CUH
4 changes: 2 additions & 2 deletions cudax/include/cuda/experimental/memory_resource.cuh
Original file line number Diff line number Diff line change
@@ -4,7 +4,7 @@
// under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES.
// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES.
//
//===----------------------------------------------------------------------===//

@@ -15,9 +15,9 @@
#include <cuda/experimental/__memory_resource/device_memory_pool.cuh>
#include <cuda/experimental/__memory_resource/device_memory_resource.cuh>
#include <cuda/experimental/__memory_resource/get_memory_resource.cuh>
#include <cuda/experimental/__memory_resource/managed_memory_resource.cuh>
#include <cuda/experimental/__memory_resource/pinned_memory_resource.cuh>
#include <cuda/experimental/__memory_resource/properties.cuh>
#include <cuda/experimental/__memory_resource/shared_resource.cuh>
#include <cuda/experimental/__memory_resource/unified_memory_resource.cuh>

#endif // __CUDAX_MEMORY_RESOURCE___
2 changes: 1 addition & 1 deletion cudax/test/CMakeLists.txt
Original file line number Diff line number Diff line change
@@ -114,7 +114,7 @@ foreach(cn_target IN LISTS cudax_TARGETS)
memory_resource/device_memory_pool.cu
memory_resource/device_memory_resource.cu
memory_resource/get_memory_resource.cu
memory_resource/managed_memory_resource.cu
memory_resource/unified_memory_resource.cu
memory_resource/pinned_memory_resource.cu
memory_resource/shared_resource.cu
)
10 changes: 5 additions & 5 deletions cudax/test/algorithm/copy.cu
Original file line number Diff line number Diff line change
@@ -4,7 +4,7 @@
// under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES.
// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES.
//
//===----------------------------------------------------------------------===//

@@ -44,14 +44,14 @@ TEST_CASE("1d Copy", "[data_manipulation]")
}
}

SECTION("Host and managed resource")
SECTION("Host and unified resource")
{
cudax::managed_memory_resource managed_resource;
cudax::unified_memory_resource unified_resource;
cudax::pinned_memory_resource host_resource;

{
cudax::uninitialized_buffer<int, cuda::mr::host_accessible> host_buffer(host_resource, buffer_size);
cudax::uninitialized_buffer<int, cuda::mr::device_accessible> device_buffer(managed_resource, buffer_size);
cudax::uninitialized_buffer<int, cuda::mr::device_accessible> device_buffer(unified_resource, buffer_size);

cudax::fill_bytes(_stream, host_buffer, fill_byte);

@@ -64,7 +64,7 @@ TEST_CASE("1d Copy", "[data_manipulation]")

{
cudax::uninitialized_buffer<int, cuda::mr::host_accessible> not_yet_const_host_buffer(host_resource, buffer_size);
cudax::uninitialized_buffer<int, cuda::mr::device_accessible> device_buffer(managed_resource, buffer_size);
cudax::uninitialized_buffer<int, cuda::mr::device_accessible> device_buffer(unified_resource, buffer_size);
cudax::fill_bytes(_stream, not_yet_const_host_buffer, fill_byte);

const auto& const_host_buffer = not_yet_const_host_buffer;
4 changes: 2 additions & 2 deletions cudax/test/containers/uninitialized_buffer.cu
Original file line number Diff line number Diff line change
@@ -4,7 +4,7 @@
// under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES.
// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES.
//
//===----------------------------------------------------------------------===//

@@ -123,7 +123,7 @@ TEMPLATE_TEST_CASE(
{
static_assert(!cuda::std::is_copy_assignable<uninitialized_buffer>::value, "");
{
cudax::managed_memory_resource other_resource{};
cudax::unified_memory_resource other_resource{};
uninitialized_buffer input{other_resource, 42};
uninitialized_buffer buf{resource, 1337};
const auto* old_ptr = buf.data();
10 changes: 5 additions & 5 deletions cudax/test/memory_resource/any_resource.cu
Original file line number Diff line number Diff line change
@@ -4,7 +4,7 @@
// under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES.
// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES.
//
//===----------------------------------------------------------------------===//

@@ -116,10 +116,10 @@ TEMPLATE_TEST_CASE_METHOD(test_fixture, "any_resource", "[container][resource]",
Counts expected{};
CHECK(this->counts == expected);
{
cudax::managed_memory_resource managed1{}, managed2{};
CHECK(managed1 == managed2);
cudax::any_resource<cudax::device_accessible> mr{managed1};
CHECK(mr == managed1);
cudax::unified_memory_resource unified1{}, unified2{};
CHECK(unified1 == unified2);
cudax::any_resource<cudax::device_accessible> mr{unified1};
CHECK(mr == unified1);
}
CHECK(this->counts == expected);
}
Loading
Loading