Remove deprecated host_memory_layout
fknorr authored and psalz committed Jul 13, 2023
1 parent b0ebd00 commit f5e6510
Showing 2 changed files with 0 additions and 186 deletions.
102 changes: 0 additions & 102 deletions include/accessor.h
@@ -38,79 +38,6 @@ namespace detail {

namespace celerity {

/**
* Maps slices of the accessor backing buffer present on a host to the virtual global range of the Celerity buffer.
*/
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
class [[deprecated("host_memory_layout will be removed in favor of buffer_allocation_window in a future version of Celerity")]] host_memory_layout {
public:
/**
* Layout map for a single dimension describing the offset and strides of its hyperplanes.
*
* - A zero-dimensional layout corresponds to an individual data item and is not explicitly modelled in the dimension vector.
* - A one-dimensional layout is an interval of one-dimensional space and is fully described by global and local offsets and a count of data items (aka
* 0-dimensional hyperplanes).
* - A two-dimensional layout is modelled as an interval of rows, which manifests as an offset (a multiple of the row width) and a stride (the row width
* itself). Each row (aka 1-dimensional hyperplane) is modelled by the same one-dimensional layout.
 * - and so on for arbitrarily dimensioned layouts.
*/
class [[deprecated("host_memory_layout will be removed in favor of buffer_allocation_window in a future version of Celerity")]] dimension {
public:
dimension() noexcept = default;

dimension(size_t global_size, size_t global_offset, size_t local_size, size_t local_offset, size_t extent)
: m_global_size(global_size), m_global_offset(global_offset), m_local_size(local_size), m_local_offset(local_offset), m_extent(extent) {
assert(global_offset >= local_offset);
assert(global_size >= local_size);
}

size_t get_global_size() const { return m_global_size; }

size_t get_local_size() const { return m_local_size; }

size_t get_global_offset() const { return m_global_offset; }

size_t get_local_offset() const { return m_local_offset; }

size_t get_extent() const { return m_extent; }

private:
size_t m_global_size{};
size_t m_global_offset{};
size_t m_local_size{};
size_t m_local_offset{};
size_t m_extent{};
};

class [[deprecated("host_memory_layout will be removed in favor of buffer_allocation_window in a future version of Celerity")]] dimension_vector {
public:
dimension_vector(size_t size) : m_this_size(size) {}

dimension& operator[](size_t idx) { return m_values[idx]; }
const dimension& operator[](size_t idx) const { return m_values[idx]; }

size_t size() const { return m_this_size; }

private:
/**
* Since contiguous dimensions can be merged when generating the memory layout, host_memory_layout is not generic over a fixed dimension count
*/
constexpr static size_t max_dimensionality = 4;
std::array<dimension, max_dimensionality> m_values;
size_t m_this_size;
};

explicit host_memory_layout(const dimension_vector& dimensions) : m_dimensions(dimensions) {}

/** The layout maps per dimension, in descending dimensionality */
const dimension_vector& get_dimensions() const { return m_dimensions; }

private:
dimension_vector m_dimensions;
};
#pragma GCC diagnostic pop
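
For reference, a minimal sketch of how the dimension values documented above compose into an address (assuming the row-major, descending-dimensionality ordering described in the doc comment; flat_local_index and the includes are hypothetical illustration, not part of the Celerity API):

// #include "celerity.h"  // assumed Celerity entry-point header
#include <cassert>
#include <cstddef>
#include <vector>

// Hypothetical helper: computes the flat offset into the host-local backing
// allocation for an element addressed relative to the accessed extent.
size_t flat_local_index(const celerity::host_memory_layout& layout, const std::vector<size_t>& idx) {
	const auto& dims = layout.get_dimensions();
	assert(idx.size() == dims.size());
	size_t flat = 0;
	for(size_t d = 0; d < dims.size(); ++d) {
		assert(idx[d] < dims[d].get_extent());
		// local_offset places the accessed window inside the backing allocation,
		// which spans local_size elements in this dimension.
		flat = flat * dims[d].get_local_size() + dims[d].get_local_offset() + idx[d];
	}
	return flat;
}

The same element has global coordinate get_global_offset() + idx[d] in dimension d of the virtual buffer.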

/**
* In addition to the usual per-item access through the subscript operator, accessors in distributed and collective host tasks can access the underlying memory
* of the node-local copy of a buffer directly through `accessor::get_allocation_window()`. Celerity does not replicate buffers fully on all nodes unless
@@ -577,35 +504,6 @@ class accessor<DataT, Dims, Mode, target::host_task> : public detail::accessor_b
};
}

/**
* Returns a pointer to the host-local backing buffer along with a mapping to the global virtual buffer.
*
* Each host keeps only part of the global (virtual) buffer locally. The layout information can be used, for example, to perform distributed I/O on the
* partial buffer present at each host.
*/
// TODO remove this together with host_memory_layout after a grace period
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
template <int KernelDims>
[[deprecated("get_host_memory will be removed in a future version of Celerity. Use get_allocation_window instead")]] std::pair<DataT*, host_memory_layout>
get_host_memory(const partition<KernelDims>& part) const {
// We already know the range mapper output for "chunk" from the constructor. The parameter is a purely semantic dependency which ensures that
// this function is not called outside a host task.
(void)part;

host_memory_layout::dimension_vector dimensions(Dims);
for(int d = 0; d < Dims; ++d) {
dimensions[d] = {/* global_size */ m_virtual_buffer_range[d],
/* global_offset */ m_accessed_virtual_subrange.offset[d],
/* local_size */ m_backing_buffer_range[d],
/* local_offset */ m_accessed_virtual_subrange.offset[d] - m_backing_buffer_offset[d],
/* extent */ m_accessed_virtual_subrange.range[d]};
}

return {m_host_ptr, host_memory_layout{dimensions}};
}
#pragma GCC diagnostic pop

private:
// Subrange of the accessor, as set by the range mapper or requested by the user (master node host tasks only).
// This does not necessarily correspond to the backing buffer's range.
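
As a usage reference for the removed get_host_memory, a minimal sketch of the distributed-I/O pattern its documentation mentions: inside a host task, each node writes the part of a 1-D buffer that is present locally to its own file. The buffer size, the output file naming, the wrapper function, and the include are assumptions for illustration, not taken from the Celerity sources.

// #include "celerity.h"  // assumed Celerity entry-point header
#include <fstream>
#include <string>

void write_local_chunks() {
	celerity::distr_queue q;
	celerity::buffer<char, 1> buf(celerity::range<1>(1024)); // contents assumed to be produced by earlier tasks
	q.submit([=](celerity::handler& cgh) {
		auto acc = buf.get_access<cl::sycl::access::mode::read, celerity::target::host_task>(cgh, celerity::access::one_to_one{});
		cgh.host_task(buf.get_range(), celerity::id<1>(0), [=](celerity::partition<1> part) {
			auto [ptr, layout] = acc.get_host_memory(part); // deprecated API removed by this commit
			const auto& dim = layout.get_dimensions()[0];
			// The accessed window starts at local_offset within the backing allocation and
			// covers extent elements, i.e. global indices [global_offset, global_offset + extent).
			std::ofstream out("chunk_" + std::to_string(dim.get_global_offset()) + ".bin", std::ios::binary);
			out.write(ptr + dim.get_local_offset(), static_cast<std::streamsize>(dim.get_extent()));
		});
	});
}

After this commit, the same information is available through accessor::get_allocation_window(), which replaces host_memory_layout with buffer_allocation_window.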
84 changes: 0 additions & 84 deletions test/runtime_deprecation_tests.cc
@@ -49,90 +49,6 @@ namespace detail {
CHECK(all<3, 1>{}(chunk3d, range1d) == subrange1d);
}

TEST_CASE_METHOD(test_utils::runtime_fixture, "deprecated host_memory_layout continues to work", "[task][deprecated]") {
distr_queue q;

std::vector<char> memory1d(10);
buffer<char, 1> buf1d(memory1d.data(), range<1>(10));

q.submit([=](handler& cgh) {
auto b = buf1d.get_access<cl::sycl::access::mode::discard_write, target::host_task>(cgh, all{});
cgh.host_task(on_master_node, [=](partition<0> part) {
auto [ptr, layout] = b.get_host_memory(part);
auto& dims = layout.get_dimensions();
REQUIRE(dims.size() == 1);
CHECK(dims[0].get_global_offset() == 0);
CHECK(dims[0].get_local_offset() == 0);
CHECK(dims[0].get_global_size() == 10);
CHECK(dims[0].get_local_size() >= 10);
CHECK(dims[0].get_extent() == 10);
});
});

q.submit([=](handler& cgh) {
auto b = buf1d.get_access<cl::sycl::access::mode::discard_write, target::host_task>(cgh, one_to_one{});
cgh.host_task(range<1>(6), id<1>(2), [=](partition<1> part) {
auto [ptr, layout] = b.get_host_memory(part);
auto& dims = layout.get_dimensions();
REQUIRE(dims.size() == 1);
CHECK(dims[0].get_global_offset() == 2);
CHECK(dims[0].get_local_offset() <= 2);
CHECK(dims[0].get_global_size() == 10);
CHECK(dims[0].get_local_size() >= 6);
CHECK(dims[0].get_local_size() <= 10);
CHECK(dims[0].get_extent() == 6);
});
});

std::vector<char> memory2d(10 * 10);
buffer<char, 2> buf2d(memory2d.data(), range<2>(10, 10));

q.submit([=](handler& cgh) {
auto b = buf2d.get_access<cl::sycl::access::mode::discard_write, target::host_task>(cgh, one_to_one{});
cgh.host_task(range<2>(5, 6), id<2>(1, 2), [=](partition<2> part) {
auto [ptr, layout] = b.get_host_memory(part);
auto& dims = layout.get_dimensions();
REQUIRE(dims.size() == 2);
CHECK(dims[0].get_global_offset() == 1);
CHECK(dims[0].get_global_size() == 10);
CHECK(dims[0].get_local_offset() <= 1);
CHECK(dims[0].get_local_size() >= 6);
CHECK(dims[0].get_local_size() <= 10);
CHECK(dims[0].get_extent() == 5);
CHECK(dims[1].get_global_offset() == 2);
CHECK(dims[1].get_global_size() == 10);
CHECK(dims[1].get_extent() == 6);
});
});

std::vector<char> memory3d(10 * 10 * 10);
buffer<char, 3> buf3d(memory3d.data(), range<3>(10, 10, 10));

q.submit([=](handler& cgh) {
auto b = buf3d.get_access<cl::sycl::access::mode::discard_write, target::host_task>(cgh, one_to_one{});
cgh.host_task(range<3>(5, 6, 7), id<3>(1, 2, 3), [=](partition<3> part) {
auto [ptr, layout] = b.get_host_memory(part);
auto& dims = layout.get_dimensions();
REQUIRE(dims.size() == 3);
CHECK(dims[0].get_global_offset() == 1);
CHECK(dims[0].get_local_offset() <= 1);
CHECK(dims[0].get_global_size() == 10);
CHECK(dims[0].get_local_size() >= 5);
CHECK(dims[0].get_local_size() <= 10);
CHECK(dims[0].get_extent() == 5);
CHECK(dims[1].get_global_offset() == 2);
CHECK(dims[1].get_local_offset() <= 2);
CHECK(dims[1].get_global_size() == 10);
CHECK(dims[1].get_local_size() >= 6);
CHECK(dims[1].get_local_size() <= 10);
CHECK(dims[1].get_extent() == 6);
CHECK(dims[2].get_global_offset() == 3);
CHECK(dims[2].get_global_size() == 10);
CHECK(dims[2].get_extent() == 7);
});
});
}

TEST_CASE_METHOD(test_utils::runtime_fixture,
"distr_queue::submit(allow_by_ref_t, ...) and creation of accessors/side-effects/reductions from const buffers/host-objects continues to work",
"[handler][deprecated]") {
