Use size_t for element counts & byte sizes #1007

Merged
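This PR switches element counts and byte sizes (bytes(), count(), offset_bytes(), and the related offset parameters) from TensorIndex to std::size_t across the tensor and device-memory classes. A plausible motivation, stated here as an assumption rather than quoted from the PR description, is consistency with the unsigned std::size_t sizes used by the standard library and rmm::device_buffer, which removes implicit signed/unsigned conversions at those call sites. Below is a minimal standalone sketch of the kind of mixed-sign comparison this avoids; the TensorIndex alias in it is an assumed stand-in, not the Morpheus definition:

```cpp
#include <cstdint>
#include <iostream>
#include <vector>

// Assumed stand-in for morpheus::TensorIndex (a signed index type).
using TensorIndex = std::int64_t;

int main()
{
    std::vector<std::uint8_t> buffer(1024);
    TensorIndex count = -1;  // imagine an upstream arithmetic bug

    // In this mixed comparison the signed value converts to std::size_t and
    // becomes 18446744073709551615, so the test is false even though -1 < 1024
    // mathematically; compilers flag it with -Wsign-compare. Keeping counts and
    // byte sizes in std::size_t end to end avoids the mixed comparison.
    if (count < buffer.size())
    {
        std::cout << "fits" << std::endl;
    }
    else
    {
        std::cout << "does not fit" << std::endl;  // this branch runs
    }
    return 0;
}
```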
23 changes: 12 additions & 11 deletions morpheus/_lib/include/morpheus/objects/dev_mem_info.hpp
@@ -24,7 +24,8 @@

#include <rmm/device_buffer.hpp> // for device_buffer

#include <memory> // for shared_ptr, unique_ptr & make_unique
#include <cstddef> // for size_t
#include <memory> // for shared_ptr, unique_ptr & make_unique

namespace morpheus {
/****** Component public implementations *******************/
@@ -58,7 +59,7 @@ class MORPHEUS_EXPORT DevMemInfo
std::shared_ptr<MemoryDescriptor> md,
ShapeType shape,
ShapeType stride,
TensorIndex offset_bytes = 0);
std::size_t offset_bytes = 0);

/**
* @brief Construct a new DevMemInfo object from an existing `rmm::device_buffer`.
@@ -73,29 +74,29 @@ class MORPHEUS_EXPORT DevMemInfo
DType dtype,
ShapeType shape,
ShapeType stride,
TensorIndex offset_bytes = 0);
std::size_t offset_bytes = 0);
DevMemInfo(DevMemInfo&& other) = default;

/**
* @brief Return the number of bytes stored in the underlying buffer
*
* @return TensorIndex
* @return std::size_t
*/
TensorIndex bytes() const;
std::size_t bytes() const;

/**
* @brief Return the element count stored in the underlying buffer
*
* @return TensorIndex
* @return std::size_t
*/
TensorIndex count() const;
std::size_t count() const;

/**
* @brief Return the number of bytes offset from the head of the buffer
*
* @return TensorIndex
* @return std::size_t
*/
TensorIndex offset_bytes() const;
std::size_t offset_bytes() const;

/**
* @brief Return the type of the data stored in the buffer
@@ -161,7 +162,7 @@ class MORPHEUS_EXPORT DevMemInfo
* @param bytes
* @return std::unique_ptr<rmm::device_buffer>
*/
std::unique_ptr<rmm::device_buffer> make_new_buffer(TensorIndex bytes) const;
std::unique_ptr<rmm::device_buffer> make_new_buffer(std::size_t bytes) const;

private:
// Pointer to the head of our data
@@ -175,7 +176,7 @@ class MORPHEUS_EXPORT DevMemInfo
const ShapeType m_stride;

// Offset from head of data in bytes
const TensorIndex m_offset_bytes;
const std::size_t m_offset_bytes;

// Device resources used to allocate this memory
std::shared_ptr<MemoryDescriptor> m_md;
11 changes: 6 additions & 5 deletions morpheus/_lib/include/morpheus/objects/rmm_tensor.hpp
@@ -24,6 +24,7 @@

#include <rmm/device_buffer.hpp>

#include <cstddef> // for size_t
#include <cstdint>
#include <memory>
#include <vector>
@@ -46,7 +47,7 @@ class RMMTensor : public ITensor
{
public:
RMMTensor(std::shared_ptr<rmm::device_buffer> device_buffer,
TensorIndex offset,
std::size_t offset,
DType dtype,
ShapeType shape,
ShapeType stride = {});
@@ -102,12 +103,12 @@ class RMMTensor : public ITensor
/**
* TODO(Documentation)
*/
TensorIndex bytes() const final;
std::size_t bytes() const final;

/**
* TODO(Documentation)
*/
TensorIndex count() const final;
std::size_t count() const final;

/**
* TODO(Documentation)
@@ -152,12 +153,12 @@ class RMMTensor : public ITensor
/**
* TODO(Documentation)
*/
TensorIndex offset_bytes() const;
std::size_t offset_bytes() const;

// Memory info
std::shared_ptr<MemoryDescriptor> m_mem_descriptor;
std::shared_ptr<rmm::device_buffer> m_md;
TensorIndex m_offset;
std::size_t m_offset;

// // Type info
// std::string m_typestr;
8 changes: 4 additions & 4 deletions morpheus/_lib/include/morpheus/objects/tensor.hpp
@@ -52,7 +52,7 @@ class Tensor
std::string init_typestr,
ShapeType init_shape,
ShapeType init_strides,
TensorIndex init_offset = 0);
std::size_t init_offset = 0);

ShapeType shape;
ShapeType strides;
@@ -66,7 +66,7 @@ class Tensor
/**
* TODO(Documentation)
*/
TensorIndex bytes_count() const;
std::size_t bytes_count() const;

/**
* TODO(Documentation)
@@ -85,10 +85,10 @@ class Tensor
DType dtype,
ShapeType shape,
ShapeType strides,
TensorIndex offset = 0);
std::size_t offset = 0);

private:
TensorIndex m_offset;
std::size_t m_offset;
std::shared_ptr<rmm::device_buffer> m_device_buffer;
};

8 changes: 4 additions & 4 deletions morpheus/_lib/include/morpheus/objects/tensor_object.hpp
@@ -108,7 +108,7 @@ struct ITensorStorage
virtual void* data() const = 0;

// virtual const void* data() const = 0;
virtual TensorIndex bytes() const = 0;
virtual std::size_t bytes() const = 0;

virtual std::shared_ptr<MemoryDescriptor> get_memory() const = 0;
// virtual TensorStorageType storage_type() const = 0;
Expand Down Expand Up @@ -136,7 +136,7 @@ struct ITensor : public ITensorStorage, public ITensorOperations

virtual RankType rank() const = 0;

virtual TensorIndex count() const = 0;
virtual std::size_t count() const = 0;

virtual DType dtype() const = 0;

@@ -200,12 +200,12 @@ struct TensorObject final
return m_tensor->dtype();
}

TensorIndex count() const
std::size_t count() const
{
return m_tensor->count();
}

TensorIndex bytes() const
std::size_t bytes() const
{
return m_tensor->bytes();
}
11 changes: 7 additions & 4 deletions morpheus/_lib/include/morpheus/utilities/tensor_util.hpp
@@ -20,7 +20,8 @@
#include "morpheus/export.h"
#include "morpheus/types.hpp" // for ShapeType, TensorIndex

#include <algorithm> // IWYU pragma: keep
#include <algorithm> // IWYU pragma: keep
#include <cstddef>
#include <functional> // for multiplies
#include <iosfwd> // for ostream
#include <numeric> // for accumulate
@@ -111,13 +112,15 @@ struct MORPHEUS_EXPORT TensorUtils
* @brief Compute the number of elements in a tensor based on the shape
*
* @tparam IndexT
* @tparam RetTypeT
* @param shape
* @return IndexT
*/
template <typename IndexT>
static inline IndexT get_elem_count(const std::vector<IndexT>& shape)
template <typename IndexT, typename RetTypeT = std::size_t>
static inline RetTypeT get_elem_count(const std::vector<IndexT>& shape)
{
return std::accumulate(shape.begin(), shape.end(), 1, std::multiplies<>());
RetTypeT init_val{1};
return std::accumulate(shape.begin(), shape.end(), init_val, std::multiplies<>());
}
};
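A note on the get_elem_count change above: std::accumulate does its arithmetic in the type of the initial value, not in the container's element type, which is why the new overload passes an explicitly typed init_val rather than the int literal 1. A small standalone illustration (not Morpheus code):

```cpp
#include <cstddef>
#include <cstdint>
#include <functional>
#include <numeric>
#include <type_traits>
#include <vector>

int main()
{
    std::vector<std::int32_t> shape{2, 3, 4};

    // The accumulation type follows the initial value, not the element type.
    auto as_int  = std::accumulate(shape.begin(), shape.end(), 1, std::multiplies<>());
    auto as_size = std::accumulate(shape.begin(), shape.end(), std::size_t{1}, std::multiplies<>());

    static_assert(std::is_same_v<decltype(as_int), int>);          // the old behaviour: 32-bit arithmetic
    static_assert(std::is_same_v<decltype(as_size), std::size_t>); // the new behaviour: size_t arithmetic

    return static_cast<int>(as_size);  // 24 elements
}
```

With the old int literal, a shape such as {70'000, 70'000} would overflow the 32-bit intermediate product even though each extent fits comfortably in IndexT.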

14 changes: 8 additions & 6 deletions morpheus/_lib/src/objects/dev_mem_info.cpp
@@ -17,10 +17,12 @@

#include "morpheus/objects/dev_mem_info.hpp"

#include "morpheus/types.hpp"
#include "morpheus/utilities/tensor_util.hpp" // for get_elem_count

#include <glog/logging.h> // for DCHECK

#include <cstddef>
#include <cstdint> // for uint8_t
#include <memory>
#include <ostream>
@@ -34,7 +36,7 @@ DevMemInfo::DevMemInfo(void* data,
std::shared_ptr<MemoryDescriptor> md,
ShapeType shape,
ShapeType stride,
TensorIndex offset_bytes) :
std::size_t offset_bytes) :
m_data(data),
m_dtype(std::move(dtype)),
m_md(std::move(md)),
@@ -49,7 +51,7 @@ DevMemInfo::DevMemInfo(std::shared_ptr<rmm::device_buffer> buffer,
DType dtype,
ShapeType shape,
ShapeType stride,
TensorIndex offset_bytes) :
std::size_t offset_bytes) :
m_data(buffer->data()),
m_dtype(std::move(dtype)),
m_shape(std::move(shape)),
@@ -61,17 +63,17 @@ DevMemInfo::DevMemInfo(std::shared_ptr<rmm::device_buffer> buffer,
<< "Inconsistent dimensions, values would extend past the end of the device_buffer";
}

TensorIndex DevMemInfo::bytes() const
std::size_t DevMemInfo::bytes() const
{
return count() * m_dtype.item_size();
}

TensorIndex DevMemInfo::count() const
std::size_t DevMemInfo::count() const
{
return TensorUtils::get_elem_count(m_shape);
}

TensorIndex DevMemInfo::offset_bytes() const
std::size_t DevMemInfo::offset_bytes() const
{
return m_offset_bytes;
}
@@ -111,7 +113,7 @@ std::shared_ptr<MemoryDescriptor> DevMemInfo::memory() const
return m_md;
}

std::unique_ptr<rmm::device_buffer> DevMemInfo::make_new_buffer(TensorIndex bytes) const
std::unique_ptr<rmm::device_buffer> DevMemInfo::make_new_buffer(std::size_t bytes) const
{
return std::make_unique<rmm::device_buffer>(bytes, m_md->cuda_stream, m_md->memory_resource);
}
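For context on make_new_buffer: rmm::device_buffer already measures its size in std::size_t, so the new signature feeds the allocation call without conversion. Below is a minimal, standalone sketch of that allocation pattern; it leans on RMM defaults instead of the stream and memory resource that DevMemInfo pulls from its MemoryDescriptor, and depending on the RMM version the memory resource may need to be passed explicitly:

```cpp
#include <cstddef>
#include <memory>

#include <rmm/cuda_stream_view.hpp>  // for cuda_stream_per_thread
#include <rmm/device_buffer.hpp>

// Standalone analogue of DevMemInfo::make_new_buffer: allocate `bytes` bytes on
// the device, using the per-thread default stream and RMM's current default
// device memory resource.
std::unique_ptr<rmm::device_buffer> make_buffer_sketch(std::size_t bytes)
{
    return std::make_unique<rmm::device_buffer>(bytes, rmm::cuda_stream_per_thread);
}
```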
10 changes: 5 additions & 5 deletions morpheus/_lib/src/objects/rmm_tensor.cpp
@@ -29,7 +29,7 @@
#include <rmm/cuda_stream_view.hpp> // for cuda_stream_per_thread
#include <rmm/device_buffer.hpp>

#include <algorithm> // for copy, transform
#include <algorithm> // for copy, transform
#include <functional> // for multiplies, plus, minus
#include <iterator> // for back_insert_iterator, back_inserter
#include <memory>
@@ -42,7 +42,7 @@ namespace morpheus {
/****** Component public implementations *******************/
/****** RMMTensor****************************************/
RMMTensor::RMMTensor(std::shared_ptr<rmm::device_buffer> device_buffer,
TensorIndex offset,
std::size_t offset,
DType dtype,
ShapeType shape,
ShapeType stride) :
@@ -82,12 +82,12 @@ DType RMMTensor::dtype() const
return m_dtype;
}

TensorIndex RMMTensor::count() const
std::size_t RMMTensor::count() const
{
return TensorUtils::get_elem_count(m_shape);
}

TensorIndex RMMTensor::bytes() const
std::size_t RMMTensor::bytes() const
{
return count() * m_dtype.item_size();
}
@@ -175,7 +175,7 @@ std::shared_ptr<ITensor> RMMTensor::as_type(DType new_dtype) const
return std::make_shared<RMMTensor>(new_data_buffer, 0, new_dtype, m_shape, m_stride);
}

TensorIndex RMMTensor::offset_bytes() const
std::size_t RMMTensor::offset_bytes() const
{
return m_offset * m_dtype.item_size();
}
6 changes: 3 additions & 3 deletions morpheus/_lib/src/objects/tensor.cpp
@@ -37,7 +37,7 @@ Tensor::Tensor(std::shared_ptr<rmm::device_buffer> buffer,
std::string init_typestr,
ShapeType init_shape,
ShapeType init_strides,
TensorIndex init_offset) :
std::size_t init_offset) :
m_device_buffer(std::move(buffer)),
typestr(std::move(init_typestr)),
shape(std::move(init_shape)),
@@ -50,7 +50,7 @@ void* Tensor::data() const
return static_cast<uint8_t*>(m_device_buffer->data()) + m_offset;
}

TensorIndex Tensor::bytes_count() const
std::size_t Tensor::bytes_count() const
{
// temp just return without shape, size, offset, etc
return m_device_buffer->size();
@@ -73,7 +73,7 @@ auto Tensor::get_stream() const
}

TensorObject Tensor::create(
std::shared_ptr<rmm::device_buffer> buffer, DType dtype, ShapeType shape, ShapeType strides, TensorIndex offset)
std::shared_ptr<rmm::device_buffer> buffer, DType dtype, ShapeType shape, ShapeType strides, std::size_t offset)
{
auto md = std::make_shared<MemoryDescriptor>(buffer->stream(), buffer->memory_resource());

9 changes: 6 additions & 3 deletions morpheus/_lib/src/utilities/cupy_util.cpp
@@ -80,8 +80,11 @@ pybind11::object CupyUtil::tensor_to_cupy(const TensorObject& tensor)

auto ptr = (uintptr_t)tensor.data();
auto nbytes = tensor.bytes();
auto owner = py_tensor;
int dev_id = -1;

DCHECK(nbytes > 0);

auto owner = py_tensor;
int dev_id = -1;

pybind11::list shape_list;
pybind11::list stride_list;
@@ -132,7 +135,7 @@ TensorObject CupyUtil::cupy_to_tensor(pybind11::object cupy_array)
auto dtype = DType::from_numpy(typestr);

// Get the size from the shape and dtype
auto size = static_cast<size_t>(TensorUtils::get_elem_count(shape)) * dtype.item_size();
auto size = TensorUtils::get_elem_count(shape) * dtype.item_size();

// Finally, handle the stream
auto stream_value = arr_interface["stream"].cast<std::optional<intptr_t>>();
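One remark on the cupy_util.cpp change: with nbytes now unsigned, a non-negativity check would be vacuous, so asserting strict positivity is the meaningful invariant, and since glog's DCHECK compiles out of release builds it documents the expectation during development rather than guarding production. A minimal sketch of the pattern (hypothetical helper, not Morpheus code):

```cpp
#include <cstddef>

#include <glog/logging.h>  // for DCHECK

// Hypothetical helper showing the same debug-only guard: with an unsigned byte
// count, the useful invariant to assert is that the view is non-empty.
void publish_device_view(const void* data, std::size_t nbytes)
{
    DCHECK(nbytes > 0) << "refusing to publish an empty view";
    // ... hand (data, nbytes) to the consumer here ...
    (void)data;
}
```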