Added toeplitz operator #683

Merged · 2 commits · Jul 25, 2024
10 changes: 5 additions & 5 deletions docs_input/api/creation/tensors/make.rst
@@ -18,18 +18,18 @@ Return by Value
.. doxygenfunction:: make_tensor( TensorType &tensor, ShapeType &&shape, matxMemorySpace_t space = MATX_MANAGED_MEMORY, cudaStream_t stream = 0)
.. doxygenfunction:: make_tensor( TensorType &tensor, matxMemorySpace_t space = MATX_MANAGED_MEMORY, cudaStream_t stream = 0)
.. doxygenfunction:: make_tensor( T *data, const index_t (&shape)[RANK], bool owning = false)
.. doxygenfunction:: make_tensor( TensorType &tensor, typename TensorType::scalar_type *data, const index_t (&shape)[TensorType::Rank()], bool owning = false)
.. doxygenfunction:: make_tensor( TensorType &tensor, typename TensorType::value_type *data, const index_t (&shape)[TensorType::Rank()], bool owning = false)
.. doxygenfunction:: make_tensor( T *data, ShapeType &&shape, bool owning = false)
.. doxygenfunction:: make_tensor( TensorType &tensor, typename TensorType::scalar_type *data, typename TensorType::shape_container &&shape, bool owning = false)
.. doxygenfunction:: make_tensor( TensorType &tensor, typename TensorType::scalar_type *ptr, bool owning = false)
.. doxygenfunction:: make_tensor( TensorType &tensor, typename TensorType::value_type *data, typename TensorType::shape_container &&shape, bool owning = false)
.. doxygenfunction:: make_tensor( TensorType &tensor, typename TensorType::value_type *ptr, bool owning = false)
.. doxygenfunction:: make_tensor( Storage &&s, ShapeType &&shape)
.. doxygenfunction:: make_tensor( TensorType &tensor, typename TensorType::storage_type &&s, typename TensorType::shape_container &&shape)
.. doxygenfunction:: make_tensor( T* const data, D &&desc, bool owning = false)
.. doxygenfunction:: make_tensor( TensorType &tensor, typename TensorType::scalar_type* const data, typename TensorType::desc_type &&desc, bool owning = false)
.. doxygenfunction:: make_tensor( TensorType &tensor, typename TensorType::value_type* const data, typename TensorType::desc_type &&desc, bool owning = false)
.. doxygenfunction:: make_tensor( D &&desc, matxMemorySpace_t space = MATX_MANAGED_MEMORY, cudaStream_t stream = 0)
.. doxygenfunction:: make_tensor( TensorType &&tensor, typename TensorType::desc_type &&desc, matxMemorySpace_t space = MATX_MANAGED_MEMORY, cudaStream_t stream = 0)
.. doxygenfunction:: make_tensor( T *const data, const index_t (&shape)[RANK], const index_t (&strides)[RANK], bool owning = false)
.. doxygenfunction:: make_tensor( TensorType &tensor, typename TensorType::scalar_type *const data, const index_t (&shape)[TensorType::Rank()], const index_t (&strides)[TensorType::Rank()], bool owning = false)
.. doxygenfunction:: make_tensor( TensorType &tensor, typename TensorType::value_type *const data, const index_t (&shape)[TensorType::Rank()], const index_t (&strides)[TensorType::Rank()], bool owning = false)
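
For reference, a brief standalone sketch of the by-value and pointer-wrapping overloads listed above. The sizes, the managed-memory allocation, and the final copy are illustrative assumptions, not taken from this PR:

```cpp
#include <matx.h>
#include <type_traits>

using namespace matx;

int main() {
  // Owning tensor allocated from a braced shape (managed memory by default)
  auto a = make_tensor<float>({16, 32});

  // Non-owning view wrapping memory the caller already manages
  float *ptr;
  cudaMallocManaged(&ptr, 16 * 32 * sizeof(float));
  auto b = make_tensor<float>(ptr, {16, 32});

  // After this PR the element type is exposed as value_type (scalar_type is gone)
  static_assert(std::is_same_v<decltype(a)::value_type, float>);

  (b = a).run();            // copy a into the wrapped buffer on the default stream
  cudaStreamSynchronize(0);

  cudaFree(ptr);
  return 0;
}
```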

Return by Pointer
~~~~~~~~~~~~~~~~~
33 changes: 33 additions & 0 deletions docs_input/api/linalg/other/toeplitz.rst
@@ -0,0 +1,33 @@
.. _toeplitz_func:

toeplitz
========

Generate a Toeplitz matrix

`c` is the first column of the matrix and `r` is the first row. `c` and `r` should have the
same first value; if they differ, the first value of `c` takes precedence.

Passing a single array/operator as input is equivalent to passing the conjugate of the same
input as the second parameter.

.. doxygenfunction:: toeplitz(const T (&c)[D])
.. doxygenfunction:: toeplitz(const Op &c)
.. doxygenfunction:: toeplitz(const T (&c)[D1], const T (&r)[D2])
.. doxygenfunction:: toeplitz(const COp &cop, const ROp &rop)

Examples
~~~~~~~~

.. literalinclude:: ../../../../test/00_operators/OperatorTests.cu
:language: cpp
:start-after: example-begin toeplitz-test-1
:end-before: example-end toeplitz-test-1
:dedent:

.. literalinclude:: ../../../../test/00_operators/OperatorTests.cu
:language: cpp
:start-after: example-begin toeplitz-test-2
:end-before: example-end toeplitz-test-2
:dedent:
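
The literalinclude blocks above only render inside the built docs, so here is a hedged standalone sketch; the values, the output shape, and the `print` call are my own illustration, not the PR's test code:

```cpp
#include <matx.h>

using namespace matx;

int main() {
  float c[] = {1.0f, 2.0f, 3.0f};        // first column
  float r[] = {1.0f, 4.0f, 5.0f};        // first row; r[0] should equal c[0]

  auto out = make_tensor<float>({3, 3}); // Toeplitz of a length-3 column/row is 3x3
  (out = toeplitz(c, r)).run();          // default CUDA stream
  cudaStreamSynchronize(0);

  print(out);                            // expected: [[1 4 5], [2 1 4], [3 2 1]]
  return 0;
}
```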

4 changes: 2 additions & 2 deletions docs_input/api/signalimage/general/chirp.rst
@@ -6,7 +6,7 @@ chirp
Creates a real chirp signal (swept-frequency cosine)

.. doxygenfunction:: chirp(index_t num, TimeType last, FreqType f0, TimeType t1, FreqType f1, ChirpMethod method = ChirpMethod::CHIRP_METHOD_LINEAR)
.. doxygenfunction:: chirp(SpaceOp t, FreqType f0, typename SpaceOp::scalar_type t1, FreqType f1, ChirpMethod method = ChirpMethod::CHIRP_METHOD_LINEAR)
.. doxygenfunction:: chirp(SpaceOp t, FreqType f0, typename SpaceOp::value_type t1, FreqType f1, ChirpMethod method = ChirpMethod::CHIRP_METHOD_LINEAR)

Examples
~~~~~~~~
@@ -24,7 +24,7 @@ cchirp
Creates a complex chirp signal (swept-frequency cosine)

.. doxygenfunction:: cchirp(index_t num, TimeType last, FreqType f0, TimeType t1, FreqType f1, ChirpMethod method = ChirpMethod::CHIRP_METHOD_LINEAR)
.. doxygenfunction:: cchirp(SpaceOp t, FreqType f0, typename SpaceOp::scalar_type t1, FreqType f1, ChirpMethod method = ChirpMethod::CHIRP_METHOD_LINEAR)
.. doxygenfunction:: cchirp(SpaceOp t, FreqType f0, typename SpaceOp::value_type t1, FreqType f1, ChirpMethod method = ChirpMethod::CHIRP_METHOD_LINEAR)

Examples
~~~~~~~~
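The examples referenced above live in the built docs; a hedged standalone sketch of the real chirp, with parameter values chosen purely for illustration:

```cpp
#include <matx.h>

using namespace matx;

int main() {
  auto out = make_tensor<float>({1000});
  // 1000 samples spanning t in [0, 1], swept linearly from 0 Hz up to 100 Hz
  (out = chirp(1000, 1.0f, 0.0f, 1.0f, 100.0f)).run();
  cudaStreamSynchronize(0);
  return 0;
}
```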
2 changes: 1 addition & 1 deletion docs_input/api/stats/hist/hist.rst
@@ -5,7 +5,7 @@ hist

Compute a histogram of input `a` with bounds specified by `upper` and `lower`

.. doxygenfunction:: hist(const InputOperator &a, const typename InputOperator::scalar_type lower, const typename InputOperator::scalar_type upper)
.. doxygenfunction:: hist(const InputOperator &a, const typename InputOperator::value_type lower, const typename InputOperator::value_type upper)

Examples
~~~~~~~~
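Likewise, a hedged sketch of `hist` usage; the `int` output element type and the bin count of 10 are my assumptions, not stated on this page:

```cpp
#include <matx.h>

using namespace matx;

int main() {
  auto a    = make_tensor<float>({1000});
  auto bins = make_tensor<int>({10});       // 10 equally spaced bins between the bounds
  // ... fill `a` with data ...
  (bins = hist(a, 0.0f, 1.0f)).run();       // lower = 0.0, upper = 1.0
  cudaStreamSynchronize(0);
  return 0;
}
```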
2 changes: 1 addition & 1 deletion include/matx/core/file_io.h
@@ -158,7 +158,7 @@ void read_csv(TensorType &t, const std::string fname,
auto np = pybind11::module_::import("numpy");
auto obj = np.attr("genfromtxt")("fname"_a = fname.c_str(), "delimiter"_a = delimiter,
"skip_header"_a = skip_header,
"dtype"_a = detail::MatXPybind::GetNumpyDtype<typename TensorType::scalar_type>());
"dtype"_a = detail::MatXPybind::GetNumpyDtype<typename TensorType::value_type>());
pb->NumpyToTensorView(t, obj);
}

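A hedged usage sketch of the function touched above; the `io` namespace qualification, the file name, and the assumption that the remaining parameters are defaulted are mine (only the first two parameters are visible in the hunk):

```cpp
#include <matx.h>

using namespace matx;

int main() {
  // Destination tensor sized to match the (hypothetical) CSV contents
  auto t = make_tensor<float>({100, 4});
  io::read_csv(t, "data.csv", ",");   // dtype now derives from TensorType::value_type
  return 0;
}
```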
6 changes: 2 additions & 4 deletions include/matx/core/iterator.h
@@ -47,8 +47,7 @@ namespace matx {
template <typename OperatorType, bool ConvertType = true>
struct RandomOperatorIterator {
using self_type = RandomOperatorIterator<OperatorType, ConvertType>;
using value_type = typename std::conditional_t<ConvertType, detail::convert_matx_type_t<typename OperatorType::scalar_type>, typename OperatorType::scalar_type>;
using scalar_type = value_type;
using value_type = typename std::conditional_t<ConvertType, detail::convert_matx_type_t<typename OperatorType::value_type>, typename OperatorType::value_type>;
// using stride_type = std::conditional_t<is_tensor_view_v<OperatorType>, typename OperatorType::desc_type::stride_type,
// index_t>;
using stride_type = index_t;
@@ -174,8 +173,7 @@ __MATX_INLINE__ __MATX_HOST__ __MATX_DEVICE__ index_t operator-(const RandomOper
template <typename OperatorType, bool ConvertType = true>
struct RandomOperatorOutputIterator {
using self_type = RandomOperatorOutputIterator<OperatorType, ConvertType>;
using value_type = typename std::conditional_t<ConvertType, detail::convert_matx_type_t<typename OperatorType::scalar_type>, typename OperatorType::scalar_type>;
using scalar_type = value_type;
using value_type = typename std::conditional_t<ConvertType, detail::convert_matx_type_t<typename OperatorType::value_type>, typename OperatorType::value_type>;
// using stride_type = std::conditional_t<is_tensor_view_v<OperatorType>, typename OperatorType::desc_type::stride_type,
// index_t>;
using stride_type = index_t;
30 changes: 15 additions & 15 deletions include/matx/core/make_tensor.h
@@ -78,7 +78,7 @@ void make_tensor( TensorType &tensor,
cudaStream_t stream = 0) {
MATX_NVTX_START("", matx::MATX_NVTX_LOG_API)

auto tmp = make_tensor<typename TensorType::scalar_type, TensorType::Rank()>(shape, space, stream);
auto tmp = make_tensor<typename TensorType::value_type, TensorType::Rank()>(shape, space, stream);
tensor.Shallow(tmp);
}

@@ -166,7 +166,7 @@ auto make_tensor( TensorType &tensor,
cudaStream_t stream = 0) {
MATX_NVTX_START("", matx::MATX_NVTX_LOG_API)

auto tmp = make_tensor<typename TensorType::scalar_type, ShapeType>(std::forward<ShapeType>(shape), space, stream);
auto tmp = make_tensor<typename TensorType::value_type, ShapeType>(std::forward<ShapeType>(shape), space, stream);
tensor.Shallow(tmp);
}

@@ -225,7 +225,7 @@ auto make_tensor_p( TensorType &tensor,
cudaStream_t stream = 0) {
MATX_NVTX_START("", matx::MATX_NVTX_LOG_API)

auto tmp = make_tensor<typename TensorType::scalar_type, typename TensorType::shape_container>(std::forward<typename TensorType::shape_container>(shape), space, stream);
auto tmp = make_tensor<typename TensorType::value_type, typename TensorType::shape_container>(std::forward<typename TensorType::shape_container>(shape), space, stream);
tensor.Shallow(tmp);
}

@@ -261,7 +261,7 @@ template <typename TensorType,
auto make_tensor( TensorType &tensor,
matxMemorySpace_t space = MATX_MANAGED_MEMORY,
cudaStream_t stream = 0) {
auto tmp = make_tensor<typename TensorType::scalar_type>({}, space, stream);
auto tmp = make_tensor<typename TensorType::value_type>({}, space, stream);
tensor.Shallow(tmp);
}

@@ -322,12 +322,12 @@ auto make_tensor( T *data,
template <typename TensorType,
std::enable_if_t<is_tensor_view_v<TensorType>, bool> = true>
auto make_tensor( TensorType &tensor,
typename TensorType::scalar_type *data,
typename TensorType::value_type *data,
const index_t (&shape)[TensorType::Rank()],
bool owning = false) {
MATX_NVTX_START("", matx::MATX_NVTX_LOG_API)

auto tmp = make_tensor<typename TensorType::scalar_type, TensorType::Rank()>(data, shape, owning);
auto tmp = make_tensor<typename TensorType::value_type, TensorType::Rank()>(data, shape, owning);
tensor.Shallow(tmp);
}

@@ -373,12 +373,12 @@ auto make_tensor( T *data,
template <typename TensorType,
std::enable_if_t<is_tensor_view_v<TensorType>, bool> = true>
auto make_tensor( TensorType &tensor,
typename TensorType::scalar_type *data,
typename TensorType::value_type *data,
typename TensorType::shape_container &&shape,
bool owning = false) {
MATX_NVTX_START("", matx::MATX_NVTX_LOG_API)

auto tmp = make_tensor<typename TensorType::scalar_type, typename TensorType::shape_container>(data, std::forward<typename TensorType::shape_container>(shape), owning);
auto tmp = make_tensor<typename TensorType::value_type, typename TensorType::shape_container>(data, std::forward<typename TensorType::shape_container>(shape), owning);
tensor.Shallow(tmp);
}

@@ -414,9 +414,9 @@ auto make_tensor( T *ptr,
template <typename TensorType,
std::enable_if_t<is_tensor_view_v<TensorType>, bool> = true>
auto make_tensor( TensorType &tensor,
typename TensorType::scalar_type *ptr,
typename TensorType::value_type *ptr,
bool owning = false) {
auto tmp = make_tensor<typename TensorType::scalar_type>(ptr, owning);
auto tmp = make_tensor<typename TensorType::value_type>(ptr, owning);
tensor.Shallow(tmp);
}

@@ -534,12 +534,12 @@ auto make_tensor( T* const data,
template <typename TensorType,
std::enable_if_t<is_tensor_view_v<TensorType>, bool> = true>
auto make_tensor( TensorType &tensor,
typename TensorType::scalar_type* const data,
typename TensorType::value_type* const data,
typename TensorType::desc_type &&desc,
bool owning = false) {
MATX_NVTX_START("", matx::MATX_NVTX_LOG_API)

auto tmp = make_tensor<typename TensorType::scalar_type, typename TensorType::desc_type>(data, std::forward<typename TensorType::desc_type>(desc), owning);
auto tmp = make_tensor<typename TensorType::value_type, typename TensorType::desc_type>(data, std::forward<typename TensorType::desc_type>(desc), owning);
tensor.Shallow(tmp);
}

@@ -585,7 +585,7 @@ auto make_tensor( TensorType &&tensor,
cudaStream_t stream = 0) {
MATX_NVTX_START("", matx::MATX_NVTX_LOG_API)

auto tmp = make_tensor<typename TensorType::scalar_type, typename TensorType::desc_type>(std::forward<typename TensorType::desc_type>(desc), space, stream);
auto tmp = make_tensor<typename TensorType::value_type, typename TensorType::desc_type>(std::forward<typename TensorType::desc_type>(desc), space, stream);
tensor.Shallow(tmp);
}

@@ -633,13 +633,13 @@ auto make_tensor( T *const data,
template <typename TensorType,
std::enable_if_t<is_tensor_view_v<TensorType>, bool> = true>
auto make_tensor( TensorType &tensor,
typename TensorType::scalar_type *const data,
typename TensorType::value_type *const data,
const index_t (&shape)[TensorType::Rank()],
const index_t (&strides)[TensorType::Rank()],
bool owning = false) {
MATX_NVTX_START("", matx::MATX_NVTX_LOG_API)

auto tmp = make_tensor<typename TensorType::scalar_type, TensorType::Rank()>(data, shape, strides, owning);
auto tmp = make_tensor<typename TensorType::value_type, TensorType::Rank()>(data, shape, strides, owning);
tensor.Shallow(tmp);
}

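The in-place overloads above all follow the same pattern: build a temporary with the free-function form, then `Shallow()`-copy it into the caller's tensor. A hedged sketch of how that surface is used; the default-constructed `tensor_t` is an assumption implied by the Shallow pattern:

```cpp
#include <matx.h>

using namespace matx;

int main() {
  tensor_t<float, 2> t;        // declared up front, e.g. as a class member
  make_tensor(t, {8, 8});      // allocates and shallow-copies into t

  // Pointer-wrapping, in-place form: element type comes from TensorType::value_type
  float *ptr;
  cudaMallocManaged(&ptr, 8 * 8 * sizeof(float));
  tensor_t<float, 2> v;
  make_tensor(v, ptr, {8, 8}); // non-owning unless owning=true is passed

  cudaFree(ptr);
  return 0;
}
```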
12 changes: 6 additions & 6 deletions include/matx/core/operator_utils.h
@@ -44,13 +44,13 @@ namespace matx {
if (out.IsContiguous()) {
if constexpr(ConvertType) {
return func( in,
reinterpret_cast<detail::convert_matx_type_t<typename remove_cvref_t<OutputOp>::scalar_type> *>(out.Data()),
reinterpret_cast<detail::convert_matx_type_t<typename remove_cvref_t<OutputOp>::value_type> *>(out.Data()),
bi,
ei);
}
else {
return func( in,
reinterpret_cast<typename remove_cvref_t<OutputOp>::scalar_type *>(out.Data()),
reinterpret_cast<typename remove_cvref_t<OutputOp>::value_type *>(out.Data()),
bi,
ei);
}
@@ -70,14 +70,14 @@
if constexpr (ConvertType) {
return ReduceOutput<ConvertType>( std::forward<Func>(func),
std::forward<OutputOp>(out),
reinterpret_cast<detail::convert_matx_type_t<typename remove_cvref_t<InputOp>::scalar_type> *>(in_base.Data()),
reinterpret_cast<detail::convert_matx_type_t<typename remove_cvref_t<InputOp>::value_type> *>(in_base.Data()),
BeginOffset{in_base},
EndOffset{in_base});
}
else {
return ReduceOutput<ConvertType>( std::forward<Func>(func),
std::forward<OutputOp>(out),
reinterpret_cast<typename remove_cvref_t<InputOp>::scalar_type *>(in_base.Data()),
reinterpret_cast<typename remove_cvref_t<InputOp>::value_type *>(in_base.Data()),
BeginOffset{in_base},
EndOffset{in_base});
}
@@ -118,9 +118,9 @@ namespace matx {
namespace detail {
// Used inside of transforms to allocate temporary output
template <typename TensorType, typename Executor, typename ShapeType>
__MATX_HOST__ __MATX_INLINE__ void AllocateTempTensor(TensorType &tensor, Executor &&ex, ShapeType &&shape, typename TensorType::scalar_type **ptr) {
__MATX_HOST__ __MATX_INLINE__ void AllocateTempTensor(TensorType &tensor, Executor &&ex, ShapeType &&shape, typename TensorType::value_type **ptr) {
const auto ttl_size = std::accumulate(shape.begin(), shape.end(), static_cast<index_t>(1),
std::multiplies<index_t>()) * sizeof(typename TensorType::scalar_type);
std::multiplies<index_t>()) * sizeof(typename TensorType::value_type);
if constexpr (is_cuda_executor_v<Executor>) {
matxAlloc((void**)ptr, ttl_size, MATX_ASYNC_DEVICE_MEMORY, ex.getStream());
make_tensor(tensor, *ptr, shape);
12 changes: 6 additions & 6 deletions include/matx/core/pybind.h
@@ -176,7 +176,7 @@ class MatXPybind {
template <typename TensorType>
static pybind11::object GetEmptyNumpy(const TensorType &ten)
{
using T = typename TensorType::scalar_type;
using T = typename TensorType::value_type;
auto np = pybind11::module_::import("numpy");
pybind11::list dims;

@@ -329,7 +329,7 @@ class MatXPybind {
void NumpyToTensorView(TensorType ten,
const pybind11::object &np_ten)
{
using T = typename TensorType::scalar_type;
using T = typename TensorType::value_type;
constexpr int RANK = TensorType::Rank();
static_assert(RANK <=5, "NumpyToTensorView only supports max(RANK) = 5 at the moment.");

@@ -377,7 +377,7 @@ class MatXPybind {
template <typename TensorType>
auto NumpyToTensorView(const pybind11::object &np_ten)
{
using T = typename TensorType::scalar_type;
using T = typename TensorType::value_type;
constexpr int RANK = TensorType::Rank();
using ntype = matx_convert_complex_type<T>;
auto ften = pybind11::array_t<ntype, pybind11::array::c_style | pybind11::array::forcecast>(np_ten);
@@ -398,7 +398,7 @@

template <typename TensorType>
auto TensorViewToNumpy(const TensorType &ten) {
using tensor_type = typename TensorType::scalar_type;
using tensor_type = typename TensorType::value_type;
using ntype = matx_convert_complex_type<tensor_type>;
constexpr int RANK = TensorType::Rank();

@@ -466,12 +466,12 @@ class MatXPybind {


template <typename TensorType,
typename CT = matx_convert_cuda_complex_type<typename TensorType::scalar_type>>
typename CT = matx_convert_cuda_complex_type<typename TensorType::value_type>>
std::optional<TestFailResult<CT>>
CompareOutput(const TensorType &ten,
const std::string fname, double thresh, bool debug = false)
{
using raw_type = typename TensorType::scalar_type;
using raw_type = typename TensorType::value_type;
using ntype = matx_convert_complex_type<raw_type>;
using ctype = matx_convert_cuda_complex_type<raw_type>;
auto resobj = res_dict[fname.c_str()];
2 changes: 1 addition & 1 deletion include/matx/core/tensor.h
@@ -87,7 +87,7 @@ class tensor_t : public detail::tensor_impl_t<T,RANK,Desc> {
public:
// Type specifier for reflection on class
using type = T; ///< Type of traits
using scalar_type = T; ///< Type of traits
using value_type = T; ///< Type of traits
// Type specifier for signaling this is a matx operation or tensor view
using matxop = bool; ///< Indicate this is a MatX operator
using matxoplvalue = bool; ///< Indicate this is a MatX operator that can be on the lhs of an equation
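With `scalar_type` dropped from `tensor_t` above (and from `tensor_impl_t` in the next file), downstream code that queried the element type needs the standard-container-style trait. A minimal migration sketch:

```cpp
#include <matx.h>
#include <type_traits>

using namespace matx;

// Before this PR (no longer compiles once scalar_type is removed):
//   template <typename TensorType>
//   using element_t = typename TensorType::scalar_type;

// After: value_type mirrors the standard-library container convention
template <typename TensorType>
using element_t = typename TensorType::value_type;

int main() {
  auto t = make_tensor<float>({4});
  static_assert(std::is_same_v<element_t<decltype(t)>, float>);
  return 0;
}
```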
1 change: 0 additions & 1 deletion include/matx/core/tensor_impl.h
@@ -71,7 +71,6 @@ class tensor_impl_t {
public:
// Type specifier for reflection on class
using type = T; // TODO is this necessary
using scalar_type = T;
using value_type = T;
using tensor_view = bool;
using desc_type = Desc;