From 4657954210b1eee84f40169a14e1d9ed477d5f09 Mon Sep 17 00:00:00 2001
From: Micka
Date: Wed, 7 Jun 2023 21:38:25 +0200
Subject: [PATCH] Add identity matrix function (#1548)

This PR adds the identity matrix function `eye` (will be used in LOBPCG) and
fixes a check on the layout for the `norm` function.

Authors:
  - Micka (https://github.com/lowener)
  - Corey J. Nolet (https://github.com/cjnolet)

Approvers:
  - Corey J. Nolet (https://github.com/cjnolet)

URL: https://github.com/rapidsai/raft/pull/1548
---
 cpp/include/raft/linalg/norm.cuh          |  2 +-
 cpp/include/raft/matrix/detail/matrix.cuh | 33 ++++----
 cpp/include/raft/matrix/diagonal.cuh      | 26 +++++++
 cpp/include/raft/matrix/matrix.cuh        |  6 +-
 cpp/test/CMakeLists.txt                   |  1 +
 cpp/test/matrix/eye.cu                    | 92 +++++++++++++++++++++++
 6 files changed, 140 insertions(+), 20 deletions(-)
 create mode 100644 cpp/test/matrix/eye.cu

diff --git a/cpp/include/raft/linalg/norm.cuh b/cpp/include/raft/linalg/norm.cuh
index c426250e18..9dad96356b 100644
--- a/cpp/include/raft/linalg/norm.cuh
+++ b/cpp/include/raft/linalg/norm.cuh
@@ -121,7 +121,7 @@ void norm(raft::resources const& handle,
 {
   RAFT_EXPECTS(raft::is_row_or_column_major(in), "Input must be contiguous");
 
-  auto constexpr row_major = std::is_same_v;
+  auto constexpr row_major = std::is_same_v;
   auto along_rows = apply == Apply::ALONG_ROWS;
 
   if (along_rows) {
diff --git a/cpp/include/raft/matrix/detail/matrix.cuh b/cpp/include/raft/matrix/detail/matrix.cuh
index 6b6c00c391..aba119ee73 100644
--- a/cpp/include/raft/matrix/detail/matrix.cuh
+++ b/cpp/include/raft/matrix/detail/matrix.cuh
@@ -230,52 +230,53 @@ void copyUpperTriangular(const m_t* src, m_t* dst, idx_t n_rows, idx_t n_cols, c
 /**
  * @brief Copy a vector to the diagonal of a matrix
  * @param vec: vector of length k = min(n_rows, n_cols)
- * @param matrix: matrix of size n_rows x n_cols
- * @param m: number of rows of the matrix
- * @param n: number of columns of the matrix
+ * @param matrix: matrix of size n_rows x n_cols (leading dimension = lda)
+ * @param lda: leading dimension of the matrix
  * @param k: dimensionality
  */
 template <typename m_t, typename idx_t = int>
-__global__ void copyVectorToMatrixDiagonal(const m_t* vec, m_t* matrix, idx_t m, idx_t n, idx_t k)
+__global__ void copyVectorToMatrixDiagonal(const m_t* vec, m_t* matrix, idx_t lda, idx_t k)
 {
   idx_t idx = threadIdx.x + blockDim.x * blockIdx.x;
 
-  if (idx < k) { matrix[idx + idx * m] = vec[idx]; }
+  if (idx < k) { matrix[idx + idx * lda] = vec[idx]; }
 }
 
 /**
  * @brief Copy matrix diagonal to vector
  * @param vec: vector of length k = min(n_rows, n_cols)
- * @param matrix: matrix of size n_rows x n_cols
- * @param m: number of rows of the matrix
- * @param n: number of columns of the matrix
+ * @param matrix: matrix of size n_rows x n_cols (leading dimension = lda)
+ * @param lda: leading dimension of the matrix
  * @param k: dimensionality
  */
 template <typename m_t, typename idx_t = int>
-__global__ void copyVectorFromMatrixDiagonal(m_t* vec, const m_t* matrix, idx_t m, idx_t n, idx_t k)
+__global__ void copyVectorFromMatrixDiagonal(m_t* vec, const m_t* matrix, idx_t lda, idx_t k)
 {
   idx_t idx = threadIdx.x + blockDim.x * blockIdx.x;
 
-  if (idx < k) { vec[idx] = matrix[idx + idx * m]; }
+  if (idx < k) { vec[idx] = matrix[idx + idx * lda]; }
 }
 
 template <typename m_t, typename idx_t = int>
 void initializeDiagonalMatrix(
-  const m_t* vec, m_t* matrix, idx_t n_rows, idx_t n_cols, cudaStream_t stream)
+  const m_t* vec, m_t* matrix, idx_t n_rows, idx_t n_cols, bool row_major, cudaStream_t stream)
 {
-  idx_t k = std::min(n_rows, n_cols);
+  idx_t k   = std::min(n_rows, n_cols);
+  idx_t lda = row_major ? n_cols : n_rows;
 
   dim3 block(64);
   dim3 grid((k + block.x - 1) / block.x);
-  copyVectorToMatrixDiagonal<<<grid, block, 0, stream>>>(vec, matrix, n_rows, n_cols, k);
+  copyVectorToMatrixDiagonal<<<grid, block, 0, stream>>>(vec, matrix, lda, k);
 }
 
 template <typename m_t, typename idx_t = int>
-void getDiagonalMatrix(m_t* vec, const m_t* matrix, idx_t n_rows, idx_t n_cols, cudaStream_t stream)
+void getDiagonalMatrix(
+  m_t* vec, const m_t* matrix, idx_t n_rows, idx_t n_cols, bool row_major, cudaStream_t stream)
 {
-  idx_t k = std::min(n_rows, n_cols);
+  idx_t k   = std::min(n_rows, n_cols);
+  idx_t lda = row_major ? n_cols : n_rows;
 
   dim3 block(64);
   dim3 grid((k + block.x - 1) / block.x);
-  copyVectorFromMatrixDiagonal<<<grid, block, 0, stream>>>(vec, matrix, n_rows, n_cols, k);
+  copyVectorFromMatrixDiagonal<<<grid, block, 0, stream>>>(vec, matrix, lda, k);
 }
 
 /**
diff --git a/cpp/include/raft/matrix/diagonal.cuh b/cpp/include/raft/matrix/diagonal.cuh
index c7a3681983..5cd2cd5c26 100644
--- a/cpp/include/raft/matrix/diagonal.cuh
+++ b/cpp/include/raft/matrix/diagonal.cuh
@@ -19,6 +19,8 @@
 #include
 #include
 #include
+#include
+#include
 
 namespace raft::matrix {
 
@@ -40,11 +42,13 @@ void set_diagonal(raft::resources const& handle,
 {
   RAFT_EXPECTS(vec.extent(0) == std::min(matrix.extent(0), matrix.extent(1)),
                "Diagonal vector must be min(matrix.n_rows, matrix.n_cols)");
+  constexpr auto is_row_major = std::is_same_v;
   detail::initializeDiagonalMatrix(vec.data_handle(),
                                    matrix.data_handle(),
                                    matrix.extent(0),
                                    matrix.extent(1),
+                                   is_row_major,
                                    resource::get_cuda_stream(handle));
 }
 
@@ -61,10 +65,12 @@ void get_diagonal(raft::resources const& handle,
 {
   RAFT_EXPECTS(vec.extent(0) == std::min(matrix.extent(0), matrix.extent(1)),
                "Diagonal vector must be min(matrix.n_rows, matrix.n_cols)");
+  constexpr auto is_row_major = std::is_same_v;
   detail::getDiagonalMatrix(vec.data_handle(),
                             matrix.data_handle(),
                             matrix.extent(0),
                             matrix.extent(1),
+                            is_row_major,
                             resource::get_cuda_stream(handle));
 }
 
@@ -83,6 +89,26 @@ void invert_diagonal(raft::resources const& handle,
     inout.data_handle(), inout.extent(0), resource::get_cuda_stream(handle));
 }
 
+/**
+ * @brief create an identity matrix
+ * @tparam math_t data-type upon which the math operation will be performed
+ * @tparam idx_t indexing type used for the output
+ * @tparam layout_t layout of the matrix data (must be row or col major)
+ * @param[in] handle: raft handle
+ * @param[out] out: output matrix
+ */
+template <typename math_t, typename idx_t, typename layout_t>
+void eye(const raft::resources& handle, raft::device_matrix_view<math_t, idx_t, layout_t> out)
+{
+  RAFT_EXPECTS(raft::is_row_or_column_major(out), "Output must be contiguous");
+
+  auto diag = raft::make_device_vector<math_t, idx_t>(handle, min(out.extent(0), out.extent(1)));
+  RAFT_CUDA_TRY(cudaMemsetAsync(
+    out.data_handle(), 0, out.size() * sizeof(math_t), resource::get_cuda_stream(handle)));
+  raft::matrix::fill(handle, diag.view(), math_t(1));
+  set_diagonal(handle, raft::make_const_mdspan(diag.view()), out);
+}
+
 /** @} */  // end of group matrix_diagonal
 
 }  // namespace raft::matrix
diff --git a/cpp/include/raft/matrix/matrix.cuh b/cpp/include/raft/matrix/matrix.cuh
index bc553011c0..6851b8739e 100644
--- a/cpp/include/raft/matrix/matrix.cuh
+++ b/cpp/include/raft/matrix/matrix.cuh
@@ -221,9 +221,9 @@ void copyUpperTriangular(m_t* src, m_t* dst, idx_t n_rows, idx_t n_cols, cudaStr
 }
 
 /**
- * @brief Initialize a diagonal matrix with a vector
+ * @brief Initialize a diagonal col-major matrix with a vector
  * @param vec: vector of length k = min(n_rows, n_cols)
- * @param matrix: matrix of size n_rows x n_cols
+ * @param matrix: matrix of size n_rows x n_cols (col-major)
  * @param n_rows: number of rows of the matrix
  * @param n_cols: number of columns of the matrix
  * @param stream: cuda stream
@@ -232,7 +232,7 @@ template <typename m_t, typename idx_t = int>
 void initializeDiagonalMatrix(
   m_t* vec, m_t* matrix, idx_t n_rows, idx_t n_cols, cudaStream_t stream)
 {
-  detail::initializeDiagonalMatrix(vec, matrix, n_rows, n_cols, stream);
+  detail::initializeDiagonalMatrix(vec, matrix, n_rows, n_cols, false, stream);
 }
 
 /**
diff --git a/cpp/test/CMakeLists.txt b/cpp/test/CMakeLists.txt
index 871869102c..33d4dd9423 100644
--- a/cpp/test/CMakeLists.txt
+++ b/cpp/test/CMakeLists.txt
@@ -238,6 +238,7 @@ if(BUILD_TESTS)
     test/matrix/columnSort.cu
     test/matrix/diagonal.cu
     test/matrix/gather.cu
+    test/matrix/eye.cu
     test/matrix/linewise_op.cu
     test/matrix/math.cu
     test/matrix/matrix.cu
diff --git a/cpp/test/matrix/eye.cu b/cpp/test/matrix/eye.cu
new file mode 100644
index 0000000000..33ed8a00ba
--- /dev/null
+++ b/cpp/test/matrix/eye.cu
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2023, NVIDIA CORPORATION.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "../test_utils.cuh"
+#include
+#include
+#include
+
+#include
+#include
+
+namespace raft::matrix {
+
+template <typename T>
+struct InitInputs {
+  int n_row;
+  int n_col;
+};
+
+template <typename T>
+::std::ostream& operator<<(::std::ostream& os, const InitInputs<T>& dims)
+{
+  return os;
+}
+
+template <typename T>
+class InitTest : public ::testing::TestWithParam<InitInputs<T>> {
+ public:
+  InitTest()
+    : params(::testing::TestWithParam<InitInputs<T>>::GetParam()),
+      stream(resource::get_cuda_stream(handle))
+  {
+  }
+
+ protected:
+  void test_eye()
+  {
+    ASSERT_TRUE(params.n_row == 4 && params.n_col == 5);
+    auto eyemat_col =
+      raft::make_device_matrix<T, int, raft::col_major>(handle, params.n_row, params.n_col);
+    raft::matrix::eye(handle, eyemat_col.view());
+    std::vector<T> eye_exp{1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0};
+    std::vector<T> eye_act(params.n_col * params.n_row);
+    raft::copy(eye_act.data(), eyemat_col.data_handle(), eye_act.size(), stream);
+    resource::sync_stream(handle, stream);
+    ASSERT_TRUE(hostVecMatch(eye_exp, eye_act, raft::Compare<T>()));
+
+    auto eyemat_row =
+      raft::make_device_matrix<T, int, raft::row_major>(handle, params.n_row, params.n_col);
+    raft::matrix::eye(handle, eyemat_row.view());
+    eye_exp = std::vector<T>{1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0};
+    raft::copy(eye_act.data(), eyemat_row.data_handle(), eye_act.size(), stream);
+    resource::sync_stream(handle, stream);
+    ASSERT_TRUE(hostVecMatch(eye_exp, eye_act, raft::Compare<T>()));
+  }
+
+  void SetUp() override { test_eye(); }
+
+ protected:
+  raft::resources handle;
+  cudaStream_t stream;
+
+  InitInputs<T> params;
+};
+
+const std::vector<InitInputs<float>> inputsf1 = {{4, 5}};
+
+const std::vector<InitInputs<double>> inputsd1 = {{4, 5}};
+
+typedef InitTest<float> InitTestF;
+TEST_P(InitTestF, Result) {}
+
+typedef InitTest<double> InitTestD;
+TEST_P(InitTestD, Result) {}
+
+INSTANTIATE_TEST_SUITE_P(InitTests, InitTestF, ::testing::ValuesIn(inputsf1));
+INSTANTIATE_TEST_SUITE_P(InitTests, InitTestD, ::testing::ValuesIn(inputsd1));
+
+}  // namespace raft::matrix
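
Usage sketch (not part of the patch): the snippet below shows how the new `raft::matrix::eye` is meant to be called, mirroring the test above. `eye` is defined in `cpp/include/raft/matrix/diagonal.cuh` (added by this PR); the include paths for the mdarray/resource helpers, the `main` scaffold, and the explicit stream sync are assumptions for illustration, not content of the patch.

#include <raft/core/device_mdarray.hpp>        // assumed header for raft::make_device_matrix
#include <raft/core/resource/cuda_stream.hpp>  // assumed header for raft::resource::sync_stream
#include <raft/core/resources.hpp>             // assumed header for raft::resources
#include <raft/matrix/diagonal.cuh>            // raft::matrix::eye (added by this PR)

int main()
{
  raft::resources handle;

  // Allocate a 4 x 5 row-major matrix and turn it into an identity matrix:
  // eye() zero-fills the buffer, then writes ones onto the min(4, 5) = 4
  // diagonal entries via set_diagonal().
  auto eye_row = raft::make_device_matrix<float, int, raft::row_major>(handle, 4, 5);
  raft::matrix::eye(handle, eye_row.view());

  // Column-major outputs are supported as well; only the memory layout of the
  // result differs (compare the two expected vectors in the test above).
  auto eye_col = raft::make_device_matrix<float, int, raft::col_major>(handle, 4, 5);
  raft::matrix::eye(handle, eye_col.view());

  // eye() enqueues work on the handle's stream; synchronize before reading the
  // result on the host.
  raft::resource::sync_stream(handle);
  return 0;
}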