Apply clang-tidy autofixes #15894

Merged
merged 17 commits into from
Jun 12, 2024
Changes from 7 commits
6 changes: 6 additions & 0 deletions .pre-commit-config.yaml
@@ -56,12 +56,18 @@ repos:
- id: clang-format
types_or: [c, c++, cuda]
args: ["-fallback-style=none", "-style=file", "-i"]
exclude: |
(?x)^(
^cpp/src/io/parquet/ipc/Schema_generated.h|
^cpp/include/cudf_test/cxxopts.hpp|
)
- repo: https://github.com/sirosen/texthooks
rev: 0.6.6
hooks:
- id: fix-smartquotes
exclude: |
(?x)^(
^cpp/src/io/parquet/ipc/Schema_generated.h|
^cpp/include/cudf_test/cxxopts.hpp|
^python/cudf/cudf/tests/data/subword_tokenizer_data/.*|
^python/cudf/cudf/tests/text/test_text_methods.py
7 changes: 5 additions & 2 deletions cpp/include/cudf/ast/expressions.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020-2023, NVIDIA CORPORATION.
+ * Copyright (c) 2020-2024, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -478,7 +478,10 @@ class operation : public expression {
*
* @return Vector of operands
*/
- std::vector<std::reference_wrapper<expression const>> get_operands() const { return operands; }
+ [[nodiscard]] std::vector<std::reference_wrapper<expression const>> get_operands() const
+ {
+   return operands;
+ }
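Many of the edits in this diff are of the kind shown just above: clang-tidy's modernize-use-nodiscard check marks const accessors so that silently discarding their return value becomes a compiler warning. A minimal standalone sketch of the effect, using a hypothetical `widget` class rather than cuDF code:

```cpp
#include <vector>

class widget {
 public:
  // With [[nodiscard]], compilers warn when a caller drops the returned vector.
  [[nodiscard]] std::vector<int> ids() const { return ids_; }

 private:
  std::vector<int> ids_{1, 2, 3};
};

int main()
{
  widget w;
  w.ids();             // warning: the 'nodiscard' return value is ignored
  auto ids = w.ids();  // fine: the result is consumed
  return static_cast<int>(ids.size());
}
```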

/**
* @copydoc expression::accept
10 changes: 5 additions & 5 deletions cpp/include/cudf/column/column_device_view.cuh
@@ -442,7 +442,7 @@ class alignas(16) column_device_view : public detail::column_device_view_base {
* @return string_view instance representing this element at this index
*/
template <typename T, CUDF_ENABLE_IF(std::is_same_v<T, string_view>)>
- __device__ T element(size_type element_index) const noexcept
+ __device__ [[nodiscard]] T element(size_type element_index) const noexcept
{
size_type index = element_index + offset(); // account for this view's _offset
char const* d_strings = static_cast<char const*>(_data);
@@ -501,7 +501,7 @@ class alignas(16) column_device_view : public detail::column_device_view_base {
* @return dictionary32 instance representing this element at this index
*/
template <typename T, CUDF_ENABLE_IF(std::is_same_v<T, dictionary32>)>
- __device__ T element(size_type element_index) const noexcept
+ __device__ [[nodiscard]] T element(size_type element_index) const noexcept
{
size_type index = element_index + offset(); // account for this view's _offset
auto const indices = d_children[0];
@@ -519,7 +519,7 @@
* @return numeric::fixed_point representing the element at this index
*/
template <typename T, CUDF_ENABLE_IF(cudf::is_fixed_point<T>())>
- __device__ T element(size_type element_index) const noexcept
+ __device__ [[nodiscard]] T element(size_type element_index) const noexcept
{
using namespace numeric;
using rep = typename T::rep;
@@ -858,7 +858,7 @@ class alignas(16) column_device_view : public detail::column_device_view_base {
*/
[[nodiscard]] __device__ device_span<column_device_view const> children() const noexcept
{
- return device_span<column_device_view const>(d_children, _num_children);
+ return {d_children, static_cast<std::size_t>(_num_children)};
}
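The braced return just above comes from modernize-return-braced-init-list: the `device_span` type is deduced from the function's return type instead of being spelled out a second time, and because list-initialization rejects narrowing conversions, the signed `_num_children` now needs an explicit cast to `std::size_t`. A host-only sketch of the same idiom with a hypothetical span-like type (not `cudf::device_span`):

```cpp
#include <cstddef>

// Hypothetical stand-in for a span-like type constructed from (pointer, size).
struct int_span {
  int const* data;
  std::size_t size;
  int_span(int const* d, std::size_t s) : data{d}, size{s} {}
};

int_span make_span(int const* d, int n)
{
  // Old style: return int_span(d, n);  -- repeats the type and lets the
  // int -> std::size_t conversion happen silently.
  // Braced list: the type is deduced from the return type, and the narrowing
  // conversion has to be written out.
  return {d, static_cast<std::size_t>(n)};
}
```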

/**
@@ -1032,7 +1032,7 @@ class alignas(16) mutable_column_device_view : public detail::column_device_view
* @return Reference to the element at the specified index
*/
template <typename T, CUDF_ENABLE_IF(is_rep_layout_compatible<T>())>
- __device__ T& element(size_type element_index) const noexcept
+ __device__ [[nodiscard]] T& element(size_type element_index) const noexcept
{
return data<T>()[element_index];
}
27 changes: 17 additions & 10 deletions cpp/include/cudf/detail/aggregation/aggregation.hpp
@@ -24,6 +24,7 @@

#include <functional>
#include <numeric>
+ #include <utility>

namespace cudf {
namespace detail {
@@ -510,7 +511,7 @@ class quantile_aggregation final : public groupby_aggregation, public reduce_agg
void finalize(aggregation_finalizer& finalizer) const override { finalizer.visit(*this); }

private:
- size_t hash_impl() const
+ [[nodiscard]] size_t hash_impl() const
{
return std::hash<int>{}(static_cast<int>(_interpolation)) ^
std::accumulate(
@@ -596,7 +597,10 @@ class nunique_aggregation final : public groupby_aggregation,
void finalize(aggregation_finalizer& finalizer) const override { finalizer.visit(*this); }

private:
- size_t hash_impl() const { return std::hash<int>{}(static_cast<int>(_null_handling)); }
+ [[nodiscard]] size_t hash_impl() const
+ {
+   return std::hash<int>{}(static_cast<int>(_null_handling));
+ }
};

/**
@@ -638,7 +642,7 @@ class nth_element_aggregation final : public groupby_aggregation,
void finalize(aggregation_finalizer& finalizer) const override { finalizer.visit(*this); }

private:
- size_t hash_impl() const
+ [[nodiscard]] size_t hash_impl() const
{
return std::hash<size_type>{}(_n) ^ std::hash<int>{}(static_cast<int>(_null_handling));
}
@@ -763,7 +767,10 @@ class collect_list_aggregation final : public rolling_aggregation,
void finalize(aggregation_finalizer& finalizer) const override { finalizer.visit(*this); }

private:
- size_t hash_impl() const { return std::hash<int>{}(static_cast<int>(_null_handling)); }
+ [[nodiscard]] size_t hash_impl() const
+ {
+   return std::hash<int>{}(static_cast<int>(_null_handling));
+ }
};

/**
@@ -813,7 +820,7 @@ class collect_set_aggregation final : public rolling_aggregation,
void finalize(aggregation_finalizer& finalizer) const override { finalizer.visit(*this); }

protected:
- size_t hash_impl() const
+ [[nodiscard]] size_t hash_impl() const
{
return std::hash<int>{}(static_cast<int>(_null_handling) ^ static_cast<int>(_nulls_equal) ^
static_cast<int>(_nans_equal));
@@ -866,10 +873,10 @@ class lead_lag_aggregation final : public rolling_aggregation {
class udf_aggregation final : public rolling_aggregation {
public:
udf_aggregation(aggregation::Kind type,
- std::string const& user_defined_aggregator,
+ std::string user_defined_aggregator,
data_type output_type)
: aggregation{type},
- _source{user_defined_aggregator},
+ _source{std::move(user_defined_aggregator)},
_operator_name{(type == aggregation::PTX) ? "rolling_udf_ptx" : "rolling_udf_cuda"},
_function_name{"rolling_udf"},
_output_type{output_type}
@@ -973,7 +980,7 @@ class merge_sets_aggregation final : public groupby_aggregation, public reduce_a
void finalize(aggregation_finalizer& finalizer) const override { finalizer.visit(*this); }

protected:
- size_t hash_impl() const
+ [[nodiscard]] size_t hash_impl() const
{
return std::hash<int>{}(static_cast<int>(_nulls_equal) ^ static_cast<int>(_nans_equal));
}
@@ -1046,7 +1053,7 @@ class covariance_aggregation final : public groupby_aggregation {
void finalize(aggregation_finalizer& finalizer) const override { finalizer.visit(*this); }

protected:
- size_t hash_impl() const
+ [[nodiscard]] size_t hash_impl() const
{
return std::hash<size_type>{}(_min_periods) ^ std::hash<size_type>{}(_ddof);
}
@@ -1088,7 +1095,7 @@ class correlation_aggregation final : public groupby_aggregation {
void finalize(aggregation_finalizer& finalizer) const override { finalizer.visit(*this); }

protected:
- size_t hash_impl() const
+ [[nodiscard]] size_t hash_impl() const
{
return std::hash<int>{}(static_cast<int>(_type)) ^ std::hash<size_type>{}(_min_periods);
}
2 changes: 1 addition & 1 deletion cpp/include/cudf/detail/contiguous_split.hpp
@@ -104,7 +104,7 @@ class metadata_builder {
*
* @returns A vector containing the serialized column metadata
*/
- std::vector<uint8_t> build() const;
+ [[nodiscard]] std::vector<uint8_t> build() const;

/**
* @brief Clear the internal buffer containing all added metadata.
4 changes: 2 additions & 2 deletions cpp/include/cudf/detail/interop.hpp
@@ -62,7 +62,7 @@ DLManagedTensor* to_dlpack(table_view const& input,

// Creating arrow as per given type_id and buffer arguments
template <typename... Ts>
- std::shared_ptr<arrow::Array> to_arrow_array(cudf::type_id id, Ts&&... args)
+ std::shared_ptr<arrow::Array> to_arrow_array(type_id id, Ts&&... args)
{
switch (id) {
case type_id::BOOL8: return std::make_shared<arrow::BooleanArray>(std::forward<Ts>(args)...);
@@ -123,7 +123,7 @@ std::shared_ptr<arrow::Table> to_arrow(table_view input,
* @copydoc cudf::to_arrow(cudf::scalar const& input, column_metadata const& metadata,
* rmm::cuda_stream_view stream, arrow::MemoryPool* ar_mr)
*/
- std::shared_ptr<arrow::Scalar> to_arrow(cudf::scalar const& input,
+ std::shared_ptr<arrow::Scalar> to_arrow(scalar const& input,
column_metadata const& metadata,
rmm::cuda_stream_view stream,
arrow::MemoryPool* ar_mr);
8 changes: 4 additions & 4 deletions cpp/include/cudf/detail/normalizing_iterator.cuh
@@ -51,7 +51,7 @@ struct alignas(16) base_normalator {
*/
CUDF_HOST_DEVICE inline Derived& operator++()
{
- Derived& derived = static_cast<Derived&>(*this);
+ auto& derived = static_cast<Derived&>(*this);
derived.p_ += width_;
return derived;
}
@@ -71,7 +71,7 @@
*/
CUDF_HOST_DEVICE inline Derived& operator--()
{
- Derived& derived = static_cast<Derived&>(*this);
+ auto& derived = static_cast<Derived&>(*this);
derived.p_ -= width_;
return derived;
}
@@ -91,7 +91,7 @@
*/
CUDF_HOST_DEVICE inline Derived& operator+=(difference_type offset)
{
- Derived& derived = static_cast<Derived&>(*this);
+ auto& derived = static_cast<Derived&>(*this);
derived.p_ += offset * width_;
return derived;
}
@@ -121,7 +121,7 @@
*/
CUDF_HOST_DEVICE inline Derived& operator-=(difference_type offset)
{
- Derived& derived = static_cast<Derived&>(*this);
+ auto& derived = static_cast<Derived&>(*this);
derived.p_ -= offset * width_;
return derived;
}
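The `auto&` rewrites in this iterator are modernize-use-auto at work: when the initializer is a cast, the target type already appears on the right-hand side, so repeating it on the left adds nothing. The surrounding pattern is the usual CRTP downcast; a stripped-down sketch with a hypothetical stepper type (not the cuDF normalizing iterator):

```cpp
// Minimal CRTP sketch: the base class advances state owned by the derived class.
template <typename Derived>
struct stepper_base {
  Derived& operator++()
  {
    auto& derived = static_cast<Derived&>(*this);  // the type is named once, in the cast
    derived.value += derived.step;
    return derived;
  }
};

struct int_stepper : stepper_base<int_stepper> {
  int value{0};
  int step{1};
};

int main()
{
  int_stepper s;
  ++s;
  ++s;
  return s.value;  // 2
}
```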
22 changes: 12 additions & 10 deletions cpp/include/cudf/detail/structs/utilities.hpp
@@ -25,6 +25,8 @@
#include <rmm/device_buffer.hpp>
#include <rmm/resource_ref.hpp>

+ #include <utility>

namespace cudf::structs::detail {

enum class column_nullability {
@@ -117,7 +119,7 @@ class flattened_table {
std::vector<null_order> const& null_orders_,
std::vector<std::unique_ptr<column>>&& columns_,
temporary_nullable_data&& nullable_data_)
- : _flattened_columns{flattened_columns_},
+ : _flattened_columns{std::move(flattened_columns_)},
_orders{orders_},
_null_orders{null_orders_},
_columns{std::move(columns_)},
@@ -170,11 +172,11 @@ class flattened_table {
* orders, flattened null precedence, alongside the supporting columns and device_buffers
* for the flattened table.
*/
- [[nodiscard]] std::unique_ptr<flattened_table> flatten_nested_columns(
+ [[nodiscard]] std::unique_ptr<cudf::structs::detail::flattened_table> flatten_nested_columns(
table_view const& input,
-   std::vector<order> const& column_order,
-   std::vector<null_order> const& null_precedence,
-   column_nullability nullability,
+   std::vector<cudf::order> const& column_order,
+   std::vector<cudf::null_order> const& null_precedence,
+   cudf::structs::detail::column_nullability nullability,
rmm::cuda_stream_view stream,
rmm::device_async_resource_ref mr);

@@ -194,11 +196,11 @@
* @param mr Device memory resource used to allocate new device memory
* @return A new column with potentially new null mask
*/
- [[nodiscard]] std::unique_ptr<column> superimpose_nulls(bitmask_type const* null_mask,
-   size_type null_count,
-   std::unique_ptr<column>&& input,
-   rmm::cuda_stream_view stream,
-   rmm::device_async_resource_ref mr);
+ [[nodiscard]] std::unique_ptr<cudf::column> superimpose_nulls(bitmask_type const* null_mask,
+   cudf::size_type null_count,
+   std::unique_ptr<cudf::column>&& input,
+   rmm::cuda_stream_view stream,
+   rmm::device_async_resource_ref mr);

/**
* @brief Push down nulls from the given input column into its children columns, using bitwise AND.
9 changes: 6 additions & 3 deletions cpp/include/cudf/detail/utilities/pinned_host_vector.hpp
@@ -149,11 +149,11 @@ class pinned_allocator {
* It is the responsibility of the caller to initialize the
* objects at the returned \c pointer.
*/
- __host__ inline pointer allocate(size_type cnt, const_pointer /*hint*/ = 0)
+ __host__ inline pointer allocate(size_type cnt, const_pointer /*hint*/ = nullptr)
{
if (cnt > this->max_size()) { throw std::bad_alloc(); } // end if

- pointer result(0);
+ pointer result(nullptr);
CUDF_CUDA_TRY(cudaMallocHost(reinterpret_cast<void**>(&result), cnt * sizeof(value_type)));
return result;
}
@@ -183,7 +183,10 @@
* @return The maximum number of objects that may be allocated
* by a single call to \p allocate().
*/
- inline size_type max_size() const { return (std::numeric_limits<size_type>::max)() / sizeof(T); }
+ [[nodiscard]] inline size_type max_size() const
+ {
+   return (std::numeric_limits<size_type>::max)() / sizeof(T);
+ }

/**
* @brief This method tests this \p pinned_allocator for equality to
4 changes: 2 additions & 2 deletions cpp/include/cudf/detail/utilities/rmm_host_vector.hpp
@@ -86,7 +86,7 @@ class rmm_host_allocator {
using size_type = std::size_t; ///< The type used for the size of the allocation
using difference_type = std::ptrdiff_t; ///< The type of the distance between two pointers

- typedef cuda::std::true_type propagate_on_container_move_assignment;
+ using propagate_on_container_move_assignment = cuda::std::true_type;

/**
* @brief converts a `rmm_host_allocator<T>` to `rmm_host_allocator<U>`
@@ -147,7 +147,7 @@
* @return The maximum number of objects that may be allocated
* by a single call to \p allocate().
*/
- constexpr inline size_type max_size() const
+ [[nodiscard]] constexpr inline size_type max_size() const
{
return (std::numeric_limits<size_type>::max)() / sizeof(T);
}
2 changes: 1 addition & 1 deletion cpp/include/cudf/detail/utilities/stream_pool.hpp
@@ -73,7 +73,7 @@ class cuda_stream_pool {
*
* @return the number of stream objects in the pool
*/
- virtual std::size_t get_stream_pool_size() const = 0;
+ [[nodiscard]] virtual std::size_t get_stream_pool_size() const = 0;
};

/**
6 changes: 3 additions & 3 deletions cpp/include/cudf/fixed_point/fixed_point.hpp
@@ -291,14 +291,14 @@ class fixed_point {
*
* @return The underlying value of the `fixed_point` number
*/
- CUDF_HOST_DEVICE inline rep value() const { return _value; }
+ CUDF_HOST_DEVICE [[nodiscard]] inline rep value() const { return _value; }

/**
* @brief Method that returns the scale of the `fixed_point` number
*
* @return The scale of the `fixed_point` number
*/
- CUDF_HOST_DEVICE inline scale_type scale() const { return _scale; }
+ CUDF_HOST_DEVICE [[nodiscard]] inline scale_type scale() const { return _scale; }

/**
* @brief Explicit conversion operator to `bool`
@@ -573,7 +573,7 @@ class fixed_point {
* @param scale The `scale` of the returned `fixed_point` number
* @return `fixed_point` number with a new `scale`
*/
- CUDF_HOST_DEVICE inline fixed_point<Rep, Rad> rescaled(scale_type scale) const
+ CUDF_HOST_DEVICE [[nodiscard]] inline fixed_point<Rep, Rad> rescaled(scale_type scale) const
{
if (scale == _scale) { return *this; }
Rep const value = detail::shift<Rep, Rad>(_value, scale_type{scale - _scale});
4 changes: 3 additions & 1 deletion cpp/include/cudf/interop.hpp
@@ -40,6 +40,8 @@

#include <rmm/mr/device/per_device_resource.hpp>

+ #include <utility>

struct DLManagedTensor;

struct ArrowDeviceArray;
@@ -121,7 +123,7 @@ struct column_metadata {
*
* @param _name Name of the column
*/
- column_metadata(std::string const& _name) : name(_name) {}
+ column_metadata(std::string _name) : name(std::move(_name)) {}
column_metadata() = default;
};
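This constructor change, like the `udf_aggregation` one earlier in the diff, is modernize-pass-by-value: taking the `std::string` by value and moving it into the member costs one move for rvalue arguments and one copy plus one move for lvalues, instead of an unconditional copy from a `const&`. A self-contained sketch with a hypothetical type, not the cuDF struct:

```cpp
#include <string>
#include <utility>

struct tagged {
  std::string name;

  // Before: tagged(std::string const& n) : name(n) {}  -- always copies into the member.
  // After: the argument is copied or moved into the parameter, then moved into the member.
  explicit tagged(std::string n) : name(std::move(n)) {}
};

int main()
{
  std::string label = "column_0";
  tagged a{label};             // one copy (into the parameter) + one move (into the member)
  tagged b{std::move(label)};  // two moves, no copy
  tagged c{"temporary"};       // parameter constructed from the literal, then one move
  return (a.name.size() + b.name.size() + c.name.size()) > 0 ? 0 : 1;
}
```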
