From a388d78a293c5b294e9788da5687dac459f29d48 Mon Sep 17 00:00:00 2001
From: gouzi <530971494@qq.com>
Date: Fri, 11 Aug 2023 21:27:31 +0800
Subject: [PATCH] [clang-tidy] Enable the cppcoreguidelines-avoid-c-arrays check

---
 .clang-tidy                                   |  2 +-
 .../controlflow/conditional_block_op.cc       | 12 +++---
 paddle/fluid/operators/recurrent_op.cc        |  8 ++--
 paddle/fluid/pybind/eager.cc                  |  2 +-
 paddle/fluid/pybind/eager_functions.cc        |  2 +-
 paddle/fluid/pybind/eager_math_op_patch.cc    |  2 +-
 paddle/fluid/pybind/eager_method.cc           | 38 ++++++++++---------
 paddle/fluid/pybind/eager_properties.cc       |  2 +-
 paddle/fluid/pybind/ops_api.cc                |  2 +-
 paddle/phi/backends/cpu/cpu_info.cc           | 10 ++---
 .../kernels/funcs/strided_reshape_utils.cc    |  5 ++-
 .../phi/api/test_strings_lower_upper_api.cc   | 11 ++++--
 .../test_strings_lower_upper_dev_api.cc       |  7 +++-
 13 files changed, 57 insertions(+), 46 deletions(-)

diff --git a/.clang-tidy b/.clang-tidy
index 7c1f905f2eb3c..108414b4ec11e 100644
--- a/.clang-tidy
+++ b/.clang-tidy
@@ -151,7 +151,7 @@ bugprone-unused-raii,
 -clang-analyzer-valist.Uninitialized,
 -clang-analyzer-valist.Unterminated,
 -clang-analyzer-valist.ValistBase,
--cppcoreguidelines-avoid-c-arrays,
+cppcoreguidelines-avoid-c-arrays,
 -cppcoreguidelines-avoid-goto,
 -cppcoreguidelines-c-copy-assignment-signature,
 -cppcoreguidelines-explicit-virtual-functions,
diff --git a/paddle/fluid/operators/controlflow/conditional_block_op.cc b/paddle/fluid/operators/controlflow/conditional_block_op.cc
index b80e0fc097ec9..a1ef201912dd9 100644
--- a/paddle/fluid/operators/controlflow/conditional_block_op.cc
+++ b/paddle/fluid/operators/controlflow/conditional_block_op.cc
@@ -30,12 +30,12 @@ PHI_DECLARE_bool(use_mkldnn);
 namespace paddle {
 namespace operators {
 
-const char ConditionalOp::kInputs[] = "Input";        // NOLINT
-const char ConditionalOp::kOutputs[] = "Out";         // NOLINT
-const char ConditionalOp::kCondition[] = "Cond";      // NOLINT
-const char ConditionalOp::kScope[] = "Scope";         // NOLINT
-const char ConditionalOp::kSkipEagerDeletionVars[] =
-    "skip_eager_deletion_vars";  // NOLINT
+const char ConditionalOp::kInputs[] = "Input";    // NOLINT
+const char ConditionalOp::kOutputs[] = "Out";     // NOLINT
+const char ConditionalOp::kCondition[] = "Cond";  // NOLINT
+const char ConditionalOp::kScope[] = "Scope";     // NOLINT
+const char ConditionalOp::kSkipEagerDeletionVars[] =  // NOLINT
+    "skip_eager_deletion_vars";
 
 using Executor = framework::Executor;
 using ExecutorPrepareContext = framework::ExecutorPrepareContext;
diff --git a/paddle/fluid/operators/recurrent_op.cc b/paddle/fluid/operators/recurrent_op.cc
index af925b13a0bb8..321c7ad54a5cb 100644
--- a/paddle/fluid/operators/recurrent_op.cc
+++ b/paddle/fluid/operators/recurrent_op.cc
@@ -41,14 +41,14 @@ const char RecurrentBase::kStates[] = "states";  // NOLINT
 const char RecurrentBase::kStepBlock[] = "sub_block";  // NOLINT
 const char RecurrentBase::kReverse[] = "reverse";      // NOLINT
 const char RecurrentBase::kIsTrain[] = "is_train";     // NOLINT
-const char RecurrentBase::kSkipEagerDeletionVars[] =
-    "skip_eager_deletion_vars";  // NOLINT
+const char RecurrentBase::kSkipEagerDeletionVars[] =  // NOLINT
+    "skip_eager_deletion_vars";
 #define GRAD_SUFFIX "@GRAD"
 const char RecurrentBase::kInputGrads[] = "inputs" GRAD_SUFFIX;      // NOLINT
 const char RecurrentBase::kOutputGrads[] = "outputs" GRAD_SUFFIX;    // NOLINT
 const char RecurrentBase::kParamGrads[] = "parameters" GRAD_SUFFIX;  // NOLINT
-const char RecurrentBase::kInitStateGrads[] =
-    "initial_states" GRAD_SUFFIX;  // NOLINT
+const char RecurrentBase::kInitStateGrads[] =  // NOLINT
+    "initial_states" GRAD_SUFFIX;
 
 static void ClearStepScopes(const platform::DeviceContext &dev_ctx,
                             framework::Scope *parent_scope,
diff --git a/paddle/fluid/pybind/eager.cc b/paddle/fluid/pybind/eager.cc
index bd2f5e75f50bd..2e449f251cf98 100644
--- a/paddle/fluid/pybind/eager.cc
+++ b/paddle/fluid/pybind/eager.cc
@@ -701,7 +701,7 @@ void AutoInitStringTensorByStringTensor(
   InitStringTensorWithStringTensor(py_tensor_ptr, src_tensor, place, act_name);
 }
 
-PyDoc_STRVAR(
+PyDoc_STRVAR(  // NOLINT
     TensorDoc,
     R"DOC(Tensor($self, /, value, place, persistable, zero_copy, name, stop_gradient, dims, dtype, type)
 --
diff --git a/paddle/fluid/pybind/eager_functions.cc b/paddle/fluid/pybind/eager_functions.cc
index 18fa888062444..a78c70f5d29a6 100644
--- a/paddle/fluid/pybind/eager_functions.cc
+++ b/paddle/fluid/pybind/eager_functions.cc
@@ -1273,7 +1273,7 @@ static PyObject* eager_api_set_master_grads(PyObject* self,
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
 
-PyMethodDef variable_functions[] = {
+PyMethodDef variable_functions[] = {  // NOLINT
     // TODO(jiabin): Remove scale when we have final state tests
     {"scale",
      (PyCFunction)(void (*)())eager_api_scale,
diff --git a/paddle/fluid/pybind/eager_math_op_patch.cc b/paddle/fluid/pybind/eager_math_op_patch.cc
index e26a35490ccbc..0418740f129de 100644
--- a/paddle/fluid/pybind/eager_math_op_patch.cc
+++ b/paddle/fluid/pybind/eager_math_op_patch.cc
@@ -1833,7 +1833,7 @@ static PyObject* tensor__eq__method(TensorObject* self,
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
 
-PyMethodDef math_op_patch_methods[] = {
+PyMethodDef math_op_patch_methods[] = {  // NOLINT
     {"__add__",
      (PyCFunction)(void (*)())tensor__add__method,
      METH_VARARGS | METH_KEYWORDS,
diff --git a/paddle/fluid/pybind/eager_method.cc b/paddle/fluid/pybind/eager_method.cc
index e78e3bb106df0..56fa4c777e6a6 100644
--- a/paddle/fluid/pybind/eager_method.cc
+++ b/paddle/fluid/pybind/eager_method.cc
@@ -101,7 +101,8 @@ Py_ssize_t GetSliceIndexFromPyObject(PyObject* obj) {
   }
 }
 
-PyDoc_STRVAR(tensor_method_numpy__doc__, R"DOC(numpy($self, /)
+PyDoc_STRVAR(tensor_method_numpy__doc__,  // NOLINT
+             R"DOC(numpy($self, /)
 --
 
 Returns a numpy array shows the value of current Tensor.
@@ -128,8 +129,8 @@ static PyObject* tensor_method_numpy(TensorObject* self,
   EAGER_TRY
   auto& api = pybind11::detail::npy_api::get();
   if (!self->tensor.impl()) {
-    Py_intptr_t py_dims[paddle::framework::DDim::kMaxRank];
-    Py_intptr_t py_strides[paddle::framework::DDim::kMaxRank];
+    Py_intptr_t py_dims[paddle::framework::DDim::kMaxRank];     // NOLINT
+    Py_intptr_t py_strides[paddle::framework::DDim::kMaxRank];  // NOLINT
     py_dims[0] = 0;
     py_strides[0] = 0;
 
@@ -148,8 +149,8 @@ static PyObject* tensor_method_numpy(TensorObject* self,
   auto tensor_dims = self->tensor.shape();
   auto numpy_dtype = TensorDtype2NumpyDtype(self->tensor.type());
   auto sizeof_dtype = phi::SizeOf(self->tensor.type());
-  Py_intptr_t py_dims[paddle::framework::DDim::kMaxRank];
-  Py_intptr_t py_strides[paddle::framework::DDim::kMaxRank];
+  Py_intptr_t py_dims[paddle::framework::DDim::kMaxRank];     // NOLINT
+  Py_intptr_t py_strides[paddle::framework::DDim::kMaxRank];  // NOLINT
   size_t py_rank = tensor_dims.size();
   size_t numel = 1;
   if (py_rank == 0) {
@@ -417,8 +418,8 @@ static PyObject* tensor_method_numpy_for_string_tensor(TensorObject* self,
   if (!self->tensor.impl() || !self->tensor.impl()->initialized()) {
     VLOG(6) << "The StringTensor is uninitialized. Return the empty string "
                "numpy array.";
-    Py_intptr_t py_dims[paddle::framework::DDim::kMaxRank];
-    Py_intptr_t py_strides[paddle::framework::DDim::kMaxRank];
+    Py_intptr_t py_dims[paddle::framework::DDim::kMaxRank];     // NOLINT
+    Py_intptr_t py_strides[paddle::framework::DDim::kMaxRank];  // NOLINT
     py_dims[0] = 0;
     py_strides[0] = 0;
 
@@ -595,7 +596,8 @@ static PyObject* tensor_method_copy_(TensorObject* self,
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
 
-PyDoc_STRVAR(tensor_method_clone__doc__, R"DOC(clone($self, /)
+PyDoc_STRVAR(tensor_method_clone__doc__,  // NOLINT
+             R"DOC(clone($self, /)
 --
 
 Returns a new Tensor, which is clone of origin Tensor, and it remains in the current graph.
@@ -670,7 +672,7 @@ static PyObject* tensor_retain_grads(TensorObject* self,
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
 
-PyDoc_STRVAR(tensor_clear_gradient__doc__,
+PyDoc_STRVAR(tensor_clear_gradient__doc__,  // NOLINT
              R"DOC(clear_gradient($self, set_to_zero=True, /)
 --
 
@@ -893,7 +895,8 @@ static PyObject* tensor__is_shared_underline_tensor_with(TensorObject* self,
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
 
-PyDoc_STRVAR(tensor_method_detach__doc__, R"DOC(detach($self, /)
+PyDoc_STRVAR(tensor_method_detach__doc__,  // NOLINT
+             R"DOC(detach($self, /)
 --
 
 Returns a new Tensor, detached from the current graph.
@@ -1289,8 +1292,8 @@ static PyObject* tensor__getitem_from_offset(TensorObject* self,
   if (tensor.dtype() == proto_type) {                                       \
     auto numpy_dtype = TensorDtype2NumpyDtype(proto_type);                  \
     T b = paddle::pybind::TensorGetElement<T>(tensor, offset);              \
-    Py_intptr_t py_dims[paddle::framework::DDim::kMaxRank];                 \
-    Py_intptr_t py_strides[paddle::framework::DDim::kMaxRank];              \
+    Py_intptr_t py_dims[paddle::framework::DDim::kMaxRank];    /* NOLINT */ \
+    Py_intptr_t py_strides[paddle::framework::DDim::kMaxRank]; /* NOLINT */ \
     auto& api = pybind11::detail::npy_api::get();                           \
     PyObject* array = api.PyArray_NewFromDescr_(                            \
         api.PyArray_Type_,                                                  \
@@ -1636,7 +1639,7 @@ static PyObject* tensor_inplace_assign(TensorObject* self,
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
 
-PyDoc_STRVAR(tensor_method__register_reduce_hook__doc__,
+PyDoc_STRVAR(tensor_method__register_reduce_hook__doc__,  // NOLINT
              R"DOC(_register_backward_hook($self, hook, /)
 --
 
@@ -2025,7 +2028,8 @@ static PyObject* tensor__inplace_version(TensorObject* self,
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
 
-PyDoc_STRVAR(tensor_method_element_size__doc__, R"DOC(element_size($self, /)
+PyDoc_STRVAR(tensor_method_element_size__doc__,  // NOLINT
+             R"DOC(element_size($self, /)
 --
 
 Returns the size in bytes of an element in the Tensor.
@@ -2064,7 +2068,7 @@ static PyObject* tensor_method_element_size(TensorObject* self,
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
 
-PyDoc_STRVAR(tensor_method__bump_inplace_version__doc__,
+PyDoc_STRVAR(tensor_method__bump_inplace_version__doc__,  // NOLINT
              R"DOC(_bump_inplace_version($self, /)
 --
 
@@ -2366,7 +2370,7 @@ static PyObject* tensor_method__is_string_tensor_hold_allocation(
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
 
-PyMethodDef variable_methods[] = {
+PyMethodDef variable_methods[] = {  // NOLINT
     {"numpy",
      (PyCFunction)(void (*)())tensor_method_numpy,
      METH_VARARGS | METH_KEYWORDS,
@@ -2628,7 +2632,7 @@ PyMethodDef variable_methods[] = {
     {nullptr, nullptr, 0, nullptr}};
 
 // variable_methods for core.eager.StringTensor
-PyMethodDef string_tensor_variable_methods[] = {
+PyMethodDef string_tensor_variable_methods[] = {  // NOLINT
     {"numpy",
      (PyCFunction)(void (*)())tensor_method_numpy_for_string_tensor,
      METH_VARARGS | METH_KEYWORDS,
diff --git a/paddle/fluid/pybind/eager_properties.cc b/paddle/fluid/pybind/eager_properties.cc
index f6ade48deb654..d6defb956bd0f 100644
--- a/paddle/fluid/pybind/eager_properties.cc
+++ b/paddle/fluid/pybind/eager_properties.cc
@@ -74,7 +74,7 @@ PyObject* tensor_properties_get_type(TensorObject* self, void* closure) {
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
 
-PyDoc_STRVAR(tensor_is_leaf__doc__,
+PyDoc_STRVAR(tensor_is_leaf__doc__,  // NOLINT
              R"DOC(is_leaf
 
 Whether a Tensor is leaf Tensor.
diff --git a/paddle/fluid/pybind/ops_api.cc b/paddle/fluid/pybind/ops_api.cc
index 56998d621c736..ea6c010ec04ab 100644
--- a/paddle/fluid/pybind/ops_api.cc
+++ b/paddle/fluid/pybind/ops_api.cc
@@ -40,7 +40,7 @@ static PyObject *divide(PyObject *self, PyObject *args, PyObject *kwargs) {
   return static_api_divide(self, args, kwargs);
 }
 
-static PyMethodDef OpsAPI[] = {{"add_n",
+static PyMethodDef OpsAPI[] = {{"add_n",  // NOLINT
                                 (PyCFunction)(void (*)(void))add_n,
                                 METH_VARARGS | METH_KEYWORDS,
                                 "C++ interface function for add_n."},
diff --git a/paddle/phi/backends/cpu/cpu_info.cc b/paddle/phi/backends/cpu/cpu_info.cc
index 852b8de99c052..12ce65183d08c 100644
--- a/paddle/phi/backends/cpu/cpu_info.cc
+++ b/paddle/phi/backends/cpu/cpu_info.cc
@@ -152,12 +152,12 @@ bool MayIUse(const cpu_isa_t cpu_isa) {
 #if !defined(WITH_NV_JETSON) && !defined(PADDLE_WITH_ARM) && \
     !defined(PADDLE_WITH_SW) && !defined(PADDLE_WITH_MIPS) && \
     !defined(PADDLE_WITH_LOONGARCH)
-  int reg[4];
-  cpuid(reg, 0);
+  std::array<int, 4> reg;
+  cpuid(reg.data(), 0);
   int nIds = reg[0];
   if (nIds >= 0x00000001) {
     // EAX = 1
-    cpuid(reg, 0x00000001);
+    cpuid(reg.data(), 0x00000001);
     // AVX: ECX Bit 28
     if (cpu_isa == avx) {
       int avx_mask = (1 << 28);
@@ -166,7 +166,7 @@ bool MayIUse(const cpu_isa_t cpu_isa) {
   }
   if (nIds >= 0x00000007) {
     // EAX = 7
-    cpuid(reg, 0x00000007);
+    cpuid(reg.data(), 0x00000007);
     if (cpu_isa == avx2) {
       // AVX2: EBX Bit 5
       int avx2_mask = (1 << 5);
@@ -184,7 +184,7 @@ bool MayIUse(const cpu_isa_t cpu_isa) {
                 (reg[1] & avx512bw_mask) && (reg[1] & avx512vl_mask));
     }
     // EAX = 7, ECX = 1
-    cpuid(reg, 0x00010007);
+    cpuid(reg.data(), 0x00010007);
     if (cpu_isa == avx512_bf16) {
       // AVX512BF16: EAX Bit 5
       int avx512bf16_mask = (1 << 5);
diff --git a/paddle/phi/kernels/funcs/strided_reshape_utils.cc b/paddle/phi/kernels/funcs/strided_reshape_utils.cc
index 6afe5ead1e9bb..25d73ed69327c 100644
--- a/paddle/phi/kernels/funcs/strided_reshape_utils.cc
+++ b/paddle/phi/kernels/funcs/strided_reshape_utils.cc
@@ -13,6 +13,7 @@
 // limitations under the License.
#include "paddle/phi/kernels/funcs/strided_reshape_utils.h" +#include #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/kernels/reshape_kernel.h" @@ -24,10 +25,10 @@ bool ReshapeStride(const DDim& old_dims, DDim& new_stride) { // NOLINT int64_t numel = product(old_dims); if (numel < 0) { - int64_t tmp[2]; + std::array tmp; tmp[0] = 1; tmp[1] = new_dims.size(); - new_stride = DDim(tmp, 2); + new_stride = DDim(tmp.data(), 2); return true; } else if (numel == 0) { if (old_dims == new_dims) { diff --git a/test/cpp/phi/api/test_strings_lower_upper_api.cc b/test/cpp/phi/api/test_strings_lower_upper_api.cc index 67b723a62ccc4..f66f4059c252c 100644 --- a/test/cpp/phi/api/test_strings_lower_upper_api.cc +++ b/test/cpp/phi/api/test_strings_lower_upper_api.cc @@ -53,8 +53,11 @@ TEST(API, case_convert) { cpu_strings_x_data[i] = strs[i]; } // 2. get expected results - std::string expected_results[] = { - strs[0], strs[0], strs[1], strs[1]}; // NOLINT + std::string expected_results[] = {// NOLINT + strs[0], + strs[0], + strs[1], + strs[1]}; std::transform( strs[0].begin(), strs[0].end(), expected_results[0].begin(), ::tolower); std::transform( @@ -103,8 +106,8 @@ TEST(API, case_convert_utf8) { pstring* cpu_strings_x_data = dev_ctx->template Alloc(cpu_strings_x.get()); - std::string strs[] = {"óÓsscHloëË", - "óÓsscHloëËóÓsscHloëËóÓsscHloëË"}; // NOLINT + std::string strs[] = {"óÓsscHloëË", // NOLINT + "óÓsscHloëËóÓsscHloëËóÓsscHloëË"}; for (int i = 0; i < 2; ++i) { cpu_strings_x_data[i] = strs[i]; } diff --git a/test/cpp/phi/kernels/test_strings_lower_upper_dev_api.cc b/test/cpp/phi/kernels/test_strings_lower_upper_dev_api.cc index 5f0cc69df62ca..ffddcb0c15fa4 100644 --- a/test/cpp/phi/kernels/test_strings_lower_upper_dev_api.cc +++ b/test/cpp/phi/kernels/test_strings_lower_upper_dev_api.cc @@ -56,8 +56,11 @@ TEST(DEV_API, strings_cast_convert) { dense_x_data[1] = long_str; // 2. get expected results - std::string expected_results[] = { - short_str, short_str, long_str, long_str}; // NOLINT + std::string expected_results[] = {// NOLINT + short_str, + short_str, + long_str, + long_str}; std::transform(short_str.begin(), short_str.end(), expected_results[0].begin(),