[clang-tidy] NO.8 enable cppcoreguidelines-narrowing-conversions. step:4 (#57114)
gouzil authored Sep 15, 2023
1 parent 534c804 commit 2ad4d49
Showing 115 changed files with 579 additions and 495 deletions.
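The check reports implicit conversions that can silently lose range or precision: size_t to int, int64_t to int, integer to floating point, and similar. The edits in this commit follow two patterns, shown in the minimal sketch below (the function names are illustrative and not taken from the Paddle sources): spell the narrowing out with static_cast, or keep the expression unchanged and suppress the diagnostic with // NOLINT where the narrowing is intentional.

#include <vector>

// Hypothetical example: a size_t value narrowed to int.
int CountElements(const std::vector<float>& values) {
  // values.size() is size_t; an implicit assignment to int would be
  // flagged, so the narrowing is made explicit.
  return static_cast<int>(values.size());
}

// Hypothetical example: an int loop index narrowed to float.
void FillRamp(float* dst, int count) {
  for (int i = 0; i < count; ++i) {
    dst[i] = static_cast<float>(i);  // explicit int -> float conversion
    // Alternative used in some of the tests below, where the implicit
    // conversion is deliberate and a cast would only add noise:
    // dst[i] = i / 3;  // NOLINT
  }
}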
2 changes: 1 addition & 1 deletion .clang-tidy
@@ -156,7 +156,7 @@ cppcoreguidelines-avoid-c-arrays,
cppcoreguidelines-c-copy-assignment-signature,
cppcoreguidelines-explicit-virtual-functions,
-cppcoreguidelines-init-variables,
- -cppcoreguidelines-narrowing-conversions,
+ cppcoreguidelines-narrowing-conversions,
-cppcoreguidelines-no-malloc,
-cppcoreguidelines-pro-type-const-cast,
-cppcoreguidelines-pro-type-member-init,
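In the .clang-tidy Checks list a leading '-' disables a check, so the change above simply drops the '-' in front of cppcoreguidelines-narrowing-conversions while the neighboring entries stay disabled; from this commit on, the check is enforced for the whole repository. To reproduce the diagnostics locally, a command along the lines of clang-tidy -checks='-*,cppcoreguidelines-narrowing-conversions' <file>.cc should work (illustrative invocation; the exact compile flags come from the project's compilation database).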
@@ -117,7 +117,7 @@ std::vector<DimTrans*> MakeReshapeDimTrans(

if (tgt_splitted_shape.size() > 0) {
std::vector<DimTrans*> input_dims;
- for (int64_t i = 0, n = src_dims.size(); i < n; i++) {
+ for (int i = 0, n = static_cast<int>(src_dims.size()); i < n; i++) {
int64_t in_dim = src_dims[i];
if (src_shape[in_dim] > 1) {
input_dims.emplace_back(new InputDim(in_dim));
@@ -141,7 +141,7 @@ paddle::distributed::auto_parallel::ReshapeSPMDRule::InferForward(
const std::vector<DistTensorSpec>& input_specs,
const paddle::framework::AttributeMap& attrs) {
// step0: Verify Input Args Based on Reshape Logic
- int64_t ninputs = input_specs.size();
+ int64_t ninputs = static_cast<int64_t>(input_specs.size());
PADDLE_ENFORCE_EQ(
ninputs,
1,
7 changes: 4 additions & 3 deletions paddle/fluid/eager/auto_code_generator/eager_generator.cc
@@ -949,15 +949,15 @@ static bool CollectGradInformationFromOpInfo(
op_base_infos->resize(grad_node->size());
for (auto iter = grad_node->begin(); iter < grad_node->end(); iter++) {
// Each OpBase
- int index = std::distance(grad_node->begin(), iter);
+ int index = static_cast<int>(std::distance(grad_node->begin(), iter));
paddle::imperative::OpBase& op_base = *iter;
(*op_base_infos)[index].SetOpBaseType(op_base.Type());
}

/* ------ Get Grad ins/outs/attrs ---- */
VLOG(6) << "In function size: " << grad_node->size();
for (auto iter = grad_node->begin(); iter < grad_node->end(); iter++) {
- int index = std::distance(grad_node->begin(), iter);
+ int index = static_cast<int>(std::distance(grad_node->begin(), iter));
auto* op_base_grad_ins = (*op_base_infos)[index].GetMutableGradIns();
auto* op_base_grad_outs = (*op_base_infos)[index].GetMutableGradOuts();
auto* op_base_grad_attrs = (*op_base_infos)[index].GetMutableGradAttrs();
@@ -3160,7 +3160,8 @@ static void DygraphCodeGeneration(const std::string& output_dir,
op_info_map_need_gen.emplace(pair);
}

- int each_cc_file_api_size = op_info_map_need_gen.size() / split_count;
+ int each_cc_file_api_size =
+ static_cast<int>(op_info_map_need_gen.size() / split_count);
if (op_info_map_need_gen.size() % split_count != 0) {
each_cc_file_api_size++;
}
39 changes: 21 additions & 18 deletions paddle/fluid/eager/custom_operator/custom_operator_node.cc
@@ -59,7 +59,7 @@ static void ConstructFwdAndBwdMap(
<< "'s No." << j << " inputs: " << inputs_names[j]
<< " related to No." << i
<< " grad_outputs: " << grad_outputs_names[i];
- in_out_map[op_type][1][0][j] = i;
+ in_out_map[op_type][1][0][j] = i; // NOLINT
}
}
} else {
@@ -71,7 +71,7 @@ static void ConstructFwdAndBwdMap(
<< "'s No." << j << " inputs: " << inputs_names[j]
<< " related to No." << i
<< " grad_outputs: " << grad_outputs_names[i];
- in_out_map[op_type][1][0][j] = i;
+ in_out_map[op_type][1][0][j] = i; // NOLINT
}
}
} else {
@@ -84,7 +84,7 @@ static void ConstructFwdAndBwdMap(
<< "'s No." << j << " inputs: " << inputs_names[j]
<< " related to No." << i
<< " grad_outputs: " << grad_outputs_names[i];
- in_out_map[op_type][1][0][j] = i;
+ in_out_map[op_type][1][0][j] = i; // NOLINT
}
}
} else {
@@ -107,7 +107,7 @@ static void ConstructFwdAndBwdMap(
<< "'s No." << j << " outputs: " << outputs_names[j]
<< " related to No." << i
<< " grad_inputs's grad: " << grad_inputs_names[i];
- in_out_map[op_type][1][1][j] = i;
+ in_out_map[op_type][1][1][j] = i; // NOLINT
}
}
} else {
@@ -120,7 +120,7 @@ static void ConstructFwdAndBwdMap(
<< "'s No." << j << " outputs: " << outputs_names[j]
<< " related to No." << i
<< " grad_inputs fwd outputs: " << grad_inputs_names[i];
- in_out_map[op_type][1][2][j] = i;
+ in_out_map[op_type][1][2][j] = i; // NOLINT
}
}
} else {
@@ -130,7 +130,7 @@ static void ConstructFwdAndBwdMap(
<< "'s No." << j << " inputs: " << inputs_names[j]
<< " related to No." << i
<< " grad_inputs fwd inputs: " << grad_inputs_names[i];
- in_out_map[op_type][1][3][j] = i;
+ in_out_map[op_type][1][3][j] = i; // NOLINT
}
}
}
@@ -183,9 +183,10 @@ RunCustomOpNode::operator()(paddle::small_vector<std::vector<paddle::Tensor>,
<< ", whose grad_inputs_name size is: " << grad_inputs_name.size();
auto hooked_grads = ApplyGradientHooks(grads);
for (size_t i = 0; i < hooked_grads.size(); i++) {
- if (map[0][1].find(i) != map[0][1].end()) {
- VLOG(7) << "Insert grad: " << i << " to grad_inputs: " << map[0][1].at(i);
- tmp_ins[map[0][1].at(i)] = hooked_grads[i];
+ if (map[0][1].find(static_cast<int>(i)) != map[0][1].end()) {
+ VLOG(7) << "Insert grad: " << i
+ << " to grad_inputs: " << map[0][1].at(static_cast<int>(i));
+ tmp_ins[map[0][1].at(static_cast<int>(i))] = hooked_grads[i];
}
}

@@ -227,8 +228,8 @@ RunCustomOpNode::operator()(paddle::small_vector<std::vector<paddle::Tensor>,
tmp_outs(grad_outputs_names.size());
VLOG(6) << "Prepare Grad outputs for size: " << grad_outputs_names.size();
for (size_t i = 0; i < OutputMeta().size(); i++) {
- if (map[0][0].find(i) != map[0][0].end()) {
- int grad_output_idx = map[0][0].at(i);
+ if (map[0][0].find(static_cast<int>(i)) != map[0][0].end()) {
+ int grad_output_idx = map[0][0].at(static_cast<int>(i));
VLOG(7) << "Insert grad outputs: " << i
<< " with size: " << OutputMeta()[grad_output_idx].size()
<< " to tmp_outputs: " << grad_output_idx;
@@ -316,8 +317,9 @@ RunCustomOpNode::operator()(paddle::small_vector<std::vector<paddle::Tensor>,
const std::vector<paddle::Tensor>& in_tensors = ctx.InputsBetween(
ctx.InputRangeAt(i).first, ctx.InputRangeAt(i).second);

- if (slot_map[1][0].find(i) != slot_map[1][0].end()) {
- grad_node->SetGradOutMeta(in_tensors, slot_map[1][0].at(i));
+ if (slot_map[1][0].find(static_cast<int>(i)) != slot_map[1][0].end()) {
+ grad_node->SetGradOutMeta(in_tensors,
+ slot_map[1][0].at(static_cast<int>(i)));
} else {
grad_node->SetGradOutMeta(in_tensors, slot_ins_num - 1 - no_grad_cnt);
no_grad_cnt++;
@@ -397,9 +399,10 @@ RunCustomOpDoubleGradNode::operator()(
auto hooked_grads = ApplyGradientHooks(grads);

for (size_t i = 0; i < hooked_grads.size(); i++) {
- if (map[1][1].find(i) != map[1][1].end()) {
- VLOG(7) << "Insert grad: " << i << " to grad_inputs: " << map[1][1].at(i);
- tmp_ins[map[1][1].at(i)] = hooked_grads[i];
+ if (map[1][1].find(static_cast<int>(i)) != map[1][1].end()) {
+ VLOG(7) << "Insert grad: " << i
+ << " to grad_inputs: " << map[1][1].at(static_cast<int>(i));
+ tmp_ins[map[1][1].at(static_cast<int>(i))] = hooked_grads[i];
}
}

@@ -426,8 +429,8 @@ RunCustomOpDoubleGradNode::operator()(
VLOG(6) << "Prepare Grad outputs for size: " << grad_outputs_names.size();

for (size_t i = 0; i < OutputMeta().size(); i++) {
- if (map[1][0].find(i) != map[1][0].end()) {
- int grad_output_idx = map[1][0].at(i);
+ if (map[1][0].find(static_cast<int>(i)) != map[1][0].end()) {
+ int grad_output_idx = map[1][0].at(static_cast<int>(i));
VLOG(7) << "Insert grad outputs: " << i
<< " with size: " << OutputMeta()[grad_output_idx].size()
<< " to tmp_outputs: " << grad_output_idx;
2 changes: 1 addition & 1 deletion paddle/fluid/eager/pylayer/py_layer_node.cc
@@ -56,7 +56,7 @@ GradNodePyLayer::operator()(
grads.size(),
ctx->forward_output_tensor_is_duplicable.size()));

- auto backward_args = PyTuple_New(grads.size());
+ auto backward_args = PyTuple_New(static_cast<Py_ssize_t>(grads.size()));
for (size_t i = 0; i < grads.size(); i++) {
if (ctx->forward_output_tensor_is_duplicable[i]) {
PyObject* pylist = PyList_New((Py_ssize_t)grads[i].size());
10 changes: 5 additions & 5 deletions paddle/fluid/framework/data_type_transform_test.cc
@@ -49,13 +49,13 @@ TEST(DataTypeTransform, CPUTransform) {
int data_number = 2 * 3;

for (int i = 0; i < data_number; ++i) {
- ptr[i] = i / 3;
+ ptr[i] = i / 3; // NOLINT
}

paddle::framework::TransDataType(kernel_fp32, kernel_fp64, in, &out);
double* out_data_double = out.data<double>();
for (int i = 0; i < data_number; ++i) {
- EXPECT_EQ(out_data_double[i], static_cast<double>(i / 3));
+ EXPECT_EQ(out_data_double[i], static_cast<double>(i / 3)); // NOLINT
}

paddle::framework::TransDataType(kernel_fp32, kernel_int32, in, &out);
@@ -113,7 +113,7 @@ TEST(DataTypeTransform, CPUTransform) {
float* in_data_float =
in.mutable_data<float>(phi::make_ddim({2, 3}), place);
for (int i = 0; i < data_number; ++i) {
- in_data_float[i] = i;
+ in_data_float[i] = static_cast<float>(i);
}

paddle::framework::TransDataType(kernel_fp32, kernel_fp16, in, &out);
@@ -227,7 +227,7 @@ TEST(DataTypeTransform, CPUTransform) {
float* in_data_float =
in.mutable_data<float>(phi::make_ddim({2, 3}), place);
for (int i = 0; i < data_number; ++i) {
- in_data_float[i] = i;
+ in_data_float[i] = static_cast<float>(i);
}

paddle::framework::TransDataType(kernel_fp32, kernel_bf16, in, &out);
@@ -341,7 +341,7 @@ TEST(DataTypeTransform, CPUTransform) {
float* in_data_float =
in.mutable_data<float>(phi::make_ddim({2, 3}), place);
for (int i = 0; i < data_number; ++i) {
- in_data_float[i] = i;
+ in_data_float[i] = static_cast<float>(i);
}

paddle::framework::TransDataType(kernel_fp32, kernel_int32, in, &out);
2 changes: 1 addition & 1 deletion paddle/fluid/framework/details/build_strategy_test.cc
@@ -73,7 +73,7 @@ static std::vector<platform::Place> CreatePlaces(size_t num, bool use_cuda) {
result.reserve(num);
for (size_t i = 0; i < num; ++i) {
if (use_cuda) {
- result.emplace_back(platform::CUDAPlace(i));
+ result.emplace_back(platform::CUDAPlace(static_cast<int>(i)));
} else {
result.emplace_back(platform::CPUPlace());
}
@@ -148,8 +148,11 @@ struct TestFusedBroadcastOpHandle : TestBroadcastOpHandle {
for (size_t i = 0; i < input_scope_idxes.size(); ++i) {
const std::string& varname("out_var" + std::to_string(i));
for (size_t j = 0; j < place_list_.size(); ++j) {
- SelectedRowsEqual(
- varname, input_scope_idxes[i], send_vector[i], rows, height);
+ SelectedRowsEqual(varname,
+ static_cast<int>(input_scope_idxes[i]),
+ send_vector[i],
+ rows,
+ height);
}
}
}
@@ -63,11 +63,11 @@ TEST(DeleteWeightDequantLinearOpPass, basic) {
graph->Set("__param_scope__", CreateParamScope<float>());
auto pass =
PassRegistry::Instance().Get("delete_weight_dequant_linear_op_pass");
- int num_nodes_before = graph->Nodes().size();
+ int num_nodes_before = static_cast<int>(graph->Nodes().size());
VLOG(3) << DebugString(graph);

graph.reset(pass->Apply(graph.release()));
- int num_nodes_after = graph->Nodes().size();
+ int num_nodes_after = static_cast<int>(graph->Nodes().size());
int num_dequant_nodes_after = GetNumOpNodes(graph, "dequantize_linear");
VLOG(3) << DebugString(graph);

@@ -110,11 +110,11 @@ TEST(DeleteWeightDequantLinearOpPass, basic_fp16) {
graph->Set("__param_scope__", CreateParamScope<phi::dtype::float16>());
auto pass =
PassRegistry::Instance().Get("delete_weight_dequant_linear_op_pass");
- int num_nodes_before = graph->Nodes().size();
+ int num_nodes_before = static_cast<int>(graph->Nodes().size());
VLOG(3) << DebugString(graph);

graph.reset(pass->Apply(graph.release()));
- int num_nodes_after = graph->Nodes().size();
+ int num_nodes_after = static_cast<int>(graph->Nodes().size());
int num_dequant_nodes_after = GetNumOpNodes(graph, "dequantize_linear");
VLOG(3) << DebugString(graph);

4 changes: 2 additions & 2 deletions paddle/fluid/framework/ir/dense_fc_to_sparse_pass_tester.cc
@@ -74,13 +74,13 @@ TEST(FCFusePass, basic) {
fuse_pass->Set("use_gpu", new bool(true));
sparse_pass->Set("use_gpu", new bool(true));
graph->Set("__param_scope__", CreateParamScope());
- int num_nodes_before = graph->Nodes().size();
+ int num_nodes_before = static_cast<int>(graph->Nodes().size());
int num_mul_nodes_before = GetNumOpNodes(graph, "mul");
VLOG(3) << DebugString(graph);

graph.reset(fuse_pass->Apply(graph.release()));
graph.reset(sparse_pass->Apply(graph.release()));
- int num_nodes_after = graph->Nodes().size();
+ int num_nodes_after = static_cast<int>(graph->Nodes().size());
int num_fc_nodes_after = GetNumOpNodes(graph, "fc");
int num_sparse_fc_nodes_after = GetNumOpNodes(graph, "sparse_fc");
VLOG(3) << DebugString(graph);
@@ -121,12 +121,12 @@ TEST(DenseMultiHeadMatmulToSparsePass, basic) {

if (fuse_pass.get() == nullptr || sparse_pass.get() == nullptr)
LOG(INFO) << "asdfasdf";
- int num_nodes_before = graph->Nodes().size();
+ int num_nodes_before = static_cast<int>(graph->Nodes().size());
VLOG(3) << DebugString(graph);

graph.reset(fuse_pass->Apply(graph.release()));
graph.reset(sparse_pass->Apply(graph.release()));
- int num_nodes_after = graph->Nodes().size();
+ int num_nodes_after = static_cast<int>(graph->Nodes().size());
int num_fused_nodes_after = GetNumOpNodes(graph, "sparse_multihead_matmul");
VLOG(3) << DebugString(graph);

@@ -47,11 +47,11 @@ TEST(FCElementwiseLayerNormFusePass, basic) {
std::unique_ptr<ir::Graph> graph(new ir::Graph(layers.main_program()));
auto pass =
PassRegistry::Instance().Get("fc_elementwise_layernorm_fuse_pass");
- int num_nodes_before = graph->Nodes().size();
+ int num_nodes_before = static_cast<int>(graph->Nodes().size());
VLOG(3) << DebugString(graph);

graph.reset(pass->Apply(graph.release()));
- int num_nodes_after = graph->Nodes().size();
+ int num_nodes_after = static_cast<int>(graph->Nodes().size());
int num_fused_nodes_after =
GetNumOpNodes(graph, "fused_fc_elementwise_layernorm");
VLOG(3) << DebugString(graph);
4 changes: 2 additions & 2 deletions paddle/fluid/framework/ir/fc_fuse_pass_tester.cc
@@ -71,12 +71,12 @@ TEST(FCFusePass, basic) {
auto pass = PassRegistry::Instance().Get("fc_fuse_pass");
pass->Set("use_gpu", new bool(true));
graph->Set("__param_scope__", CreateParamScope());
- int num_nodes_before = graph->Nodes().size();
+ int num_nodes_before = static_cast<int>(graph->Nodes().size());
int num_mul_nodes_before = GetNumOpNodes(graph, "mul");
VLOG(3) << DebugString(graph);

graph.reset(pass->Apply(graph.release()));
- int num_nodes_after = graph->Nodes().size();
+ int num_nodes_after = static_cast<int>(graph->Nodes().size());
int num_fc_nodes_after = GetNumOpNodes(graph, "fc");
VLOG(3) << DebugString(graph);

4 changes: 2 additions & 2 deletions paddle/fluid/framework/ir/fc_gru_fuse_pass_tester.cc
@@ -24,12 +24,12 @@ TEST(FcGruFusePass, basic) {
auto pass = PassRegistry::Instance().Get("fc_gru_fuse_pass");
pass->Set("use_gpu", new bool(true));
graph->Set("__param_scope__", CreateParamScope());
- int num_nodes_before = graph->Nodes().size();
+ int num_nodes_before = static_cast<int>(graph->Nodes().size());
int num_gru_nodes_before = GetNumOpNodes(graph, "gru");
VLOG(3) << DebugString(graph);

graph.reset(pass->Apply(graph.release()));
- int num_nodes_after = graph->Nodes().size();
+ int num_nodes_after = static_cast<int>(graph->Nodes().size());
int num_fuse_gru_nodes_after = GetNumOpNodes(graph, "fusion_gru");
VLOG(3) << DebugString(graph);

4 changes: 2 additions & 2 deletions paddle/fluid/framework/ir/fc_lstm_fuse_pass_tester.cc
@@ -25,12 +25,12 @@ TEST(FcLstmFusePass, basic) {
auto pass = PassRegistry::Instance().Get("fc_lstm_fuse_pass");
pass->Set("use_gpu", new bool(false));
graph->Set("__param_scope__", CreateParamScope());
- int num_nodes_before = graph->Nodes().size();
+ int num_nodes_before = static_cast<int>(graph->Nodes().size());
int num_lstm_nodes_before = GetNumOpNodes(graph, "lstm");
VLOG(3) << DebugString(graph);

graph.reset(pass->Apply(graph.release()));
- int num_nodes_after = graph->Nodes().size();
+ int num_nodes_after = static_cast<int>(graph->Nodes().size());
int num_fusion_lstm_nodes_after = GetNumOpNodes(graph, "fusion_lstm");
VLOG(3) << DebugString(graph);
