diff --git a/.clang-tidy b/.clang-tidy
index 27b3a808dd5cf..88295df37c4c4 100644
--- a/.clang-tidy
+++ b/.clang-tidy
@@ -156,7 +156,7 @@ cppcoreguidelines-avoid-c-arrays,
 cppcoreguidelines-c-copy-assignment-signature,
 cppcoreguidelines-explicit-virtual-functions,
 -cppcoreguidelines-init-variables,
--cppcoreguidelines-narrowing-conversions,
+cppcoreguidelines-narrowing-conversions,
 -cppcoreguidelines-no-malloc,
 -cppcoreguidelines-pro-type-const-cast,
 -cppcoreguidelines-pro-type-member-init,
diff --git a/paddle/fluid/distributed/auto_parallel/spmd_rules/reshape_spmd_rule.cc b/paddle/fluid/distributed/auto_parallel/spmd_rules/reshape_spmd_rule.cc
index ec6b3c0e3e0f6..5e0c2c5a92c5b 100644
--- a/paddle/fluid/distributed/auto_parallel/spmd_rules/reshape_spmd_rule.cc
+++ b/paddle/fluid/distributed/auto_parallel/spmd_rules/reshape_spmd_rule.cc
@@ -117,7 +117,7 @@ std::vector<DimTrans*> MakeReshapeDimTrans(
   if (tgt_splitted_shape.size() > 0) {
     std::vector<DimTrans*> input_dims;
-    for (int64_t i = 0, n = src_dims.size(); i < n; i++) {
+    for (int i = 0, n = static_cast<int>(src_dims.size()); i < n; i++) {
       int64_t in_dim = src_dims[i];
       if (src_shape[in_dim] > 1) {
         input_dims.emplace_back(new InputDim(in_dim));
@@ -141,7 +141,7 @@ paddle::distributed::auto_parallel::ReshapeSPMDRule::InferForward(
     const std::vector<DistTensorSpec>& input_specs,
     const paddle::framework::AttributeMap& attrs) {
   // step0: Verify Input Args Based on Reshape Logic
-  int64_t ninputs = input_specs.size();
+  int64_t ninputs = static_cast<int64_t>(input_specs.size());
   PADDLE_ENFORCE_EQ(
       ninputs,
       1,
diff --git a/paddle/fluid/eager/auto_code_generator/eager_generator.cc b/paddle/fluid/eager/auto_code_generator/eager_generator.cc
index d250989199a52..f94fe6d91a582 100644
--- a/paddle/fluid/eager/auto_code_generator/eager_generator.cc
+++ b/paddle/fluid/eager/auto_code_generator/eager_generator.cc
@@ -949,7 +949,7 @@ static bool CollectGradInformationFromOpInfo(
   op_base_infos->resize(grad_node->size());
   for (auto iter = grad_node->begin(); iter < grad_node->end(); iter++) {
     // Each OpBase
-    int index = std::distance(grad_node->begin(), iter);
+    int index = static_cast<int>(std::distance(grad_node->begin(), iter));
     paddle::imperative::OpBase& op_base = *iter;
     (*op_base_infos)[index].SetOpBaseType(op_base.Type());
   }
@@ -957,7 +957,7 @@ static bool CollectGradInformationFromOpInfo(
   /* ------ Get Grad ins/outs/attrs ---- */
   VLOG(6) << "In function size: " << grad_node->size();
   for (auto iter = grad_node->begin(); iter < grad_node->end(); iter++) {
-    int index = std::distance(grad_node->begin(), iter);
+    int index = static_cast<int>(std::distance(grad_node->begin(), iter));
     auto* op_base_grad_ins = (*op_base_infos)[index].GetMutableGradIns();
     auto* op_base_grad_outs = (*op_base_infos)[index].GetMutableGradOuts();
     auto* op_base_grad_attrs = (*op_base_infos)[index].GetMutableGradAttrs();
@@ -3160,7 +3160,8 @@ static void DygraphCodeGeneration(const std::string& output_dir,
     op_info_map_need_gen.emplace(pair);
   }
-  int each_cc_file_api_size = op_info_map_need_gen.size() / split_count;
+  int each_cc_file_api_size =
+      static_cast<int>(op_info_map_need_gen.size() / split_count);
   if (op_info_map_need_gen.size() % split_count != 0) {
     each_cc_file_api_size++;
   }
diff --git a/paddle/fluid/eager/custom_operator/custom_operator_node.cc b/paddle/fluid/eager/custom_operator/custom_operator_node.cc
index cdd1f7bfbe945..af914d3ae3c79 100644
--- a/paddle/fluid/eager/custom_operator/custom_operator_node.cc
+++ b/paddle/fluid/eager/custom_operator/custom_operator_node.cc
@@ -59,7 +59,7 @@ static void ConstructFwdAndBwdMap(
                 << "'s No." << j << " inputs: " << inputs_names[j]
                 << " related to No." << i
                 << " grad_outputs: " << grad_outputs_names[i];
-        in_out_map[op_type][1][0][j] = i;
+        in_out_map[op_type][1][0][j] = i;  // NOLINT
       }
     }
   } else {
@@ -71,7 +71,7 @@ static void ConstructFwdAndBwdMap(
                 << "'s No." << j << " inputs: " << inputs_names[j]
                 << " related to No." << i
                 << " grad_outputs: " << grad_outputs_names[i];
-        in_out_map[op_type][1][0][j] = i;
+        in_out_map[op_type][1][0][j] = i;  // NOLINT
       }
     }
   } else {
@@ -84,7 +84,7 @@ static void ConstructFwdAndBwdMap(
                 << "'s No." << j << " inputs: " << inputs_names[j]
                 << " related to No." << i
                 << " grad_outputs: " << grad_outputs_names[i];
-        in_out_map[op_type][1][0][j] = i;
+        in_out_map[op_type][1][0][j] = i;  // NOLINT
      }
     }
   } else {
@@ -107,7 +107,7 @@ static void ConstructFwdAndBwdMap(
                 << "'s No." << j << " outputs: " << outputs_names[j]
                 << " related to No." << i
                 << " grad_inputs's grad: " << grad_inputs_names[i];
-        in_out_map[op_type][1][1][j] = i;
+        in_out_map[op_type][1][1][j] = i;  // NOLINT
       }
     }
   } else {
@@ -120,7 +120,7 @@ static void ConstructFwdAndBwdMap(
                 << "'s No." << j << " outputs: " << outputs_names[j]
                 << " related to No." << i
                 << " grad_inputs fwd outputs: " << grad_inputs_names[i];
-        in_out_map[op_type][1][2][j] = i;
+        in_out_map[op_type][1][2][j] = i;  // NOLINT
       }
     }
   } else {
@@ -130,7 +130,7 @@ static void ConstructFwdAndBwdMap(
                 << "'s No." << j << " inputs: " << inputs_names[j]
                 << " related to No." << i
                 << " grad_inputs fwd inputs: " << grad_inputs_names[i];
-        in_out_map[op_type][1][3][j] = i;
+        in_out_map[op_type][1][3][j] = i;  // NOLINT
       }
     }
   }
@@ -183,9 +183,10 @@ RunCustomOpNode::operator()(paddle::small_vector<std::vector<paddle::Tensor>,
           << ", whose grad_inputs_name size is: " << grad_inputs_name.size();
   auto hooked_grads = ApplyGradientHooks(grads);
   for (size_t i = 0; i < hooked_grads.size(); i++) {
-    if (map[0][1].find(i) != map[0][1].end()) {
-      VLOG(7) << "Insert grad: " << i << " to grad_inputs: " << map[0][1].at(i);
-      tmp_ins[map[0][1].at(i)] = hooked_grads[i];
+    if (map[0][1].find(static_cast<int>(i)) != map[0][1].end()) {
+      VLOG(7) << "Insert grad: " << i
+              << " to grad_inputs: " << map[0][1].at(static_cast<int>(i));
+      tmp_ins[map[0][1].at(static_cast<int>(i))] = hooked_grads[i];
     }
   }
@@ -227,8 +228,8 @@ RunCustomOpNode::operator()(paddle::small_vector<std::vector<paddle::Tensor>,
       tmp_outs(grad_outputs_names.size());
   VLOG(6) << "Prepare Grad outputs for size: " << grad_outputs_names.size();
   for (size_t i = 0; i < OutputMeta().size(); i++) {
-    if (map[0][0].find(i) != map[0][0].end()) {
-      int grad_output_idx = map[0][0].at(i);
+    if (map[0][0].find(static_cast<int>(i)) != map[0][0].end()) {
+      int grad_output_idx = map[0][0].at(static_cast<int>(i));
       VLOG(7) << "Insert grad outputs: " << i
               << " with size: " << OutputMeta()[grad_output_idx].size()
               << " to tmp_outputs: " << grad_output_idx;
@@ -316,8 +317,9 @@ RunCustomOpNode::operator()(paddle::small_vector<std::vector<paddle::Tensor>,
     const std::vector<paddle::Tensor>& in_tensors = ctx.InputsBetween(
         ctx.InputRangeAt(i).first, ctx.InputRangeAt(i).second);
-    if (slot_map[1][0].find(i) != slot_map[1][0].end()) {
-      grad_node->SetGradOutMeta(in_tensors, slot_map[1][0].at(i));
+    if (slot_map[1][0].find(static_cast<int>(i)) != slot_map[1][0].end()) {
+      grad_node->SetGradOutMeta(in_tensors,
+                                slot_map[1][0].at(static_cast<int>(i)));
     } else {
       grad_node->SetGradOutMeta(in_tensors, slot_ins_num - 1 - no_grad_cnt);
       no_grad_cnt++;
@@ -397,9 +399,10 @@ RunCustomOpDoubleGradNode::operator()(
   auto hooked_grads = ApplyGradientHooks(grads);
   for (size_t i = 0; i < hooked_grads.size(); i++) {
-    if (map[1][1].find(i) != map[1][1].end()) {
-      VLOG(7) << "Insert grad: " << i << " to grad_inputs: " << map[1][1].at(i);
-      tmp_ins[map[1][1].at(i)] = hooked_grads[i];
+    if (map[1][1].find(static_cast<int>(i)) != map[1][1].end()) {
+      VLOG(7) << "Insert grad: " << i
+              << " to grad_inputs: " << map[1][1].at(static_cast<int>(i));
+      tmp_ins[map[1][1].at(static_cast<int>(i))] = hooked_grads[i];
     }
   }
@@ -426,8 +429,8 @@ RunCustomOpDoubleGradNode::operator()(
   VLOG(6) << "Prepare Grad outputs for size: " << grad_outputs_names.size();
   for (size_t i = 0; i < OutputMeta().size(); i++) {
-    if (map[1][0].find(i) != map[1][0].end()) {
-      int grad_output_idx = map[1][0].at(i);
+    if (map[1][0].find(static_cast<int>(i)) != map[1][0].end()) {
+      int grad_output_idx = map[1][0].at(static_cast<int>(i));
       VLOG(7) << "Insert grad outputs: " << i
               << " with size: " << OutputMeta()[grad_output_idx].size()
               << " to tmp_outputs: " << grad_output_idx;
diff --git a/paddle/fluid/eager/pylayer/py_layer_node.cc b/paddle/fluid/eager/pylayer/py_layer_node.cc
index b1ac2b360b243..cec1d49f95a85 100644
--- a/paddle/fluid/eager/pylayer/py_layer_node.cc
+++ b/paddle/fluid/eager/pylayer/py_layer_node.cc
@@ -56,7 +56,7 @@ GradNodePyLayer::operator()(
           grads.size(),
           ctx->forward_output_tensor_is_duplicable.size()));
-  auto backward_args = PyTuple_New(grads.size());
+  auto backward_args = PyTuple_New(static_cast<Py_ssize_t>(grads.size()));
   for (size_t i = 0; i < grads.size(); i++) {
     if (ctx->forward_output_tensor_is_duplicable[i]) {
       PyObject* pylist = PyList_New((Py_ssize_t)grads[i].size());
diff --git a/paddle/fluid/framework/data_type_transform_test.cc b/paddle/fluid/framework/data_type_transform_test.cc
index 44ebdc96e6afe..b0ed332834848 100644
--- a/paddle/fluid/framework/data_type_transform_test.cc
+++ b/paddle/fluid/framework/data_type_transform_test.cc
@@ -49,13 +49,13 @@ TEST(DataTypeTransform, CPUTransform) {
     int data_number = 2 * 3;
     for (int i = 0; i < data_number; ++i) {
-      ptr[i] = i / 3;
+      ptr[i] = i / 3;  // NOLINT
     }
     paddle::framework::TransDataType(kernel_fp32, kernel_fp64, in, &out);
     double* out_data_double = out.data<double>();
     for (int i = 0; i < data_number; ++i) {
-      EXPECT_EQ(out_data_double[i], static_cast<double>(i / 3));
+      EXPECT_EQ(out_data_double[i], static_cast<double>(i / 3));  // NOLINT
     }
     paddle::framework::TransDataType(kernel_fp32, kernel_int32, in, &out);
@@ -113,7 +113,7 @@ TEST(DataTypeTransform, CPUTransform) {
     float* in_data_float = in.mutable_data<float>(phi::make_ddim({2, 3}), place);
     for (int i = 0; i < data_number; ++i) {
-      in_data_float[i] = i;
+      in_data_float[i] = static_cast<float>(i);
     }
     paddle::framework::TransDataType(kernel_fp32, kernel_fp16, in, &out);
@@ -227,7 +227,7 @@ TEST(DataTypeTransform, CPUTransform) {
     float* in_data_float = in.mutable_data<float>(phi::make_ddim({2, 3}), place);
     for (int i = 0; i < data_number; ++i) {
-      in_data_float[i] = i;
+      in_data_float[i] = static_cast<float>(i);
     }
     paddle::framework::TransDataType(kernel_fp32, kernel_bf16, in, &out);
@@ -341,7 +341,7 @@ TEST(DataTypeTransform, CPUTransform) {
     float* in_data_float = in.mutable_data<float>(phi::make_ddim({2, 3}), place);
     for (int i = 0; i < data_number; ++i) {
-      in_data_float[i] = i;
+      in_data_float[i] = static_cast<float>(i);
     }
     paddle::framework::TransDataType(kernel_fp32, kernel_int32, in, &out);
diff --git a/paddle/fluid/framework/details/build_strategy_test.cc b/paddle/fluid/framework/details/build_strategy_test.cc
index 0990f134b3e1b..dc6a7e33e4f2f 100644
--- a/paddle/fluid/framework/details/build_strategy_test.cc
+++ b/paddle/fluid/framework/details/build_strategy_test.cc
@@ -73,7 +73,7 @@ static std::vector<platform::Place> CreatePlaces(size_t num, bool use_cuda) {
   result.reserve(num);
   for (size_t i = 0; i < num; ++i) {
     if (use_cuda) {
-      result.emplace_back(platform::CUDAPlace(i));
+      result.emplace_back(platform::CUDAPlace(static_cast<int>(i)));
     } else {
       result.emplace_back(platform::CPUPlace());
     }
   }
diff --git a/paddle/fluid/framework/details/fused_broadcast_op_handle_test.cc b/paddle/fluid/framework/details/fused_broadcast_op_handle_test.cc
index 221417401d505..3471a31134d55 100644
--- a/paddle/fluid/framework/details/fused_broadcast_op_handle_test.cc
+++ b/paddle/fluid/framework/details/fused_broadcast_op_handle_test.cc
@@ -148,8 +148,11 @@ struct TestFusedBroadcastOpHandle : TestBroadcastOpHandle {
     for (size_t i = 0; i < input_scope_idxes.size(); ++i) {
       const std::string& varname("out_var" + std::to_string(i));
       for (size_t j = 0; j < place_list_.size(); ++j) {
-        SelectedRowsEqual(
-            varname, input_scope_idxes[i], send_vector[i], rows, height);
+        SelectedRowsEqual(varname,
+                          static_cast<int>(input_scope_idxes[i]),
+                          send_vector[i],
+                          rows,
+                          height);
       }
     }
   }
diff --git a/paddle/fluid/framework/ir/delete_weight_dequant_linear_op_pass_tester.cc b/paddle/fluid/framework/ir/delete_weight_dequant_linear_op_pass_tester.cc
index 603bbf0872d52..40c522d39906e 100644
--- a/paddle/fluid/framework/ir/delete_weight_dequant_linear_op_pass_tester.cc
+++ b/paddle/fluid/framework/ir/delete_weight_dequant_linear_op_pass_tester.cc
@@ -63,11 +63,11 @@ TEST(DeleteWeightDequantLinearOpPass, basic) {
   graph->Set("__param_scope__", CreateParamScope());
   auto pass =
       PassRegistry::Instance().Get("delete_weight_dequant_linear_op_pass");
-  int num_nodes_before = graph->Nodes().size();
+  int num_nodes_before = static_cast<int>(graph->Nodes().size());
   VLOG(3) << DebugString(graph);
   graph.reset(pass->Apply(graph.release()));
-  int num_nodes_after = graph->Nodes().size();
+  int num_nodes_after = static_cast<int>(graph->Nodes().size());
   int num_dequant_nodes_after = GetNumOpNodes(graph, "dequantize_linear");
   VLOG(3) << DebugString(graph);
@@ -110,11 +110,11 @@ TEST(DeleteWeightDequantLinearOpPass, basic_fp16) {
   graph->Set("__param_scope__", CreateParamScope());
   auto pass =
       PassRegistry::Instance().Get("delete_weight_dequant_linear_op_pass");
-  int num_nodes_before = graph->Nodes().size();
+  int num_nodes_before = static_cast<int>(graph->Nodes().size());
   VLOG(3) << DebugString(graph);
   graph.reset(pass->Apply(graph.release()));
-  int num_nodes_after = graph->Nodes().size();
+  int num_nodes_after = static_cast<int>(graph->Nodes().size());
   int num_dequant_nodes_after = GetNumOpNodes(graph, "dequantize_linear");
   VLOG(3) << DebugString(graph);
diff --git a/paddle/fluid/framework/ir/dense_fc_to_sparse_pass_tester.cc b/paddle/fluid/framework/ir/dense_fc_to_sparse_pass_tester.cc
index 6151778a58c8c..7fb315de928a6 100644
--- a/paddle/fluid/framework/ir/dense_fc_to_sparse_pass_tester.cc
+++ b/paddle/fluid/framework/ir/dense_fc_to_sparse_pass_tester.cc
@@ -74,13 +74,13 @@ TEST(FCFusePass, basic) {
   fuse_pass->Set("use_gpu", new bool(true));
   sparse_pass->Set("use_gpu", new bool(true));
   graph->Set("__param_scope__", CreateParamScope());
-  int num_nodes_before = graph->Nodes().size();
+  int num_nodes_before = static_cast<int>(graph->Nodes().size());
   int num_mul_nodes_before = GetNumOpNodes(graph, "mul");
   VLOG(3) << DebugString(graph);
   graph.reset(fuse_pass->Apply(graph.release()));
   graph.reset(sparse_pass->Apply(graph.release()));
-  int num_nodes_after = graph->Nodes().size();
+  int num_nodes_after = static_cast<int>(graph->Nodes().size());
   int num_fc_nodes_after = GetNumOpNodes(graph, "fc");
   int num_sparse_fc_nodes_after = GetNumOpNodes(graph, "sparse_fc");
   VLOG(3) << DebugString(graph);
diff --git a/paddle/fluid/framework/ir/dense_multihead_matmul_to_sparse_pass_tester.cc b/paddle/fluid/framework/ir/dense_multihead_matmul_to_sparse_pass_tester.cc
index a8975a08d6bae..98f55003b266b 100644
--- a/paddle/fluid/framework/ir/dense_multihead_matmul_to_sparse_pass_tester.cc
+++ b/paddle/fluid/framework/ir/dense_multihead_matmul_to_sparse_pass_tester.cc
@@ -121,12 +121,12 @@ TEST(DenseMultiHeadMatmulToSparsePass, basic) {
   if (fuse_pass.get() == nullptr || sparse_pass.get() == nullptr)
     LOG(INFO) << "asdfasdf";
-  int num_nodes_before = graph->Nodes().size();
+  int num_nodes_before = static_cast<int>(graph->Nodes().size());
   VLOG(3) << DebugString(graph);
   graph.reset(fuse_pass->Apply(graph.release()));
   graph.reset(sparse_pass->Apply(graph.release()));
-  int num_nodes_after = graph->Nodes().size();
+  int num_nodes_after = static_cast<int>(graph->Nodes().size());
   int num_fused_nodes_after = GetNumOpNodes(graph, "sparse_multihead_matmul");
   VLOG(3) << DebugString(graph);
diff --git a/paddle/fluid/framework/ir/fc_elementwise_layernorm_fuse_pass_tester.cc b/paddle/fluid/framework/ir/fc_elementwise_layernorm_fuse_pass_tester.cc
index d357a9bbdbca8..05d43788fb20d 100644
--- a/paddle/fluid/framework/ir/fc_elementwise_layernorm_fuse_pass_tester.cc
+++ b/paddle/fluid/framework/ir/fc_elementwise_layernorm_fuse_pass_tester.cc
@@ -47,11 +47,11 @@ TEST(FCElementwiseLayerNormFusePass, basic) {
   std::unique_ptr<ir::Graph> graph(new ir::Graph(layers.main_program()));
   auto pass =
       PassRegistry::Instance().Get("fc_elementwise_layernorm_fuse_pass");
-  int num_nodes_before = graph->Nodes().size();
+  int num_nodes_before = static_cast<int>(graph->Nodes().size());
   VLOG(3) << DebugString(graph);
   graph.reset(pass->Apply(graph.release()));
-  int num_nodes_after = graph->Nodes().size();
+  int num_nodes_after = static_cast<int>(graph->Nodes().size());
   int num_fused_nodes_after =
       GetNumOpNodes(graph, "fused_fc_elementwise_layernorm");
   VLOG(3) << DebugString(graph);
diff --git a/paddle/fluid/framework/ir/fc_fuse_pass_tester.cc b/paddle/fluid/framework/ir/fc_fuse_pass_tester.cc
index b1850cb9973c9..6b06c87c9beaf 100644
--- a/paddle/fluid/framework/ir/fc_fuse_pass_tester.cc
+++ b/paddle/fluid/framework/ir/fc_fuse_pass_tester.cc
@@ -71,12 +71,12 @@ TEST(FCFusePass, basic) {
   auto pass = PassRegistry::Instance().Get("fc_fuse_pass");
   pass->Set("use_gpu", new bool(true));
   graph->Set("__param_scope__", CreateParamScope());
-  int num_nodes_before = graph->Nodes().size();
+  int num_nodes_before = static_cast<int>(graph->Nodes().size());
   int num_mul_nodes_before = GetNumOpNodes(graph, "mul");
   VLOG(3) << DebugString(graph);
   graph.reset(pass->Apply(graph.release()));
-  int num_nodes_after = graph->Nodes().size();
+  int num_nodes_after = static_cast<int>(graph->Nodes().size());
   int num_fc_nodes_after = GetNumOpNodes(graph, "fc");
   VLOG(3) << DebugString(graph);
diff --git a/paddle/fluid/framework/ir/fc_gru_fuse_pass_tester.cc b/paddle/fluid/framework/ir/fc_gru_fuse_pass_tester.cc
index b8672db20ebac..8d94d708d9889 100644
--- a/paddle/fluid/framework/ir/fc_gru_fuse_pass_tester.cc
+++ b/paddle/fluid/framework/ir/fc_gru_fuse_pass_tester.cc
@@ -24,12 +24,12 @@ TEST(FcGruFusePass, basic) {
   auto pass = PassRegistry::Instance().Get("fc_gru_fuse_pass");
   pass->Set("use_gpu", new bool(true));
   graph->Set("__param_scope__", CreateParamScope());
-  int num_nodes_before = graph->Nodes().size();
+  int num_nodes_before = static_cast<int>(graph->Nodes().size());
   int num_gru_nodes_before = GetNumOpNodes(graph, "gru");
   VLOG(3) << DebugString(graph);
   graph.reset(pass->Apply(graph.release()));
-  int num_nodes_after = graph->Nodes().size();
+  int num_nodes_after = static_cast<int>(graph->Nodes().size());
   int num_fuse_gru_nodes_after = GetNumOpNodes(graph, "fusion_gru");
   VLOG(3) << DebugString(graph);
diff --git a/paddle/fluid/framework/ir/fc_lstm_fuse_pass_tester.cc b/paddle/fluid/framework/ir/fc_lstm_fuse_pass_tester.cc
index cf18c5712a3d0..876e949fdc3d3 100644
--- a/paddle/fluid/framework/ir/fc_lstm_fuse_pass_tester.cc
+++ b/paddle/fluid/framework/ir/fc_lstm_fuse_pass_tester.cc
@@ -25,12 +25,12 @@ TEST(FcLstmFusePass, basic) {
   auto pass = PassRegistry::Instance().Get("fc_lstm_fuse_pass");
   pass->Set("use_gpu", new bool(false));
   graph->Set("__param_scope__", CreateParamScope());
-  int num_nodes_before = graph->Nodes().size();
+  int num_nodes_before = static_cast<int>(graph->Nodes().size());
   int num_lstm_nodes_before = GetNumOpNodes(graph, "lstm");
   VLOG(3) << DebugString(graph);
   graph.reset(pass->Apply(graph.release()));
-  int num_nodes_after = graph->Nodes().size();
+  int num_nodes_after = static_cast<int>(graph->Nodes().size());
   int num_fusion_lstm_nodes_after = GetNumOpNodes(graph, "fusion_lstm");
   VLOG(3) << DebugString(graph);
diff --git a/paddle/fluid/framework/ir/fused_multi_transformer_decoder_pass_tester.cc b/paddle/fluid/framework/ir/fused_multi_transformer_decoder_pass_tester.cc
index 73026660c831c..6c08bd2941ff1 100644
--- a/paddle/fluid/framework/ir/fused_multi_transformer_decoder_pass_tester.cc
+++ b/paddle/fluid/framework/ir/fused_multi_transformer_decoder_pass_tester.cc
@@ -199,11 +199,11 @@ TEST(FusedMultiTransformerDecoderPass, basic) {
       PassRegistry::Instance().Get("fused_multi_transformer_decoder_pass");
   if (pass.get() == nullptr)
     LOG(INFO) << "get fused_multi_transformer_decoder_pass failed";
-  int num_nodes_before = graph->Nodes().size();
+  int num_nodes_before = static_cast<int>(graph->Nodes().size());
   VLOG(3) << DebugString(graph);
   graph.reset(pass->Apply(graph.release()));
-  int num_nodes_after = graph->Nodes().size();
+  int num_nodes_after = static_cast<int>(graph->Nodes().size());
   VLOG(3) << DebugString(graph);
   int num_fused_nodes_after = GetNumOpNodes(graph, "fused_multi_transformer");
@@ -353,11 +353,11 @@ TEST(FusedMultiTransformerDecoderFuseQKVPass, basic) {
           "fused_multi_transformer_decoder_fuse_qkv_pass");
   if (pass.get() == nullptr)
     LOG(INFO) << "get fused_multi_transformer_decoder_fuse_qkv_pass failed";
-  int num_nodes_before = graph->Nodes().size();
+  int num_nodes_before = static_cast<int>(graph->Nodes().size());
   VLOG(3) << DebugString(graph);
   graph.reset(pass->Apply(graph.release()));
-  int num_nodes_after = graph->Nodes().size();
+  int num_nodes_after = static_cast<int>(graph->Nodes().size());
   VLOG(3) << DebugString(graph);
   int num_fused_nodes_after = GetNumOpNodes(graph, "fused_multi_transformer");
@@ -517,11 +517,11 @@ TEST(MultiDevicesFusedMultiTransformerDecoderFuseQKVPass, basic) {
     LOG(INFO)
        << "get multi_devices_fused_multi_transformer_decoder_fuse_qkv_pass "
           "failed";
-  int num_nodes_before = graph->Nodes().size();
+  int num_nodes_before = static_cast<int>(graph->Nodes().size());
   VLOG(3) << DebugString(graph);
   graph.reset(pass->Apply(graph.release()));
-  int num_nodes_after = graph->Nodes().size();
+  int num_nodes_after = static_cast<int>(graph->Nodes().size());
   VLOG(3) << DebugString(graph);
   int num_fused_nodes_after = GetNumOpNodes(graph, "fused_multi_transformer");
diff --git a/paddle/fluid/framework/ir/fused_multi_transformer_encoder_pass_tester.cc b/paddle/fluid/framework/ir/fused_multi_transformer_encoder_pass_tester.cc
index 0254986c14c59..3b4f475df5f36 100644
--- a/paddle/fluid/framework/ir/fused_multi_transformer_encoder_pass_tester.cc
+++ b/paddle/fluid/framework/ir/fused_multi_transformer_encoder_pass_tester.cc
@@ -192,11 +192,11 @@ TEST(FusedMultiTransformerEncoderPass, basic) {
       PassRegistry::Instance().Get("fused_multi_transformer_encoder_pass");
   if (pass.get() == nullptr)
     LOG(INFO) << "get fused_multi_transformer_encoder_pass failed";
-  int num_nodes_before = graph->Nodes().size();
+  int num_nodes_before = static_cast<int>(graph->Nodes().size());
   VLOG(3) << DebugString(graph);
   graph.reset(pass->Apply(graph.release()));
-  int num_nodes_after = graph->Nodes().size();
+  int num_nodes_after = static_cast<int>(graph->Nodes().size());
   VLOG(3) << DebugString(graph);
   int num_fused_nodes_after = GetNumOpNodes(graph, "fused_multi_transformer");
@@ -368,11 +368,11 @@ TEST(MultiDevicesFusedMultiTransformerEncoderPass, basic) {
   if (pass.get() == nullptr)
     LOG(INFO)
        << "get multi_devices_fused_multi_transformer_encoder_pass failed";
-  int num_nodes_before = graph->Nodes().size();
+  int num_nodes_before = static_cast<int>(graph->Nodes().size());
   VLOG(3) << DebugString(graph);
   graph.reset(pass->Apply(graph.release()));
-  int num_nodes_after = graph->Nodes().size();
+  int num_nodes_after = static_cast<int>(graph->Nodes().size());
   VLOG(3) << DebugString(graph);
   int num_fused_nodes_after = GetNumOpNodes(graph, "fused_multi_transformer");
@@ -518,11 +518,11 @@ TEST(FusedMultiTransformerEncoderFuseQKVPass, basic) {
           "fused_multi_transformer_encoder_fuse_qkv_pass");
   if (pass.get() == nullptr)
     LOG(INFO) << "get fused_multi_transformer_encoder_fuse_qkv_pass failed";
-  int num_nodes_before = graph->Nodes().size();
+  int num_nodes_before = static_cast<int>(graph->Nodes().size());
   VLOG(3) << DebugString(graph);
   graph.reset(pass->Apply(graph.release()));
-  int num_nodes_after = graph->Nodes().size();
+  int num_nodes_after = static_cast<int>(graph->Nodes().size());
   VLOG(3) << DebugString(graph);
   int num_fused_nodes_after = GetNumOpNodes(graph, "fused_multi_transformer");
@@ -678,11 +678,11 @@ TEST(MultiDevicesFusedMultiTransformerEncoderFuseQKVPass, basic) {
     LOG(INFO)
        << "get multi_devices_fused_multi_transformer_encoder_fuse_qkv_pass "
          "failed";
-  int num_nodes_before = graph->Nodes().size();
+  int num_nodes_before = static_cast<int>(graph->Nodes().size());
   VLOG(3) << DebugString(graph);
   graph.reset(pass->Apply(graph.release()));
-  int num_nodes_after = graph->Nodes().size();
+  int num_nodes_after = static_cast<int>(graph->Nodes().size());
   VLOG(3) << DebugString(graph);
   int num_fused_nodes_after = GetNumOpNodes(graph, "fused_multi_transformer");
diff --git a/paddle/fluid/framework/ir/generate_pass_tester.cc b/paddle/fluid/framework/ir/generate_pass_tester.cc
index f4ac8d472ba16..760e1e8ce4ef8 100644
--- a/paddle/fluid/framework/ir/generate_pass_tester.cc
+++ b/paddle/fluid/framework/ir/generate_pass_tester.cc
@@ -113,12 +113,12 @@ TEST(GeneratePass, generate_fc_fuse) {
     std::unique_ptr<ir::Graph> graph(new ir::Graph(layers.main_program()));
     auto pass = PassRegistry::Instance().Get("generate_fc_fuse");
-    int num_nodes_before = graph->Nodes().size();
+    int num_nodes_before = static_cast<int>(graph->Nodes().size());
     int num_mul_nodes_before = GetNumOpNodes(graph, "mul");
     VLOG(3) << DebugString(graph);
     graph.reset(pass->Apply(graph.release()));
-    int num_nodes_after = graph->Nodes().size();
+    int num_nodes_after = static_cast<int>(graph->Nodes().size());
    int num_fc_nodes_after = GetNumOpNodes(graph, "fc");
     VLOG(3) << DebugString(graph);
@@ -154,12 +154,12 @@ TEST(GeneratePass, generate_multi_add_to_addn) {
   std::unique_ptr<ir::Graph> graph(new ir::Graph(layers.main_program()));
   auto pass = PassRegistry::Instance().Get("generate_multi_add_to_addn");
-  int num_nodes_before = graph->Nodes().size();
+  int num_nodes_before = static_cast<int>(graph->Nodes().size());
   int num_add_nodes_before = GetNumOpNodes(graph, "elementwise_add");
   VLOG(3) << DebugString(graph);
   graph.reset(pass->Apply(graph.release()));
-  int num_nodes_after = graph->Nodes().size();
+  int num_nodes_after = static_cast<int>(graph->Nodes().size());
   int num_addn_nodes_after = GetNumOpNodes(graph, "sum");
   VLOG(3) << DebugString(graph);
@@ -195,12 +195,12 @@ TEST(GeneratePass, generate_combine_matmul) {
   std::unique_ptr<ir::Graph> graph(new ir::Graph(layers.main_program()));
   auto pass = PassRegistry::Instance().Get("generate_combine_matmul");
-  int num_nodes_before = graph->Nodes().size();
+  int num_nodes_before = static_cast<int>(graph->Nodes().size());
   int num_matmul_nodes_before = GetNumOpNodes(graph, "matmul");
   VLOG(3) << DebugString(graph);
   graph.reset(pass->Apply(graph.release()));
-  int num_nodes_after = graph->Nodes().size();
+  int num_nodes_after = static_cast<int>(graph->Nodes().size());
   int num_matmul_nodes_after = GetNumOpNodes(graph, "matmul");
   VLOG(3) << DebugString(graph);
diff --git a/paddle/fluid/framework/ir/graph_to_program_pass_test.cc b/paddle/fluid/framework/ir/graph_to_program_pass_test.cc
index 4530faf53dcd3..687d334e37089 100644
--- a/paddle/fluid/framework/ir/graph_to_program_pass_test.cc
+++ b/paddle/fluid/framework/ir/graph_to_program_pass_test.cc
@@ -354,8 +354,8 @@ void CheckBlockOpsEqual(const BlockDesc& before_block,
   // op's order must be the same
   for (size_t op_idx = 0; op_idx < before_block.OpSize(); ++op_idx) {
-    const auto& before_op = before_block.Op(op_idx);
-    const auto& after_op = after_block.Op(op_idx);
+    const auto& before_op = before_block.Op(static_cast<int>(op_idx));
+    const auto& after_op = after_block.Op(static_cast<int>(op_idx));
     EXPECT_EQ(before_op->Type(), after_op->Type());
diff --git a/paddle/fluid/framework/ir/memory_optimize_pass/test_reference_count_pass_last_lived_ops.cc b/paddle/fluid/framework/ir/memory_optimize_pass/test_reference_count_pass_last_lived_ops.cc
index 2e2fd34ba7246..a4cc550938495 100644
--- a/paddle/fluid/framework/ir/memory_optimize_pass/test_reference_count_pass_last_lived_ops.cc
+++ b/paddle/fluid/framework/ir/memory_optimize_pass/test_reference_count_pass_last_lived_ops.cc
@@ -41,7 +41,7 @@ static std::vector<platform::Place> CreatePlaces(size_t num, bool use_cuda) {
   result.reserve(num);
   for (size_t i = 0; i < num; ++i) {
     if (use_cuda) {
-      result.emplace_back(platform::CUDAPlace(i));
+      result.emplace_back(platform::CUDAPlace(static_cast<int>(i)));
     } else {
       result.emplace_back(platform::CPUPlace());
     }
diff --git a/paddle/fluid/framework/ir/multihead_matmul_fuse_pass_tester.cc b/paddle/fluid/framework/ir/multihead_matmul_fuse_pass_tester.cc
index cbcb83bc4c906..fcd0959265bfd 100644
--- a/paddle/fluid/framework/ir/multihead_matmul_fuse_pass_tester.cc
+++ b/paddle/fluid/framework/ir/multihead_matmul_fuse_pass_tester.cc
@@ -115,11 +115,11 @@ TEST(MultiHeadMatmulFusePass, basic) {
   auto pass = PassRegistry::Instance().Get("multihead_matmul_fuse_pass_v2");
   if (pass.get() == nullptr) LOG(INFO) << "asdfasdf";
-  int num_nodes_before = graph->Nodes().size();
+  int num_nodes_before = static_cast<int>(graph->Nodes().size());
   VLOG(3) << DebugString(graph);
   graph.reset(pass->Apply(graph.release()));
-  int num_nodes_after = graph->Nodes().size();
+  int num_nodes_after = static_cast<int>(graph->Nodes().size());
   int num_fused_nodes_after = GetNumOpNodes(graph, "multihead_matmul");
   VLOG(3) << DebugString(graph);
diff --git a/paddle/fluid/framework/ir/repeated_fc_relu_fuse_pass_tester.cc b/paddle/fluid/framework/ir/repeated_fc_relu_fuse_pass_tester.cc
index 78a88379d8eea..8532c585982f4 100644
--- a/paddle/fluid/framework/ir/repeated_fc_relu_fuse_pass_tester.cc
+++ b/paddle/fluid/framework/ir/repeated_fc_relu_fuse_pass_tester.cc
@@ -51,12 +51,12 @@ void TestMain(int num_fc) {
   std::unique_ptr<ir::Graph> graph(new ir::Graph(layers.main_program()));
   auto pass = PassRegistry::Instance().Get("repeated_fc_relu_fuse_pass");
-  int num_nodes_before = graph->Nodes().size();
+  int num_nodes_before = static_cast<int>(graph->Nodes().size());
   int num_fc_nodes_before = GetNumOpNodes(graph, "fc");
   VLOG(3) << DebugString(graph);
   graph.reset(pass->Apply(graph.release()));
-  int num_nodes_after = graph->Nodes().size();
+  int num_nodes_after = static_cast<int>(graph->Nodes().size());
   int num_fused_nodes_after = GetNumOpNodes(graph, "fusion_repeated_fc_relu");
   VLOG(3) << DebugString(graph);
diff --git a/paddle/fluid/framework/ir/seqpool_concat_fuse_pass_tester.cc b/paddle/fluid/framework/ir/seqpool_concat_fuse_pass_tester.cc
index 5065b6b5593b9..a0693e8a39433 100644
--- a/paddle/fluid/framework/ir/seqpool_concat_fuse_pass_tester.cc
+++ b/paddle/fluid/framework/ir/seqpool_concat_fuse_pass_tester.cc
@@ -62,9 +62,9 @@ std::unique_ptr<ir::Graph> GetNumNodesOfBeforeAfter(
     int* after,
     const std::string& pass_type = "seqpool_concat_fuse_pass") {
   auto pass = PassRegistry::Instance().Get(pass_type);
-  *before = graph->Nodes().size();
+  *before = static_cast<int>(graph->Nodes().size());
   graph.reset(pass->Apply(graph.release()));
-  *after = graph->Nodes().size();
+  *after = static_cast<int>(graph->Nodes().size());
   return graph;
 }
diff --git a/paddle/fluid/framework/ir/seqpool_cvm_concat_fuse_pass_tester.cc b/paddle/fluid/framework/ir/seqpool_cvm_concat_fuse_pass_tester.cc
index 5f67030e9a363..f3adab84d3a3d 100644
--- a/paddle/fluid/framework/ir/seqpool_cvm_concat_fuse_pass_tester.cc
+++ b/paddle/fluid/framework/ir/seqpool_cvm_concat_fuse_pass_tester.cc
@@ -67,9 +67,9 @@ std::unique_ptr<ir::Graph> GetNumNodesOfBeforeAfter(
     int* after,
     const std::string& pass_type = "seqpool_cvm_concat_fuse_pass") {
   auto pass = PassRegistry::Instance().Get(pass_type);
-  *before = graph->Nodes().size();
+  *before = static_cast<int>(graph->Nodes().size());
   graph.reset(pass->Apply(graph.release()));
-  *after = graph->Nodes().size();
+  *after = static_cast<int>(graph->Nodes().size());
   return graph;
 }
diff --git a/paddle/fluid/framework/ir/skip_layernorm_fuse_pass_tester.cc b/paddle/fluid/framework/ir/skip_layernorm_fuse_pass_tester.cc
index dad4726b95d5c..bcd969c75c2ae 100644
--- a/paddle/fluid/framework/ir/skip_layernorm_fuse_pass_tester.cc
+++ b/paddle/fluid/framework/ir/skip_layernorm_fuse_pass_tester.cc
@@ -39,11 +39,11 @@ TEST(SkipLayerNormFusePass, basic) {
   graph->Set(kEmbEltwiseLayernormPass, new bool(true));
   graph->Set(kMultiheadMatmulPass, new bool(true));
   auto pass = PassRegistry::Instance().Get("skip_layernorm_fuse_pass");
-  int num_nodes_before = graph->Nodes().size();
+  int num_nodes_before = static_cast<int>(graph->Nodes().size());
   VLOG(3) << DebugString(graph);
   graph.reset(pass->Apply(graph.release()));
-  int num_nodes_after = graph->Nodes().size();
+  int num_nodes_after = static_cast<int>(graph->Nodes().size());
   int num_fused_nodes_after = GetNumOpNodes(graph, "skip_layernorm");
   VLOG(3) << DebugString(graph);
diff --git a/paddle/fluid/framework/lod_tensor_test.cc b/paddle/fluid/framework/lod_tensor_test.cc
index 6682a3dd371c6..5b93a831ef15a 100644
--- a/paddle/fluid/framework/lod_tensor_test.cc
+++ b/paddle/fluid/framework/lod_tensor_test.cc
@@ -136,7 +136,7 @@ TEST(LoD, SplitLoDTensor) {
   lod_tensor.Resize({20, 1});
   float* dst_ptr = lod_tensor.mutable_data<float>(place);
   for (int i = 0; i < lod_tensor.numel(); ++i) {
-    dst_ptr[i] = i;
+    dst_ptr[i] = static_cast<float>(i);
   }
   lod_tensor.set_lod(lod);
@@ -190,7 +190,7 @@ TEST(LoD, MergeLoDTensor) {
   lod_tensor0.Resize({13, 1});
   float* dst_ptr = lod_tensor0.mutable_data<float>(place);
   for (int i = 0; i < lod_tensor0.numel(); ++i) {
-    dst_ptr[i] = i;
+    dst_ptr[i] = static_cast<float>(i);
   }
   phi::DenseTensor lod_tensor1;
@@ -201,7 +201,7 @@ TEST(LoD, MergeLoDTensor) {
   lod_tensor1.Resize({7, 1});
   dst_ptr = lod_tensor1.mutable_data<float>(place);
   for (int i = 0; i < lod_tensor1.numel(); ++i) {
-    dst_ptr[i] = i;
+    dst_ptr[i] = static_cast<float>(i);
   }
   phi::DenseTensor lod_tensor2;
diff --git a/paddle/fluid/framework/program_desc_test.cc b/paddle/fluid/framework/program_desc_test.cc
index e41a0d1dbb623..c95982417fc1c 100644
--- a/paddle/fluid/framework/program_desc_test.cc
+++ b/paddle/fluid/framework/program_desc_test.cc
@@ -119,8 +119,8 @@ TEST(ProgramDesc, copy_ctor) {
   bool found_sub_block = false;
   bool found_sub_blocks = false;
   for (size_t i = 0; i < global_block->OpSize(); ++i) {
-    auto op_origin = global_block->Op(i);
-    auto op_copy = global_block_copy->Op(i);
+    auto op_origin = global_block->Op(static_cast<int>(i));
+    auto op_copy = global_block_copy->Op(static_cast<int>(i));
     ASSERT_EQ(op_origin->Type(), op_copy->Type());
     ASSERT_EQ(op_origin->Inputs(), op_copy->Inputs());
@@ -204,8 +204,8 @@ TEST(ProgramDescBind, serialize_and_deserialize) {
   assert_same_var("Out", out);
   for (size_t i = 0; i < global_block->OpSize(); ++i) {
-    auto op_origin = global_block->Op(i);
-    auto op_restored = global_block_restored->Op(i);
+    auto op_origin = global_block->Op(static_cast<int>(i));
+    auto op_restored = global_block_restored->Op(static_cast<int>(i));
     ASSERT_EQ(op_origin->Type(), op_restored->Type());
     ASSERT_EQ(op_origin->Inputs(), op_restored->Inputs());
diff --git a/paddle/fluid/inference/analysis/ir_passes/subgraph_util.cc b/paddle/fluid/inference/analysis/ir_passes/subgraph_util.cc
index fee6023db2c73..7eff211952298 100644
--- a/paddle/fluid/inference/analysis/ir_passes/subgraph_util.cc
+++ b/paddle/fluid/inference/analysis/ir_passes/subgraph_util.cc
@@ -142,7 +142,8 @@ void RenameAndGetOutputs(
   };
   for (size_t index = 0; index < block_desc->OpSize(); ++index) {
-    framework::proto::OpDesc *op = block_desc->Op(index)->Proto();
+    framework::proto::OpDesc *op =
+        block_desc->Op(static_cast<int>(index))->Proto();
     framework::OpDesc op_desc(*op, nullptr);
     auto correspond_node = subgraph_nodes[index];
     PADDLE_ENFORCE_EQ(
diff --git a/paddle/fluid/inference/analysis/passes/convert_to_mixed_precision.cc b/paddle/fluid/inference/analysis/passes/convert_to_mixed_precision.cc
index 24d18e0c3f99b..d706113307009 100644
--- a/paddle/fluid/inference/analysis/passes/convert_to_mixed_precision.cc
+++ b/paddle/fluid/inference/analysis/passes/convert_to_mixed_precision.cc
@@ -184,7 +184,7 @@ void ConvertToMixedPrecisionPass::SaveMixedModel() {
   auto SerializeProg = [&](const std::string& path) {
     auto str = mixed_program_desc.Proto()->SerializeAsString();
     std::ofstream file(path.c_str(), std::ios::binary);
-    file.write(str.c_str(), str.size());
+    file.write(str.c_str(), str.size());  // NOLINT
     file.close();
   };
diff --git a/paddle/fluid/inference/analysis/passes/memory_optimize_pass.cc b/paddle/fluid/inference/analysis/passes/memory_optimize_pass.cc
index b1f7cd5c301da..9061f72b3bd11 100644
--- a/paddle/fluid/inference/analysis/passes/memory_optimize_pass.cc
+++ b/paddle/fluid/inference/analysis/passes/memory_optimize_pass.cc
@@ -98,9 +98,9 @@ void MemoryOptimizePass::CollectLifeCycle(
                                        in_shape.end(),
                                        (int64_t)1,
                                        std::multiplies<>());
-      persis_byte +=
+      persis_byte += static_cast<int64_t>(
           paddle::framework::SizeOfType(node->Var()->GetDataType()) *
-          var_bytes;
+          var_bytes);
       continue;
     }
     std::string var = node->Name();
@@ -226,9 +226,9 @@ void MakeSimpleReusePlan(
   // Generating Memory Reuse Strategy Based on Greedy Way
   for (size_t i = 0; i < mem_nodes.size(); i++) {
     if (mem_nodes[i].cluster >= 0) continue;
-    int cluster_index = cluster_size->size();
+    int cluster_index = static_cast<int>(cluster_size->size());
     mem_nodes[i].cluster = cluster_index;
-    (*cluster_size)[mem_nodes[i].name] = mem_nodes[i].size;
+    (*cluster_size)[mem_nodes[i].name] = static_cast<int>(mem_nodes[i].size);
     (*node2cluster)[mem_nodes[i].name] = mem_nodes[i].name;
     std::unordered_set<std::string> cluster_adj = mem_nodes[i].adj;
     for (size_t j = i + 1; j < mem_nodes.size(); j++) {
diff --git a/paddle/fluid/inference/analysis/passes/save_optimized_model_pass.cc b/paddle/fluid/inference/analysis/passes/save_optimized_model_pass.cc
index 80326b243ad46..cad0296369479 100644
--- a/paddle/fluid/inference/analysis/passes/save_optimized_model_pass.cc
+++ b/paddle/fluid/inference/analysis/passes/save_optimized_model_pass.cc
@@ -115,7 +115,7 @@ void SaveOptimizedModelPass::SaveOptimizedModel(Argument* argument) {
     std::string save_model_path = path + "/" + "_optimized.pdmodel";
     auto str = optimized_program_desc.Proto()->SerializeAsString();
     std::ofstream file(save_model_path.c_str(), std::ios::binary);
-    file.write(str.c_str(), str.size());
+    file.write(str.c_str(), str.size());  // NOLINT
    file.close();
   };
diff --git a/paddle/fluid/inference/api/analysis_predictor.cc b/paddle/fluid/inference/api/analysis_predictor.cc
index eccc0b773ed1e..0088070e06a7b 100644
--- a/paddle/fluid/inference/api/analysis_predictor.cc
+++ b/paddle/fluid/inference/api/analysis_predictor.cc
@@ -1220,7 +1220,7 @@ bool AnalysisPredictor::SetFeed(const std::vector<PaddleTensor> &inputs,
        LOG(ERROR) << "feed names from program do not have name: [" << name
                   << "] from specified input";
      }
-      idx = feed_names_[name];
+      idx = static_cast<int>(feed_names_[name]);
    } else {
      idx = PADDLE_GET_CONST(int, feeds_[i]->GetAttr("col"));
    }
@@ -2242,7 +2242,8 @@ void AnalysisPredictor::HookCollectShapeRangeInfo() {
     if (!tensor.initialized()) return;
     framework::DDim dim = tensor.dims();
     std::vector<int32_t> shape(dim.size());
-    for (size_t i = 0; i < shape.size(); ++i) shape[i] = dim[i];
+    for (int i = 0; i < static_cast<int>(shape.size()); ++i)
+      shape[i] = static_cast<int32_t>(dim[i]);
     if (!shape.empty()) {
       shape_info_[input_name].emplace_back(shape);
     } else if (tensor.numel() > 0) {
@@ -2435,7 +2436,7 @@ bool AnalysisPredictor::LoadProgramDesc() {
     fin.seekg(0, std::ios::end);
     pb_content.resize(fin.tellg());
     fin.seekg(0, std::ios::beg);
-    fin.read(&(pb_content.at(0)), pb_content.size());
+    fin.read(&(pb_content.at(0)), pb_content.size());  // NOLINT
     fin.close();
     proto.ParseFromString(pb_content);
diff --git a/paddle/fluid/inference/api/api_impl.cc b/paddle/fluid/inference/api/api_impl.cc
index d318042719a16..76b0410cc8e8f 100644
--- a/paddle/fluid/inference/api/api_impl.cc
+++ b/paddle/fluid/inference/api/api_impl.cc
@@ -287,7 +287,7 @@ bool NativePaddlePredictor::SetFeed(const std::vector<PaddleTensor> &inputs,
     input.set_lod(lod);
     int idx = -1;
     if (config_.specify_input_name) {
-      idx = feed_names_[inputs[i].name];
+      idx = static_cast<int>(feed_names_[inputs[i].name]);
     } else {
       idx = PADDLE_GET_CONST(int, feeds_[i]->GetAttr("col"));
     }
diff --git a/paddle/fluid/inference/api/details/zero_copy_tensor.cc b/paddle/fluid/inference/api/details/zero_copy_tensor.cc
index 193e244f86e38..dd03de6e68d72 100644
--- a/paddle/fluid/inference/api/details/zero_copy_tensor.cc
+++ b/paddle/fluid/inference/api/details/zero_copy_tensor.cc
@@ -157,7 +157,7 @@ T *Tensor::data(PlaceType *place, int *size) const {
     *place = PlaceType::kUNK;
   }
-  *size = tensor->numel();
+  *size = static_cast<int>(tensor->numel());
   return res;
 }
diff --git a/paddle/fluid/inference/api/paddle_pass_builder.cc b/paddle/fluid/inference/api/paddle_pass_builder.cc
index 2058525946914..c513ed9d95501 100644
--- a/paddle/fluid/inference/api/paddle_pass_builder.cc
+++ b/paddle/fluid/inference/api/paddle_pass_builder.cc
@@ -74,11 +74,11 @@ size_t PaddlePassBuilder::GetPassIndex(const std::string &pass_type) {
 }
 void PaddlePassBuilder::InsertPass(size_t idx, const std::string &pass_type) {
-  passes_.insert(std::begin(passes_) + idx, pass_type);
+  passes_.insert(std::begin(passes_) + idx, pass_type);  // NOLINT
 }
 void PaddlePassBuilder::DeletePass(size_t idx) {
-  passes_.erase(std::begin(passes_) + idx);
+  passes_.erase(std::begin(passes_) + idx);  // NOLINT
 }
 void PaddlePassBuilder::AppendAnalysisPass(const std::string &pass) {
@@ -498,7 +498,7 @@ void CpuPassStrategy::EraseFcMkldnnPasses() {
   std::vector<std::string> fc_passes_to_erase(
       {"fc_mkldnn_pass", "fc_act_mkldnn_fuse_pass"});
   for (const auto &pass : fc_passes_to_erase) {
-    int idx = GetPassIndex(pass);
+    int idx = static_cast<int>(GetPassIndex(pass));
     if (idx != -1) {
       passes_.erase(std::begin(passes_) + idx);
     }
diff --git a/paddle/fluid/inference/capi_exp/pd_config.cc b/paddle/fluid/inference/capi_exp/pd_config.cc
index cb635116dffa5..7a7bc1109194c 100644
--- a/paddle/fluid/inference/capi_exp/pd_config.cc
+++ b/paddle/fluid/inference/capi_exp/pd_config.cc
@@ -112,7 +112,7 @@ void PD_ConfigDisableFCPadding(__pd_keep PD_Config* pd_config) {
 }
 PD_Bool PD_ConfigUseFcPadding(__pd_keep PD_Config* pd_config) {
   CHECK_AND_CONVERT_PD_CONFIG;
-  return config->use_fc_padding();
+  return config->use_fc_padding();  // NOLINT
 }
 void PD_ConfigEnableUseGpu(__pd_keep PD_Config* pd_config,
@@ -130,7 +130,7 @@ void PD_ConfigDisableGpu(__pd_keep PD_Config* pd_config) {
 }
 PD_Bool PD_ConfigUseGpu(__pd_keep PD_Config* pd_config) {
   CHECK_AND_CONVERT_PD_CONFIG;
-  return config->use_gpu();
+  return config->use_gpu();  // NOLINT
 }
 void PD_ConfigEnableONNXRuntime(__pd_keep PD_Config* pd_config) {
@@ -145,7 +145,7 @@ void PD_ConfigDisableONNXRuntime(__pd_keep PD_Config* pd_config) {
 PD_Bool PD_ConfigONNXRuntimeEnabled(__pd_keep PD_Config* pd_config) {
   CHECK_AND_CONVERT_PD_CONFIG;
-  return config->use_onnxruntime();
+  return config->use_onnxruntime();  // NOLINT
 }
 void PD_ConfigEnableORTOptimization(__pd_keep PD_Config* pd_config) {
@@ -173,7 +173,7 @@ void PD_ConfigEnableXpu(__pd_keep PD_Config* pd_config,
 PD_Bool PD_ConfigUseXpu(__pd_keep PD_Config* pd_config) {
   CHECK_AND_CONVERT_PD_CONFIG;
-  return config->use_xpu();
+  return config->use_xpu();  // NOLINT
 }
 int32_t PD_ConfigGpuDeviceId(__pd_keep PD_Config* pd_config) {
@@ -193,7 +193,7 @@ void PD_ConfigEnableCustomDevice(__pd_keep PD_Config* pd_config,
 }
 PD_Bool PD_ConfigUseCustomDevice(__pd_keep PD_Config* pd_config) {
   CHECK_AND_CONVERT_PD_CONFIG;
- return config->use_custom_device(); + return config->use_custom_device(); // NOLINT } int32_t PD_ConfigCustomDeviceId(__pd_keep PD_Config* pd_config) { CHECK_AND_CONVERT_PD_CONFIG; @@ -221,7 +221,7 @@ void PD_ConfigEnableCudnn(__pd_keep PD_Config* pd_config) { } PD_Bool PD_ConfigCudnnEnabled(__pd_keep PD_Config* pd_config) { CHECK_AND_CONVERT_PD_CONFIG; - return config->cudnn_enabled(); + return config->cudnn_enabled(); // NOLINT } void PD_ConfigSwitchIrOptim(__pd_keep PD_Config* pd_config, PD_Bool x) { @@ -230,7 +230,7 @@ void PD_ConfigSwitchIrOptim(__pd_keep PD_Config* pd_config, PD_Bool x) { } PD_Bool PD_ConfigIrOptim(__pd_keep PD_Config* pd_config) { CHECK_AND_CONVERT_PD_CONFIG; - return config->ir_optim(); + return config->ir_optim(); // NOLINT } void PD_ConfigEnableTensorRtEngine(__pd_keep PD_Config* pd_config, @@ -250,7 +250,7 @@ void PD_ConfigEnableTensorRtEngine(__pd_keep PD_Config* pd_config, } PD_Bool PD_ConfigTensorRtEngineEnabled(__pd_keep PD_Config* pd_config) { CHECK_AND_CONVERT_PD_CONFIG; - return config->tensorrt_engine_enabled(); + return config->tensorrt_engine_enabled(); // NOLINT } void PD_ConfigSetTrtDynamicShapeInfo(__pd_keep PD_Config* pd_config, @@ -286,7 +286,7 @@ void PD_ConfigSetTrtDynamicShapeInfo(__pd_keep PD_Config* pd_config, PD_Bool PD_ConfigTensorRtDynamicShapeEnabled(__pd_keep PD_Config* pd_config) { CHECK_AND_CONVERT_PD_CONFIG; - return config->tensorrt_dynamic_shape_enabled(); + return config->tensorrt_dynamic_shape_enabled(); // NOLINT } void PD_ConfigEnableTunedTensorRtDynamicShape(__pd_keep PD_Config* pd_config, @@ -299,12 +299,12 @@ void PD_ConfigEnableTunedTensorRtDynamicShape(__pd_keep PD_Config* pd_config, PD_Bool PD_ConfigTunedTensorRtDynamicShape(__pd_keep PD_Config* pd_config) { CHECK_AND_CONVERT_PD_CONFIG; - return config->tuned_tensorrt_dynamic_shape(); + return config->tuned_tensorrt_dynamic_shape(); // NOLINT } PD_Bool PD_ConfigTrtAllowBuildAtRuntime(__pd_keep PD_Config* pd_config) { CHECK_AND_CONVERT_PD_CONFIG; - return config->trt_allow_build_at_runtime(); + return config->trt_allow_build_at_runtime(); // NOLINT } void PD_ConfigCollectShapeRangeInfo(__pd_keep PD_Config* pd_config, @@ -323,7 +323,7 @@ const char* PD_ConfigShapeRangeInfoPath(__pd_keep PD_Config* pd_config) { PD_Bool PD_ConfigShapeRangeInfoCollected(__pd_keep PD_Config* pd_config) { CHECK_AND_CONVERT_PD_CONFIG; - return config->shape_range_info_collected(); + return config->shape_range_info_collected(); // NOLINT } void PD_ConfigDisableTensorRtOPs(__pd_keep PD_Config* pd_config, @@ -343,7 +343,7 @@ void PD_ConfigEnableVarseqlen(__pd_keep PD_Config* pd_config) { } PD_Bool PD_ConfigTensorRtOssEnabled(__pd_keep PD_Config* pd_config) { CHECK_AND_CONVERT_PD_CONFIG; - return config->tensorrt_varseqlen_enabled(); + return config->tensorrt_varseqlen_enabled(); // NOLINT } void PD_ConfigEnableTensorRtDla(__pd_keep PD_Config* pd_config, @@ -353,7 +353,7 @@ void PD_ConfigEnableTensorRtDla(__pd_keep PD_Config* pd_config, } PD_Bool PD_ConfigTensorRtDlaEnabled(__pd_keep PD_Config* pd_config) { CHECK_AND_CONVERT_PD_CONFIG; - return config->tensorrt_dla_enabled(); + return config->tensorrt_dla_enabled(); // NOLINT } void PD_ConfigEnableLiteEngine(__pd_keep PD_Config* pd_config, @@ -378,7 +378,7 @@ void PD_ConfigEnableLiteEngine(__pd_keep PD_Config* pd_config, } PD_Bool PD_ConfigLiteEngineEnabled(__pd_keep PD_Config* pd_config) { CHECK_AND_CONVERT_PD_CONFIG; - return config->lite_engine_enabled(); + return config->lite_engine_enabled(); // NOLINT } void PD_ConfigSwitchIrDebug(__pd_keep 
PD_Config* pd_config, PD_Bool x) { @@ -396,7 +396,7 @@ void PD_ConfigSetMkldnnCacheCapacity(__pd_keep PD_Config* pd_config, } PD_Bool PD_ConfigMkldnnEnabled(__pd_keep PD_Config* pd_config) { CHECK_AND_CONVERT_PD_CONFIG; - return config->mkldnn_enabled(); + return config->mkldnn_enabled(); // NOLINT } void PD_ConfigSetCpuMathLibraryNumThreads( __pd_keep PD_Config* pd_config, int32_t cpu_math_library_num_threads) { @@ -428,7 +428,7 @@ void PD_ConfigEnableMkldnnBfloat16(__pd_keep PD_Config* pd_config) { } PD_Bool PD_ConfigMkldnnBfloat16Enabled(__pd_keep PD_Config* pd_config) { CHECK_AND_CONVERT_PD_CONFIG; - return config->mkldnn_bfloat16_enabled(); + return config->mkldnn_bfloat16_enabled(); // NOLINT } void PD_ConfigSetBfloat16Op(__pd_keep PD_Config* pd_config, size_t ops_num, @@ -446,15 +446,15 @@ void PD_ConfigEnableMkldnnInt8(__pd_keep PD_Config* pd_config) { } PD_Bool PD_ConfigMkldnnInt8Enabled(__pd_keep PD_Config* pd_config) { CHECK_AND_CONVERT_PD_CONFIG; - return config->mkldnn_int8_enabled(); + return config->mkldnn_int8_enabled(); // NOLINT } PD_Bool PD_ConfigThreadLocalStreamEnabled(__pd_keep PD_Config* pd_config) { CHECK_AND_CONVERT_PD_CONFIG; - return config->thread_local_stream_enabled(); + return config->thread_local_stream_enabled(); // NOLINT } PD_Bool PD_ConfigMkldnnQuantizerEnabled(__pd_keep PD_Config* pd_config) { CHECK_AND_CONVERT_PD_CONFIG; - return config->mkldnn_quantizer_enabled(); + return config->mkldnn_quantizer_enabled(); // NOLINT } void PD_ConfigSetModelBuffer(__pd_keep PD_Config* pd_config, const char* prog_buffer, @@ -467,7 +467,7 @@ void PD_ConfigSetModelBuffer(__pd_keep PD_Config* pd_config, } PD_Bool PD_ConfigModelFromMemory(__pd_keep PD_Config* pd_config) { CHECK_AND_CONVERT_PD_CONFIG; - return config->model_from_memory(); + return config->model_from_memory(); // NOLINT } void PD_ConfigEnableMemoryOptim(__pd_keep PD_Config* pd_config, PD_Bool x) { CHECK_AND_CONVERT_PD_CONFIG; @@ -475,7 +475,7 @@ void PD_ConfigEnableMemoryOptim(__pd_keep PD_Config* pd_config, PD_Bool x) { } PD_Bool PD_ConfigMemoryOptimEnabled(__pd_keep PD_Config* pd_config) { CHECK_AND_CONVERT_PD_CONFIG; - return config->enable_memory_optim(); + return config->enable_memory_optim(); // NOLINT } void PD_ConfigEnableProfile(__pd_keep PD_Config* pd_config) { CHECK_AND_CONVERT_PD_CONFIG; @@ -483,7 +483,7 @@ void PD_ConfigEnableProfile(__pd_keep PD_Config* pd_config) { } PD_Bool PD_ConfigProfileEnabled(__pd_keep PD_Config* pd_config) { CHECK_AND_CONVERT_PD_CONFIG; - return config->profile_enabled(); + return config->profile_enabled(); // NOLINT } void PD_ConfigDisableGlogInfo(__pd_keep PD_Config* pd_config) { CHECK_AND_CONVERT_PD_CONFIG; @@ -491,7 +491,7 @@ void PD_ConfigDisableGlogInfo(__pd_keep PD_Config* pd_config) { } PD_Bool PD_ConfigGlogInfoDisabled(__pd_keep PD_Config* pd_config) { CHECK_AND_CONVERT_PD_CONFIG; - return config->glog_info_disabled(); + return config->glog_info_disabled(); // NOLINT } void PD_ConfigSetInvalid(__pd_keep PD_Config* pd_config) { CHECK_AND_CONVERT_PD_CONFIG; @@ -499,7 +499,7 @@ void PD_ConfigSetInvalid(__pd_keep PD_Config* pd_config) { } PD_Bool PD_ConfigIsValid(__pd_keep PD_Config* pd_config) { CHECK_AND_CONVERT_PD_CONFIG; - return config->is_valid(); + return config->is_valid(); // NOLINT } void PD_ConfigEnableGpuMultiStream(__pd_keep PD_Config* pd_config) { CHECK_AND_CONVERT_PD_CONFIG; diff --git a/paddle/fluid/inference/capi_exp/pd_predictor.cc b/paddle/fluid/inference/capi_exp/pd_predictor.cc index 54da0f9c91952..59f5a48eae0b8 100644 --- 
a/paddle/fluid/inference/capi_exp/pd_predictor.cc +++ b/paddle/fluid/inference/capi_exp/pd_predictor.cc @@ -140,7 +140,7 @@ __pd_give PD_Tensor* PD_PredictorGetOutputHandle( PD_Bool PD_PredictorRun(__pd_keep PD_Predictor* pd_predictor) { CHECK_AND_CONVERT_PD_PREDICTOR; - return predictor->Run(); + return predictor->Run(); // NOLINT } void PD_PredictorClearIntermediateTensor(__pd_keep PD_Predictor* pd_predictor) { diff --git a/paddle/fluid/inference/io.cc b/paddle/fluid/inference/io.cc index 000d4f5430ed4..7257109035c23 100644 --- a/paddle/fluid/inference/io.cc +++ b/paddle/fluid/inference/io.cc @@ -62,7 +62,7 @@ void ReadBinaryFile(const std::string& filename, std::string* contents) { contents->clear(); contents->resize(fin.tellg()); fin.seekg(0, std::ios::beg); - fin.read(&(contents->at(0)), contents->size()); + fin.read(&(contents->at(0)), contents->size()); // NOLINT fin.close(); } diff --git a/paddle/fluid/inference/utils/io_utils.cc b/paddle/fluid/inference/utils/io_utils.cc index de11e9c96a5c1..0ee80e3700b5c 100644 --- a/paddle/fluid/inference/utils/io_utils.cc +++ b/paddle/fluid/inference/utils/io_utils.cc @@ -53,7 +53,7 @@ void SerializePDTensorToStream(std::ostream *os, const PaddleTensor &tensor) { // 2. Name uint64_t name_bytes = tensor.name.size(); os->write(reinterpret_cast(&name_bytes), sizeof(name_bytes)); - os->write(tensor.name.c_str(), name_bytes); + os->write(tensor.name.c_str(), name_bytes); // NOLINT // 3. LoD auto lod = tensor.lod; uint64_t lod_size = lod.size(); @@ -68,13 +68,14 @@ void SerializePDTensorToStream(std::ostream *os, const PaddleTensor &tensor) { size_t dims = tensor.shape.size(); os->write(reinterpret_cast(&dims), sizeof(dims)); os->write(reinterpret_cast(tensor.shape.data()), - sizeof(int) * dims); + sizeof(int) * dims); // NOLINT // 5. Data os->write(reinterpret_cast(&tensor.dtype), sizeof(tensor.dtype)); uint64_t length = tensor.data.length(); os->write(reinterpret_cast(&length), sizeof(size_t)); - os->write(reinterpret_cast(tensor.data.data()), length); + os->write(reinterpret_cast(tensor.data.data()), + length); // NOLINT } void DeserializePDTensorToStream(std::istream &is, PaddleTensor *tensor) { @@ -85,7 +86,7 @@ void DeserializePDTensorToStream(std::istream &is, PaddleTensor *tensor) { uint64_t name_bytes; is.read(reinterpret_cast(&name_bytes), sizeof(name_bytes)); std::vector bytes(name_bytes); - is.read(bytes.data(), name_bytes); + is.read(bytes.data(), name_bytes); // NOLINT tensor->name = std::string(bytes.data(), name_bytes); // 3. LoD uint64_t lod_level; @@ -104,13 +105,14 @@ void DeserializePDTensorToStream(std::istream &is, PaddleTensor *tensor) { size_t dims; is.read(reinterpret_cast(&dims), sizeof(dims)); tensor->shape.resize(dims); - is.read(reinterpret_cast(tensor->shape.data()), sizeof(int) * dims); + is.read(reinterpret_cast(tensor->shape.data()), + sizeof(int) * dims); // NOLINT // 5. 
Data uint64_t length; is.read(reinterpret_cast(&tensor->dtype), sizeof(tensor->dtype)); is.read(reinterpret_cast(&length), sizeof(length)); tensor->data.Resize(length); - is.read(reinterpret_cast(tensor->data.data()), length); + is.read(reinterpret_cast(tensor->data.data()), length); // NOLINT } // ========================================================= diff --git a/paddle/fluid/inference/utils/table_printer.cc b/paddle/fluid/inference/utils/table_printer.cc index e75d62d135896..7f192152e052f 100644 --- a/paddle/fluid/inference/utils/table_printer.cc +++ b/paddle/fluid/inference/utils/table_printer.cc @@ -78,7 +78,7 @@ TablePrinter::TablePrinter(const std::vector& header) { } terminal_witdh = terminal_witdh - (2 * num_cols) - (num_cols + 1); - int avg_width = terminal_witdh / num_cols; + int avg_width = static_cast(terminal_witdh / num_cols); for (size_t i = 0; i < num_cols; ++i) { shares_.emplace_back(avg_width); @@ -101,7 +101,7 @@ void TablePrinter::InsertRow(const std::vector& row) { if (line.length() > max_width) max_width = line.length(); } - if (max_width > widths_[i]) widths_[i] = max_width; + if (max_width > widths_[i]) widths_[i] = static_cast(max_width); size_t num_lines = table_row[i].size(); if (num_lines > max_height) max_height = num_lines; @@ -134,7 +134,7 @@ void TablePrinter::CalcLayout() { if (it == idx.end() - 1) break; auto next_it = it + 1; - float remain_per_column = remain / (idx.end() - next_it); + float remain_per_column = remain / (idx.end() - next_it); // NOLINT for (; next_it != idx.end(); ++next_it) { shares_[*next_it] += remain_per_column; } @@ -142,7 +142,7 @@ void TablePrinter::CalcLayout() { } for (auto& item : idx) { - shares_[item] = static_cast(shares_[item]); + shares_[item] = static_cast(shares_[item]); // NOLINT } // For each record. @@ -153,7 +153,8 @@ void TablePrinter::CalcLayout() { for (size_t line_index = 0; line_index < data_[i][j].size(); ++line_index) { std::string line = data_[i][j][line_index]; - size_t num_rows = (line.length() + shares_[j] - 1) / shares_[j]; + size_t num_rows = + (line.length() + shares_[j] - 1) / shares_[j]; // NOLINT // If the number of rows required for this record is larger than 1, we // will break that line and put it in multiple lines diff --git a/paddle/fluid/ir_adaptor/translator/op_translator.cc b/paddle/fluid/ir_adaptor/translator/op_translator.cc index 8e4f32fcf92a5..fdbdf974fe582 100644 --- a/paddle/fluid/ir_adaptor/translator/op_translator.cc +++ b/paddle/fluid/ir_adaptor/translator/op_translator.cc @@ -586,7 +586,9 @@ void OpTranscriber::RecordOpResultMapping(pir::IrContext* ctx, bool generated_by_vector = value.type().isa(); (*param_map)[arg_name] = VariableDefiningInfo( - value, generated_by_vector, generated_by_vector ? idx_in_vec : -1); + value, + generated_by_vector, + static_cast(generated_by_vector ? 
idx_in_vec : -1)); } } @@ -1479,10 +1481,11 @@ struct ElementwiseTranscriber : public OpTranscriber { std::vector y_shape = phi::vectorize(y_tensor_type.dims()); if (axis < 0) { - axis += x_shape.size(); + axis += static_cast(x_shape.size()); } - int append_size = x_shape.size() - axis - 1 - y_shape.size(); + int append_size = + static_cast(x_shape.size() - axis - 1 - y_shape.size()); if (append_size < 0) { // which means x.rank <= y.rank, mostly // x.rank=y.rank return {x_value, y_value}; diff --git a/paddle/fluid/jit/function_utils.cc b/paddle/fluid/jit/function_utils.cc index d7c7f3ee6ac63..519bcb2a88877 100644 --- a/paddle/fluid/jit/function_utils.cc +++ b/paddle/fluid/jit/function_utils.cc @@ -94,7 +94,7 @@ void RemoveFeedFetch(framework::ProgramDesc *program_desc) { const auto &all_ops = block->AllOps(); size_t op_size = all_ops.size(); VLOG(3) << "op_size: " << op_size; - for (int i = op_size - 1; i >= 0; i--) { + for (int i = static_cast(op_size - 1); i >= 0; i--) { auto op = all_ops[i]; if (op->Type() == "feed") { VLOG(3) << "remove op type: " << op->Type() << ", index: " << i diff --git a/paddle/fluid/jit/serializer.cc b/paddle/fluid/jit/serializer.cc index 95f24aa0743ad..7f988cc6d7a2f 100644 --- a/paddle/fluid/jit/serializer.cc +++ b/paddle/fluid/jit/serializer.cc @@ -128,7 +128,7 @@ framework::ProgramDesc Deserializer::LoadProgram(const std::string& file_name) { fin.seekg(0, std::ios::end); std::string buffer(fin.tellg(), ' '); fin.seekg(0, std::ios::beg); - fin.read(&buffer[0], buffer.size()); + fin.read(&buffer[0], buffer.size()); // NOLINT fin.close(); return framework::ProgramDesc(buffer); } diff --git a/paddle/fluid/memory/stats_test.cc b/paddle/fluid/memory/stats_test.cc index a246f4343f8dd..74177636b21bb 100644 --- a/paddle/fluid/memory/stats_test.cc +++ b/paddle/fluid/memory/stats_test.cc @@ -56,7 +56,7 @@ class StatsTest : public ::testing::Test { for (size_t i = 0; i < thread_num; ++i) { threads.emplace_back([&]() { for (size_t data = 0; data < data_num; ++data) { - update_func_(stat_type_, 0, data); + update_func_(stat_type_, 0, static_cast(data)); } /* lock guard*/ { std::lock_guard lock_guard{mutex}; diff --git a/paddle/fluid/operators/array_to_lod_tensor_op.cc b/paddle/fluid/operators/array_to_lod_tensor_op.cc index d1dc7d8986bec..2325de03211a3 100644 --- a/paddle/fluid/operators/array_to_lod_tensor_op.cc +++ b/paddle/fluid/operators/array_to_lod_tensor_op.cc @@ -203,7 +203,9 @@ class ArrayToLoDTensorOp : public framework::OperatorBase { if (len == 0) { continue; } - functor.in.emplace_back(x[x_idx].Slice(start_offset, end_offset)); + functor.in.emplace_back( + x[x_idx].Slice(static_cast(start_offset), + static_cast(end_offset))); } } functor.out = out; diff --git a/paddle/fluid/operators/controlflow/conditional_block_op.cc b/paddle/fluid/operators/controlflow/conditional_block_op.cc index 60e5aa61b99d5..501761d82d034 100644 --- a/paddle/fluid/operators/controlflow/conditional_block_op.cc +++ b/paddle/fluid/operators/controlflow/conditional_block_op.cc @@ -292,7 +292,7 @@ class ConditionalBlockGradInferVarType : public framework::VarTypeInference { for (size_t i = 0; i < output_size; ++i) { ctx->SyncTypeAndDataType(ConditionalOp::kInputs, framework::GradVarName(ConditionalOp::kInputs), - i); + static_cast(i)); } } }; diff --git a/paddle/fluid/operators/controlflow/conditional_block_op_helper.cc b/paddle/fluid/operators/controlflow/conditional_block_op_helper.cc index f3a540061248f..08569d835fd82 100644 --- 
a/paddle/fluid/operators/controlflow/conditional_block_op_helper.cc +++ b/paddle/fluid/operators/controlflow/conditional_block_op_helper.cc @@ -47,7 +47,7 @@ static void FindAllConditionalBlockAndConditionalBlockGradOp( for (size_t i = 1; i < program.Size(); ++i) { auto &block = program.Block(i); for (size_t j = 0; j < block.OpSize(); ++j) { - auto *op = block.Op(j); + auto *op = block.Op(static_cast(j)); if (op->Type() == "conditional_block") { fwd_ops->emplace_back(op); } else if (op->Type() == "conditional_block_grad") { diff --git a/paddle/fluid/operators/controlflow/pylayer_op.cc b/paddle/fluid/operators/controlflow/pylayer_op.cc index fe05f47707445..ff1d4e1b91a23 100644 --- a/paddle/fluid/operators/controlflow/pylayer_op.cc +++ b/paddle/fluid/operators/controlflow/pylayer_op.cc @@ -255,8 +255,9 @@ class PyLayerBackwardInferVarType : public framework::VarTypeInference { "input_size and output_size should be equal for " "pylayer_grad op.")); for (size_t i = 0; i < backward_output_size; ++i) { - ctx->SyncTypeAndDataType( - PyLayerOp::kInputs, framework::GradVarName(PyLayerOp::kInputs), i); + ctx->SyncTypeAndDataType(PyLayerOp::kInputs, + framework::GradVarName(PyLayerOp::kInputs), + static_cast(i)); } } }; diff --git a/paddle/fluid/operators/controlflow/recurrent_op_helper.cc b/paddle/fluid/operators/controlflow/recurrent_op_helper.cc index 56ac412b896c5..2851757dccc4d 100644 --- a/paddle/fluid/operators/controlflow/recurrent_op_helper.cc +++ b/paddle/fluid/operators/controlflow/recurrent_op_helper.cc @@ -83,7 +83,7 @@ static void FindAllOpAndGradOp(const framework::ProgramDesc &program, for (size_t i = 1; i < program.Size(); ++i) { auto &block = program.Block(i); for (size_t j = 0; j < block.OpSize(); ++j) { - auto *op = block.Op(j); + auto *op = block.Op(static_cast(j)); if (op->Type() == type_name) { ops.emplace(op); } else if (op->Type() == backward_type_name) { diff --git a/paddle/fluid/operators/controlflow/while_op_helper.cc b/paddle/fluid/operators/controlflow/while_op_helper.cc index 6ae32f33e957a..6627ba0482b26 100644 --- a/paddle/fluid/operators/controlflow/while_op_helper.cc +++ b/paddle/fluid/operators/controlflow/while_op_helper.cc @@ -125,7 +125,7 @@ static void FindAllWhileAndWhileGradOp(const framework::ProgramDesc &program, for (size_t i = 1; i < program.Size(); ++i) { auto &block = program.Block(i); for (size_t j = 0; j < block.OpSize(); ++j) { - auto *op = block.Op(j); + auto *op = block.Op(static_cast(j)); if (op->Type() == "while") { while_ops->emplace_back(op); } else if (op->Type() == "while_grad") { diff --git a/paddle/fluid/operators/detection/anchor_generator_op.cc b/paddle/fluid/operators/detection/anchor_generator_op.cc index 71f7bb938a92d..70c7430c0e23f 100644 --- a/paddle/fluid/operators/detection/anchor_generator_op.cc +++ b/paddle/fluid/operators/detection/anchor_generator_op.cc @@ -54,7 +54,7 @@ class AnchorGeneratorOp : public framework::OperatorWithKernel { std::vector dim_vec(4); dim_vec[0] = input_dims[2]; dim_vec[1] = input_dims[3]; - dim_vec[2] = num_anchors; + dim_vec[2] = static_cast(num_anchors); dim_vec[3] = 4; ctx->SetOutputDim("Anchors", phi::make_ddim(dim_vec)); ctx->SetOutputDim("Variances", phi::make_ddim(dim_vec)); diff --git a/paddle/fluid/operators/detection/bipartite_match_op.cc b/paddle/fluid/operators/detection/bipartite_match_op.cc index 8e2089de93d03..a87af78b06893 100644 --- a/paddle/fluid/operators/detection/bipartite_match_op.cc +++ b/paddle/fluid/operators/detection/bipartite_match_op.cc @@ -101,8 +101,8 @@ class 
BipartiteMatchKernel : public framework::OpKernel { break; } if (match_indices[j] == -1 && row_indices[i] == -1 && dist > 0) { - match_indices[j] = i; - row_indices[i] = j; + match_indices[j] = static_cast(i); + row_indices[i] = static_cast(j); match_dist[j] = dist; idx += 1; } @@ -127,7 +127,7 @@ class BipartiteMatchKernel : public framework::OpKernel { continue; } if (dist_data[m * col + j] > max_dist) { - max_idx = j; + max_idx = static_cast(j); max_row_idx = m; max_dist = dist_data[m * col + j]; } @@ -231,7 +231,8 @@ class BipartiteMatchKernel : public framework::OpKernel { auto lod = dist_mat->lod().back(); for (size_t i = 0; i < lod.size() - 1; ++i) { if (lod[i + 1] > lod[i]) { - phi::DenseTensor one_ins = dist_mat->Slice(lod[i], lod[i + 1]); + phi::DenseTensor one_ins = dist_mat->Slice( + static_cast(lod[i]), static_cast(lod[i + 1])); BipartiteMatch(one_ins, indices + i * col, dist + i * col); if (type == "per_prediction") { ArgMaxMatch(one_ins, indices + i * col, dist + i * col, threshold); diff --git a/paddle/fluid/operators/detection/density_prior_box_op.cc b/paddle/fluid/operators/detection/density_prior_box_op.cc index 085fd3ca9d417..bd4230fed5931 100644 --- a/paddle/fluid/operators/detection/density_prior_box_op.cc +++ b/paddle/fluid/operators/detection/density_prior_box_op.cc @@ -84,18 +84,19 @@ class DensityPriorBoxOp : public framework::OperatorWithKernel { densities.size())); size_t num_priors = 0; for (auto densitie : densities) { - num_priors += (fixed_ratios.size()) * (pow(densitie, 2)); + num_priors += (fixed_ratios.size()) * (pow(densitie, 2)); // NOLINT } if (!flatten) { std::vector dim_vec(4); dim_vec[0] = input_dims[2]; dim_vec[1] = input_dims[3]; - dim_vec[2] = num_priors; + dim_vec[2] = static_cast(num_priors); dim_vec[3] = 4; ctx->SetOutputDim("Boxes", phi::make_ddim(dim_vec)); ctx->SetOutputDim("Variances", phi::make_ddim(dim_vec)); } else if (ctx->IsRuntime()) { - int64_t dim0 = input_dims[2] * input_dims[3] * num_priors; + int64_t dim0 = + static_cast(input_dims[2] * input_dims[3] * num_priors); ctx->SetOutputDim("Boxes", {dim0, 4}); ctx->SetOutputDim("Variances", {dim0, 4}); } else { diff --git a/paddle/fluid/operators/detection/generate_mask_labels_op.cc b/paddle/fluid/operators/detection/generate_mask_labels_op.cc index 7f3f61f48e4ec..8267b544b9e6e 100644 --- a/paddle/fluid/operators/detection/generate_mask_labels_op.cc +++ b/paddle/fluid/operators/detection/generate_mask_labels_op.cc @@ -189,12 +189,12 @@ std::vector SampleMaskForOneImage( mask_gt_inds.emplace_back(i); // slice fg segmentation polys - int poly_num = polys_num[i]; + int poly_num = static_cast(polys_num[i]); std::vector> polys; - int s_idx = lod1[i]; + int s_idx = static_cast(lod1[i]); for (int j = 0; j < poly_num; ++j) { - int s = lod2[s_idx + j]; - int e = lod2[s_idx + j + 1]; + int s = static_cast(lod2[s_idx + j]); + int e = static_cast(lod2[s_idx + j + 1]); PADDLE_ENFORCE_NE(s, e, platform::errors::InvalidArgument( @@ -215,8 +215,8 @@ std::vector SampleMaskForOneImage( fg_inds.emplace_back(i); } } - int gt_num = mask_gt_inds.size(); - int fg_num = fg_inds.size(); + int gt_num = static_cast(mask_gt_inds.size()); + int fg_num = static_cast(fg_inds.size()); phi::DenseTensor boxes_from_polys; boxes_from_polys.mutable_data({gt_num, 4}, platform::CPUPlace()); @@ -235,7 +235,7 @@ std::vector SampleMaskForOneImage( Gather(label_int32_data, 1, fg_inds.data(), - fg_inds.size(), + static_cast(fg_inds.size()), mask_class_labels.data()); uint8_t* masks_data = masks.mutable_data( @@ -266,7 +266,7 @@ 
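// --------------------------------------------------------------------------
// The Slice() fixes above all follow one shape: LoD offsets are size_t, while
// DenseTensor::Slice() takes signed 64-bit begin/end indices, so each call
// site now casts explicitly. A sketch of the same idea against a plain
// std::vector -- SliceRows and row_width are hypothetical:
#include <cstddef>
#include <vector>

std::vector<float> SliceRows(const std::vector<float>& data,
                             const std::vector<std::size_t>& lod,
                             std::size_t i,
                             std::size_t row_width) {
  // Casting at the call site makes the signedness change auditable instead of
  // relying on an implicit unsigned-to-signed conversion.
  const auto begin = static_cast<std::ptrdiff_t>(lod[i] * row_width);
  const auto end = static_cast<std::ptrdiff_t>(lod[i + 1] * row_width);
  return std::vector<float>(data.begin() + begin, data.begin() + end);
}
// --------------------------------------------------------------------------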
std::vector SampleMaskForOneImage( for (int64_t j = 0; j < gt_num; ++j) { if (v[j] > max_overlap) { max_overlap = v[j]; - id = j; + id = static_cast(j); } } fg_masks_inds.push_back(id); @@ -316,7 +316,7 @@ std::vector SampleMaskForOneImage( } phi::DenseTensor roi_has_mask_t; - int roi_has_mask_size = roi_has_mask.size(); + int roi_has_mask_size = static_cast(roi_has_mask.size()); int* roi_has_mask_data = roi_has_mask_t.mutable_data({roi_has_mask_size, 1}, ctx.GetPlace()); std::copy(roi_has_mask.begin(), roi_has_mask.end(), roi_has_mask_data); @@ -383,7 +383,7 @@ class GenerateMaskLabelsKernel : public framework::OpKernel { n)); int mask_dim = num_classes * resolution * resolution; - int roi_num = rois->lod().back()[n]; + int roi_num = static_cast(rois->lod().back()[n]); mask_rois->mutable_data({roi_num, kBoxDim}, ctx.GetPlace()); roi_has_mask_int32->mutable_data({roi_num, 1}, ctx.GetPlace()); mask_int32->mutable_data({roi_num, mask_dim}, ctx.GetPlace()); @@ -407,19 +407,25 @@ class GenerateMaskLabelsKernel : public framework::OpKernel { } phi::DenseTensor im_info_slice = im_info->Slice(i, i + 1); phi::DenseTensor gt_classes_slice = - gt_classes->Slice(gt_classes_lod[i], gt_classes_lod[i + 1]); + gt_classes->Slice(static_cast(gt_classes_lod[i]), + static_cast(gt_classes_lod[i + 1])); phi::DenseTensor is_crowd_slice = - is_crowd->Slice(is_crowd_lod[i], is_crowd_lod[i + 1]); + is_crowd->Slice(static_cast(is_crowd_lod[i]), + static_cast(is_crowd_lod[i + 1])); phi::DenseTensor label_int32_slice = - label_int32->Slice(label_int32_lod[i], label_int32_lod[i + 1]); - phi::DenseTensor rois_slice = rois->Slice(rois_lod[i], rois_lod[i + 1]); + label_int32->Slice(static_cast(label_int32_lod[i]), + static_cast(label_int32_lod[i + 1])); + phi::DenseTensor rois_slice = + rois->Slice(static_cast(rois_lod[i]), + static_cast(rois_lod[i + 1])); auto sub_lod_and_offset = framework::GetSubLoDAndAbsoluteOffset(gt_segms_lod, i, i + 1, 0); auto lod_length = sub_lod_and_offset.first; size_t s = sub_lod_and_offset.second.first; size_t e = sub_lod_and_offset.second.second; - phi::DenseTensor gt_segms_slice = gt_segms->Slice(s, e); + phi::DenseTensor gt_segms_slice = + gt_segms->Slice(static_cast(s), static_cast(e)); std::vector tensor_output = SampleMaskForOneImage(dev_ctx, diff --git a/paddle/fluid/operators/detection/generate_proposal_labels_op.cc b/paddle/fluid/operators/detection/generate_proposal_labels_op.cc index c5274c63ec799..f2d1d8acf4f7b 100644 --- a/paddle/fluid/operators/detection/generate_proposal_labels_op.cc +++ b/paddle/fluid/operators/detection/generate_proposal_labels_op.cc @@ -46,7 +46,7 @@ void FilterRoIs(const platform::DeviceContext& ctx, phi::DenseTensor* keep) { const T* rpn_rois_dt = rpn_rois.data(); const T* max_overlap_dt = max_overlap.data(); - int rois_num = max_overlap.numel(); + int rois_num = static_cast(max_overlap.numel()); keep->Resize({rois_num}); int* keep_data = keep->mutable_data(ctx.GetPlace()); int keep_len = 0; @@ -237,14 +237,14 @@ std::vector> SampleFgBgGt(const phi::CPUContext& context, // Reservoir Sampling // sampling fg std::uniform_real_distribution uniform(0, 1); - int fg_rois_per_im = std::floor(batch_size_per_im * fg_fraction); - int fg_rois_this_image = fg_inds.size(); + int fg_rois_per_im = std::floor(batch_size_per_im * fg_fraction); // NOLINT + int fg_rois_this_image = static_cast(fg_inds.size()); int fg_rois_per_this_image = std::min(fg_rois_per_im, fg_rois_this_image); if (use_random) { const int64_t fg_size = static_cast(fg_inds.size()); if (fg_size > 
fg_rois_per_this_image) { for (int64_t i = fg_rois_per_this_image; i < fg_size; ++i) { - int rng_ind = std::floor(uniform(engine) * i); + int rng_ind = std::floor(uniform(engine) * i); // NOLINT if (rng_ind < fg_rois_per_this_image) { std::iter_swap(fg_inds.begin() + rng_ind, fg_inds.begin() + i); std::iter_swap(mapped_gt_inds.begin() + rng_ind, @@ -260,14 +260,14 @@ std::vector> SampleFgBgGt(const phi::CPUContext& context, mapped_gt_inds.begin() + fg_rois_per_this_image); // sampling bg int bg_rois_per_image = batch_size_per_im - fg_rois_per_this_image; - int bg_rois_this_image = bg_inds.size(); + int bg_rois_this_image = static_cast(bg_inds.size()); int bg_rois_per_this_image = std::min(bg_rois_per_image, bg_rois_this_image); if (use_random) { const int64_t bg_size = static_cast(bg_inds.size()); if (bg_size > bg_rois_per_this_image) { for (int64_t i = bg_rois_per_this_image; i < bg_size; ++i) { - int rng_ind = std::floor(uniform(engine) * i); + int rng_ind = std::floor(uniform(engine) * i); // NOLINT if (rng_ind < fg_rois_per_this_image) std::iter_swap(bg_inds.begin() + rng_ind, bg_inds.begin() + i); } @@ -297,8 +297,8 @@ void GatherBoxesLabels(const phi::CPUContext& context, phi::DenseTensor* sampled_labels, phi::DenseTensor* sampled_gts, phi::DenseTensor* sampled_max_overlap) { - int fg_num = fg_inds.size(); - int bg_num = bg_inds.size(); + int fg_num = static_cast(fg_inds.size()); + int bg_num = static_cast(bg_inds.size()); phi::DenseTensor fg_inds_t, bg_inds_t, gt_box_inds_t, gt_label_inds_t; int* fg_inds_data = fg_inds_t.mutable_data({fg_num}, context.GetPlace()); int* bg_inds_data = bg_inds_t.mutable_data({bg_num}, context.GetPlace()); @@ -375,7 +375,7 @@ std::vector SampleRoisForOneImage( roi_filter.mutable_data({proposals_num, kBoxDim}, context.GetPlace()); set_zero(context, &roi_filter, static_cast(0)); } else { - proposals_num = keep.numel(); + proposals_num = static_cast(keep.numel()); roi_filter.mutable_data({proposals_num, kBoxDim}, context.GetPlace()); phi::funcs::CPUGather(context, rpn_rois, keep, &roi_filter); } @@ -383,10 +383,10 @@ std::vector SampleRoisForOneImage( memcpy(rpn_rois_dt, roi_filter_dt, roi_filter.numel() * sizeof(T)); rpn_rois.Resize(roi_filter.dims()); } else { - proposals_num = rpn_rois.dims()[0]; + proposals_num = static_cast(rpn_rois.dims()[0]); } // 1.2 compute overlaps - proposals_num += gt_boxes.dims()[0]; + proposals_num += static_cast(gt_boxes.dims()[0]); phi::DenseTensor proposal_to_gt_overlaps; proposal_to_gt_overlaps.mutable_data({proposals_num, gt_boxes.dims()[0]}, @@ -424,8 +424,8 @@ std::vector SampleRoisForOneImage( // Gather boxes and labels phi::DenseTensor sampled_boxes, sampled_labels, sampled_gts, sampled_max_overlap; - int fg_num = fg_inds.size(); - int bg_num = bg_inds.size(); + int fg_num = static_cast(fg_inds.size()); + int bg_num = static_cast(bg_inds.size()); int boxes_num = fg_num + bg_num; framework::DDim bbox_dim({boxes_num, kBoxDim}); sampled_boxes.mutable_data(bbox_dim, context.GetPlace()); @@ -484,8 +484,8 @@ std::vector SampleRoisForOneImage( if (is_cls_agnostic) { label = 1; } - int dst_idx = i * width + kBoxDim * label; - int src_idx = kBoxDim * i; + int dst_idx = static_cast(i * width + kBoxDim * label); + int src_idx = static_cast(kBoxDim * i); bbox_targets_data[dst_idx] = bbox_targets_single_data[src_idx]; bbox_targets_data[dst_idx + 1] = bbox_targets_single_data[src_idx + 1]; bbox_targets_data[dst_idx + 2] = bbox_targets_single_data[src_idx + 2]; @@ -592,7 +592,7 @@ class GenerateProposalLabelsKernel : public 
framework::OpKernel { std::random_device rnd; std::minstd_rand engine; - int seed = rnd(); + int seed = static_cast(rnd()); engine.seed(seed); framework::LoD lod; @@ -611,19 +611,24 @@ class GenerateProposalLabelsKernel : public framework::OpKernel { continue; } phi::DenseTensor rpn_rois_slice = - rpn_rois->Slice(rpn_rois_lod[i], rpn_rois_lod[i + 1]); + rpn_rois->Slice(static_cast(rpn_rois_lod[i]), + static_cast(rpn_rois_lod[i + 1])); phi::DenseTensor gt_classes_slice = - gt_classes->Slice(gt_classes_lod[i], gt_classes_lod[i + 1]); + gt_classes->Slice(static_cast(gt_classes_lod[i]), + static_cast(gt_classes_lod[i + 1])); phi::DenseTensor is_crowd_slice = - is_crowd->Slice(is_crowd_lod[i], is_crowd_lod[i + 1]); + is_crowd->Slice(static_cast(is_crowd_lod[i]), + static_cast(is_crowd_lod[i + 1])); phi::DenseTensor gt_boxes_slice = - gt_boxes->Slice(gt_boxes_lod[i], gt_boxes_lod[i + 1]); + gt_boxes->Slice(static_cast(gt_boxes_lod[i]), + static_cast(gt_boxes_lod[i + 1])); phi::DenseTensor im_info_slice = im_info->Slice(i, i + 1); phi::DenseTensor max_overlap_slice; if (is_cascade_rcnn) { auto* max_overlap = context.Input("MaxOverlap"); max_overlap_slice = - max_overlap->Slice(rpn_rois_lod[i], rpn_rois_lod[i + 1]); + max_overlap->Slice(static_cast(rpn_rois_lod[i]), + static_cast(rpn_rois_lod[i + 1])); } else { max_overlap_slice.mutable_data({rpn_rois_slice.dims()[0]}, context.GetPlace()); diff --git a/paddle/fluid/operators/detection/generate_proposals_op.cc b/paddle/fluid/operators/detection/generate_proposals_op.cc index d6987c7ba8c7e..710db1668e237 100644 --- a/paddle/fluid/operators/detection/generate_proposals_op.cc +++ b/paddle/fluid/operators/detection/generate_proposals_op.cc @@ -160,7 +160,7 @@ class GenerateProposalsKernel : public framework::OpKernel { AppendProposals(rpn_roi_probs, num_proposals, scores); num_proposals += proposals.dims()[0]; lod0.push_back(num_proposals); - tmp_num.push_back(proposals.dims()[0]); + tmp_num.push_back(proposals.dims()[0]); // NOLINT } if (context.HasOutput("RpnRoisNum")) { auto *rpn_rois_num = context.Output("RpnRoisNum"); diff --git a/paddle/fluid/operators/detection/locality_aware_nms_op.cc b/paddle/fluid/operators/detection/locality_aware_nms_op.cc index 1bdfb5b7e9450..9487925a3ba0e 100644 --- a/paddle/fluid/operators/detection/locality_aware_nms_op.cc +++ b/paddle/fluid/operators/detection/locality_aware_nms_op.cc @@ -135,10 +135,10 @@ void GetMaxScoreIndexWithLocalityAware( scores[index] += scores[i]; } else { skip[index] = false; - index = i; + index = static_cast(i); } } else { - index = i; + index = static_cast(i); } } @@ -262,9 +262,9 @@ class LocalityAwareNMSKernel : public framework::OpKernel { nms_threshold, nms_eta, nms_top_k, - &((*indices)[c]), + &((*indices)[c]), // NOLINT normalized); - num_det += (*indices)[c].size(); + num_det += (*indices)[c].size(); // NOLINT } *num_nmsed_out = num_det; @@ -298,7 +298,7 @@ class LocalityAwareNMSKernel : public framework::OpKernel { } new_indices.swap(*indices); - *num_nmsed_out = keep_top_k; + *num_nmsed_out = keep_top_k; // NOLINT } } @@ -365,7 +365,7 @@ class LocalityAwareNMSKernel : public framework::OpKernel { int64_t out_dim = box_dim + 2; int num_nmsed_out = 0; phi::DenseTensor boxes_slice, scores_slice; - int n = batch_size; + int n = static_cast(batch_size); for (int i = 0; i < n; ++i) { scores_slice = scores.Slice(i, i + 1); scores_slice.Resize({score_dims[1], score_dims[2]}); @@ -383,7 +383,7 @@ class LocalityAwareNMSKernel : public framework::OpKernel { 
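// --------------------------------------------------------------------------
// SampleFgBgGt/ReservoirSampling in generate_proposal_labels_op.cc above keep
// the first `num` indices and swap each later index in with probability
// num/i -- classic reservoir sampling. The float-to-int narrowing on the
// random index is what the "// NOLINT" markers there silence; this sketch
// (hypothetical names) writes the casts out explicitly instead:
#include <algorithm>
#include <cmath>
#include <random>
#include <vector>

void ReservoirKeepFirstN(std::vector<int>* inds,
                         int num,
                         std::minstd_rand* engine) {
  std::uniform_real_distribution<float> uniform(0.0f, 1.0f);
  const int len = static_cast<int>(inds->size());
  for (int i = num; i < len; ++i) {
    // uniform() * i is a float and std::floor keeps it one, so converting it
    // to an index narrows; the cast documents that.
    const int rng_ind =
        static_cast<int>(std::floor(uniform(*engine) * static_cast<float>(i)));
    if (rng_ind < num) {
      std::iter_swap(inds->begin() + rng_ind, inds->begin() + i);
    }
  }
}
// --------------------------------------------------------------------------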
batch_starts.push_back(batch_starts.back() + num_nmsed_out); } - int num_kept = batch_starts.back(); + int num_kept = static_cast(batch_starts.back()); if (num_kept == 0) { T* od = outs->mutable_data({1, 1}, ctx.GetPlace()); od[0] = -1; @@ -398,8 +398,8 @@ class LocalityAwareNMSKernel : public framework::OpKernel { scores_slice.Resize({score_dims[1], score_dims[2]}); boxes_slice.Resize({score_dims[2], box_dim}); - int64_t s = batch_starts[i]; - int64_t e = batch_starts[i + 1]; + int64_t s = static_cast(batch_starts[i]); + int64_t e = static_cast(batch_starts[i + 1]); if (e > s) { phi::DenseTensor out = outs->Slice(s, e); LocalityAwareNMSOutput(dev_ctx, diff --git a/paddle/fluid/operators/detection/mask_util.cc b/paddle/fluid/operators/detection/mask_util.cc index 83bb8cd8ccd69..eb08fe98460ea 100644 --- a/paddle/fluid/operators/detection/mask_util.cc +++ b/paddle/fluid/operators/detection/mask_util.cc @@ -51,12 +51,14 @@ void Poly2Mask(const float* xy, int k, int h, int w, uint8_t* mask) { x = reinterpret_cast(xptr->ptr()); y = x + (k + 1); - for (j = 0; j < k; j++) x[j] = std::lround(scale * xy[j * 2 + 0]); + for (j = 0; j < k; j++) + x[j] = static_cast(std::lround(scale * xy[j * 2 + 0])); x[k] = x[0]; - for (j = 0; j < k; j++) y[j] = std::lround(scale * xy[j * 2 + 1]); + for (j = 0; j < k; j++) + y[j] = static_cast(std::lround(scale * xy[j * 2 + 1])); y[k] = y[0]; for (j = 0; j < k; j++) { - m += UMax(abs(x[j] - x[j + 1]), abs(y[j] - y[j + 1])) + 1; + m += static_cast(UMax(abs(x[j] - x[j + 1]), abs(y[j] - y[j + 1])) + 1); } auto vptr = memory::Alloc(cpu, sizeof(int) * m * 2); u = reinterpret_cast(vptr->ptr()); @@ -82,7 +84,7 @@ void Poly2Mask(const float* xy, int k, int h, int w, uint8_t* mask) { for (d = 0; d <= dx; d++) { t = flip ? dx - d : d; u[m] = t + xs; - v[m] = std::lround(ys + s * t); + v[m] = static_cast(std::lround(ys + s * t)); m++; } } else { @@ -90,7 +92,7 @@ void Poly2Mask(const float* xy, int k, int h, int w, uint8_t* mask) { for (d = 0; d <= dy; d++) { t = flip ? 
dy - d : d; v[m] = t + ys; - u[m] = std::lround(xs + s * t); + u[m] = static_cast(std::lround(xs + s * t)); m++; } } @@ -199,11 +201,11 @@ void Polys2MaskWrtBox(const std::vector>& polygons, malloc(M * M * polygons.size() * sizeof(uint8_t))); } for (size_t i = 0; i < polygons.size(); ++i) { - int k = polygons[i].size() / 2; + int k = static_cast(polygons[i].size() / 2); std::vector p; for (int j = 0; j < k; ++j) { - float pw = (polygons[i][2 * j] - box[0]) * M / w; - float ph = (polygons[i][2 * j + 1] - box[1]) * M / h; + float pw = (polygons[i][2 * j] - box[0]) * M / w; // NOLINT + float ph = (polygons[i][2 * j + 1] - box[1]) * M / h; // NOLINT p.push_back(pw); p.push_back(ph); } diff --git a/paddle/fluid/operators/detection/mine_hard_examples_op.cc b/paddle/fluid/operators/detection/mine_hard_examples_op.cc index 3e2ad2c856459..4c3e934fab4dc 100644 --- a/paddle/fluid/operators/detection/mine_hard_examples_op.cc +++ b/paddle/fluid/operators/detection/mine_hard_examples_op.cc @@ -71,8 +71,8 @@ class MineHardExamplesKernel : public framework::OpKernel { framework::TensorCopy( *in_matched_indices, ctx.GetPlace(), out_match_indices); - int batch_size = in_matched_indices->dims()[0]; - int prior_num = in_matched_indices->dims()[1]; + int batch_size = static_cast(in_matched_indices->dims()[0]); + int prior_num = static_cast(in_matched_indices->dims()[1]); auto match_indices = framework::EigenMatrix::From(*in_matched_indices); @@ -111,7 +111,8 @@ class MineHardExamplesKernel : public framework::OpKernel { for (int m = 0; m < prior_num; ++m) { if (match_indices(n, m) != -1) ++num_pos; } - neg_sel = std::min(static_cast(num_pos * neg_pos_ratio), neg_sel); + neg_sel = std::min(static_cast(num_pos * neg_pos_ratio), // NOLINT + neg_sel); } else if (mining_type == MiningType::kHardExample) { neg_sel = std::min(sample_size, neg_sel); } @@ -156,7 +157,7 @@ class MineHardExamplesKernel : public framework::OpKernel { for (auto neg_indices : all_neg_indices) { std::copy(neg_indices.begin(), neg_indices.end(), neg_data + neg_offset); - neg_offset += neg_indices.size(); + neg_offset += static_cast(neg_indices.size()); } out_neg_indices->set_lod(out_neg_indices_lod); return; diff --git a/paddle/fluid/operators/detection/multiclass_nms_op.cc b/paddle/fluid/operators/detection/multiclass_nms_op.cc index 0cfb67c609e19..432713c60d969 100644 --- a/paddle/fluid/operators/detection/multiclass_nms_op.cc +++ b/paddle/fluid/operators/detection/multiclass_nms_op.cc @@ -42,7 +42,7 @@ class MultiClassNMSOp : public framework::OperatorWithKernel { OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "MultiClassNMS"); auto box_dims = ctx->GetInputDim("BBoxes"); auto score_dims = ctx->GetInputDim("Scores"); - auto score_size = score_dims.size(); + int score_size = static_cast(score_dims.size()); if (ctx->IsRuntime()) { PADDLE_ENFORCE_EQ(score_size == 2 || score_size == 3, @@ -128,9 +128,9 @@ void SliceOneClass(const platform::DeviceContext& ctx, T* item_data = one_class_item->mutable_data(ctx.GetPlace()); const T* items_data = items.data(); const int64_t num_item = items.dims()[0]; - const int class_num = items.dims()[1]; + const int class_num = static_cast(items.dims()[1]); if (items.dims().size() == 3) { - int item_size = items.dims()[2]; + int item_size = static_cast(items.dims()[2]); for (int i = 0; i < num_item; ++i) { std::memcpy(item_data + i * item_size, items_data + i * class_num * item_size + class_id * item_size, @@ -243,12 +243,12 @@ class MultiClassNMSKernel : public framework::OpKernel { nms_threshold, 
nms_eta, nms_top_k, - &((*indices)[c]), + &((*indices)[c]), // NOLINT normalized); if (scores_size == 2) { - std::stable_sort((*indices)[c].begin(), (*indices)[c].end()); + std::stable_sort((*indices)[c].begin(), (*indices)[c].end()); // NOLINT } - num_det += (*indices)[c].size(); + num_det += (*indices)[c].size(); // NOLINT } *num_nmsed_out = num_det; @@ -292,7 +292,7 @@ class MultiClassNMSKernel : public framework::OpKernel { } } new_indices.swap(*indices); - *num_nmsed_out = keep_top_k; + *num_nmsed_out = keep_top_k; // NOLINT } } @@ -340,7 +340,8 @@ class MultiClassNMSKernel : public framework::OpKernel { bdata = bbox.data() + idx * box_size; odata[count * out_dim + 1] = *(scores_data + idx * class_num + label); if (oindices != nullptr) { - oindices[count] = offset + idx * class_num + label; + oindices[count] = + static_cast(offset + idx * class_num + label); } } // xmin, ymin, xmax, ymax or multi-points coordinates @@ -371,9 +372,10 @@ class MultiClassNMSKernel : public framework::OpKernel { phi::DenseTensor boxes_slice, scores_slice; int n = 0; if (has_roisnum) { - n = score_size == 3 ? batch_size : rois_num->numel(); + n = static_cast(score_size == 3 ? batch_size : rois_num->numel()); } else { - n = score_size == 3 ? batch_size : boxes->lod().back().size() - 1; + n = static_cast(score_size == 3 ? batch_size + : boxes->lod().back().size() - 1); } for (int i = 0; i < n; ++i) { std::map> indices; @@ -394,8 +396,10 @@ class MultiClassNMSKernel : public framework::OpKernel { batch_starts.push_back(batch_starts.back()); continue; } - scores_slice = scores->Slice(boxes_lod[i], boxes_lod[i + 1]); - boxes_slice = boxes->Slice(boxes_lod[i], boxes_lod[i + 1]); + scores_slice = scores->Slice(static_cast(boxes_lod[i]), + static_cast(boxes_lod[i + 1])); + boxes_slice = boxes->Slice(static_cast(boxes_lod[i]), + static_cast(boxes_lod[i + 1])); } MultiClassNMS( ctx, scores_slice, boxes_slice, score_size, &indices, &num_nmsed_out); @@ -403,7 +407,7 @@ class MultiClassNMSKernel : public framework::OpKernel { batch_starts.push_back(batch_starts.back() + num_nmsed_out); } - int num_kept = batch_starts.back(); + int num_kept = static_cast(batch_starts.back()); if (num_kept == 0) { if (return_index) { outs->mutable_data({0, out_dim}, ctx.GetPlace()); @@ -434,15 +438,17 @@ class MultiClassNMSKernel : public framework::OpKernel { boxes_lod = boxes->lod().back(); } if (boxes_lod[i] == boxes_lod[i + 1]) continue; - scores_slice = scores->Slice(boxes_lod[i], boxes_lod[i + 1]); - boxes_slice = boxes->Slice(boxes_lod[i], boxes_lod[i + 1]); + scores_slice = scores->Slice(static_cast(boxes_lod[i]), + static_cast(boxes_lod[i + 1])); + boxes_slice = boxes->Slice(static_cast(boxes_lod[i]), + static_cast(boxes_lod[i + 1])); if (return_index) { - offset = boxes_lod[i] * score_dims[1]; + offset = static_cast(boxes_lod[i] * score_dims[1]); } } - int64_t s = batch_starts[i]; - int64_t e = batch_starts[i + 1]; + int64_t s = static_cast(batch_starts[i]); + int64_t e = static_cast(batch_starts[i + 1]); if (e > s) { phi::DenseTensor out = outs->Slice(s, e); if (return_index) { @@ -466,7 +472,8 @@ class MultiClassNMSKernel : public framework::OpKernel { nms_rois_num->mutable_data({n}, ctx.GetPlace()); int* num_data = nms_rois_num->data(); for (int i = 1; i <= n; i++) { - num_data[i - 1] = batch_starts[i] - batch_starts[i - 1]; + num_data[i - 1] = + static_cast(batch_starts[i] - batch_starts[i - 1]); } nms_rois_num->Resize({n}); } diff --git a/paddle/fluid/operators/detection/retinanet_detection_output_op.cc 
b/paddle/fluid/operators/detection/retinanet_detection_output_op.cc index 9f4d9d5c4726e..1100f79b1b2d1 100644 --- a/paddle/fluid/operators/detection/retinanet_detection_output_op.cc +++ b/paddle/fluid/operators/detection/retinanet_detection_output_op.cc @@ -364,7 +364,7 @@ class RetinanetDetectionOutputKernel : public framework::OpKernel { if (static_cast(preds.count(c))) { const std::vector> cls_dets = preds.at(c); NMSFast(cls_dets, nms_threshold, nms_eta, &(indices[c])); - num_det += indices[c].size(); + num_det += static_cast(indices[c].size()); } } @@ -530,14 +530,14 @@ class RetinanetDetectionOutputKernel : public framework::OpKernel { batch_starts.push_back(batch_starts.back() + num_nmsed_out); } - int num_kept = batch_starts.back(); + int num_kept = static_cast(batch_starts.back()); if (num_kept == 0) { outs->Resize({0, out_dim}); } else { outs->mutable_data({num_kept, out_dim}, ctx.GetPlace()); for (int i = 0; i < batch_size; ++i) { - int64_t s = batch_starts[i]; - int64_t e = batch_starts[i + 1]; + int64_t s = static_cast(batch_starts[i]); + int64_t e = static_cast(batch_starts[i + 1]); if (e > s) { phi::DenseTensor out = outs->Slice(s, e); MultiClassOutput(dev_ctx, all_nmsed_out[i], &out); diff --git a/paddle/fluid/operators/detection/roi_perspective_transform_op.cc b/paddle/fluid/operators/detection/roi_perspective_transform_op.cc index 5f732e423b27c..51f058617edc6 100644 --- a/paddle/fluid/operators/detection/roi_perspective_transform_op.cc +++ b/paddle/fluid/operators/detection/roi_perspective_transform_op.cc @@ -259,10 +259,10 @@ class CPUROIPerspectiveTransformOpKernel : public framework::OpKernel { auto spatial_scale = ctx.Attr("spatial_scale"); auto in_dims = phi::vectorize(in->dims()); - int channels = in_dims[1]; - int in_height = in_dims[2]; - int in_width = in_dims[3]; - int rois_num = rois->dims()[0]; + int channels = static_cast(in_dims[1]); + int in_height = static_cast(in_dims[2]); + int in_width = static_cast(in_dims[3]); + int rois_num = static_cast(rois->dims()[0]); const T* input_data = in->data(); int* mask_data = mask->mutable_data(ctx.GetPlace()); @@ -273,7 +273,7 @@ class CPUROIPerspectiveTransformOpKernel : public framework::OpKernel { auto lod = rois->lod().back(); for (size_t i = 0; i < lod.size() - 1; ++i) { for (size_t j = lod[i]; j < lod[i + 1]; ++j) { - roi2image_data[j] = i; + roi2image_data[j] = static_cast(i); } } @@ -413,7 +413,7 @@ class CPUROIPerspectiveTransformGradOpKernel : public framework::OpKernel { int channels = in_dims[1]; int in_height = in_dims[2]; int in_width = in_dims[3]; - int rois_num = rois->dims()[0]; + int rois_num = static_cast(rois->dims()[0]); T* in_grad_data = in_grad->mutable_data(ctx.GetPlace()); const T* out_grad_data = out_grad->data(); @@ -425,7 +425,7 @@ class CPUROIPerspectiveTransformGradOpKernel : public framework::OpKernel { auto lod = rois->lod().back(); for (size_t i = 0; i < lod.size() - 1; ++i) { for (size_t j = lod[i]; j < lod[i + 1]; ++j) { - roi2image_data[j] = i; + roi2image_data[j] = static_cast(i); } } diff --git a/paddle/fluid/operators/detection/rpn_target_assign_op.cc b/paddle/fluid/operators/detection/rpn_target_assign_op.cc index 50a22307554c9..a41b8a70a4283 100644 --- a/paddle/fluid/operators/detection/rpn_target_assign_op.cc +++ b/paddle/fluid/operators/detection/rpn_target_assign_op.cc @@ -119,7 +119,7 @@ std::vector FilterStraddleAnchor( T im_height, T im_width) { std::vector inds_inside; - int anchor_num = anchor->dims()[0]; + int anchor_num = static_cast(anchor->dims()[0]); auto* anchor_data 
= anchor->data(); if (rpn_straddle_thresh >= 0) { int index; @@ -137,7 +137,7 @@ std::vector FilterStraddleAnchor( inds_inside.emplace_back(i); } } - int inside_num = inds_inside.size(); + int inside_num = static_cast(inds_inside.size()); phi::DenseTensor inds_inside_t; int* inds_inside_data = inds_inside_t.mutable_data({inside_num}, context.GetPlace()); @@ -157,7 +157,7 @@ template phi::DenseTensor FilterCrowdGt(const phi::CPUContext& context, phi::DenseTensor* gt_boxes, phi::DenseTensor* is_crowd) { - int gt_num = gt_boxes->dims()[0]; + int gt_num = static_cast(gt_boxes->dims()[0]); std::vector not_crowd_inds; auto* is_crowd_data = is_crowd->data(); for (int i = 0; i < gt_num; ++i) { @@ -165,7 +165,7 @@ phi::DenseTensor FilterCrowdGt(const phi::CPUContext& context, not_crowd_inds.emplace_back(i); } } - int ncrowd_num = not_crowd_inds.size(); + int ncrowd_num = static_cast(not_crowd_inds.size()); phi::DenseTensor ncrowd_gt_boxes; T* ncrowd_gt_boxes_data = ncrowd_gt_boxes.mutable_data({ncrowd_num, 4}, context.GetPlace()); @@ -182,11 +182,11 @@ void ReservoirSampling(const int num, std::minstd_rand engine, bool use_random) { std::uniform_real_distribution uniform(0, 1); - size_t len = inds->size(); - if (len > static_cast(num)) { + int len = static_cast(inds->size()); + if (len > num) { if (use_random) { - for (size_t i = num; i < len; ++i) { - int rng_ind = std::floor(uniform(engine) * i); + for (int i = num; i < len; ++i) { + int rng_ind = std::floor(uniform(engine) * i); // NOLINT if (rng_ind < num) std::iter_swap(inds->begin() + rng_ind, inds->begin() + i); } @@ -211,8 +211,8 @@ void ScoreAssign(const T* anchor_by_gt_overlap_data, std::minstd_rand engine, bool use_random) { float epsilon = 0.00001; - int anchor_num = anchor_to_gt_max.dims()[0]; - int gt_num = gt_to_anchor_max.dims()[0]; + int anchor_num = static_cast(anchor_to_gt_max.dims()[0]); + int gt_num = static_cast(gt_to_anchor_max.dims()[0]); std::vector target_label(anchor_num, -1); std::vector fg_inds_fake; std::vector bg_inds_fake; @@ -234,14 +234,15 @@ void ScoreAssign(const T* anchor_by_gt_overlap_data, bool is_anchor_great_than_thresh = (anchor_to_gt_max_data[i] >= rpn_positive_overlap); if (is_anchors_with_max_overlap || is_anchor_great_than_thresh) { - fg_inds_fake.push_back(i); + fg_inds_fake.push_back(i); // NOLINT } } // Reservoir Sampling int fg_num = 0; if (rpn_fg_fraction > 0 && rpn_batch_size_per_im > 0) { - fg_num = static_cast(rpn_fg_fraction * rpn_batch_size_per_im); + fg_num = + static_cast(rpn_fg_fraction * rpn_batch_size_per_im); // NOLINT ReservoirSampling(fg_num, &fg_inds_fake, engine, use_random); } else { fg_num = static_cast(fg_inds_fake.size()); @@ -253,7 +254,7 @@ void ScoreAssign(const T* anchor_by_gt_overlap_data, for (int64_t i = 0; i < anchor_num; ++i) { if (anchor_to_gt_max_data[i] < rpn_negative_overlap) { - bg_inds_fake.push_back(i); + bg_inds_fake.push_back(i); // NOLINT } } int bg_num = 0; @@ -289,8 +290,8 @@ void ScoreAssign(const T* anchor_by_gt_overlap_data, } if (target_label[i] == 0) bg_inds->emplace_back(i); } - fg_num = fg_inds->size(); - bg_num = bg_inds->size(); + fg_num = static_cast(fg_inds->size()); + bg_num = static_cast(bg_inds->size()); tgt_lbl->resize(fg_num + bg_num, 0); std::vector fg_lbl(fg_num, 1); @@ -310,8 +311,8 @@ std::vector SampleRpnFgBgGt( std::minstd_rand engine, bool use_random) { auto* anchor_by_gt_overlap_data = anchor_by_gt_overlap.data(); - int anchor_num = anchor_by_gt_overlap.dims()[0]; - int gt_num = anchor_by_gt_overlap.dims()[1]; + int anchor_num = 
static_cast(anchor_by_gt_overlap.dims()[0]); + int gt_num = static_cast(anchor_by_gt_overlap.dims()[1]); std::vector fg_inds; std::vector bg_inds; @@ -358,9 +359,9 @@ std::vector SampleRpnFgBgGt( engine, use_random); - int fg_num = fg_inds.size(); - int bg_num = bg_inds.size(); - int fg_fake_num = fg_fake.size(); + int fg_num = static_cast(fg_inds.size()); + int bg_num = static_cast(bg_inds.size()); + int fg_fake_num = static_cast(fg_fake.size()); gt_inds.reserve(fg_fake_num); for (int i = 0; i < fg_fake_num; ++i) { gt_inds.emplace_back(argmax[fg_fake[i]]); @@ -444,7 +445,7 @@ class RpnTargetAssignKernel : public framework::OpKernel { std::random_device rnd; std::minstd_rand engine; - int seed = rnd(); + int seed = static_cast(rnd()); engine.seed(seed); framework::LoD lod_loc, loc_score; @@ -457,9 +458,11 @@ class RpnTargetAssignKernel : public framework::OpKernel { auto is_crowd_lod = is_crowd->lod().back(); for (int i = 0; i < batch_num; ++i) { phi::DenseTensor gt_boxes_slice = - gt_boxes->Slice(gt_boxes_lod[i], gt_boxes_lod[i + 1]); + gt_boxes->Slice(static_cast(gt_boxes_lod[i]), + static_cast(gt_boxes_lod[i + 1])); phi::DenseTensor is_crowd_slice = - is_crowd->Slice(is_crowd_lod[i], is_crowd_lod[i + 1]); + is_crowd->Slice(static_cast(is_crowd_lod[i]), + static_cast(is_crowd_lod[i + 1])); phi::DenseTensor im_info_slice = im_info->Slice(i, i + 1); auto* im_info_data = im_info_slice.data(); auto im_height = im_info_data[0]; @@ -499,8 +502,8 @@ class RpnTargetAssignKernel : public framework::OpKernel { phi::DenseTensor sampled_gt_index = loc_score_tgtlbl_gt[3]; phi::DenseTensor sampled_bbox_inside_weight = loc_score_tgtlbl_gt[4]; - int loc_num = sampled_loc_index.dims()[0]; - int score_num = sampled_score_index.dims()[0]; + int loc_num = static_cast(sampled_loc_index.dims()[0]); + int score_num = static_cast(sampled_score_index.dims()[0]); // unmap to all anchor phi::DenseTensor sampled_loc_index_unmap, sampled_score_index_unmap; sampled_loc_index_unmap.mutable_data({loc_num}, place); @@ -540,7 +543,7 @@ class RpnTargetAssignKernel : public framework::OpKernel { &sampled_tgt_bbox); // Add anchor offset - int anchor_offset = i * anchor_num; + int anchor_offset = static_cast(i * anchor_num); auto sampled_loc_index_unmap_et = framework::EigenTensor::From(sampled_loc_index_unmap); sampled_loc_index_unmap_et = sampled_loc_index_unmap_et + anchor_offset; @@ -865,7 +868,7 @@ std::vector FilterCrowdGtBoxLabel( phi::DenseTensor* gt_boxes, phi::DenseTensor* gt_labels, phi::DenseTensor* is_crowd) { - int gt_num = gt_boxes->dims()[0]; + int gt_num = static_cast(gt_boxes->dims()[0]); std::vector not_crowd_inds; auto* is_crowd_data = is_crowd->data(); for (int i = 0; i < gt_num; ++i) { @@ -873,7 +876,7 @@ std::vector FilterCrowdGtBoxLabel( not_crowd_inds.emplace_back(i); } } - int ncrowd_num = not_crowd_inds.size(); + int ncrowd_num = static_cast(not_crowd_inds.size()); phi::DenseTensor ncrowd_gt_boxes, ncrowd_gt_labels; T* ncrowd_gt_boxes_data = ncrowd_gt_boxes.mutable_data({ncrowd_num, 4}, context.GetPlace()); @@ -904,8 +907,8 @@ std::vector GetAllFgBgGt( const float negative_overlap, std::minstd_rand engine) { auto* anchor_by_gt_overlap_data = anchor_by_gt_overlap.data(); - int anchor_num = anchor_by_gt_overlap.dims()[0]; - int gt_num = anchor_by_gt_overlap.dims()[1]; + int anchor_num = static_cast(anchor_by_gt_overlap.dims()[0]); + int gt_num = static_cast(anchor_by_gt_overlap.dims()[1]); std::vector fg_inds; std::vector bg_inds; @@ -951,14 +954,14 @@ std::vector GetAllFgBgGt( engine, false); const 
int* gt_labels_data = ncrowd_gt_labels.data(); - int64_t fg_num = fg_inds.size(); + int64_t fg_num = static_cast(fg_inds.size()); for (int64_t i = 0; i < fg_num; ++i) { int gt_idx = argmax[fg_inds[i]]; tgt_lbl[i] = gt_labels_data[gt_idx]; } - int bg_num = bg_inds.size(); - int fg_fake_num = fg_fake.size(); + int bg_num = static_cast(bg_inds.size()); + int fg_fake_num = static_cast(fg_fake.size()); gt_inds.reserve(fg_fake_num); for (int i = 0; i < fg_fake_num; ++i) { gt_inds.emplace_back(argmax[fg_fake[i]]); @@ -983,7 +986,7 @@ std::vector GetAllFgBgGt( std::copy(bbox_inside_weight.begin(), bbox_inside_weight.end(), bbox_inside_weight_data); - fg_num_data[0] = fg_fake.size() + 1; + fg_num_data[0] = static_cast(fg_fake.size()) + 1; std::vector loc_score_tgtlbl_gt; loc_score_tgtlbl_gt.emplace_back(loc_index_t); loc_score_tgtlbl_gt.emplace_back(score_index_t); @@ -1054,7 +1057,7 @@ class RetinanetTargetAssignKernel : public framework::OpKernel { std::random_device rnd; std::minstd_rand engine; - int seed = rnd(); + int seed = static_cast(rnd()); engine.seed(seed); framework::LoD lod_loc, loc_score, lod_fg; @@ -1070,11 +1073,14 @@ class RetinanetTargetAssignKernel : public framework::OpKernel { auto is_crowd_lod = is_crowd->lod().back(); for (int i = 0; i < batch_num; ++i) { phi::DenseTensor gt_boxes_slice = - gt_boxes->Slice(gt_boxes_lod[i], gt_boxes_lod[i + 1]); + gt_boxes->Slice(static_cast(gt_boxes_lod[i]), + static_cast(gt_boxes_lod[i + 1])); phi::DenseTensor gt_labels_slice = - gt_labels->Slice(gt_labels_lod[i], gt_labels_lod[i + 1]); + gt_labels->Slice(static_cast(gt_labels_lod[i]), + static_cast(gt_labels_lod[i + 1])); phi::DenseTensor is_crowd_slice = - is_crowd->Slice(is_crowd_lod[i], is_crowd_lod[i + 1]); + is_crowd->Slice(static_cast(is_crowd_lod[i]), + static_cast(is_crowd_lod[i + 1])); phi::DenseTensor im_info_slice = im_info->Slice(i, i + 1); auto* im_info_data = im_info_slice.data(); auto im_height = im_info_data[0]; @@ -1116,8 +1122,8 @@ class RetinanetTargetAssignKernel : public framework::OpKernel { phi::DenseTensor sampled_bbox_inside_weight = loc_score_tgtlbl_gt[4]; phi::DenseTensor sampled_fg_num = loc_score_tgtlbl_gt[5]; - int loc_num = sampled_loc_index.dims()[0]; - int score_num = sampled_score_index.dims()[0]; + int loc_num = static_cast(sampled_loc_index.dims()[0]); + int score_num = static_cast(sampled_score_index.dims()[0]); // unmap to all anchor phi::DenseTensor sampled_loc_index_unmap, sampled_score_index_unmap; sampled_loc_index_unmap.mutable_data({loc_num}, place); @@ -1157,7 +1163,7 @@ class RetinanetTargetAssignKernel : public framework::OpKernel { &sampled_tgt_bbox); // Add anchor offset - int anchor_offset = i * anchor_num; + int anchor_offset = static_cast(i * anchor_num); auto sampled_loc_index_unmap_et = framework::EigenTensor::From(sampled_loc_index_unmap); sampled_loc_index_unmap_et = sampled_loc_index_unmap_et + anchor_offset; diff --git a/paddle/fluid/operators/fused/fused_embedding_fc_lstm_op.cc b/paddle/fluid/operators/fused/fused_embedding_fc_lstm_op.cc index 1cc4067bef930..96c400ea625d4 100644 --- a/paddle/fluid/operators/fused/fused_embedding_fc_lstm_op.cc +++ b/paddle/fluid/operators/fused/fused_embedding_fc_lstm_op.cc @@ -87,7 +87,7 @@ void FusedEmbeddingFCLSTMOp::InferShape( } auto wh_dims = ctx->GetInputDim("WeightH"); - int frame_size = wh_dims[1] / 4; + int frame_size = static_cast(wh_dims[1] / 4); PADDLE_ENFORCE_EQ( wh_dims.size(), 2, @@ -403,8 +403,8 @@ class FusedEmbeddingFCLSTMKernel : public framework::OpKernel { // log(INFO) << 
"====> SeqCompute" << "\n"; auto ids_lod = ids->lod(); - const int total_T = ids_dims[0]; - const int N = ids_lod[0].size() - 1; + const int total_T = static_cast(ids_dims[0]); + const int N = static_cast(ids_lod[0].size() - 1); const T* h0_data = h0 ? h0->data() : nullptr; const T* c0_data = c0 ? c0->data() : nullptr; T* xx_data = xx->mutable_data(place); @@ -545,7 +545,7 @@ class FusedEmbeddingFCLSTMKernel : public framework::OpKernel { auto batched_lod = batched_input->lod(); const auto& seq_order = batched_lod[2]; - const int max_bs = seq_order.size(); + const int max_bs = static_cast(seq_order.size()); reordered_h0->Resize({max_bs, D}); reordered_c0->Resize({max_bs, D}); @@ -589,7 +589,7 @@ class FusedEmbeddingFCLSTMKernel : public framework::OpKernel { prev_c_data = batched_c_out_data; } const auto& batch_starts = batched_lod[0]; - const int max_seq_len = batch_starts.size() - 1; + const int max_seq_len = static_cast(batch_starts.size() - 1); const int offset = tstart * max_bs * D; batched_input_data = batched_input_data + offset * 4; batched_h_out_data = batched_h_out_data + offset; @@ -616,7 +616,8 @@ class FusedEmbeddingFCLSTMKernel : public framework::OpKernel { if (use_peepholes) { for (int step = tstart; step < max_seq_len; ++step) { - const int cur_bs = batch_starts[step + 1] - batch_starts[step]; + const int cur_bs = + static_cast(batch_starts[step + 1] - batch_starts[step]); GEMM_WH_ADDON(cur_bs, prev_h_data, batched_input_data); DEFINE_CUR; for (int i = 0; i < cur_bs; ++i) { @@ -628,7 +629,8 @@ class FusedEmbeddingFCLSTMKernel : public framework::OpKernel { } } else { for (int step = tstart; step < max_seq_len; ++step) { - const int cur_bs = batch_starts[step + 1] - batch_starts[step]; + const int cur_bs = + static_cast(batch_starts[step + 1] - batch_starts[step]); GEMM_WH_ADDON(cur_bs, prev_h_data, batched_input_data); DEFINE_CUR; for (int i = 0; i < cur_bs; ++i) { diff --git a/paddle/fluid/operators/fused/fused_matmul_op.cc b/paddle/fluid/operators/fused/fused_matmul_op.cc index cfe9234d26973..ca3d02bf9bfa1 100644 --- a/paddle/fluid/operators/fused/fused_matmul_op.cc +++ b/paddle/fluid/operators/fused/fused_matmul_op.cc @@ -106,10 +106,10 @@ class FusedMatmulOp : public framework::OperatorWithKernel { } } if (!x_broadcasted) { - new_dims.push_back(M); + new_dims.push_back(M); // NOLINT } if (!y_broadcasted) { - new_dims.push_back(N); + new_dims.push_back(N); // NOLINT } ctx->SetOutputDim("Out", phi::make_ddim(new_dims)); diff --git a/paddle/fluid/operators/fused/fused_seqpool_cvm_op.cc b/paddle/fluid/operators/fused/fused_seqpool_cvm_op.cc index 9bfc5d3572563..d8ef46b040e8b 100644 --- a/paddle/fluid/operators/fused/fused_seqpool_cvm_op.cc +++ b/paddle/fluid/operators/fused/fused_seqpool_cvm_op.cc @@ -79,9 +79,9 @@ class FusedSeqpoolCVMOp : public framework::OperatorWithKernel { const auto& x_tensor = x_var->Get(); const auto& x_lod = x_tensor.lod(); if (!x_lod.empty()) { - cur_batch_size = x_lod[0].size() - 1; + cur_batch_size = static_cast(x_lod[0].size() - 1); } else { - cur_batch_size = x_tensor.dims()[0]; + cur_batch_size = static_cast(x_tensor.dims()[0]); } if (batch_size == -1) { batch_size = cur_batch_size; diff --git a/paddle/fluid/operators/fused/fusion_gru_op.cc b/paddle/fluid/operators/fused/fusion_gru_op.cc index 69b53709b22b5..0625d5c80c08e 100644 --- a/paddle/fluid/operators/fused/fusion_gru_op.cc +++ b/paddle/fluid/operators/fused/fusion_gru_op.cc @@ -64,7 +64,7 @@ void FusionGRUOp::InferShape(framework::InferShapeContext* ctx) const { wx_dims[0], 
x_mat_dims[1])); - int frame_size = wx_dims[1] / 3; + int frame_size = static_cast(wx_dims[1] / 3); auto wh_dims = ctx->GetInputDim("WeightH"); PADDLE_ENFORCE_EQ(wh_dims.size(), @@ -131,9 +131,10 @@ void FusionGRUOp::InferShape(framework::InferShapeContext* ctx) const { ctx->ShareLoD("X", "Hidden"); int xx_width; if (ctx->Attrs().Get("use_seq")) { - xx_width = wx_dims[1]; + xx_width = static_cast(wx_dims[1]); } else { - xx_width = x_mat_dims[1] > wx_dims[1] ? wx_dims[1] : x_mat_dims[1]; + xx_width = static_cast(x_mat_dims[1] > wx_dims[1] ? wx_dims[1] + : x_mat_dims[1]); OP_INOUT_CHECK( ctx->HasOutput("ReorderedH0"), "Output", "ReorderedH0", "fusion_gru"); OP_INOUT_CHECK( @@ -305,7 +306,7 @@ class FusionGRUKernel : public framework::OpKernel { void SeqCompute(const framework::ExecutionContext& ctx) const { INIT_BASE_DEFINES; INIT_OTHER_DEFINES; - const int N = x_lod[0].size() - 1; + const int N = static_cast(x_lod[0].size() - 1); const T* h0_data = h0 ? h0->data() : nullptr; const T* wh_state_data = wh_data + D * D2; T* hidden_out_data = hidden_out->mutable_data(place); @@ -338,7 +339,7 @@ class FusionGRUKernel : public framework::OpKernel { }; for (int i = 0; i < N; ++i) { int bid = is_reverse ? N - 1 - i : i; - int seq_len = x_lod[0][bid + 1] - x_lod[0][bid]; + int seq_len = static_cast(x_lod[0][bid + 1] - x_lod[0][bid]); const T* prev_hidden_data = nullptr; int tstart = 0; if (h0_data) { @@ -436,7 +437,7 @@ class FusionGRUKernel : public framework::OpKernel { auto batched_lod = batched_input->lod(); const auto& seq_order = batched_lod[2]; - const int max_bs = seq_order.size(); + const int max_bs = static_cast(seq_order.size()); reordered_h0->Resize({max_bs, D}); int tstart = 0; @@ -470,11 +471,12 @@ class FusionGRUKernel : public framework::OpKernel { // Then start from next const T* wh_state_data = wh_data + D * D2; const auto& batch_starts = batched_lod[0]; - const int max_seq_len = batch_starts.size() - 1; + const int max_seq_len = static_cast(batch_starts.size() - 1); batched_input_data = batched_input_data + tstart * max_bs * D3; batched_out_data = batched_out_data + tstart * max_bs * D; for (int step = tstart; step < max_seq_len; ++step) { - const int cur_bs = batch_starts[step + 1] - batch_starts[step]; + const int cur_bs = + static_cast(batch_starts[step + 1] - batch_starts[step]); // gemm prev * (Wu + Wr) blas.GEMM(CblasNoTrans, CblasNoTrans, diff --git a/paddle/fluid/operators/fused/fusion_lstm_op.cc b/paddle/fluid/operators/fused/fusion_lstm_op.cc index af3129ee2adf4..400d8dcdaad2f 100644 --- a/paddle/fluid/operators/fused/fusion_lstm_op.cc +++ b/paddle/fluid/operators/fused/fusion_lstm_op.cc @@ -72,7 +72,7 @@ void FusionLSTMOp::InferShape(framework::InferShapeContext* ctx) const { wx_dims[0], x_dims[1])); - int frame_size = wx_dims[1] / 4; + int frame_size = static_cast(wx_dims[1] / 4); auto wh_dims = ctx->GetInputDim("WeightH"); PADDLE_ENFORCE_EQ(wh_dims.size(), @@ -143,9 +143,10 @@ void FusionLSTMOp::InferShape(framework::InferShapeContext* ctx) const { ctx->ShareLoD("X", "Cell"); int xx_width; if (ctx->Attrs().Get("use_seq")) { - xx_width = wx_dims[1]; + xx_width = static_cast(wx_dims[1]); } else { - xx_width = x_dims[1] > wx_dims[1] ? wx_dims[1] : x_dims[1]; + xx_width = + static_cast(x_dims[1] > wx_dims[1] ? 
wx_dims[1] : x_dims[1]); OP_INOUT_CHECK(ctx->HasOutput("BatchedInput"), "Output", @@ -369,8 +370,8 @@ class FuisonLSTMKernel : public framework::OpKernel { INIT_BASE_DEFINES; INIT_OTHER_DEFINES; auto x_lod = x->lod(); - const int total_T = x_dims[0]; - const int N = x_lod[0].size() - 1; + const int total_T = static_cast(x_dims[0]); + const int N = static_cast(x_lod[0].size() - 1); const T* h0_data = h0 ? h0->data() : nullptr; const T* c0_data = c0 ? c0->data() : nullptr; T* xx_data = xx->mutable_data(place); @@ -395,7 +396,7 @@ class FuisonLSTMKernel : public framework::OpKernel { for (int i = 0; i < N; ++i) { int bid = is_reverse ? N - 1 - i : i; - int seq_len = x_lod[0][bid + 1] - x_lod[0][bid]; + int seq_len = static_cast(x_lod[0][bid + 1] - x_lod[0][bid]); const T* prev_c_data = nullptr; const T* prev_h_data = nullptr; int tstart = 0; @@ -476,7 +477,7 @@ class FuisonLSTMKernel : public framework::OpKernel { auto batched_lod = batched_input->lod(); const auto& seq_order = batched_lod[2]; - const int max_bs = seq_order.size(); + const int max_bs = static_cast(seq_order.size()); reordered_h0->Resize({max_bs, D}); reordered_c0->Resize({max_bs, D}); @@ -520,13 +521,14 @@ class FuisonLSTMKernel : public framework::OpKernel { // compute kernel part const auto& batch_starts = batched_lod[0]; - const int max_seq_len = batch_starts.size() - 1; + const int max_seq_len = static_cast(batch_starts.size() - 1); const int offset = tstart * max_bs * D; batched_input_data = batched_input_data + offset * 4; batched_h_out_data = batched_h_out_data + offset; batched_c_out_data = batched_c_out_data + offset; for (int step = tstart; step < max_seq_len; ++step) { - const int cur_bs = batch_starts[step + 1] - batch_starts[step]; + const int cur_bs = + static_cast(batch_starts[step + 1] - batch_starts[step]); GEMM_WH_ADDON(cur_bs, prev_h_data, batched_input_data); T* cur_in_data = batched_input_data; T* cur_prev_c_data = prev_c_data; diff --git a/paddle/fluid/operators/fused/fusion_repeated_fc_relu_op.cc b/paddle/fluid/operators/fused/fusion_repeated_fc_relu_op.cc index 38b8f0fe1728a..8b88316645d8a 100644 --- a/paddle/fluid/operators/fused/fusion_repeated_fc_relu_op.cc +++ b/paddle/fluid/operators/fused/fusion_repeated_fc_relu_op.cc @@ -157,8 +157,8 @@ class FusionRepeatedFCReluKernel : public framework::OpKernel { const auto& w_dims = weights[0]->dims(); phi::jit::matmul_attr_t attr; attr.m = i_dims[0]; - attr.n = w_dims[1]; - attr.k = w_dims[0]; + attr.n = static_cast(w_dims[1]); + attr.k = static_cast(w_dims[0]); relus[0]->Resize({attr.m, attr.n}); fc_relu(in->data(), weights[0]->data(), @@ -169,9 +169,9 @@ class FusionRepeatedFCReluKernel : public framework::OpKernel { for (int i = 1; i < weight_sz - 1; ++i) { const auto& i_dims = relus[i - 1]->dims(); const auto& w_dims = weights[i]->dims(); - attr.m = i_dims[0]; - attr.n = w_dims[1]; - attr.k = w_dims[0]; + attr.m = static_cast(i_dims[0]); + attr.n = static_cast(w_dims[1]); + attr.k = static_cast(w_dims[0]); relus[i]->Resize({attr.m, attr.n}); fc_relu(relus[i - 1]->data(), weights[i]->data(), @@ -182,9 +182,9 @@ class FusionRepeatedFCReluKernel : public framework::OpKernel { const auto& i_dims_last = relus[weight_sz - 2]->dims(); const auto& w_dims_last = weights[weight_sz - 1]->dims(); - attr.m = i_dims_last[0]; - attr.n = w_dims_last[1]; - attr.k = w_dims_last[0]; + attr.m = static_cast(i_dims_last[0]); + attr.n = static_cast(w_dims_last[1]); + attr.k = static_cast(w_dims_last[0]); fc_relu(relus[weight_sz - 2]->data(), weights[weight_sz - 1]->data(), 
biases[weight_sz - 1]->data(), diff --git a/paddle/fluid/operators/fused/fusion_seqconv_eltadd_relu_op.cc b/paddle/fluid/operators/fused/fusion_seqconv_eltadd_relu_op.cc index ab74df426a747..de70a5b6b5cf5 100644 --- a/paddle/fluid/operators/fused/fusion_seqconv_eltadd_relu_op.cc +++ b/paddle/fluid/operators/fused/fusion_seqconv_eltadd_relu_op.cc @@ -192,8 +192,8 @@ class FusionSeqConvEltAddReluKernel : public framework::OpKernel { int col_mat_w = static_cast(w_dims[0]); int col_mat_w_sz = col_mat_w * sizeof(T); for (int i = 0; i < static_cast(x_lod[0].size()) - 1; ++i) { - int st = x_lod[0][i]; - int ed = x_lod[0][i + 1]; + int st = static_cast(x_lod[0][i]); + int ed = static_cast(x_lod[0][i + 1]); const T* src_data = x_data + st * src_mat_w; T* dst_data = col_data + st * col_mat_w; int seq_len = ed - st; diff --git a/paddle/fluid/operators/fused/fusion_seqexpand_concat_fc_op.cc b/paddle/fluid/operators/fused/fusion_seqexpand_concat_fc_op.cc index 796f236663110..03b5971b1482a 100644 --- a/paddle/fluid/operators/fused/fusion_seqexpand_concat_fc_op.cc +++ b/paddle/fluid/operators/fused/fusion_seqexpand_concat_fc_op.cc @@ -49,10 +49,10 @@ void FusionSeqExpandConcatFCOp::InferShape( platform::errors::InvalidArgument( "Input(FCWeight)'s rank must be 2, but received value is: %d.", w_dims.size())); - const int D = w_dims[1]; - int sum = ins_dims[0][1]; + const int D = static_cast(w_dims[1]); + int sum = static_cast(ins_dims[0][1]); for (size_t i = 1; i < ins_dims.size(); ++i) { - sum += ins_dims[i][1]; + sum += static_cast(ins_dims[i][1]); } PADDLE_ENFORCE_EQ( sum, @@ -163,11 +163,11 @@ class FusionSeqExpandConcatFCOpKernel : public framework::OpKernel { auto ref_dims = ref_in->dims(); // T x M0 auto in1_dims = ins[1]->dims(); // N x M1 auto w_dims = w->dims(); - const int N = ref_lod[0].size() - 1; - const int total_T = ref_dims[0]; - const int M0 = ref_dims[1]; - const int M1 = in1_dims[1]; - const int D = w_dims[1]; + const int N = static_cast(ref_lod[0].size() - 1); + const int total_T = static_cast(ref_dims[0]); + const int M0 = static_cast(ref_dims[1]); + const int M1 = static_cast(in1_dims[1]); + const int D = static_cast(w_dims[1]); // some check and fcout should be reshape here // since infershape can not get lod info @@ -257,7 +257,7 @@ class FusionSeqExpandConcatFCOpKernel : public framework::OpKernel { for (size_t i = 2; i < ins.size(); ++i) { // add on const T* in_data = ins[i]->data(); - const int K = ins[i]->dims()[1]; + const int K = static_cast(ins[i]->dims()[1]); blas.GEMM(CblasNoTrans, CblasNoTrans, N, @@ -275,7 +275,7 @@ class FusionSeqExpandConcatFCOpKernel : public framework::OpKernel { } T* cur_out_data = out_data; for (int i = 0; i < N; ++i) { - int seq_len = ref_lod[0][i + 1] - ref_lod[0][i]; + int seq_len = static_cast(ref_lod[0][i + 1] - ref_lod[0][i]); T* src = fc_out_data + i * D; for (int step = 0; step < seq_len; ++step) { blas.VADD(D, cur_out_data, src, cur_out_data); diff --git a/paddle/fluid/operators/fused/fusion_seqpool_concat_op.cc b/paddle/fluid/operators/fused/fusion_seqpool_concat_op.cc index 1b41ebfe1af68..42c322a2163c2 100644 --- a/paddle/fluid/operators/fused/fusion_seqpool_concat_op.cc +++ b/paddle/fluid/operators/fused/fusion_seqpool_concat_op.cc @@ -113,7 +113,7 @@ class FusionSeqPoolConcatKernel : public framework::OpKernel { auto place = ctx.GetPlace(); T* y_data = out->mutable_data(place); - int w = ins[0]->numel() / x0_dims[0]; + int w = static_cast(ins[0]->numel() / x0_dims[0]); PADDLE_ENFORCE_EQ(y_dims[1] % w, 0, 
platform::errors::InvalidArgument( diff --git a/paddle/fluid/operators/fused/fusion_seqpool_cvm_concat_op.cc b/paddle/fluid/operators/fused/fusion_seqpool_cvm_concat_op.cc index 4162d503dae39..5bcd4d2fbc75a 100644 --- a/paddle/fluid/operators/fused/fusion_seqpool_cvm_concat_op.cc +++ b/paddle/fluid/operators/fused/fusion_seqpool_cvm_concat_op.cc @@ -117,7 +117,7 @@ class FusionSeqPoolCVMConcatKernel : public framework::OpKernel { auto place = ctx.GetPlace(); T* y_data = out->mutable_data(place); - int w = ins[0]->numel() / x0_dims[0]; + int w = static_cast(ins[0]->numel() / x0_dims[0]); PADDLE_ENFORCE_EQ(y_dims[1] % w, 0, paddle::platform::errors::InvalidArgument( diff --git a/paddle/fluid/operators/fused/fusion_squared_mat_sub_op.cc b/paddle/fluid/operators/fused/fusion_squared_mat_sub_op.cc index 220d21234731c..c1d902754a4be 100644 --- a/paddle/fluid/operators/fused/fusion_squared_mat_sub_op.cc +++ b/paddle/fluid/operators/fused/fusion_squared_mat_sub_op.cc @@ -100,9 +100,9 @@ class FusionSquaredMatSubKernel : public framework::OpKernel { auto x_dims = x->dims(); auto y_dims = y->dims(); phi::jit::matmul_attr_t attr; - attr.m = x_dims[0]; - attr.k = x_dims[1]; - attr.n = y_dims[1]; + attr.m = static_cast(x_dims[0]); + attr.k = static_cast(x_dims[1]); + attr.n = static_cast(y_dims[1]); int o_numel = attr.m * attr.n; auto vsquare_x = phi::jit::KernelFuncs, diff --git a/paddle/fluid/operators/fused/multi_gru_op.cc b/paddle/fluid/operators/fused/multi_gru_op.cc index bf4db89ec80e1..1ef675cb1d8f8 100644 --- a/paddle/fluid/operators/fused/multi_gru_op.cc +++ b/paddle/fluid/operators/fused/multi_gru_op.cc @@ -76,7 +76,7 @@ void MultiGRUOp::InferShape(framework::InferShapeContext* ctx) const { i, wh_dims[i].size(), wh_dims[i])); - int frame_size = wh_dims[i][0]; + int frame_size = static_cast(wh_dims[i][0]); PADDLE_ENFORCE_EQ( wh_dims[i][1], 3 * frame_size, @@ -102,7 +102,7 @@ void MultiGRUOp::InferShape(framework::InferShapeContext* ctx) const { if (ctx->HasInputs("Bias")) { auto b_dims = ctx->GetInputsDim("Bias"); for (int i = 0; i < 2 * layers; ++i) { - int frame_size = wh_dims[i][0]; + int frame_size = static_cast(wh_dims[i][0]); PADDLE_ENFORCE_EQ(b_dims[i].size(), 2, platform::errors::InvalidArgument( @@ -131,7 +131,7 @@ void MultiGRUOp::InferShape(framework::InferShapeContext* ctx) const { } } - int last_frame_size = wh_dims.back()[0]; + int last_frame_size = static_cast(wh_dims.back()[0]); framework::DDim out_dims({x_mat_dims[0], 2 * last_frame_size}); ctx->SetOutputDim("Hidden", out_dims); ctx->ShareLoD("X", "Hidden"); diff --git a/paddle/fluid/operators/math/unpooling.cc b/paddle/fluid/operators/math/unpooling.cc index bcfdc876b4b26..78c41f1b8387a 100644 --- a/paddle/fluid/operators/math/unpooling.cc +++ b/paddle/fluid/operators/math/unpooling.cc @@ -24,12 +24,12 @@ class Unpool2dMaxFunctor { const phi::DenseTensor& input, const phi::DenseTensor& indices, phi::DenseTensor* output) { - const int batch_size = input.dims()[0]; - const int input_height = input.dims()[2]; - const int input_width = input.dims()[3]; - const int output_channels = output->dims()[1]; - const int output_height = output->dims()[2]; - const int output_width = output->dims()[3]; + const int batch_size = static_cast(input.dims()[0]); + const int input_height = static_cast(input.dims()[2]); + const int input_width = static_cast(input.dims()[3]); + const int output_channels = static_cast(output->dims()[1]); + const int output_height = static_cast(output->dims()[2]); + const int output_width = 
static_cast(output->dims()[3]); int input_feasize = input_height * input_width; int output_feasize = output_height * output_width; const T* input_data = input.data(); @@ -69,12 +69,12 @@ class Unpool2dMaxGradFunctor { const phi::DenseTensor& output, const phi::DenseTensor& output_grad, phi::DenseTensor* input_grad) { - const int batch_size = input.dims()[0]; - const int input_height = input.dims()[2]; - const int input_width = input.dims()[3]; - const int output_channels = output.dims()[1]; - const int output_height = output.dims()[2]; - const int output_width = output.dims()[3]; + const int batch_size = static_cast(input.dims()[0]); + const int input_height = static_cast(input.dims()[2]); + const int input_width = static_cast(input.dims()[3]); + const int output_channels = static_cast(output.dims()[1]); + const int output_height = static_cast(output.dims()[2]); + const int output_width = static_cast(output.dims()[3]); int input_feasize = input_height * input_width; int output_feasize = output_height * output_width; const int* indices_data = indices.data(); @@ -113,14 +113,14 @@ class Unpool3dMaxFunctor { const phi::DenseTensor& input, const phi::DenseTensor& indices, phi::DenseTensor* output) { - const int batch_size = input.dims()[0]; - const int input_depth = input.dims()[2]; - const int input_height = input.dims()[3]; - const int input_width = input.dims()[4]; - const int output_channels = output->dims()[1]; - const int output_depth = output->dims()[2]; - const int output_height = output->dims()[3]; - const int output_width = output->dims()[4]; + const int batch_size = static_cast(input.dims()[0]); + const int input_depth = static_cast(input.dims()[2]); + const int input_height = static_cast(input.dims()[3]); + const int input_width = static_cast(input.dims()[4]); + const int output_channels = static_cast(output->dims()[1]); + const int output_depth = static_cast(output->dims()[2]); + const int output_height = static_cast(output->dims()[3]); + const int output_width = static_cast(output->dims()[4]); int input_feasize = input_depth * input_height * input_width; int output_feasize = output_depth * output_height * output_width; const T* input_data = input.data(); @@ -161,14 +161,14 @@ class Unpool3dMaxGradFunctor { const phi::DenseTensor& output, const phi::DenseTensor& output_grad, phi::DenseTensor* input_grad) { - const int batch_size = input.dims()[0]; - const int input_depth = input.dims()[2]; - const int input_height = input.dims()[3]; - const int input_width = input.dims()[4]; - const int output_channels = output.dims()[1]; - const int output_depth = output.dims()[2]; - const int output_height = output.dims()[3]; - const int output_width = output.dims()[4]; + const int batch_size = static_cast(input.dims()[0]); + const int input_depth = static_cast(input.dims()[2]); + const int input_height = static_cast(input.dims()[3]); + const int input_width = static_cast(input.dims()[4]); + const int output_channels = static_cast(output.dims()[1]); + const int output_depth = static_cast(output.dims()[2]); + const int output_height = static_cast(output.dims()[3]); + const int output_width = static_cast(output.dims()[4]); int input_feasize = input_depth * input_height * input_width; int output_feasize = output_depth * output_height * output_width; const int* indices_data = indices.data(); diff --git a/paddle/fluid/operators/prim_ops/gather_p_op.cc b/paddle/fluid/operators/prim_ops/gather_p_op.cc index cb8b7eee2ebc4..549b9aadfca00 100644 --- a/paddle/fluid/operators/prim_ops/gather_p_op.cc +++ 
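One caveat the unpooling changes do not (and are not meant to) address: once the extents are int, products such as input_feasize are computed in int arithmetic and can overflow for very large tensors. A defensive variant, offered only as an assumption-laden sketch, would widen before multiplying:

#include <cstdint>

// Compute depth * height * width in 64 bits, then narrow once; the caller
// is responsible for guaranteeing the result actually fits in an int.
int FeatureSize(int depth, int height, int width) {
  const std::int64_t n = static_cast<std::int64_t>(depth) * height * width;
  return static_cast<int>(n);
}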
b/paddle/fluid/operators/prim_ops/gather_p_op.cc @@ -76,7 +76,8 @@ class GatherPrimOpShapeInference : public framework::InferShapeBase { index_shape.size())); num_index = index_shape[0]; } else { - num_index = ctx->Attrs().Get>("index").size(); + num_index = static_cast( + ctx->Attrs().Get>("index").size()); } auto axis = ctx->Attrs().Get("axis"); diff --git a/paddle/fluid/operators/prim_ops/scatter_add_p_op.cc b/paddle/fluid/operators/prim_ops/scatter_add_p_op.cc index 4120de1adc7c6..b2d978f0ad4c9 100644 --- a/paddle/fluid/operators/prim_ops/scatter_add_p_op.cc +++ b/paddle/fluid/operators/prim_ops/scatter_add_p_op.cc @@ -80,7 +80,8 @@ class ScatterAddPrimOpShapeInference : public framework::InferShapeBase { index_shape.size())); num_index = index_shape[0]; } else { - num_index = ctx->Attrs().Get>("index").size(); + num_index = static_cast( + ctx->Attrs().Get>("index").size()); } auto axis = ctx->Attrs().Get("axis"); framework::VarDesc *x_var = PADDLE_GET(framework::VarDesc *, x_var_ptr); diff --git a/paddle/fluid/operators/reader/create_double_buffer_reader_op.cc b/paddle/fluid/operators/reader/create_double_buffer_reader_op.cc index e381de827427b..cf8197a04dd69 100644 --- a/paddle/fluid/operators/reader/create_double_buffer_reader_op.cc +++ b/paddle/fluid/operators/reader/create_double_buffer_reader_op.cc @@ -52,7 +52,7 @@ class CreateDoubleBufferReaderOp : public framework::OperatorBase { } else { place_str = place_str.substr(0, place_str.length() - 1); std::istringstream sin(place_str); - sin.seekg(std::string("PLACE(GPU:").size(), std::ios::beg); + sin.seekg(std::string("PLACE(GPU:").size(), std::ios::beg); // NOLINT size_t num; sin >> num; place = platform::CUDAPlace(static_cast(num)); diff --git a/paddle/fluid/operators/sequence_ops/sequence_expand_as_op.cc b/paddle/fluid/operators/sequence_ops/sequence_expand_as_op.cc index 68c73d5a77327..1f9fd565ca77c 100644 --- a/paddle/fluid/operators/sequence_ops/sequence_expand_as_op.cc +++ b/paddle/fluid/operators/sequence_ops/sequence_expand_as_op.cc @@ -71,7 +71,7 @@ class SequenceExpandAsOp : public framework::OperatorWithKernel { out_first_dim = x_dims[0]; } else { for (size_t i = 1; i < y_lod[0].size(); ++i) { - out_first_dim += (y_lod[0][i] - y_lod[0][i - 1]); + out_first_dim += static_cast(y_lod[0][i] - y_lod[0][i - 1]); } } out_dims[0] = out_first_dim; diff --git a/paddle/fluid/operators/sequence_ops/sequence_expand_op.cc b/paddle/fluid/operators/sequence_ops/sequence_expand_op.cc index c3293643417bf..daa45fa514020 100644 --- a/paddle/fluid/operators/sequence_ops/sequence_expand_op.cc +++ b/paddle/fluid/operators/sequence_ops/sequence_expand_op.cc @@ -76,7 +76,7 @@ class SequenceExpandOp : public framework::OperatorWithKernel { y_lod.size(), ref_level)); - if (ref_level == -1) ref_level = y_lod.size() - 1; + if (ref_level == -1) ref_level = static_cast(y_lod.size() - 1); if (!x_lod.empty()) { PADDLE_ENFORCE_EQ( @@ -114,10 +114,10 @@ class SequenceExpandOp : public framework::OperatorWithKernel { for (size_t i = 1; i < y_lod[ref_level].size(); ++i) { int x_seq_len = 1; if (x_lod.size() == 1) { - x_seq_len = x_lod[0][i] - x_lod[0][i - 1]; + x_seq_len = static_cast(x_lod[0][i] - x_lod[0][i - 1]); } - out_first_dim += - (y_lod[ref_level][i] - y_lod[ref_level][i - 1]) * x_seq_len; + out_first_dim += static_cast( + (y_lod[ref_level][i] - y_lod[ref_level][i - 1]) * x_seq_len); } } out_dims[0] = out_first_dim; diff --git a/paddle/fluid/operators/sequence_ops/sequence_pad_op.cc b/paddle/fluid/operators/sequence_ops/sequence_pad_op.cc index 
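The gather_p and scatter_add_p shape-inference hunks show the container-size flavor of the same fix: std::vector::size() returns size_t while num_index is an int, so the conversion must be spelled out. Under C++20, std::ssize(vec) would be an alternative to the cast. The pattern as committed, condensed into a sketch:

#include <cstdint>
#include <vector>

int NumIndex(const std::vector<std::int64_t>& index_attr) {
  return static_cast<int>(index_attr.size());  // size_t -> int, made explicit
}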
c00354dbc64db..ac78b18602360 100644 --- a/paddle/fluid/operators/sequence_ops/sequence_pad_op.cc +++ b/paddle/fluid/operators/sequence_ops/sequence_pad_op.cc @@ -93,8 +93,9 @@ class SequencePadOp : public framework::OperatorWithKernel { x_dims[0], static_cast(x_lod_0.back()))); - int seq_num = x_lod_0.size() - 1; - int max_seq_len = phi::funcs::MaximumSequenceLength(x_lod_0); + int seq_num = static_cast(x_lod_0.size() - 1); + int max_seq_len = + static_cast(phi::funcs::MaximumSequenceLength(x_lod_0)); if (padded_length == -1) { padded_length = max_seq_len; } diff --git a/paddle/fluid/operators/sequence_ops/sequence_topk_avg_pooling_op.cc b/paddle/fluid/operators/sequence_ops/sequence_topk_avg_pooling_op.cc index d9f67706ff9ae..c8ce5475e545b 100644 --- a/paddle/fluid/operators/sequence_ops/sequence_topk_avg_pooling_op.cc +++ b/paddle/fluid/operators/sequence_ops/sequence_topk_avg_pooling_op.cc @@ -55,8 +55,8 @@ class SequenceTopkAvgPoolingOp : public framework::OperatorWithKernel { auto row_shape_0 = row_dim[0]; std::vector vec_out_shape; - vec_out_shape.push_back(row_shape_0); - vec_out_shape.push_back(channel_num * num_k); + vec_out_shape.push_back(row_shape_0); // NOLINT + vec_out_shape.push_back(channel_num * num_k); // NOLINT ctx->SetOutputDim("Out", phi::make_ddim(vec_out_shape)); ctx->ShareLoD("ROW", "Out"); diff --git a/paddle/fluid/operators/string/faster_tokenizer_op.cc b/paddle/fluid/operators/string/faster_tokenizer_op.cc index 1dc1ff29a8f80..dd1e421e6cb1a 100644 --- a/paddle/fluid/operators/string/faster_tokenizer_op.cc +++ b/paddle/fluid/operators/string/faster_tokenizer_op.cc @@ -383,7 +383,7 @@ int BertTokenizer::Encode( } if (needs_to_be_padded) { - int64_t difference = max_seq_len - seq_len; + int64_t difference = static_cast(max_seq_len - seq_len); size_t pad_start = max_seq_len - 1 - difference; encoded_inputs->at("token_type_ids").resize(max_seq_len); for (size_t i = max_seq_len - 1; i > pad_start; i--) { diff --git a/paddle/fluid/pir/phi_kernel_adaptor/phi_kernel_util.cc b/paddle/fluid/pir/phi_kernel_adaptor/phi_kernel_util.cc index 475e06f936f19..609037c3ecd0f 100644 --- a/paddle/fluid/pir/phi_kernel_adaptor/phi_kernel_util.cc +++ b/paddle/fluid/pir/phi_kernel_adaptor/phi_kernel_util.cc @@ -698,7 +698,8 @@ std::shared_ptr BuildOperatorBase( } else if (array_list[0].isa()) { std::vector vec_int64; for (auto attribute : array_list) { - vec_int64.push_back(attribute.dyn_cast().data()); + vec_int64.push_back( + attribute.dyn_cast().data()); // NOLINT } attr_map[name] = vec_int64; } else if (array_list[0].isa()) { @@ -710,14 +711,15 @@ std::shared_ptr BuildOperatorBase( } else if (array_list[0].isa()) { std::vector vec_float; for (auto attribute : array_list) { - vec_float.push_back(attribute.dyn_cast().data()); + vec_float.push_back( + attribute.dyn_cast().data()); // NOLINT } attr_map[name] = vec_float; } else if (array_list[0].isa()) { std::vector vec_double; for (auto attribute : array_list) { vec_double.push_back( - attribute.dyn_cast().data()); + attribute.dyn_cast().data()); // NOLINT } attr_map[name] = vec_double; } else { diff --git a/paddle/fluid/pir/transforms/pd_op_to_kernel_pass.cc b/paddle/fluid/pir/transforms/pd_op_to_kernel_pass.cc index c7e0c0cb1d977..6c1edc901ee77 100644 --- a/paddle/fluid/pir/transforms/pd_op_to_kernel_pass.cc +++ b/paddle/fluid/pir/transforms/pd_op_to_kernel_pass.cc @@ -337,7 +337,7 @@ phi::DataType GetKernelDataTypeByYamlInfo( kernel_data_type = find_it->second; } else if (input_map.count(slot_name)) { // parse from input - int 
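The faster_tokenizer hunk deserves one note: if max_seq_len and seq_len are unsigned types (their declarations are not shown here), the subtraction wraps before any cast of the result can help, so converting the operands first is the safer shape. A hypothetical sketch under that assumption:

#include <cstddef>
#include <cstdint>

std::int64_t PadAmount(std::size_t max_seq_len, std::size_t seq_len) {
  return static_cast<std::int64_t>(max_seq_len) -
         static_cast<std::int64_t>(seq_len);  // may go negative, never wraps
}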
in_index = input_map.at(slot_name); + int in_index = static_cast(input_map.at(slot_name)); auto type = map_value_pair.at(op->operand_source(in_index)).type(); if (type.isa()) { @@ -407,7 +407,7 @@ phi::Backend GetKernelBackendByYamlInfo( if (input_map.count(slot_name)) { // parse from input - int in_index = input_map.at(slot_name); + int in_index = static_cast(input_map.at(slot_name)); auto type = map_value_pair.at(op->operand_source(in_index)).type(); if (type.isa()) { @@ -499,7 +499,8 @@ phi::KernelKey GetKernelKey( if (op_info_parser != nullptr) { // only suppurt non vector input for now - int tensor_input_number = op_info_parser->InputTensorNumber(); + int tensor_input_number = + static_cast(op_info_parser->InputTensorNumber()); // get datatype info kernel_data_type = diff --git a/paddle/fluid/platform/cpu_info_test.cc b/paddle/fluid/platform/cpu_info_test.cc index fef343112dc03..a1bbee3ad4179 100644 --- a/paddle/fluid/platform/cpu_info_test.cc +++ b/paddle/fluid/platform/cpu_info_test.cc @@ -26,7 +26,7 @@ TEST(CpuMemoryUsage, Print) { std::stringstream ss; size_t memory_size = phi::backends::cpu::CpuMaxAllocSize() / 1024 / 1024 / 1024; - float use_percent = FLAGS_fraction_of_cpu_memory_to_use * 100; + float use_percent = FLAGS_fraction_of_cpu_memory_to_use * 100; // NOLINT std::cout << paddle::string::Sprintf("\n%.2f %% of CPU Memory Usage: %d GB\n", use_percent, diff --git a/paddle/fluid/platform/init_test.cc b/paddle/fluid/platform/init_test.cc index 66fb431af29e9..b4c11be038611 100644 --- a/paddle/fluid/platform/init_test.cc +++ b/paddle/fluid/platform/init_test.cc @@ -55,6 +55,6 @@ TEST(InitDevices, XPU) { #ifndef _WIN32 TEST(SignalHandle, SignalHandle) { std::string msg = "Signal raises"; - paddle::framework::SignalHandle(msg.c_str(), msg.size()); + paddle::framework::SignalHandle(msg.c_str(), static_cast(msg.size())); } #endif diff --git a/paddle/fluid/platform/profiler.cc b/paddle/fluid/platform/profiler.cc index c3279d445025d..67512474567d3 100644 --- a/paddle/fluid/platform/profiler.cc +++ b/paddle/fluid/platform/profiler.cc @@ -859,9 +859,8 @@ std::string PrintHostEvents() { oss << thr_evt_sec.thread_id << std::endl; for (const auto &evt : thr_evt_sec.events) { oss << "{ " << evt.name << " | " << evt.start_ns << "ns | " << evt.end_ns - << "ns | " << (evt.end_ns - evt.start_ns) / 1000.000 - << "us }" // NOLINT - << std::endl; + << "ns | " << (evt.end_ns - evt.start_ns) / 1000.000 // NOLINT + << "us }" << std::endl; } } return oss.str(); diff --git a/paddle/fluid/pybind/eager_functions.cc b/paddle/fluid/pybind/eager_functions.cc index e63790a65dfc8..1e1f40bf8e3d4 100644 --- a/paddle/fluid/pybind/eager_functions.cc +++ b/paddle/fluid/pybind/eager_functions.cc @@ -362,7 +362,7 @@ static void ConstructFwdAndBwdMap( VLOG(7) << " ==== Custom Operator: " << op_type << "'s No." << j << " inputs: " << inputs_names[j] << " related to No." << i << " grad_outputs: " << grad_outputs_names[i]; - in_out_map[op_type][0][0][j] = i; + in_out_map[op_type][0][0][j] = i; // NOLINT } } } @@ -375,7 +375,7 @@ static void ConstructFwdAndBwdMap( VLOG(7) << " ==== Custom Operator: " << op_type << "'s No." << j << " outputs: " << outputs_names[j] << " related to No." << i << " grad_inputs's grad: " << grad_inputs_names[i]; - in_out_map[op_type][0][1][j] = i; + in_out_map[op_type][0][1][j] = i; // NOLINT } } } else { @@ -388,7 +388,7 @@ static void ConstructFwdAndBwdMap( << " outputs: " << outputs_names[j] << " related to No." 
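Where this patch writes // NOLINT instead of a cast (the seekg, use_percent, profiler, and in_out_map lines), it relies on clang-tidy's per-line suppression. A bare NOLINT silences every check on that line; naming the check suppresses only the intended one, which is usually the better habit:

#include <cstddef>

void Demo(std::size_t n) {
  int a = n;  // NOLINT
  int b = n;  // NOLINT(cppcoreguidelines-narrowing-conversions)
  (void)a;
  (void)b;
}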
<< i << " grad_inputs fwd outputs: " << grad_inputs_names[i]; - in_out_map[op_type][0][2][j] = i; + in_out_map[op_type][0][2][j] = i; // NOLINT } } } else { @@ -398,7 +398,7 @@ static void ConstructFwdAndBwdMap( << " inputs: " << inputs_names[j] << " related to No." << i << " grad_inputs fwd inputs: " << grad_inputs_names[i]; - in_out_map[op_type][0][3][j] = i; + in_out_map[op_type][0][3][j] = i; // NOLINT } } } @@ -421,7 +421,7 @@ static void ConstructFwdAndBwdMap( VLOG(7) << " ==== Custom Operator: " << op_type << "'s No." << j << " attrs: " << attrs_names[j] << " related to No." << i << " grad_attrs: " << grad_attrs_names[i]; - in_out_map[op_type][0][4][j] = i; + in_out_map[op_type][0][4][j] = i; // NOLINT } } } @@ -482,7 +482,7 @@ static PyObject* eager_api__get_custom_operator_inplace_reverse_idx( "the input of `Inplace` again and make " "sure you registered your op accurately. ", input)); - inplace_idx_map[distance(outputs.begin(), out_iter)] = in_idx; + inplace_idx_map[distance(outputs.begin(), out_iter)] = in_idx; // NOLINT } return ToPyObject(inplace_idx_map); @@ -551,7 +551,7 @@ static PyObject* eager_api_run_custom_op(PyObject* self, } if (paddle::framework::detail::IsDuplicableVar(input)) { std::vector tensors = - std::move(CastPyArg2VectorOfTensor(obj, i + 1)); + std::move(CastPyArg2VectorOfTensor(obj, i + 1)); // NOLINT for (auto& tensor : tensors) { if (tensor.initialized() && tensor.is_dense_tensor() && !std::dynamic_pointer_cast(tensor.impl()) @@ -568,7 +568,8 @@ static PyObject* eager_api_run_custom_op(PyObject* self, << " to CustomOpKernelContext. Add vector size = " << ctx.InputRangeAt(i).second - ctx.InputRangeAt(i).first; } else { - paddle::Tensor tensor = std::move(CastPyArg2Tensor(obj, i + 1)); + paddle::Tensor tensor = + std::move(CastPyArg2Tensor(obj, i + 1)); // NOLINT if (tensor.initialized() && tensor.is_dense_tensor() && !std::dynamic_pointer_cast(tensor.impl()) ->meta() @@ -583,7 +584,7 @@ static PyObject* eager_api_run_custom_op(PyObject* self, } } // Parse op_type and inputs first, so that use 1 + inputs.size() + i - int attr_start_idx = 1 + inputs.size(); + int attr_start_idx = static_cast(1 + inputs.size()); for (size_t i = 0; i < attrs.size(); ++i) { const auto& attr = attrs.at(i); std::vector attr_name_and_type = paddle::ParseAttrStr(attr); @@ -592,23 +593,30 @@ static PyObject* eager_api_run_custom_op(PyObject* self, << " to CustomOpKernelContext. 
Attribute type = " << attr_type_str; PyObject* obj = PyTuple_GET_ITEM(args, attr_start_idx + i); if (attr_type_str == "bool") { - ctx.EmplaceBackAttr(CastPyArg2AttrBoolean(obj, attr_start_idx + i)); + ctx.EmplaceBackAttr( + CastPyArg2AttrBoolean(obj, attr_start_idx + i)); // NOLINT } else if (attr_type_str == "int") { - ctx.EmplaceBackAttr(CastPyArg2AttrInt(obj, attr_start_idx + i)); + ctx.EmplaceBackAttr( + CastPyArg2AttrInt(obj, attr_start_idx + i)); // NOLINT } else if (attr_type_str == "float") { - ctx.EmplaceBackAttr(CastPyArg2AttrFloat(obj, attr_start_idx + i)); + ctx.EmplaceBackAttr( + CastPyArg2AttrFloat(obj, attr_start_idx + i)); // NOLINT } else if (attr_type_str == "int64_t") { - ctx.EmplaceBackAttr(CastPyArg2Long(obj, op_type, attr_start_idx + i)); + ctx.EmplaceBackAttr( + CastPyArg2Long(obj, op_type, attr_start_idx + i)); // NOLINT } else if (attr_type_str == "std::string") { - ctx.EmplaceBackAttr(CastPyArg2AttrString(obj, attr_start_idx + i)); + ctx.EmplaceBackAttr( + CastPyArg2AttrString(obj, attr_start_idx + i)); // NOLINT } else if (attr_type_str == "std::vector") { ctx.EmplaceBackAttr(CastPyArg2VectorOfInt(obj, attr_start_idx + i)); } else if (attr_type_str == "std::vector") { ctx.EmplaceBackAttr(CastPyArg2VectorOfFloat(obj, attr_start_idx + i)); } else if (attr_type_str == "std::vector") { - ctx.EmplaceBackAttr(CastPyArg2Longs(obj, op_type, attr_start_idx + i)); + ctx.EmplaceBackAttr( + CastPyArg2Longs(obj, op_type, attr_start_idx + i)); // NOLINT } else if (attr_type_str == "std::vector") { - ctx.EmplaceBackAttr(CastPyArg2VectorOfString(obj, attr_start_idx + i)); + ctx.EmplaceBackAttr( + CastPyArg2VectorOfString(obj, attr_start_idx + i)); // NOLINT } else { PADDLE_THROW(platform::errors::Unimplemented( "Unsupported `%s` type value as custom attribute now. " @@ -756,8 +764,9 @@ static PyObject* eager_api_run_custom_op(PyObject* self, const std::vector& in_tensors = ctx.InputsBetween( ctx.InputRangeAt(i).first, ctx.InputRangeAt(i).second); - if (slot_map[0][0].find(i) != slot_map[0][0].end()) { - grad_node->SetGradOutMeta(in_tensors, slot_map[0][0].at(i)); + if (slot_map[0][0].find(static_cast(i)) != slot_map[0][0].end()) { + grad_node->SetGradOutMeta(in_tensors, + slot_map[0][0].at(static_cast(i))); } else { grad_node->SetGradOutMeta(in_tensors, slot_ins_num - 1 - no_grad_cnt); no_grad_cnt++; diff --git a/paddle/fluid/pybind/eager_math_op_patch.cc b/paddle/fluid/pybind/eager_math_op_patch.cc index 0418740f129de..ecae39fb43a49 100644 --- a/paddle/fluid/pybind/eager_math_op_patch.cc +++ b/paddle/fluid/pybind/eager_math_op_patch.cc @@ -169,12 +169,15 @@ paddle::Tensor CallScalarFuction(const paddle::Tensor& self_tensor, std::string op_type) { paddle::Tensor ret; if (op_type == "add" || op_type == "radd") { - ret = scale_ad_func(self_tensor, phi::Scalar(1.0), other, true); + ret = scale_ad_func( + self_tensor, phi::Scalar(1.0), static_cast(other), true); } else if (op_type == "sub") { - ret = scale_ad_func(self_tensor, phi::Scalar(1.0), -other, true); + ret = scale_ad_func( + self_tensor, phi::Scalar(1.0), static_cast(-other), true); } else if (op_type == "rsub") { - ret = scale_ad_func(self_tensor, phi::Scalar(-1.0), other, true); + ret = scale_ad_func( + self_tensor, phi::Scalar(-1.0), static_cast(other), true); } else if (op_type == "mul") { ret = scale_ad_func(self_tensor, phi::Scalar(other), 0.0, true); } else if (op_type == "div") { @@ -1024,12 +1027,12 @@ static PyObject* tensor__mod__method(TensorObject* self, // 1. 
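The scale_ad_func call sites illustrate narrowing at a call boundary: the parameter is a float while the Python-side scalar arrives as a double. A hedged stand-in (the real signature is not reproduced here):

void ScaleBy(float bias) { (void)bias; }  // stand-in for the float parameter

void Caller(double other) {
  ScaleBy(static_cast<float>(other));   // narrow explicitly at the call
  ScaleBy(static_cast<float>(-other));  // negate in double, then narrow
}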
scalar exists cases // there is no scalar_mod function for __mod__ now - float other_double = 0.0; + float other_double = 0.0f; bool has_other_double = false; if (PyFloat_Check(other_obj) || PyCheckInteger(other_obj) || IsNumpyType(other_obj)) { if (PyFloat_Check(other_obj)) { - other_double = CastPyArg2Double(other_obj, "__mod__", 0); + other_double = CastPyArg2Double(other_obj, "__mod__", 0); // NOLINT has_other_double = true; if (_supported_int_dtype_.find(self_tensor.dtype()) != _supported_int_dtype_.end()) { @@ -1037,7 +1040,7 @@ static PyObject* tensor__mod__method(TensorObject* self, self_tensor = cast_ad_func(self_tensor, DataType::FLOAT32); } } else if (PyCheckInteger(other_obj) || IsNumpyType(other_obj)) { - other_double = CastPyArg2Double(other_obj, "__mod__", 0); + other_double = CastPyArg2Double(other_obj, "__mod__", 0); // NOLINT has_other_double = true; } } @@ -1114,12 +1117,12 @@ static PyObject* tensor__matmul__method(TensorObject* self, // 1. scalar exists cases // there is no scalar_matmul function for __matmul__ now - float other_double = 0.0; + float other_double = 0.0f; bool has_other_double = false; if (PyFloat_Check(other_obj) || PyCheckInteger(other_obj) || IsNumpyType(other_obj)) { if (PyFloat_Check(other_obj)) { - other_double = CastPyArg2Double(other_obj, "__matmul__", 0); + other_double = CastPyArg2Double(other_obj, "__matmul__", 0); // NOLINT has_other_double = true; if (_supported_int_dtype_.find(self_tensor.dtype()) != _supported_int_dtype_.end()) { @@ -1127,7 +1130,7 @@ static PyObject* tensor__matmul__method(TensorObject* self, self_tensor = cast_ad_func(self_tensor, DataType::FLOAT32); } } else if (PyCheckInteger(other_obj) || IsNumpyType(other_obj)) { - other_double = CastPyArg2Double(other_obj, "__matmul__", 0); + other_double = CastPyArg2Double(other_obj, "__matmul__", 0); // NOLINT has_other_double = true; } } @@ -1222,12 +1225,12 @@ static PyObject* tensor__lt__method(TensorObject* self, // 1. scalar exists cases // there is no scalar function for __lt__ now - float other_double = 0.0; + float other_double = 0.0f; bool has_other_double = false; if (PyFloat_Check(other_obj) || PyCheckInteger(other_obj) || IsNumpyType(other_obj)) { if (PyFloat_Check(other_obj)) { - other_double = CastPyArg2Double(other_obj, "__lt__", 0); + other_double = CastPyArg2Double(other_obj, "__lt__", 0); // NOLINT has_other_double = true; if (_supported_int_dtype_.find(self_tensor.dtype()) != _supported_int_dtype_.end()) { @@ -1235,7 +1238,7 @@ static PyObject* tensor__lt__method(TensorObject* self, self_tensor = cast_ad_func(self_tensor, DataType::FLOAT32); } } else if (PyCheckInteger(other_obj) || IsNumpyType(other_obj)) { - other_double = CastPyArg2Double(other_obj, "__lt__", 0); + other_double = CastPyArg2Double(other_obj, "__lt__", 0); // NOLINT has_other_double = true; } } @@ -1312,12 +1315,12 @@ static PyObject* tensor__le__method(TensorObject* self, // 1. 
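The recurring 0.0 -> 0.0f edits in these tensor-method hunks are the literal form of the same rule: an unsuffixed floating literal is a double, so initializing a float from it is formally a double-to-float conversion. The f suffix gives the literal the right type from the start:

float a = 0.0;   // double literal, converted to float on initialization
float b = 0.0f;  // float literal, no conversion involved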
scalar exists cases // there is no scalar function for __le__ now - float other_double = 0.0; + float other_double = 0.0f; bool has_other_double = false; if (PyFloat_Check(other_obj) || PyCheckInteger(other_obj) || IsNumpyType(other_obj)) { if (PyFloat_Check(other_obj)) { - other_double = CastPyArg2Double(other_obj, "__le__", 0); + other_double = CastPyArg2Double(other_obj, "__le__", 0); // NOLINT has_other_double = true; if (_supported_int_dtype_.find(self_tensor.dtype()) != _supported_int_dtype_.end()) { @@ -1325,7 +1328,7 @@ static PyObject* tensor__le__method(TensorObject* self, self_tensor = cast_ad_func(self_tensor, DataType::FLOAT32); } } else if (PyCheckInteger(other_obj) || IsNumpyType(other_obj)) { - other_double = CastPyArg2Double(other_obj, "__le__", 0); + other_double = CastPyArg2Double(other_obj, "__le__", 0); // NOLINT has_other_double = true; } } diff --git a/paddle/fluid/pybind/eager_method.cc b/paddle/fluid/pybind/eager_method.cc index 4c1fb0b431070..f9fce2bfda938 100644 --- a/paddle/fluid/pybind/eager_method.cc +++ b/paddle/fluid/pybind/eager_method.cc @@ -201,20 +201,20 @@ static PyObject* tensor_method_numpy(TensorObject* self, "otherwise 'Tensor.numpy()[0]' will raise error in release 2.6."; py_rank = 1; py_dims[0] = 1; - py_strides[0] = sizeof_dtype * numel; + py_strides[0] = static_cast(sizeof_dtype * numel); } } else if (self->tensor.is_dense_tensor()) { auto tensor_stride = self->tensor.strides(); - for (int i = tensor_dims.size() - 1; i >= 0; --i) { - py_dims[i] = static_cast(tensor_dims[i]); - py_strides[i] = sizeof_dtype * tensor_stride[i]; + for (int i = static_cast(tensor_dims.size()) - 1; i >= 0; --i) { + py_dims[i] = static_cast(tensor_dims[i]); + py_strides[i] = static_cast(sizeof_dtype * tensor_stride[i]); numel *= py_dims[i]; } } else { - for (int i = tensor_dims.size() - 1; i >= 0; --i) { - py_dims[i] = static_cast(tensor_dims[i]); - py_strides[i] = sizeof_dtype * numel; + for (int i = static_cast(tensor_dims.size()) - 1; i >= 0; --i) { + py_dims[i] = static_cast(tensor_dims[i]); + py_strides[i] = static_cast(sizeof_dtype * numel); numel *= py_dims[i]; } } @@ -223,7 +223,7 @@ static PyObject* tensor_method_numpy(TensorObject* self, PyObject* array = api.PyArray_NewFromDescr_( api.PyArray_Type_, api.PyArray_DescrFromType_(numpy_dtype), - py_rank, + static_cast(py_rank), py_dims, py_strides, nullptr, @@ -471,7 +471,7 @@ static PyObject* tensor_method_numpy(TensorObject* self, PyObject* array = api.PyArray_NewFromDescr_( api.PyArray_Type_, api.PyArray_DescrFromType_(numpy_dtype), - py_rank, + static_cast(py_rank), py_dims, py_strides, reinterpret_cast(reinterpret_cast(array_buffer) + @@ -2817,9 +2817,9 @@ static PyObject* tensor_method_strides(TensorObject* self, return ToPyObject(value); } auto stride = self->tensor.strides(); - size_t rank = static_cast(stride.size()); + int rank = static_cast(stride.size()); value.resize(rank); - for (size_t i = 0; i < rank; i++) { + for (int i = 0; i < rank; i++) { value[i] = stride[i]; } return ToPyObject(value); diff --git a/paddle/fluid/pybind/eager_properties.cc b/paddle/fluid/pybind/eager_properties.cc index 3abae04523142..59ecee2c5d668 100644 --- a/paddle/fluid/pybind/eager_properties.cc +++ b/paddle/fluid/pybind/eager_properties.cc @@ -527,7 +527,7 @@ PyObject* tensor_properties_get_strides(TensorObject* self, void* closure) { size_t rank = static_cast(stride.size()); value.resize(rank); - for (size_t i = 0; i < rank; i++) { + for (int i = 0; i < static_cast(rank); i++) { value[i] = stride[i]; } diff --git 
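The strides changes in eager_method.cc take the other route: instead of casting inside the loop, they compute rank as an int once and iterate with an int index, so the index type matches the DDim-style operator[] it feeds. An assumption-laden sketch of that shape:

#include <cstddef>
#include <cstdint>
#include <vector>

// Stand-in for a strides object whose operator[] takes an int index.
struct StridesLike {
  std::int64_t operator[](int i) const { return i == 0 ? 4 : 1; }
  int size() const { return 2; }
};

std::vector<std::int64_t> ToVector(const StridesLike& stride) {
  const int rank = stride.size();
  std::vector<std::int64_t> value(static_cast<std::size_t>(rank));
  for (int i = 0; i < rank; ++i) {
    value[i] = stride[i];  // the int index matches stride's operator[]
  }
  return value;
}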
a/paddle/fluid/pybind/eager_py_layer.cc b/paddle/fluid/pybind/eager_py_layer.cc index 8bd5060539965..94b7de25ed4b4 100644 --- a/paddle/fluid/pybind/eager_py_layer.cc +++ b/paddle/fluid/pybind/eager_py_layer.cc @@ -161,7 +161,7 @@ PyObject* pylayer_method_apply(PyObject* cls, args_size = PyTuple_GET_SIZE(args); } inputs_size = kwargs_size + args_size; - forward_args = PyTuple_New(args_size + 1); + forward_args = PyTuple_New(args_size + 1); // NOLINT Py_INCREF(ctx); PyTuple_SET_ITEM(forward_args, 0, reinterpret_cast(ctx)); @@ -175,7 +175,7 @@ PyObject* pylayer_method_apply(PyObject* cls, for (size_t i = 0; i < inputs_size; i++) { PyObject* obj = nullptr; if (i >= args_size) { - obj = PyList_GetItem(kwargs_value_list, i - args_size); + obj = PyList_GetItem(kwargs_value_list, i - args_size); // NOLINT } else { obj = PyTuple_GET_ITEM(args, i); } diff --git a/paddle/fluid/pybind/eager_utils.cc b/paddle/fluid/pybind/eager_utils.cc index 90bde68961c96..70b5e99543dd0 100644 --- a/paddle/fluid/pybind/eager_utils.cc +++ b/paddle/fluid/pybind/eager_utils.cc @@ -1007,8 +1007,8 @@ PyObject* ToPyObject(const paddle::framework::Vocab& value) { PyObject* dict = PyDict_New(); for (const auto& map_iter : value) { // Convert Key - PyObject* key_string = - PyUnicode_FromWideChar(map_iter.first.c_str(), map_iter.first.size()); + PyObject* key_string = PyUnicode_FromWideChar( + map_iter.first.c_str(), map_iter.first.size()); // NOLINT if (!key_string) { PADDLE_THROW(platform::errors::Fatal( "Unable to convert std::wstring to PyObject")); diff --git a/paddle/fluid/pybind/inference_api.cc b/paddle/fluid/pybind/inference_api.cc index e464718a43cd5..36502d62b43c5 100644 --- a/paddle/fluid/pybind/inference_api.cc +++ b/paddle/fluid/pybind/inference_api.cc @@ -254,7 +254,7 @@ void PaddleInferShareExternalData(paddle_infer::Tensor &tensor, // NOLINT phi::DenseTensor input_tensor) { std::vector shape; for (int i = 0; i < input_tensor.dims().size(); ++i) { - shape.push_back(input_tensor.dims()[i]); + shape.push_back(input_tensor.dims()[i]); // NOLINT } if (input_tensor.dtype() == phi::DataType::FLOAT64) { tensor.ShareExternalData( @@ -302,7 +302,7 @@ void PaddleTensorShareExternalData(paddle_infer::Tensor &tensor, // NOLINT paddle::Tensor &&paddle_tensor) { std::vector shape; for (int i = 0; i < paddle_tensor.dims().size(); ++i) { - shape.push_back(paddle_tensor.dims()[i]); + shape.push_back(paddle_tensor.dims()[i]); // NOLINT } if (paddle_tensor.dtype() == phi::DataType::FLOAT64) { diff --git a/paddle/fluid/pybind/op_function_common.cc b/paddle/fluid/pybind/op_function_common.cc index 266578615e352..29c4c2fd0a7c5 100644 --- a/paddle/fluid/pybind/op_function_common.cc +++ b/paddle/fluid/pybind/op_function_common.cc @@ -245,7 +245,7 @@ phi::dtype::complex CastPyArg2Complex(PyObject* obj, if (PyComplex_Check(obj)) { double real = PyComplex_RealAsDouble(obj); double imag = PyComplex_ImagAsDouble(obj); - return phi::dtype::complex(real, imag); + return phi::dtype::complex(real, imag); // NOLINT } else { PADDLE_THROW(platform::errors::InvalidArgument( "%s(): argument (position %d) must be " diff --git a/paddle/fluid/pybind/pybind.cc b/paddle/fluid/pybind/pybind.cc index 9d1cd87280179..efd473b50c15b 100644 --- a/paddle/fluid/pybind/pybind.cc +++ b/paddle/fluid/pybind/pybind.cc @@ -1553,7 +1553,9 @@ All parameter, weight, gradient are variables in Paddle. 
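Several pybind hunks above (PyTuple_New, PyUnicode_FromWideChar, shape.push_back) get NOLINT because CPython-style APIs take signed lengths while C++ containers report size_t. Where a cast is preferred over suppression, a checked helper is one possibility; this is a sketch, not an API from the tree:

#include <cstddef>
#include <limits>
#include <stdexcept>

long long ToSignedLen(std::size_t n) {
  if (n > static_cast<std::size_t>(std::numeric_limits<long long>::max())) {
    throw std::length_error("length does not fit in a signed 64-bit type");
  }
  return static_cast<long long>(n);
}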
ProgramDesc prog_with_targets(origin); for (const auto &t : targets) { - prog_with_targets.MutableBlock(t[0])->Op(t[1])->SetIsTarget(true); + prog_with_targets.MutableBlock(t[0]) + ->Op(static_cast(t[1])) + ->SetIsTarget(true); } proto::ProgramDesc pruned_desc; auto pruned_origin_block_id_map = diff --git a/paddle/fluid/pybind/reader_py.cc b/paddle/fluid/pybind/reader_py.cc index 0e581e45b5970..f4b4ff4cf42ca 100644 --- a/paddle/fluid/pybind/reader_py.cc +++ b/paddle/fluid/pybind/reader_py.cc @@ -70,8 +70,10 @@ static paddle::optional> DiffTensorShape( if (!tensor.lod().empty()) { tensor_shape[0] = -1; // unknown shape } else { - int64_t split_size = (tensor_shape[0] + num_places - 1) / num_places; - int64_t remainder = (split_size == 0 ? 0 : tensor_shape[0] % split_size); + int64_t split_size = + static_cast((tensor_shape[0] + num_places - 1) / num_places); + int64_t remainder = static_cast( + split_size == 0 ? 0 : tensor_shape[0] % split_size); tensor_shape[0] = split_size; if (target_shape[0] >= 0) { // need check dim 0 if (tensor_shape[0] != target_shape[0]) { @@ -91,7 +93,8 @@ static paddle::optional> DiffTensorShape( 0, platform::errors::InvalidArgument( "Tensor shape at dim %d must not be less than 0", idx)); - if (target_shape[idx] >= 0 && tensor_shape[idx] != target_shape[idx]) { + if (target_shape[idx] >= 0 && + tensor_shape[static_cast(idx)] != target_shape[idx]) { return phi::vectorize(tensor_shape); } } diff --git a/paddle/fluid/pybind/tensor.cc b/paddle/fluid/pybind/tensor.cc index 95e217365be3d..5b6efa9e1dba9 100644 --- a/paddle/fluid/pybind/tensor.cc +++ b/paddle/fluid/pybind/tensor.cc @@ -642,7 +642,8 @@ void BindTensor(pybind11::module &m) { // NOLINT [](phi::DenseTensor &self) -> bool { // Check that the lod info is valid and match the outermost // dimension of the Tensor data - return CheckLoD(self.lod(), vectorize(self.dims()).front()); + return CheckLoD(self.lod(), + static_cast(vectorize(self.dims()).front())); }, R"DOC( Check whether the LoD of the Tensor is valid. diff --git a/paddle/phi/infermeta/fusion.cc b/paddle/phi/infermeta/fusion.cc index 2e619a3566ff3..d07d335500407 100644 --- a/paddle/phi/infermeta/fusion.cc +++ b/paddle/phi/infermeta/fusion.cc @@ -1170,7 +1170,7 @@ void SqueezeExcitationInferMeta(const MetaTensor& x, std::vector out_shape( {in_dims[0], filter_dims[1], in_dims[2], in_dims[3]}); // set output dims - out->set_dims(DDim(out_shape.data(), out_shape.size())); + out->set_dims(DDim(out_shape.data(), static_cast(out_shape.size()))); } } // namespace phi diff --git a/paddle/phi/kernels/cpu/interpolate_grad_kernel.cc b/paddle/phi/kernels/cpu/interpolate_grad_kernel.cc index ff0ef722ee7ba..f1478d5e3b3e7 100644 --- a/paddle/phi/kernels/cpu/interpolate_grad_kernel.cc +++ b/paddle/phi/kernels/cpu/interpolate_grad_kernel.cc @@ -360,17 +360,20 @@ static void NearestNeighbor3DInterpolateGrad(const DenseTensor& output_grad, for (int d = 0; d < out_d; d++) { int in_d = static_cast( - align_corners ? std::lround(ratio_d * static_cast(d)) - : (ratio_d * static_cast(d))); + align_corners + ? static_cast(std::lround(ratio_d * static_cast(d))) + : (ratio_d * static_cast(d))); for (int k = 0; k < out_h; k++) { // loop for images int in_k = static_cast( - align_corners ? std::lround(ratio_h * static_cast(k)) - : (ratio_h * static_cast(k))); + align_corners + ? static_cast(std::lround(ratio_h * static_cast(k))) + : (ratio_h * static_cast(k))); for (int l = 0; l < out_w; l++) { - int in_l = static_cast( - align_corners ? 
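The reader_py hunk is the standard integer ceiling division, with the new cast wrapping the whole quotient rather than each operand. Its core, extracted as a sketch:

#include <cstdint>

// ceil(total / parts) in integer arithmetic; assumes total >= 0, parts > 0.
std::int64_t CeilDiv(std::int64_t total, std::int64_t parts) {
  return (total + parts - 1) / parts;
}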
std::lround(ratio_w * static_cast(l)) - : (ratio_w * static_cast(l))); + int in_l = static_cast(align_corners + ? static_cast(std::lround( + ratio_w * static_cast(l))) + : (ratio_w * static_cast(l))); for (int i = 0; i < n; i++) { // loop for batches for (int j = 0; j < c; j++) { // loop for channels diff --git a/paddle/phi/kernels/cpu/interpolate_kernel.cc b/paddle/phi/kernels/cpu/interpolate_kernel.cc index 895410a75a414..198cba7d1e948 100644 --- a/paddle/phi/kernels/cpu/interpolate_kernel.cc +++ b/paddle/phi/kernels/cpu/interpolate_kernel.cc @@ -231,14 +231,16 @@ static void NearestNeighborInterpolate(const DenseTensor& input, auto output_t = EigenTensor::From(*output); for (int k = 0; k < out_h; k++) { // loop for images - int in_k = (align_corners) - ? std::lround(ratio_h * static_cast(k)) - : static_cast(ratio_h * static_cast(k)); + int in_k = + (align_corners) + ? static_cast(std::lround(ratio_h * static_cast(k))) + : static_cast(ratio_h * static_cast(k)); for (int l = 0; l < out_w; l++) { - int in_l = (align_corners) - ? std::lround(ratio_w * static_cast(l)) - : static_cast(ratio_w * static_cast(l)); + int in_l = + (align_corners) + ? static_cast(std::lround(ratio_w * static_cast(l))) + : static_cast(ratio_w * static_cast(l)); for (int i = 0; i < n; i++) { // loop for batches for (int j = 0; j < c; j++) { // loop for channels @@ -514,18 +516,21 @@ static void NearestNeighbor3DInterpolate(const DenseTensor& input, auto input_t = EigenTensor::From(input); auto output_t = EigenTensor::From(*output); for (int d = 0; d < out_d; d++) { // loop for images - int in_d = (align_corners) - ? std::lround(ratio_d * static_cast(d)) - : static_cast(ratio_d * static_cast(d)); + int in_d = + (align_corners) + ? static_cast(std::lround(ratio_d * static_cast(d))) + : static_cast(ratio_d * static_cast(d)); for (int k = 0; k < out_h; k++) { - int in_k = (align_corners) - ? std::lround(ratio_h * static_cast(k)) - : static_cast(ratio_h * static_cast(k)); + int in_k = + (align_corners) + ? static_cast(std::lround(ratio_h * static_cast(k))) + : static_cast(ratio_h * static_cast(k)); for (int l = 0; l < out_w; l++) { - int in_l = (align_corners) - ? std::lround(ratio_w * static_cast(l)) - : static_cast(ratio_w * static_cast(l)); + int in_l = + (align_corners) + ? 
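The interpolate changes all reduce to one fact: std::lround returns long, so an int destination still narrows even though the rounding itself was already explicit. The committed shape, condensed:

#include <cmath>

int NearestIndex(float ratio, int out_idx, bool align_corners) {
  return align_corners
             ? static_cast<int>(std::lround(ratio * static_cast<float>(out_idx)))
             : static_cast<int>(ratio * static_cast<float>(out_idx));
}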
static_cast(std::lround(ratio_w * static_cast(l))) + : static_cast(ratio_w * static_cast(l)); for (int i = 0; i < n; i++) { // loop for batches for (int j = 0; j < c; j++) { // loop for channels diff --git a/paddle/utils/flags_native_test.cc b/paddle/utils/flags_native_test.cc index 1c684f168fba7..26ef8c12c1875 100644 --- a/paddle/utils/flags_native_test.cc +++ b/paddle/utils/flags_native_test.cc @@ -35,7 +35,7 @@ void SplitCommandlineArg(const std::string& commandline, args.push_back(commandline.substr(start_pos, end_pos - start_pos)); } args.push_back(""); // test empty argument - *argc = args.size(); + *argc = static_cast(args.size()); *argv = new char*[*argc]; for (size_t i = 0; i < args.size(); i++) { (*argv)[i] = const_cast(args[i].c_str()); diff --git a/test/cpp/eager/task_tests/fwd_bwd_joint_test.cc b/test/cpp/eager/task_tests/fwd_bwd_joint_test.cc index 648b9d61abc1f..1aff3a2104fa1 100644 --- a/test/cpp/eager/task_tests/fwd_bwd_joint_test.cc +++ b/test/cpp/eager/task_tests/fwd_bwd_joint_test.cc @@ -50,7 +50,7 @@ paddle::Tensor hook_function(const paddle::Tensor& t) { float* t_ptr = t_dense->mutable_data(place); float* ret_ptr = ret_dense->mutable_data(place); for (int i = 0; i < ret_dense->numel(); i++) { - ret_ptr[i] = t_ptr[i] + 5.0; + ret_ptr[i] = t_ptr[i] + 5.0f; } auto ret_impl = std::dynamic_pointer_cast(ret_dense); diff --git a/test/cpp/eager/task_tests/hook_test.cc b/test/cpp/eager/task_tests/hook_test.cc index 49d7f20269682..898590201eef6 100644 --- a/test/cpp/eager/task_tests/hook_test.cc +++ b/test/cpp/eager/task_tests/hook_test.cc @@ -45,7 +45,7 @@ paddle::Tensor hook_function(const paddle::Tensor& t) { float* t_ptr = t_dense->mutable_data(place); float* ret_ptr = ret_dense->mutable_data(place); for (int i = 0; i < ret_dense->numel(); i++) { - ret_ptr[i] = t_ptr[i] + 3.0; + ret_ptr[i] = t_ptr[i] + 3.0f; } auto ret_impl = std::dynamic_pointer_cast(ret_dense); diff --git a/test/cpp/eager/task_tests/hook_test_intermidiate.cc b/test/cpp/eager/task_tests/hook_test_intermidiate.cc index 0b94c39e2a811..37070d9b7b8f2 100644 --- a/test/cpp/eager/task_tests/hook_test_intermidiate.cc +++ b/test/cpp/eager/task_tests/hook_test_intermidiate.cc @@ -48,7 +48,7 @@ paddle::Tensor hook_function(const paddle::Tensor& t) { float* t_ptr = t_dense->mutable_data(place); float* ret_ptr = ret_dense->mutable_data(place); for (int i = 0; i < ret_dense->numel(); i++) { - ret_ptr[i] = t_ptr[i] + 3.0; + ret_ptr[i] = t_ptr[i] + 3.0f; } auto ret_impl = std::dynamic_pointer_cast(ret_dense); diff --git a/test/cpp/fluid/math/beam_search_test.cc b/test/cpp/fluid/math/beam_search_test.cc index c96cb6f55edd4..d8e56e6102dd7 100644 --- a/test/cpp/fluid/math/beam_search_test.cc +++ b/test/cpp/fluid/math/beam_search_test.cc @@ -60,7 +60,7 @@ void PrepareCPUTensors(phi::DenseTensor* ids, // pre_scores pre_scores->Resize(phi::make_ddim({4, 1})); for (int i = 0; i < 4; i++) { - pre_scores->mutable_data(place)[i] = 0.1 * (i + 1); + pre_scores->mutable_data(place)[i] = 0.1 * (i + 1); // NOLINT } } diff --git a/test/cpp/fluid/math/im2col_test.cc b/test/cpp/fluid/math/im2col_test.cc index 13a4f0b6750dd..fab3086a820f2 100644 --- a/test/cpp/fluid/math/im2col_test.cc +++ b/test/cpp/fluid/math/im2col_test.cc @@ -364,7 +364,7 @@ void benchIm2col(int ic, int ih, int iw, int fh, int fw, int ph, int pw) { auto GetCurrentMs = []() -> double { struct timeval time; gettimeofday(&time, nullptr); - return 1e+3 * time.tv_sec + 1e-3 * time.tv_usec; + return 1e+3 * time.tv_sec + 1e-3 * time.tv_usec; // NOLINT }; auto t1 = 
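The hook-test edits (5.0 -> 5.0f, 3.0 -> 3.0f) are the arithmetic version of the literal rule: float + 5.0 promotes the sum to double, which then narrows when stored back into a float buffer; an f-suffixed literal keeps the whole expression in float:

void AddBias(const float* in, float* out, int n) {
  for (int i = 0; i < n; ++i) {
    out[i] = in[i] + 5.0f;  // stays float; "+ 5.0" would round-trip via double
  }
}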
GetCurrentMs(); for (int i = 0; i < repeat; ++i) { diff --git a/test/cpp/imperative/test_gradient_accmulator.cc b/test/cpp/imperative/test_gradient_accmulator.cc index 36e10b393f805..982fd81a98835 100644 --- a/test/cpp/imperative/test_gradient_accmulator.cc +++ b/test/cpp/imperative/test_gradient_accmulator.cc @@ -395,7 +395,7 @@ static void TestGradientAccumulatorTestUnchangeInput( int seed; { std::random_device rd; - seed = rd(); + seed = static_cast(rd()); } std::mt19937 engine(seed); diff --git a/test/cpp/imperative/test_hooks.cc b/test/cpp/imperative/test_hooks.cc index 8f28ebaf29485..5307139a42652 100644 --- a/test/cpp/imperative/test_hooks.cc +++ b/test/cpp/imperative/test_hooks.cc @@ -62,7 +62,7 @@ std::shared_ptr DoubleHook( auto* data = tensor.data(); auto* out_data = out_tensor->mutable_data(platform::CPUPlace()); for (int64_t i = 0; i < out_tensor->numel(); ++i) { - out_data[i] = data[i] * 2.0; + out_data[i] = data[i] * 2.0; // NOLINT } return out_var; diff --git a/test/cpp/phi/kernels/sequence_pooling_test.cc b/test/cpp/phi/kernels/sequence_pooling_test.cc index b9a6bda19a2df..037ad314890c5 100644 --- a/test/cpp/phi/kernels/sequence_pooling_test.cc +++ b/test/cpp/phi/kernels/sequence_pooling_test.cc @@ -92,8 +92,8 @@ void TestSequencePoolingSum(const DeviceContext &context, if (place == phi::CPUPlace()) { for (size_t i = 0; i < in_grad.lod()[0].size() - 1; ++i) { - int64_t begin = in_grad.lod()[0][i]; - int64_t end = in_grad.lod()[0][i + 1]; + int64_t begin = static_cast(in_grad.lod()[0][i]); + int64_t end = static_cast(in_grad.lod()[0][i + 1]); phi::DenseTensor tmp = in_grad.Slice(begin, end); for (int64_t j = 0; j != tmp.numel() / second_dim; ++j) { for (int64_t m = 0; m != second_dim; ++m) { @@ -104,8 +104,8 @@ void TestSequencePoolingSum(const DeviceContext &context, } } else { for (size_t i = 0; i < cpu_in_grad.lod()[0].size() - 1; ++i) { - int64_t begin = cpu_in_grad.lod()[0][i]; - int64_t end = cpu_in_grad.lod()[0][i + 1]; + int64_t begin = static_cast(cpu_in_grad.lod()[0][i]); + int64_t end = static_cast(cpu_in_grad.lod()[0][i + 1]); phi::DenseTensor tmp = cpu_in_grad.Slice(begin, end); for (int64_t j = 0; j != tmp.numel() / second_dim; ++j) { for (int64_t m = 0; m != second_dim; ++m) { diff --git a/test/cpp/phi/kernels/test_cpu_vec.cc b/test/cpp/phi/kernels/test_cpu_vec.cc index 9a2b2994463bb..19583b7838956 100644 --- a/test/cpp/phi/kernels/test_cpu_vec.cc +++ b/test/cpp/phi/kernels/test_cpu_vec.cc @@ -27,7 +27,7 @@ namespace tests { inline double GetCurrentUS() { struct timeval time; gettimeofday(&time, nullptr); - return 1e+6 * time.tv_sec + time.tv_usec; + return 1e+6 * time.tv_sec + time.tv_usec; // NOLINT } constexpr int repeat = 1000; diff --git a/test/cpp/pir/core/program_translator_test.cc b/test/cpp/pir/core/program_translator_test.cc index 915ff2de42beb..635fb13817985 100644 --- a/test/cpp/pir/core/program_translator_test.cc +++ b/test/cpp/pir/core/program_translator_test.cc @@ -48,7 +48,7 @@ ProgramDesc load_from_file(const std::string &file_name) { std::string buffer(fin.tellg(), ' '); fin.seekg(0, std::ios::beg); - fin.read(&buffer[0], buffer.size()); + fin.read(&buffer[0], buffer.size()); // NOLINT fin.close(); return ProgramDesc(buffer); } diff --git a/test/cpp/pir/pattern_rewrite/pattern_rewrite_test.cc b/test/cpp/pir/pattern_rewrite/pattern_rewrite_test.cc index 985d00c4b0d1e..1bb9dc0cafae7 100644 --- a/test/cpp/pir/pattern_rewrite/pattern_rewrite_test.cc +++ b/test/cpp/pir/pattern_rewrite/pattern_rewrite_test.cc @@ -244,7 +244,7 @@ class 
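The timer helpers keep NOLINT rather than a cast because the integer-to-double conversion is inherent to the math: a 64-bit tv_sec cannot always be represented exactly in a double, and no cast changes that. The same conversion written out explicitly:

#include <cstdint>

double ToMicroseconds(std::int64_t sec, std::int64_t usec) {
  return 1e+6 * static_cast<double>(sec) + static_cast<double>(usec);
}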
RedundantTransposeFusePattern std::vector GetPerm(const std::vector &perm1, const std::vector &perm2) const { - int n = perm1.size(); + int n = static_cast(perm1.size()); std::vector axis(n), axis1(n), axis2(n); std::iota(axis.begin(), axis.end(), 0); for (int i = 0; i < n; ++i) {