[clang-tidy] NO.8 enable cppcoreguidelines-narrowing-conversions. step:2 #56895

Merged · 4 commits · Sep 5, 2023
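This PR is part of a series that enables the clang-tidy check cppcoreguidelines-narrowing-conversions. The check flags implicit conversions that can lose data or change signedness, most commonly std::size_t (the return type of std::vector::size()) flowing into a signed or narrower integer. The hunks below resolve each warning in one of three ways: make the conversion explicit with static_cast, change the index type to match the API it feeds, or suppress an intentional conversion with // NOLINT. A minimal sketch of the before/after pattern (illustrative only, not taken from the diff):

    #include <cstdint>
    #include <vector>

    void Example(const std::vector<int64_t>& shape) {
      // Flagged: size() returns std::size_t, and size_t -> int64_t changes
      // signedness, so values above INT64_MAX would be misrepresented.
      // int64_t ndim = shape.size();

      // Accepted: the conversion is explicit, documenting the intent.
      int64_t ndim = static_cast<int64_t>(shape.size());
      (void)ndim;  // the sketch does not otherwise use ndim
    }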
4 changes: 2 additions & 2 deletions paddle/fluid/distributed/auto_parallel/dist_attr.cc
@@ -378,13 +378,13 @@ std::string OperatorDistAttr::to_string() const {
 }
 
 void OperatorDistAttr::from_proto(const OperatorDistAttrProto& proto) {
-  for (int64_t i = 0; i < proto.input_dist_attrs_size(); ++i) {
+  for (int i = 0; i < proto.input_dist_attrs_size(); ++i) {
     TensorDistAttr dist_attr;
     std::string name = proto.input_dist_attrs(i).name();
     dist_attr.from_proto(proto.input_dist_attrs(i).tensor_dist_attr());
     input_dist_attrs_[name] = dist_attr;
   }
-  for (int64_t i = 0; i < proto.output_dist_attrs_size(); ++i) {
+  for (int i = 0; i < proto.output_dist_attrs_size(); ++i) {
     TensorDistAttr dist_attr;
     std::string name = proto.output_dist_attrs(i).name();
     dist_attr.from_proto(proto.output_dist_attrs(i).tensor_dist_attr());
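Here the index type is changed to int rather than casting: protobuf-generated repeated-field accessors take and return int, so an int64_t index is narrowed at every accessor call. A sketch of why the original loop was flagged, assuming the usual generated signatures:

    // Generated protobuf API (simplified, assumed):
    //   int input_dist_attrs_size() const;
    //   const Entry& input_dist_attrs(int index) const;
    for (int64_t i = 0; i < proto.input_dist_attrs_size(); ++i) {
      proto.input_dist_attrs(i);  // flagged: int64_t narrowed to int
    }
    // An int index matches the accessor signatures, so no conversion occurs.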
@@ -223,7 +223,7 @@ std::vector<int64_t> GetDimsMappingForAxes(
     const std::unordered_map<std::string, int64_t>& axis_to_dim_map,
     const bool unsharded_miss_axis) {
   std::vector<int64_t> dims_mapping;
-  for (int64_t i = 0, n = axes.size(); i < n; i++) {
+  for (int64_t i = 0, n = static_cast<int64_t>(axes.size()); i < n; i++) {
     std::string axis = axes.substr(i, 1);
     if (axis == "1") {
       dims_mapping.emplace_back(-1);
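The recurring fix pattern in these rules hoists the size into a loop-local bound, so the cast is written once and size() is not re-evaluated per iteration; this is safe as long as the loop body does not change the container's length:

    // One explicit cast in the init-statement covers the whole loop.
    for (int64_t i = 0, n = static_cast<int64_t>(axes.size()); i < n; i++) {
      // ... use axes[i] or axes.substr(i, 1) ...
    }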
@@ -34,7 +34,7 @@ CrossEntropyWithSoftmaxSPMDRule::InferForward(
                         input_specs_size));
 
   auto x_shape = input_specs[0].shape();
-  int x_ndim = x_shape.size();
+  int x_ndim = static_cast<int>(x_shape.size());
   auto x_dist_attr_src = input_specs[0].dist_attr();
   std::vector<int64_t> x_dims_mapping_src = x_dist_attr_src.dims_mapping();
27 changes: 15 additions & 12 deletions paddle/fluid/distributed/auto_parallel/spmd_rules/dim_trans.cc
@@ -85,7 +85,7 @@ void Flatten::set_inputs(const std::vector<DimTrans*>& dims) {
 
 std::string Flatten::to_string() {
   std::string ret_str("Flatten(");
-  for (int64_t i = 0, n = input_dims_.size(); i < n; ++i) {
+  for (int i = 0, n = static_cast<int>(input_dims_.size()); i < n; ++i) {
     ret_str += input_dims_[i]->to_string();
     if (i < n - 1) {
       ret_str += ",";
@@ -125,7 +125,7 @@ int64_t Split::local_splitted_shape_value() {
 std::string Split::to_string() {
   std::string ret_str("Split(");
   ret_str += input_dim_trans_->to_string() + ", (";
-  for (int64_t i = 0, n = splitted_shape_.size(); i < n; ++i) {
+  for (int i = 0, n = static_cast<int>(splitted_shape_.size()); i < n; ++i) {
     ret_str += std::to_string(splitted_shape_[i]);
     if (i < n - 1) {
       ret_str += ",";
@@ -161,9 +161,9 @@ DimTrans* make_split(DimTrans* dim,
   std::vector<int64_t> new_shape;
   // map between from idx in shape to new_shape
   std::vector<int64_t> idx_map(shape.size(), -1);
-  for (int64_t i = 0, n = shape.size(); i < n; ++i) {
+  for (int i = 0, n = static_cast<int>(shape.size()); i < n; ++i) {
     if (shape[id] != 1) {
-      idx_map[i] = new_shape.size();
+      idx_map[i] = static_cast<int64_t>(new_shape.size());
       new_shape.emplace_back(shape[i]);
     }
   }
@@ -173,7 +173,8 @@
 }
 
 void CleanUp() {
-  for (int64_t i = 0, n = all_dim_trans.size(); i < n; i++) {
+  int n = static_cast<int>(all_dim_trans.size());
+  for (int i = 0; i < n; i++) {
     if (all_dim_trans[i]) {
       delete all_dim_trans[i];
       all_dim_trans[i] = nullptr;
@@ -210,8 +211,8 @@ DimTrans* GetDimTrans(DimTrans* dim_trans,
   } else if (type == DimTrans::Type::FLATTEN) {
     Flatten* flatten = dynamic_cast<Flatten*>(dim_trans);
     const std::vector<DimTrans*>& inputs = flatten->inputs();
-    int64_t nmesh = (*shardable)[0].size();
-    for (int64_t i = 1, n = inputs.size(); i < n; i++) {
+    int64_t nmesh = (*shardable)[0].size();  // NOLINT
+    for (int i = 1, n = static_cast<int>(inputs.size()); i < n; i++) {
       DimTrans* input = inputs[i];
       if (input->type() == DimTrans::Type::INPUTDIM) {
         InputDim* inputdim = dynamic_cast<InputDim*>(input);
@@ -252,7 +253,7 @@ DimTrans* GetDimTrans(DimTrans* dim_trans,
                         phi::errors::InvalidArgument(
                             "The returned dim_trans must be INPUTDIM."));
     InputDim* inputdim = dynamic_cast<InputDim*>(dim);
-    int64_t nmesh = mesh_shape.size();
+    int64_t nmesh = static_cast<int64_t>(mesh_shape.size());
     int64_t input_axis = inputdim->input_dim();
 
     // Check whether the sharded dim can be sharded on
@@ -295,13 +296,15 @@ std::vector<std::vector<int64_t>> InferFromDimTrans(
   const std::vector<int64_t>& mesh_shape = mesh.shape();
 
   std::set<int64_t> sharded_input_dims;
-  for (int64_t i = 0, n = input_dims_mapping.size(); i < n; ++i) {
+  for (int64_t i = 0, n = static_cast<int64_t>(input_dims_mapping.size());
+       i < n;
+       ++i) {
     if (input_dims_mapping[i] > -1) {
       sharded_input_dims.insert(i);
     }
   }
-  int64_t ndim = input_shape.size();
-  int64_t nmesh = mesh_shape.size();
+  int64_t ndim = static_cast<int64_t>(input_shape.size());
+  int64_t nmesh = static_cast<int64_t>(mesh_shape.size());
   std::vector<std::vector<bool>> shardable(ndim,
                                            std::vector<bool>(nmesh, true));
 
@@ -319,7 +322,7 @@ std::vector<std::vector<int64_t>> InferFromDimTrans(
 
   // get the map from sharded input dimensions to output dimensions.
   std::vector<int64_t> dim_map_src2tgt(ndim, -1);
-  for (int64_t i = 0, n = dim_trans.size(); i < n; i++) {
+  for (int64_t i = 0, n = static_cast<int64_t>(dim_trans.size()); i < n; i++) {
     DimTrans* dim = GetDimTrans(dim_trans[i],
                                 &shardable,
                                 &seen_input_dims,
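Where a conversion is intentional and a cast would add noise, the PR suppresses the diagnostic on that line instead. // NOLINT silences every clang-tidy check for the line; the check can also be named to keep other diagnostics active:

    int64_t nmesh = (*shardable)[0].size();  // NOLINT
    // Equivalent, but scoped to this one check:
    int64_t nmesh = (*shardable)[0].size();  // NOLINT(cppcoreguidelines-narrowing-conversions)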
@@ -25,7 +25,7 @@ ElementwiseSPMDRule::InferForward(
     const std::vector<DistTensorSpec>& input_specs,
     const paddle::framework::AttributeMap& attrs) {
   // step0: Verify Input Args Based on Elementwise Logic
-  int64_t ninputs = input_specs.size();
+  int64_t ninputs = static_cast<int64_t>(input_specs.size());
   PADDLE_ENFORCE_GT(
       ninputs,
       0,
@@ -39,7 +39,7 @@ ElementwiseSPMDRule::InferForward(
   std::vector<std::string> input_axes_vec;
   int64_t max_ndim = 0;
   for (int64_t i = 0; i < ninputs; ++i) {
-    int64_t ndim = input_specs[i].shape().size();
+    int64_t ndim = static_cast<int64_t>(input_specs[i].shape().size());
     if (ndim > max_ndim) {
       max_ndim = ndim;
     }
@@ -49,7 +49,7 @@ ElementwiseSPMDRule::InferForward(
   std::vector<int64_t> broadcast_axis_count(max_ndim, 0);
   for (int64_t i = 0; i < ninputs; ++i) {
     std::vector<int64_t> shape = input_specs[i].shape();
-    int64_t ndim = shape.size();
+    int64_t ndim = static_cast<int64_t>(shape.size());
     int64_t start_dim = max_ndim - ndim;
     std::string axes_notation = GetBroadcastAxes(ndim, max_ndim, alphabet);
     if (ninputs > 1) {
@@ -33,8 +33,8 @@ EmbeddingSPMDRule::InferForward(const std::vector<DistTensorSpec>& input_specs,
                         input_specs_size));
   auto x_shape = input_specs[0].shape();
   auto weight_shape = input_specs[1].shape();
-  int x_ndim = x_shape.size();
-  int weight_ndim = weight_shape.size();
+  int x_ndim = static_cast<int>(x_shape.size());
+  int weight_ndim = static_cast<int>(weight_shape.size());
   auto x_dist_attr_src = input_specs[0].dist_attr();
   auto weight_dist_attr_src = input_specs[1].dist_attr();
   std::vector<int64_t> x_dims_mapping = x_dist_attr_src.dims_mapping();
@@ -170,9 +170,9 @@ EmbeddingSPMDRule::InferBackward(
                         output_specs_size));
 
   auto x_shape = input_specs[0].shape();
-  int x_ndim = x_shape.size();
+  int x_ndim = static_cast<int>(x_shape.size());
   auto out_shape = output_specs[0].shape();
-  int out_ndim = out_shape.size();
+  int out_ndim = static_cast<int>(out_shape.size());
 
   PADDLE_ENFORCE_EQ(x_ndim,
                     out_ndim - 1,
@@ -34,9 +34,9 @@ LayerNormSPMDRule::InferForward(const std::vector<DistTensorSpec>& input_specs,
   auto x_shape = input_specs[0].shape();
   auto scale_shape = input_specs[1].shape();
   auto bias_shape = input_specs[2].shape();
-  int x_ndim = x_shape.size();
-  int scale_ndim = scale_shape.size();
-  int bias_ndim = bias_shape.size();
+  int x_ndim = static_cast<int>(x_shape.size());
+  int scale_ndim = static_cast<int>(scale_shape.size());
+  int bias_ndim = static_cast<int>(bias_shape.size());
 
   PADDLE_ENFORCE_EQ(
       scale_ndim,
@@ -26,7 +26,7 @@ std::pair<std::vector<TensorDistAttr>, std::vector<TensorDistAttr>>
 ReductionSPMDRule::InferForward(const std::vector<DistTensorSpec>& input_specs,
                                 const paddle::framework::AttributeMap& attrs) {
   // step0: Verify Input Args Based on Elementwise Logic
-  int64_t ninputs = input_specs.size();
+  int64_t ninputs = static_cast<int64_t>(input_specs.size());
   PADDLE_ENFORCE_EQ(
       ninputs,
       1,
@@ -42,7 +42,7 @@ ReductionSPMDRule::InferForward(const std::vector<DistTensorSpec>& input_specs,
   std::string alphabet = "abcdefghijklmnopqrstuvwxyz";
 
   // get einsum notation for input
-  int64_t ndim = input_specs[0].shape().size();
+  int64_t ndim = static_cast<int64_t>(input_specs[0].shape().size());
   std::vector<std::string> input_axes_vec;
   std::string input_axes = alphabet.substr(0, ndim);
   input_axes_vec.emplace_back(input_axes);
@@ -28,7 +28,7 @@ using phi::distributed::auto_parallel::str_join;
 std::vector<int64_t> InferTargetShape(const std::vector<int64_t>& shape,
                                       int64_t len) {
   int64_t infer_idx = -1;
-  for (int64_t i = 0, n = shape.size(); i < n; i++) {
+  for (int64_t i = 0, n = static_cast<int64_t>(shape.size()); i < n; i++) {
     if (shape[i] == -1) {
       PADDLE_ENFORCE_EQ(
           infer_idx,
@@ -74,8 +74,8 @@ std::vector<DimTrans*> MakeReshapeDimTrans(
   int64_t src_idx = 0, tgt_idx = 0;
   int64_t s, t;
   int64_t src_len, tgt_len;
-  src_len = src_shape.size();
-  tgt_len = inferred_tgt_shape.size();
+  src_len = static_cast<int64_t>(src_shape.size());
+  tgt_len = static_cast<int64_t>(inferred_tgt_shape.size());
   while (src_idx < src_len || tgt_idx < tgt_len) {
     std::vector<int64_t> src_dims, tgt_splitted_shape;
     if (src_idx >= src_len) {
@@ -125,7 +125,9 @@ std::vector<DimTrans*> MakeReshapeDimTrans(
     }
     DimTrans* flatten = make_flatten(input_dims);
 
-    for (int64_t i = 0, n = tgt_splitted_shape.size(); i < n; i++) {
+    for (int64_t i = 0, n = static_cast<int64_t>(tgt_splitted_shape.size());
+         i < n;
+         i++) {
       ret.emplace_back(make_split(flatten, tgt_splitted_shape, i));
     }
   }
@@ -155,7 +157,7 @@ paddle::distributed::auto_parallel::ReshapeSPMDRule::InferForward(
 
   // handle the '0' values in target shape, '0' indicates
   // that the target shape is equal to the source shape
-  for (int64_t i = 0, n = tgt_shape.size(); i < n; i++) {
+  for (int64_t i = 0, n = static_cast<int64_t>(tgt_shape.size()); i < n; i++) {
     if (tgt_shape[i] == 0) {
       tgt_shape[i] = src_shape[i];
     }
@@ -178,7 +180,7 @@ paddle::distributed::auto_parallel::ReshapeSPMDRule::InferForward(
   VLOG(4) << "Reshape: input_shape: [" << str_join(src_shape)
           << "] output_shape: [" << str_join(tgt_shape) << "]";
   VLOG(4) << "Transformation from input to output:";
-  for (int64_t i = 0, n = trans.size(); i < n; i++) {
+  for (int64_t i = 0, n = static_cast<int64_t>(trans.size()); i < n; i++) {
     DimTrans* t = trans[i];
     VLOG(4) << "\tOutput axis " << i << ": " << t->to_string();
   }
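For context on the reshape rule: a 0 entry in the target shape copies the corresponding source dimension (handled in the hunk above), and a single -1 entry is inferred from the remaining element count (handled by InferTargetShape). A simplified model of that inference, not the exact Paddle implementation:

    #include <cstdint>
    #include <vector>

    std::vector<int64_t> InferShapeSketch(std::vector<int64_t> tgt,
                                          const std::vector<int64_t>& src,
                                          int64_t total_elements) {
      int64_t known = 1, infer_idx = -1;
      for (int64_t i = 0, n = static_cast<int64_t>(tgt.size()); i < n; i++) {
        if (tgt[i] == 0) tgt[i] = src[i];  // 0 means: keep the source dim
        if (tgt[i] == -1) {
          infer_idx = i;  // at most one -1 may appear
        } else {
          known *= tgt[i];
        }
      }
      if (infer_idx >= 0) tgt[infer_idx] = total_elements / known;
      return tgt;
    }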
@@ -33,7 +33,7 @@ SoftmaxSPMDRule::InferForward(const std::vector<DistTensorSpec>& input_specs,
                         input_specs_size));
 
   auto x_shape = input_specs[0].shape();
-  int x_ndim = x_shape.size();
+  int x_ndim = static_cast<int>(x_shape.size());
   auto x_dist_attr_src = input_specs[0].dist_attr();
   std::vector<int64_t> x_dims_mapping = x_dist_attr_src.dims_mapping();
@@ -27,7 +27,7 @@ std::pair<std::vector<TensorDistAttr>, std::vector<TensorDistAttr>>
 SplitSPMDRule::InferForward(const std::vector<DistTensorSpec>& input_specs,
                             const paddle::framework::AttributeMap& attrs) {
   // step0: Verify Input Args Based on Elementwise Logic
-  int64_t ninputs = input_specs.size();
+  int64_t ninputs = static_cast<int64_t>(input_specs.size());
   PADDLE_ENFORCE_EQ(
       ninputs,
       1,
@@ -37,15 +37,15 @@ SplitSPMDRule::InferForward(const std::vector<DistTensorSpec>& input_specs,
   VerifySpecs(input_specs, "split");
 
   // step1: Build Einsum Notation
-  int64_t ndim = input_specs[0].shape().size();
+  int64_t ndim = static_cast<int64_t>(input_specs[0].shape().size());
   int64_t noutput = 0;
   // split api uses num or sections as attribute
   if (attrs.find("num") != attrs.end()) {
     noutput = ExtractAttr<int64_t>("num", attrs);
   } else if (attrs.find("sections") != attrs.end()) {
     std::vector<int64_t> sections =
         ExtractAttr<std::vector<int64_t>>("sections", attrs);
-    noutput = sections.size();
+    noutput = static_cast<int64_t>(sections.size());
   }
   int64_t axis = ExtractAttr<int>("axis", attrs);
   if (axis < 0) {
@@ -23,7 +23,7 @@ std::pair<std::vector<TensorDistAttr>, std::vector<TensorDistAttr>>
 TransposeSPMDRule::InferForward(const std::vector<DistTensorSpec>& input_specs,
                                 const paddle::framework::AttributeMap& attrs) {
   // step0: Verify Input Args Based on Transpose Logic
-  int64_t ninputs = input_specs.size();
+  int64_t ninputs = static_cast<int64_t>(input_specs.size());
   PADDLE_ENFORCE_EQ(
       ninputs,
       1,
@@ -38,13 +38,13 @@ TransposeSPMDRule::InferForward(const std::vector<DistTensorSpec>& input_specs,
   std::string alphabet = "abcdefghijklmnopqrstuvwxyz";
 
   // get einsum notation for input
-  int64_t ndim = input_specs[0].shape().size();
+  int64_t ndim = static_cast<int64_t>(input_specs[0].shape().size());
   std::vector<std::string> input_axes_vec;
   std::string input_axes = alphabet.substr(0, ndim);
   input_axes_vec.emplace_back(input_axes);
 
   // get einsum notation for output
-  for (int64_t i = 0, n = perm_dims.size(); i < n; ++i) {
+  for (int64_t i = 0, n = static_cast<int64_t>(perm_dims.size()); i < n; ++i) {
     // convert the negative dim value to normal dim value
     if (perm_dims[i] < 0) {
       perm_dims[i] = ndim + perm_dims[i];
9 changes: 5 additions & 4 deletions paddle/fluid/distributed/collective/reducer.cc
@@ -953,9 +953,9 @@ void EagerReducer::MarkGroupReady(size_t group_index) {
        ++next_group_) {
     UNUSED auto &group = groups_[next_group_];
     if (group.is_sparse_) {
-      AllReduceSparse(&group, next_group_);
+      AllReduceSparse(&group, static_cast<int>(next_group_));
     } else {
-      FusedAllReduceSchedule(&group, next_group_);
+      FusedAllReduceSchedule(&group, static_cast<int>(next_group_));
     }
   }
 }
@@ -1078,7 +1078,7 @@ void EagerReducer::FusedAllReduceSchedule(EagerGroup *group,
 
   // div nranks
   paddle::experimental::scale_(
-      group->dense_contents_, 1.0 / nranks_, 0.0, false);
+      group->dense_contents_, 1.0 / nranks_, 0.0, false);  // NOLINT
 
   // all_reduce
   std::vector<Tensor> reduce_tensors = {group->dense_contents_};
@@ -1104,7 +1104,8 @@ void EagerReducer::AllReduceSparse(EagerGroup *group,
                                    const int curr_group_index) {
   // div nranks
   Tensor sparse_tensor(group->sparse_contents_);
-  paddle::experimental::scale_(sparse_tensor, 1.0 / nranks_, 0.0, false);
+  paddle::experimental::scale_(
+      sparse_tensor, 1.0 / nranks_, 0.0, false);  // NOLINT
 
   VLOG(3) << "sparse_group [" << curr_group_index << "] start allreduce.";
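The two NOLINTs in this file cover the gradient-averaging calls: 1.0 / nranks_ is computed in double, and somewhere in the call a double argument meets a narrower floating-point parameter. Suppression was chosen over a cast, presumably because the precision loss is irrelevant when scaling gradients by 1/nranks. A hedged sketch of the explicit alternative (parameter types assumed, not verified against the scale_ signature):

    // Hypothetical explicit form instead of // NOLINT:
    paddle::experimental::scale_(group->dense_contents_,
                                 static_cast<float>(1.0 / nranks_),
                                 0.0f,
                                 false);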
5 changes: 3 additions & 2 deletions paddle/fluid/distributed/fleet_executor/carrier.cc
@@ -260,7 +260,8 @@ Interceptor* Carrier::SetInterceptor(int64_t interceptor_id,
   interceptor->RegisterCarrier(this);
 
   // TODO(fleet_exe dev): get loop
-  auto* loop = thread_pool_.GetLoop(interceptor_id % thread_num_);
+  auto* loop =
+      thread_pool_.GetLoop(static_cast<int>(interceptor_id % thread_num_));
   PADDLE_ENFORCE_NOT_NULL(
       loop, platform::errors::Fatal("thread task loop must not null"));
   interceptor->RegisterTaskLoop(loop);
@@ -296,7 +297,7 @@ void Carrier::CreateInterceptors(
   auto gc = GetGC(place_);
 
   // create source and sink task node
-  auto max_run_times = microbatch_scopes_.size();
+  int64_t max_run_times = static_cast<int64_t>(microbatch_scopes_.size());
   TaskNode* source = new TaskNode(
       rank_, SOURCE_ID, max_run_times);  // rank, task_id, max_run_times
   TaskNode* sink = new TaskNode(rank_, SINK_ID, max_run_times);
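The cast in SetInterceptor is value-safe by construction: for non-negative interceptor_id, the result of interceptor_id % thread_num_ lies in [0, thread_num_), so it always fits in the int that GetLoop is assumed to take:

    int64_t interceptor_id = 5000000000;  // may exceed INT_MAX
    int64_t thread_num_ = 8;              // small; fits in int
    // 0 <= interceptor_id % thread_num_ < thread_num_, so no truncation
    // can occur in the cast below.
    int loop_idx = static_cast<int>(interceptor_id % thread_num_);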