Commit
Implement reshard from s to r with same process_mesh
Showing 19 changed files with 459 additions and 11 deletions.
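In short: this commit adds an SToRReshardFunction that turns a DistTensor whose dims_mapping is sharded into a fully replicated one on the same one-dimensional process mesh, by all-gathering each rank's local shard through a new ReshardAllGatherFunctor. The three files rendered below are the core of the change; the other changed files are not rendered on this page.

A hypothetical illustration of the s-to-r (shard-to-replicate) transformation (shapes and ranks are made up for exposition):

// A [4, 8] tensor sharded on axis 0 over a 1-D mesh of two processes:
//   rank 0 local shard: rows 0-1, shape [2, 8]
//   rank 1 local shard: rows 2-3, shape [2, 8]
// After the s-to-r reshard, every rank holds the full [4, 8] tensor and the
// output dist_attr is fully replicated.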
paddle/phi/core/distributed/auto_parallel/reshard_all_gather_functor.cc (62 additions, 0 deletions)
@@ -0,0 +1,62 @@
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/phi/core/distributed/auto_parallel/reshard_all_gather_functor.h"

#include "paddle/phi/backends/all_context.h"
#include "paddle/phi/core/distributed/auto_parallel/reshard_utils.h"
#include "paddle/phi/core/visit_type.h"
#include "paddle/phi/infermeta/unary.h"
#include "paddle/phi/kernels/all_gather_kernel.h"

namespace phi {
namespace distributed {

DenseTensor ReshardAllGatherFunctor(DeviceContext* dev_ctx,
                                    const DenseTensor& input,
                                    const std::vector<int64_t>& process_ids) {
  DenseTensor out;

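  // Create (or fetch a cached) communication context that spans exactly the
  // given process_ids, and bind it to the device context so the all_gather
  // below runs over that group.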
  int64_t world_size = process_ids.size();
  auto* comm_context = CreateOrGetCommContext(*dev_ctx, process_ids);
  dev_ctx->SetCommContext(comm_context);

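  // Dispatch on the backend (CPU vs. GPU) and on the tensor's runtime dtype;
  // PD_VISIT_FLOATING_AND_INTEGRAL_TYPES instantiates the lambda once per
  // supported element type, with data_t bound to the concrete C++ type.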
  if (phi::CPUContext::classof(dev_ctx)) {
    PD_VISIT_FLOATING_AND_INTEGRAL_TYPES(
        input.dtype(), "AllGather", ([&] {
          AllGather<data_t>(static_cast<const CPUContext&>(*dev_ctx),
                            input,
                            world_size,
                            &out);
        }));
    return out;
  }
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  if (phi::GPUContext::classof(dev_ctx)) {
    PD_VISIT_FLOATING_AND_INTEGRAL_TYPES(
        input.dtype(), "AllGather", ([&] {
          AllGather<data_t>(static_cast<const GPUContext&>(*dev_ctx),
                            input,
                            world_size,
                            &out);
        }));
    return out;
  }
#endif
  PADDLE_THROW(phi::errors::Unimplemented(
      "The all_gather in reshard is only supported on CPU and GPU for now."));
}

}  // namespace distributed
}  // namespace phi
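For orientation, here is a minimal sketch of how the functor is meant to be called. Only the functor itself comes from this diff; the context and tensor setup are hypothetical, so the sketch is kept in comment form:

// Each of two ranks holds a [2, 8] shard of a [4, 8] tensor.
// phi::DeviceContext* dev_ctx = /* this rank's CPU or GPU context */;
// phi::DenseTensor local_shard = /* local shard, shape [2, 8] */;
// std::vector<int64_t> process_ids = {0, 1};
// phi::DenseTensor full = phi::distributed::ReshardAllGatherFunctor(
//     dev_ctx, local_shard, process_ids);
// // 'full' now holds both shards gathered along axis 0: shape [4, 8].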
paddle/phi/core/distributed/auto_parallel/reshard_all_gather_functor.h (31 additions, 0 deletions)
@@ -0,0 +1,31 @@
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <cstdint>
#include <vector>

namespace phi {
class DenseTensor;
class DeviceContext;

namespace distributed {

DenseTensor ReshardAllGatherFunctor(DeviceContext* dev_ctx,
                                    const DenseTensor& input,
                                    const std::vector<int64_t>& process_ids);

}  // namespace distributed
}  // namespace phi
paddle/phi/core/distributed/auto_parallel/s_to_r_reshard_function.cc (73 additions, 0 deletions)
@@ -0,0 +1,73 @@
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/phi/core/distributed/auto_parallel/s_to_r_reshard_function.h"

#include "glog/logging.h"
#include "paddle/phi/core/distributed/auto_parallel/dist_attr.h"
#include "paddle/phi/core/distributed/auto_parallel/dist_tensor.h"
#include "paddle/phi/core/distributed/auto_parallel/reshard_all_gather_functor.h"
#include "paddle/phi/core/distributed/auto_parallel/reshard_utils.h"
#include "paddle/phi/core/distributed/comm_context_manager.h"
#include "paddle/phi/core/distributed/store/tcp_store.h"

namespace phi {
namespace distributed {

bool SToRReshardFunction::IsSuitable(
    const DistTensor& in,
    const std::shared_ptr<TensorDistAttr>& out_dist_attr) {
  bool flag = true;
  const auto& in_dist_attr = in.dist_attr();

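  // The s-to-r function applies only when the input is sharded, the output
  // is fully replicated, and both live on the same 1-D process mesh.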
  const auto& in_dims_mapping = in_dist_attr->dims_mapping();
  const auto& out_dims_mapping = out_dist_attr->dims_mapping();

  flag &= IsDimsMappingShard(in_dims_mapping);
  flag &= IsDimsMappingReplicated(out_dims_mapping);

  const auto& in_process_mesh = in_dist_attr->process_mesh();
  const auto& out_process_mesh = out_dist_attr->process_mesh();

  flag &= (in_process_mesh.ndim() == 1);
  flag &= (out_process_mesh.ndim() == 1);
  flag &= (in_process_mesh == out_process_mesh);

  return flag;
}

std::shared_ptr<DistTensor> SToRReshardFunction::Eval(
    DeviceContext* dev_ctx,
    const DistTensor& in,
    const std::shared_ptr<TensorDistAttr>& out_dist_attr) {
  // TODO(liyurui): Only transferring shard(0) to replicate is supported for
  // now. A concat is needed to transfer shard(x) to replicate; that will be
  // supported later.
  const DenseTensor& in_physical_tensor_cur_rank = in.value();
  const auto& in_dist_attr = in.dist_attr();
  const auto& in_process_mesh = in_dist_attr->process_mesh();
  const auto& in_process_ids = in_process_mesh.process_ids();

  // Since the precondition ensures out_process_ids equals in_process_ids, the
  // participating process ids must equal either in_process_ids or
  // out_process_ids.
  DenseTensor out_all_gather = ReshardAllGatherFunctor(
      dev_ctx, in_physical_tensor_cur_rank, in_process_ids);

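  // Wrap the gathered dense tensor in a DistTensor that carries the
  // replicated dist_attr requested by the caller.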
  return std::make_shared<DistTensor>(
      std::make_shared<DenseTensor>(out_all_gather), out_dist_attr);
}

}  // namespace distributed
}  // namespace phi
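And a minimal sketch of the end-to-end flow. The reshard-function selection machinery is outside this diff, so the surrounding setup here is hypothetical and kept in comment form:

// DistTensor sharded_in = /* shard(0) on a 1-D process mesh */;
// std::shared_ptr<TensorDistAttr> replicated_attr =
//     /* same mesh, dims_mapping all -1 (replicated) */;
// SToRReshardFunction func;
// if (func.IsSuitable(sharded_in, replicated_attr)) {
//   std::shared_ptr<DistTensor> out =
//       func.Eval(dev_ctx, sharded_in, replicated_attr);
//   // out->value() is the full tensor, replicated on every rank of the mesh.
// }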