update dygraph auto parallel API interface. (#59059)
Co-authored-by: wuhuachao <wuhuachao@baidu.com>
1 parent cac0a03 · commit 33854f2
Showing 49 changed files with 581 additions and 809 deletions.
@@ -0,0 +1,89 @@
# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import cast

from paddle.base.core import Partial, Placement, ReduceType, Replicate, Shard

__all__ = ["ReduceType", "Placement", "Replicate", "Shard", "Partial"]


def to_placements(dim_map, mesh, partial_idx=[]):
    """
    Convert a dim_map to a list of placements.
    Args:
        dim_map(List[int]): a list of integers that gives, for each tensor dimension, the mesh dimension it is sharded on (-1 means not sharded).
        mesh(paddle.distributed.ProcessMesh): the `ProcessMesh` object that describes the Cartesian topology of the processes in use.
        partial_idx(List[int], Optional): a list of integers giving the device-mesh dimensions on which the DTensor has a pending (partial) sum.
    Returns:
        List[Placement]: a list of `paddle.distributed.Placement` objects.
    """
    placements = [Replicate() for _ in range(len(mesh.mesh.shape))]

    for s in partial_idx:
        placements[s] = Partial()

    for i, m in enumerate(dim_map):
        if m >= 0:
            p = placements[m]
            if p.is_shard():
                p = cast(Shard, p)
                raise Exception(
                    f"ProcessMesh dimension cannot be mapped to two dimensions of the same tensor: {i} and {p.get_dim()}."
                )
            elif p.is_partial():
                raise Exception(
                    f"ProcessMesh dimension {m} cannot be both shard and partial!"
                )
            placements[m] = Shard(i)

    return placements


def to_dim_map(placements, tensor_dims):
    """
    Convert a list of placements to a dim_map.
    Args:
        placements(List[Placement]): a list of `paddle.distributed.Placement` objects.
        tensor_dims(int): the number of dimensions of the dist_tensor.
    Returns:
        List[int]: a list of integers that gives, for each tensor dimension, the mesh dimension it is sharded on (-1 means not sharded).
    """
    dim_map = [-1] * tensor_dims
    for i, placement in enumerate(placements):
        if placement.is_shard():
            shard_dim = cast(Shard, placement).get_dim()
            if dim_map[shard_dim] > -1:
                raise Exception(
                    f"Tensor dim {shard_dim} is already sharded on mesh dim {dim_map[shard_dim]}."
                )

            dim_map[shard_dim] = i

    return dim_map


def get_shard_spec(mesh, placements, tensor_dims):
    """Get the shard_spec used to construct a DistAttr for the static-graph API."""
    dim_map = to_dim_map(placements, tensor_dims)
    mesh_dim_names = mesh.dim_names
    shard_spec = [None] * len(dim_map)
    for i, d in enumerate(dim_map):
        if d > -1:
            shard_spec[i] = mesh_dim_names[d]

    return shard_spec
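
Below is a minimal usage sketch (not part of the commit) showing the round trip between a dim_map and a list of placements. The package path of this helper module is not shown in the diff, so the `placement_type` import below is a placeholder assumption; `ProcessMesh` comes from `paddle.distributed`.

# Hedged usage sketch for to_placements / to_dim_map (not part of the commit).
# Assumption: the new file is importable as a module named `placement_type`;
# its real package path is not visible in this diff.
from paddle.distributed import ProcessMesh
from placement_type import to_dim_map, to_placements  # hypothetical import path

# A 2 x 2 process mesh with two named dimensions.
mesh = ProcessMesh([[0, 1], [2, 3]], dim_names=["dp", "mp"])

# Tensor dim 0 is sharded along mesh dim 0; tensor dim 1 is not sharded.
dim_map = [0, -1]

placements = to_placements(dim_map, mesh)
# placements[0] should be a Shard(0) placement, placements[1] a Replicate().

# Converting back for a 2-D tensor recovers the original dim_map.
assert to_dim_map(placements, 2) == dim_map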
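
A similar sketch for `get_shard_spec`, which maps placements to the mesh-dimension names used when building a DistAttr for the static-graph API. The `placement_type` import path is again a placeholder assumption; `Shard` and `Replicate` are imported from `paddle.base.core`, matching the imports in the committed file.

# Hedged usage sketch for get_shard_spec (not part of the commit).
# Assumption: the new file is importable as a module named `placement_type`.
from paddle.base.core import Replicate, Shard
from paddle.distributed import ProcessMesh
from placement_type import get_shard_spec  # hypothetical import path

mesh = ProcessMesh([[0, 1], [2, 3]], dim_names=["dp", "mp"])

# Shard tensor dim 0 along mesh dim 0 ("dp"), replicate everything else.
placements = [Shard(0), Replicate()]

# For a 2-D tensor this yields ["dp", None]: each entry is the name of the
# mesh dim the tensor dim is sharded on, or None if it is not sharded.
spec = get_shard_spec(mesh, placements, 2)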