[xdoctest][task 184-185] reformat example code with google style in `…distributed/auto_parallel/static/*` (PaddlePaddle#56666)

* [Doctest]fix No.184,185, test=docs_preview

* add env skip

* fix @staticmethod

* fix

* add xdoctest for v2

* fix
ooooo-create authored and BeingGod committed Sep 9, 2023
1 parent 64e0863 commit c8e8965
Showing 2 changed files with 82 additions and 69 deletions.
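
All reworked examples are prefixed with `# doctest: +REQUIRES(env:DISTRIBUTED)` (the "add env skip" commit above), so xdoctest still collects them but only executes them when the `DISTRIBUTED` environment variable is set. Below is a minimal sketch of exercising the updated docstrings with a plain xdoctest run, outside Paddle's own CI sample-code checker; the target module and the `command` value are illustrative assumptions.

import os
import xdoctest

# Satisfy the +REQUIRES(env:DISTRIBUTED) gate for this process, assuming the
# directive reads the variable from os.environ at run time; without it the
# gated examples are collected but reported as skipped.
os.environ["DISTRIBUTED"] = "1"

# Run every docstring example found in one of the touched modules (illustrative target).
xdoctest.doctest_module("paddle.distributed.auto_parallel.static.converter", command="all")
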
13 changes: 7 additions & 6 deletions python/paddle/distributed/auto_parallel/static/cluster_v2.py
@@ -58,14 +58,15 @@ class DeviceMesh(core.DeviceMesh):
Examples:
.. code-block:: python
- import paddle
- import paddle.distributed as dist
+ >>> # doctest: +REQUIRES(env:DISTRIBUTED)
+ >>> import paddle
+ >>> import paddle.distributed as dist
- paddle.enable_static()
+ >>> paddle.enable_static()
- mesh = dist.DeviceMesh([[2, 4, 5], [0, 1, 3]])
- assert mesh.shape == [2, 3]
- assert mesh.device_ids == [2, 4, 5, 0, 1, 3]
+ >>> mesh = dist.DeviceMesh([[2, 4, 5], [0, 1, 3]])
+ >>> assert mesh.shape == [2, 3]
+ >>> assert mesh.device_ids == [2, 4, 5, 0, 1, 3]
"""

138 changes: 75 additions & 63 deletions python/paddle/distributed/auto_parallel/static/converter.py
@@ -101,28 +101,30 @@ def convert(self, strict=True):
Examples:
.. code-block:: python
- import numpy as np
- complete_tensors = np.arange(4).reshape([2, 2])
- partitial_tensors = np.split(complete_tensors, 2, axis=0)
- name = "tmp_0"
- tensors_dict = {name: partitial_tensors}
- strategy_1 = {
-     name: {
-         "process_shape": [2],
-         "process_group": [0, 1],
-         "dims_mapping": [0, -1]
-     }
- }
- strategy_2 = {
-     name: {
-         "process_shape": [2],
-         "process_group": [0, 1],
-         "dims_mapping": [-1, -1]
-     }
- }
- converter = Converter(tensors_dict, strategy_1, strategy_2)
- result = converter.convert()
- # the result's value is equal to `complete_tensors`
+ >>> # doctest: +REQUIRES(env:DISTRIBUTED)
+ >>> import numpy as np
+ >>> from paddle.distributed.auto_parallel.static.converter import Converter
+ >>> complete_tensors = np.arange(4).reshape([2, 2])
+ >>> partitial_tensors = np.split(complete_tensors, 2, axis=0)
+ >>> name = "tmp_0"
+ >>> tensors_dict = {name: partitial_tensors}
+ >>> strategy_1 = {
+ ...     name: {
+ ...         "process_shape": [2],
+ ...         "process_group": [0, 1],
+ ...         "dims_mapping": [0, -1]
+ ...     }
+ ... }
+ >>> strategy_2 = {
+ ...     name: {
+ ...         "process_shape": [2],
+ ...         "process_group": [0, 1],
+ ...         "dims_mapping": [-1, -1]
+ ...     }
+ ... }
+ >>> converter = Converter(tensors_dict, strategy_1, strategy_2)
+ >>> result = converter.convert()
+ >>> # the result's value is equal to `complete_tensors`
"""
tensors_dict = {}
# the name which is in cur_process but not in pre_process
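
In the example above, strategy_1 (`dims_mapping [0, -1]`) says axis 0 of `tmp_0` is split across the 2-process group `[0, 1]`, while strategy_2 (`[-1, -1]`) says every process holds the complete tensor, which is why the converted result equals `complete_tensors`. A pure-NumPy sketch of that relationship, not a Paddle call:

import numpy as np

# Layout under strategy_1: each of the two ranks holds one row of the 2 x 2 tensor.
complete = np.arange(4).reshape([2, 2])
per_rank_pieces = np.split(complete, 2, axis=0)

# Layout under strategy_2: every rank holds the full tensor, i.e. the row
# pieces concatenated back along axis 0.
restored = np.concatenate(per_rank_pieces, axis=0)
assert (restored == complete).all()
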
@@ -352,13 +354,18 @@ def merge(partition_tensor_list, tensor, partition_index, complete_shape):
Examples:
.. code-block:: python
- import numpy as np
- partition_tensor_list = [(np.array([[[1.11, 1.12]]]), [[0,1],[0,1],[0,2]])]
- tensor = np.array([[[1.13, 1.14]]])
- partition_index = [[0,1],[0,1],[2,4]]
- _merge_tensor(partition_tensor_list, tensor, partition_index)
- # partition_tensor_list: [(np.array([[[1.11, 1.12, 1.13, 1.14]]]), [[0,1],[0,1],[0,4]])]
+ >>> # doctest: +REQUIRES(env:DISTRIBUTED)
+ >>> import numpy as np
+ >>> import paddle
+ >>> from paddle.distributed.auto_parallel.static.converter import Converter
+ >>> partition_tensor_list = [(np.array([[[1.11, 1.12]]]), [[0,1],[0,1],[0,2]])]
+ >>> tensor = np.array([[[1.13, 1.14]]])
+ >>> partition_index = [[0,1],[0,1],[2,4]]
+ >>> complete_shape = [3, 2]
+ >>> Converter.merge(partition_tensor_list, tensor, partition_index, complete_shape)
+ >>> print(partition_tensor_list)
+ [(array([[[1.11, 1.12, 1.13, 1.14]]]), [[0, 1], [0, 1], [0, 4]])]
"""
from .reshard import Resharder
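
The two partition indices in the merge example, `[[0,1],[0,1],[0,2]]` and `[[0,1],[0,1],[2,4]]`, touch only along the last axis, so merging amounts to concatenating on axis 2 and the recorded range grows to `[0, 4]`. A pure-NumPy restatement of that step, not the Paddle implementation:

import numpy as np

left = np.array([[[1.11, 1.12]]])    # covers indices [0, 2) on axis 2
right = np.array([[[1.13, 1.14]]])   # covers indices [2, 4) on axis 2
merged = np.concatenate([left, right], axis=2)
print(merged)   # [[[1.11 1.12 1.13 1.14]]]
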

@@ -416,16 +423,19 @@ def split(complete_tensor, partition_index_list, length):
Examples:
.. code-block:: python
- import numpy as np
- complete_tensor = np.array([[[1.11, 1.12, 1.13, 1.14, 1.15, 1.16]]])
- rank = 2
- complete_shape = [1, 1, 6]
- dims_mapping = [-1, -1, 0]
- process_shape = [3]
- process_group = [0, 1, 2]
- sliced_tensor_list = split(complete_tensor, [[], [], [2, 4]], 3)
- # [array([[[1.11, 1.12]]]), array([[[1.13, 1.14]]]), array([[[1.15, 1.16]]])]
+ >>> # doctest: +REQUIRES(env:DISTRIBUTED)
+ >>> import numpy as np
+ >>> from paddle.distributed.auto_parallel.static.converter import Converter
+ >>> complete_tensor = np.array([[[1.11, 1.12, 1.13, 1.14, 1.15, 1.16]]])
+ >>> rank = 2
+ >>> complete_shape = [1, 1, 6]
+ >>> dims_mapping = [-1, -1, 0]
+ >>> process_shape = [3]
+ >>> process_group = [0, 1, 2]
+ >>> sliced_tensor_list = Converter.split(complete_tensor, [[], [], [2, 4]], 3)
+ >>> print(sliced_tensor_list)
+ [array([[[1.11, 1.12]]]), array([[[1.13, 1.14]]]), array([[[1.15, 1.16]]])]
"""
sliced_tensor_list = []
axis = len(complete_tensor.shape) - length
@@ -453,15 +463,18 @@ def _get_split_indices(
Examples:
.. code-block:: python
- import numpy as np
- complete_tensor = np.array([[[1.11, 1.12, 1.13, 1.14, 1.15, 1.16]]])
- complete_shape = [1, 1, 6]
- dims_mapping = [-1, -1, 0]
- process_shape = [3]
- process_group = [0, 1, 2]
- index = _get_split_indices(complete_shape, dims_mapping, process_shape, process_group)
- # index: [[], [], [2, 4]]
+ >>> # doctest: +REQUIRES(env:DISTRIBUTED)
+ >>> import numpy as np
+ >>> from paddle.distributed.auto_parallel.static.utils import _get_split_indices
+ >>> complete_tensor = np.array([[[1.11, 1.12, 1.13, 1.14, 1.15, 1.16]]])
+ >>> complete_shape = [1, 1, 6]
+ >>> dims_mapping = [-1, -1, 0]
+ >>> process_shape = [3]
+ >>> process_group = [0, 1, 2]
+ >>> index = _get_split_indices(complete_shape, dims_mapping, process_shape, process_group)
+ >>> print(index)
+ [[], [], [2, 4]]
"""
from .reshard import Resharder
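
The returned `[[], [], [2, 4]]` means axes 0 and 1 are left whole and axis 2 is cut at offsets 2 and 4, which are exactly the cut points that `split` consumes. A quick NumPy check of the same cut, for illustration only:

import numpy as np

complete_tensor = np.array([[[1.11, 1.12, 1.13, 1.14, 1.15, 1.16]]])
pieces = np.split(complete_tensor, [2, 4], axis=2)   # cut axis 2 at offsets 2 and 4
print(pieces)
# [array([[[1.11, 1.12]]]), array([[[1.13, 1.14]]]), array([[[1.15, 1.16]]])]
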

@@ -502,21 +515,20 @@ def _get_sliced_index(
Examples:
.. code-block:: python
- import numpy as np
- complete_tensor = np.array([[[1.11, 1.12, 1.13, 1.14, 1.15, 1.16]]])
- rank = 2
- complete_shape = [1, 1, 6]
- dims_mapping = [-1, -1, 0]
- process_shape = [3]
- process_group = [0, 1, 2]
- slice_tensor = _slice_tensor(complete_tensor, [[], [], [2, 4]], 3)
- # slice_tensor:
- # [array([[[1.11, 1.12]]]), array([[[1.13, 1.14]]]), array([[[1.15, 1.16]]])]
- index = _get_sliced_index(rank, complete_shape, dims_mapping
-     process_shape, process_group)
- # index: 2
+ >>> # doctest: +REQUIRES(env:DISTRIBUTED)
+ >>> import numpy as np
+ >>> from paddle.distributed.auto_parallel.static.converter import Converter
+ >>> complete_tensor = np.array([[[1.11, 1.12, 1.13, 1.14, 1.15, 1.16]]])
+ >>> rank = 2
+ >>> complete_shape = [1, 1, 6]
+ >>> dims_mapping = [-1, -1, 0]
+ >>> process_shape = [3]
+ >>> process_group = [0, 1, 2]
+ >>> index = Converter._get_sliced_index(rank, complete_shape, dims_mapping,
+ ...     process_shape, process_group)
+ >>> print(index)
+ 2
"""
from .reshard import Resharder
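
With `dims_mapping [-1, -1, 0]` and `process_shape [3]`, only the last axis is partitioned, and rank 2 sits at position 2 in that 3-process dimension, hence the sliced index 2. A hand-worked NumPy check, not a Paddle call:

import numpy as np

complete_tensor = np.array([[[1.11, 1.12, 1.13, 1.14, 1.15, 1.16]]])
slices = np.split(complete_tensor, 3, axis=2)   # three equal slices along axis 2
rank = 2
print(slices[rank])   # [[[1.15 1.16]]], i.e. the piece at sliced index 2
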

