[xdoctest] reformat example code with google style in 211,281,308,323 (PaddlePaddle#57301)

* fix sample codes

* fix code-style

* fix bug

* fix bug
longranger2 authored Sep 22, 2023
1 parent 81579d0 commit a828804
Showing 4 changed files with 600 additions and 458 deletions.
103 changes: 51 additions & 52 deletions paddle/fluid/pybind/imperative.cc
@@ -1027,15 +1027,15 @@ void BindImperative(py::module *m_ptr) {
shape with the input numpy array.
Examples:
.. code-block:: python
.. code-block:: python
# required: gpu
import numpy as np
import paddle
>>> # doctest: +REQUIRES(env:GPU)
>>> import numpy as np
>>> import paddle
>>> paddle.device.set_device('gpu')
data = np.random.randint(10, size=(3, 4))
tensor = paddle.base.core.to_uva_tensor(data)
print(tensor)
>>> data = np.random.randint(10, size=(3, 4))
>>> tensor = paddle.base.core.to_uva_tensor(data)
)DOC");

#endif
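For context on the target style in this PR: xdoctest collects `>>>`-prefixed, Google-style examples and honors directives such as `# doctest: +REQUIRES(env:GPU)` to skip hardware-specific snippets. A minimal sketch of running such an example, assuming `xdoctest` is installed (the `demo` function and its docstring are illustrative, not part of this commit):

```python
# Minimal sketch: collecting and running a Google-style doctest with xdoctest.
# Assumption: `pip install xdoctest`; the demo() docstring below is illustrative.
import xdoctest

def demo():
    """
    Examples:
        .. code-block:: python

            >>> # doctest: +REQUIRES(env:GPU)
            >>> x = 1 + 1
            >>> print(x)
            2
    """

if __name__ == '__main__':
    # Run every collected example in this module; the snippet above is
    # skipped unless the GPU environment flag is set.
    xdoctest.doctest_module(__name__, command='all')
```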
@@ -1161,29 +1161,29 @@ void BindImperative(py::module *m_ptr) {
should be one-dimensional.
Examples:
.. code-block:: python
import numpy as np
import paddle
from paddle.base import core
from paddle.device import cuda
if core.is_compiled_with_cuda():
src = paddle.rand(shape=[100, 50, 50])
dst = paddle.emtpy(shape=[200, 50, 50]).pin_memory()
offset = paddle.to_tensor(
np.array([0, 60], dtype="int64"), place=paddle.CPUPlace())
count = paddle.to_tensor(
np.array([40, 60], dtype="int64"), place=paddle.CPUPlace())
stream = cuda.Stream()
with cuda.stream_guard(stream):
core.async_write(src, dst, offset, count)
offset_a = paddle.gather(dst, paddle.to_tensor(np.arange(0, 40)))
offset_b = paddle.gather(dst, paddle.to_tensor(np.arange(60, 120)))
offset_array = paddle.concat([offset_a, offset_b], axis=0)
print(np.allclose(src.numpy(), offset_array.numpy())) # True
.. code-block:: python
>>> import numpy as np
>>> import paddle
>>> from paddle.base import core
>>> from paddle.device import cuda
>>> if core.is_compiled_with_cuda():
... src = paddle.rand(shape=[100, 50, 50])
... dst = paddle.empty(shape=[200, 50, 50]).pin_memory()
... offset = paddle.to_tensor(
... np.array([0, 60], dtype="int64"), place=paddle.CPUPlace())
... count = paddle.to_tensor(
... np.array([40, 60], dtype="int64"), place=paddle.CPUPlace())
...
... stream = cuda.Stream()
... with cuda.stream_guard(stream):
... core.eager.async_write(src, dst, offset, count)
...
... offset_a = paddle.gather(dst, paddle.to_tensor(np.arange(0, 40)))
... offset_b = paddle.gather(dst, paddle.to_tensor(np.arange(60, 120)))
... offset_array = paddle.concat([offset_a, offset_b], axis=0)
... print(np.allclose(src.numpy(), offset_array.numpy()))
True
)DOC");

m.def(
@@ -1393,28 +1393,27 @@ void BindImperative(py::module *m_ptr) {
should be one-dimensional.
Examples:
.. code-block:: python
import numpy as np
import paddle
from paddle.base import core
from paddle.device import cuda
if core.is_compiled_with_cuda():
src = paddle.rand(shape=[100, 50, 50], dtype="float32").pin_memory()
dst = paddle.empty(shape=[100, 50, 50], dtype="float32")
offset = paddle.to_tensor(
np.array([0, 60], dtype="int64"), place=paddle.CPUPlace())
count = paddle.to_tensor(
np.array([40, 60], dtype="int64"), place=paddle.CPUPlace())
buffer = paddle.empty(shape=[50, 50, 50], dtype="float32").pin_memory()
index = paddle.to_tensor(
np.array([1, 3, 5, 7, 9], dtype="int64")).cpu()
stream = cuda.Stream()
with cuda.stream_guard(stream):
core.async_read(src, dst, index, buffer, offset, count)
.. code-block:: python
>>> import numpy as np
>>> import paddle
>>> from paddle.base import core
>>> from paddle.device import cuda
...
>>> if core.is_compiled_with_cuda():
... src = paddle.rand(shape=[100, 50, 50], dtype="float32").pin_memory()
... dst = paddle.empty(shape=[100, 50, 50], dtype="float32")
... offset = paddle.to_tensor(
... np.array([0, 60], dtype="int64"), place=paddle.CPUPlace())
... count = paddle.to_tensor(
... np.array([40, 60], dtype="int64"), place=paddle.CPUPlace())
... buffer = paddle.empty(shape=[50, 50, 50], dtype="float32").pin_memory()
... index = paddle.to_tensor(
... np.array([1, 3, 5, 7, 9], dtype="int64")).cpu()
...
... stream = cuda.Stream()
... with cuda.stream_guard(stream):
... core.eager.async_read(src, dst, index, buffer, offset, count)
)DOC");
#endif
}
2 changes: 1 addition & 1 deletion python/paddle/distributed/communication/stream/send.py
@@ -94,7 +94,7 @@ def send(tensor, dst=0, group=None, sync_op=True, use_calc_stream=False):
>>> task.wait()
>>> out = data.numpy()
>>> print(out)
>>> # [[4, 5, 6], [4, 5, 6]] (2 GPUs)
[[4, 5, 6], [4, 5, 6]]
"""
if _warn_cur_rank_not_in_group(group):
return
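The hunk above shows only the tail of the sender's example; for completeness, a hedged sketch of the matching receiver side, assuming two GPUs launched with `python -m paddle.distributed.launch` (it mirrors the `paddle.distributed.stream` send/recv API; the placeholder tensor on rank 1 is illustrative):

```python
# Hedged sketch: pairing stream.send with stream.recv across two ranks.
import paddle
import paddle.distributed as dist

dist.init_parallel_env()
if dist.get_rank() == 0:
    data = paddle.to_tensor([[4, 5, 6], [4, 5, 6]])
    task = dist.stream.send(data, dst=1, sync_op=False)
else:
    data = paddle.to_tensor([[1, 2, 3], [1, 2, 3]])  # placeholder, overwritten
    task = dist.stream.recv(data, src=0, sync_op=False)
task.wait()
print(data.numpy())  # both ranks now hold [[4, 5, 6], [4, 5, 6]]
```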
202 changes: 104 additions & 98 deletions python/paddle/distributed/io.py
@@ -37,16 +37,17 @@ def _load_distributed_persistables(executor, dirname, main_program=None):
Examples:
.. code-block:: python
import paddle
import paddle.base as base
paddle.enable_static()
exe = base.Executor(base.CPUPlace())
param_path = "./my_paddle_model"
t = paddle.distributed.transpiler.DistributeTranspiler()
t.transpile(...)
pserver_prog = t.get_pserver_program(...)
_load_distributed_persistables(executor=exe, dirname=param_path, main_program=pserver_prog)
>>> # doctest: +REQUIRES(env:DISTRIBUTED)
>>> import paddle
>>> import paddle.base as base
>>> paddle.enable_static()
>>> exe = base.Executor(base.CPUPlace())
>>> param_path = "./my_paddle_model"
>>> t = paddle.distributed.transpiler.DistributeTranspiler()
>>> t.transpile(...)
>>> pserver_prog = t.get_pserver_program(...)
>>> _load_distributed_persistables(executor=exe, dirname=param_path, main_program=pserver_prog)
"""

def __is_distributed_part_var(varname):
@@ -160,15 +161,15 @@ def load_persistables(executor, dirname, main_program=None, filename=None):
Examples:
.. code-block:: python
import paddle
import paddle.base as base
>>> import paddle
>>> import paddle.base as base
paddle.enable_static()
exe = base.Executor(base.CPUPlace())
param_path = "./my_paddle_model"
prog = base.default_main_program()
paddle.distributed.io.load_persistables(executor=exe, dirname=param_path,
main_program=None)
>>> paddle.enable_static()
>>> exe = base.Executor(base.CPUPlace())
>>> param_path = "./my_paddle_model"
>>> prog = base.default_main_program()
>>> paddle.distributed.io.load_persistables(executor=exe, dirname=param_path,
... main_program=None)
"""

if main_program and main_program._is_distributed:
@@ -207,16 +208,17 @@ def _save_distributed_persistables(executor, dirname, main_program):
Examples:
.. code-block:: python
import paddle
import paddle
paddle.enable_static()
exe = paddle.static.Executor(paddle.CPUPlace())
param_path = "./my_paddle_model"
t = paddle.distributed.transpiler.DistributeTranspiler()
t.transpile(...)
train_program = t.get_trainer_program()
_save_distributed_persistables(executor=exe, dirname=param_path, main_program=train_program)
>>> # doctest: +REQUIRES(env:DISTRIBUTED)
>>> import paddle
>>> paddle.enable_static()
>>> exe = paddle.static.Executor(paddle.CPUPlace())
>>> param_path = "./my_paddle_model"
>>> t = paddle.distributed.transpiler.DistributeTranspiler()
>>> t.transpile(...)
>>> train_program = t.get_trainer_program()
>>> _save_distributed_persistables(executor=exe, dirname=param_path, main_program=train_program)
"""

def __save_remote_params(executor, dirname, remote_params_map):
@@ -366,12 +368,16 @@ def is_persistable(var):
Examples:
.. code-block:: python
import paddle
import paddle.base as base
paddle.enable_static()
param = base.default_main_program().global_block().var('fc.b')
res = base.io.is_persistable(param)
>>> import paddle
>>> paddle.enable_static()
>>> image = paddle.static.data(
... name='image', shape=[None, 28], dtype='float32')
>>> bias_attr = paddle.ParamAttr('fc.b')
>>> fc = paddle.static.nn.fc(image, size=10, bias_attr=bias_attr)
>>> param = paddle.static.default_main_program().global_block().var('fc.b')
>>> res = paddle.distributed.io.is_persistable(param)
"""
if (
var.desc.type() == core.VarDesc.VarType.FEED_MINIBATCH
@@ -420,24 +426,24 @@ def save_persistables(executor, dirname, main_program=None, filename=None):
Examples:
.. code-block:: python
import paddle
paddle.enable_static()
dir_path = "./my_paddle_model"
file_name = "persistables"
image = paddle.static..data(name='img', shape=[None, 28, 28], dtype='float32')
label = paddle.static.data(name='label', shape=[None, 1], dtype='int64')
feeder = paddle.static.DataFeeder(feed_list=[image, label], place=paddle.CPUPlace())
predict = paddle.static.nn.fc(x=image, size=10, activation='softmax')
loss = paddle.nn.functional.cross_entropy(input=predict, label=label)
avg_loss = paddle.mean(loss)
exe = paddle.static.Executor(paddle.CPUPlace())
exe.run(paddle.static.default_startup_program())
paddle.distributed.io.save_persistables(executor=exe, dirname=dir_path, filename=file_name)
# The persistables variables weights and bias in the fc layer of the network
# are going to be saved in the same file named "persistables" in the path
# "./my_paddle_model"
>>> import paddle
>>> paddle.enable_static()
>>> dir_path = "./my_paddle_model"
>>> file_name = "persistables"
>>> image = paddle.static.data(name='img', shape=[None, 28, 28], dtype='float32')
>>> label = paddle.static.data(name='label', shape=[None, 1], dtype='int64')
>>> feeder = paddle.base.DataFeeder(feed_list=[image, label], place=paddle.CPUPlace())
>>> predict = paddle.static.nn.fc(x=image, size=10, activation='softmax')
>>> loss = paddle.nn.functional.cross_entropy(input=predict, label=label)
>>> avg_loss = paddle.mean(loss)
>>> exe = paddle.static.Executor(paddle.CPUPlace())
>>> exe.run(paddle.static.default_startup_program())
>>> paddle.distributed.io.save_persistables(executor=exe, dirname=dir_path, filename=file_name)
>>> # The persistable variables (the weights and bias of the fc layer)
>>> # are going to be saved in a single file named "persistables" in the
>>> # path "./my_paddle_model".
"""
if main_program and main_program._is_distributed:
return _save_distributed_persistables(
@@ -504,53 +510,53 @@ def load_inference_model_distributed(
Examples:
.. code-block:: python
import paddle
import paddle.base as base
import numpy as np
paddle.enable_static()
# Build the model
main_prog = base.Program()
startup_prog = base.Program()
with base.program_guard(main_prog, startup_prog):
data = base.layers.data(name="img", shape=[64, 784], append_batch_size=False)
w = paddle.create_parameter(shape=[784, 200], dtype='float32')
b = paddle.create_parameter(shape=[200], dtype='float32')
hidden_w = paddle.matmul(x=data, y=w)
hidden_b = base.layers.elementwise_add(hidden_w, b)
place = base.CPUPlace()
exe = base.Executor(place)
exe.run(startup_prog)
# Save the inference model
path = "./infer_model"
base.io.save_inference_model(dirname=path, feeded_var_names=['img'],
target_vars=[hidden_b], executor=exe, main_program=main_prog)
# Demo one. Not need to set the distributed look up table, because the
# training doesn't use a distributed look up table.
[inference_program, feed_target_names, fetch_targets] = (
paddle.distributed.io.load_inference_model_distributed(dirname=path, executor=exe))
tensor_img = np.array(np.random.random((1, 64, 784)), dtype=np.float32)
results = exe.run(inference_program,
feed={feed_target_names[0]: tensor_img},
fetch_list=fetch_targets)
# Demo two. If the training uses a distributed look up table, the pserver
# endpoints list should be supported when loading the inference model.
# The below is just an example.
endpoints = ["127.0.0.1:2023","127.0.0.1:2024"]
[dist_inference_program, dist_feed_target_names, dist_fetch_targets] = (
paddle.distributed.io.load_inference_model_distributed(dirname=path,
executor=exe,
pserver_endpoints=endpoints))
# In this example, the inference program was saved in the file
# "./infer_model/__model__" and parameters were saved in
# separate files under the directory "./infer_model".
# By the inference program, feed_target_names and
# fetch_targets, we can use an executor to run the inference
# program for getting the inference result.
>>> import paddle
>>> import paddle.base as base
>>> import numpy as np
>>> paddle.enable_static()
>>> # Build the model
>>> main_prog = paddle.static.Program()
>>> startup_prog = paddle.static.Program()
>>> with paddle.static.program_guard(main_prog, startup_prog):
... data = paddle.static.data(name="img", shape=[64, 784], dtype='float32')
... w = paddle.create_parameter(shape=[784, 200], dtype='float32')
... b = paddle.create_parameter(shape=[200], dtype='float32')
... hidden_w = paddle.matmul(x=data, y=w)
... hidden_b = base.layers.elementwise_add(hidden_w, b)
>>> place = base.CPUPlace()
>>> exe = base.Executor(place)
>>> exe.run(startup_prog)
>>> # Save the inference model
>>> path = "./infer_model"
>>> base.io.save_inference_model(dirname=path, feeded_var_names=['img'],
... target_vars=[hidden_b], executor=exe, main_program=main_prog)
...
>>> # Demo one. Not need to set the distributed look up table, because the
>>> # training doesn't use a distributed look up table.
>>> [inference_program, feed_target_names, fetch_targets] = (
... paddle.distributed.io.load_inference_model_distributed(dirname=path, executor=exe))
>>> tensor_img = np.array(np.random.random((1, 64, 784)), dtype=np.float32)
>>> results = exe.run(inference_program,
... feed={feed_target_names[0]: tensor_img},
... fetch_list=fetch_targets)
...
>>> # Demo two. If the training uses a distributed look up table, the pserver
>>> # endpoints list should be supported when loading the inference model.
>>> # The below is just an example.
>>> endpoints = ["127.0.0.1:2023","127.0.0.1:2024"]
>>> [dist_inference_program, dist_feed_target_names, dist_fetch_targets] = (
... paddle.distributed.io.load_inference_model_distributed(dirname=path,
... executor=exe,
... pserver_endpoints=endpoints))
...
>>> # In this example, the inference program was saved in the file
>>> # "./infer_model/__model__" and parameters were saved in
>>> # separate files under the directory "./infer_model".
>>> # By the inference program, feed_target_names and
>>> # fetch_targets, we can use an executor to run the inference
>>> # program for getting the inference result.
"""
load_from_memory = False
if dirname is not None:
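Taken together, the save and load APIs documented in this file form a round trip; a minimal sketch under static graph mode on CPU, using only calls already shown in the docstrings above (the directory name is illustrative):

```python
# Hedged sketch: save_persistables followed by load_persistables.
import paddle

paddle.enable_static()
exe = paddle.static.Executor(paddle.CPUPlace())

image = paddle.static.data(name='img', shape=[None, 28, 28], dtype='float32')
predict = paddle.static.nn.fc(x=image, size=10, activation='softmax')
exe.run(paddle.static.default_startup_program())

# Persist the fc layer's weights and bias under "./my_paddle_model" ...
paddle.distributed.io.save_persistables(executor=exe, dirname="./my_paddle_model")
# ... then restore them into the same program in a later session.
paddle.distributed.io.load_persistables(executor=exe, dirname="./my_paddle_model")
```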