From a82880404f229ae9c3b844fb4dccff7c70232d45 Mon Sep 17 00:00:00 2001
From: LoneRanger <836253168@qq.com>
Date: Fri, 22 Sep 2023 10:18:14 +0800
Subject: [PATCH] [xdoctest] reformat example code with google style in 211,281,308,323 (#57301)

* fix sample codes

* fix code-style

* fix bug

* fix bug
---
 paddle/fluid/pybind/imperative.cc             | 103 ++-
 .../distributed/communication/stream/send.py  |   2 +-
 python/paddle/distributed/io.py               | 202 ++--
 python/paddle/tensor/random.py                | 751 +++++++++++-------
 4 files changed, 600 insertions(+), 458 deletions(-)

diff --git a/paddle/fluid/pybind/imperative.cc b/paddle/fluid/pybind/imperative.cc
index 295dffd53af422..80e6de07919611 100644
--- a/paddle/fluid/pybind/imperative.cc
+++ b/paddle/fluid/pybind/imperative.cc
@@ -1027,15 +1027,15 @@ void BindImperative(py::module *m_ptr) {
            shape with the input numpy array.
 
            Examples:
-              .. code-block:: python
+              .. code-block:: python
 
-                # required: gpu
-                import numpy as np
-                import paddle
+                >>> # doctest: +REQUIRES(env:GPU)
+                >>> import numpy as np
+                >>> import paddle
+                >>> paddle.device.set_device('gpu')
 
-                data = np.random.randint(10, size=(3, 4))
-                tensor = paddle.base.core.to_uva_tensor(data)
-                print(tensor)
+                >>> data = np.random.randint(10, size=(3, 4))
+                >>> tensor = paddle.base.core.to_uva_tensor(data)
        )DOC");
 
 #endif
@@ -1161,29 +1161,29 @@ void BindImperative(py::module *m_ptr) {
            should be one-dimensional.
 
            Examples:
-              .. code-block:: python
-
-                import numpy as np
-                import paddle
-                from paddle.base import core
-                from paddle.device import cuda
-
-                if core.is_compiled_with_cuda():
-                    src = paddle.rand(shape=[100, 50, 50])
-                    dst = paddle.emtpy(shape=[200, 50, 50]).pin_memory()
-                    offset = paddle.to_tensor(
-                        np.array([0, 60], dtype="int64"), place=paddle.CPUPlace())
-                    count = paddle.to_tensor(
-                        np.array([40, 60], dtype="int64"), place=paddle.CPUPlace())
-
-                    stream = cuda.Stream()
-                    with cuda.stream_guard(stream):
-                        core.async_write(src, dst, offset, count)
-
-                    offset_a = paddle.gather(dst, paddle.to_tensor(np.arange(0, 40)))
-                    offset_b = paddle.gather(dst, paddle.to_tensor(np.arange(60, 120)))
-                    offset_array = paddle.concat([offset_a, offset_b], axis=0)
-                    print(np.allclose(src.numpy(), offset_array.numpy())) # True
+              .. code-block:: python
+
+                >>> import numpy as np
+                >>> import paddle
+                >>> from paddle.base import core
+                >>> from paddle.device import cuda
+                >>> if core.is_compiled_with_cuda():
+                ...     src = paddle.rand(shape=[100, 50, 50])
+                ...     dst = paddle.empty(shape=[200, 50, 50]).pin_memory()
+                ...     offset = paddle.to_tensor(
+                ...         np.array([0, 60], dtype="int64"), place=paddle.CPUPlace())
+                ...     count = paddle.to_tensor(
+                ...         np.array([40, 60], dtype="int64"), place=paddle.CPUPlace())
+                ...
+                ...     stream = cuda.Stream()
+                ...     with cuda.stream_guard(stream):
+                ...         core.eager.async_write(src, dst, offset, count)
+                ...
+                ...     offset_a = paddle.gather(dst, paddle.to_tensor(np.arange(0, 40)))
+                ...     offset_b = paddle.gather(dst, paddle.to_tensor(np.arange(60, 120)))
+                ...     offset_array = paddle.concat([offset_a, offset_b], axis=0)
+                ...     print(np.allclose(src.numpy(), offset_array.numpy()))
+                True
        )DOC");
 
     m.def(
@@ -1393,28 +1393,27 @@ void BindImperative(py::module *m_ptr) {
            should be one-dimensional.
 
            Examples:
-              .. code-block:: python
-
-                import numpy as np
-                import paddle
-                from paddle.base import core
-                from paddle.device import cuda
-
-                if core.is_compiled_with_cuda():
-                    src = paddle.rand(shape=[100, 50, 50], dtype="float32").pin_memory()
-                    dst = paddle.empty(shape=[100, 50, 50], dtype="float32")
-                    offset = paddle.to_tensor(
-                        np.array([0, 60], dtype="int64"), place=paddle.CPUPlace())
-                    count = paddle.to_tensor(
-                        np.array([40, 60], dtype="int64"), place=paddle.CPUPlace())
-                    buffer = paddle.empty(shape=[50, 50, 50], dtype="float32").pin_memory()
-                    index = paddle.to_tensor(
-                        np.array([1, 3, 5, 7, 9], dtype="int64")).cpu()
-
-                    stream = cuda.Stream()
-                    with cuda.stream_guard(stream):
-                        core.async_read(src, dst, index, buffer, offset, count)
-
+              .. code-block:: python
+
+                >>> import numpy as np
+                >>> import paddle
+                >>> from paddle.base import core
+                >>> from paddle.device import cuda
+                ...
+                >>> if core.is_compiled_with_cuda():
+                ...     src = paddle.rand(shape=[100, 50, 50], dtype="float32").pin_memory()
+                ...     dst = paddle.empty(shape=[100, 50, 50], dtype="float32")
+                ...     offset = paddle.to_tensor(
+                ...         np.array([0, 60], dtype="int64"), place=paddle.CPUPlace())
+                ...     count = paddle.to_tensor(
+                ...         np.array([40, 60], dtype="int64"), place=paddle.CPUPlace())
+                ...     buffer = paddle.empty(shape=[50, 50, 50], dtype="float32").pin_memory()
+                ...     index = paddle.to_tensor(
+                ...         np.array([1, 3, 5, 7, 9], dtype="int64")).cpu()
+                ...
+                ...     stream = cuda.Stream()
+                ...     with cuda.stream_guard(stream):
+                ...         core.eager.async_read(src, dst, index, buffer, offset, count)
        )DOC");
 #endif
 }
diff --git a/python/paddle/distributed/communication/stream/send.py b/python/paddle/distributed/communication/stream/send.py
index c04ba6b5736b71..2013c619f278fe 100644
--- a/python/paddle/distributed/communication/stream/send.py
+++ b/python/paddle/distributed/communication/stream/send.py
@@ -94,7 +94,7 @@ def send(tensor, dst=0, group=None, sync_op=True, use_calc_stream=False):
             >>> task.wait()
             >>> out = data.numpy()
             >>> print(out)
-            >>> # [[4, 5, 6], [4, 5, 6]] (2 GPUs)
+            [[4, 5, 6], [4, 5, 6]]
     """
     if _warn_cur_rank_not_in_group(group):
         return
diff --git a/python/paddle/distributed/io.py b/python/paddle/distributed/io.py
index 69f6e42bf19543..8b104bd770a96d 100644
--- a/python/paddle/distributed/io.py
+++ b/python/paddle/distributed/io.py
@@ -37,16 +37,17 @@ def _load_distributed_persistables(executor, dirname, main_program=None):
     Examples:
         .. code-block:: python
 
-            import paddle
-            import paddle.base as base
-
-            paddle.enable_static()
-            exe = base.Executor(base.CPUPlace())
-            param_path = "./my_paddle_model"
-            t = paddle.distributed.transpiler.DistributeTranspiler()
-            t.transpile(...)
-            pserver_prog = t.get_pserver_program(...)
-            _load_distributed_persistables(executor=exe, dirname=param_path, main_program=pserver_prog)
+            >>> # doctest: +REQUIRES(env:DISTRIBUTED)
+            >>> import paddle
+            >>> import paddle.base as base
+
+            >>> paddle.enable_static()
+            >>> exe = base.Executor(base.CPUPlace())
+            >>> param_path = "./my_paddle_model"
+            >>> t = paddle.distributed.transpiler.DistributeTranspiler()
+            >>> t.transpile(...)
+            >>> pserver_prog = t.get_pserver_program(...)
+            >>> _load_distributed_persistables(executor=exe, dirname=param_path, main_program=pserver_prog)
     """
 
     def __is_distributed_part_var(varname):
@@ -160,15 +161,15 @@ def load_persistables(executor, dirname, main_program=None, filename=None):
     Examples:
         .. code-block:: python
 
-            import paddle
-            import paddle.base as base
+            >>> import paddle
+            >>> import paddle.base as base
 
-            paddle.enable_static()
-            exe = base.Executor(base.CPUPlace())
-            param_path = "./my_paddle_model"
-            prog = base.default_main_program()
-            paddle.distributed.io.load_persistables(executor=exe, dirname=param_path,
-                                       main_program=None)
+            >>> paddle.enable_static()
+            >>> exe = base.Executor(base.CPUPlace())
+            >>> param_path = "./my_paddle_model"
+            >>> prog = base.default_main_program()
+            >>> paddle.distributed.io.load_persistables(executor=exe, dirname=param_path,
+            ...                                         main_program=None)
     """
 
     if main_program and main_program._is_distributed:
@@ -207,16 +208,16 @@ def _save_distributed_persistables(executor, dirname, main_program):
     Examples:
         .. code-block:: python
 
-            import paddle
-            import paddle
-
-            paddle.enable_static()
-            exe = paddle.static.Executor(paddle.CPUPlace())
-            param_path = "./my_paddle_model"
-            t = paddle.distributed.transpiler.DistributeTranspiler()
-            t.transpile(...)
-            train_program = t.get_trainer_program()
-            _save_distributed_persistables(executor=exe, dirname=param_path, main_program=train_program)
+            >>> # doctest: +REQUIRES(env:DISTRIBUTED)
+            >>> import paddle
+
+            >>> paddle.enable_static()
+            >>> exe = paddle.static.Executor(paddle.CPUPlace())
+            >>> param_path = "./my_paddle_model"
+            >>> t = paddle.distributed.transpiler.DistributeTranspiler()
+            >>> t.transpile(...)
+            >>> train_program = t.get_trainer_program()
+            >>> _save_distributed_persistables(executor=exe, dirname=param_path, main_program=train_program)
     """
 
     def __save_remote_params(executor, dirname, remote_params_map):
@@ -366,12 +367,16 @@ def is_persistable(var):
     Examples:
         .. code-block:: python
 
-            import paddle
-            import paddle.base as base
-            paddle.enable_static()
-            param = base.default_main_program().global_block().var('fc.b')
-            res = base.io.is_persistable(param)
+            >>> import paddle
+            >>> paddle.enable_static()
+            >>> image = paddle.static.data(
+            ...     name='image', shape=[None, 28], dtype='float32')
+            >>> bias_attr = paddle.ParamAttr('fc.b')
+            >>> fc = paddle.static.nn.fc(image, size=10, bias_attr=bias_attr)
+            >>> param = paddle.static.default_main_program().global_block().var('fc.b')
+            >>> res = paddle.distributed.io.is_persistable(param)
+
     """
     if (
         var.desc.type() == core.VarDesc.VarType.FEED_MINIBATCH
@@ -420,24 +425,24 @@ def save_persistables(executor, dirname, main_program=None, filename=None):
     Examples:
         .. code-block:: python
 
-            import paddle
-
-            paddle.enable_static()
-            dir_path = "./my_paddle_model"
-            file_name = "persistables"
-            image = paddle.static..data(name='img', shape=[None, 28, 28], dtype='float32')
-            label = paddle.static.data(name='label', shape=[None, 1], dtype='int64')
-            feeder = paddle.static.DataFeeder(feed_list=[image, label], place=paddle.CPUPlace())
-
-            predict = paddle.static.nn.fc(x=image, size=10, activation='softmax')
-            loss = paddle.nn.functional.cross_entropy(input=predict, label=label)
-            avg_loss = paddle.mean(loss)
-            exe = paddle.static.Executor(paddle.CPUPlace())
-            exe.run(paddle.static.default_startup_program())
-            paddle.distributed.io.save_persistables(executor=exe, dirname=dir_path, filename=file_name)
-            # The persistables variables weights and bias in the fc layer of the network
-            # are going to be saved in the same file named "persistables" in the path
-            # "./my_paddle_model"
+            >>> import paddle
+
+            >>> paddle.enable_static()
+            >>> dir_path = "./my_paddle_model"
+            >>> file_name = "persistables"
+            >>> image = paddle.static.data(name='img', shape=[None, 28, 28], dtype='float32')
+            >>> label = paddle.static.data(name='label', shape=[None, 1], dtype='int64')
+            >>> feeder = paddle.base.DataFeeder(feed_list=[image, label], place=paddle.CPUPlace())
 
+            >>> predict = paddle.static.nn.fc(x=image, size=10, activation='softmax')
+            >>> loss = paddle.nn.functional.cross_entropy(input=predict, label=label)
+            >>> avg_loss = paddle.mean(loss)
+            >>> exe = paddle.static.Executor(paddle.CPUPlace())
+            >>> exe.run(paddle.static.default_startup_program())
+            >>> paddle.distributed.io.save_persistables(executor=exe, dirname=dir_path, filename=file_name)
+            >>> # The persistable variables (weights and bias) in the fc layer of the
+            >>> # network are going to be saved in the same file named "persistables"
+            >>> # in the path "./my_paddle_model"
     """
     if main_program and main_program._is_distributed:
         return _save_distributed_persistables(
@@ -504,53 +509,53 @@ def load_inference_model_distributed(
     Examples:
         .. code-block:: python
 
-            import paddle
-            import paddle.base as base
-            import numpy as np
-
-            paddle.enable_static()
-            # Build the model
-            main_prog = base.Program()
-            startup_prog = base.Program()
-            with base.program_guard(main_prog, startup_prog):
-                data = base.layers.data(name="img", shape=[64, 784], append_batch_size=False)
-                w = paddle.create_parameter(shape=[784, 200], dtype='float32')
-                b = paddle.create_parameter(shape=[200], dtype='float32')
-                hidden_w = paddle.matmul(x=data, y=w)
-                hidden_b = base.layers.elementwise_add(hidden_w, b)
-            place = base.CPUPlace()
-            exe = base.Executor(place)
-            exe.run(startup_prog)
-
-            # Save the inference model
-            path = "./infer_model"
-            base.io.save_inference_model(dirname=path, feeded_var_names=['img'],
-                         target_vars=[hidden_b], executor=exe, main_program=main_prog)
-
-            # Demo one. Not need to set the distributed look up table, because the
-            # training doesn't use a distributed look up table.
-            [inference_program, feed_target_names, fetch_targets] = (
-                paddle.distributed.io.load_inference_model_distributed(dirname=path, executor=exe))
-            tensor_img = np.array(np.random.random((1, 64, 784)), dtype=np.float32)
-            results = exe.run(inference_program,
-                              feed={feed_target_names[0]: tensor_img},
-                              fetch_list=fetch_targets)
-
-            # Demo two. If the training uses a distributed look up table, the pserver
-            # endpoints list should be supported when loading the inference model.
-            # The below is just an example.
-            endpoints = ["127.0.0.1:2023","127.0.0.1:2024"]
-            [dist_inference_program, dist_feed_target_names, dist_fetch_targets] = (
-                paddle.distributed.io.load_inference_model_distributed(dirname=path,
-                                                                       executor=exe,
-                                                                       pserver_endpoints=endpoints))
-
-            # In this example, the inference program was saved in the file
-            # "./infer_model/__model__" and parameters were saved in
-            # separate files under the directory "./infer_model".
-            # By the inference program, feed_target_names and
-            # fetch_targets, we can use an executor to run the inference
-            # program for getting the inference result.
+            >>> import paddle
+            >>> import paddle.base as base
+            >>> import numpy as np
+
+            >>> paddle.enable_static()
+            >>> # Build the model
+            >>> main_prog = paddle.static.Program()
+            >>> startup_prog = paddle.static.Program()
+            >>> with paddle.static.program_guard(main_prog, startup_prog):
+            ...     data = paddle.static.data(name="img", shape=[64, 784])
+            ...     w = paddle.create_parameter(shape=[784, 200], dtype='float32')
+            ...     b = paddle.create_parameter(shape=[200], dtype='float32')
+            ...     hidden_w = paddle.matmul(x=data, y=w)
+            ...     hidden_b = paddle.add(hidden_w, b)
+            >>> place = base.CPUPlace()
+            >>> exe = base.Executor(place)
+            >>> exe.run(startup_prog)
+
+            >>> # Save the inference model
+            >>> path = "./infer_model"
+            >>> base.io.save_inference_model(dirname=path, feeded_var_names=['img'],
+            ...                              target_vars=[hidden_b], executor=exe, main_program=main_prog)
+            ...
+            >>> # Demo one. No need to set the distributed look up table, because the
+            >>> # training doesn't use a distributed look up table.
+            >>> [inference_program, feed_target_names, fetch_targets] = (
+            ...     paddle.distributed.io.load_inference_model_distributed(dirname=path, executor=exe))
+            >>> tensor_img = np.array(np.random.random((1, 64, 784)), dtype=np.float32)
+            >>> results = exe.run(inference_program,
+            ...                   feed={feed_target_names[0]: tensor_img},
+            ...                   fetch_list=fetch_targets)
+            ...
+            >>> # Demo two. If the training uses a distributed look up table, the pserver
+            >>> # endpoints list should be supported when loading the inference model.
+            >>> # The below is just an example.
+            >>> endpoints = ["127.0.0.1:2023","127.0.0.1:2024"]
+            >>> [dist_inference_program, dist_feed_target_names, dist_fetch_targets] = (
+            ...     paddle.distributed.io.load_inference_model_distributed(dirname=path,
+            ...                                                            executor=exe,
+            ...                                                            pserver_endpoints=endpoints))
+            ...
+            >>> # In this example, the inference program was saved in the file
+            >>> # "./infer_model/__model__" and parameters were saved in
+            >>> # separate files under the directory "./infer_model".
+            >>> # By the inference program, feed_target_names and
+            >>> # fetch_targets, we can use an executor to run the inference
+            >>> # program for getting the inference result.
     """
     load_from_memory = False
     if dirname is not None:
diff --git a/python/paddle/tensor/random.py b/python/paddle/tensor/random.py
index 4e16d8b022887f..f32978ca50706e 100644
--- a/python/paddle/tensor/random.py
+++ b/python/paddle/tensor/random.py
@@ -57,20 +57,26 @@ def bernoulli(x, name=None):
     Examples:
         .. code-block:: python
 
-            import paddle
-
-            paddle.set_device('cpu')  # on CPU device
-            paddle.seed(100)
-
-            x = paddle.rand([2,3])
-            print(x)
-            # [[0.55355281, 0.20714243, 0.01162981],
-            #  [0.51577556, 0.36369765, 0.26091650]]
-
-            out = paddle.bernoulli(x)
-            print(out)
-            # [[1., 0., 1.],
-            #  [0., 1., 0.]]
+            >>> import paddle
+
+            >>> paddle.set_device('cpu')  # on CPU device
+            >>> paddle.seed(100)
+
+            >>> x = paddle.rand([2,3])
+            >>> print(x)
+            >>> # doctest: +SKIP("Random output")
+            Tensor(shape=[2, 3], dtype=float32, place=Place(cpu), stop_gradient=True,
+            [[0.55355281, 0.20714243, 0.01162981],
+             [0.51577556, 0.36369765, 0.26091650]])
+            >>> # doctest: -SKIP
+
+            >>> out = paddle.bernoulli(x)
+            >>> print(out)
+            >>> # doctest: +SKIP("Random output")
+            Tensor(shape=[2, 3], dtype=float32, place=Place(cpu), stop_gradient=True,
+            [[1., 0., 1.],
+             [0., 1., 0.]])
+            >>> # doctest: -SKIP
 
     """
@@ -112,15 +118,18 @@ def poisson(x, name=None):
     Examples:
         .. code-block:: python
 
-            import paddle
-            paddle.set_device('cpu')
-            paddle.seed(100)
-
-            x = paddle.uniform([2,3], min=1.0, max=5.0)
-            out = paddle.poisson(x)
-            #[[2., 5., 0.],
-            # [5., 1., 3.]]
-
+            >>> import paddle
+            >>> paddle.set_device('cpu')
+            >>> paddle.seed(100)
+
+            >>> x = paddle.uniform([2,3], min=1.0, max=5.0)
+            >>> out = paddle.poisson(x)
+            >>> print(out)
+            >>> # doctest: +SKIP("Random output")
+            Tensor(shape=[2, 3], dtype=float32, place=Place(cpu), stop_gradient=True,
+            [[2., 5., 0.],
+             [5., 1., 3.]])
+            >>> # doctest: -SKIP
     """
     if in_dynamic_mode():
         return _C_ops.poisson(x)
@@ -157,29 +166,38 @@ def multinomial(x, num_samples=1, replacement=False, name=None):
     Examples:
         .. code-block:: python
 
-            import paddle
-
-            paddle.seed(100) # on CPU device
-            x = paddle.rand([2,4])
-            print(x)
-            # [[0.5535528  0.20714243 0.01162981 0.51577556]
-            #  [0.36369765 0.2609165  0.18905126 0.5621971 ]]
-
-            paddle.seed(200) # on CPU device
-            out1 = paddle.multinomial(x, num_samples=5, replacement=True)
-            print(out1)
-            # [[3 3 0 0 0]
-            #  [3 3 3 1 0]]
-
-            # out2 = paddle.multinomial(x, num_samples=5)
-            # InvalidArgumentError: When replacement is False, number of samples
-            # should be less than non-zero categories
-
-            paddle.seed(300) # on CPU device
-            out3 = paddle.multinomial(x, num_samples=3)
-            print(out3)
-            # [[3 0 1]
-            #  [3 1 0]]
+            >>> import paddle
+            >>> paddle.seed(100) # on CPU device
+
+            >>> x = paddle.rand([2,4])
+            >>> print(x)
+            >>> # doctest: +SKIP("Random output")
+            Tensor(shape=[2, 4], dtype=float32, place=Place(cpu), stop_gradient=True,
+            [[0.55355281, 0.20714243, 0.01162981, 0.51577556],
+             [0.36369765, 0.26091650, 0.18905126, 0.56219709]])
+            >>> # doctest: -SKIP
+
+            >>> paddle.seed(200) # on CPU device
+            >>> out1 = paddle.multinomial(x, num_samples=5, replacement=True)
+            >>> print(out1)
+            >>> # doctest: +SKIP("Random output")
+            Tensor(shape=[2, 5], dtype=int64, place=Place(cpu), stop_gradient=True,
+            [[3, 3, 0, 0, 0],
+             [3, 3, 3, 1, 0]])
+            >>> # doctest: -SKIP
+
+            >>> # out2 = paddle.multinomial(x, num_samples=5)
+            >>> # InvalidArgumentError: When replacement is False, number of samples
+            >>> # should be less than non-zero categories
+
+            >>> paddle.seed(300) # on CPU device
+            >>> out3 = paddle.multinomial(x, num_samples=3)
+            >>> print(out3)
+            >>> # doctest: +SKIP("Random output")
+            Tensor(shape=[2, 3], dtype=int64, place=Place(cpu), stop_gradient=True,
+            [[3, 0, 1],
+             [3, 1, 0]])
+            >>> # doctest: -SKIP
 
     """
@@ -254,15 +272,21 @@ def uniform_random_batch_size_like(
         Variable: A Tensor of the specified shape filled with uniform_random values. The shape of the Tensor is determined by the shape parameter and the specified dimension of the input Tensor.
 
     Examples:
         .. code-block:: python
-            import paddle
-            import paddle.base as base
-            from paddle.tensor import random
-            paddle.enable_static()
-            # example 1:
-            input = paddle.static.data(name="input", shape=[1, 3], dtype='float32')
-            out_1 = random.uniform_random_batch_size_like(input, [2, 4]) # out_1.shape=[1, 4]
-            # example 2:
-            out_2 = random.uniform_random_batch_size_like(input, [2, 4], input_dim_idx=1, output_dim_idx=1) # out_2.shape=[2, 3]
+
+            >>> import paddle
+            >>> import paddle.base as base
+            >>> from paddle.tensor import random
+            >>> paddle.enable_static()
+            >>> # example 1:
+            >>> input = paddle.static.data(name="input", shape=[1, 3], dtype='float32')
+            >>> out_1 = random.uniform_random_batch_size_like(input, [2, 4])
+            >>> print(out_1.shape)
+            [1, 4]
+
+            >>> # example 2:
+            >>> out_2 = random.uniform_random_batch_size_like(input, [2, 4], input_dim_idx=1, output_dim_idx=1)
+            >>> print(out_2.shape)
+            [2, 3]
     """
     check_variable_and_dtype(
         input,
@@ -395,29 +419,41 @@ def standard_normal(shape, dtype=None, name=None):
     Examples:
         .. code-block:: python
 
-            import paddle
-
-            # example 1: attr shape is a list which doesn't contain Tensor.
-            out1 = paddle.standard_normal(shape=[2, 3])
-            # [[-2.923464  ,  0.11934398, -0.51249987],  # random
-            #  [ 0.39632758,  0.08177969,  0.2692008 ]]  # random
-
-            # example 2: attr shape is a list which contains Tensor.
-            dim1 = paddle.to_tensor(2, 'int64')
-            dim2 = paddle.to_tensor(3, 'int32')
-            out2 = paddle.standard_normal(shape=[dim1, dim2, 2])
-            # [[[-2.8852394 , -0.25898588],  # random
-            #   [-0.47420555,  0.17683524],  # random
-            #   [-0.7989969 ,  0.00754541]],  # random
-            #  [[ 0.85201347,  0.32320443],  # random
-            #   [ 1.1399018 ,  0.48336947],  # random
-            #   [ 0.8086993 ,  0.6868893 ]]]  # random
-
-            # example 3: attr shape is a Tensor, the data type must be int64 or int32.
-            shape_tensor = paddle.to_tensor([2, 3])
-            out3 = paddle.standard_normal(shape_tensor)
-            # [[-2.878077 ,  0.17099959,  0.05111201]  # random
-            #  [-0.3761474, -1.044801  ,  1.1870178 ]]  # random
+            >>> import paddle
+
+            >>> # example 1: attr shape is a list which doesn't contain Tensor.
+            >>> out1 = paddle.standard_normal(shape=[2, 3])
+            >>> print(out1)
+            >>> # doctest: +SKIP("Random output")
+            Tensor(shape=[2, 3], dtype=float32, place=Place(cpu), stop_gradient=True,
+            [[-0.33719197, -0.25688133, -0.42868865],
+             [-0.27804616, -0.25058213, -0.28209466]])
+            >>> # doctest: -SKIP
+
+            >>> # example 2: attr shape is a list which contains Tensor.
+            >>> dim1 = paddle.to_tensor(2, 'int64')
+            >>> dim2 = paddle.to_tensor(3, 'int32')
+            >>> out2 = paddle.standard_normal(shape=[dim1, dim2, 2])
+            >>> print(out2)
+            >>> # doctest: +SKIP("Random output")
+            Tensor(shape=[2, 3, 2], dtype=float32, place=Place(cpu), stop_gradient=True,
+            [[[ 0.81888396, -0.64831746],
+              [ 1.28911388, -1.88154876],
+              [-0.03271919, -0.32410008]],
+             [[-0.20224631,  0.46683890],
+              [ 1.91947734,  0.71657443],
+              [ 0.33410960, -0.64256823]]])
+            >>> # doctest: -SKIP
+
+            >>> # example 3: attr shape is a Tensor, the data type must be int64 or int32.
+            >>> shape_tensor = paddle.to_tensor([2, 3])
+            >>> out3 = paddle.standard_normal(shape_tensor)
+            >>> print(out3)
+            >>> # doctest: +SKIP("Random output")
+            Tensor(shape=[2, 3], dtype=float32, place=Place(cpu), stop_gradient=True,
+            [[ 0.01182475, -0.44895259, -1.79227340],
+             [ 1.52022707, -0.83830303,  0.05261501]])
+            >>> # doctest: -SKIP
     """
     return gaussian(shape=shape, mean=0.0, std=1.0, dtype=dtype, name=name)
@@ -448,29 +484,41 @@ def randn(shape, dtype=None, name=None):
     Examples:
         .. code-block:: python
 
-            import paddle
-
-            # example 1: attr shape is a list which doesn't contain Tensor.
-            out1 = paddle.randn(shape=[2, 3])
-            # [[-2.923464  ,  0.11934398, -0.51249987],  # random
-            #  [ 0.39632758,  0.08177969,  0.2692008 ]]  # random
-
-            # example 2: attr shape is a list which contains Tensor.
-            dim1 = paddle.to_tensor(2, 'int64')
-            dim2 = paddle.to_tensor(3, 'int32')
-            out2 = paddle.randn(shape=[dim1, dim2, 2])
-            # [[[-2.8852394 , -0.25898588],  # random
-            #   [-0.47420555,  0.17683524],  # random
-            #   [-0.7989969 ,  0.00754541]],  # random
-            #  [[ 0.85201347,  0.32320443],  # random
-            #   [ 1.1399018 ,  0.48336947],  # random
-            #   [ 0.8086993 ,  0.6868893 ]]]  # random
-
-            # example 3: attr shape is a Tensor, the data type must be int64 or int32.
-            shape_tensor = paddle.to_tensor([2, 3])
-            out3 = paddle.randn(shape_tensor)
-            # [[-2.878077 ,  0.17099959,  0.05111201]  # random
-            #  [-0.3761474, -1.044801  ,  1.1870178 ]]  # random
+            >>> import paddle
+
+            >>> # example 1: attr shape is a list which doesn't contain Tensor.
+            >>> out1 = paddle.randn(shape=[2, 3])
+            >>> print(out1)
+            >>> # doctest: +SKIP("Random output")
+            Tensor(shape=[2, 3], dtype=float32, place=Place(cpu), stop_gradient=True,
+            [[-0.29270014, -0.02925120, -1.07807338],
+             [ 1.19966674, -0.46673676, -0.18050613]])
+            >>> # doctest: -SKIP
+
+            >>> # example 2: attr shape is a list which contains Tensor.
+            >>> dim1 = paddle.to_tensor(2, 'int64')
+            >>> dim2 = paddle.to_tensor(3, 'int32')
+            >>> out2 = paddle.randn(shape=[dim1, dim2, 2])
+            >>> print(out2)
+            >>> # doctest: +SKIP("Random output")
+            Tensor(shape=[2, 3, 2], dtype=float32, place=Place(cpu), stop_gradient=True,
+            [[[-0.26019713,  0.54994684],
+              [ 0.46403214, -1.41178775],
+              [-0.15682915, -0.26639181]],
+             [[ 0.01364388, -2.81676364],
+              [ 0.86996621,  0.07524570],
+              [ 0.21443737,  0.90938759]]])
+            >>> # doctest: -SKIP
+
+            >>> # example 3: attr shape is a Tensor, the data type must be int64 or int32.
+            >>> shape_tensor = paddle.to_tensor([2, 3])
+            >>> out3 = paddle.randn(shape_tensor)
+            >>> print(out3)
+            >>> # doctest: +SKIP("Random output")
+            Tensor(shape=[2, 3], dtype=float32, place=Place(cpu), stop_gradient=True,
+            [[ 0.57575506, -1.60349274, -0.27124876],
+             [ 1.08381045,  0.81270242, -0.26763600]])
+            >>> # doctest: -SKIP
     """
     return standard_normal(shape, dtype, name)
@@ -509,20 +557,31 @@ def normal(mean=0.0, std=1.0, shape=None, name=None):
     Examples:
         .. code-block:: python
 
-            import paddle
-
-            out1 = paddle.normal(shape=[2, 3])
-            # [[ 0.17501129  0.32364586  1.561118  ]  # random
-            #  [-1.7232178   1.1545963  -0.76156676]]  # random
-
-            mean_tensor = paddle.to_tensor([1.0, 2.0, 3.0])
-            out2 = paddle.normal(mean=mean_tensor)
-            # [ 0.18644847 -1.19434458  3.93694787]  # random
-
-            std_tensor = paddle.to_tensor([1.0, 2.0, 3.0])
-            out3 = paddle.normal(mean=mean_tensor, std=std_tensor)
-            # [1.00780561 3.78457445 5.81058198]  # random
-
+            >>> import paddle
+
+            >>> out1 = paddle.normal(shape=[2, 3])
+            >>> print(out1)
+            >>> # doctest: +SKIP("Random output")
+            Tensor(shape=[2, 3], dtype=float32, place=Place(cpu), stop_gradient=True,
+            [[-0.85107994, -0.85490644, -1.35941815],
+             [-0.55500370,  0.20964541,  2.24193954]])
+            >>> # doctest: -SKIP
+
+            >>> mean_tensor = paddle.to_tensor([1.0, 2.0, 3.0])
+            >>> out2 = paddle.normal(mean=mean_tensor)
+            >>> print(out2)
+            >>> # doctest: +SKIP("Random output")
+            Tensor(shape=[3], dtype=float32, place=Place(cpu), stop_gradient=True,
+            [1.05411839, 3.71514320, 3.42665267])
+            >>> # doctest: -SKIP
+
+            >>> std_tensor = paddle.to_tensor([1.0, 2.0, 3.0])
+            >>> out3 = paddle.normal(mean=mean_tensor, std=std_tensor)
+            >>> print(out3)
+            >>> # doctest: +SKIP("Random output")
+            Tensor(shape=[3], dtype=float32, place=Place(cpu), stop_gradient=True,
+            [0.48646951, 0.00815189, 3.74022293])
+            >>> # doctest: -SKIP
     """
     if not in_dynamic_mode():
         check_type(mean, 'mean', (int, float, Variable), 'normal')
@@ -606,31 +665,43 @@ def uniform(shape, dtype=None, min=-1.0, max=1.0, seed=0, name=None):
     Examples:
         .. code-block:: python
-            :name: code-example1
-
-            import paddle
-
-            # example 1:
-            # attr shape is a list which doesn't contain Tensor.
-            out1 = paddle.uniform(shape=[3, 4])
-            # [[ 0.84524226,  0.6921872,   0.56528175,  0.71690357],  # random
-            #  [-0.34646994, -0.45116323, -0.09902662, -0.11397249],  # random
-            #  [ 0.433519,    0.39483607, -0.8660099,   0.83664286]]  # random
-
-            # example 2:
-            # attr shape is a list which contains Tensor.
-            dim1 = paddle.to_tensor(2, 'int64')
-            dim2 = paddle.to_tensor(3, 'int32')
-            out2 = paddle.uniform(shape=[dim1, dim2])
-            # [[-0.9951253,   0.30757582,  0.9899647 ],  # random
-            #  [ 0.5864527,   0.6607096,  -0.8886161 ]]  # random
-
-            # example 3:
-            # attr shape is a Tensor, the data type must be int64 or int32.
-            shape_tensor = paddle.to_tensor([2, 3])
-            out3 = paddle.uniform(shape_tensor)
-            # [[-0.8517412,  -0.4006908,   0.2551912 ],  # random
-            #  [ 0.3364414,   0.36278176, -0.16085452]]  # random
+            :name: code-example1
+
+            >>> import paddle
+
+            >>> # example 1:
+            >>> # attr shape is a list which doesn't contain Tensor.
+            >>> out1 = paddle.uniform(shape=[3, 4])
+            >>> print(out1)
+            >>> # doctest: +SKIP("Random output")
+            Tensor(shape=[3, 4], dtype=float32, place=Place(cpu), stop_gradient=True,
+            [[ 0.38170254, -0.47945309,  0.39794648, -0.94233936],
+             [-0.85296679, -0.76094693,  0.10565400,  0.59155810],
+             [ 0.11681318, -0.42144555, -0.81596589,  0.62113667]])
+            >>> # doctest: -SKIP
+
+            >>> # example 2:
+            >>> # attr shape is a list which contains Tensor.
+            >>> dim1 = paddle.to_tensor(2, 'int64')
+            >>> dim2 = paddle.to_tensor(3, 'int32')
+            >>> out2 = paddle.uniform(shape=[dim1, dim2])
+            >>> print(out2)
+            >>> # doctest: +SKIP("Random output")
+            Tensor(shape=[2, 3], dtype=float32, place=Place(cpu), stop_gradient=True,
+            [[-0.00294012, -0.07210171, -0.44236207],
+             [ 0.70089281,  0.21500075, -0.22084606]])
+            >>> # doctest: -SKIP
+
+            >>> # example 3:
+            >>> # attr shape is a Tensor, the data type must be int64 or int32.
+            >>> shape_tensor = paddle.to_tensor([2, 3])
+            >>> out3 = paddle.uniform(shape_tensor)
+            >>> print(out3)
+            >>> # doctest: +SKIP("Random output")
+            Tensor(shape=[2, 3], dtype=float32, place=Place(cpu), stop_gradient=True,
+            [[-0.60801756,  0.32448411,  0.90269291],
+             [-0.66421294, -0.95218551, -0.51022208]])
+            >>> # doctest: -SKIP
     """
     supported_dtypes = ['float32', 'float64', 'float16', 'uint16']
     if dtype is None:
@@ -705,14 +776,17 @@ def uniform_(x, min=-1.0, max=1.0, seed=0, name=None):
     Examples:
         .. code-block:: python
 
-            import paddle
-            # example:
-            x = paddle.ones(shape=[3, 4])
-            x.uniform_()
-            print(x)
-            # [[ 0.84524226,  0.6921872,   0.56528175,  0.71690357],  # random
-            #  [-0.34646994, -0.45116323, -0.09902662, -0.11397249],  # random
-            #  [ 0.433519,    0.39483607, -0.8660099,   0.83664286]]  # random
+            >>> import paddle
+
+            >>> # example:
+            >>> x = paddle.ones(shape=[3, 4])
+            >>> x.uniform_()
+            >>> # doctest: +SKIP("Random output")
+            Tensor(shape=[3, 4], dtype=float32, place=Place(cpu), stop_gradient=True,
+            [[-0.50484276,  0.49580324,  0.33357990, -0.93924278],
+             [ 0.39779735,  0.87677515, -0.24377221,  0.06212139],
+             [-0.92499518, -0.96244860,  0.79210341, -0.78228098]])
+            >>> # doctest: -SKIP
     """
     return _C_ops.uniform_inplace_(x, min, max, seed, 0, 0, 1.0)
@@ -747,38 +821,59 @@ def randint(low=0, high=None, shape=[1], dtype=None, name=None):
     Examples:
         .. code-block:: python
 
-            import paddle
-
-            # example 1:
-            # attr shape is a list which doesn't contain Tensor.
-            out1 = paddle.randint(low=-5, high=5, shape=[2, 3])
-            # [0, -3, 2]  # random
-
-            # example 2:
-            # attr shape is a list which contains Tensor.
-            dim1 = paddle.to_tensor(2, 'int64')
-            dim2 = paddle.to_tensor(3, 'int32')
-            out2 = paddle.randint(low=-5, high=5, shape=[dim1, dim2])
-            # [[0, -1, -3],  # random
-            #  [4, -2,  0]]  # random
-
-            # example 3:
-            # attr shape is a Tensor
-            shape_tensor = paddle.to_tensor([2, 3])
-            out3 = paddle.randint(low=-5, high=5, shape=shape_tensor)
-            # [[ 2, -3, -1],    # random
-            #  [-3, -2,  1]])    # random
-
-            # example 4:
-            # data type is int32
-            out4 = paddle.randint(low=-5, high=5, shape=[3], dtype='int32')
-            # [-5, 4, -4]  # random
-
-            # example 5:
-            # Input only one parameter
-            # low=0, high=10, shape=[1], dtype='int64'
-            out5 = paddle.randint(10)
-            # [7]  # random
+            >>> import paddle
+
+            >>> # example 1:
+            >>> # attr shape is a list which doesn't contain Tensor.
+            >>> out1 = paddle.randint(low=-5, high=5, shape=[2, 3])
+            >>> print(out1)
+            >>> # doctest: +SKIP("Random output")
+            Tensor(shape=[2, 3], dtype=int64, place=Place(cpu), stop_gradient=True,
+            [[-1,  4,  4],
+             [-2, -5, -2]])
+            >>> # doctest: -SKIP
+
+            >>> # example 2:
+            >>> # attr shape is a list which contains Tensor.
+            >>> dim1 = paddle.to_tensor(2, 'int64')
+            >>> dim2 = paddle.to_tensor(3, 'int32')
+            >>> out2 = paddle.randint(low=-5, high=5, shape=[dim1, dim2])
+            >>> print(out2)
+            >>> # doctest: +SKIP("Random output")
+            Tensor(shape=[2, 3], dtype=int64, place=Place(cpu), stop_gradient=True,
+            [[-4, -4,  2],
+             [-3, -1, -5]])
+            >>> # doctest: -SKIP
+
+            >>> # example 3:
+            >>> # attr shape is a Tensor
+            >>> shape_tensor = paddle.to_tensor([2, 3])
+            >>> out3 = paddle.randint(low=-5, high=5, shape=shape_tensor)
+            >>> print(out3)
+            >>> # doctest: +SKIP("Random output")
+            Tensor(shape=[2, 3], dtype=int64, place=Place(cpu), stop_gradient=True,
+            [[-1,  4, -3],
+             [ 1,  2, -1]])
+            >>> # doctest: -SKIP
+
+            >>> # example 4:
+            >>> # data type is int32
+            >>> out4 = paddle.randint(low=-5, high=5, shape=[3], dtype='int32')
+            >>> print(out4)
+            >>> # doctest: +SKIP("Random output")
+            Tensor(shape=[3], dtype=int32, place=Place(cpu), stop_gradient=True,
+            [4, 4, 0])
+            >>> # doctest: -SKIP
+
+            >>> # example 5:
+            >>> # Input only one parameter
+            >>> # low=0, high=10, shape=[1], dtype='int64'
+            >>> out5 = paddle.randint(10)
+            >>> print(out5)
+            >>> # doctest: +SKIP("Random output")
+            Tensor(shape=[1], dtype=int64, place=Place(cpu), stop_gradient=True,
+            [7])
+            >>> # doctest: -SKIP
     """
     if high is None:
@@ -854,97 +949,115 @@ def randint_like(x, low=0, high=None, dtype=None, name=None):
     Examples:
         .. code-block:: python
 
-            import paddle
-
-            # example 1:
-            # dtype is None and the dtype of x is float16
-            x = paddle.zeros((1,2)).astype("float16")
-            out1 = paddle.randint_like(x, low=-5, high=5)
-            print(out1)
-            print(out1.dtype)
-            # [[0, -3]]  # random
-            # paddle.float16
-
-            # example 2:
-            # dtype is None and the dtype of x is float32
-            x = paddle.zeros((1,2)).astype("float32")
-            out2 = paddle.randint_like(x, low=-5, high=5)
-            print(out2)
-            print(out2.dtype)
-            # [[0, -3]]  # random
-            # paddle.float32
-
-            # example 3:
-            # dtype is None and the dtype of x is float64
-            x = paddle.zeros((1,2)).astype("float64")
-            out3 = paddle.randint_like(x, low=-5, high=5)
-            print(out3)
-            print(out3.dtype)
-            # [[0, -3]]  # random
-            # paddle.float64
-
-            # example 4:
-            # dtype is None and the dtype of x is int32
-            x = paddle.zeros((1,2)).astype("int32")
-            out4 = paddle.randint_like(x, low=-5, high=5)
-            print(out4)
-            print(out4.dtype)
-            # [[0, -3]]  # random
-            # paddle.int32
-
-            # example 5:
-            # dtype is None and the dtype of x is int64
-            x = paddle.zeros((1,2)).astype("int64")
-            out5 = paddle.randint_like(x, low=-5, high=5)
-            print(out5)
-            print(out5.dtype)
-            # [[0, -3]]  # random
-            # paddle.int64
-
-            # example 6:
-            # dtype is float64 and the dtype of x is float32
-            x = paddle.zeros((1,2)).astype("float32")
-            out6 = paddle.randint_like(x, low=-5, high=5, dtype="float64")
-            print(out6)
-            print(out6.dtype)
-            # [[0, -1]]  # random
-            # paddle.float64
-
-            # example 7:
-            # dtype is bool and the dtype of x is float32
-            x = paddle.zeros((1,2)).astype("float32")
-            out7 = paddle.randint_like(x, low=-5, high=5, dtype="bool")
-            print(out7)
-            print(out7.dtype)
-            # [[0, -1]]  # random
-            # paddle.bool
-
-            # example 8:
-            # dtype is int32 and the dtype of x is float32
-            x = paddle.zeros((1,2)).astype("float32")
-            out8 = paddle.randint_like(x, low=-5, high=5, dtype="int32")
-            print(out8)
-            print(out8.dtype)
-            # [[0, -1]]  # random
-            # paddle.int32
-
-            # example 9:
-            # dtype is int64 and the dtype of x is float32
-            x = paddle.zeros((1,2)).astype("float32")
-            out9 = paddle.randint_like(x, low=-5, high=5, dtype="int64")
-            print(out9)
-            print(out9.dtype)
-            # [[0, -1]]  # random
-            # paddle.int64
-
-            # example 10:
-            # dtype is int64 and the dtype of x is bool
-            x = paddle.zeros((1,2)).astype("bool")
-            out10 = paddle.randint_like(x, low=-5, high=5, dtype="int64")
-            print(out10)
-            print(out10.dtype)
-            # [[0, -1]]  # random
-            # paddle.int64
+            >>> import paddle
+
+            >>> # example 1:
+            >>> # dtype is None and the dtype of x is float32
+            >>> x = paddle.zeros((1,2)).astype("float32")
+            >>> out1 = paddle.randint_like(x, low=-5, high=5)
+            >>> print(out1)
+            >>> # doctest: +SKIP("Random output")
+            Tensor(shape=[1, 2], dtype=float32, place=Place(cpu), stop_gradient=True,
+            [[0., 0.]])
+            >>> # doctest: -SKIP
+            >>> print(out1.dtype)
+            paddle.float32
+
+            >>> # example 2:
+            >>> # dtype is None and the dtype of x is float64
+            >>> x = paddle.zeros((1,2)).astype("float64")
+            >>> out2 = paddle.randint_like(x, low=-5, high=5)
+            >>> print(out2)
+            >>> # doctest: +SKIP("Random output")
+            Tensor(shape=[1, 2], dtype=float64, place=Place(cpu), stop_gradient=True,
+            [[ 4., -5.]])
+            >>> # doctest: -SKIP
+            >>> print(out2.dtype)
+            paddle.float64
+
+            >>> # example 3:
+            >>> # dtype is None and the dtype of x is int32
+            >>> x = paddle.zeros((1,2)).astype("int32")
+            >>> out3 = paddle.randint_like(x, low=-5, high=5)
+            >>> print(out3)
+            >>> # doctest: +SKIP("Random output")
+            Tensor(shape=[1, 2], dtype=int32, place=Place(cpu), stop_gradient=True,
+            [[ 0, -4]])
+            >>> # doctest: -SKIP
+            >>> print(out3.dtype)
+            paddle.int32
+
+            >>> # example 4:
+            >>> # dtype is None and the dtype of x is int64
+            >>> x = paddle.zeros((1,2)).astype("int64")
+            >>> out4 = paddle.randint_like(x, low=-5, high=5)
+            >>> print(out4)
+            >>> # doctest: +SKIP("Random output")
+            Tensor(shape=[1, 2], dtype=int64, place=Place(cpu), stop_gradient=True,
+            [[ 4, -3]])
+            >>> # doctest: -SKIP
+            >>> print(out4.dtype)
+            paddle.int64
+
+            >>> # example 5:
+            >>> # dtype is float64 and the dtype of x is float32
+            >>> x = paddle.zeros((1,2)).astype("float32")
+            >>> out5 = paddle.randint_like(x, low=-5, high=5, dtype="float64")
+            >>> print(out5)
+            >>> # doctest: +SKIP("Random output")
+            Tensor(shape=[1, 2], dtype=float64, place=Place(cpu), stop_gradient=True,
+            [[3., 1.]])
+            >>> # doctest: -SKIP
+            >>> print(out5.dtype)
+            paddle.float64
+
+            >>> # example 6:
+            >>> # dtype is bool and the dtype of x is float32
+            >>> x = paddle.zeros((1,2)).astype("float32")
+            >>> out6 = paddle.randint_like(x, low=-5, high=5, dtype="bool")
+            >>> print(out6)
+            >>> # doctest: +SKIP("Random output")
+            Tensor(shape=[1, 2], dtype=bool, place=Place(cpu), stop_gradient=True,
+            [[False, True ]])
+            >>> # doctest: -SKIP
+            >>> print(out6.dtype)
+            paddle.bool
+
+            >>> # example 7:
+            >>> # dtype is int32 and the dtype of x is float32
+            >>> x = paddle.zeros((1,2)).astype("float32")
+            >>> out7 = paddle.randint_like(x, low=-5, high=5, dtype="int32")
+            >>> print(out7)
+            >>> # doctest: +SKIP("Random output")
+            Tensor(shape=[1, 2], dtype=int32, place=Place(cpu), stop_gradient=True,
+            [[-2, -2]])
+            >>> # doctest: -SKIP
+            >>> print(out7.dtype)
+            paddle.int32
+
+            >>> # example 8:
+            >>> # dtype is int64 and the dtype of x is float32
+            >>> x = paddle.zeros((1,2)).astype("float32")
+            >>> out8 = paddle.randint_like(x, low=-5, high=5, dtype="int64")
+            >>> print(out8)
+            >>> # doctest: +SKIP("Random output")
+            Tensor(shape=[1, 2], dtype=int64, place=Place(cpu), stop_gradient=True,
+            [[-5,  4]])
+            >>> # doctest: -SKIP
+            >>> print(out8.dtype)
+            paddle.int64
+
+            >>> # example 9:
+            >>> # dtype is int64 and the dtype of x is bool
+            >>> x = paddle.zeros((1,2)).astype("bool")
+            >>> out9 = paddle.randint_like(x, low=-5, high=5, dtype="int64")
+            >>> print(out9)
+            >>> # doctest: +SKIP("Random output")
+            Tensor(shape=[1, 2], dtype=int64, place=Place(cpu), stop_gradient=True,
+            [[ 1, -2]])
+            >>> # doctest: -SKIP
+            >>> print(out9.dtype)
+            paddle.int64
     """
     if high is None:
@@ -1034,13 +1147,21 @@ def randperm(n, dtype="int64", name=None):
     Examples:
         .. code-block:: python
 
-            import paddle
-
-            out1 = paddle.randperm(5)
-            # [4, 1, 2, 3, 0]  # random
-
-            out2 = paddle.randperm(7, 'int32')
-            # [1, 6, 2, 0, 4, 3, 5]  # random
+            >>> import paddle
+
+            >>> out1 = paddle.randperm(5)
+            >>> print(out1)
+            >>> # doctest: +SKIP("Random output")
+            Tensor(shape=[5], dtype=int64, place=Place(cpu), stop_gradient=True,
+            [3, 0, 1, 4, 2])
+            >>> # doctest: -SKIP
+
+            >>> out2 = paddle.randperm(7, 'int32')
+            >>> print(out2)
+            >>> # doctest: +SKIP("Random output")
+            Tensor(shape=[7], dtype=int32, place=Place(cpu), stop_gradient=True,
+            [3, 2, 0, 6, 5, 4, 1])
+            >>> # doctest: -SKIP
     """
     if not isinstance(dtype, core.VarDesc.VarType):
@@ -1091,29 +1212,41 @@ def rand(shape, dtype=None, name=None):
     Examples:
         .. code-block:: python
 
-            import paddle
-
-            # example 1: attr shape is a list which doesn't contain Tensor.
-            out1 = paddle.rand(shape=[2, 3])
-            # [[0.451152  , 0.55825245, 0.403311  ],  # random
-            #  [0.22550228, 0.22106001, 0.7877319 ]]  # random
-
-            # example 2: attr shape is a list which contains Tensor.
-            dim1 = paddle.to_tensor(2, 'int64')
-            dim2 = paddle.to_tensor(3, 'int32')
-            out2 = paddle.rand(shape=[dim1, dim2, 2])
-            # [[[0.8879919 , 0.25788337],  # random
-            #   [0.28826773, 0.9712097 ],  # random
-            #   [0.26438272, 0.01796806]],  # random
-            #  [[0.33633623, 0.28654453],  # random
-            #   [0.79109055, 0.7305809 ],  # random
-            #   [0.870881  , 0.2984597 ]]]  # random
-
-            # example 3: attr shape is a Tensor, the data type must be int64 or int32.
-            shape_tensor = paddle.to_tensor([2, 3])
-            out3 = paddle.rand(shape_tensor)
-            # [[0.22920267, 0.841956  , 0.05981819],  # random
-            #  [0.4836288 , 0.24573246, 0.7516129 ]]  # random
+            >>> import paddle
+
+            >>> # example 1: attr shape is a list which doesn't contain Tensor.
+            >>> out1 = paddle.rand(shape=[2, 3])
+            >>> print(out1)
+            >>> # doctest: +SKIP("Random output")
+            Tensor(shape=[2, 3], dtype=float32, place=Place(cpu), stop_gradient=True,
+            [[0.68532258, 0.69431782, 0.44835982],
+             [0.13204314, 0.48128194, 0.36574543]])
+            >>> # doctest: -SKIP
+
+            >>> # example 2: attr shape is a list which contains Tensor.
+            >>> dim1 = paddle.to_tensor(2, 'int64')
+            >>> dim2 = paddle.to_tensor(3, 'int32')
+            >>> out2 = paddle.rand(shape=[dim1, dim2, 2])
+            >>> print(out2)
+            >>> # doctest: +SKIP("Random output")
+            Tensor(shape=[2, 3, 2], dtype=float32, place=Place(cpu), stop_gradient=True,
+            [[[0.62102991, 0.45255184],
+              [0.81386960, 0.22463219],
+              [0.87946558, 0.28097662]],
+             [[0.36565998, 0.63203937],
+              [0.58640617, 0.92696166],
+              [0.85060406, 0.38138932]]])
+            >>> # doctest: -SKIP
+
+            >>> # example 3: attr shape is a Tensor, the data type must be int64 or int32.
+            >>> shape_tensor = paddle.to_tensor([2, 3])
+            >>> out3 = paddle.rand(shape_tensor)
+            >>> print(out3)
+            >>> # doctest: +SKIP("Random output")
+            Tensor(shape=[2, 3], dtype=float32, place=Place(cpu), stop_gradient=True,
+            [[0.77650446, 0.12870903, 0.05153799],
+             [0.27029657, 0.03963696, 0.42487794]])
+            >>> # doctest: -SKIP
     """
     return uniform(shape, dtype, min=0.0, max=1.0, name=name)
@@ -1140,14 +1273,17 @@ def exponential_(x, lam=1.0, name=None):
     Examples:
        .. code-block:: python
 
-            import paddle
-            paddle.set_device('cpu')
-            paddle.seed(100)
-
-            x = paddle.empty([2,3])
-            x.exponential_()
-            # [[0.80643415, 0.23211166, 0.01169797],
-            #  [0.72520673, 0.45208144, 0.30234432]]
+            >>> import paddle
+            >>> paddle.set_device('cpu')
+            >>> paddle.seed(100)
+
+            >>> x = paddle.empty([2,3])
+            >>> x.exponential_()
+            >>> # doctest: +SKIP("Random output")
+            Tensor(shape=[2, 3], dtype=float32, place=Place(cpu), stop_gradient=True,
+            [[0.80643415, 0.23211166, 0.01169797],
+             [0.72520679, 0.45208144, 0.30234432]])
+            >>> # doctest: -SKIP
     """
     if in_dynamic_mode():
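-- 
A note on the doctest directive pattern that this patch applies throughout: the
converted examples use standard doctest directives so the sample-code checker
can still execute statements whose output is non-deterministic or
hardware-dependent. The following is a minimal, self-contained sketch of the
pattern; the tensor values shown are illustrative placeholders, not real
output:

    >>> # doctest: +REQUIRES(env:GPU)
    >>> import paddle
    >>> paddle.device.set_device('gpu')
    >>> x = paddle.rand([2, 2])
    >>> print(x)
    >>> # doctest: +SKIP("Random output")
    Tensor(shape=[2, 2], dtype=float32, place=Place(gpu:0), stop_gradient=True,
    [[0.11855389, 0.98330647],
     [0.64428663, 0.27959931]])
    >>> # doctest: -SKIP

`+REQUIRES(env:GPU)` makes the checker run the example only when a GPU
environment is available, and the `+SKIP("Random output")` / `-SKIP` pair
brackets output that varies between runs: the statements still execute, but the
printed tensor is not compared verbatim.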