From 95da913fe08be432a4e0c244741e3ebaa964fc53 Mon Sep 17 00:00:00 2001 From: Sekiro-x <76903040+Sekiro-x@users.noreply.github.com> Date: Wed, 20 Sep 2023 14:20:19 +0800 Subject: [PATCH] [Docathon] Fix NO.6 NO.21 API Label (#57512) --- python/paddle/incubate/optimizer/lars_momentum.py | 2 +- python/paddle/incubate/optimizer/lbfgs.py | 2 +- python/paddle/nn/layer/rnn.py | 6 +++--- python/paddle/optimizer/adadelta.py | 2 +- python/paddle/optimizer/adam.py | 2 +- python/paddle/optimizer/adamax.py | 2 +- python/paddle/optimizer/lbfgs.py | 2 +- python/paddle/optimizer/momentum.py | 2 +- python/paddle/optimizer/optimizer.py | 2 +- python/paddle/optimizer/rmsprop.py | 2 +- python/paddle/optimizer/sgd.py | 2 +- python/paddle/static/nn/common.py | 4 ++-- python/paddle/static/nn/loss.py | 4 ++-- python/paddle/static/nn/sequence_lod.py | 4 ++-- 14 files changed, 19 insertions(+), 19 deletions(-) diff --git a/python/paddle/incubate/optimizer/lars_momentum.py b/python/paddle/incubate/optimizer/lars_momentum.py index 5df9160d3d11b9..1c6ecc263e6f6b 100644 --- a/python/paddle/incubate/optimizer/lars_momentum.py +++ b/python/paddle/incubate/optimizer/lars_momentum.py @@ -45,7 +45,7 @@ class LarsMomentumOptimizer(Optimizer): The default value is None in static graph mode, at this time all parameters will be updated. regularization (WeightDecayRegularizer, optional): The strategy of regularization. There are two method: \ :ref:`api_base_regularizer_L1Decay` , :ref:`api_base_regularizer_L2Decay` . If a parameter has set \ - regularizer using :ref:`api_base_ParamAttr` already, the regularization setting here in optimizer will be \ + regularizer using :ref:`api_paddle_ParamAttr` already, the regularization setting here in optimizer will be \ ignored for this parameter. Otherwise, the regularization setting here in optimizer will take effect. \ Default None, meaning there is no regularization. 
grad_clip (GradientClipBase, optional): Gradient cliping strategy, it's an instance of diff --git a/python/paddle/incubate/optimizer/lbfgs.py b/python/paddle/incubate/optimizer/lbfgs.py index 2aa4ae1e21b7b4..137b8eb7ccbdc5 100644 --- a/python/paddle/incubate/optimizer/lbfgs.py +++ b/python/paddle/incubate/optimizer/lbfgs.py @@ -58,7 +58,7 @@ class LBFGS(Optimizer): weight_decay (float|WeightDecayRegularizer, optional): The strategy of regularization. \ It canbe a float value as coeff of L2 regularization or \ :ref:`api_base_regularizer_L1Decay`, :ref:`api_base_regularizer_L2Decay`. - If a parameter has set regularizer using :ref:`api_base_ParamAttr` already, \ + If a parameter has set regularizer using :ref:`api_paddle_ParamAttr` already, \ the regularization setting here in optimizer will be ignored for this parameter. \ Otherwise, the regularization setting here in optimizer will take effect. \ Default None, meaning there is no regularization. diff --git a/python/paddle/nn/layer/rnn.py b/python/paddle/nn/layer/rnn.py index 3f5adf54d3968c..1cafb1cf1b614d 100644 --- a/python/paddle/nn/layer/rnn.py +++ b/python/paddle/nn/layer/rnn.py @@ -740,7 +740,7 @@ class SimpleRNNCell(RNNCellBase): - **states** (Tensor): shape `[batch_size, hidden_size]`, the new hidden state, corresponding to :math:`h_{t}` in the formula. Notes: - All the weights and bias are initialized with `Uniform(-std, std)` by default. Where std = :math:`\frac{1}{\sqrt{hidden\_size}}`. For more information about parameter initialization, please refer to :ref:`api_base_ParamAttr`. + All the weights and bias are initialized with `Uniform(-std, std)` by default. Where std = :math:`\frac{1}{\sqrt{hidden\_size}}`. For more information about parameter initialization, please refer to :ref:`api_paddle_ParamAttr`. Examples: @@ -893,7 +893,7 @@ class LSTMCell(RNNCellBase): Notes: All the weights and bias are initialized with `Uniform(-std, std)` by default. Where std = :math:`\frac{1}{\sqrt{hidden\_size}}`. 
For more - information about parameter initialization, please refer to :ref:`api_base_ParamAttr`. + information about parameter initialization, please refer to :ref:`api_paddle_ParamAttr`. Examples: @@ -1054,7 +1054,7 @@ class GRUCell(RNNCellBase): Notes: All the weights and bias are initialized with `Uniform(-std, std)` by default. Where std = :math:`\frac{1}{\sqrt{hidden\_size}}`. For more - information about parameter initialization, please refer to s:ref:`api_base_ParamAttr`. + information about parameter initialization, please refer to :ref:`api_paddle_ParamAttr`. Examples: diff --git a/python/paddle/optimizer/adadelta.py b/python/paddle/optimizer/adadelta.py index 3523ece1e831fe..ae8e5d2dc6b26b 100644 --- a/python/paddle/optimizer/adadelta.py +++ b/python/paddle/optimizer/adadelta.py @@ -55,7 +55,7 @@ class Adadelta(Optimizer): weight_decay (float|WeightDecayRegularizer, optional): The strategy of regularization. \ It canbe a float value as coeff of L2 regularization or \ :ref:`api_base_regularizer_L1Decay`, :ref:`api_base_regularizer_L2Decay`. - If a parameter has set regularizer using :ref:`api_base_ParamAttr` already, \ + If a parameter has set regularizer using :ref:`api_paddle_ParamAttr` already, \ the regularization setting here in optimizer will be ignored for this parameter. \ Otherwise, the regularization setting here in optimizer will take effect. \ Default None, meaning there is no regularization. diff --git a/python/paddle/optimizer/adam.py b/python/paddle/optimizer/adam.py index a096c8afd12b97..12e932c6fb2180 100644 --- a/python/paddle/optimizer/adam.py +++ b/python/paddle/optimizer/adam.py @@ -73,7 +73,7 @@ class Adam(Optimizer): weight_decay (float|WeightDecayRegularizer, optional): The strategy of regularization. It canbe a float value as coeff of L2 regularization or :ref:`api_base_regularizer_L1Decay`, :ref:`api_base_regularizer_L2Decay`. 
- If a parameter has set regularizer using :ref:`api_base_ParamAttr` already, + If a parameter has set regularizer using :ref:`api_paddle_ParamAttr` already, the regularization setting here in optimizer will be ignored for this parameter. Otherwise, the regularization setting here in optimizer will take effect. Default None, meaning there is no regularization. diff --git a/python/paddle/optimizer/adamax.py b/python/paddle/optimizer/adamax.py index ff7ce4a2322246..354c5a9bb531a2 100644 --- a/python/paddle/optimizer/adamax.py +++ b/python/paddle/optimizer/adamax.py @@ -68,7 +68,7 @@ class Adamax(Optimizer): weight_decay (float|WeightDecayRegularizer, optional): The strategy of regularization. It can be a float value as coeff of L2 regularization or :ref:`api_base_regularizer_L1Decay`, :ref:`api_base_regularizer_L2Decay`. - If a parameter has set regularizer using :ref:`api_base_ParamAttr` already, + If a parameter has set regularizer using :ref:`api_paddle_ParamAttr` already, the regularization setting here in optimizer will be ignored for this parameter. Otherwise, the regularization setting here in optimizer will take effect. Default None, meaning there is no regularization. diff --git a/python/paddle/optimizer/lbfgs.py b/python/paddle/optimizer/lbfgs.py index f552f2d67ab743..c2f9cb6b522630 100644 --- a/python/paddle/optimizer/lbfgs.py +++ b/python/paddle/optimizer/lbfgs.py @@ -340,7 +340,7 @@ class LBFGS(Optimizer): weight_decay (float|WeightDecayRegularizer, optional): The strategy of regularization. \ It canbe a float value as coeff of L2 regularization or \ :ref:`api_base_regularizer_L1Decay`, :ref:`api_base_regularizer_L2Decay`. - If a parameter has set regularizer using :ref:`api_base_ParamAttr` already, \ + If a parameter has set regularizer using :ref:`api_paddle_ParamAttr` already, \ the regularization setting here in optimizer will be ignored for this parameter. \ Otherwise, the regularization setting here in optimizer will take effect. 
\ Default None, meaning there is no regularization. diff --git a/python/paddle/optimizer/momentum.py b/python/paddle/optimizer/momentum.py index 8269663f1c500c..5dd0a424778bb3 100644 --- a/python/paddle/optimizer/momentum.py +++ b/python/paddle/optimizer/momentum.py @@ -60,7 +60,7 @@ class Momentum(Optimizer): weight_decay (float|WeightDecayRegularizer, optional): The strategy of regularization. \ It can be a float value as coeff of L2 regularization or \ :ref:`api_base_regularizer_L1Decay`, :ref:`api_base_regularizer_L2Decay`. - If a parameter has set regularizer using :ref:`api_base_ParamAttr` already, \ + If a parameter has set regularizer using :ref:`api_paddle_ParamAttr` already, \ the regularization setting here in optimizer will be ignored for this parameter. \ Otherwise, the regularization setting here in optimizer will take effect. \ Default None, meaning there is no regularization. diff --git a/python/paddle/optimizer/optimizer.py b/python/paddle/optimizer/optimizer.py index ca30dc3c17024e..45a1069750bd98 100644 --- a/python/paddle/optimizer/optimizer.py +++ b/python/paddle/optimizer/optimizer.py @@ -109,7 +109,7 @@ class Optimizer: weight_decay (float|WeightDecayRegularizer, optional): The strategy of regularization. \ It canbe a float value as coeff of L2 regularization or \ :ref:`api_base_regularizer_L1Decay`, :ref:`api_base_regularizer_L2Decay`. - If a parameter has set regularizer using :ref:`api_base_ParamAttr` already, \ + If a parameter has set regularizer using :ref:`api_paddle_ParamAttr` already, \ the regularization setting here in optimizer will be ignored for this parameter. \ Otherwise, the regularization setting here in optimizer will take effect. \ Default None, meaning there is no regularization. 
diff --git a/python/paddle/optimizer/rmsprop.py b/python/paddle/optimizer/rmsprop.py index 13537b7683387a..07bb27b46e6b3b 100644 --- a/python/paddle/optimizer/rmsprop.py +++ b/python/paddle/optimizer/rmsprop.py @@ -92,7 +92,7 @@ class RMSProp(Optimizer): weight_decay (float|WeightDecayRegularizer, optional): The strategy of regularization. It can be a float value as coeff of L2 regularization or \ :ref:`api_base_regularizer_L1Decay`, :ref:`api_base_regularizer_L2Decay`. - If a parameter has set regularizer using :ref:`api_base_ParamAttr` already, + If a parameter has set regularizer using :ref:`api_paddle_ParamAttr` already, the regularization setting here in optimizer will be ignored for this parameter. Otherwise, the regularization setting here in optimizer will take effect. Default None, meaning there is no regularization. diff --git a/python/paddle/optimizer/sgd.py b/python/paddle/optimizer/sgd.py index b2773f51908c19..e0edcbfc0e395e 100644 --- a/python/paddle/optimizer/sgd.py +++ b/python/paddle/optimizer/sgd.py @@ -41,7 +41,7 @@ class SGD(Optimizer): weight_decay (float|WeightDecayRegularizer, optional): The strategy of regularization. \ It can be a float value as coeff of L2 regularization or \ :ref:`api_base_regularizer_L1Decay`, :ref:`api_base_regularizer_L2Decay`. - If a parameter has set regularizer using :ref:`api_base_ParamAttr` already, \ + If a parameter has set regularizer using :ref:`api_paddle_ParamAttr` already, \ the regularization setting here in optimizer will be ignored for this parameter. \ Otherwise, the regularization setting here in optimizer will take effect. \ Default None, meaning there is no regularization. diff --git a/python/paddle/static/nn/common.py b/python/paddle/static/nn/common.py index a5743cc2bce524..e0c189b11e356e 100644 --- a/python/paddle/static/nn/common.py +++ b/python/paddle/static/nn/common.py @@ -2566,10 +2566,10 @@ def bilinear_tensor_product( :ref:`api_guide_Name` . 
Usually name is no need to set and None by default. param_attr (ParamAttr|None): To specify the weight parameter attribute. Default: None, which means the default weight parameter property is - used. See usage for details in :ref:`api_base_ParamAttr` . + used. See usage for details in :ref:`api_paddle_ParamAttr` . bias_attr (ParamAttr|None): To specify the bias parameter attribute. Default: None, which means the default bias parameter property is - used. See usage for details in :ref:`api_base_ParamAttr` . + used. See usage for details in :ref:`api_paddle_ParamAttr` . Returns: Tensor, A 2-D Tensor of shape [batch_size, size]. Data type is the same as input **x**. diff --git a/python/paddle/static/nn/loss.py b/python/paddle/static/nn/loss.py index 7f1ef25622612c..870e2144fa86c8 100644 --- a/python/paddle/static/nn/loss.py +++ b/python/paddle/static/nn/loss.py @@ -62,10 +62,10 @@ def nce( sample is 1.0. param_attr (ParamAttr|None): To specify the weight parameter attribute. Default: None, which means the default weight parameter property is - used. See usage for details in :ref:`api_base_ParamAttr` . + used. See usage for details in :ref:`api_paddle_ParamAttr` . bias_attr (ParamAttr|None): To specify the bias parameter attribute. Default: None, which means the default bias parameter property is - used. See usage for details in :ref:`api_base_ParamAttr` . + used. See usage for details in :ref:`api_paddle_ParamAttr` . num_neg_samples (int): ${num_neg_samples_comment}. name(str|None): For detailed information, please refer to :ref:`api_guide_Name` . Usually name is no need to set and None by default. diff --git a/python/paddle/static/nn/sequence_lod.py b/python/paddle/static/nn/sequence_lod.py index eb4329d22086ab..1d1624c76c425c 100644 --- a/python/paddle/static/nn/sequence_lod.py +++ b/python/paddle/static/nn/sequence_lod.py @@ -108,9 +108,9 @@ def sequence_conv( on both sides of the sequence. 
If set 0, the length of :math:`filter\_size - 1` data is padded at the end of each input sequence. Default: None. bias_attr (ParamAttr): To specify the bias parameter property. Default: None, which means the - default bias parameter property is used. See usage for details in :ref:`api_base_ParamAttr` . + default bias parameter property is used. See usage for details in :ref:`api_paddle_ParamAttr` . param_attr (ParamAttr): To specify the weight parameter property. Default: None, which means the - default weight parameter property is used. See usage for details in :ref:`api_base_ParamAttr` . + default weight parameter property is used. See usage for details in :ref:`api_paddle_ParamAttr` . act (str): Activation to be applied to the output of this layer, such as tanh, softmax, sigmoid, relu. For more information, please refer to :ref:`api_guide_activations_en` . Default: None. name (str, optional): The default value is None. Normally there is no need for user to set this property.