Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

migrate code example and doc #27627

Merged
merged 4 commits into from
Sep 29, 2020
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
95 changes: 21 additions & 74 deletions python/paddle/fluid/layers/loss.py
Original file line number Diff line number Diff line change
Expand Up @@ -302,9 +302,6 @@ def cross_entropy2(input, label, ignore_index=kIgnoreIndex):

def square_error_cost(input, label):
"""
:alias_main: paddle.nn.functional.square_error_cost
:alias: paddle.nn.functional.square_error_cost,paddle.nn.functional.loss.square_error_cost
:old_api: paddle.fluid.layers.square_error_cost

This op accepts input predictions and target label and returns the
squared error cost.
Expand All @@ -316,49 +313,26 @@ def square_error_cost(input, label):
Out = (input - label)^2

Parameters:
input (Variable): Input tensor, the data type should be float32.
label (Variable): Label tensor, the data type should be float32.
input (Tensor): Input tensor, the data type should be float32.
label (Tensor): Label tensor, the data type should be float32.

Returns:
The tensor variable storing the element-wise squared error \
The tensor storing the element-wise squared error \
difference between input and label.

Return type: Variable.
Return type: Tensor.

Examples:

.. code-block:: python

# declarative mode
import paddle.fluid as fluid
import numpy as np
input = fluid.data(name="input", shape=[1])
label = fluid.data(name="label", shape=[1])
output = fluid.layers.square_error_cost(input,label)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())

input_data = np.array([1.5]).astype("float32")
label_data = np.array([1.7]).astype("float32")
output_data = exe.run(fluid.default_main_program(),
feed={"input":input_data, "label":label_data},
fetch_list=[output],
return_numpy=True)

print(output_data)
# [array([0.04000002], dtype=float32)]

# imperative mode
import paddle.fluid.dygraph as dg

with dg.guard(place) as g:
input = dg.to_variable(input_data)
label = dg.to_variable(label_data)
output = fluid.layers.square_error_cost(input, label)
print(output.numpy())

# [0.04000002]
import paddle
input = paddle.to_tensor([1.1, 1.9])
label = paddle.to_tensor([1.0, 2.0])
output = paddle.nn.functional.square_error_cost(input, label)
print(output.numpy())
# [0.01, 0.01]

"""
check_variable_and_dtype(input, "input", ['float32', 'float64'],
'square_error_cost')
Expand Down Expand Up @@ -1777,9 +1751,6 @@ def npair_loss(anchor, positive, labels, l2_reg=0.002):

def mse_loss(input, label):
"""
:alias_main: paddle.nn.functional.mse_loss
:alias: paddle.nn.functional.mse_loss,paddle.nn.functional.loss.mse_loss
:old_api: paddle.fluid.layers.mse_loss

This op accepts input predictions and target label and returns the mean square error.

Expand All @@ -1790,47 +1761,23 @@ def mse_loss(input, label):
Out = MEAN((input - label)^2)

Parameters:
input (Variable): Input tensor, the data type should be float32.
label (Variable): Label tensor, the data type should be float32.
input (Tensor): Input tensor, the data type should be float32.
label (Tensor): Label tensor, the data type should be float32.

Returns:
Variable: The tensor variable storing the mean square error difference of input and label.
Tensor: The tensor storing the mean square error difference of input and label.

Return type: Variable.
Return type: Tensor.

Examples:
.. code-block:: python
# declarative mode
import paddle.fluid as fluid
import numpy as np
input = fluid.data(name="input", shape=[1])
label = fluid.data(name="label", shape=[1])
output = fluid.layers.mse_loss(input,label)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())

input_data = np.array([1.5]).astype("float32")
label_data = np.array([1.7]).astype("float32")
output_data = exe.run(fluid.default_main_program(),
feed={"input":input_data, "label":label_data},
fetch_list=[output],
return_numpy=True)

print(output_data)
# [array([0.04000002], dtype=float32)]

# imperative mode
import paddle.fluid.dygraph as dg

with dg.guard(place) as g:
input = dg.to_variable(input_data)
label = dg.to_variable(label_data)
output = fluid.layers.mse_loss(input, label)
print(output.numpy())

# [0.04000002]

import paddle
input = paddle.to_tensor([1.1, 1.9])
label = paddle.to_tensor([1.0, 2.0])
output = paddle.fluid.layers.mse_loss(input, label)
print(output.numpy())
# [0.01]
"""
check_variable_and_dtype(input, "input", ['float32', 'float64'], 'mse_loss')
check_variable_and_dtype(label, "label", ['float32', 'float64'], 'mse_loss')
Expand Down
46 changes: 18 additions & 28 deletions python/paddle/fluid/layers/nn.py
Original file line number Diff line number Diff line change
Expand Up @@ -2306,17 +2306,14 @@ def is_list_or_tuple(ele):
return pool_out


@deprecated(since="2.0.0", update_to="paddle.nn.functional.adaptive_pool2d")
@deprecated(since="2.0.0")
@templatedoc(op_type="pool2d")
def adaptive_pool2d(input,
pool_size,
pool_type="max",
require_index=False,
name=None):
"""
:alias_main: paddle.nn.functional.adaptive_pool2d
:alias: paddle.nn.functional.adaptive_pool2d,paddle.nn.functional.pooling.adaptive_pool2d
:old_api: paddle.fluid.layers.adaptive_pool2d

This operation calculates the output based on the input, pool_size,
pool_type parameters. Input(X) and output(Out) are in NCHW format, where N is batch
Expand All @@ -2340,7 +2337,7 @@ def adaptive_pool2d(input,
Output(i ,j) &= \\frac{sum(Input[hstart:hend, wstart:wend])}{(hend - hstart) * (wend - wstart)}

Args:
input (Variable): The input tensor of pooling operator, which is a 4-D tensor
input (Tensor): The input tensor of pooling operator, which is a 4-D tensor
with shape [N, C, H, W]. The format of input tensor is NCHW,
where N is batch size, C is the number of channels, H is the
height of the feature, and W is the width of the feature.
Expand All @@ -2355,7 +2352,7 @@ def adaptive_pool2d(input,
None by default.

Returns:
Variable: The output tensor of adaptive pooling result. The data type is same
Tensor: The output tensor of adaptive pooling result. The data type is same
as input tensor.

Raises:
Expand All @@ -2381,9 +2378,9 @@ def adaptive_pool2d(input,
# wend = ceil((i + 1) * W / n)
# output[:, :, i, j] = avg(input[:, :, hstart: hend, wstart: wend])
#
import paddle.fluid as fluid
data = fluid.data(name='data', shape=[None, 3, 32, 32], dtype='float32')
pool_out = fluid.layers.adaptive_pool2d(
import paddle
data = paddle.rand(shape=[1,3,32,32])
pool_out = paddle.fluid.layers.adaptive_pool2d(
input=data,
pool_size=[3, 3],
pool_type='avg')
Expand All @@ -2403,9 +2400,9 @@ def adaptive_pool2d(input,
# wend = ceil((i + 1) * W / n)
# output[:, :, i, j] = max(input[:, :, hstart: hend, wstart: wend])
#
import paddle.fluid as fluid
data = fluid.data(name='data', shape=[None, 3, 32, 32], dtype='float32')
pool_out = fluid.layers.adaptive_pool2d(
import paddle
data = paddle.rand(shape=[1,3,32,32])
pool_out = paddle.fluid.layers.adaptive_pool2d(
input=data,
pool_size=[3, 3],
pool_type='max')
Expand Down Expand Up @@ -2454,17 +2451,14 @@ def adaptive_pool2d(input,
return (pool_out, mask) if require_index else pool_out


@deprecated(since="2.0.0", update_to="paddle.nn.functional.adaptive_pool3d")
@deprecated(since="2.0.0")
@templatedoc(op_type="pool3d")
def adaptive_pool3d(input,
pool_size,
pool_type="max",
require_index=False,
name=None):
"""
:alias_main: paddle.nn.functional.adaptive_pool3d
:alias: paddle.nn.functional.adaptive_pool3d,paddle.nn.functional.pooling.adaptive_pool3d
:old_api: paddle.fluid.layers.adaptive_pool3d

This operation calculates the output based on the input, pool_size,
pool_type parameters. Input(X) and output(Out) are in NCDHW format, where N is batch
Expand Down Expand Up @@ -2493,7 +2487,7 @@ def adaptive_pool3d(input,
Output(i ,j, k) &= \\frac{sum(Input[dstart:dend, hstart:hend, wstart:wend])}{(dend - dstart) * (hend - hstart) * (wend - wstart)}

Args:
input (Variable): The input tensor of pooling operator, which is a 5-D tensor with
input (Tensor): The input tensor of pooling operator, which is a 5-D tensor with
shape [N, C, D, H, W]. The format of input tensor is NCDHW, where
N is batch size, C is the number of channels, D is the depth of the feature,
H is the height of the feature, and W is the width of the feature.
Expand All @@ -2508,7 +2502,7 @@ def adaptive_pool3d(input,
None by default.

Returns:
Variable: The output tensor of adaptive pooling result. The data type is same as input tensor.
Tensor: The output tensor of adaptive pooling result. The data type is same as input tensor.

Raises:
ValueError: 'pool_type' is not 'max' nor 'avg'.
Expand Down Expand Up @@ -2538,11 +2532,9 @@ def adaptive_pool3d(input,
# avg(input[:, :, dstart:dend, hstart: hend, wstart: wend])
#

import paddle.fluid as fluid

data = fluid.data(
name='data', shape=[None, 3, 32, 32, 32], dtype='float32')
pool_out = fluid.layers.adaptive_pool3d(
import paddle
data = paddle.rand(shape=[1,3,32,32,32])
pool_out = paddle.fluid.layers.adaptive_pool3d(
input=data,
pool_size=[3, 3, 3],
pool_type='avg')
Expand All @@ -2567,11 +2559,9 @@ def adaptive_pool3d(input,
# avg(input[:, :, dstart:dend, hstart: hend, wstart: wend])
#

import paddle.fluid as fluid

data = fluid.data(
name='data', shape=[None, 3, 32, 32, 32], dtype='float32')
pool_out = fluid.layers.adaptive_pool3d(
import paddle
data = paddle.rand(shape=[1,3,32,32,32])
pool_out = paddle.fluid.layers.adaptive_pool3d(
input=data,
pool_size=[3, 3, 3],
pool_type='max')
Expand Down
4 changes: 0 additions & 4 deletions python/paddle/nn/functional/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -174,16 +174,12 @@
from .pooling import pool2d #DEFINE_ALIAS
from .pooling import pool3d #DEFINE_ALIAS
from .pooling import avg_pool1d #DEFINE_ALIAS
from .pooling import adaptive_pool2d #DEFINE_ALIAS
from .pooling import adaptive_pool3d #DEFINE_ALIAS
from .pooling import avg_pool2d #DEFINE_ALIAS
from .pooling import avg_pool3d #DEFINE_ALIAS
from .pooling import max_pool1d #DEFINE_ALIAS
from .pooling import max_pool2d #DEFINE_ALIAS
from .pooling import max_pool3d #DEFINE_ALIAS

from .pooling import adaptive_pool2d #DEFINE_ALIAS
from .pooling import adaptive_pool3d #DEFINE_ALIAS
from .pooling import adaptive_max_pool1d #DEFINE_ALIAS
from .pooling import adaptive_max_pool2d #DEFINE_ALIAS
from .pooling import adaptive_max_pool3d #DEFINE_ALIAS
Expand Down
4 changes: 0 additions & 4 deletions python/paddle/nn/functional/pooling.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,8 +15,6 @@
# TODO: define pooling functions
from ...fluid.layers import pool2d #DEFINE_ALIAS
from ...fluid.layers import pool3d #DEFINE_ALIAS
from ...fluid.layers import adaptive_pool2d #DEFINE_ALIAS
from ...fluid.layers import adaptive_pool3d #DEFINE_ALIAS
from ...fluid import core
from ...fluid.framework import in_dygraph_mode
from ...fluid.layers import utils, LayerHelper, unsqueeze, squeeze
Expand All @@ -25,8 +23,6 @@
__all__ = [
'pool2d',
'pool3d',
'adaptive_pool2d',
'adaptive_pool3d',
'avg_pool1d',
'avg_pool2d',
'avg_pool3d',
Expand Down