Enable eager mode on xpu #46227

Merged: 6 commits, Sep 26, 2022
Changes from all commits
27 changes: 27 additions & 0 deletions paddle/fluid/pybind/eager_method.cc
@@ -215,6 +215,33 @@ static PyObject* tensor_method_numpy(TensorObject* self,
kind);
}
#endif
#if defined(PADDLE_WITH_XPU)
} else if (self->tensor.is_xpu()) {
platform::CPUPlace place;
if (self->tensor.is_selected_rows()) {
VLOG(6) << "Getting SelectedRows's numpy value";
auto* selected_rows =
static_cast<phi::SelectedRows*>(self->tensor.impl().get());
auto* dense_tensor = static_cast<paddle::framework::LoDTensor*>(
selected_rows->mutable_value());
paddle::memory::Copy(
place,
reinterpret_cast<void*>(pybind11::detail::array_proxy(array)->data),
dense_tensor->place(),
dense_tensor->data(),
sizeof_dtype * numel);
} else {
VLOG(6) << "Getting DenseTensor's numpy value";
auto dense_tensor =
std::dynamic_pointer_cast<phi::DenseTensor>(self->tensor.impl());
paddle::memory::Copy(
place,
reinterpret_cast<void*>(pybind11::detail::array_proxy(array)->data),
dense_tensor->place(),
dense_tensor->data(),
sizeof_dtype * numel);
}
#endif
#ifdef PADDLE_WITH_CUSTOM_DEVICE
} else if (self->tensor.is_custom_device()) {
if (self->tensor.is_selected_rows()) {
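For reference, the new `is_xpu()` branch mirrors the existing GPU and custom-device paths: when `numpy()` is requested for an XPU tensor, its buffer is copied into the host memory backing the returned array. A minimal usage sketch from the Python side, assuming a `PADDLE_WITH_XPU` build with an attached XPU device (device string and shape are illustrative):

```python
# Minimal sketch, assuming PaddlePaddle was built with PADDLE_WITH_XPU and an
# XPU device is available; the device string and tensor shape are illustrative.
import paddle

paddle.set_device('xpu')        # place new tensors on the XPU
x = paddle.uniform([2, 3])      # DenseTensor resident on the XPU
arr = x.numpy()                 # under eager mode this now takes the is_xpu()
                                # branch added above: data is copied
                                # XPU -> CPU into the numpy buffer
print(arr.shape)                # (2, 3)
```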
8 changes: 8 additions & 0 deletions paddle/phi/api/include/tensor.h
@@ -286,6 +286,14 @@ class PADDLE_API Tensor final {
*/
bool is_gpu_pinned() const;

/**
* @brief Determine whether the tensor device is XPU
*
* @return true
* @return false
*/
bool is_xpu() const;

/**
* @brief Determine whether the tensor device is CustomDevice
*
2 changes: 2 additions & 0 deletions paddle/phi/api/lib/tensor.cc
@@ -157,6 +157,8 @@ bool Tensor::is_gpu_pinned() const {
return paddle::platform::is_cuda_pinned_place(place());
}

bool Tensor::is_xpu() const { return paddle::platform::is_xpu_place(place()); }

bool Tensor::is_custom_device() const {
return paddle::platform::is_custom_place(place());
}
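`Tensor::is_xpu()` is the phi C++ counterpart of the place checks already exposed for CPU and GPU tensors. A rough Python-side equivalent for sanity-checking placement, assuming an XPU build (the exact place repr is illustrative):

```python
# Illustrative placement check; assumes an XPU build of Paddle.
import paddle

paddle.set_device('xpu')
t = paddle.zeros([2])
print(t.place)                     # e.g. Place(xpu:0)
print(paddle.device.get_device())  # e.g. 'xpu:0'
```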
10 changes: 5 additions & 5 deletions python/paddle/fluid/framework.py
@@ -181,9 +181,9 @@ def _fallback_legacy_dygraph():
global _in_eager_mode_
global _is_first_import_
need_fallback = False
# Only enable eager on CPU/GPU
is_not_support = core.is_compiled_with_xpu() or core.is_compiled_with_npu(
) or core.is_compiled_with_ipu() or core.is_compiled_with_mlu()
# Only enable eager on CPU/GPU/XPU
is_not_support = core.is_compiled_with_npu() or core.is_compiled_with_ipu(
) or core.is_compiled_with_mlu()

if _in_eager_mode_ and is_not_support:
# switch into legacy dygraph mode
@@ -245,8 +245,8 @@ def _non_static_mode():

@signature_safe_contextmanager
def _test_eager_guard(place=None):
# FIXME(dev): We haven't fully verified eager mode on XPU/NPU et.al but
# only GPU/CPU. Remove this after we improve this feature.
# FIXME(dev): We haven't fully verified eager mode on NPU et.al but
# only GPU/CPU/XPU. Remove this after we improve this feature.
already_fallback = _fallback_legacy_dygraph()
if not already_fallback:
_disable_legacy_dygraph()
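The net effect of the framework.py change is that an XPU build no longer forces a fallback to legacy dygraph, so `_test_eager_guard` keeps eager mode on. A hedged sketch of what the guard now does, using the names from the diff above (assumes a CPU/GPU/XPU build):

```python
# Sketch only; assumes a CPU/GPU/XPU build. On NPU/IPU/MLU builds
# _fallback_legacy_dygraph() still switches back to legacy dygraph.
import paddle
from paddle.fluid.framework import _test_eager_guard

with _test_eager_guard():
    x = paddle.ones([2, 2])
    print(type(x))  # eager Tensor rather than a legacy dygraph VarBase
```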
18 changes: 11 additions & 7 deletions python/paddle/fluid/tests/unittests/xpu/test_dropout_op_xpu.py
@@ -19,6 +19,7 @@
import unittest
import numpy as np
import paddle.fluid.core as core
from paddle import _legacy_C_ops
from op_test import OpTest, skip_check_grad_ci
import paddle
import paddle.fluid as fluid
@@ -185,7 +186,8 @@ def test_backward_downscale_in_infer(self):

input = paddle.uniform([40, 40], dtype=self.in_type)
input.stop_gradient = False
out, mask = core.ops.dropout(input, 'dropout_prob', 0.5)
out, mask = _legacy_C_ops.dropout(input, 'dropout_prob',
0.5)
out.backward()

np.testing.assert_allclose(
@@ -199,9 +201,10 @@ def test_backward_upscale_train(self):
prob = 0.5
input = paddle.uniform([40, 40], dtype=self.in_type)
input.stop_gradient = False
out, mask = core.ops.dropout(input, 'dropout_prob', prob,
"dropout_implementation",
"upscale_in_train")
out, mask = _legacy_C_ops.dropout(input, 'dropout_prob',
prob,
"dropout_implementation",
"upscale_in_train")
out.backward()

np.testing.assert_allclose(
@@ -215,9 +218,10 @@ def test_backward_upscale_train_2(self):
prob = 0.3
input = paddle.uniform([40, 40], dtype=self.in_type)
input.stop_gradient = False
out, mask = core.ops.dropout(input, 'dropout_prob', prob,
"dropout_implementation",
"upscale_in_train")
out, mask = _legacy_C_ops.dropout(input, 'dropout_prob',
prob,
"dropout_implementation",
"upscale_in_train")
out.backward()

np.testing.assert_allclose(
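The test updates above (and in the following files) are the same mechanical migration: the removed `core.ops.<op>(...)` calls are replaced by `paddle._legacy_C_ops.<op>(...)`, which remains importable once eager mode is the default. A sketch of the pattern (shapes and dropout probability are illustrative):

```python
# Migration pattern only; shapes and dropout_prob are illustrative.
import paddle
from paddle import _legacy_C_ops

x = paddle.uniform([40, 40])
x.stop_gradient = False

# old call path in the pre-eager tests:
#   out, mask = core.ops.dropout(x, 'dropout_prob', 0.5)
out, mask = _legacy_C_ops.dropout(x, 'dropout_prob', 0.5)
out.backward()
```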
@@ -21,6 +21,7 @@
import numpy as np
import paddle
import paddle.fluid.core as core
from paddle import _legacy_C_ops
from op_test_xpu import XPUOpTest
from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper

@@ -251,15 +252,15 @@ def test_case_act(self):
x.stop_gradient = False
y.stop_gradient = False

out1 = core.ops.fused_gemm_epilogue(x, y, bias, 'trans_x', False,
'trans_y', False, 'activation',
'none')
out2 = core.ops.fused_gemm_epilogue(x, y, bias, 'trans_x', False,
'trans_y', False, 'activation',
'relu')
out3 = core.ops.fused_gemm_epilogue(x, y, bias, 'trans_x', False,
'trans_y', False, 'activation',
'gelu')
out1 = _legacy_C_ops.fused_gemm_epilogue(x, y, bias, 'trans_x', False,
'trans_y', False, 'activation',
'none')
out2 = _legacy_C_ops.fused_gemm_epilogue(x, y, bias, 'trans_x', False,
'trans_y', False, 'activation',
'relu')
out3 = _legacy_C_ops.fused_gemm_epilogue(x, y, bias, 'trans_x', False,
'trans_y', False, 'activation',
'gelu')

out_np1 = get_output(x_np, y_np, bias_np, 'none')
out_np2 = get_output(x_np, y_np, bias_np, 'relu')
@@ -24,6 +24,7 @@
import paddle.fluid as fluid
import paddle.nn as nn
from paddle.fluid import core
from paddle import _legacy_C_ops
from paddle.incubate.xpu.resnet_block import ResNetBasicBlock
from paddle.fluid.framework import default_main_program
from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper
4 changes: 2 additions & 2 deletions python/paddle/incubate/xpu/resnet_block.py
@@ -71,7 +71,7 @@ def resnet_basic_block(x,
trainable_statistics=False,
find_conv_max=True):

if fluid.framework.in_dygraph_mode():
if fluid.framework._non_static_mode():
attrs = ('stride1', stride1, 'stride2', stride2, 'stride3', stride3,
'padding1', padding1, 'padding2', padding2, 'padding3',
padding3, 'dilation1', dilation1, 'dilation2', dilation2,
@@ -83,7 +83,7 @@
find_conv_max)

out, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _ = \
getattr(_C_ops, "resnet_basic_block")(x, filter1, scale1, bias1, mean1, var1, filter2, scale2, bias2, mean2, var2, \
_legacy_C_ops.resnet_basic_block(x, filter1, scale1, bias1, mean1, var1, filter2, scale2, bias2, mean2, var2, \
filter3, scale3, bias3, mean3, var3, mean1, var1, mean2, var2, mean3, var3, *attrs)
return out
helper = LayerHelper('resnet_basic_block', **locals())
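The resnet_block.py change is worth a note: `in_dygraph_mode()` is true only in the new eager mode, while `_non_static_mode()` is true in both eager and legacy dygraph, so the dynamic-graph branch now fires in either mode, and the op is invoked through `_legacy_C_ops` directly instead of `getattr(_C_ops, ...)`. A hedged sketch of the guard semantics, assumed from the framework helpers shown earlier in this diff (branch bodies are placeholders):

```python
# Sketch of the mode check only; branch bodies are placeholders.
import paddle.fluid as fluid

def resnet_basic_block_dispatch():
    if fluid.framework._non_static_mode():
        # eager mode *or* legacy dygraph: call the C++ op directly
        return 'dynamic-graph path (_legacy_C_ops.resnet_basic_block)'
    # static graph: build the op through LayerHelper / append_op
    return 'static-graph path'
```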