correct sync behavior for XPU distributed training #47882

Merged: 3 commits, Nov 18, 2022
18 changes: 3 additions & 15 deletions paddle/fluid/distributed/collective/BKCLTools.h
@@ -77,23 +77,11 @@ class XPUEventManager {
                           device_index_));

     platform::XPUDeviceGuard guard(device_index_);
-    PADDLE_ENFORCE_XPU_SUCCESS(xpu_event_record(event_, ctx.stream()));
+    // TODO(zhangxiaoci) temporary solution: xpu::event seems buggy
+    PADDLE_ENFORCE_XPU_SUCCESS(xpu_wait(ctx.stream()));
   }

-  void Block(const XPUContext& ctx) const {
-    if (is_created_) {
-      auto device_index = ctx.GetPlace().device;
-      PADDLE_ENFORCE_EQ(device_index,
-                        device_index_,
-                        platform::errors::PreconditionNotMet(
-                            "XPUContext's device %d does not match"
-                            "Event's device %d",
-                            device_index,
-                            device_index_));
-      platform::XPUDeviceGuard guard(device_index_);
-      PADDLE_ENFORCE_XPU_SUCCESS(xpu_stream_wait_event(ctx.stream(), event_));
-    }
-  }
+  void Block(const XPUContext& ctx) const {}

  private:
   bool is_created_{false};
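For context, here is a minimal sketch of the two synchronization schemes the TODO contrasts, using only the runtime calls visible in this diff (xpu_event_record, xpu_stream_wait_event, xpu_wait). The stream and event variable names are placeholders; this is an illustration, not code from the PR:

// (a) Event-based cross-stream ordering -- what Record()/Block() did before,
//     and presumably what they return to once xpu::event is fixed:
xpu_event_record(event, producer_stream);       // mark a point on the producer stream
xpu_stream_wait_event(consumer_stream, event);  // consumer waits only for that point

// (b) Workaround in this PR: Record() synchronizes the whole producer stream
//     on the host, so Block() has nothing left to wait for and becomes a no-op:
xpu_wait(producer_stream);                      // host blocks until the stream drains

The workaround trades stream overlap for correctness: every Record() now stalls the CPU, but no ordering can be missed.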
6 changes: 6 additions & 0 deletions paddle/fluid/distributed/collective/ProcessGroupBKCL.cc
@@ -57,8 +57,14 @@ bool ProcessGroupBKCL::BKCLTask::Wait(std::chrono::milliseconds timeout) {

   if (barrier_) {
     // If we use the work to do barrier, we should block cpu
+
+    // TODO(zhangxiaoci) There is no such function that can sync entire device
+    // for xpu (for now), so all we can do is sync whatever stream that we know
+    // and hope for the best. Note that for correctness the communication stream
+    // needs to be in sync mode.
     platform::XPUDeviceGuard guard(place_.GetDeviceId());
+    xpu_wait();
     calc_ctx->Wait();
   }
   return true;
 }
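As a hedged sketch of how this path is reached (the Barrier() call and the task type are assumptions based on the surrounding ProcessGroup API, not verified against this exact revision):

// Hypothetical caller-side view:
auto task = process_group->Barrier();  // collective no-op used as a rendezvous
task->Wait();                          // barrier_ == true, so Wait() blocks the CPU:
                                       //   1. xpu_wait()       -- default stream
                                       //   2. calc_ctx->Wait() -- compute stream

Any stream the runtime does not know about here is left unsynchronized, which is why the comment insists that the communication stream run in sync mode.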
2 changes: 1 addition & 1 deletion paddle/phi/backends/xpu/xpu_context.cc
@@ -64,7 +64,7 @@ struct XPUContext::Impl {
     // manually destroy XPUStream here until xpu::api integrates this work
     // into Context dtor
     xpu_wait(context_->xpu_stream);
-    PADDLE_ENFORCE_XPU_SUCCESS(xpu_stream_destroy(context_->xpu_stream));
+    xpu_stream_destroy(context_->xpu_stream);
Review comment on the xpu_stream_destroy line:

Contributor: Why was the return-value check removed?

Contributor (author): The compiler complains about throwing an exception inside a destructor. (There is in fact a try/catch here, so the program would not terminate abnormally at runtime, but it could still leak memory.) The cleanest approach is simply not to check; if xpu_stream_destroy fails, we can rely on the runtime's log.

     context_->xpu_stream = nullptr;
     xpu::destroy_context(context_);
     context_ = nullptr;
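The rationale in the thread is a general C++ rule, not something Paddle-specific. A self-contained sketch of the hazard, assuming default C++11 destructor semantics (the struct and message are illustrative only):

#include <stdexcept>

struct StreamHolder {
  ~StreamHolder() {  // destructors are implicitly noexcept(true) since C++11
    // An enforce-style check that throws on failure would throw from here:
    throw std::runtime_error("stream destroy failed");  // calls std::terminate()
  }
};

int main() {
  try {
    StreamHolder s;
  } catch (const std::exception&) {
    // Never reached: the noexcept destructor terminates the program first.
  }
}

In this PR's case the author notes a try/catch keeps the process alive, but unwinding out of the destructor would still skip the remaining cleanup (resetting xpu_stream and calling xpu::destroy_context), hence the possible leak; logging and continuing is the safer pattern.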
@@ -13,8 +13,6 @@ See the License for the specific language governing permissions and
 limitations under the License. */

 #include "paddle/phi/kernels/funcs/concat_and_split_functor.h"
-
-#include "paddle/fluid/platform/device_context.h"
 #include "paddle/phi/backends/xpu/enforce_xpu.h"

 namespace phi {
@@ -67,14 +65,7 @@ class ConcatFunctor<XPUContext, T> {
                        reinterpret_cast<XPUType*>(output->data<T>()),
                        xdims_list,
                        axis);
-    PADDLE_ENFORCE_EQ(
-        r,
-        XPU_SUCCESS,
-        paddle::platform::errors::External(
-            "XPU API return wrong value[%d %s], please check whether "
-            "Baidu Kunlun Card is properly installed.",
-            r,
-            XPUAPIErrorMsg[r]));
+    PADDLE_ENFORCE_XDNN_SUCCESS(r, "concat");
   }
 };
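The replacement collapses the hand-rolled eight-line check into the shared macro from paddle/phi/backends/xpu/enforce_xpu.h (included above). A minimal sketch of the resulting pattern; input_list and output_ptr abbreviate the arguments shown in the diff:

int r = xpu::concat<XPUType>(ctx.x_context(), input_list, output_ptr,
                             xdims_list, axis);  // arguments abbreviated
PADDLE_ENFORCE_XDNN_SUCCESS(r, "concat");        // throws a formatted error carrying
                                                 // the XDNN status and op name

The same substitution is applied to the split path below.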

@@ -126,14 +117,7 @@ class SplitFunctor<XPUContext, T> {
                       xdims_list,
                       split_list,
                       axis);
-    PADDLE_ENFORCE_EQ(
-        r,
-        XPU_SUCCESS,
-        paddle::platform::errors::External(
-            "XPU API return wrong value[%d %s], please check whether "
-            "Baidu Kunlun Card is properly installed.",
-            r,
-            XPUAPIErrorMsg[r]));
+    PADDLE_ENFORCE_XDNN_SUCCESS(r, "split");
   }
 };