Commit ce207c3

[MLU]add mlu kernel for allreduce (#39788)

kangna-qi authored Feb 24, 2022
1 parent c5ae43a commit ce207c3

Showing 7 changed files with 228 additions and 3 deletions.
68 changes: 66 additions & 2 deletions paddle/fluid/operators/collective/c_allreduce_op.h
@@ -23,8 +23,9 @@ limitations under the License. */
#include "paddle/fluid/memory/memory.h"
#include "paddle/fluid/platform/device/npu/npu_op_runner.h"

#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) || \
defined(PADDLE_WITH_ASCEND_CL) || defined(PADDLE_WITH_XPU_BKCL)
#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) || \
defined(PADDLE_WITH_ASCEND_CL) || defined(PADDLE_WITH_XPU_BKCL) || \
defined(PADDLE_WITH_CNCL)
#include "paddle/fluid/platform/collective_helper.h"
#endif

@@ -45,6 +46,10 @@ limitations under the License. */
#include "paddle/fluid/platform/device/npu/hccl_helper.h"
#endif

#if defined(PADDLE_WITH_CNCL)
#include "paddle/fluid/platform/device/mlu/cncl_helper.h"
#endif

#if defined(PADDLE_WITH_ASCEND_CL)
DECLARE_bool(hccl_check_nan);
#endif
@@ -398,6 +403,65 @@ class CAllReduceOpCUDAKernel : public framework::OpKernel<T> {
}
};

template <ReduceType red_type, typename T>
class CAllReduceOpMLUKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
#if defined(PADDLE_WITH_CNCL)
auto in = ctx.Input<framework::Tensor>("X");
auto out = ctx.Output<framework::Tensor>("Out");

auto place = ctx.GetPlace();
cnclDataType_t dtype =
platform::ToCNCLDataType(framework::TransToProtoVarType(in->type()));
int64_t numel = in->numel();
const void* sendbuff = in->data<T>();
out->Resize(in->dims());
void* recvbuff = out->mutable_data<T>(place);

int rid = ctx.Attr<int>("ring_id");
auto comm = platform::CNCLCommContext::Instance().Get(rid, place);

mluStream stream = nullptr;
if (ctx.Attr<bool>("use_calc_stream")) {
auto dev_ctx = platform::DeviceContextPool::Instance().Get(place);
stream = static_cast<platform::MLUDeviceContext*>(dev_ctx)->stream();
} else {
stream = comm->stream();
}

cnclReduceOp_t cncl_red_type = cnclSum;
switch (red_type) {
case kRedSum:
cncl_red_type = cnclSum;
break;

case kRedMax:
cncl_red_type = cnclMax;
break;

case kRedMin:
cncl_red_type = cnclMin;
break;

case kRedProd:
cncl_red_type = cnclProd;
break;

default:
PADDLE_THROW(platform::errors::InvalidArgument(
"Invalid reduce type: %d", red_type));
}

PADDLE_ENFORCE_MLU_SUCCESS(cnclAllReduce(
sendbuff, recvbuff, numel, dtype, cncl_red_type, comm->comm(), stream));
#else
PADDLE_THROW(platform::errors::PreconditionNotMet(
"PaddlePaddle should compile with MLU."));
#endif
}
};

class CAllReduceOpMaker : public framework::OpProtoAndCheckerMaker {
public:
void Make() {
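Note the stream handling in the new CAllReduceOpMLUKernel above: unless the op's use_calc_stream attribute is true, the cnclAllReduce call is enqueued on the communicator's own stream rather than the computation stream, so downstream reads must synchronize first. A minimal static-graph sketch of the resulting usage pattern (program construction only; actually running it assumes an MLU build with CNCL and an initialized communicator on ring 0):

    import paddle
    import paddle.fluid as fluid
    from paddle.fluid import core

    paddle.enable_static()
    prog = fluid.Program()
    block = prog.global_block()
    x = block.create_var(
        name="x", dtype="float32", type=core.VarDesc.VarType.LOD_TENSOR)
    out = block.create_var(
        name="out", dtype="float32", type=core.VarDesc.VarType.LOD_TENSOR)
    # By default the allreduce runs on the communicator's stream ...
    block.append_op(
        type="c_allreduce_sum",
        inputs={'X': x},
        outputs={'Out': out},
        attrs={'ring_id': 0})
    # ... so a c_sync_comm_stream op joins it back to the calc stream
    # before the result is consumed (mirroring the test program below).
    block.append_op(
        type="c_sync_comm_stream",
        inputs={'X': out},
        outputs={'Out': out},
        attrs={'ring_id': 0})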
26 changes: 26 additions & 0 deletions paddle/fluid/operators/collective/c_allreduce_sum_op_mlu.cc
@@ -0,0 +1,26 @@
/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/operators/collective/c_allreduce_op.h"

namespace ops = paddle::operators;
namespace plat = paddle::platform;

REGISTER_OP_MLU_KERNEL(c_allreduce_sum,
ops::CAllReduceOpMLUKernel<ops::kRedSum, float>,
ops::CAllReduceOpMLUKernel<ops::kRedSum, plat::float16>,
ops::CAllReduceOpMLUKernel<ops::kRedSum, int>,
ops::CAllReduceOpMLUKernel<ops::kRedSum, int16_t>,
ops::CAllReduceOpMLUKernel<ops::kRedSum, int8_t>,
ops::CAllReduceOpMLUKernel<ops::kRedSum, uint8_t>)
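For context, the registration above (sum reduction over float32, float16, int32, int16, int8 and uint8) is what backs allreduce for these dtypes on MLU. A hedged dynamic-graph sketch using Paddle's public collective API (assuming an MLU build with CNCL, with one process per card as started by paddle.distributed.launch):

    import paddle
    import paddle.distributed as dist

    dist.init_parallel_env()
    x = paddle.ones([10, 1000], dtype="float32")
    dist.all_reduce(x)  # default reduction is sum, i.e. c_allreduce_sum
    # Every rank now holds the elementwise sum: each entry equals the
    # number of participating ranks.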
3 changes: 2 additions & 1 deletion paddle/fluid/operators/collective/c_broadcast_op_mlu.cc
@@ -30,7 +30,8 @@ class CBroadcastOPMLUKernel : public framework::OpKernel<T> {
auto x = ctx.Input<framework::LoDTensor>("X");
auto out = ctx.Output<framework::LoDTensor>("Out");
int numel = x->numel();
cnclDataType_t dtype = platform::ToCNCLDataType(x->type());
cnclDataType_t dtype =
platform::ToCNCLDataType(framework::TransToProtoVarType(x->type()));

int rid = ctx.Attr<int>("ring_id");
auto place = ctx.GetPlace();
1 change: 1 addition & 0 deletions python/paddle/fluid/tests/unittests/mlu/CMakeLists.txt
@@ -6,4 +6,5 @@ if (WITH_MLU)
py_test_modules(${TEST_OP} MODULES ${TEST_OP})
endforeach(TEST_OP)
set_tests_properties(test_collective_broadcast PROPERTIES TIMEOUT 120)
set_tests_properties(test_collective_allreduce PROPERTIES TIMEOUT 120)
endif()
70 changes: 70 additions & 0 deletions python/paddle/fluid/tests/unittests/mlu/collective_allreduce_op.py
@@ -0,0 +1,70 @@
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import numpy as np
import argparse
import os
import sys
import signal
import time
import socket
from contextlib import closing
from six import string_types
import math
import paddle
import paddle.fluid as fluid
import paddle.fluid.profiler as profiler
import paddle.fluid.unique_name as nameGen
from paddle.fluid import core
import unittest
from multiprocessing import Process
import paddle.fluid.layers as layers
from functools import reduce
from test_collective_base_mlu import TestCollectiveRunnerBase, runtime_main

paddle.enable_static()


class TestCollectiveAllreduce(TestCollectiveRunnerBase):
def __init__(self):
self.global_ring_id = 0

def get_model(self, main_prog, startup_program):
ring_id = 0
with fluid.program_guard(main_prog, startup_program):
tindata = layers.data(
name="tindata", shape=[10, 1000], dtype='float32')
toutdata = main_prog.current_block().create_var(
name="outofallreduce",
dtype='float32',
type=core.VarDesc.VarType.LOD_TENSOR,
persistable=False,
stop_gradient=False)
main_prog.global_block().append_op(
type="c_allreduce_sum",
inputs={'X': tindata},
attrs={'ring_id': ring_id},
outputs={'Out': toutdata})
main_prog.global_block().append_op(
type="c_sync_comm_stream",
inputs={'X': toutdata},
outputs={'Out': toutdata},
attrs={'ring_id': ring_id})
return toutdata


if __name__ == "__main__":
runtime_main(TestCollectiveAllreduce, "allreduce", 0)
55 changes: 55 additions & 0 deletions python/paddle/fluid/tests/unittests/mlu/test_collective_allreduce.py
@@ -0,0 +1,55 @@
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function
import sys
import unittest
import numpy as np
import paddle

from test_collective_base_mlu import TestDistBase

paddle.enable_static()


class TestCAllreduceOp(TestDistBase):
def _setup_config(self):
pass

def test_allreduce_fp32(self):
self.check_with_place("collective_allreduce_op.py", "allreduce",
"float32")

def test_allreduce_fp16(self):
self.check_with_place("collective_allreduce_op.py", "allreduce",
"float16")

def test_allreduce_int32(self):
self.check_with_place("collective_allreduce_op.py", "allreduce",
"int32")

def test_allreduce_int16(self):
self.check_with_place("collective_allreduce_op.py", "allreduce",
"int16")

def test_allreduce_int8(self):
self.check_with_place("collective_allreduce_op.py", "allreduce", "int8")

def test_allreduce_uint8(self):
self.check_with_place("collective_allreduce_op.py", "allreduce",
"uint8")


if __name__ == '__main__':
unittest.main()
8 changes: 8 additions & 0 deletions python/paddle/fluid/tests/unittests/mlu/test_collective_base_mlu.py
@@ -262,5 +262,13 @@ def check_with_place(self,
need_result = input2
self.assertTrue(np.allclose(tr0_out, need_result))
self.assertTrue(np.allclose(tr1_out, need_result))
elif col_type == "allreduce":
need_result = input1 + input2
self.assertTrue(
np.allclose(
tr0_out, need_result, rtol=1e-05, atol=1e-05))
self.assertTrue(
np.allclose(
tr1_out, need_result, rtol=1e-05, atol=1e-05))
else:
pass

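The new allreduce branch above checks that both trainers end up with the elementwise sum of the two input tensors, using fp32-friendly tolerances. A standalone numpy illustration of that expectation (shapes match the 10x1000 test input):

    import numpy as np

    # Stand-ins for the two trainers' local inputs.
    input1 = np.random.random((10, 1000)).astype("float32")
    input2 = np.random.random((10, 1000)).astype("float32")

    # allreduce-sum: every rank ends up with the same reduced tensor.
    need_result = input1 + input2
    for local_out in (need_result, need_result):  # ideal tr0_out, tr1_out
        assert np.allclose(local_out, need_result, rtol=1e-05, atol=1e-05)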