
Feature/multigpu #4838

Merged · 48 commits · Oct 29, 2017

Commits
48dea84
"nccl multigpu init"
dzhwinter Sep 7, 2017
4e95c49
Merge remote-tracking branch 'origin/develop' into impl
dzhwinter Sep 19, 2017
408e21a
"remove clang format detect"
dzhwinter Sep 20, 2017
0fa34db
nccl init
dzhwinter Oct 12, 2017
4118782
Merge remote-tracking branch 'origin/develop' into multigpu
dzhwinter Oct 12, 2017
51abb6c
add test
dzhwinter Oct 12, 2017
d144310
"nccl add interface"
dzhwinter Oct 13, 2017
54d3dbd
"add enforce check"
dzhwinter Oct 14, 2017
d2be7ec
Merge remote-tracking branch 'origin/develop' into multigpu
dzhwinter Oct 15, 2017
d8aebaf
"fix enforce error"
dzhwinter Oct 15, 2017
5bcb638
"fix common test"
dzhwinter Oct 15, 2017
d4d215a
Merge remote-tracking branch 'origin/develop' into multigpu
dzhwinter Oct 15, 2017
73883bd
"fix error"
dzhwinter Oct 15, 2017
23cb825
"add python test case"
dzhwinter Oct 16, 2017
fdfc8f9
"switch to Init op"
dzhwinter Oct 19, 2017
333045d
"move nccl to another directory"
dzhwinter Oct 20, 2017
423d743
"add register gpu macro"
dzhwinter Oct 23, 2017
ec47565
"add reduce hash function"
dzhwinter Oct 23, 2017
f6106ff
merge develop into multigpu
dzhwinter Oct 23, 2017
50f04dc
"add init allreduce test"
dzhwinter Oct 24, 2017
ef257e6
write nccl c++ test case
dzhwinter Oct 24, 2017
da1181b
Merge remote-tracking branch 'origin/develop' into feature/multigpu
dzhwinter Oct 24, 2017
0990c87
checkin nccl operator
dzhwinter Oct 24, 2017
1e8474b
"delete python ops testcase"
dzhwinter Oct 24, 2017
026c61c
"fix allreduce python test"
dzhwinter Oct 24, 2017
63fb41b
"redefine the initop from kernel to OpBase"
dzhwinter Oct 24, 2017
5200c65
"move Tensor to LoDTensor"
dzhwinter Oct 25, 2017
6d1493a
"add bcast c++ test case"
dzhwinter Oct 25, 2017
11cf3e3
"refactorization of nccl test case"
dzhwinter Oct 25, 2017
bab5e53
Merge remote-tracking branch 'origin/develop' into feature/multigpu
dzhwinter Oct 25, 2017
94992a9
"add multiop testcase"
dzhwinter Oct 25, 2017
38d3adf
"add multioperator testcase"
dzhwinter Oct 25, 2017
61c1b04
"fix multigpu testcase"
dzhwinter Oct 25, 2017
4e165f4
"fix create output variable bug"
dzhwinter Oct 26, 2017
2573ac1
"remove python side test case to another PR."
dzhwinter Oct 26, 2017
626ff3b
"polish cmake file"
dzhwinter Oct 26, 2017
4b9cf0e
"add disable"
dzhwinter Oct 26, 2017
6bc261b
fix ci
dzhwinter Oct 26, 2017
dbfd130
"FIX CI"
dzhwinter Oct 26, 2017
16a39d2
fix conflict
dzhwinter Oct 26, 2017
6cce526
"fixed based on comment"
dzhwinter Oct 26, 2017
5220052
"polish code based on comment"
dzhwinter Oct 26, 2017
f632706
fix based on comment
dzhwinter Oct 26, 2017
75eaccc
"rerun ci"
dzhwinter Oct 26, 2017
37842d8
rerun ci
dzhwinter Oct 26, 2017
99308b1
rerun CI
dzhwinter Oct 27, 2017
6f009cf
rerun ci
dzhwinter Oct 27, 2017
71305e5
"polish code based on comment"
dzhwinter Oct 29, 2017
4 changes: 4 additions & 0 deletions paddle/framework/op_registry.h
@@ -223,6 +223,10 @@ class OpKernelRegistrar : public Registrar {
USE_OP_ITSELF(op_type); \
USE_OP_DEVICE_KERNEL(op_type, CPU);

#define USE_GPU_ONLY_OP(op_type) \
USE_OP_ITSELF(op_type); \
USE_OP_DEVICE_KERNEL(op_type, GPU)

#define USE_OP(op_type) \
USE_OP_ITSELF(op_type); \
USE_OP_KERNEL(op_type)
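
The new USE_GPU_ONLY_OP macro mirrors the existing CPU-only variant: it references the operator definition and only its GPU kernel, so a translation unit (such as the generated pybind stub) can link an op that has no CPU kernel at all. A minimal usage sketch, assuming a GPU build; the op name is the one registered later in nccl_op.cc:

// Sketch only: pull a GPU-only operator into a translation unit.
// Per the hunk above, USE_GPU_ONLY_OP(ncclAllReduce) expands to
//   USE_OP_ITSELF(ncclAllReduce);
//   USE_OP_DEVICE_KERNEL(ncclAllReduce, GPU)
// so no CPU kernel symbol is ever referenced.
#include "paddle/framework/op_registry.h"

USE_GPU_ONLY_OP(ncclAllReduce);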
12 changes: 11 additions & 1 deletion paddle/framework/operator.h
@@ -125,7 +125,7 @@ class OperatorBase {
protected:
std::string type_;
// NOTE: in case of OpGrad, inputs_ contains:
// I (Inputs)opear
// I (Inputs)
// O (Outputs)
// OG (Output Gradients)
VariableNameMap inputs_;
@@ -290,6 +290,16 @@ class ExecutionContext {
return device_context_;
}

//! Get actual name vector for this input.
const std::vector<std::string>& Inputs(const std::string& name) const {
return op_.Inputs(name);
}

//! Get actual name vector for this output.
const std::vector<std::string>& Outputs(const std::string& name) const {
return op_.Outputs(name);
}

#ifdef PADDLE_WITH_CUDA
const platform::CUDADeviceContext& cuda_device_context() const {
PADDLE_ENFORCE(platform::is_gpu_place(device_context_.GetPlace()));
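
These two accessors expose the actual variable names bound to an input or output slot from inside a kernel; the NCCL kernels introduced below need them to pair the i-th element of "X" with the i-th element of "Out" (and, for ncclReduce, to hash a name to a root GPU). An illustrative helper, not part of the PR, with made-up slot names:

#include <iostream>
#include <string>
#include <vector>
#include "paddle/framework/operator.h"
#include "paddle/platform/enforce.h"

// Hypothetical helper (illustration only): walk the name lists returned by the
// new ExecutionContext::Inputs/Outputs accessors and pair them one-to-one.
void LogInputOutputPairs(const paddle::framework::ExecutionContext& ctx) {
  const std::vector<std::string>& ins = ctx.Inputs("X");
  const std::vector<std::string>& outs = ctx.Outputs("Out");
  PADDLE_ENFORCE_EQ(ins.size(), outs.size(),
                    "every input should have a matching output");
  for (size_t i = 0; i < ins.size(); ++i) {
    std::cout << ins[i] << " -> " << outs[i] << "\n";
  }
}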
16 changes: 16 additions & 0 deletions paddle/operators/CMakeLists.txt
@@ -83,6 +83,13 @@ function(op_library TARGET)
file(APPEND ${pybind_file} "USE_OP(sigmoid);\n")
endif()

# nccl_op contains several operators
if ("${TARGET}" STREQUAL "nccl_op")
set(pybind_flag 1)
# It's enough to add just one operator to pybind
file(APPEND ${pybind_file} "USE_GPU_ONLY_OP(ncclAllReduce);\n")
endif()

# reduce_op contains several operators
if ("${TARGET}" STREQUAL "reduce_op")
set(pybind_flag 1)
@@ -114,6 +121,7 @@ function(op_library TARGET)
endfunction()

add_subdirectory(math)
add_subdirectory(nccl)

set(DEPS_OPS
recurrent_op
@@ -123,6 +131,7 @@ set(DEPS_OPS
sum_op
pool_op
pool_with_index_op
nccl_op
sequence_conv_op
lstm_op)

@@ -135,6 +144,9 @@ op_library(softmax_with_cross_entropy_op DEPS cross_entropy softmax)
op_library(sum_op DEPS net_op)
op_library(pool_op DEPS pooling)
op_library(pool_with_index_op DEPS pooling)
if(WITH_GPU)
op_library(nccl_op DEPS nccl_common)
endif()
op_library(sequence_conv_op DEPS context_project)
op_library(lstm_op DEPS sequence2batch lstm_compute)

@@ -150,4 +162,8 @@ cc_test(net_op_test SRCS net_op_test.cc DEPS net_op)
cc_test(scatter_test SRCS scatter_test.cc DEPS tensor)
cc_test(strided_memcpy_test SRCS strided_memcpy_test.cc DEPS tensor paddle_memory)
cc_test(dynamic_recurrent_op_test SRCS dynamic_recurrent_op_test.cc DEPS dynamic_recurrent_op recurrent_op tensor_array)

if(WITH_GPU)
nv_test(nccl_op_test SRCS nccl_op_test.cu DEPS nccl_op gpu_info device_context)
endif()
cc_test(save_load_op_test SRCS save_load_op_test.cc DEPS save_op load_op)
3 changes: 3 additions & 0 deletions paddle/operators/nccl/CMakeLists.txt
@@ -0,0 +1,3 @@
if(WITH_GPU)
nv_library(nccl_common SRCS nccl_gpu_common.cc DEPS device_context operator )
endif()
17 changes: 17 additions & 0 deletions paddle/operators/nccl/nccl_gpu_common.cc
@@ -0,0 +1,17 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Review comment (Member): What is this file used for?

Reply (Contributor Author): It builds an operator-independent module that can also be reused by a future multi-executor or other components.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/operators/nccl/nccl_gpu_common.h"
#include "paddle/platform/gpu_info.h"

namespace paddle {
namespace platform {} // namespace platform
} // namespace paddle
63 changes: 63 additions & 0 deletions paddle/operators/nccl/nccl_gpu_common.h
@@ -0,0 +1,63 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <algorithm>
#include <condition_variable>
#include <memory>
#include <mutex>
#include <string>
#include <unordered_map>
#include <vector>

#include "paddle/platform/device_context.h"
#include "paddle/platform/dynload/nccl.h"
#include "paddle/platform/enforce.h"
#include "paddle/platform/macros.h"

namespace paddle {
namespace platform {

constexpr int kInvalidGPUId = -1;

struct Communicator {
std::vector<ncclComm_t> comms_;
std::unordered_map<int, int> comm_id_map_;

Communicator() {}

int GetCommId(int device_id) const { return comm_id_map_.at(device_id); }

void InitAll(const std::vector<int>& gpus) {
comms_.resize(gpus.size());
for (size_t i = 0; i < gpus.size(); ++i) {
comm_id_map_[gpus[i]] = i;
}
PADDLE_ENFORCE(
dynload::ncclCommInitAll(comms_.data(), gpus.size(), gpus.data()));
}

~Communicator() {
for (size_t i = 0; i < comms_.size(); ++i) {
// FIXME(dzh) : PADDLE_ENFORCE return void
dynload::ncclCommDestroy(comms_[i]);
}
}

DISABLE_COPY_AND_ASSIGN(Communicator);
};

} // namespace platform
} // namespace paddle
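
Communicator owns one ncclComm_t per participating device and maps a CUDA device id to its index in comms_. A minimal usage sketch, assuming a CUDA build with NCCL available; the device ids are illustrative:

#include <vector>
#include "paddle/operators/nccl/nccl_gpu_common.h"

// Sketch only: initialize a communicator over two illustrative devices.
void CommunicatorSketch() {
  paddle::platform::Communicator comm;
  std::vector<int> gpus = {0, 1};
  comm.InitAll(gpus);                 // wraps ncclCommInitAll + PADDLE_ENFORCE
  int idx = comm.GetCommId(1);        // index of device 1 inside comms_
  ncclComm_t raw = comm.comms_[idx];  // handle later passed to ncclAllReduce etc.
  (void)raw;
}  // ~Communicator calls ncclCommDestroy on every handle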
206 changes: 206 additions & 0 deletions paddle/operators/nccl_op.cc
@@ -0,0 +1,206 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/framework/op_registry.h"
#include "paddle/operators/nccl/nccl_gpu_common.h"

namespace paddle {
namespace operators {

// NCCLinitOp
class NCCLInitOp : public framework::OperatorBase {
public:
NCCLInitOp(const std::string &type, const framework::VariableNameMap &inputs,
const framework::VariableNameMap &outputs,
const framework::AttributeMap &attrs)
: OperatorBase(type, inputs, outputs, attrs) {}

void Run(const framework::Scope &scope,
const platform::DeviceContext &dev_ctx) const override {
const auto &name = Output("Communicator");
PADDLE_ENFORCE_NOT_NULL(scope.FindVar(name),
"Can not find variable '%s' in the scope.", name);
std::vector<int> gpus = Attr<std::vector<int>>("gpus");
PADDLE_ENFORCE(!gpus.empty(), "Attr(gpus) should not be empty.");

if (scope.FindVar(name) == nullptr) {
PADDLE_THROW("Output(Communicator) is needed for ncclInit operator.");
}

platform::Communicator *comm =
scope.FindVar(name)->GetMutable<platform::Communicator>();
comm->InitAll(gpus);
}
};

class NCCLInitOpMaker : public framework::OpProtoAndCheckerMaker {
public:
NCCLInitOpMaker(framework::OpProto *proto,
framework::OpAttrChecker *op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) {
AddOutput("Communicator",
"Create Communicator for communicating between gpus");
AddAttr<std::vector<int>>("gpus", "gpu id lists");
AddAttr<int>("data_type", "output data type")
.SetDefault(framework::DataType::FP32);
AddComment(R"DOC(
create communicator.
)DOC");
}
};

// AllReduceOp
class NCCLAllReduceOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;

protected:
void InferShape(framework::InferShapeContext *ctx) const override {
PADDLE_ENFORCE(ctx->HasInput("X"),
" Input(X) of AllReduce op input should not be NULL");
PADDLE_ENFORCE(
ctx->HasInput("Communicator"),
" Input(Communicator) of AllReduce op input should not be NULL");
PADDLE_ENFORCE(ctx->HasOutput("Out"),
" Input(X) of AllReduce op input should not be NULL");

auto x_dims = ctx->GetInputsDim("X");

std::string reduction = ctx->Attrs().Get<std::string>("reduction");
PADDLE_ENFORCE((reduction == "ncclSum" || reduction == "ncclProd" ||
reduction == "ncclMin" || reduction == "ncclMax"),
"invalid reduction.");

ctx->SetOutputsDim("Out", x_dims);
ctx->ShareLoD("X", /*->*/ "Out");
}
};

// ReduceOp
class NCCLReduceOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;

protected:
void InferShape(framework::InferShapeContext *ctx) const override {
PADDLE_ENFORCE(ctx->HasInput("X"),
" Input(X) of Reduce op input should not be NULL");
PADDLE_ENFORCE(
ctx->HasInput("Communicator"),
" Input(Communicator) of Reduce op input should not be NULL");
PADDLE_ENFORCE(ctx->HasOutput("Out"),
" Input(X) of Reduce op input should not be NULL");

std::string reduction = ctx->Attrs().Get<std::string>("reduction");
PADDLE_ENFORCE((reduction == "ncclSum" || reduction == "ncclProd" ||
reduction == "ncclMin" || reduction == "ncclMax"),
"invalid reduction.");

auto x_dims = ctx->GetInputsDim("X");
ctx->SetOutputsDim("Out", x_dims);
ctx->ShareLoD("X", /*->*/ "Out");
}
};

// BcastOp
class NCCLBcastOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;

protected:
void InferShape(framework::InferShapeContext *ctx) const override {
PADDLE_ENFORCE(ctx->HasInput("X"),
" Input(X) of Bcast op input should not be NULL");
PADDLE_ENFORCE(ctx->HasInput("Communicator"),
" Input(Communicator) of Bcast op input should not be NULL");
PADDLE_ENFORCE(ctx->HasOutput("Out"),
" Output(Out) of Bcast op output should not be NULL");

int root = ctx->Attrs().Get<int>("root");
PADDLE_ENFORCE(root != platform::kInvalidGPUId, "Bcast root must be set.");

auto x_dims = ctx->GetInputsDim("X");
ctx->SetOutputsDim("Out", x_dims);
ctx->ShareLoD("X", /*->*/ "Out");
}
};

// AllreduceOp
class NCCLAllReduceOpMaker : public framework::OpProtoAndCheckerMaker {
public:
NCCLAllReduceOpMaker(framework::OpProto *proto,
framework::OpAttrChecker *op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) {
AddInput("X", "The input of AllReduce op");
AddInput("Communicator", "Communicator for communicating between gpus");
AddOutput("Out", "The output of AllReduce op");
AddAttr<std::string>("reduction",
"{'ncclMin', 'ncclMax', 'ncclProd', 'ncclSum'}.")
.SetDefault("ncclSum");
AddComment(R"DOC(
AllReduce the input tensors.
)DOC");
}
};

// ReduceOp
class NCCLReduceOpMaker : public framework::OpProtoAndCheckerMaker {
public:
NCCLReduceOpMaker(framework::OpProto *proto,
framework::OpAttrChecker *op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) {
AddInput("X", "The input of Reduce op");
AddInput("Communicator", "Communicator for communicating between gpus");
AddOutput("Out", "The output of Reduce op");
AddAttr<std::string>("reduction",
"{'ncclMin', 'ncclMax', 'ncclProd', 'ncclSum'}.")
.SetDefault("ncclSum");
AddAttr<int>("root",
"root gpu of the parameter. if not "
"set(platform::kInvalidGPUId). hashed by name.")
.SetDefault(platform::kInvalidGPUId);
AddComment(R"DOC(
Reduce the tensors)DOC");
}
};

// BcastOp
class NCCLBcastOpMaker : public framework::OpProtoAndCheckerMaker {
public:
NCCLBcastOpMaker(framework::OpProto *proto,
framework::OpAttrChecker *op_checker)
: OpProtoAndCheckerMaker(proto, op_checker) {
AddInput("X", "The input of BcastSend op");
AddInput("Communicator", "Communicator for communicating between gpus");
AddOutput("Out", "The output of Bcast");
AddAttr<int>("root",
"root gpu of the parameter. if not "
"set(platform::kInvalidGPUId). hashed by name.")
.SetDefault(platform::kInvalidGPUId);
AddComment(R"DOC(
Bcast the tensors.
)DOC");
}
};

} // namespace operators
} // namespace paddle

namespace ops = paddle::operators;
REGISTER_OPERATOR(ncclInit, ops::NCCLInitOp,
paddle::framework::EmptyGradOpMaker, ops::NCCLInitOpMaker);

REGISTER_OP_WITHOUT_GRADIENT(ncclAllReduce, ops::NCCLAllReduceOp,
ops::NCCLAllReduceOpMaker);
REGISTER_OP_WITHOUT_GRADIENT(ncclBcast, ops::NCCLBcastOp,
ops::NCCLBcastOpMaker);
REGISTER_OP_WITHOUT_GRADIENT(ncclReduce, ops::NCCLReduceOp,
ops::NCCLReduceOpMaker);
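
Taken together, ncclInit produces a Communicator variable that the data-path ops (ncclAllReduce, ncclReduce, ncclBcast) then consume. A hypothetical C++ wiring sketch, not taken from this PR; the scope setup is minimal and the names ("comm", "x0", "out0", the gpu ids) are made up, as is the assumption that the registry and scope APIs are used as elsewhere in the framework:

#include <vector>
#include "paddle/framework/op_registry.h"
#include "paddle/framework/scope.h"
#include "paddle/platform/device_context.h"

// Hypothetical sketch: create and run ncclInit through the registry, then
// build an ncclAllReduce op that refers to the same Communicator variable.
void NcclInitSketch(paddle::framework::Scope* scope,
                    const paddle::platform::DeviceContext& dev_ctx) {
  namespace f = paddle::framework;
  scope->Var("comm");  // ncclInit enforces that its output variable exists
  auto init_op = f::OpRegistry::CreateOp(
      "ncclInit", {}, {{"Communicator", {"comm"}}},
      {{"gpus", std::vector<int>{0, 1}}});
  init_op->Run(*scope, dev_ctx);  // fills the Communicator via InitAll

  auto allreduce_op = f::OpRegistry::CreateOp(
      "ncclAllReduce", {{"X", {"x0"}}, {"Communicator", {"comm"}}},
      {{"Out", {"out0"}}}, {{"reduction", std::string("ncclSum")}});
  // allreduce_op->Run(...) would then be dispatched once per GPU place.
  (void)allreduce_op;
}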