Commit 4615463

a lot of minor updates to address comments.
zheng-da authored and Olivier committed Feb 6, 2018
1 parent 15b2ef2 commit 4615463
Showing 9 changed files with 35 additions and 38 deletions.
13 changes: 10 additions & 3 deletions include/mxnet/ndarray.h
@@ -138,11 +138,18 @@ class NDArray {
 dtype_(data.type_flag_), storage_type_(stype), entry_({nullptr, 0, 0}) {
 }
 
+/*
+ * This indicates whether an array is a view of another array (created by
+ * reshape or slice). If an array is a view and the data is stored in
+ * MKLDNN format, we need to convert the data to the default format when
+ * data in the view is accessed.
+ */
 inline bool IsView() const {
 // View only works on the default storage
 if (storage_type() != kDefaultStorage)
 return false;
-// If the array reuses memory, it's not a view.
+// If the array reuses memory, its shape may be different from the storage
+// shape. However, we shouldn't consider it as a view.
 if (reuse_)
 return false;
 return byte_offset_ > 0 || shape() != ptr_->storage_shape;
@@ -574,7 +581,7 @@ class NDArray {
 /*
 * Test if the data is stored in one of default MXNet formats.
 */
-bool IsDefault() const {
+bool IsDefaultData() const {
 return ptr_->IsDefault();
 }
 /*
Expand Down Expand Up @@ -622,7 +629,7 @@ class NDArray {
 ptr_->Reorder2Default();
 }
 
-void InvalidateData() {
+void InvalidateMKLDNNData() {
 // Removing mkl_mem_ means the NDArray will store data in the default format.
 ptr_->mkl_mem_ = nullptr;
 }
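
Note: the new IsView() comment boils down to a simple test — an array is a view when it starts at a nonzero byte offset or its logical shape differs from the storage shape, unless it merely reuses the buffer. A standalone toy model of that test (the ToyArray type and its members are simplified stand-ins, not the real NDArray internals):

```cpp
#include <cstddef>
#include <cstdint>
#include <vector>

// Toy model of the IsView() test: an array counts as a view when it starts at
// a nonzero byte offset into its storage or its logical shape differs from the
// storage shape, unless it merely reuses the buffer.
struct ToyArray {
  std::vector<int64_t> shape;          // logical shape seen by the user
  std::vector<int64_t> storage_shape;  // shape of the underlying allocation
  std::size_t byte_offset = 0;         // offset of the first element in bytes
  bool reuse = false;                  // buffer recycled rather than sliced

  bool IsView() const {
    if (reuse) return false;           // reused memory is not a view
    return byte_offset > 0 || shape != storage_shape;
  }
};
```

Because a view shares storage with its parent, data kept in an MKLDNN-specific layout has to be reordered to the default layout before the view's contents can be accessed element-wise, which is what the added comment documents.
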
10 changes: 5 additions & 5 deletions src/common/exec_utils.h
@@ -55,14 +55,14 @@ inline bool SetupDefaultBlobsIn(const std::vector<NDArray>& src,
 bool is_default = nd.storage_type() == kDefaultStorage;
 #if MXNET_USE_MKLDNN == 1
 // We have to make sure it's default storage and default layout.
-is_default = nd.IsDefault();
+is_default = nd.IsDefaultData();
 #endif
 if (!is_default) {
 (*idx_map)[i] = temp_dst->size();
 NDArray temp = bufs != nullptr ? bufs->at(i) : NDArray(nd.shape(), nd.ctx(),
 true, nd.dtype());
 #if MXNET_USE_MKLDNN == 1
-CHECK(temp.IsDefault());
+CHECK(temp.IsDefaultData());
 #endif
 temp_src->emplace_back(nd);
 temp_dst->emplace_back(temp);
@@ -88,15 +88,15 @@ inline bool SetupDefaultBlobsOut(const std::vector<NDArray>& src,
 #if MXNET_USE_MKLDNN == 1
 // If it's writeTo, we don't need to worry whether it contains valid data.
 if (req[i] == kWriteTo && is_default)
-const_cast<NDArray &>(nd).InvalidateData();
+const_cast<NDArray &>(nd).InvalidateMKLDNNData();
 // We have to make sure it's default storage and default layout.
-is_default = nd.IsDefault();
+is_default = nd.IsDefaultData();
 #endif
 if (!is_default) {
 NDArray temp = bufs != nullptr ? bufs->at(i) : NDArray(nd.shape(), nd.ctx(),
 true, nd.dtype());
 #if MXNET_USE_MKLDNN == 1
-CHECK(temp.IsDefault());
+CHECK(temp.IsDefaultData());
 #endif
 temp_src->emplace_back(nd);
 temp_dst->emplace_back(temp);
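
Note: both SetupDefaultBlobs helpers follow the same staging pattern — write-only outputs simply drop their MKLDNN memory, and anything still not in default storage/layout is routed through a default-format temporary whose slot is remembered for the later copy. A rough sketch of that pattern under toy types (StageDefaultTemps, ToyArray and friends are illustrative names, not the real signatures):

```cpp
#include <cstddef>
#include <unordered_map>
#include <vector>

enum OpReqType { kNullOp, kWriteTo, kWriteInplace, kAddTo };

struct ToyArray {
  bool default_layout = true;                      // stand-in for IsDefaultData()
  bool IsDefaultData() const { return default_layout; }
  void InvalidateMKLDNNData() { default_layout = true; }
};

// Sketch of the staging pattern: invalidate write-only outputs, then route any
// non-default array through a default-format temporary and remember its slot
// in idx_map so the result can be copied across afterwards.
bool StageDefaultTemps(std::vector<ToyArray> *src,
                       const std::vector<OpReqType> &req,
                       std::vector<ToyArray> *temp_src,
                       std::vector<ToyArray> *temp_dst,
                       std::unordered_map<std::size_t, std::size_t> *idx_map) {
  bool require_cast = false;
  for (std::size_t i = 0; i < src->size(); ++i) {
    ToyArray &nd = (*src)[i];
    if (req[i] == kWriteTo)
      nd.InvalidateMKLDNNData();                   // contents will be overwritten
    if (!nd.IsDefaultData()) {
      (*idx_map)[i] = temp_dst->size();            // where the staged copy lives
      temp_src->push_back(nd);
      temp_dst->push_back(ToyArray{});             // fresh default-format buffer
      require_cast = true;
    }
  }
  return require_cast;
}
```
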
2 changes: 1 addition & 1 deletion src/executor/graph_executor.cc
@@ -828,7 +828,7 @@ void GraphExecutor::InitArguments(const nnvm::IndexedGraph& idx,
 auto grad_eid = idx.entry_id(idx.outputs()[grad_oid]);
 auto grad_stype = (NDArrayStorageType) inferred_stypes[grad_eid];
 if (nullptr != shared_exec && grad_stype == kDefaultStorage &&
-shared_exec->arg_grad_map().at(arg_name).storage_type() == grad_stype) {
+shared_exec->arg_grad_map().at(arg_name).storage_type() == kDefaultStorage) {
 // try to reuse memory from shared_exec
 arg_grad_vec->emplace_back(shared_exec->arg_grad_map().at(arg_name));
 } else {
16 changes: 5 additions & 11 deletions src/ndarray/ndarray.cc
@@ -576,18 +576,12 @@ void NDArray::MKLDNNDataReorder(const mkldnn::memory::primitive_desc &pd) {
 }
 
 void NDArray::CopyFrom(const mkldnn::memory &mem) {
-if (ptr_ == nullptr) {
-LOG(FATAL) << "The NDArray hasn't been initialized";
-return;
-}
+CHECK(ptr_ != nullptr) << "The NDArray hasn't been initialized";
 if (ptr_->mkl_mem_.get() == &mem)
 return;
 
-if (mem.get_primitive_desc().get_size() != shape().Size() * GetTypeSize(dtype_)) {
-LOG(FATAL) << "The size of NDArray doesn't match the requested MKLDNN memory desc";
-return;
-}
-
+CHECK(mem.get_primitive_desc().get_size() == shape().Size() * GetTypeSize(dtype_))
+<< "The size of NDArray doesn't match the requested MKLDNN memory desc";
 MKLDNNStream *stream = MKLDNNStream::Get();
 // If this array uses MKLDNN layout and it's a view, we have to change its
 // layout to the default layout.
@@ -1028,8 +1022,8 @@ inline void CopyFromToDnsImpl(const NDArray& from, const NDArray& to, RunContext
 tmp_from.CopyFrom(*tmp_mem);
 MKLDNNStream::Get()->Submit();
 }
-CHECK(tmp_from.IsDefault());
-CHECK(to.IsDefault());
+CHECK(tmp_from.IsDefaultData());
+CHECK(to.IsDefaultData());
 TBlob tmp = to.data();
 ndarray::Copy<from_xpu, to_xpu>(from.data(), &tmp,
 from.ctx(), to.ctx(), ctx);
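
Note: the CopyFrom edit collapses a LOG(FATAL)-plus-return pair into a single CHECK. Since a dmlc/glog-style CHECK aborts with the streamed message when its condition is false, the old return statements were unreachable. A stripped-down stand-in showing how such a macro behaves (TOY_CHECK and ToyFatalMessage are illustrative, not the real dmlc macro):

```cpp
#include <cstdlib>
#include <iostream>

// Stripped-down stand-in for a CHECK-style macro: when the condition fails,
// stream a message and abort. Execution never continues past a failed check,
// which is why the old "return;" after LOG(FATAL) was unreachable.
struct ToyFatalMessage {
  explicit ToyFatalMessage(const char *expr) {
    std::cerr << "Check failed: " << expr << ' ';
  }
  ~ToyFatalMessage() {
    std::cerr << std::endl;
    std::abort();                                   // terminate after the message
  }
  std::ostream &stream() { return std::cerr; }
};

#define TOY_CHECK(cond) \
  if (!(cond)) ToyFatalMessage(#cond).stream()

// Usage mirroring the commit:
//   TOY_CHECK(ptr != nullptr) << "The NDArray hasn't been initialized";
```
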
2 changes: 1 addition & 1 deletion src/operator/nn/mkldnn/mkldnn_base-inl.h
@@ -406,7 +406,7 @@ static inline void InvalidateOutputs(const std::vector<NDArray> &arrs,
 const std::vector<OpReqType> &reqs) {
 for (size_t i = 0; i < arrs.size(); i++) {
 if (reqs[i] == kWriteTo || reqs[i] == kNullOp) {
-const_cast<NDArray &>(arrs[i]).InvalidateData();
+const_cast<NDArray &>(arrs[i]).InvalidateMKLDNNData();
 }
 }
 }
4 changes: 2 additions & 2 deletions src/operator/nn/mkldnn/mkldnn_base.cc
@@ -248,8 +248,8 @@ void FallBackCompute(FCompute fn, const nnvm::NodeAttrs &attrs,
 std::vector<TBlob> out_blobs(outputs.size());
 for (size_t i = 0; i < out_blobs.size(); i++) {
 if (req[i] == kWriteTo)
-const_cast<NDArray &>(outputs[i]).InvalidateData();
-CHECK(outputs[i].IsDefault());
+const_cast<NDArray &>(outputs[i]).InvalidateMKLDNNData();
+CHECK(outputs[i].IsDefaultData());
 out_blobs[i] = outputs[i].data();
 }
 fn(attrs, ctx, in_blobs, req, out_blobs);
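
Note: FallBackCompute bridges MKLDNN-format arrays and a kernel that only understands plain TBlobs — write-only outputs drop their MKLDNN memory first, every output is then required to be in the default layout, and the raw blobs are handed to the fallback function. A condensed sketch of that flow with toy types standing in for NDArray, TBlob and FCompute (attrs, contexts and input handling omitted):

```cpp
#include <cstddef>
#include <vector>

enum OpReqType { kNullOp, kWriteTo, kWriteInplace, kAddTo };

struct ToyBlob { void *dptr = nullptr; };           // stand-in for TBlob
struct ToyArray {
  bool mkldnn_layout = false;
  bool IsDefaultData() const { return !mkldnn_layout; }
  void InvalidateMKLDNNData() { mkldnn_layout = false; }
  ToyBlob data() const { return ToyBlob{}; }        // only meaningful in default layout
};

using ToyFCompute = void (*)(const std::vector<ToyBlob> &in,
                             const std::vector<OpReqType> &req,
                             std::vector<ToyBlob> *out);

// Fallback path: make sure every output exposes a default-layout blob, then
// call the blob-based kernel.
void ToyFallBackCompute(ToyFCompute fn,
                        const std::vector<ToyBlob> &in_blobs,
                        const std::vector<OpReqType> &req,
                        std::vector<ToyArray> *outputs) {
  std::vector<ToyBlob> out_blobs(outputs->size());
  for (std::size_t i = 0; i < outputs->size(); ++i) {
    if (req[i] == kWriteTo)
      (*outputs)[i].InvalidateMKLDNNData();         // write-only: old data irrelevant
    // After this point the array must be in the default layout (the real code
    // asserts it with CHECK(IsDefaultData())).
    out_blobs[i] = (*outputs)[i].data();
  }
  fn(in_blobs, req, &out_blobs);
}
```
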
12 changes: 6 additions & 6 deletions src/operator/nn/mkldnn/mkldnn_batch_norm-inl.h
@@ -316,18 +316,18 @@ void MKLDNNBatchNormBackward(const OpContext &ctx, const BatchNormParam &param,
 const NDArray &out_mean = out_data[batchnorm::kMean];
 const NDArray &out_var = out_data[batchnorm::kVar];
 
-CHECK(out_mean.IsDefault());
-CHECK(out_var.IsDefault());
-CHECK(moving_mean.IsDefault());
-CHECK(moving_var.IsDefault());
+CHECK(out_mean.IsDefaultData());
+CHECK(out_var.IsDefaultData());
+CHECK(moving_mean.IsDefaultData());
+CHECK(moving_var.IsDefaultData());
 
 auto data_mem = data.GetMKLDNNData();
 auto diff_mem = diff.GetMKLDNNData();
 // MKLDNN batchnorm should run on special layouts. If one of them isn't, we
 // should reorder them.
-if (data.IsDefault())
+if (data.IsDefaultData())
 data_mem = data.GetMKLDNNDataReorder(diff_mem->get_primitive_desc());
-else if (diff.IsDefault())
+else if (diff.IsDefaultData())
 diff_mem = diff.GetMKLDNNDataReorder(data_mem->get_primitive_desc());
 auto bwd_pd = _GetBwd(*data_mem, *diff_mem, param.eps, flags);
 auto gradi_mem = const_cast<NDArray &>(gradIn).CreateMKLDNNData(data_mem->get_primitive_desc());
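
Note: the backward pass needs data and diff in the same MKLDNN layout, so whichever of the two is still in the default layout is reordered to the other one's primitive descriptor. A layout-agnostic sketch of that selection logic (toy types; the real code operates on mkldnn::memory and primitive_desc objects):

```cpp
struct ToyDesc { int layout_id = 0; };              // stand-in for a primitive_desc
struct ToyMem  { ToyDesc desc; };                   // stand-in for mkldnn::memory

struct ToyArray {
  bool default_layout = true;
  ToyMem mem;
  bool IsDefaultData() const { return default_layout; }
  const ToyMem *GetData() const { return &mem; }
  // Pretend-reorder: hand back the data laid out as `target` requests.
  ToyMem GetDataReorder(const ToyDesc &target) const { return ToyMem{target}; }
};

// Pick a common layout for the batchnorm backward inputs: reorder whichever
// side is still in the default layout to match the other one.
void AlignLayouts(const ToyArray &data, const ToyArray &diff,
                  ToyMem *data_mem, ToyMem *diff_mem) {
  *data_mem = *data.GetData();
  *diff_mem = *diff.GetData();
  if (data.IsDefaultData())
    *data_mem = data.GetDataReorder(diff_mem->desc);
  else if (diff.IsDefaultData())
    *diff_mem = diff.GetDataReorder(data_mem->desc);
}
```
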
8 changes: 3 additions & 5 deletions src/operator/nn/mkldnn/mkldnn_sum.cc
@@ -34,13 +34,11 @@ namespace op {
 void Sum(const mkldnn::memory &arr1, const mkldnn::memory &arr2,
 const mkldnn::memory &out) {
 std::vector<mkldnn::memory::primitive_desc> input_pds(2);
-std::vector<float> scales(2);
+std::vector<float> scales(2, 1);
 std::vector<mkldnn::primitive::at> inputs;
 input_pds[0] = arr1.get_primitive_desc();
 input_pds[1] = arr2.get_primitive_desc();
 CHECK(input_pds[0] == input_pds[1]);
-scales[0] = 1;
-scales[1] = 1;
 inputs.push_back(arr1);
 inputs.push_back(arr2);
 // TODO(zhengda) I need to reorder memory here.
@@ -54,12 +52,12 @@ void MKLDNNSumForward(const nnvm::NodeAttrs& attrs, const OpContext &ctx,
 TmpMemMgr::Get()->Init(ctx.requested[0]);
 std::vector<mkldnn::primitive::at> in_prims;
 std::vector<mkldnn::memory::primitive_desc> in_pds(inputs.size());
-std::vector<float> scales(inputs.size());
+std::vector<float> scales(inputs.size(), 1);
+in_prims.reserve(inputs.size());
 for (size_t i = 0; i < inputs.size(); i++) {
 auto in_mem = inputs[i].GetMKLDNNData();
 in_prims.push_back(*in_mem);
 in_pds[i] = in_mem->get_primitive_desc();
-scales[i] = 1;
 }
 mkldnn::sum::primitive_desc pdesc(scales, in_pds);
 
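
Note: both hunks swap per-element assignments for std::vector's fill constructor, which initializes every element to the given value up front. A quick illustration of the equivalence:

```cpp
#include <cassert>
#include <cstddef>
#include <vector>

int main() {
  // Fill constructor: three copies of 1.0f, as used for the MKLDNN sum scales.
  std::vector<float> scales_a(3, 1);

  // The more verbose form the commit removes.
  std::vector<float> scales_b(3);
  for (std::size_t i = 0; i < scales_b.size(); ++i)
    scales_b[i] = 1;

  assert(scales_a == scales_b);
  return 0;
}
```
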
6 changes: 2 additions & 4 deletions src/operator/tensor/elemwise_unary_op_basic.cc
@@ -125,10 +125,8 @@ static void CopyEx(const nnvm::NodeAttrs& attrs,
 // This happens if inputs are supposed to be in MKLDNN format
 // but MKLDNN doesn't support the data type or the shape. We're
 // forced to convert it to the default format.
-std::vector<TBlob> in_blobs(1);
-std::vector<TBlob> out_blobs(1);
-in_blobs[0] = inputs[0].data();
-out_blobs[0] = outputs[0].data();
+std::vector<TBlob> in_blobs {inputs[0].data()};
+std::vector<TBlob> out_blobs {outputs[0].data()};
 UnaryOp::IdentityCompute<cpu>(attrs, ctx, in_blobs, req, out_blobs);
 return;
 }
