This repository has been archived by the owner on Nov 17, 2023. It is now read-only.

Commit

bug fix
Rohit Kumar Srivastava committed Oct 24, 2019
1 parent 255bb21 commit 05c8ac1
Showing 7 changed files with 50 additions and 38 deletions.
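
This change tightens the large-tensor guard from 2^32 to 2^31 elements and prefixes most of the messages with the originating function. The 2^31 bound matches the maximum of a signed 32-bit index, which is what a default build (without USE_INT64_TENSOR_SIZE=1) uses for tensor sizes. A rough Python illustration of where the boundary falls (the shape is hypothetical, chosen only to cross the limit):

# Per-tensor element limit on a default (non-int64) build: signed 32-bit indexing.
INT32_MAX = 2**31 - 1                 # 2,147,483,647 elements

shape = (46342, 46342)                # hypothetical: 46342**2 = 2,147,580,964 elements
num_elements = 1
for dim in shape:
    num_elements *= dim

print(num_elements > INT32_MAX)       # True -> the new checks reject this tensor
print(num_elements * 4 / 2**30)       # ~8.0 GiB of float32 data at that size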
6 changes: 3 additions & 3 deletions python/mxnet/ndarray/ndarray.py
@@ -158,9 +158,9 @@ def _new_alloc_handle(shape, ctx, delay_alloc, dtype=mx_real_t):
     size = 1
     for idx in shape:
         size = size * idx
-    if size > 2**32:
-        raise Exception("[Python] Size of tensor you are trying to allocate is larger than 2^32 elements. " +
-                        "Please build with flag USE_INT64_TENSOR_SIZE=1")
+    if size > 2**31:
+        raise Exception("[_new_alloc_handle] Size of tensor you are trying to allocate is larger than 2^31 elements. " +
+                        "Please build with flag USE_INT64_TENSOR_SIZE=1")
     check_call(_LIB.MXNDArrayCreateEx(
         c_array_buf(mx_uint, native_array('I', shape)),
         mx_uint(len(shape)),
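
With this frontend check, an oversized allocation that reaches `_new_alloc_handle` is rejected before any backend call. A minimal usage sketch, assuming a default (non-int64) build; `mx.nd.empty` is used because it allocates through `_new_alloc_handle`, and the shapes are arbitrary examples:

import mxnet as mx

# Well under the 2**31 element limit: allocates normally.
small = mx.nd.empty((1024, 1024))        # 2**20 elements
print(small.shape)

# 65536 * 65536 = 2**32 elements exceeds the limit, so on a default build
# the new check in _new_alloc_handle raises a plain Exception.
try:
    huge = mx.nd.empty((65536, 65536))
except Exception as err:
    print(err)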
14 changes: 8 additions & 6 deletions src/c_api/c_api.cc
@@ -199,9 +199,9 @@ void CreateNDArray(const DataType* shape,
                    NDArrayHandle* out) {
   mxnet::TShape requested_shape = mxnet::TShape(shape, shape + ndim);
   if (!features::is_enabled(features::INT64_TENSOR_SIZE)) {
-    CHECK_LT(requested_shape.Size(), (int64_t{1} << 32) - 1) <<
-              "Size of tensor you are trying to allocate is larger than "
-              "2^32 elements. Please build with flag USE_INT64_TENSOR_SIZE=1";
+    CHECK_LT(requested_shape.Size(), (int64_t{1} << 31) - 1) <<
+              "[CreateNDArray] Size of tensor you are trying to allocate is larger than "
+              "2^31 elements. Please build with flag USE_INT64_TENSOR_SIZE=1";
   }
   *out = new NDArray(requested_shape,
                      Context::Create(static_cast<Context::DeviceType>(dev_type), dev_id),
@@ -591,6 +591,11 @@ inline void GetShape(NDArrayHandle handle, const dtype** out_pdata, int* out_dim
                      MXAPIThreadLocalEntry<dtype>* ret) {
   NDArray* arr = static_cast<NDArray*>(handle);
   if (!arr->is_none()) {
+    if (!features::is_enabled(features::INT64_TENSOR_SIZE)) {
+      CHECK_LT(arr->shape().Size(), (int64_t{1} << 31) - 1) <<
+                "Size of tensor you are trying to allocate is larger than "
+                "2^31 elements. Please build with flag USE_INT64_TENSOR_SIZE=1";
+    }
     mxnet::TShape s = arr->shape();
     if (!Imperative::Get()->is_np_shape()) {
       common::ConvertToLegacyShape(&s);
@@ -616,9 +621,6 @@ int MXNDArrayGetShapeEx(NDArrayHandle handle,
                         const int **out_pdata) {
   MXAPIThreadLocalEntry<> *ret = MXAPIThreadLocalStore<>::Get();
   API_BEGIN();
-  CHECK_LT(static_cast<NDArray*>(handle)->shape().Size(), (int64_t{1} << 32) - 1) <<
-            "Size of tensor you are trying to allocate is larger than "
-            "2^32 elements. Please build with flag USE_INT64_TENSOR_SIZE=1";
   GetShape<int>(handle, out_pdata, out_dim, ret);
   API_END();
 }
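
All of the backend guards in this change are wrapped in `features::is_enabled(features::INT64_TENSOR_SIZE)`, so they only fire when the library was built without USE_INT64_TENSOR_SIZE=1. Whether a given binary has the flag can be inspected from Python; a minimal sketch, assuming the `mxnet.runtime.Features` API of contemporary 1.x builds:

from mxnet.runtime import Features

features = Features()
# True only for a build with USE_INT64_TENSOR_SIZE=1; in that case the
# 2**31 element checks added in c_api.cc are skipped.
print(features.is_enabled('INT64_TENSOR_SIZE'))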
6 changes: 3 additions & 3 deletions src/c_api/c_api_ndarray.cc
@@ -56,9 +56,9 @@ void SetNDInputsOutputs(const nnvm::Op* op,
   for (int i = 0; i < num_inputs; ++i) {
     NDArray* inp = reinterpret_cast<NDArray*>(inputs[i]);
     if (!features::is_enabled(features::INT64_TENSOR_SIZE)) {
-      CHECK_LT(inp->shape().Size(), (int64_t{1} << 32) - 1) <<
-                "Size of tensor you are trying to allocate is larger than "
-                "2^32 elements. Please build with flag USE_INT64_TENSOR_SIZE=1";
+      CHECK_LT(inp->shape().Size(), (int64_t{1} << 31) - 1) <<
+                "[SetNDInputsOutputs] Size of tensor you are trying to allocate is larger than "
+                "2^31 elements. Please build with flag USE_INT64_TENSOR_SIZE=1";
     }
     ndinputs->emplace_back(inp);
   }
17 changes: 11 additions & 6 deletions src/ndarray/ndarray.cc
@@ -142,6 +142,11 @@ void NDArray::Chunk::CheckAndAllocData(const mxnet::TShape &shape, int dtype) {
   CHECK_NE(aux_shapes.size(), 0)
       << "data is expected to be allocated after aux_data";
   auto dbytes = shape.Size() * mshadow::mshadow_sizeof(dtype);
+  if (!features::is_enabled(features::INT64_TENSOR_SIZE)) {
+    CHECK_LT(shape.Size(), (int64_t{1} << 31) - 1) <<
+              "[CheckAndAllocData] Size of tensor you are trying to allocate is larger than "
+              "2^31 elements. Please build with flag USE_INT64_TENSOR_SIZE=1";
+  }
   if (shandle.size < dbytes) {
     // free storage
     Storage::Get()->Free(shandle);
@@ -1883,9 +1888,9 @@ NDArray NDArray::Copy(Context ctx) const {
 void NDArray::SyncCopyFromCPU(const void *data, size_t size) const {
   mxnet::TShape dshape = this->shape();
   if (!features::is_enabled(features::INT64_TENSOR_SIZE)) {
-    CHECK_LT(size, (int64_t{1} << 32) - 1) <<
-              "Size of tensor you are trying to allocate is larger than "
-              "2^32 elements. Please build with flag USE_INT64_TENSOR_SIZE=1";
+    CHECK_LT(size, (int64_t{1} << 31) - 1) <<
+              "[SyncCopyFromCPU] Size of tensor you are trying to allocate is larger than "
+              "2^31 elements. Please build with flag USE_INT64_TENSOR_SIZE=1";
   }
   CHECK_EQ(dshape.Size(), size)
       << "Memory size do not match";
@@ -2023,9 +2028,9 @@ void NDArray::SyncCopyFromNDArray(const NDArray& src, int i, int j) {
 void NDArray::SyncCopyToCPU(void *data, size_t size) const {
   mxnet::TShape dshape = this->shape();
   if (!features::is_enabled(features::INT64_TENSOR_SIZE)) {
-    CHECK_LT(size, (int64_t{1} << 32) - 1) <<
-              "Size of tensor you are trying to allocate is larger than "
-              "2^32 elements. Please build with flag USE_INT64_TENSOR_SIZE=1";
+    CHECK_LT(size, (int64_t{1} << 31) - 1) <<
+              "[SyncCopyToCPU] Size of tensor you are trying to allocate is larger than "
+              "2^31 elements. Please build with flag USE_INT64_TENSOR_SIZE=1";
   }
   CHECK_EQ(dshape.Size(), size)
       << "Memory size do not match";
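
SyncCopyFromCPU and SyncCopyToCPU are the host-copy paths used when data moves between NumPy and an NDArray (for example `mx.nd.array(...)` and `.asnumpy()`), so the tightened 2^31 checks also bound those copies. A small round-trip sketch, assuming a default build and arbitrary data:

import numpy as np
import mxnet as mx

src = np.arange(6, dtype='float32').reshape(2, 3)

nd = mx.nd.array(src)        # host -> NDArray copy (SyncCopyFromCPU path)
back = nd.asnumpy()          # NDArray -> host copy (SyncCopyToCPU path)

print(np.array_equal(src, back))   # True for this small, in-limit array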
5 changes: 5 additions & 0 deletions src/ndarray/ndarray_function.cc
@@ -38,6 +38,11 @@ void Copy<cpu, cpu>(const TBlob &from, TBlob *to,
                     RunContext ctx) {
   MSHADOW_TYPE_SWITCH_WITH_BOOL(to->type_flag_, DType, {
     if (to->type_flag_ == from.type_flag_) {
+      if (!features::is_enabled(features::INT64_TENSOR_SIZE)) {
+        CHECK_LT(from.Size(), (int64_t{1} << 31) - 1) <<
+                  "Size of tensor you are trying to allocate is larger than "
+                  "2^31 elements. Please build with flag USE_INT64_TENSOR_SIZE=1";
+      }
       const index_t size = static_cast<index_t>(from.Size());
       CHECK_EQ(size, to->Size()) << "copying size mismatch, from: " << size * sizeof(DType)
                                  << " bytes, to: " << to->Size() * sizeof(DType) << " bytes.";
7 changes: 0 additions & 7 deletions src/operator/elemwise_op_common.h
@@ -164,13 +164,6 @@ inline bool ElemwiseShape(const nnvm::NodeAttrs& attrs,
                           mxnet::ShapeVector *out_attrs) {
   if (n_in != -1) {
     CHECK_EQ(in_attrs->size(), static_cast<size_t>(n_in)) << " in operator " << attrs.name;
-    if (!features::is_enabled(features::INT64_TENSOR_SIZE)) {
-      for(mxnet::TShape shape : *in_attrs){
-        CHECK_LT(shape.Size(), (int64_t{1} << 32) - 1) <<
-                  "Size of tensor you are trying to allocate is larger than "
-                  "2^32 elements. Please build with flag USE_INT64_TENSOR_SIZE=1";
-      }
-    }
   }
   if (n_out != -1) {
     CHECK_EQ(out_attrs->size(), static_cast<size_t>(n_out)) << " in operator " << attrs.name;
33 changes: 20 additions & 13 deletions src/operator/tensor/init_op.h
@@ -272,15 +272,22 @@ inline bool InitShape(const nnvm::NodeAttrs& attrs,
   CHECK_EQ(in_attrs->size(), 0U);
   CHECK_EQ(out_attrs->size(), 1U);
   mxnet::TShape param_shape = param.shape;
-  if (!features::is_enabled(features::INT64_TENSOR_SIZE)) {
-    CHECK_LT(param_shape.Size(), (int64_t{1} << 32) - 1) <<
-              "Size of tensor you are trying to allocate is larger than "
-              "2^32 elements. Please build with flag USE_INT64_TENSOR_SIZE=1";
+  if (shape_is_known(param_shape) && !features::is_enabled(features::INT64_TENSOR_SIZE)) {
+    CHECK_LT(param_shape.Size(), (int64_t{1} << 31) - 1) <<
+              "[InitShape-input] Size of tensor you are trying to allocate is larger than "
+              "2^31 elements. Please build with flag USE_INT64_TENSOR_SIZE=1";
   }
   if (!Imperative::Get()->is_np_shape()) {
     common::ConvertToNumpyShape(&param_shape);
   }
-  if (shape_is_known((*out_attrs)[0]) && !shape_is_known(param_shape)) return true;
+  if (shape_is_known((*out_attrs)[0]) && !shape_is_known(param_shape)) {
+    if (!features::is_enabled(features::INT64_TENSOR_SIZE)) {
+      CHECK_LT(out_attrs->at(0).Size(), (int64_t{1} << 31) - 1) <<
+                "[InitShape-output] Size of tensor you are trying to allocate is larger than "
+                "2^31 elements. Please build with flag USE_INT64_TENSOR_SIZE=1";
+    }
+    return true;
+  }
   SHAPE_ASSIGN_CHECK(*out_attrs, 0, param_shape);
   return shape_is_known(out_attrs->at(0));
 }
@@ -342,9 +349,9 @@ template <bool is_integer = false, typename ValueType, typename xpu>
 void Fill(mshadow::Stream<xpu> *s, const TBlob& b, const OpReqType req, ValueType val) {
   // If b is a zero-size tensor, do nothing.
   if (!features::is_enabled(features::INT64_TENSOR_SIZE)) {
-    CHECK_LT(b.Size(), (int64_t{1} << 32) - 1) <<
-              "Size of tensor you are trying to allocate is larger than "
-              "2^32 elements. Please build with flag USE_INT64_TENSOR_SIZE=1";
+    CHECK_LT(b.Size(), (int64_t{1} << 31) - 1) <<
+              "[Fill] Size of tensor you are trying to allocate is larger than "
+              "2^31 elements. Please build with flag USE_INT64_TENSOR_SIZE=1";
   }
   if (b.Size() == 0) return;
   if (req != kNullOp) {
@@ -592,9 +599,9 @@ inline bool RangeShape(const nnvm::NodeAttrs& attrs,
                                          * param.repeat;
   mxnet::TShape output_shape = mxnet::TShape({static_cast<nnvm::dim_t>(out_size)});
   if (!features::is_enabled(features::INT64_TENSOR_SIZE)) {
-    CHECK_LT(output_shape.Size(), (int64_t{1} << 32) - 1) <<
-              "Size of tensor you are trying to allocate is larger than "
-              "2^32 elements. Please build with flag USE_INT64_TENSOR_SIZE=1";
+    CHECK_LT(output_shape.Size(), (int64_t{1} << 31) - 1) <<
+              "[RangeShape] Size of tensor you are trying to allocate is larger than "
+              "2^31 elements. Please build with flag USE_INT64_TENSOR_SIZE=1";
   }
   SHAPE_ASSIGN_CHECK(*out_attrs, 0, output_shape);
   return true;
@@ -640,8 +647,8 @@ inline bool LinspaceShape(const nnvm::NodeAttrs& attrs,
       << "Number of sequence should be non-negative, received " << param.num;
   mxnet::TShape shape = mxnet::TShape({static_cast<nnvm::dim_t>(param.num)});
   if (!features::is_enabled(features::INT64_TENSOR_SIZE)) {
-    CHECK_LT(shape.Size(), (int64_t{1} << 32) - 1) <<
-              "Size of tensor you are trying to allocate is larger than "
+    CHECK_LT(shape.Size(), (int64_t{1} << 31) - 1) <<
+              "[LinspaceShape] Size of tensor you are trying to allocate is larger than "
               "2^32 elements. Please build with flag USE_INT64_TENSOR_SIZE=1";
   }
   SHAPE_ASSIGN_CHECK(*out_attrs, 0, shape);
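
RangeShape derives the output length of arange-style operators from start, stop, step, and repeat, and the new check rejects lengths above 2^31 - 1 on a default build. A small illustration with in-limit parameters (the values are arbitrary):

import mxnet as mx

# Output length = ceil((10 - 0) / 2) * repeat = 5 * 2 = 10 elements,
# far below the 2**31 - 1 limit enforced by RangeShape on a default build.
x = mx.nd.arange(0, 10, step=2, repeat=2)
print(x.shape)      # (10,)
print(x.asnumpy())  # [0. 0. 2. 2. 4. 4. 6. 6. 8. 8.]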
