diff --git a/src/c_api/c_api.cc b/src/c_api/c_api.cc
index 7cb7c15758cd..442304f653f9 100644
--- a/src/c_api/c_api.cc
+++ b/src/c_api/c_api.cc
@@ -198,11 +198,6 @@ void CreateNDArray(const DataType* shape,
                    int dtype,
                    NDArrayHandle* out) {
   mxnet::TShape requested_shape = mxnet::TShape(shape, shape + ndim);
-  if (!features::is_enabled(features::INT64_TENSOR_SIZE)) {
-    CHECK_LT(requested_shape.Size(), (int64_t{1} << 31) - 1) <<
-        "[CreateNDArray] Size of tensor you are trying to allocate is larger than "
-        "2^31 elements. Please build with flag USE_INT64_TENSOR_SIZE=1";
-  }
   *out = new NDArray(requested_shape,
                      Context::Create(static_cast<Context::DeviceType>(dev_type), dev_id),
                      delay_alloc != 0, dtype);
diff --git a/src/c_api/c_api_ndarray.cc b/src/c_api/c_api_ndarray.cc
index 6bfb3b35743d..c8b36d685c96 100644
--- a/src/c_api/c_api_ndarray.cc
+++ b/src/c_api/c_api_ndarray.cc
@@ -55,11 +55,6 @@ void SetNDInputsOutputs(const nnvm::Op* op,
   ndinputs->reserve(num_inputs);
   for (int i = 0; i < num_inputs; ++i) {
     NDArray* inp = reinterpret_cast<NDArray*>(inputs[i]);
-    if (!features::is_enabled(features::INT64_TENSOR_SIZE)) {
-      CHECK_LT(inp->shape().Size(), (int64_t{1} << 31) - 1) <<
-          "[SetNDInputsOutputs] Size of tensor you are trying to allocate is larger than "
-          "2^31 elements. Please build with flag USE_INT64_TENSOR_SIZE=1";
-    }
     ndinputs->emplace_back(inp);
   }
 
diff --git a/src/ndarray/ndarray.cc b/src/ndarray/ndarray.cc
index 14311a8795dc..aaa7aedf8bcd 100644
--- a/src/ndarray/ndarray.cc
+++ b/src/ndarray/ndarray.cc
@@ -142,11 +142,6 @@ void NDArray::Chunk::CheckAndAllocData(const mxnet::TShape &shape, int dtype) {
   CHECK_NE(aux_shapes.size(), 0)
       << "data is expected to be allocated after aux_data";
   auto dbytes = shape.Size() * mshadow::mshadow_sizeof(dtype);
-  if (!features::is_enabled(features::INT64_TENSOR_SIZE)) {
-    CHECK_LT(shape.Size(), (int64_t{1} << 31) - 1) <<
-        "[CheckAndAllocData] Size of tensor you are trying to allocate is larger than "
-        "2^31 elements. Please build with flag USE_INT64_TENSOR_SIZE=1";
-  }
   if (shandle.size < dbytes) {
     // free storage
     Storage::Get()->Free(shandle);
@@ -1887,11 +1882,6 @@ NDArray NDArray::Copy(Context ctx) const {
 
 void NDArray::SyncCopyFromCPU(const void *data, size_t size) const {
   mxnet::TShape dshape = this->shape();
-  if (!features::is_enabled(features::INT64_TENSOR_SIZE)) {
-    CHECK_LT(size, (int64_t{1} << 31) - 1) <<
-        "[SyncCopyFromCPU] Size of tensor you are trying to allocate is larger than "
-        "2^31 elements. Please build with flag USE_INT64_TENSOR_SIZE=1";
-  }
   CHECK_EQ(dshape.Size(), size)
       << "Memory size do not match";
   // zero-size array, no need to copy
@@ -2027,11 +2017,6 @@ void NDArray::SyncCopyFromNDArray(const NDArray& src, int i, int j) {
 
 void NDArray::SyncCopyToCPU(void *data, size_t size) const {
   mxnet::TShape dshape = this->shape();
-  if (!features::is_enabled(features::INT64_TENSOR_SIZE)) {
-    CHECK_LT(size, (int64_t{1} << 31) - 1) <<
-        "[SyncCopyToCPU] Size of tensor you are trying to allocate is larger than "
-        "2^31 elements. Please build with flag USE_INT64_TENSOR_SIZE=1";
-  }
   CHECK_EQ(dshape.Size(), size)
       << "Memory size do not match";
   // zero-size array, no need to copy
diff --git a/src/ndarray/ndarray_function.cc b/src/ndarray/ndarray_function.cc
index ed121899436a..34429446bd62 100644
--- a/src/ndarray/ndarray_function.cc
+++ b/src/ndarray/ndarray_function.cc
@@ -38,11 +38,6 @@ void Copy<cpu>(const TBlob &from, TBlob *to,
                RunContext ctx) {
   MSHADOW_TYPE_SWITCH_WITH_BOOL(to->type_flag_, DType, {
     if (to->type_flag_ == from.type_flag_) {
-      if (!features::is_enabled(features::INT64_TENSOR_SIZE)) {
-        CHECK_LT(from.Size(), (int64_t{1} << 31) - 1) <<
-            "Size of tensor you are trying to allocate is larger than "
-            "2^31 elements. Please build with flag USE_INT64_TENSOR_SIZE=1";
-      }
       const index_t size = static_cast<index_t>(from.Size());
       CHECK_EQ(size, to->Size()) << "copying size mismatch, from: " << size * sizeof(DType)
           << " bytes, to: " << to->Size() * sizeof(DType) << " bytes.";
diff --git a/src/operator/tensor/init_op.h b/src/operator/tensor/init_op.h
index ac7be8366155..30ad1ded90c0 100644
--- a/src/operator/tensor/init_op.h
+++ b/src/operator/tensor/init_op.h
@@ -348,11 +348,6 @@ inline bool InitStorageType(const nnvm::NodeAttrs& attrs,
 template <typename xpu, typename ValueType>
 void Fill(mshadow::Stream<xpu> *s, const TBlob& b, const OpReqType req, ValueType val) {
   // If b is a zero-size tensor, do nothing.
-  if (!features::is_enabled(features::INT64_TENSOR_SIZE)) {
-    CHECK_LT(b.Size(), (int64_t{1} << 31) - 1) <<
-        "[Fill] Size of tensor you are trying to allocate is larger than "
-        "2^31 elements. Please build with flag USE_INT64_TENSOR_SIZE=1";
-  }
   if (b.Size() == 0) return;
   if (req != kNullOp) {
     const size_t size = b.Size();
@@ -646,11 +641,6 @@ inline bool LinspaceShape(const nnvm::NodeAttrs& attrs,
   CHECK_GE(param.num, 0)
       << "Number of sequence should be non-negative, received " << param.num;
   mxnet::TShape shape = mxnet::TShape({static_cast<nnvm::dim_t>(param.num)});
-  if (!features::is_enabled(features::INT64_TENSOR_SIZE)) {
-    CHECK_LT(shape.Size(), (int64_t{1} << 31) - 1) <<
-        "[LinspaceShape] Size of tensor you are trying to allocate is larger than "
-        "2^32 elements. Please build with flag USE_INT64_TENSOR_SIZE=1";
-  }
   SHAPE_ASSIGN_CHECK(*out_attrs, 0, shape);
   return true;
 }