Skip to content

Commit

Permalink
rocblas-remove-int8x4-uses(02) Removed the rest of INT8x4 support (except the driver)
Browse files Browse the repository at this point in the history
  • Loading branch information
atamazov committed Oct 6, 2023
1 parent d14242f commit 6737d66
Show file tree
Hide file tree
Showing 32 changed files with 13 additions and 77 deletions.
4 changes: 4 additions & 0 deletions driver/conv_driver.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -83,6 +83,10 @@ MIOPEN_DECLARE_ENV_VAR(MIOPEN_DRIVER_PAD_BUFFERS_2M)
MIOPEN_DECLARE_ENV_VAR(MIOPEN_DRIVER_USE_GPU_REFERENCE)
MIOPEN_DECLARE_ENV_VAR(MIOPEN_DRIVER_SUBNORM_PERCENTAGE)

// Support in the library discontinued, but left in the driver
// for reference in the future.
#define miopenInt8x4 (static_cast<miopenDataType_t>(4))

#if MIOPEN_BACKEND_OPENCL
#define STATUS_SUCCESS CL_SUCCESS
typedef cl_int status_t;
Expand Down
1 change: 0 additions & 1 deletion include/miopen/miopen.h
Original file line number Diff line number Diff line change
Expand Up @@ -350,7 +350,6 @@ typedef enum
miopenFloat = 1, /*!< 32-bit floating point (Fully supported) */
miopenInt32 = 2, /*!< 32-bit int point (Partially supported) */
miopenInt8 = 3, /*!< 8-bit int point (Partially supported) */
miopenInt8x4 = 4, /*!< Pack of four Int8 in NCHW_VECT_C format (Support discontinued) */
miopenBFloat16 = 5, /*!< 16-bit binary floating point (8-bit exponent, 7-bit fraction)
(Partially supported) */
miopenDouble = 6, /*!< 64-bit floating point (Partially supported) */
Expand Down
1 change: 0 additions & 1 deletion src/check_numerics.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -64,7 +64,6 @@ std::string GetKernelName(miopenDataType_t data_type)
case miopenBFloat8: return {"check_numerics_bf8"};
case miopenInt32:
case miopenInt8:
case miopenInt8x4: // Support discontinued.
case miopenDouble:
default: return {""};
}
Expand Down
4 changes: 1 addition & 3 deletions src/driver_arguments.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -50,7 +50,7 @@ void ConvDataType(std::stringstream& ss, const miopen::TensorDescriptor& desc)
{
ss << "convbfp16";
}
else if(desc.GetType() == miopenInt8 || desc.GetType() == miopenInt8x4)
else if(desc.GetType() == miopenInt8)
{
ss << "convint8";
}
Expand Down Expand Up @@ -190,8 +190,6 @@ std::string ConvArgsForMIOpenDriver(const miopen::TensorDescriptor& xDesc,
ss << " -g " << convDesc.group_count;
if(print_for_conv_driver)
ss << " -F " << std::to_string(static_cast<int>(conv_dir)) << " -t 1"; // clang-format on
if(xDesc.GetType() == miopenInt8x4)
ss << " -Z 1";
if(immediate_mode_solver_id.has_value())
{
ss << " -S " << *immediate_mode_solver_id;
Expand Down
16 changes: 8 additions & 8 deletions src/gemm_v2.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -610,9 +610,9 @@ miopenStatus_t CallGemm(const Handle& handle,
};
break;

case miopenInt8x4:
case miopenDouble: {
MIOPEN_THROW(miopenStatusBadParm, "Unknown or unsupported data type.");
MIOPEN_THROW(miopenStatusBadParm,
"miopenDouble data type not supported by MIOpenGEMM.");
};
break;
}
Expand Down Expand Up @@ -876,10 +876,10 @@ miopenStatus_t CallGemmStridedBatched(const Handle& handle,
break;
}

case miopenInt8x4:
case miopenDouble: {
MIOPEN_THROW(miopenStatusBadParm, "Unknown or unsupported data type.");
};
MIOPEN_THROW(miopenStatusBadParm,
"miopenDouble data type not supported by MIOpenGEMM.");
}
break;
}

Expand Down Expand Up @@ -1140,10 +1140,10 @@ miopenStatus_t CallGemmStridedBatchedSequential(const Handle& handle,
break;
}

case miopenInt8x4:
case miopenDouble: {
MIOPEN_THROW(miopenStatusBadParm, "Unknown or unsupported data type.");
};
MIOPEN_THROW(miopenStatusBadParm,
"miopenDouble data type not supported by MIOpenGEMM.");
}
break;
}

Expand Down
2 changes: 0 additions & 2 deletions src/hip/general_tensor_reorder_sol.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -137,8 +137,6 @@ GenericReorderSolutionImpl::GenericReorderSolutionImpl(miopenDataType_t data_typ
order_2(order_2_),
order_3(order_3_)
{
if(data_type == miopenInt8x4)
MIOPEN_THROW("These data type are not supported");
std::size_t data_size = miopen::GetTypeSize(data_type);
kernel_param_heuristic = tensor_reorder::HeuristicGet(data_size, dim_0, dim_1, dim_2, dim_3);
}
Expand Down
1 change: 0 additions & 1 deletion src/include/miopen/conv/problem_description.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -51,7 +51,6 @@ inline std::string GetDataTypeName(miopenDataType_t data_type)
case miopenFloat: return "FP32";
case miopenHalf: return "FP16";
case miopenInt8: return "INT8";
case miopenInt8x4: return "INT8x4";
case miopenInt32: return "INT32";
case miopenBFloat16: return "BF16";
case miopenDouble: return "FP64";
Expand Down
5 changes: 0 additions & 5 deletions src/include/miopen/datatype.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -53,10 +53,6 @@ inline std::string GetDataType(miopenDataType_t type)
type_str = "bfloat16";
}
break;
case miopenInt8x4: {
type_str = "UNSUPPORTED_TYPE";
}
break;
case miopenInt8: {
type_str = "int8_t";
}
Expand Down Expand Up @@ -157,7 +153,6 @@ inline KernelBuildParameters GetDataTypeKBP(miopenDataType_t type)
case miopenDouble: use_fp64 = 1; break;
case miopenFloat8: use_fp8 = 1; break;
case miopenBFloat8: use_bfp8 = 1; break;
case miopenInt8x4: // fallthrough
default:
MIOPEN_THROW("Only float, half, bfloat16, int8, float8, bfloat8 data types are supported.");
break;
Expand Down
1 change: 0 additions & 1 deletion src/include/miopen/tensor.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -101,7 +101,6 @@ inline std::size_t GetTypeSize(miopenDataType_t d)
case miopenFloat: return 4;
case miopenHalf:
case miopenBFloat16: return 2;
case miopenInt8x4: break;
case miopenInt8:
case miopenFloat8:
case miopenBFloat8: return 1;
Expand Down
1 change: 0 additions & 1 deletion src/include/miopen/visit_float.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -91,7 +91,6 @@ void visit_float(miopenDataType_t t, F f)
f(as_float<double>{});
break;
}
case miopenInt8x4: MIOPEN_THROW("miopenInt8x4: Support discontinued.");
}
}

Expand Down
6 changes: 0 additions & 6 deletions src/ocl/tensorocl.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -1944,7 +1944,6 @@ std::string GetCastTensorBuildOptionFromType(const std::string& buildOption, mio
case miopenDouble:
// TODO
MIOPEN_THROW(miopenStatusBadParm, "miopenDouble data type not supported in cast tensor.");
case miopenInt8x4: // fallthrough
default: MIOPEN_THROW(miopenStatusBadParm, "Invalid data type in cast tensor desc.");
}
}
Expand All @@ -1968,11 +1967,6 @@ void CastTensor(const Handle& handle,
MIOPEN_THROW(miopenStatusBadParm, "Tensor dimension lengths do not match.");
}

if(srcDesc.GetType() == miopenInt8x4 || dstDesc.GetType() == miopenInt8x4)
{
MIOPEN_THROW(miopenStatusBadParm, "Tensor cast operation is not supported for int8x4.");
}

auto flat_descriptors = GetConsistentFlattenedTensorDescriptors(srcDesc, dstDesc);
const TensorDescriptor& srcDesc_flat = std::get<0>(flat_descriptors);
const TensorDescriptor& dstDesc_flat = std::get<1>(flat_descriptors);
Expand Down
1 change: 0 additions & 1 deletion src/pooling_api.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -50,7 +50,6 @@ inline void Pooling_logging_cmd(const miopenPoolingDescriptor_t poolDesc,
case miopenFloat: ss << "pool"; break;
case miopenInt32:
case miopenInt8:
case miopenInt8x4: // Support discontinued.
case miopenBFloat16:
case miopenDouble:
case miopenFloat8:
Expand Down
3 changes: 0 additions & 3 deletions src/reducetensor.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -211,7 +211,6 @@ inline int GetDataTypeSize(miopenDataType_t t)
case miopenFloat8:
case miopenBFloat8:
case miopenInt8: return (1);
case miopenInt8x4: return (4); // Support discontinued.
case miopenBFloat16: return (2);
case miopenInt32: return (4);
default: MIOPEN_THROW("Only float, half, double, bfloat16, int8 data types are supported.");
Expand Down Expand Up @@ -268,7 +267,6 @@ inline int GetDataTypeId(miopenDataType_t t)
case miopenBFloat16: return (static_cast<int>('B'));
case miopenDouble: return (static_cast<int>('D'));
case miopenInt8:
case miopenInt8x4: // Support discontinued.
case miopenFloat8:
case miopenBFloat8:
case miopenInt32: return (static_cast<int>('O'));
Expand Down Expand Up @@ -308,7 +306,6 @@ static ck::DataTypeEnum_t mapDataTypeId(miopenDataType_t t)
case miopenBFloat16: return DataTypeEnum_t::BFloat16;
case miopenDouble: return DataTypeEnum_t::Double;
case miopenInt8: return DataTypeEnum_t::Int8;
case miopenInt8x4: return DataTypeEnum_t::Int8x4;
case miopenInt32: return DataTypeEnum_t::Int32;
case miopenFloat8:
case miopenBFloat8:
Expand Down
2 changes: 0 additions & 2 deletions src/solver/batchnorm/forward_inference_ck.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -200,7 +200,6 @@ bool BnCKFwdInference::IsApplicable(const ExecutionContext& context,
return (CheckCKApplicability<BF16, BF16, F32, BF16, BF16, F32>(bn_problem) != -1);
case miopenInt32:
case miopenInt8:
case miopenInt8x4: // Support discontinued.
case miopenFloat8:
case miopenBFloat8:
default: MIOPEN_THROW("Unsupported datatype");
Expand Down Expand Up @@ -244,7 +243,6 @@ BnCKFwdInference::GetSolution(const ExecutionContext& context,
break;
case miopenInt8:
case miopenInt32:
case miopenInt8x4: // Support discontinued.
case miopenFloat8:
case miopenBFloat8:
default: MIOPEN_THROW("Unsupported datatype");
Expand Down
4 changes: 0 additions & 4 deletions src/solver/conv_ck_igemm_fwd_bias_activ_fused.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -287,7 +287,6 @@ void PerformanceConfigConvCKIgemmFwdBiasActivFused::HeuristicInit(
case miopenInt8:
case miopenFloat:
case miopenInt32:
case miopenInt8x4: // Support discontinued.
case miopenBFloat16:
case miopenDouble:
default: MIOPEN_THROW("Unsupported datatype");
Expand Down Expand Up @@ -342,7 +341,6 @@ bool PerformanceConfigConvCKIgemmFwdBiasActivFused::IsValid(
case miopenInt8:
case miopenFloat:
case miopenInt32:
case miopenInt8x4: // Support discontinued.
case miopenBFloat16:
case miopenDouble:
default: MIOPEN_THROW("Unsupported datatype");
Expand Down Expand Up @@ -435,7 +433,6 @@ bool ConvCKIgemmFwdBiasActivFused::IsApplicable(const FusionContext& ctx,
case miopenInt8:
case miopenFloat:
case miopenInt32:
case miopenInt8x4: // Support discontinued.
case miopenBFloat16:
case miopenDouble:
default: MIOPEN_THROW("Unsupported datatype");
Expand Down Expand Up @@ -469,7 +466,6 @@ ConvSolution ConvCKIgemmFwdBiasActivFused::GetSolution(
case miopenInt8:
case miopenFloat:
case miopenInt32:
case miopenInt8x4: // Support discontinued.
case miopenBFloat16:
case miopenDouble:
default: MIOPEN_THROW("Unsupported datatype");
Expand Down
4 changes: 0 additions & 4 deletions src/solver/conv_hip_implicit_gemm_3d_grouped_bwd_xdlops.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -210,7 +210,6 @@ void PerformanceConfigHipImplicitGemm3DGroupBwdXdlops::HeuristicInit(
case miopenFloat: Init<float>(problem); break;
case miopenInt8: Init<int8_t>(problem); break;
case miopenInt32:
case miopenInt8x4: // Support discontinued.
case miopenBFloat16:
case miopenFloat8:
case miopenBFloat8:
Expand Down Expand Up @@ -253,7 +252,6 @@ bool PerformanceConfigHipImplicitGemm3DGroupBwdXdlops::IsValid(
case miopenFloat: return CheckIsSupportCKArgs<float>(problem);
case miopenInt8: return CheckIsSupportCKArgs<int8_t>(problem);
case miopenInt32:
case miopenInt8x4: // Support discontinued.
case miopenBFloat16:
case miopenFloat8:
case miopenBFloat8:
Expand Down Expand Up @@ -324,7 +322,6 @@ bool ConvHipImplicitGemm3DGroupBwdXdlops::IsApplicable(
case miopenFloat: return CheckCKApplicability<float>(problem);
case miopenInt8: return CheckCKApplicability<int8_t>(problem);
case miopenInt32:
case miopenInt8x4: // Support discontinued.
case miopenBFloat16:
case miopenFloat8:
case miopenBFloat8:
Expand Down Expand Up @@ -352,7 +349,6 @@ ConvSolution ConvHipImplicitGemm3DGroupBwdXdlops::GetSolution(
return InitInvokerFactory<DeviceOpGBwdPtrs<float>, CKArgs, conv::DataInvokeParams>(
problem, config.kernel_id);
case miopenInt32:
case miopenInt8x4: // Support discontinued.
case miopenBFloat16:
case miopenDouble:
case miopenFloat8:
Expand Down
4 changes: 0 additions & 4 deletions src/solver/conv_hip_implicit_gemm_3d_grouped_fwd_xdlops.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -210,7 +210,6 @@ void PerformanceConfigHipImplicitGemm3DGroupFwdXdlops::HeuristicInit(
case miopenFloat: Init<float>(problem); break;
case miopenInt8: Init<int8_t>(problem); break;
case miopenInt32:
case miopenInt8x4: // Support discontinued.
case miopenFloat8:
case miopenBFloat8:
case miopenBFloat16:
Expand Down Expand Up @@ -253,7 +252,6 @@ bool PerformanceConfigHipImplicitGemm3DGroupFwdXdlops::IsValid(
case miopenFloat: return CheckIsSupportCKArgs<float>(problem);
case miopenInt8: return CheckIsSupportCKArgs<int8_t>(problem);
case miopenInt32:
case miopenInt8x4: // Support discontinued.
case miopenFloat8:
case miopenBFloat8:
case miopenBFloat16:
Expand Down Expand Up @@ -322,7 +320,6 @@ bool ConvHipImplicitGemm3DGroupFwdXdlops::IsApplicable(
case miopenFloat: return CheckCKApplicability<float>(problem);
case miopenInt8: return CheckCKApplicability<int8_t>(problem);
case miopenInt32:
case miopenInt8x4: // Support discontinued.
case miopenFloat8:
case miopenBFloat8:
case miopenBFloat16:
Expand Down Expand Up @@ -350,7 +347,6 @@ ConvSolution ConvHipImplicitGemm3DGroupFwdXdlops::GetSolution(
return InitInvokerFactory<DeviceOpGFwdPtrs<float>, CKArgs, conv::DataInvokeParams>(
problem, config.kernel_id);
case miopenInt32:
case miopenInt8x4: // Support discontinued.
case miopenBFloat16:
case miopenDouble:
case miopenFloat8:
Expand Down
4 changes: 0 additions & 4 deletions src/solver/conv_hip_implicit_gemm_3d_grouped_wrw_xdlops.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -206,7 +206,6 @@ void PerformanceConfigHipImplicitGemm3DGroupWrwXdlops::HeuristicInit(
case miopenFloat: Init<float>(problem); break;
case miopenInt8: Init<int8_t>(problem); break;
case miopenInt32:
case miopenInt8x4: // Support discontinued.
case miopenBFloat16:
case miopenFloat8:
case miopenBFloat8:
Expand Down Expand Up @@ -249,7 +248,6 @@ bool PerformanceConfigHipImplicitGemm3DGroupWrwXdlops::IsValid(
case miopenFloat: return CheckIsSupportCKArgs<float>(problem);
case miopenInt8: return CheckIsSupportCKArgs<int8_t>(problem);
case miopenInt32:
case miopenInt8x4: // Support discontinued.
case miopenBFloat16:
case miopenFloat8:
case miopenBFloat8:
Expand Down Expand Up @@ -320,7 +318,6 @@ bool ConvHipImplicitGemm3DGroupWrwXdlops::IsApplicable(
case miopenFloat: return CheckCKApplicability<float>(problem);
case miopenInt8: return CheckCKApplicability<int8_t>(problem);
case miopenInt32:
case miopenInt8x4: // Support discontinued.
case miopenBFloat16:
case miopenFloat8:
case miopenBFloat8:
Expand Down Expand Up @@ -348,7 +345,6 @@ ConvSolution ConvHipImplicitGemm3DGroupWrwXdlops::GetSolution(
return InitInvokerFactory<DeviceOpGWrwPtrs<float>, CKArgs, conv::WrWInvokeParams>(
problem, config.kernel_id);
case miopenInt32:
case miopenInt8x4: // Support discontinued.
case miopenBFloat16:
case miopenFloat8:
case miopenBFloat8:
Expand Down
4 changes: 0 additions & 4 deletions src/solver/conv_hip_implicit_gemm_bwd_data_xdlops.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -172,7 +172,6 @@ void PerformanceConfigHipImplicitGemmBwdXdlops::HeuristicInit(
case miopenBFloat8:
case miopenInt8:
case miopenInt32:
case miopenInt8x4: // Support discontinued.
case miopenBFloat16:
case miopenDouble: break;
}
Expand Down Expand Up @@ -214,7 +213,6 @@ bool PerformanceConfigHipImplicitGemmBwdXdlops::IsValid(
case miopenBFloat8:
case miopenInt8:
case miopenInt32:
case miopenInt8x4: // Support discontinued.
case miopenBFloat16:
case miopenDouble: break;
}
Expand Down Expand Up @@ -294,7 +292,6 @@ bool ConvHipImplicitGemmBwdXdlops::IsApplicable(
case miopenBFloat8:
case miopenInt8:
case miopenInt32:
case miopenInt8x4: // Support discontinued.
case miopenBFloat16:
case miopenDouble: break;
}
Expand All @@ -318,7 +315,6 @@ ConvSolution ConvHipImplicitGemmBwdXdlops::GetSolution(
problem, config.kernel_id);
case miopenInt8:
case miopenInt32:
case miopenInt8x4: // Support discontinued.
case miopenBFloat16:
case miopenDouble:
case miopenFloat8:
Expand Down
4 changes: 0 additions & 4 deletions src/solver/conv_hip_implicit_gemm_fwd_xdlops.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -172,7 +172,6 @@ void PerformanceConfigHipImplicitGemmFwdXdlops::HeuristicInit(
case miopenFloat8:
case miopenBFloat8:
case miopenInt32:
case miopenInt8x4: // Support discontinued.
case miopenBFloat16:
case miopenDouble: break;
}
Expand Down Expand Up @@ -215,7 +214,6 @@ bool PerformanceConfigHipImplicitGemmFwdXdlops::IsValid(
case miopenFloat8:
case miopenBFloat8:
case miopenInt32:
case miopenInt8x4: // Support discontinued.
case miopenBFloat16:
case miopenDouble: break;
}
Expand Down Expand Up @@ -294,7 +292,6 @@ bool ConvHipImplicitGemmFwdXdlops::IsApplicable(
case miopenFloat8:
case miopenBFloat8:
case miopenInt32:
case miopenInt8x4: // Support discontinued.
case miopenBFloat16:
case miopenDouble: break;
}
Expand All @@ -320,7 +317,6 @@ ConvSolution ConvHipImplicitGemmFwdXdlops::GetSolution(
return InitInvokerFactory<DeviceOpPtrs<float>, CKArgs, conv::DataInvokeParams>(
problem, config.kernel_id);
case miopenInt32:
case miopenInt8x4: // Support discontinued.
case miopenBFloat16:
case miopenDouble:
case miopenFloat8:
Expand Down
Loading

0 comments on commit 6737d66

Please sign in to comment.