From 3f51097855ea283101199a5ab102f566fc4595c4 Mon Sep 17 00:00:00 2001
From: Thomas Lively <7121787+tlively@users.noreply.github.com>
Date: Tue, 5 May 2020 10:01:13 -0700
Subject: [PATCH] Update WebAssembly SIMD opcodes (#56)

* [WebAssembly] Enable recently implemented SIMD operations

Summary:
Moves a batch of instructions from unimplemented-simd128 to simd128 because they have recently become available in V8.

Reviewers: aheejin

Subscribers: dschuff, sbc100, jgravelle-google, hiraditya, sunfish, cfe-commits, llvm-commits

Tags: #clang, #llvm

Differential Revision: https://reviews.llvm.org/D73926

* [WebAssembly] Simplify extract_vector lowering

Summary:
Removes patterns that were not doing useful work, changes the default extract instructions to be the unsigned versions now that they are enabled by default, fixes PR44988, and adds tests for sext_inreg lowering.

Reviewers: aheejin

Reviewed By: aheejin

Subscribers: dschuff, sbc100, jgravelle-google, hiraditya, sunfish, llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D75005

* [WebAssembly] Renumber SIMD opcodes

Summary:
As described in https://github.com/WebAssembly/simd/pull/209. This is the final reorganization of the SIMD opcode space before standardization. It has been landed in concert with corresponding changes in other projects in the WebAssembly SIMD ecosystem.

Reviewers: aheejin

Subscribers: dschuff, sbc100, jgravelle-google, hiraditya, sunfish, llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D79224
---
 .../clang/Basic/BuiltinsWebAssembly.def | 32 +-
 clang/lib/CodeGen/CGBuiltin.cpp | 6 +-
 clang/test/CodeGen/builtins-wasm.c | 12 -
 .../WebAssembly/WebAssemblyISelLowering.cpp | 152 +++--
 .../WebAssembly/WebAssemblyInstrSIMD.td | 324 +++++------
 llvm/test/CodeGen/WebAssembly/simd-arith.ll | 46 +-
 .../test/CodeGen/WebAssembly/simd-bitcasts.ll | 31 +-
 .../CodeGen/WebAssembly/simd-build-pair.ll | 8 +-
 .../CodeGen/WebAssembly/simd-comparisons.ll | 56 --
 .../CodeGen/WebAssembly/simd-conversions.ll | 8 -
 .../WebAssembly/simd-extended-extract.ll | 53 +-
 .../CodeGen/WebAssembly/simd-intrinsics.ll | 22 -
 llvm/test/CodeGen/WebAssembly/simd-offset.ll | 64 ---
 .../WebAssembly/simd-scalar-to-vector.ll | 2 +-
 .../CodeGen/WebAssembly/simd-sext-inreg.ll | 105 +++-
 llvm/test/CodeGen/WebAssembly/simd.ll | 32 +-
 .../test/MC/Disassembler/WebAssembly/wasm.txt | 14 +-
 llvm/test/MC/WebAssembly/simd-encodings.s | 519 +++++++++---------
 18 files changed, 611 insertions(+), 875 deletions(-)

diff --git a/clang/include/clang/Basic/BuiltinsWebAssembly.def b/clang/include/clang/Basic/BuiltinsWebAssembly.def
index 38a2441b5fd47..ea09f39ec8d67 100644
--- a/clang/include/clang/Basic/BuiltinsWebAssembly.def
+++ b/clang/include/clang/Basic/BuiltinsWebAssembly.def
@@ -73,20 +73,20 @@ TARGET_BUILTIN(__builtin_wasm_trunc_saturate_u_i64_f64, "LLid", "nc", "nontrappi
 TARGET_BUILTIN(__builtin_wasm_swizzle_v8x16, "V16cV16cV16c", "nc", "unimplemented-simd128")
 TARGET_BUILTIN(__builtin_wasm_extract_lane_s_i8x16, "iV16cIi", "nc", "simd128")
-TARGET_BUILTIN(__builtin_wasm_extract_lane_u_i8x16, "iV16cIi", "nc", "unimplemented-simd128")
+TARGET_BUILTIN(__builtin_wasm_extract_lane_u_i8x16, "iV16cIi", "nc", "simd128")
 TARGET_BUILTIN(__builtin_wasm_extract_lane_s_i16x8, "iV8sIi", "nc", "simd128")
-TARGET_BUILTIN(__builtin_wasm_extract_lane_u_i16x8, "iV8sIi", "nc", "unimplemented-simd128")
+TARGET_BUILTIN(__builtin_wasm_extract_lane_u_i16x8, "iV8sIi", "nc", "simd128")
 TARGET_BUILTIN(__builtin_wasm_extract_lane_i32x4, "iV4iIi", "nc",
"simd128") -TARGET_BUILTIN(__builtin_wasm_extract_lane_i64x2, "LLiV2LLiIi", "nc", "unimplemented-simd128") +TARGET_BUILTIN(__builtin_wasm_extract_lane_i64x2, "LLiV2LLiIi", "nc", "simd128") TARGET_BUILTIN(__builtin_wasm_extract_lane_f32x4, "fV4fIi", "nc", "simd128") -TARGET_BUILTIN(__builtin_wasm_extract_lane_f64x2, "dV2dIi", "nc", "unimplemented-simd128") +TARGET_BUILTIN(__builtin_wasm_extract_lane_f64x2, "dV2dIi", "nc", "simd128") TARGET_BUILTIN(__builtin_wasm_replace_lane_i8x16, "V16cV16cIii", "nc", "simd128") TARGET_BUILTIN(__builtin_wasm_replace_lane_i16x8, "V8sV8sIii", "nc", "simd128") TARGET_BUILTIN(__builtin_wasm_replace_lane_i32x4, "V4iV4iIii", "nc", "simd128") -TARGET_BUILTIN(__builtin_wasm_replace_lane_i64x2, "V2LLiV2LLiIiLLi", "nc", "unimplemented-simd128") +TARGET_BUILTIN(__builtin_wasm_replace_lane_i64x2, "V2LLiV2LLiIiLLi", "nc", "simd128") TARGET_BUILTIN(__builtin_wasm_replace_lane_f32x4, "V4fV4fIif", "nc", "simd128") -TARGET_BUILTIN(__builtin_wasm_replace_lane_f64x2, "V2dV2dIid", "nc", "unimplemented-simd128") +TARGET_BUILTIN(__builtin_wasm_replace_lane_f64x2, "V2dV2dIid", "nc", "simd128") TARGET_BUILTIN(__builtin_wasm_add_saturate_s_i8x16, "V16cV16cV16c", "nc", "simd128") TARGET_BUILTIN(__builtin_wasm_add_saturate_u_i8x16, "V16cV16cV16c", "nc", "simd128") @@ -98,8 +98,8 @@ TARGET_BUILTIN(__builtin_wasm_sub_saturate_u_i8x16, "V16cV16cV16c", "nc", "simd1 TARGET_BUILTIN(__builtin_wasm_sub_saturate_s_i16x8, "V8sV8sV8s", "nc", "simd128") TARGET_BUILTIN(__builtin_wasm_sub_saturate_u_i16x8, "V8sV8sV8s", "nc", "simd128") -TARGET_BUILTIN(__builtin_wasm_avgr_u_i8x16, "V16cV16cV16c", "nc", "unimplemented-simd128") -TARGET_BUILTIN(__builtin_wasm_avgr_u_i16x8, "V8sV8sV8s", "nc", "unimplemented-simd128") +TARGET_BUILTIN(__builtin_wasm_avgr_u_i8x16, "V16cV16cV16c", "nc", "simd128") +TARGET_BUILTIN(__builtin_wasm_avgr_u_i16x8, "V8sV8sV8s", "nc", "simd128") TARGET_BUILTIN(__builtin_wasm_bitselect, "V4iV4iV4iV4i", "nc", "simd128") @@ -113,27 +113,25 @@ TARGET_BUILTIN(__builtin_wasm_all_true_i32x4, "iV4i", "nc", "simd128") TARGET_BUILTIN(__builtin_wasm_all_true_i64x2, "iV2LLi", "nc", "unimplemented-simd128") TARGET_BUILTIN(__builtin_wasm_abs_f32x4, "V4fV4f", "nc", "simd128") -TARGET_BUILTIN(__builtin_wasm_abs_f64x2, "V2dV2d", "nc", "unimplemented-simd128") +TARGET_BUILTIN(__builtin_wasm_abs_f64x2, "V2dV2d", "nc", "simd128") TARGET_BUILTIN(__builtin_wasm_min_f32x4, "V4fV4fV4f", "nc", "simd128") TARGET_BUILTIN(__builtin_wasm_max_f32x4, "V4fV4fV4f", "nc", "simd128") -TARGET_BUILTIN(__builtin_wasm_min_f64x2, "V2dV2dV2d", "nc", "unimplemented-simd128") -TARGET_BUILTIN(__builtin_wasm_max_f64x2, "V2dV2dV2d", "nc", "unimplemented-simd128") +TARGET_BUILTIN(__builtin_wasm_min_f64x2, "V2dV2dV2d", "nc", "simd128") +TARGET_BUILTIN(__builtin_wasm_max_f64x2, "V2dV2dV2d", "nc", "simd128") TARGET_BUILTIN(__builtin_wasm_dot_s_i32x4_i16x8, "V4iV8sV8s", "nc", "simd128") -TARGET_BUILTIN(__builtin_wasm_sqrt_f32x4, "V4fV4f", "nc", "unimplemented-simd128") -TARGET_BUILTIN(__builtin_wasm_sqrt_f64x2, "V2dV2d", "nc", "unimplemented-simd128") +TARGET_BUILTIN(__builtin_wasm_sqrt_f32x4, "V4fV4f", "nc", "simd128") +TARGET_BUILTIN(__builtin_wasm_sqrt_f64x2, "V2dV2d", "nc", "simd128") -TARGET_BUILTIN(__builtin_wasm_qfma_f32x4, "V4fV4fV4fV4f", "nc", "simd128") -TARGET_BUILTIN(__builtin_wasm_qfms_f32x4, "V4fV4fV4fV4f", "nc", "simd128") +TARGET_BUILTIN(__builtin_wasm_qfma_f32x4, "V4fV4fV4fV4f", "nc", "unimplemented-simd128") +TARGET_BUILTIN(__builtin_wasm_qfms_f32x4, "V4fV4fV4fV4f", "nc", "unimplemented-simd128") 
TARGET_BUILTIN(__builtin_wasm_qfma_f64x2, "V2dV2dV2dV2d", "nc", "unimplemented-simd128") TARGET_BUILTIN(__builtin_wasm_qfms_f64x2, "V2dV2dV2dV2d", "nc", "unimplemented-simd128") TARGET_BUILTIN(__builtin_wasm_trunc_saturate_s_i32x4_f32x4, "V4iV4f", "nc", "simd128") TARGET_BUILTIN(__builtin_wasm_trunc_saturate_u_i32x4_f32x4, "V4iV4f", "nc", "simd128") -TARGET_BUILTIN(__builtin_wasm_trunc_saturate_s_i64x2_f64x2, "V2LLiV2d", "nc", "unimplemented-simd128") -TARGET_BUILTIN(__builtin_wasm_trunc_saturate_u_i64x2_f64x2, "V2LLiV2d", "nc", "unimplemented-simd128") TARGET_BUILTIN(__builtin_wasm_narrow_s_i8x16_i16x8, "V16cV8sV8s", "nc", "simd128") TARGET_BUILTIN(__builtin_wasm_narrow_u_i8x16_i16x8, "V16cV8sV8s", "nc", "simd128") diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp index 2d20f92fbb3d2..ac946b476f29e 100644 --- a/clang/lib/CodeGen/CGBuiltin.cpp +++ b/clang/lib/CodeGen/CGBuiltin.cpp @@ -14462,8 +14462,7 @@ Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID, case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i32_f64: case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i64_f32: case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i64_f64: - case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i32x4_f32x4: - case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i64x2_f64x2: { + case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i32x4_f32x4: { Value *Src = EmitScalarExpr(E->getArg(0)); llvm::Type *ResT = ConvertType(E->getType()); Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_trunc_saturate_signed, @@ -14474,8 +14473,7 @@ Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID, case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i32_f64: case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i64_f32: case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i64_f64: - case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i32x4_f32x4: - case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i64x2_f64x2: { + case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i32x4_f32x4: { Value *Src = EmitScalarExpr(E->getArg(0)); llvm::Type *ResT = ConvertType(E->getType()); Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_trunc_saturate_unsigned, diff --git a/clang/test/CodeGen/builtins-wasm.c b/clang/test/CodeGen/builtins-wasm.c index e8e5ba3fbe097..5a6ae7a85cc48 100644 --- a/clang/test/CodeGen/builtins-wasm.c +++ b/clang/test/CodeGen/builtins-wasm.c @@ -519,18 +519,6 @@ i32x4 trunc_saturate_u_i32x4_f32x4(f32x4 f) { // WEBASSEMBLY-NEXT: ret } -i64x2 trunc_saturate_s_i64x2_f64x2(f64x2 f) { - return __builtin_wasm_trunc_saturate_s_i64x2_f64x2(f); - // WEBASSEMBLY: call <2 x i64> @llvm.wasm.trunc.saturate.signed.v2i64.v2f64(<2 x double> %f) - // WEBASSEMBLY-NEXT: ret -} - -i64x2 trunc_saturate_u_i64x2_f64x2(f64x2 f) { - return __builtin_wasm_trunc_saturate_u_i64x2_f64x2(f); - // WEBASSEMBLY: call <2 x i64> @llvm.wasm.trunc.saturate.unsigned.v2i64.v2f64(<2 x double> %f) - // WEBASSEMBLY-NEXT: ret -} - i8x16 narrow_s_i8x16_i16x8(i16x8 low, i16x8 high) { return __builtin_wasm_narrow_s_i8x16_i16x8(low, high); // WEBASSEMBLY: call <16 x i8> @llvm.wasm.narrow.signed.v16i8.v8i16( diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp index 5b177c0c5d9d5..b1f2f3fa18ef7 100644 --- a/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp +++ b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp @@ -61,8 +61,6 @@ WebAssemblyTargetLowering::WebAssemblyTargetLowering( 
addRegisterClass(MVT::v8i16, &WebAssembly::V128RegClass); addRegisterClass(MVT::v4i32, &WebAssembly::V128RegClass); addRegisterClass(MVT::v4f32, &WebAssembly::V128RegClass); - } - if (Subtarget->hasUnimplementedSIMD128()) { addRegisterClass(MVT::v2i64, &WebAssembly::V128RegClass); addRegisterClass(MVT::v2f64, &WebAssembly::V128RegClass); } @@ -116,10 +114,8 @@ WebAssemblyTargetLowering::WebAssemblyTargetLowering( for (auto T : {MVT::i32, MVT::i64}) setOperationAction(Op, T, Expand); if (Subtarget->hasSIMD128()) - for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32}) + for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64}) setOperationAction(Op, T, Expand); - if (Subtarget->hasUnimplementedSIMD128()) - setOperationAction(Op, MVT::v2i64, Expand); } // SIMD-specific configuration @@ -130,83 +126,63 @@ WebAssemblyTargetLowering::WebAssemblyTargetLowering( setOperationAction(Op, T, Legal); // Custom lower BUILD_VECTORs to minimize number of replace_lanes - for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32}) + for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64, + MVT::v2f64}) setOperationAction(ISD::BUILD_VECTOR, T, Custom); - if (Subtarget->hasUnimplementedSIMD128()) - for (auto T : {MVT::v2i64, MVT::v2f64}) - setOperationAction(ISD::BUILD_VECTOR, T, Custom); // We have custom shuffle lowering to expose the shuffle mask - for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32}) + for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64, + MVT::v2f64}) setOperationAction(ISD::VECTOR_SHUFFLE, T, Custom); - if (Subtarget->hasUnimplementedSIMD128()) - for (auto T: {MVT::v2i64, MVT::v2f64}) - setOperationAction(ISD::VECTOR_SHUFFLE, T, Custom); // Custom lowering since wasm shifts must have a scalar shift amount - for (auto Op : {ISD::SHL, ISD::SRA, ISD::SRL}) { - for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32}) + for (auto Op : {ISD::SHL, ISD::SRA, ISD::SRL}) + for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64}) setOperationAction(Op, T, Custom); - if (Subtarget->hasUnimplementedSIMD128()) - setOperationAction(Op, MVT::v2i64, Custom); - } // Custom lower lane accesses to expand out variable indices - for (auto Op : {ISD::EXTRACT_VECTOR_ELT, ISD::INSERT_VECTOR_ELT}) { - for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32}) + for (auto Op : {ISD::EXTRACT_VECTOR_ELT, ISD::INSERT_VECTOR_ELT}) + for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64, + MVT::v2f64}) setOperationAction(Op, T, Custom); - if (Subtarget->hasUnimplementedSIMD128()) - for (auto T : {MVT::v2i64, MVT::v2f64}) - setOperationAction(Op, T, Custom); - } // There is no i64x2.mul instruction + // TODO: Actually, there is now. Implement it. 
setOperationAction(ISD::MUL, MVT::v2i64, Expand); // There are no vector select instructions - for (auto Op : {ISD::VSELECT, ISD::SELECT_CC, ISD::SELECT}) { - for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32}) + for (auto Op : {ISD::VSELECT, ISD::SELECT_CC, ISD::SELECT}) + for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64, + MVT::v2f64}) setOperationAction(Op, T, Expand); - if (Subtarget->hasUnimplementedSIMD128()) - for (auto T : {MVT::v2i64, MVT::v2f64}) - setOperationAction(Op, T, Expand); - } // Expand integer operations supported for scalars but not SIMD for (auto Op : {ISD::CTLZ, ISD::CTTZ, ISD::CTPOP, ISD::SDIV, ISD::UDIV, - ISD::SREM, ISD::UREM, ISD::ROTL, ISD::ROTR}) { - for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32}) + ISD::SREM, ISD::UREM, ISD::ROTL, ISD::ROTR}) + for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64}) setOperationAction(Op, T, Expand); - if (Subtarget->hasUnimplementedSIMD128()) - setOperationAction(Op, MVT::v2i64, Expand); - } // But we do have integer min and max operations - if (Subtarget->hasUnimplementedSIMD128()) { - for (auto Op : {ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX}) - for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32}) - setOperationAction(Op, T, Legal); - } + for (auto Op : {ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX}) + for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32}) + setOperationAction(Op, T, Legal); // Expand float operations supported for scalars but not SIMD for (auto Op : {ISD::FCEIL, ISD::FFLOOR, ISD::FTRUNC, ISD::FNEARBYINT, ISD::FCOPYSIGN, ISD::FLOG, ISD::FLOG2, ISD::FLOG10, - ISD::FEXP, ISD::FEXP2, ISD::FRINT}) { - setOperationAction(Op, MVT::v4f32, Expand); - if (Subtarget->hasUnimplementedSIMD128()) - setOperationAction(Op, MVT::v2f64, Expand); - } + ISD::FEXP, ISD::FEXP2, ISD::FRINT}) + for (auto T : {MVT::v4f32, MVT::v2f64}) + setOperationAction(Op, T, Expand); // Expand operations not supported for i64x2 vectors - if (Subtarget->hasUnimplementedSIMD128()) - for (unsigned CC = 0; CC < ISD::SETCC_INVALID; ++CC) - setCondCodeAction(static_cast(CC), MVT::v2i64, Custom); - - // Expand additional SIMD ops that V8 hasn't implemented yet - if (!Subtarget->hasUnimplementedSIMD128()) { - setOperationAction(ISD::FSQRT, MVT::v4f32, Expand); - setOperationAction(ISD::FDIV, MVT::v4f32, Expand); - } + for (unsigned CC = 0; CC < ISD::SETCC_INVALID; ++CC) + setCondCodeAction(static_cast(CC), MVT::v2i64, Custom); + + // 64x2 conversions are not in the spec + for (auto Op : + {ISD::SINT_TO_FP, ISD::UINT_TO_FP, ISD::FP_TO_SINT, ISD::FP_TO_UINT}) + for (auto T : {MVT::v2i64, MVT::v2f64}) + setOperationAction(Op, T, Expand); } // As a special case, these operators use the type to mean the type to @@ -1270,39 +1246,42 @@ WebAssemblyTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op, SelectionDAG &DAG) const { SDLoc DL(Op); // If sign extension operations are disabled, allow sext_inreg only if operand - // is a vector extract. SIMD does not depend on sign extension operations, but - // allowing sext_inreg in this context lets us have simple patterns to select - // extract_lane_s instructions. Expanding sext_inreg everywhere would be - // simpler in this file, but would necessitate large and brittle patterns to - // undo the expansion and select extract_lane_s instructions. + // is a vector extract of an i8 or i16 lane. SIMD does not depend on sign + // extension operations, but allowing sext_inreg in this context lets us have + // simple patterns to select extract_lane_s instructions. 
Expanding sext_inreg + // everywhere would be simpler in this file, but would necessitate large and + // brittle patterns to undo the expansion and select extract_lane_s + // instructions. assert(!Subtarget->hasSignExt() && Subtarget->hasSIMD128()); - if (Op.getOperand(0).getOpcode() == ISD::EXTRACT_VECTOR_ELT) { - const SDValue &Extract = Op.getOperand(0); - MVT VecT = Extract.getOperand(0).getSimpleValueType(); - MVT ExtractedLaneT = static_cast(Op.getOperand(1).getNode()) - ->getVT() - .getSimpleVT(); - MVT ExtractedVecT = - MVT::getVectorVT(ExtractedLaneT, 128 / ExtractedLaneT.getSizeInBits()); - if (ExtractedVecT == VecT) - return Op; - // Bitcast vector to appropriate type to ensure ISel pattern coverage - const SDValue &Index = Extract.getOperand(1); - unsigned IndexVal = - static_cast(Index.getNode())->getZExtValue(); - unsigned Scale = - ExtractedVecT.getVectorNumElements() / VecT.getVectorNumElements(); - assert(Scale > 1); - SDValue NewIndex = - DAG.getConstant(IndexVal * Scale, DL, Index.getValueType()); - SDValue NewExtract = DAG.getNode( - ISD::EXTRACT_VECTOR_ELT, DL, Extract.getValueType(), - DAG.getBitcast(ExtractedVecT, Extract.getOperand(0)), NewIndex); - return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, Op.getValueType(), - NewExtract, Op.getOperand(1)); - } - // Otherwise expand - return SDValue(); + if (Op.getOperand(0).getOpcode() != ISD::EXTRACT_VECTOR_ELT) + return SDValue(); + + const SDValue &Extract = Op.getOperand(0); + MVT VecT = Extract.getOperand(0).getSimpleValueType(); + if (VecT.getVectorElementType().getSizeInBits() > 32) + return SDValue(); + MVT ExtractedLaneT = static_cast(Op.getOperand(1).getNode()) + ->getVT() + .getSimpleVT(); + MVT ExtractedVecT = + MVT::getVectorVT(ExtractedLaneT, 128 / ExtractedLaneT.getSizeInBits()); + if (ExtractedVecT == VecT) + return Op; + + // Bitcast vector to appropriate type to ensure ISel pattern coverage + const SDValue &Index = Extract.getOperand(1); + unsigned IndexVal = + static_cast(Index.getNode())->getZExtValue(); + unsigned Scale = + ExtractedVecT.getVectorNumElements() / VecT.getVectorNumElements(); + assert(Scale > 1); + SDValue NewIndex = + DAG.getConstant(IndexVal * Scale, DL, Index.getValueType()); + SDValue NewExtract = DAG.getNode( + ISD::EXTRACT_VECTOR_ELT, DL, Extract.getValueType(), + DAG.getBitcast(ExtractedVecT, Extract.getOperand(0)), NewIndex); + return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, Op.getValueType(), NewExtract, + Op.getOperand(1)); } SDValue WebAssemblyTargetLowering::LowerBUILD_VECTOR(SDValue Op, @@ -1502,7 +1481,6 @@ SDValue WebAssemblyTargetLowering::LowerSETCC(SDValue Op, // expanding all i64x2 SETCC nodes, but that seems to expand f64x2 SETCC nodes // (which return i64x2 results) as well. So instead we manually unroll i64x2 // comparisons here. 
- assert(Subtarget->hasUnimplementedSIMD128()); assert(Op->getOperand(0)->getSimpleValueType(0) == MVT::v2i64); SmallVector LHS, RHS; DAG.ExtractVectorElements(Op->getOperand(0), LHS); diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyInstrSIMD.td b/llvm/lib/Target/WebAssembly/WebAssemblyInstrSIMD.td index 64033c993e3f9..31414ee7d40f2 100644 --- a/llvm/lib/Target/WebAssembly/WebAssemblyInstrSIMD.td +++ b/llvm/lib/Target/WebAssembly/WebAssemblyInstrSIMD.td @@ -67,10 +67,10 @@ multiclass SIMDLoadSplat simdop> { vec#".load_splat\t$off$p2align", simdop>; } -defm "" : SIMDLoadSplat<"v8x16", 194>; -defm "" : SIMDLoadSplat<"v16x8", 195>; -defm "" : SIMDLoadSplat<"v32x4", 196>; -defm "" : SIMDLoadSplat<"v64x2", 197>; +defm "" : SIMDLoadSplat<"v8x16", 7>; +defm "" : SIMDLoadSplat<"v16x8", 8>; +defm "" : SIMDLoadSplat<"v32x4", 9>; +defm "" : SIMDLoadSplat<"v64x2", 10>; def wasm_load_splat_t : SDTypeProfile<1, 1, [SDTCisPtrTy<1>]>; def wasm_load_splat : SDNode<"WebAssemblyISD::LOAD_SPLAT", wasm_load_splat_t, @@ -116,9 +116,9 @@ multiclass SIMDLoadExtend simdop> { } } -defm "" : SIMDLoadExtend; -defm "" : SIMDLoadExtend; -defm "" : SIMDLoadExtend; +defm "" : SIMDLoadExtend; +defm "" : SIMDLoadExtend; +defm "" : SIMDLoadExtend; let Predicates = [HasUnimplementedSIMD128] in foreach types = [[v8i16, i8], [v4i32, i16], [v2i64, i32]] in @@ -144,7 +144,7 @@ defm STORE_V128 : SIMD_I<(outs), (ins P2Align:$p2align, offset32_op:$off, I32:$addr, V128:$vec), (outs), (ins P2Align:$p2align, offset32_op:$off), [], "v128.store\t${off}(${addr})$p2align, $vec", - "v128.store\t$off$p2align", 1>; + "v128.store\t$off$p2align", 11>; foreach vec_t = [v16i8, v8i16, v4i32, v2i64, v4f32, v2f64] in { // Def load and store patterns from WebAssemblyInstrMemory.td for vector types @@ -166,7 +166,7 @@ multiclass ConstVec { defm CONST_V128_#vec_t : SIMD_I<(outs V128:$dst), ops, (outs), ops, [(set V128:$dst, (vec_t pat))], "v128.const\t$dst, "#args, - "v128.const\t"#args, 2>; + "v128.const\t"#args, 12>; } defm "" : ConstVec; + 13>; // Shuffles after custom lowering def wasm_shuffle_t : SDTypeProfile<1, 18, []>; @@ -278,7 +278,7 @@ defm SWIZZLE : SIMD_I<(outs V128:$dst), (ins V128:$src, V128:$mask), (outs), (ins), [(set (v16i8 V128:$dst), (wasm_swizzle (v16i8 V128:$src), (v16i8 V128:$mask)))], - "v8x16.swizzle\t$dst, $src, $mask", "v8x16.swizzle", 192>; + "v8x16.swizzle\t$dst, $src, $mask", "v8x16.swizzle", 14>; def : Pat<(int_wasm_swizzle (v16i8 V128:$src), (v16i8 V128:$mask)), (SWIZZLE V128:$src, V128:$mask)>; @@ -305,12 +305,12 @@ multiclass Splat; } -defm "" : Splat; -defm "" : Splat; -defm "" : Splat; -defm "" : Splat; -defm "" : Splat; -defm "" : Splat; +defm "" : Splat; +defm "" : Splat; +defm "" : Splat; +defm "" : Splat; +defm "" : Splat; +defm "" : Splat; // scalar_to_vector leaves high lanes undefined, so can be a splat class ScalarSplatPat; //===----------------------------------------------------------------------===// // Extract lane as a scalar: extract_lane / extract_lane_s / extract_lane_u -multiclass ExtractLane simdop, - string suffix = "", SDNode extract = vector_extract> { +multiclass ExtractLane simdop, string suffix = ""> { defm EXTRACT_LANE_#vec_t#suffix : SIMD_I<(outs reg_t:$dst), (ins V128:$vec, vec_i8imm_op:$idx), - (outs), (ins vec_i8imm_op:$idx), - [(set reg_t:$dst, (extract (vec_t V128:$vec), (i32 imm_t:$idx)))], + (outs), (ins vec_i8imm_op:$idx), [], vec#".extract_lane"#suffix#"\t$dst, $vec, $idx", vec#".extract_lane"#suffix#"\t$idx", simdop>; } -multiclass ExtractPat { - def _s : PatFrag<(ops 
node:$vec, node:$idx), - (i32 (sext_inreg - (i32 (vector_extract - node:$vec, - node:$idx - )), - lane_t - ))>; - def _u : PatFrag<(ops node:$vec, node:$idx), - (i32 (and - (i32 (vector_extract - node:$vec, - node:$idx - )), - (i32 mask) - ))>; -} - -defm extract_i8x16 : ExtractPat; -defm extract_i16x8 : ExtractPat; - -multiclass ExtractLaneExtended baseInst> { - defm "" : ExtractLane("extract_i8x16"#sign)>; - defm "" : ExtractLane("extract_i16x8"#sign)>; -} - -defm "" : ExtractLaneExtended<"_s", 5>; -let Predicates = [HasUnimplementedSIMD128] in -defm "" : ExtractLaneExtended<"_u", 6>; -defm "" : ExtractLane; -defm "" : ExtractLane; -defm "" : ExtractLane; -defm "" : ExtractLane; - -// It would be more conventional to use unsigned extracts, but v8 -// doesn't implement them yet -def : Pat<(i32 (vector_extract (v16i8 V128:$vec), (i32 LaneIdx16:$idx))), - (EXTRACT_LANE_v16i8_s V128:$vec, (i32 LaneIdx16:$idx))>; -def : Pat<(i32 (vector_extract (v8i16 V128:$vec), (i32 LaneIdx8:$idx))), - (EXTRACT_LANE_v8i16_s V128:$vec, (i32 LaneIdx8:$idx))>; - -// Lower undef lane indices to zero -def : Pat<(and (i32 (vector_extract (v16i8 V128:$vec), undef)), (i32 0xff)), - (EXTRACT_LANE_v16i8_u V128:$vec, 0)>; -def : Pat<(and (i32 (vector_extract (v8i16 V128:$vec), undef)), (i32 0xffff)), - (EXTRACT_LANE_v8i16_u V128:$vec, 0)>; -def : Pat<(i32 (vector_extract (v16i8 V128:$vec), undef)), - (EXTRACT_LANE_v16i8_u V128:$vec, 0)>; -def : Pat<(i32 (vector_extract (v8i16 V128:$vec), undef)), - (EXTRACT_LANE_v8i16_u V128:$vec, 0)>; -def : Pat<(sext_inreg (i32 (vector_extract (v16i8 V128:$vec), undef)), i8), - (EXTRACT_LANE_v16i8_s V128:$vec, 0)>; -def : Pat<(sext_inreg (i32 (vector_extract (v8i16 V128:$vec), undef)), i16), - (EXTRACT_LANE_v8i16_s V128:$vec, 0)>; -def : Pat<(vector_extract (v4i32 V128:$vec), undef), - (EXTRACT_LANE_v4i32 V128:$vec, 0)>; -def : Pat<(vector_extract (v2i64 V128:$vec), undef), - (EXTRACT_LANE_v2i64 V128:$vec, 0)>; -def : Pat<(vector_extract (v4f32 V128:$vec), undef), - (EXTRACT_LANE_v4f32 V128:$vec, 0)>; -def : Pat<(vector_extract (v2f64 V128:$vec), undef), - (EXTRACT_LANE_v2f64 V128:$vec, 0)>; +defm "" : ExtractLane; +defm "" : ExtractLane; +defm "" : ExtractLane; +defm "" : ExtractLane; +defm "" : ExtractLane; +defm "" : ExtractLane; +defm "" : ExtractLane; +defm "" : ExtractLane; + +def : Pat<(vector_extract (v16i8 V128:$vec), (i32 LaneIdx16:$idx)), + (EXTRACT_LANE_v16i8_u V128:$vec, imm:$idx)>; +def : Pat<(vector_extract (v8i16 V128:$vec), (i32 LaneIdx8:$idx)), + (EXTRACT_LANE_v8i16_u V128:$vec, imm:$idx)>; +def : Pat<(vector_extract (v4i32 V128:$vec), (i32 LaneIdx4:$idx)), + (EXTRACT_LANE_v4i32 V128:$vec, imm:$idx)>; +def : Pat<(vector_extract (v4f32 V128:$vec), (i32 LaneIdx4:$idx)), + (EXTRACT_LANE_v4f32 V128:$vec, imm:$idx)>; +def : Pat<(vector_extract (v2i64 V128:$vec), (i32 LaneIdx2:$idx)), + (EXTRACT_LANE_v2i64 V128:$vec, imm:$idx)>; +def : Pat<(vector_extract (v2f64 V128:$vec), (i32 LaneIdx2:$idx)), + (EXTRACT_LANE_v2f64 V128:$vec, imm:$idx)>; + +def : Pat< + (sext_inreg (vector_extract (v16i8 V128:$vec), (i32 LaneIdx16:$idx)), i8), + (EXTRACT_LANE_v16i8_s V128:$vec, imm:$idx)>; +def : Pat< + (and (vector_extract (v16i8 V128:$vec), (i32 LaneIdx16:$idx)), (i32 0xff)), + (EXTRACT_LANE_v16i8_u V128:$vec, imm:$idx)>; +def : Pat< + (sext_inreg (vector_extract (v8i16 V128:$vec), (i32 LaneIdx8:$idx)), i16), + (EXTRACT_LANE_v8i16_s V128:$vec, imm:$idx)>; +def : Pat< + (and (vector_extract (v8i16 V128:$vec), (i32 LaneIdx8:$idx)), (i32 0xffff)), + (EXTRACT_LANE_v8i16_u V128:$vec, 
imm:$idx)>; // Replace lane value: replace_lane multiclass ReplaceLane; } -defm "" : ReplaceLane; -defm "" : ReplaceLane; -defm "" : ReplaceLane; -defm "" : ReplaceLane; -defm "" : ReplaceLane; -defm "" : ReplaceLane; +defm "" : ReplaceLane; +defm "" : ReplaceLane; +defm "" : ReplaceLane; +defm "" : ReplaceLane; +defm "" : ReplaceLane; +defm "" : ReplaceLane; // Lower undef lane indices to zero def : Pat<(vector_insert (v16i8 V128:$vec), I32:$x, undef), @@ -471,35 +438,35 @@ multiclass SIMDConditionFP baseInst> { // Equality: eq let isCommutable = 1 in { -defm EQ : SIMDConditionInt<"eq", SETEQ, 24>; -defm EQ : SIMDConditionFP<"eq", SETOEQ, 64>; +defm EQ : SIMDConditionInt<"eq", SETEQ, 35>; +defm EQ : SIMDConditionFP<"eq", SETOEQ, 65>; } // isCommutable = 1 // Non-equality: ne let isCommutable = 1 in { -defm NE : SIMDConditionInt<"ne", SETNE, 25>; -defm NE : SIMDConditionFP<"ne", SETUNE, 65>; +defm NE : SIMDConditionInt<"ne", SETNE, 36>; +defm NE : SIMDConditionFP<"ne", SETUNE, 66>; } // isCommutable = 1 // Less than: lt_s / lt_u / lt -defm LT_S : SIMDConditionInt<"lt_s", SETLT, 26>; -defm LT_U : SIMDConditionInt<"lt_u", SETULT, 27>; -defm LT : SIMDConditionFP<"lt", SETOLT, 66>; +defm LT_S : SIMDConditionInt<"lt_s", SETLT, 37>; +defm LT_U : SIMDConditionInt<"lt_u", SETULT, 38>; +defm LT : SIMDConditionFP<"lt", SETOLT, 67>; // Greater than: gt_s / gt_u / gt -defm GT_S : SIMDConditionInt<"gt_s", SETGT, 28>; -defm GT_U : SIMDConditionInt<"gt_u", SETUGT, 29>; -defm GT : SIMDConditionFP<"gt", SETOGT, 67>; +defm GT_S : SIMDConditionInt<"gt_s", SETGT, 39>; +defm GT_U : SIMDConditionInt<"gt_u", SETUGT, 40>; +defm GT : SIMDConditionFP<"gt", SETOGT, 68>; // Less than or equal: le_s / le_u / le -defm LE_S : SIMDConditionInt<"le_s", SETLE, 30>; -defm LE_U : SIMDConditionInt<"le_u", SETULE, 31>; -defm LE : SIMDConditionFP<"le", SETOLE, 68>; +defm LE_S : SIMDConditionInt<"le_s", SETLE, 41>; +defm LE_U : SIMDConditionInt<"le_u", SETULE, 42>; +defm LE : SIMDConditionFP<"le", SETOLE, 69>; // Greater than or equal: ge_s / ge_u / ge -defm GE_S : SIMDConditionInt<"ge_s", SETGE, 32>; -defm GE_U : SIMDConditionInt<"ge_u", SETUGE, 33>; -defm GE : SIMDConditionFP<"ge", SETOGE, 69>; +defm GE_S : SIMDConditionInt<"ge_s", SETGE, 43>; +defm GE_U : SIMDConditionInt<"ge_u", SETUGE, 44>; +defm GE : SIMDConditionFP<"ge", SETOGE, 70>; // Lower float comparisons that don't care about NaN to standard WebAssembly // float comparisons. 
These instructions are generated with nnan and in the @@ -548,19 +515,19 @@ multiclass SIMDUnary; +defm NOT: SIMDUnary; -// Bitwise logic: v128.and / v128.or / v128.xor +// Bitwise logic: v128.and / v128.andnot / v128.or / v128.xor let isCommutable = 1 in { -defm AND : SIMDBitwise; -defm OR : SIMDBitwise; -defm XOR : SIMDBitwise; +defm AND : SIMDBitwise; +defm OR : SIMDBitwise; +defm XOR : SIMDBitwise; } // isCommutable = 1 // Bitwise logic: v128.andnot def andnot : PatFrag<(ops node:$left, node:$right), (and $left, (vnot $right))>; let Predicates = [HasUnimplementedSIMD128] in -defm ANDNOT : SIMDBitwise; +defm ANDNOT : SIMDBitwise; // Bitwise select: v128.bitselect foreach vec_t = [v16i8, v8i16, v4i32, v2i64, v4f32, v2f64] in @@ -571,7 +538,7 @@ foreach vec_t = [v16i8, v8i16, v4i32, v2i64, v4f32, v2f64] in (vec_t V128:$v1), (vec_t V128:$v2), (vec_t V128:$c) )) )], - "v128.bitselect\t$dst, $v1, $v2, $c", "v128.bitselect", 80>; + "v128.bitselect\t$dst, $v1, $v2, $c", "v128.bitselect", 82>; // Bitselect is equivalent to (c & v1) | (~c & v2) foreach vec_t = [v16i8, v8i16, v4i32, v2i64] in @@ -586,9 +553,9 @@ foreach vec_t = [v16i8, v8i16, v4i32, v2i64] in multiclass SIMDUnaryInt baseInst> { defm "" : SIMDUnary; - defm "" : SIMDUnary; - defm "" : SIMDUnary; - defm "" : SIMDUnary; + defm "" : SIMDUnary; + defm "" : SIMDUnary; + defm "" : SIMDUnary; } multiclass SIMDReduceVec baseInst> { defm "" : SIMDReduceVec; - defm "" : SIMDReduceVec; - defm "" : SIMDReduceVec; - defm "" : SIMDReduceVec; + defm "" : SIMDReduceVec; + defm "" : SIMDReduceVec; + defm "" : SIMDReduceVec; } // Integer vector negation def ivneg : PatFrag<(ops node:$in), (sub immAllZerosV, node:$in)>; +// Integer absolute value: abs +defm ABS : SIMDUnaryInt; + // Integer negation: neg -defm NEG : SIMDUnaryInt; +defm NEG : SIMDUnaryInt; // Any lane true: any_true -defm ANYTRUE : SIMDReduce; +defm ANYTRUE : SIMDReduce; // All lanes true: all_true -defm ALLTRUE : SIMDReduce; +defm ALLTRUE : SIMDReduce; // Reductions already return 0 or 1, so and 1, setne 0, and seteq 1 // can be folded out @@ -655,19 +625,19 @@ multiclass SIMDShift baseInst> { defm "" : SIMDShift; defm "" : SIMDShift; + !add(baseInst, 32)>; defm "" : SIMDShift; + !add(baseInst, 64)>; defm "" : SIMDShift; + name, !add(baseInst, 96)>; } // Left shift by scalar: shl -defm SHL : SIMDShiftInt; +defm SHL : SIMDShiftInt; // Right shift by scalar: shr_s / shr_u -defm SHR_S : SIMDShiftInt; -defm SHR_U : SIMDShiftInt; +defm SHR_S : SIMDShiftInt; +defm SHR_U : SIMDShiftInt; // Truncate i64 shift operands to i32s, except if they are already i32s foreach shifts = [[shl, SHL_v2i64], [sra, SHR_S_v2i64], [srl, SHR_U_v2i64]] in { @@ -699,49 +669,49 @@ def : Pat<(v2i64 (shifts[0] (v2i64 V128:$vec), I32:$x)), multiclass SIMDBinaryIntSmall baseInst> { defm "" : SIMDBinary; - defm "" : SIMDBinary; + defm "" : SIMDBinary; } multiclass SIMDBinaryIntNoI64x2 baseInst> { defm "" : SIMDBinaryIntSmall; - defm "" : SIMDBinary; + defm "" : SIMDBinary; } multiclass SIMDBinaryInt baseInst> { defm "" : SIMDBinaryIntNoI64x2; - defm "" : SIMDBinary; + defm "" : SIMDBinary; } // Integer addition: add / add_saturate_s / add_saturate_u let isCommutable = 1 in { -defm ADD : SIMDBinaryInt; -defm ADD_SAT_S : SIMDBinaryIntSmall; -defm ADD_SAT_U : SIMDBinaryIntSmall; +defm ADD : SIMDBinaryInt; +defm ADD_SAT_S : SIMDBinaryIntSmall; +defm ADD_SAT_U : SIMDBinaryIntSmall; } // isCommutable = 1 // Integer subtraction: sub / sub_saturate_s / sub_saturate_u -defm SUB : SIMDBinaryInt; +defm SUB : SIMDBinaryInt; defm 
SUB_SAT_S : - SIMDBinaryIntSmall; + SIMDBinaryIntSmall; defm SUB_SAT_U : - SIMDBinaryIntSmall; + SIMDBinaryIntSmall; // Integer multiplication: mul let isCommutable = 1 in -defm MUL : SIMDBinaryIntNoI64x2; +defm MUL : SIMDBinaryIntNoI64x2; // Integer min_s / min_u / max_s / max_u let isCommutable = 1 in { -defm MIN_S : SIMDBinaryIntNoI64x2; -defm MIN_U : SIMDBinaryIntNoI64x2; -defm MAX_S : SIMDBinaryIntNoI64x2; -defm MAX_U : SIMDBinaryIntNoI64x2; +defm MIN_S : SIMDBinaryIntNoI64x2; +defm MIN_U : SIMDBinaryIntNoI64x2; +defm MAX_S : SIMDBinaryIntNoI64x2; +defm MAX_U : SIMDBinaryIntNoI64x2; } // isCommutable = 1 // Integer unsigned rounding average: avgr_u -let isCommutable = 1, Predicates = [HasUnimplementedSIMD128] in { -defm AVGR_U : SIMDBinary; -defm AVGR_U : SIMDBinary; +let isCommutable = 1 in { +defm AVGR_U : SIMDBinary; +defm AVGR_U : SIMDBinary; } def add_nuw : PatFrag<(ops node:$lhs, node:$rhs), @@ -763,7 +733,7 @@ let isCommutable = 1 in defm DOT : SIMD_I<(outs V128:$dst), (ins V128:$lhs, V128:$rhs), (outs), (ins), [(set V128:$dst, (int_wasm_dot V128:$lhs, V128:$rhs))], "i32x4.dot_i16x8_s\t$dst, $lhs, $rhs", "i32x4.dot_i16x8_s", - 219>; + 180>; //===----------------------------------------------------------------------===// // Floating-point unary arithmetic @@ -771,18 +741,17 @@ defm DOT : SIMD_I<(outs V128:$dst), (ins V128:$lhs, V128:$rhs), (outs), (ins), multiclass SIMDUnaryFP baseInst> { defm "" : SIMDUnary; - defm "" : SIMDUnary; + defm "" : SIMDUnary; } // Absolute value: abs -defm ABS : SIMDUnaryFP; +defm ABS : SIMDUnaryFP; // Negation: neg -defm NEG : SIMDUnaryFP; +defm NEG : SIMDUnaryFP; // Square root: sqrt -let Predicates = [HasUnimplementedSIMD128] in -defm SQRT : SIMDUnaryFP; +defm SQRT : SIMDUnaryFP; //===----------------------------------------------------------------------===// // Floating-point binary arithmetic @@ -790,29 +759,28 @@ defm SQRT : SIMDUnaryFP; multiclass SIMDBinaryFP baseInst> { defm "" : SIMDBinary; - defm "" : SIMDBinary; + defm "" : SIMDBinary; } // Addition: add let isCommutable = 1 in -defm ADD : SIMDBinaryFP; +defm ADD : SIMDBinaryFP; // Subtraction: sub -defm SUB : SIMDBinaryFP; +defm SUB : SIMDBinaryFP; // Multiplication: mul let isCommutable = 1 in -defm MUL : SIMDBinaryFP; +defm MUL : SIMDBinaryFP; // Division: div -let Predicates = [HasUnimplementedSIMD128] in -defm DIV : SIMDBinaryFP; +defm DIV : SIMDBinaryFP; // NaN-propagating minimum: min -defm MIN : SIMDBinaryFP; +defm MIN : SIMDBinaryFP; // NaN-propagating maximum: max -defm MAX : SIMDBinaryFP; +defm MAX : SIMDBinaryFP; //===----------------------------------------------------------------------===// // Conversions @@ -826,17 +794,13 @@ multiclass SIMDConvert; } -// Integer to floating point: convert -defm "" : SIMDConvert; -defm "" : SIMDConvert; -defm "" : SIMDConvert; -defm "" : SIMDConvert; - // Floating point to integer with saturation: trunc_sat -defm "" : SIMDConvert; -defm "" : SIMDConvert; -defm "" : SIMDConvert; -defm "" : SIMDConvert; +defm "" : SIMDConvert; +defm "" : SIMDConvert; + +// Integer to floating point: convert +defm "" : SIMDConvert; +defm "" : SIMDConvert; // Widening operations multiclass SIMDWiden; } -defm "" : SIMDWiden; -defm "" : SIMDWiden; +defm "" : SIMDWiden; +defm "" : SIMDWiden; // Narrowing operations multiclass SIMDNarrow; } -defm "" : SIMDNarrow; -defm "" : SIMDNarrow; +defm "" : SIMDNarrow; +defm "" : SIMDNarrow; // Lower llvm.wasm.trunc.saturate.* to saturating instructions def : Pat<(v4i32 (int_wasm_trunc_saturate_signed (v4f32 V128:$src))), 
(fp_to_sint_v4i32_v4f32 (v4f32 V128:$src))>; def : Pat<(v4i32 (int_wasm_trunc_saturate_unsigned (v4f32 V128:$src))), (fp_to_uint_v4i32_v4f32 (v4f32 V128:$src))>; -def : Pat<(v2i64 (int_wasm_trunc_saturate_signed (v2f64 V128:$src))), - (fp_to_sint_v2i64_v2f64 (v2f64 V128:$src))>; -def : Pat<(v2i64 (int_wasm_trunc_saturate_unsigned (v2f64 V128:$src))), - (fp_to_uint_v2i64_v2f64 (v2f64 V128:$src))>; // Bitcasts are nops // Matching bitcast t1 to t1 causes strange errors, so avoid repeating types @@ -914,5 +874,5 @@ multiclass SIMDQFM baseInst> { vec#".qfms\t$dst, $a, $b, $c", vec#".qfms", !add(baseInst, 1)>; } -defm "" : SIMDQFM; -defm "" : SIMDQFM; +defm "" : SIMDQFM; +defm "" : SIMDQFM; diff --git a/llvm/test/CodeGen/WebAssembly/simd-arith.ll b/llvm/test/CodeGen/WebAssembly/simd-arith.ll index 180f7d0661562..4ccc6f8b613fb 100644 --- a/llvm/test/CodeGen/WebAssembly/simd-arith.ll +++ b/llvm/test/CodeGen/WebAssembly/simd-arith.ll @@ -160,15 +160,15 @@ define <16 x i8> @shl_const_v16i8(<16 x i8> %v) { ; CHECK-LABEL: shl_vec_v16i8: ; NO-SIMD128-NOT: i8x16 ; SIMD128-NEXT: .functype shl_vec_v16i8 (v128, v128) -> (v128){{$}} -; SIMD128-NEXT: i8x16.extract_lane_s $push[[L0:[0-9]+]]=, $0, 0{{$}} -; SIMD128-NEXT: i8x16.extract_lane_s $push[[L1:[0-9]+]]=, $1, 0{{$}} +; SIMD128-NEXT: i8x16.extract_lane_u $push[[L0:[0-9]+]]=, $0, 0{{$}} +; SIMD128-NEXT: i8x16.extract_lane_u $push[[L1:[0-9]+]]=, $1, 0{{$}} ; SIMD128-NEXT: i32.const $push[[M0:[0-9]+]]=, 7{{$}} ; SIMD128-NEXT: i32.and $push[[M1:[0-9]+]]=, $pop[[L1]], $pop[[M0]]{{$}} ; SIMD128-NEXT: i32.shl $push[[M2:[0-9]+]]=, $pop[[L0]], $pop[[M1]] ; SIMD128-NEXT: i8x16.splat $push[[M3:[0-9]+]]=, $pop[[M2]] ; Skip 14 lanes -; SIMD128: i8x16.extract_lane_s $push[[L4:[0-9]+]]=, $0, 15{{$}} -; SIMD128-NEXT: i8x16.extract_lane_s $push[[L5:[0-9]+]]=, $1, 15{{$}} +; SIMD128: i8x16.extract_lane_u $push[[L4:[0-9]+]]=, $0, 15{{$}} +; SIMD128-NEXT: i8x16.extract_lane_u $push[[L5:[0-9]+]]=, $1, 15{{$}} ; SIMD128-NEXT: i32.const $push[[M4:[0-9]+]]=, 7{{$}} ; SIMD128-NEXT: i32.and $push[[M5:[0-9]+]]=, $pop[[L5]], $pop[[M4]]{{$}} ; SIMD128-NEXT: i32.shl $push[[M6:[0-9]+]]=, $pop[[L4]], $pop[[M5]]{{$}} @@ -197,14 +197,14 @@ define <16 x i8> @shr_s_v16i8(<16 x i8> %v, i8 %x) { ; NO-SIMD128-NOT: i8x16 ; SIMD128-NEXT: .functype shr_s_vec_v16i8 (v128, v128) -> (v128){{$}} ; SIMD128-NEXT: i8x16.extract_lane_s $push[[L0:[0-9]+]]=, $0, 0{{$}} -; SIMD128-NEXT: i8x16.extract_lane_s $push[[L1:[0-9]+]]=, $1, 0{{$}} +; SIMD128-NEXT: i8x16.extract_lane_u $push[[L1:[0-9]+]]=, $1, 0{{$}} ; SIMD128-NEXT: i32.const $push[[M0:[0-9]+]]=, 7{{$}} ; SIMD128-NEXT: i32.and $push[[M1:[0-9]+]]=, $pop[[L1]], $pop[[M0]]{{$}} ; SIMD128-NEXT: i32.shr_s $push[[M2:[0-9]+]]=, $pop[[L0]], $pop[[M1]] ; SIMD128-NEXT: i8x16.splat $push[[M3:[0-9]+]]=, $pop[[M2]] ; Skip 14 lanes ; SIMD128: i8x16.extract_lane_s $push[[L4:[0-9]+]]=, $0, 15{{$}} -; SIMD128-NEXT: i8x16.extract_lane_s $push[[L5:[0-9]+]]=, $1, 15{{$}} +; SIMD128-NEXT: i8x16.extract_lane_u $push[[L5:[0-9]+]]=, $1, 15{{$}} ; SIMD128-NEXT: i32.const $push[[M4:[0-9]+]]=, 7{{$}} ; SIMD128-NEXT: i32.and $push[[M5:[0-9]+]]=, $pop[[L5]], $pop[[M4]]{{$}} ; SIMD128-NEXT: i32.shr_s $push[[M6:[0-9]+]]=, $pop[[L4]], $pop[[M5]]{{$}} @@ -233,14 +233,14 @@ define <16 x i8> @shr_u_v16i8(<16 x i8> %v, i8 %x) { ; NO-SIMD128-NOT: i8x16 ; SIMD128-NEXT: .functype shr_u_vec_v16i8 (v128, v128) -> (v128){{$}} ; SIMD128-NEXT: i8x16.extract_lane_u $push[[L0:[0-9]+]]=, $0, 0{{$}} -; SIMD128-NEXT: i8x16.extract_lane_s $push[[L1:[0-9]+]]=, $1, 0{{$}} +; SIMD128-NEXT: 
i8x16.extract_lane_u $push[[L1:[0-9]+]]=, $1, 0{{$}} ; SIMD128-NEXT: i32.const $push[[M0:[0-9]+]]=, 7{{$}} ; SIMD128-NEXT: i32.and $push[[M1:[0-9]+]]=, $pop[[L1]], $pop[[M0]]{{$}} ; SIMD128-NEXT: i32.shr_u $push[[M2:[0-9]+]]=, $pop[[L0]], $pop[[M1]] ; SIMD128-NEXT: i8x16.splat $push[[M3:[0-9]+]]=, $pop[[M2]] ; Skip 14 lanes ; SIMD128: i8x16.extract_lane_u $push[[L4:[0-9]+]]=, $0, 15{{$}} -; SIMD128-NEXT: i8x16.extract_lane_s $push[[L5:[0-9]+]]=, $1, 15{{$}} +; SIMD128-NEXT: i8x16.extract_lane_u $push[[L5:[0-9]+]]=, $1, 15{{$}} ; SIMD128-NEXT: i32.const $push[[M4:[0-9]+]]=, 7{{$}} ; SIMD128-NEXT: i32.and $push[[M5:[0-9]+]]=, $pop[[L5]], $pop[[M4]]{{$}} ; SIMD128-NEXT: i32.shr_u $push[[M6:[0-9]+]]=, $pop[[L4]], $pop[[M5]]{{$}} @@ -470,15 +470,15 @@ define <8 x i16> @shl_const_v8i16(<8 x i16> %v) { ; CHECK-LABEL: shl_vec_v8i16: ; NO-SIMD128-NOT: i16x8 ; SIMD128-NEXT: .functype shl_vec_v8i16 (v128, v128) -> (v128){{$}} -; SIMD128-NEXT: i16x8.extract_lane_s $push[[L0:[0-9]+]]=, $0, 0{{$}} -; SIMD128-NEXT: i16x8.extract_lane_s $push[[L1:[0-9]+]]=, $1, 0{{$}} +; SIMD128-NEXT: i16x8.extract_lane_u $push[[L0:[0-9]+]]=, $0, 0{{$}} +; SIMD128-NEXT: i16x8.extract_lane_u $push[[L1:[0-9]+]]=, $1, 0{{$}} ; SIMD128-NEXT: i32.const $push[[M0:[0-9]+]]=, 15{{$}} ; SIMD128-NEXT: i32.and $push[[M1:[0-9]+]]=, $pop[[L1]], $pop[[M0]]{{$}} ; SIMD128-NEXT: i32.shl $push[[M2:[0-9]+]]=, $pop[[L0]], $pop[[M1]]{{$}} ; SIMD128-NEXT: i16x8.splat $push[[M3:[0-9]+]]=, $pop[[M2]]{{$}} ; Skip 6 lanes -; SIMD128: i16x8.extract_lane_s $push[[L4:[0-9]+]]=, $0, 7{{$}} -; SIMD128-NEXT: i16x8.extract_lane_s $push[[L5:[0-9]+]]=, $1, 7{{$}} +; SIMD128: i16x8.extract_lane_u $push[[L4:[0-9]+]]=, $0, 7{{$}} +; SIMD128-NEXT: i16x8.extract_lane_u $push[[L5:[0-9]+]]=, $1, 7{{$}} ; SIMD128-NEXT: i32.const $push[[M4:[0-9]+]]=, 15{{$}} ; SIMD128-NEXT: i32.and $push[[M5:[0-9]+]]=, $pop[[L5]], $pop[[M4]]{{$}} ; SIMD128-NEXT: i32.shl $push[[M6:[0-9]+]]=, $pop[[L4]], $pop[[M5]]{{$}} @@ -506,14 +506,14 @@ define <8 x i16> @shr_s_v8i16(<8 x i16> %v, i16 %x) { ; NO-SIMD128-NOT: i16x8 ; SIMD128-NEXT: .functype shr_s_vec_v8i16 (v128, v128) -> (v128){{$}} ; SIMD128-NEXT: i16x8.extract_lane_s $push[[L0:[0-9]+]]=, $0, 0{{$}} -; SIMD128-NEXT: i16x8.extract_lane_s $push[[L1:[0-9]+]]=, $1, 0{{$}} +; SIMD128-NEXT: i16x8.extract_lane_u $push[[L1:[0-9]+]]=, $1, 0{{$}} ; SIMD128-NEXT: i32.const $push[[M0:[0-9]+]]=, 15{{$}} ; SIMD128-NEXT: i32.and $push[[M1:[0-9]+]]=, $pop[[L1]], $pop[[M0]]{{$}} ; SIMD128-NEXT: i32.shr_s $push[[M2:[0-9]+]]=, $pop[[L0]], $pop[[M1]]{{$}} ; SIMD128-NEXT: i16x8.splat $push[[M3:[0-9]+]]=, $pop[[M2]]{{$}} ; Skip 6 lanes ; SIMD128: i16x8.extract_lane_s $push[[L4:[0-9]+]]=, $0, 7{{$}} -; SIMD128-NEXT: i16x8.extract_lane_s $push[[L5:[0-9]+]]=, $1, 7{{$}} +; SIMD128-NEXT: i16x8.extract_lane_u $push[[L5:[0-9]+]]=, $1, 7{{$}} ; SIMD128-NEXT: i32.const $push[[M4:[0-9]+]]=, 15{{$}} ; SIMD128-NEXT: i32.and $push[[M5:[0-9]+]]=, $pop[[L5]], $pop[[M4]]{{$}} ; SIMD128-NEXT: i32.shr_s $push[[M6:[0-9]+]]=, $pop[[L4]], $pop[[M5]]{{$}} @@ -541,14 +541,14 @@ define <8 x i16> @shr_u_v8i16(<8 x i16> %v, i16 %x) { ; NO-SIMD128-NOT: i16x8 ; SIMD128-NEXT: .functype shr_u_vec_v8i16 (v128, v128) -> (v128){{$}} ; SIMD128-NEXT: i16x8.extract_lane_u $push[[L0:[0-9]+]]=, $0, 0{{$}} -; SIMD128-NEXT: i16x8.extract_lane_s $push[[L1:[0-9]+]]=, $1, 0{{$}} +; SIMD128-NEXT: i16x8.extract_lane_u $push[[L1:[0-9]+]]=, $1, 0{{$}} ; SIMD128-NEXT: i32.const $push[[M0:[0-9]+]]=, 15{{$}} ; SIMD128-NEXT: i32.and $push[[M1:[0-9]+]]=, $pop[[L1]], $pop[[M0]]{{$}} ; SIMD128-NEXT: 
i32.shr_u $push[[M2:[0-9]+]]=, $pop[[L0]], $pop[[M1]]{{$}} ; SIMD128-NEXT: i16x8.splat $push[[M3:[0-9]+]]=, $pop[[M2]]{{$}} ; Skip 6 lanes ; SIMD128: i16x8.extract_lane_u $push[[L4:[0-9]+]]=, $0, 7{{$}} -; SIMD128-NEXT: i16x8.extract_lane_s $push[[L5:[0-9]+]]=, $1, 7{{$}} +; SIMD128-NEXT: i16x8.extract_lane_u $push[[L5:[0-9]+]]=, $1, 7{{$}} ; SIMD128-NEXT: i32.const $push[[M4:[0-9]+]]=, 15{{$}} ; SIMD128-NEXT: i32.and $push[[M5:[0-9]+]]=, $pop[[L5]], $pop[[M4]]{{$}} ; SIMD128-NEXT: i32.shr_u $push[[M6:[0-9]+]]=, $pop[[L4]], $pop[[M5]]{{$}} @@ -905,7 +905,6 @@ define <4 x i32> @bitselect_v4i32(<4 x i32> %c, <4 x i32> %v1, <4 x i32> %v2) { ; ============================================================================== ; CHECK-LABEL: add_v2i64: ; NO-SIMD128-NOT: i64x2 -; SIMD128-VM-NOT: i64x2 ; SIMD128-NEXT: .functype add_v2i64 (v128, v128) -> (v128){{$}} ; SIMD128-NEXT: i64x2.add $push[[R:[0-9]+]]=, $0, $1{{$}} ; SIMD128-NEXT: return $pop[[R]]{{$}} @@ -916,7 +915,6 @@ define <2 x i64> @add_v2i64(<2 x i64> %x, <2 x i64> %y) { ; CHECK-LABEL: sub_v2i64: ; NO-SIMD128-NOT: i64x2 -; SIMD128-VM-NOT: i64x2 ; SIMD128-NEXT: .functype sub_v2i64 (v128, v128) -> (v128){{$}} ; SIMD128-NEXT: i64x2.sub $push[[R:[0-9]+]]=, $0, $1{{$}} ; SIMD128-NEXT: return $pop[[R]]{{$}} @@ -928,7 +926,6 @@ define <2 x i64> @sub_v2i64(<2 x i64> %x, <2 x i64> %y) { ; v2i64.mul is not in spec ; CHECK-LABEL: mul_v2i64: ; NO-SIMD128-NOT: i64x2 -; SIMD128-VM-NOT: i64x2 ; SIMD128-NOT: i64x2.mul ; SIMD128: i64x2.extract_lane ; SIMD128: i64.mul @@ -1150,7 +1147,6 @@ define <2 x i64> @shr_u_vec_v2i64(<2 x i64> %v, <2 x i64> %x) { ; CHECK-LABEL: and_v2i64: ; NO-SIMD128-NOT: v128 -; SIMD128-VM-NOT: v128 ; SIMD128-NEXT: .functype and_v2i64 (v128, v128) -> (v128){{$}} ; SIMD128-NEXT: v128.and $push[[R:[0-9]+]]=, $0, $1{{$}} ; SIMD128-NEXT: return $pop[[R]]{{$}} @@ -1161,7 +1157,6 @@ define <2 x i64> @and_v2i64(<2 x i64> %x, <2 x i64> %y) { ; CHECK-LABEL: or_v2i64: ; NO-SIMD128-NOT: v128 -; SIMD128-VM-NOT: v128 ; SIMD128-NEXT: .functype or_v2i64 (v128, v128) -> (v128){{$}} ; SIMD128-NEXT: v128.or $push[[R:[0-9]+]]=, $0, $1{{$}} ; SIMD128-NEXT: return $pop[[R]]{{$}} @@ -1172,7 +1167,6 @@ define <2 x i64> @or_v2i64(<2 x i64> %x, <2 x i64> %y) { ; CHECK-LABEL: xor_v2i64: ; NO-SIMD128-NOT: v128 -; SIMD128-VM-NOT: v128 ; SIMD128-NEXT: .functype xor_v2i64 (v128, v128) -> (v128){{$}} ; SIMD128-NEXT: v128.xor $push[[R:[0-9]+]]=, $0, $1{{$}} ; SIMD128-NEXT: return $pop[[R]]{{$}} @@ -1183,7 +1177,6 @@ define <2 x i64> @xor_v2i64(<2 x i64> %x, <2 x i64> %y) { ; CHECK-LABEL: not_v2i64: ; NO-SIMD128-NOT: v128 -; SIMD128-VM-NOT: v128 ; SIMD128-NEXT: .functype not_v2i64 (v128) -> (v128){{$}} ; SIMD128-NEXT: v128.not $push[[R:[0-9]+]]=, $0{{$}} ; SIMD128-NEXT: return $pop[[R]]{{$}} @@ -1209,7 +1202,6 @@ define <2 x i64> @andnot_v2i64(<2 x i64> %x, <2 x i64> %y) { ; CHECK-LABEL: bitselect_v2i64: ; NO-SIMD128-NOT: v128 -; SIMD128-VM-NOT: v128 ; SIMD128-NEXT: .functype bitselect_v2i64 (v128, v128, v128) -> (v128){{$}} ; SIMD128-SLOW-NEXT: v128.bitselect $push[[R:[0-9]+]]=, $1, $2, $0{{$}} ; SIMD128-SLOW-NEXT: return $pop[[R]]{{$}} @@ -1401,7 +1393,6 @@ define <4 x float> @sub_v4f32(<4 x float> %x, <4 x float> %y) { ; CHECK-LABEL: div_v4f32: ; NO-SIMD128-NOT: f32x4 -; SIMD128-VM-NOT: f32x4.div ; SIMD128-NEXT: .functype div_v4f32 (v128, v128) -> (v128){{$}} ; SIMD128-NEXT: f32x4.div $push[[R:[0-9]+]]=, $0, $1{{$}} ; SIMD128-NEXT: return $pop[[R]]{{$}} @@ -1422,7 +1413,6 @@ define <4 x float> @mul_v4f32(<4 x float> %x, <4 x float> %y) { ; CHECK-LABEL: 
sqrt_v4f32: ; NO-SIMD128-NOT: f32x4 -; SIMD128-VM-NOT: f32x4.sqrt ; SIMD128-NEXT: .functype sqrt_v4f32 (v128) -> (v128){{$}} ; SIMD128-NEXT: f32x4.sqrt $push[[R:[0-9]+]]=, $0{{$}} ; SIMD128-NEXT: return $pop[[R]]{{$}} @@ -1565,7 +1555,6 @@ define <2 x double> @max_const_intrinsic_v2f64() { ; CHECK-LABEL: add_v2f64: ; NO-SIMD128-NOT: f64x2 -; SIMD128-VM-NOT: f62x2 ; SIMD128-NEXT: .functype add_v2f64 (v128, v128) -> (v128){{$}} ; SIMD128-NEXT: f64x2.add $push[[R:[0-9]+]]=, $0, $1{{$}} ; SIMD128-NEXT: return $pop[[R]]{{$}} @@ -1576,7 +1565,6 @@ define <2 x double> @add_v2f64(<2 x double> %x, <2 x double> %y) { ; CHECK-LABEL: sub_v2f64: ; NO-SIMD128-NOT: f64x2 -; SIMD128-VM-NOT: f62x2 ; SIMD128-NEXT: .functype sub_v2f64 (v128, v128) -> (v128){{$}} ; SIMD128-NEXT: f64x2.sub $push[[R:[0-9]+]]=, $0, $1{{$}} ; SIMD128-NEXT: return $pop[[R]]{{$}} @@ -1587,7 +1575,6 @@ define <2 x double> @sub_v2f64(<2 x double> %x, <2 x double> %y) { ; CHECK-LABEL: div_v2f64: ; NO-SIMD128-NOT: f64x2 -; SIMD128-VM-NOT: f62x2 ; SIMD128-NEXT: .functype div_v2f64 (v128, v128) -> (v128){{$}} ; SIMD128-NEXT: f64x2.div $push[[R:[0-9]+]]=, $0, $1{{$}} ; SIMD128-NEXT: return $pop[[R]]{{$}} @@ -1598,7 +1585,6 @@ define <2 x double> @div_v2f64(<2 x double> %x, <2 x double> %y) { ; CHECK-LABEL: mul_v2f64: ; NO-SIMD128-NOT: f64x2 -; SIMD128-VM-NOT: f62x2 ; SIMD128-NEXT: .functype mul_v2f64 (v128, v128) -> (v128){{$}} ; SIMD128-NEXT: f64x2.mul $push[[R:[0-9]+]]=, $0, $1{{$}} ; SIMD128-NEXT: return $pop[[R]]{{$}} diff --git a/llvm/test/CodeGen/WebAssembly/simd-bitcasts.ll b/llvm/test/CodeGen/WebAssembly/simd-bitcasts.ll index 5aab1c68ce281..f6d7c4ec2d1a8 100644 --- a/llvm/test/CodeGen/WebAssembly/simd-bitcasts.ll +++ b/llvm/test/CodeGen/WebAssembly/simd-bitcasts.ll @@ -1,5 +1,4 @@ -; RUN: llc < %s -asm-verbose=false -verify-machineinstrs -disable-wasm-fallthrough-return-opt -wasm-keep-registers -wasm-disable-explicit-locals -mattr=+unimplemented-simd128 | FileCheck %s --check-prefixes CHECK,SIMD128 -; RUN: llc < %s -asm-verbose=false -verify-machineinstrs -disable-wasm-fallthrough-return-opt -wasm-keep-registers -wasm-disable-explicit-locals -mattr=+simd128 | FileCheck %s --check-prefixes CHECK,SIMD128-VM +; RUN: llc < %s -asm-verbose=false -verify-machineinstrs -disable-wasm-fallthrough-return-opt -wasm-keep-registers -wasm-disable-explicit-locals -mattr=+simd128 | FileCheck %s --check-prefixes CHECK,SIMD128 ; RUN: llc < %s -asm-verbose=false -verify-machineinstrs -disable-wasm-fallthrough-return-opt -wasm-keep-registers -wasm-disable-explicit-locals | FileCheck %s --check-prefixes CHECK,NO-SIMD128 ; Test that bitcasts between vector types are lowered to zero instructions @@ -33,8 +32,6 @@ define <4 x i32> @v16i8_to_v4i32(<16 x i8> %v) { ; CHECK-LABEL: v16i8_to_v2i64: ; NO-SIMD128-NOT: return $0 -; SIMD128-VM: v128.store -; SIMD128-VM-NEXT: return{{$}} ; SIMD128: return $0 define <2 x i64> @v16i8_to_v2i64(<16 x i8> %v) { %res = bitcast <16 x i8> %v to <2 x i64> @@ -51,8 +48,6 @@ define <4 x float> @v16i8_to_v4f32(<16 x i8> %v) { ; CHECK-LABEL: v16i8_to_v2f64: ; NO-SIMD128-NOT: return $0 -; SIMD128-VM: v128.store -; SIMD128-VM-NEXT: return{{$}} ; SIMD128: return $0 define <2 x double> @v16i8_to_v2f64(<16 x i8> %v) { %res = bitcast <16 x i8> %v to <2 x double> @@ -85,8 +80,6 @@ define <4 x i32> @v8i16_to_v4i32(<8 x i16> %v) { ; CHECK-LABEL: v8i16_to_v2i64: ; NO-SIMD128-NOT: return $0 -; SIMD128-VM: v128.store -; SIMD128-VM-NEXT: return{{$}} ; SIMD128: return $0 define <2 x i64> @v8i16_to_v2i64(<8 x i16> %v) { %res = bitcast <8 x 
i16> %v to <2 x i64> @@ -103,8 +96,6 @@ define <4 x float> @v8i16_to_v4f32(<8 x i16> %v) { ; CHECK-LABEL: v8i16_to_v2f64: ; NO-SIMD128-NOT: return $0 -; SIMD128-VM: v128.store -; SIMD128-VM-NEXT: return{{$}} ; SIMD128: return $0 define <2 x double> @v8i16_to_v2f64(<8 x i16> %v) { %res = bitcast <8 x i16> %v to <2 x double> @@ -137,8 +128,6 @@ define <4 x i32> @v4i32_to_v4i32(<4 x i32> %v) { ; CHECK-LABEL: v4i32_to_v2i64: ; NO-SIMD128-NOT: return $0 -; SIMD128-VM: v128.store -; SIMD128-VM-NEXT: return{{$}} ; SIMD128: return $0 define <2 x i64> @v4i32_to_v2i64(<4 x i32> %v) { %res = bitcast <4 x i32> %v to <2 x i64> @@ -155,8 +144,6 @@ define <4 x float> @v4i32_to_v4f32(<4 x i32> %v) { ; CHECK-LABEL: v4i32_to_v2f64: ; NO-SIMD128-NOT: return $0 -; SIMD128-VM: v128.store -; SIMD128-VM-NEXT: return{{$}} ; SIMD128: return $0 define <2 x double> @v4i32_to_v2f64(<4 x i32> %v) { %res = bitcast <4 x i32> %v to <2 x double> @@ -165,7 +152,6 @@ define <2 x double> @v4i32_to_v2f64(<4 x i32> %v) { ; CHECK-LABEL: v2i64_to_v16i8: ; NO-SIMD128-NOT: return $0 -; SIMD128-VM-NOT: return $0 ; SIMD128: return $0 define <16 x i8> @v2i64_to_v16i8(<2 x i64> %v) { %res = bitcast <2 x i64> %v to <16 x i8> @@ -174,7 +160,6 @@ define <16 x i8> @v2i64_to_v16i8(<2 x i64> %v) { ; CHECK-LABEL: v2i64_to_v8i16: ; NO-SIMD128-NOT: return $0 -; SIMD128-VM-NOT: return $0 ; SIMD128: return $0 define <8 x i16> @v2i64_to_v8i16(<2 x i64> %v) { %res = bitcast <2 x i64> %v to <8 x i16> @@ -183,7 +168,6 @@ define <8 x i16> @v2i64_to_v8i16(<2 x i64> %v) { ; CHECK-LABEL: v2i64_to_v4i32: ; NO-SIMD128-NOT: return $0 -; SIMD128-VM-NOT: return $0 ; SIMD128: return $0 define <4 x i32> @v2i64_to_v4i32(<2 x i64> %v) { %res = bitcast <2 x i64> %v to <4 x i32> @@ -192,7 +176,6 @@ define <4 x i32> @v2i64_to_v4i32(<2 x i64> %v) { ; CHECK-LABEL: v2i64_to_v2i64: ; NO-SIMD128-NOT: return $0 -; SIMD128-VM-NOT: return $0 ; SIMD128: return $0 define <2 x i64> @v2i64_to_v2i64(<2 x i64> %v) { %res = bitcast <2 x i64> %v to <2 x i64> @@ -201,7 +184,6 @@ define <2 x i64> @v2i64_to_v2i64(<2 x i64> %v) { ; CHECK-LABEL: v2i64_to_v4f32: ; NO-SIMD128-NOT: return $0 -; SIMD128-VM-NOT: return $0 ; SIMD128: return $0 define <4 x float> @v2i64_to_v4f32(<2 x i64> %v) { %res = bitcast <2 x i64> %v to <4 x float> @@ -210,7 +192,6 @@ define <4 x float> @v2i64_to_v4f32(<2 x i64> %v) { ; CHECK-LABEL: v2i64_to_v2f64: ; NO-SIMD128-NOT: return $0 -; SIMD128-VM-NOT: return $0 ; SIMD128: return $0 define <2 x double> @v2i64_to_v2f64(<2 x i64> %v) { %res = bitcast <2 x i64> %v to <2 x double> @@ -243,8 +224,6 @@ define <4 x i32> @v4f32_to_v4i32(<4 x float> %v) { ; CHECK-LABEL: v4f32_to_v2i64: ; NO-SIMD128-NOT: return $0 -; SIMD128-VM: v128.store -; SIMD128-VM-NEXT: return{{$}} ; SIMD128: return $0 define <2 x i64> @v4f32_to_v2i64(<4 x float> %v) { %res = bitcast <4 x float> %v to <2 x i64> @@ -261,8 +240,6 @@ define <4 x float> @v4f32_to_v4f32(<4 x float> %v) { ; CHECK-LABEL: v4f32_to_v2f64: ; NO-SIMD128-NOT: return $0 -; SIMD128-VM: v128.store -; SIMD128-VM-NEXT: return{{$}} ; SIMD128: return $0 define <2 x double> @v4f32_to_v2f64(<4 x float> %v) { %res = bitcast <4 x float> %v to <2 x double> @@ -271,7 +248,6 @@ define <2 x double> @v4f32_to_v2f64(<4 x float> %v) { ; CHECK-LABEL: v2f64_to_v16i8: ; NO-SIMD128-NOT: return $0 -; SIMD128-VM-NOT: return $0 ; SIMD128: return $0 define <16 x i8> @v2f64_to_v16i8(<2 x double> %v) { %res = bitcast <2 x double> %v to <16 x i8> @@ -280,7 +256,6 @@ define <16 x i8> @v2f64_to_v16i8(<2 x double> %v) { ; CHECK-LABEL: v2f64_to_v8i16: ; 
NO-SIMD128-NOT: return $0 -; SIMD128-VM-NOT: return $0 ; SIMD128: return $0 define <8 x i16> @v2f64_to_v8i16(<2 x double> %v) { %res = bitcast <2 x double> %v to <8 x i16> @@ -289,7 +264,6 @@ define <8 x i16> @v2f64_to_v8i16(<2 x double> %v) { ; CHECK-LABEL: v2f64_to_v4i32: ; NO-SIMD128-NOT: return $0 -; SIMD128-VM-NOT: return $0 ; SIMD128: return $0 define <4 x i32> @v2f64_to_v4i32(<2 x double> %v) { %res = bitcast <2 x double> %v to <4 x i32> @@ -298,7 +272,6 @@ define <4 x i32> @v2f64_to_v4i32(<2 x double> %v) { ; CHECK-LABEL: v2f64_to_v2i64: ; NO-SIMD128-NOT: return $0 -; SIMD128-VM-NOT: return $0 ; SIMD128: return $0 define <2 x i64> @v2f64_to_v2i64(<2 x double> %v) { %res = bitcast <2 x double> %v to <2 x i64> @@ -307,7 +280,6 @@ define <2 x i64> @v2f64_to_v2i64(<2 x double> %v) { ; CHECK-LABEL: v2f64_to_v4f32: ; NO-SIMD128-NOT: return $0 -; SIMD128-VM-NOT: return $0 ; SIMD128: return $0 define <4 x float> @v2f64_to_v4f32(<2 x double> %v) { %res = bitcast <2 x double> %v to <4 x float> @@ -316,7 +288,6 @@ define <4 x float> @v2f64_to_v4f32(<2 x double> %v) { ; CHECK-LABEL: v2f64_to_v2f64: ; NO-SIMD128-NOT: return $0 -; SIMD128-VM-NOT: return $0 ; SIMD128: return $0 define <2 x double> @v2f64_to_v2f64(<2 x double> %v) { %res = bitcast <2 x double> %v to <2 x double> diff --git a/llvm/test/CodeGen/WebAssembly/simd-build-pair.ll b/llvm/test/CodeGen/WebAssembly/simd-build-pair.ll index a3b7ab8e8eca4..3438c49caca1c 100644 --- a/llvm/test/CodeGen/WebAssembly/simd-build-pair.ll +++ b/llvm/test/CodeGen/WebAssembly/simd-build-pair.ll @@ -14,13 +14,7 @@ target triple = "wasm32-unknown-unknown" ; t8: ch = store<(store 8 into `i64* undef`, align 1)> t3:1, t24, undef:i32, undef:i32 ; t9: ch = WebAssemblyISD::RETURN t8 -; CHECK: i32x4.extract_lane -; CHECK-NEXT: i64.extend_i32_u -; CHECK-NEXT: i32x4.extract_lane -; CHECK-NEXT: i64.extend_i32_u -; CHECK-NEXT: i64.const {{.*}} 32 -; CHECK-NEXT: i64.shl -; CHECK-NEXT: i64.or +; CHECK: i64x2.extract_lane ; CHECK-NEXT: i64.store define void @build_pair_i32s() { entry: diff --git a/llvm/test/CodeGen/WebAssembly/simd-comparisons.ll b/llvm/test/CodeGen/WebAssembly/simd-comparisons.ll index 3b6af73eb15f8..812dce4bfe791 100644 --- a/llvm/test/CodeGen/WebAssembly/simd-comparisons.ll +++ b/llvm/test/CodeGen/WebAssembly/simd-comparisons.ll @@ -1417,7 +1417,6 @@ define <4 x i32> @compare_sext_uno_nnan_v4f32 (<4 x float> %x, <4 x float> %y) { ; CHECK-LABEL: compare_oeq_v2f64: ; NO-SIMD128-NOT: f64x2 -; SIMD128-VM-NOT: f64x2 ; SIMD128-NEXT: .functype compare_oeq_v2f64 (v128, v128) -> (v128){{$}} ; SIMD128-NEXT: f64x2.eq $push[[R:[0-9]+]]=, $0, $1{{$}} ; SIMD128-NEXT: return $pop[[R]]{{$}} @@ -1428,7 +1427,6 @@ define <2 x i1> @compare_oeq_v2f64 (<2 x double> %x, <2 x double> %y) { ; CHECK-LABEL: compare_oeq_nnan_v2f64: ; NO-SIMD128-NOT: f64x2 -; SIMD128-VM-NOT: f64x2 ; SIMD128-NEXT: .functype compare_oeq_nnan_v2f64 (v128, v128) -> (v128){{$}} ; SIMD128-NEXT: f64x2.eq $push[[R:[0-9]+]]=, $0, $1{{$}} ; SIMD128-NEXT: return $pop[[R]]{{$}} @@ -1439,7 +1437,6 @@ define <2 x i1> @compare_oeq_nnan_v2f64 (<2 x double> %x, <2 x double> %y) { ; CHECK-LABEL: compare_sext_oeq_v2f64: ; NO-SIMD128-NOT: f64x2 -; SIMD128-VM-NOT: f64x2 ; SIMD128-NEXT: .functype compare_sext_oeq_v2f64 (v128, v128) -> (v128){{$}} ; SIMD128-NEXT: f64x2.eq $push[[R:[0-9]+]]=, $0, $1{{$}} ; SIMD128-NEXT: return $pop[[R]]{{$}} @@ -1451,7 +1448,6 @@ define <2 x i64> @compare_sext_oeq_v2f64 (<2 x double> %x, <2 x double> %y) { ; CHECK-LABEL: compare_sext_oeq_nnan_v2f64: ; NO-SIMD128-NOT: f64x2 -; 
SIMD128-VM-NOT: f64x2 ; SIMD128-NEXT: .functype compare_sext_oeq_nnan_v2f64 (v128, v128) -> (v128){{$}} ; SIMD128-NEXT: f64x2.eq $push[[R:[0-9]+]]=, $0, $1{{$}} ; SIMD128-NEXT: return $pop[[R]]{{$}} @@ -1463,7 +1459,6 @@ define <2 x i64> @compare_sext_oeq_nnan_v2f64 (<2 x double> %x, <2 x double> %y) ; CHECK-LABEL: compare_ogt_v2f64: ; NO-SIMD128-NOT: f64x2 -; SIMD128-VM-NOT: f64x2 ; SIMD128-NEXT: .functype compare_ogt_v2f64 (v128, v128) -> (v128){{$}} ; SIMD128-NEXT: f64x2.gt $push[[R:[0-9]+]]=, $0, $1{{$}} ; SIMD128-NEXT: return $pop[[R]]{{$}} @@ -1474,7 +1469,6 @@ define <2 x i1> @compare_ogt_v2f64 (<2 x double> %x, <2 x double> %y) { ; CHECK-LABEL: compare_ogt_nnan_v2f64: ; NO-SIMD128-NOT: f64x2 -; SIMD128-VM-NOT: f64x2 ; SIMD128-NEXT: .functype compare_ogt_nnan_v2f64 (v128, v128) -> (v128){{$}} ; SIMD128-NEXT: f64x2.gt $push[[R:[0-9]+]]=, $0, $1{{$}} ; SIMD128-NEXT: return $pop[[R]]{{$}} @@ -1485,7 +1479,6 @@ define <2 x i1> @compare_ogt_nnan_v2f64 (<2 x double> %x, <2 x double> %y) { ; CHECK-LABEL: compare_sext_ogt_v2f64: ; NO-SIMD128-NOT: f64x2 -; SIMD128-VM-NOT: f64x2 ; SIMD128-NEXT: .functype compare_sext_ogt_v2f64 (v128, v128) -> (v128){{$}} ; SIMD128-NEXT: f64x2.gt $push[[R:[0-9]+]]=, $0, $1{{$}} ; SIMD128-NEXT: return $pop[[R]]{{$}} @@ -1497,7 +1490,6 @@ define <2 x i64> @compare_sext_ogt_v2f64 (<2 x double> %x, <2 x double> %y) { ; CHECK-LABEL: compare_sext_ogt_nnan_v2f64: ; NO-SIMD128-NOT: f64x2 -; SIMD128-VM-NOT: f64x2 ; SIMD128-NEXT: .functype compare_sext_ogt_nnan_v2f64 (v128, v128) -> (v128){{$}} ; SIMD128-NEXT: f64x2.gt $push[[R:[0-9]+]]=, $0, $1{{$}} ; SIMD128-NEXT: return $pop[[R]]{{$}} @@ -1509,7 +1501,6 @@ define <2 x i64> @compare_sext_ogt_nnan_v2f64 (<2 x double> %x, <2 x double> %y) ; CHECK-LABEL: compare_oge_v2f64: ; NO-SIMD128-NOT: f64x2 -; SIMD128-VM-NOT: f64x2 ; SIMD128-NEXT: .functype compare_oge_v2f64 (v128, v128) -> (v128){{$}} ; SIMD128-NEXT: f64x2.ge $push[[R:[0-9]+]]=, $0, $1{{$}} ; SIMD128-NEXT: return $pop[[R]]{{$}} @@ -1520,7 +1511,6 @@ define <2 x i1> @compare_oge_v2f64 (<2 x double> %x, <2 x double> %y) { ; CHECK-LABEL: compare_oge_nnan_v2f64: ; NO-SIMD128-NOT: f64x2 -; SIMD128-VM-NOT: f64x2 ; SIMD128-NEXT: .functype compare_oge_nnan_v2f64 (v128, v128) -> (v128){{$}} ; SIMD128-NEXT: f64x2.ge $push[[R:[0-9]+]]=, $0, $1{{$}} ; SIMD128-NEXT: return $pop[[R]]{{$}} @@ -1531,7 +1521,6 @@ define <2 x i1> @compare_oge_nnan_v2f64 (<2 x double> %x, <2 x double> %y) { ; CHECK-LABEL: compare_sext_oge_v2f64: ; NO-SIMD128-NOT: f64x2 -; SIMD128-VM-NOT: f64x2 ; SIMD128-NEXT: .functype compare_sext_oge_v2f64 (v128, v128) -> (v128){{$}} ; SIMD128-NEXT: f64x2.ge $push[[R:[0-9]+]]=, $0, $1{{$}} ; SIMD128-NEXT: return $pop[[R]]{{$}} @@ -1543,7 +1532,6 @@ define <2 x i64> @compare_sext_oge_v2f64 (<2 x double> %x, <2 x double> %y) { ; CHECK-LABEL: compare_sext_oge_nnan_v2f64: ; NO-SIMD128-NOT: f64x2 -; SIMD128-VM-NOT: f64x2 ; SIMD128-NEXT: .functype compare_sext_oge_nnan_v2f64 (v128, v128) -> (v128){{$}} ; SIMD128-NEXT: f64x2.ge $push[[R:[0-9]+]]=, $0, $1{{$}} ; SIMD128-NEXT: return $pop[[R]]{{$}} @@ -1555,7 +1543,6 @@ define <2 x i64> @compare_sext_oge_nnan_v2f64 (<2 x double> %x, <2 x double> %y) ; CHECK-LABEL: compare_olt_v2f64: ; NO-SIMD128-NOT: f64x2 -; SIMD128-VM-NOT: f64x2 ; SIMD128-NEXT: .functype compare_olt_v2f64 (v128, v128) -> (v128){{$}} ; SIMD128-NEXT: f64x2.lt $push[[R:[0-9]+]]=, $0, $1{{$}} ; SIMD128-NEXT: return $pop[[R]]{{$}} @@ -1566,7 +1553,6 @@ define <2 x i1> @compare_olt_v2f64 (<2 x double> %x, <2 x double> %y) { ; CHECK-LABEL: 
compare_olt_nnan_v2f64: ; NO-SIMD128-NOT: f64x2 -; SIMD128-VM-NOT: f64x2 ; SIMD128-NEXT: .functype compare_olt_nnan_v2f64 (v128, v128) -> (v128){{$}} ; SIMD128-NEXT: f64x2.lt $push[[R:[0-9]+]]=, $0, $1{{$}} ; SIMD128-NEXT: return $pop[[R]]{{$}} @@ -1577,7 +1563,6 @@ define <2 x i1> @compare_olt_nnan_v2f64 (<2 x double> %x, <2 x double> %y) { ; CHECK-LABEL: compare_sext_olt_v2f64: ; NO-SIMD128-NOT: f64x2 -; SIMD128-VM-NOT: f64x2 ; SIMD128-NEXT: .functype compare_sext_olt_v2f64 (v128, v128) -> (v128){{$}} ; SIMD128-NEXT: f64x2.lt $push[[R:[0-9]+]]=, $0, $1{{$}} ; SIMD128-NEXT: return $pop[[R]]{{$}} @@ -1589,7 +1574,6 @@ define <2 x i64> @compare_sext_olt_v2f64 (<2 x double> %x, <2 x double> %y) { ; CHECK-LABEL: compare_sext_olt_nnan_v2f64: ; NO-SIMD128-NOT: f64x2 -; SIMD128-VM-NOT: f64x2 ; SIMD128-NEXT: .functype compare_sext_olt_nnan_v2f64 (v128, v128) -> (v128){{$}} ; SIMD128-NEXT: f64x2.lt $push[[R:[0-9]+]]=, $0, $1{{$}} ; SIMD128-NEXT: return $pop[[R]]{{$}} @@ -1601,7 +1585,6 @@ define <2 x i64> @compare_sext_olt_nnan_v2f64 (<2 x double> %x, <2 x double> %y) ; CHECK-LABEL: compare_ole_v2f64: ; NO-SIMD128-NOT: f64x2 -; SIMD128-VM-NOT: f64x2 ; SIMD128-NEXT: .functype compare_ole_v2f64 (v128, v128) -> (v128){{$}} ; SIMD128-NEXT: f64x2.le $push[[R:[0-9]+]]=, $0, $1{{$}} ; SIMD128-NEXT: return $pop[[R]]{{$}} @@ -1612,7 +1595,6 @@ define <2 x i1> @compare_ole_v2f64 (<2 x double> %x, <2 x double> %y) { ; CHECK-LABEL: compare_ole_nnan_v2f64: ; NO-SIMD128-NOT: f64x2 -; SIMD128-VM-NOT: f64x2 ; SIMD128-NEXT: .functype compare_ole_nnan_v2f64 (v128, v128) -> (v128){{$}} ; SIMD128-NEXT: f64x2.le $push[[R:[0-9]+]]=, $0, $1{{$}} ; SIMD128-NEXT: return $pop[[R]]{{$}} @@ -1623,7 +1605,6 @@ define <2 x i1> @compare_ole_nnan_v2f64 (<2 x double> %x, <2 x double> %y) { ; CHECK-LABEL: compare_sext_ole_v2f64: ; NO-SIMD128-NOT: f64x2 -; SIMD128-VM-NOT: f64x2 ; SIMD128-NEXT: .functype compare_sext_ole_v2f64 (v128, v128) -> (v128){{$}} ; SIMD128-NEXT: f64x2.le $push[[R:[0-9]+]]=, $0, $1{{$}} ; SIMD128-NEXT: return $pop[[R]]{{$}} @@ -1635,7 +1616,6 @@ define <2 x i64> @compare_sext_ole_v2f64 (<2 x double> %x, <2 x double> %y) { ; CHECK-LABEL: compare_sext_ole_nnan_v2f64: ; NO-SIMD128-NOT: f64x2 -; SIMD128-VM-NOT: f64x2 ; SIMD128-NEXT: .functype compare_sext_ole_nnan_v2f64 (v128, v128) -> (v128){{$}} ; SIMD128-NEXT: f64x2.le $push[[R:[0-9]+]]=, $0, $1{{$}} ; SIMD128-NEXT: return $pop[[R]]{{$}} @@ -1647,7 +1627,6 @@ define <2 x i64> @compare_sext_ole_nnan_v2f64 (<2 x double> %x, <2 x double> %y) ; CHECK-LABEL: compare_one_v2f64: ; NO-SIMD128-NOT: f64x2 -; SIMD128-VM-NOT: f64x2 ; SIMD128-NEXT: .functype compare_one_v2f64 (v128, v128) -> (v128){{$}} ; SIMD128-NEXT: f64x2.ne $push[[T0:[0-9]+]]=, $0, $1{{$}} ; SIMD128-NEXT: f64x2.eq $push[[T1:[0-9]+]]=, $0, $0{{$}} @@ -1662,7 +1641,6 @@ define <2 x i1> @compare_one_v2f64 (<2 x double> %x, <2 x double> %y) { ; CHECK-LABEL: compare_one_nnan_v2f64: ; NO-SIMD128-NOT: f64x2 -; SIMD128-VM-NOT: f64x2 ; SIMD128-NEXT: .functype compare_one_nnan_v2f64 (v128, v128) -> (v128){{$}} ; SIMD128-NEXT: f64x2.ne $push[[R:[0-9]+]]=, $0, $1{{$}} ; SIMD128-NEXT: return $pop[[R]]{{$}} @@ -1673,7 +1651,6 @@ define <2 x i1> @compare_one_nnan_v2f64 (<2 x double> %x, <2 x double> %y) { ; CHECK-LABEL: compare_sext_one_v2f64: ; NO-SIMD128-NOT: f64x2 -; SIMD128-VM-NOT: f64x2 ; SIMD128-NEXT: .functype compare_sext_one_v2f64 (v128, v128) -> (v128){{$}} ; SIMD128-NEXT: f64x2.ne $push[[T0:[0-9]+]]=, $0, $1{{$}} ; SIMD128-NEXT: f64x2.eq $push[[T1:[0-9]+]]=, $0, $0{{$}} @@ -1689,7 +1666,6 @@ define <2 x 
i64> @compare_sext_one_v2f64 (<2 x double> %x, <2 x double> %y) { ; CHECK-LABEL: compare_sext_one_nnan_v2f64: ; NO-SIMD128-NOT: f64x2 -; SIMD128-VM-NOT: f64x2 ; SIMD128-NEXT: .functype compare_sext_one_nnan_v2f64 (v128, v128) -> (v128){{$}} ; SIMD128-NEXT: f64x2.ne $push[[R:[0-9]+]]=, $0, $1{{$}} ; SIMD128-NEXT: return $pop[[R]]{{$}} @@ -1701,7 +1677,6 @@ define <2 x i64> @compare_sext_one_nnan_v2f64 (<2 x double> %x, <2 x double> %y) ; CHECK-LABEL: compare_ord_v2f64: ; NO-SIMD128-NOT: f64x2 -; SIMD128-VM-NOT: f64x2 ; SIMD128-NEXT: .functype compare_ord_v2f64 (v128, v128) -> (v128){{$}} ; SIMD128-NEXT: f64x2.eq $push[[T0:[0-9]+]]=, $0, $0{{$}} ; SIMD128-NEXT: f64x2.eq $push[[T1:[0-9]+]]=, $1, $1{{$}} @@ -1714,7 +1689,6 @@ define <2 x i1> @compare_ord_v2f64 (<2 x double> %x, <2 x double> %y) { ; CHECK-LABEL: compare_ord_nnan_v2f64: ; NO-SIMD128-NOT: f64x2 -; SIMD128-VM-NOT: f64x2 ; SIMD128-NEXT: .functype compare_ord_nnan_v2f64 (v128, v128) -> (v128){{$}} ; SIMD128-NEXT: f64x2.eq $push[[T0:[0-9]+]]=, $0, $0{{$}} ; SIMD128-NEXT: f64x2.eq $push[[T1:[0-9]+]]=, $1, $1{{$}} @@ -1727,7 +1701,6 @@ define <2 x i1> @compare_ord_nnan_v2f64 (<2 x double> %x, <2 x double> %y) { ; CHECK-LABEL: compare_sext_ord_v2f64: ; NO-SIMD128-NOT: f64x2 -; SIMD128-VM-NOT: f64x2 ; SIMD128-NEXT: .functype compare_sext_ord_v2f64 (v128, v128) -> (v128){{$}} ; SIMD128-NEXT: f64x2.eq $push[[T0:[0-9]+]]=, $0, $0{{$}} ; SIMD128-NEXT: f64x2.eq $push[[T1:[0-9]+]]=, $1, $1{{$}} @@ -1741,7 +1714,6 @@ define <2 x i64> @compare_sext_ord_v2f64 (<2 x double> %x, <2 x double> %y) { ; CHECK-LABEL: compare_sext_ord_nnan_v2f64: ; NO-SIMD128-NOT: f64x2 -; SIMD128-VM-NOT: f64x2 ; SIMD128-NEXT: .functype compare_sext_ord_nnan_v2f64 (v128, v128) -> (v128){{$}} ; SIMD128-NEXT: f64x2.eq $push[[T0:[0-9]+]]=, $0, $0{{$}} ; SIMD128-NEXT: f64x2.eq $push[[T1:[0-9]+]]=, $1, $1{{$}} @@ -1755,7 +1727,6 @@ define <2 x i64> @compare_sext_ord_nnan_v2f64 (<2 x double> %x, <2 x double> %y) ; CHECK-LABEL: compare_ueq_v2f64: ; NO-SIMD128-NOT: f64x2 -; SIMD128-VM-NOT: f64x2 ; SIMD128-NEXT: .functype compare_ueq_v2f64 (v128, v128) -> (v128){{$}} ; SIMD128-NEXT: f64x2.eq $push[[T0:[0-9]+]]=, $0, $1{{$}} ; SIMD128-NEXT: f64x2.ne $push[[T1:[0-9]+]]=, $0, $0{{$}} @@ -1770,7 +1741,6 @@ define <2 x i1> @compare_ueq_v2f64 (<2 x double> %x, <2 x double> %y) { ; CHECK-LABEL: compare_ueq_nnan_v2f64: ; NO-SIMD128-NOT: f64x2 -; SIMD128-VM-NOT: f64x2 ; SIMD128-NEXT: .functype compare_ueq_nnan_v2f64 (v128, v128) -> (v128){{$}} ; SIMD128-NEXT: f64x2.eq $push[[R:[0-9]+]]=, $0, $1{{$}} ; SIMD128-NEXT: return $pop[[R]]{{$}} @@ -1781,7 +1751,6 @@ define <2 x i1> @compare_ueq_nnan_v2f64 (<2 x double> %x, <2 x double> %y) { ; CHECK-LABEL: compare_sext_ueq_v2f64: ; NO-SIMD128-NOT: f64x2 -; SIMD128-VM-NOT: f64x2 ; SIMD128-NEXT: .functype compare_sext_ueq_v2f64 (v128, v128) -> (v128){{$}} ; SIMD128-NEXT: f64x2.eq $push[[T0:[0-9]+]]=, $0, $1{{$}} ; SIMD128-NEXT: f64x2.ne $push[[T1:[0-9]+]]=, $0, $0{{$}} @@ -1797,7 +1766,6 @@ define <2 x i64> @compare_sext_ueq_v2f64 (<2 x double> %x, <2 x double> %y) { ; CHECK-LABEL: compare_sext_ueq_nnan_v2f64: ; NO-SIMD128-NOT: f64x2 -; SIMD128-VM-NOT: f64x2 ; SIMD128-NEXT: .functype compare_sext_ueq_nnan_v2f64 (v128, v128) -> (v128){{$}} ; SIMD128-NEXT: f64x2.eq $push[[R:[0-9]+]]=, $0, $1{{$}} ; SIMD128-NEXT: return $pop[[R]]{{$}} @@ -1809,7 +1777,6 @@ define <2 x i64> @compare_sext_ueq_nnan_v2f64 (<2 x double> %x, <2 x double> %y) ; CHECK-LABEL: compare_ugt_v2f64: ; NO-SIMD128-NOT: f64x2 -; SIMD128-VM-NOT: f64x2 ; SIMD128-NEXT: .functype 
compare_ugt_v2f64 (v128, v128) -> (v128){{$}} ; SIMD128-NEXT: f64x2.le $push[[T0:[0-9]+]]=, $0, $1{{$}} ; SIMD128-NEXT: v128.not $push[[R:[0-9]+]]=, $pop[[T0]]{{$}} @@ -1821,7 +1788,6 @@ define <2 x i1> @compare_ugt_v2f64 (<2 x double> %x, <2 x double> %y) { ; CHECK-LABEL: compare_ugt_nnan_v2f64: ; NO-SIMD128-NOT: f64x2 -; SIMD128-VM-NOT: f64x2 ; SIMD128-NEXT: .functype compare_ugt_nnan_v2f64 (v128, v128) -> (v128){{$}} ; SIMD128-NEXT: f64x2.gt $push[[R:[0-9]+]]=, $0, $1{{$}} ; SIMD128-NEXT: return $pop[[R]]{{$}} @@ -1832,7 +1798,6 @@ define <2 x i1> @compare_ugt_nnan_v2f64 (<2 x double> %x, <2 x double> %y) { ; CHECK-LABEL: compare_sext_ugt_v2f64: ; NO-SIMD128-NOT: f64x2 -; SIMD128-VM-NOT: f64x2 ; SIMD128-NEXT: .functype compare_sext_ugt_v2f64 (v128, v128) -> (v128){{$}} ; SIMD128-NEXT: f64x2.le $push[[T0:[0-9]+]]=, $0, $1{{$}} ; SIMD128-NEXT: v128.not $push[[R:[0-9]+]]=, $pop[[T0]]{{$}} @@ -1845,7 +1810,6 @@ define <2 x i64> @compare_sext_ugt_v2f64 (<2 x double> %x, <2 x double> %y) { ; CHECK-LABEL: compare_sext_ugt_nnan_v2f64: ; NO-SIMD128-NOT: f64x2 -; SIMD128-VM-NOT: f64x2 ; SIMD128-NEXT: .functype compare_sext_ugt_nnan_v2f64 (v128, v128) -> (v128){{$}} ; SIMD128-NEXT: f64x2.gt $push[[R:[0-9]+]]=, $0, $1{{$}} ; SIMD128-NEXT: return $pop[[R]]{{$}} @@ -1857,7 +1821,6 @@ define <2 x i64> @compare_sext_ugt_nnan_v2f64 (<2 x double> %x, <2 x double> %y) ; CHECK-LABEL: compare_uge_v2f64: ; NO-SIMD128-NOT: f64x2 -; SIMD128-VM-NOT: f64x2 ; SIMD128-NEXT: .functype compare_uge_v2f64 (v128, v128) -> (v128){{$}} ; SIMD128-NEXT: f64x2.lt $push[[T0:[0-9]+]]=, $0, $1{{$}} ; SIMD128-NEXT: v128.not $push[[R:[0-9]+]]=, $pop[[T0]]{{$}} @@ -1869,7 +1832,6 @@ define <2 x i1> @compare_uge_v2f64 (<2 x double> %x, <2 x double> %y) { ; CHECK-LABEL: compare_uge_nnan_v2f64: ; NO-SIMD128-NOT: f64x2 -; SIMD128-VM-NOT: f64x2 ; SIMD128-NEXT: .functype compare_uge_nnan_v2f64 (v128, v128) -> (v128){{$}} ; SIMD128-NEXT: f64x2.ge $push[[R:[0-9]+]]=, $0, $1{{$}} ; SIMD128-NEXT: return $pop[[R]]{{$}} @@ -1880,7 +1842,6 @@ define <2 x i1> @compare_uge_nnan_v2f64 (<2 x double> %x, <2 x double> %y) { ; CHECK-LABEL: compare_sext_uge_v2f64: ; NO-SIMD128-NOT: f64x2 -; SIMD128-VM-NOT: f64x2 ; SIMD128-NEXT: .functype compare_sext_uge_v2f64 (v128, v128) -> (v128){{$}} ; SIMD128-NEXT: f64x2.lt $push[[T0:[0-9]+]]=, $0, $1{{$}} ; SIMD128-NEXT: v128.not $push[[R:[0-9]+]]=, $pop[[T0]]{{$}} @@ -1893,7 +1854,6 @@ define <2 x i64> @compare_sext_uge_v2f64 (<2 x double> %x, <2 x double> %y) { ; CHECK-LABEL: compare_sext_uge_nnan_v2f64: ; NO-SIMD128-NOT: f64x2 -; SIMD128-VM-NOT: f64x2 ; SIMD128-NEXT: .functype compare_sext_uge_nnan_v2f64 (v128, v128) -> (v128){{$}} ; SIMD128-NEXT: f64x2.ge $push[[R:[0-9]+]]=, $0, $1{{$}} ; SIMD128-NEXT: return $pop[[R]]{{$}} @@ -1905,7 +1865,6 @@ define <2 x i64> @compare_sext_uge_nnan_v2f64 (<2 x double> %x, <2 x double> %y) ; CHECK-LABEL: compare_ult_v2f64: ; NO-SIMD128-NOT: f64x2 -; SIMD128-VM-NOT: f64x2 ; SIMD128-NEXT: .functype compare_ult_v2f64 (v128, v128) -> (v128){{$}} ; SIMD128-NEXT: f64x2.ge $push[[T0:[0-9]+]]=, $0, $1{{$}} ; SIMD128-NEXT: v128.not $push[[R:[0-9]+]]=, $pop[[T0]]{{$}} @@ -1917,7 +1876,6 @@ define <2 x i1> @compare_ult_v2f64 (<2 x double> %x, <2 x double> %y) { ; CHECK-LABEL: compare_ult_nnan_v2f64: ; NO-SIMD128-NOT: f64x2 -; SIMD128-VM-NOT: f64x2 ; SIMD128-NEXT: .functype compare_ult_nnan_v2f64 (v128, v128) -> (v128){{$}} ; SIMD128-NEXT: f64x2.lt $push[[R:[0-9]+]]=, $0, $1{{$}} ; SIMD128-NEXT: return $pop[[R]]{{$}} @@ -1928,7 +1886,6 @@ define <2 x i1> @compare_ult_nnan_v2f64 (<2 x 
double> %x, <2 x double> %y) { ; CHECK-LABEL: compare_sext_ult_v2f64: ; NO-SIMD128-NOT: f64x2 -; SIMD128-VM-NOT: f64x2 ; SIMD128-NEXT: .functype compare_sext_ult_v2f64 (v128, v128) -> (v128){{$}} ; SIMD128-NEXT: f64x2.ge $push[[T0:[0-9]+]]=, $0, $1{{$}} ; SIMD128-NEXT: v128.not $push[[R:[0-9]+]]=, $pop[[T0]]{{$}} @@ -1941,7 +1898,6 @@ define <2 x i64> @compare_sext_ult_v2f64 (<2 x double> %x, <2 x double> %y) { ; CHECK-LABEL: compare_sext_ult_nnan_v2f64: ; NO-SIMD128-NOT: f64x2 -; SIMD128-VM-NOT: f64x2 ; SIMD128-NEXT: .functype compare_sext_ult_nnan_v2f64 (v128, v128) -> (v128){{$}} ; SIMD128-NEXT: f64x2.lt $push[[R:[0-9]+]]=, $0, $1{{$}} ; SIMD128-NEXT: return $pop[[R]]{{$}} @@ -1953,7 +1909,6 @@ define <2 x i64> @compare_sext_ult_nnan_v2f64 (<2 x double> %x, <2 x double> %y) ; CHECK-LABEL: compare_ule_v2f64: ; NO-SIMD128-NOT: f64x2 -; SIMD128-VM-NOT: f64x2 ; SIMD128-NEXT: .functype compare_ule_v2f64 (v128, v128) -> (v128){{$}} ; SIMD128-NEXT: f64x2.gt $push[[T0:[0-9]+]]=, $0, $1{{$}} ; SIMD128-NEXT: v128.not $push[[R:[0-9]+]]=, $pop[[T0]]{{$}} @@ -1965,7 +1920,6 @@ define <2 x i1> @compare_ule_v2f64 (<2 x double> %x, <2 x double> %y) { ; CHECK-LABEL: compare_ule_nnan_v2f64: ; NO-SIMD128-NOT: f64x2 -; SIMD128-VM-NOT: f64x2 ; SIMD128-NEXT: .functype compare_ule_nnan_v2f64 (v128, v128) -> (v128){{$}} ; SIMD128-NEXT: f64x2.le $push[[R:[0-9]+]]=, $0, $1{{$}} ; SIMD128-NEXT: return $pop[[R]]{{$}} @@ -1976,7 +1930,6 @@ define <2 x i1> @compare_ule_nnan_v2f64 (<2 x double> %x, <2 x double> %y) { ; CHECK-LABEL: compare_sext_ule_v2f64: ; NO-SIMD128-NOT: f64x2 -; SIMD128-VM-NOT: f64x2 ; SIMD128-NEXT: .functype compare_sext_ule_v2f64 (v128, v128) -> (v128){{$}} ; SIMD128-NEXT: f64x2.gt $push[[T0:[0-9]+]]=, $0, $1{{$}} ; SIMD128-NEXT: v128.not $push[[R:[0-9]+]]=, $pop[[T0]]{{$}} @@ -1989,7 +1942,6 @@ define <2 x i64> @compare_sext_ule_v2f64 (<2 x double> %x, <2 x double> %y) { ; CHECK-LABEL: compare_sext_ule_nnan_v2f64: ; NO-SIMD128-NOT: f64x2 -; SIMD128-VM-NOT: f64x2 ; SIMD128-NEXT: .functype compare_sext_ule_nnan_v2f64 (v128, v128) -> (v128){{$}} ; SIMD128-NEXT: f64x2.le $push[[R:[0-9]+]]=, $0, $1{{$}} ; SIMD128-NEXT: return $pop[[R]]{{$}} @@ -2001,7 +1953,6 @@ define <2 x i64> @compare_sext_ule_nnan_v2f64 (<2 x double> %x, <2 x double> %y) ; CHECK-LABEL: compare_une_v2f64: ; NO-SIMD128-NOT: f64x2 -; SIMD128-VM-NOT: f64x2 ; SIMD128-NEXT: .functype compare_une_v2f64 (v128, v128) -> (v128){{$}} ; SIMD128-NEXT: f64x2.ne $push[[R:[0-9]+]]=, $0, $1{{$}} ; SIMD128-NEXT: return $pop[[R]]{{$}} @@ -2012,7 +1963,6 @@ define <2 x i1> @compare_une_v2f64 (<2 x double> %x, <2 x double> %y) { ; CHECK-LABEL: compare_une_nnan_v2f64: ; NO-SIMD128-NOT: f64x2 -; SIMD128-VM-NOT: f64x2 ; SIMD128-NEXT: .functype compare_une_nnan_v2f64 (v128, v128) -> (v128){{$}} ; SIMD128-NEXT: f64x2.ne $push[[R:[0-9]+]]=, $0, $1{{$}} ; SIMD128-NEXT: return $pop[[R]]{{$}} @@ -2023,7 +1973,6 @@ define <2 x i1> @compare_une_nnan_v2f64 (<2 x double> %x, <2 x double> %y) { ; CHECK-LABEL: compare_sext_une_v2f64: ; NO-SIMD128-NOT: f64x2 -; SIMD128-VM-NOT: f64x2 ; SIMD128-NEXT: .functype compare_sext_une_v2f64 (v128, v128) -> (v128){{$}} ; SIMD128-NEXT: f64x2.ne $push[[R:[0-9]+]]=, $0, $1{{$}} ; SIMD128-NEXT: return $pop[[R]]{{$}} @@ -2035,7 +1984,6 @@ define <2 x i64> @compare_sext_une_v2f64 (<2 x double> %x, <2 x double> %y) { ; CHECK-LABEL: compare_sext_une_nnan_v2f64: ; NO-SIMD128-NOT: f64x2 -; SIMD128-VM-NOT: f64x2 ; SIMD128-NEXT: .functype compare_sext_une_nnan_v2f64 (v128, v128) -> (v128){{$}} ; SIMD128-NEXT: f64x2.ne $push[[R:[0-9]+]]=, 
$0, $1{{$}} ; SIMD128-NEXT: return $pop[[R]]{{$}} @@ -2047,7 +1995,6 @@ define <2 x i64> @compare_sext_une_nnan_v2f64 (<2 x double> %x, <2 x double> %y) ; CHECK-LABEL: compare_uno_v2f64: ; NO-SIMD128-NOT: f64x2 -; SIMD128-VM-NOT: f64x2 ; SIMD128-NEXT: .functype compare_uno_v2f64 (v128, v128) -> (v128){{$}} ; SIMD128-NEXT: f64x2.ne $push[[T0:[0-9]+]]=, $0, $0{{$}} ; SIMD128-NEXT: f64x2.ne $push[[T1:[0-9]+]]=, $1, $1{{$}} @@ -2060,7 +2007,6 @@ define <2 x i1> @compare_uno_v2f64 (<2 x double> %x, <2 x double> %y) { ; CHECK-LABEL: compare_uno_nnan_v2f64: ; NO-SIMD128-NOT: f64x2 -; SIMD128-VM-NOT: f64x2 ; SIMD128-NEXT: .functype compare_uno_nnan_v2f64 (v128, v128) -> (v128){{$}} ; SIMD128-NEXT: f64x2.ne $push[[T0:[0-9]+]]=, $0, $0{{$}} ; SIMD128-NEXT: f64x2.ne $push[[T1:[0-9]+]]=, $1, $1{{$}} @@ -2073,7 +2019,6 @@ define <2 x i1> @compare_uno_nnan_v2f64 (<2 x double> %x, <2 x double> %y) { ; CHECK-LABEL: compare_sext_uno_v2f64: ; NO-SIMD128-NOT: f64x2 -; SIMD128-VM-NOT: f64x2 ; SIMD128-NEXT: .functype compare_sext_uno_v2f64 (v128, v128) -> (v128){{$}} ; SIMD128-NEXT: f64x2.ne $push[[T0:[0-9]+]]=, $0, $0{{$}} ; SIMD128-NEXT: f64x2.ne $push[[T1:[0-9]+]]=, $1, $1{{$}} @@ -2087,7 +2032,6 @@ define <2 x i64> @compare_sext_uno_v2f64 (<2 x double> %x, <2 x double> %y) { ; CHECK-LABEL: compare_sext_uno_nnan_v2f64: ; NO-SIMD128-NOT: f64x2 -; SIMD128-VM-NOT: f64x2 ; SIMD128-NEXT: .functype compare_sext_uno_nnan_v2f64 (v128, v128) -> (v128){{$}} ; SIMD128-NEXT: f64x2.ne $push[[T0:[0-9]+]]=, $0, $0{{$}} ; SIMD128-NEXT: f64x2.ne $push[[T1:[0-9]+]]=, $1, $1{{$}} diff --git a/llvm/test/CodeGen/WebAssembly/simd-conversions.ll b/llvm/test/CodeGen/WebAssembly/simd-conversions.ll index 5437a9ab0a81d..53731b0f7c16d 100644 --- a/llvm/test/CodeGen/WebAssembly/simd-conversions.ll +++ b/llvm/test/CodeGen/WebAssembly/simd-conversions.ll @@ -31,8 +31,6 @@ define <4 x float> @convert_u_v4f32(<4 x i32> %x) { ; NO-SIMD128-NOT: i64x2 ; SIMD128-VM-NOT: f64x2.convert_i64x2_s ; SIMD128-NEXT: .functype convert_s_v2f64 (v128) -> (v128){{$}} -; SIMD128-NEXT: f64x2.convert_i64x2_s $push[[R:[0-9]+]]=, $0 -; SIMD128-NEXT: return $pop[[R]] define <2 x double> @convert_s_v2f64(<2 x i64> %x) { %a = sitofp <2 x i64> %x to <2 x double> ret <2 x double> %a @@ -42,8 +40,6 @@ define <2 x double> @convert_s_v2f64(<2 x i64> %x) { ; NO-SIMD128-NOT: i64x2 ; SIMD128-VM-NOT: f64x2.convert_i64x2_u ; SIMD128-NEXT: .functype convert_u_v2f64 (v128) -> (v128){{$}} -; SIMD128-NEXT: f64x2.convert_i64x2_u $push[[R:[0-9]+]]=, $0 -; SIMD128-NEXT: return $pop[[R]] define <2 x double> @convert_u_v2f64(<2 x i64> %x) { %a = uitofp <2 x i64> %x to <2 x double> ret <2 x double> %a @@ -73,8 +69,6 @@ define <4 x i32> @trunc_sat_u_v4i32(<4 x float> %x) { ; NO-SIMD128-NOT: f64x2 ; SIMD128-VM-NOT: i64x2.trunc_sat_f64x2_s ; SIMD128-NEXT: .functype trunc_sat_s_v2i64 (v128) -> (v128){{$}} -; SIMD128-NEXT: i64x2.trunc_sat_f64x2_s $push[[R:[0-9]+]]=, $0 -; SIMD128-NEXT: return $pop[[R]] define <2 x i64> @trunc_sat_s_v2i64(<2 x double> %x) { %a = fptosi <2 x double> %x to <2 x i64> ret <2 x i64> %a @@ -84,8 +78,6 @@ define <2 x i64> @trunc_sat_s_v2i64(<2 x double> %x) { ; NO-SIMD128-NOT: f64x2 ; SIMD128-VM-NOT: i64x2.trunc_sat_f64x2_u ; SIMD128-NEXT: .functype trunc_sat_u_v2i64 (v128) -> (v128){{$}} -; SIMD128-NEXT: i64x2.trunc_sat_f64x2_u $push[[R:[0-9]+]]=, $0 -; SIMD128-NEXT: return $pop[[R]] define <2 x i64> @trunc_sat_u_v2i64(<2 x double> %x) { %a = fptoui <2 x double> %x to <2 x i64> ret <2 x i64> %a diff --git a/llvm/test/CodeGen/WebAssembly/simd-extended-extract.ll 
b/llvm/test/CodeGen/WebAssembly/simd-extended-extract.ll index 149b1842b6c86..f1cd26401a04a 100644 --- a/llvm/test/CodeGen/WebAssembly/simd-extended-extract.ll +++ b/llvm/test/CodeGen/WebAssembly/simd-extended-extract.ll @@ -16,58 +16,11 @@ target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128" target triple = "wasm32-unknown-unknown" -define void @foo(<4 x i8>* %p) { ; CHECK-LABEL: foo: ; CHECK: .functype foo (i32) -> () -; CHECK-NEXT: i32.load8_u 0 -; CHECK-NEXT: i32x4.splat -; CHECK-NEXT: local.tee -; CHECK-NEXT: i8x16.extract_lane_s 0 -; CHECK-NEXT: f64.convert_i32_s -; CHECK-NEXT: f64.const 0x0p0 -; CHECK-NEXT: f64.mul -; CHECK-NEXT: f64.const 0x0p0 -; CHECK-NEXT: f64.add -; CHECK-NEXT: f32.demote_f64 -; CHECK-NEXT: f32x4.splat -; CHECK-NEXT: i32.load8_u 1 -; CHECK-NEXT: i32x4.replace_lane 1 -; CHECK-NEXT: local.tee -; CHECK-NEXT: i8x16.extract_lane_s 4 -; CHECK-NEXT: f64.convert_i32_s -; CHECK-NEXT: f64.const 0x0p0 -; CHECK-NEXT: f64.mul -; CHECK-NEXT: f64.const 0x0p0 -; CHECK-NEXT: f64.add -; CHECK-NEXT: f32.demote_f64 -; CHECK-NEXT: f32x4.replace_lane 1 -; CHECK-NEXT: i32.const 2 -; CHECK-NEXT: i32.add -; CHECK-NEXT: i32.load8_u 0 -; CHECK-NEXT: i32x4.replace_lane 2 -; CHECK-NEXT: local.tee -; CHECK-NEXT: i8x16.extract_lane_s 8 -; CHECK-NEXT: f64.convert_i32_s -; CHECK-NEXT: f64.const 0x0p0 -; CHECK-NEXT: f64.mul -; CHECK-NEXT: f64.const 0x0p0 -; CHECK-NEXT: f64.add -; CHECK-NEXT: f32.demote_f64 -; CHECK-NEXT: f32x4.replace_lane 2 -; CHECK-NEXT: i32.const 3 -; CHECK-NEXT: i32.add -; CHECK-NEXT: i32.load8_u 0 -; CHECK-NEXT: i32x4.replace_lane 3 -; CHECK-NEXT: i8x16.extract_lane_s 12 -; CHECK-NEXT: f64.convert_i32_s -; CHECK-NEXT: f64.const 0x0p0 -; CHECK-NEXT: f64.mul -; CHECK-NEXT: f64.const 0x0p0 -; CHECK-NEXT: f64.add -; CHECK-NEXT: f32.demote_f64 -; CHECK-NEXT: f32x4.replace_lane 3 -; CHECK-NEXT: v128.store 0 -; CHECK-NEXT: return +; Implementation omitted... 
+; CHECK: return +define void @foo(<4 x i8>* %p) { %1 = load <4 x i8>, <4 x i8>* %p %2 = sitofp <4 x i8> %1 to <4 x double> %3 = fmul <4 x double> zeroinitializer, %2 diff --git a/llvm/test/CodeGen/WebAssembly/simd-intrinsics.ll b/llvm/test/CodeGen/WebAssembly/simd-intrinsics.ll index b6680dd36aa79..8cf9f2b114e0e 100644 --- a/llvm/test/CodeGen/WebAssembly/simd-intrinsics.ll +++ b/llvm/test/CodeGen/WebAssembly/simd-intrinsics.ll @@ -426,28 +426,6 @@ define <2 x i64> @bitselect_v2i64(<2 x i64> %v1, <2 x i64> %v2, <2 x i64> %c) { ret <2 x i64> %a } -; CHECK-LABEL: trunc_sat_s_v2i64: -; NO-SIMD128-NOT: f32x4 -; SIMD128-NEXT: .functype trunc_sat_s_v2i64 (v128) -> (v128){{$}} -; SIMD128-NEXT: i64x2.trunc_sat_f64x2_s $push[[R:[0-9]+]]=, $0 -; SIMD128-NEXT: return $pop[[R]] -declare <2 x i64> @llvm.wasm.trunc.saturate.signed.v2i64.v2f64(<2 x double>) -define <2 x i64> @trunc_sat_s_v2i64(<2 x double> %x) { - %a = call <2 x i64> @llvm.wasm.trunc.saturate.signed.v2i64.v2f64(<2 x double> %x) - ret <2 x i64> %a -} - -; CHECK-LABEL: trunc_sat_u_v2i64: -; NO-SIMD128-NOT: f32x4 -; SIMD128-NEXT: .functype trunc_sat_u_v2i64 (v128) -> (v128){{$}} -; SIMD128-NEXT: i64x2.trunc_sat_f64x2_u $push[[R:[0-9]+]]=, $0 -; SIMD128-NEXT: return $pop[[R]] -declare <2 x i64> @llvm.wasm.trunc.saturate.unsigned.v2i64.v2f64(<2 x double>) -define <2 x i64> @trunc_sat_u_v2i64(<2 x double> %x) { - %a = call <2 x i64> @llvm.wasm.trunc.saturate.unsigned.v2i64.v2f64(<2 x double> %x) - ret <2 x i64> %a -} - ; ============================================================================== ; 4 x f32 ; ============================================================================== diff --git a/llvm/test/CodeGen/WebAssembly/simd-offset.ll b/llvm/test/CodeGen/WebAssembly/simd-offset.ll index 03b6ca7c225b9..0b5bbb01b5f76 100644 --- a/llvm/test/CodeGen/WebAssembly/simd-offset.ll +++ b/llvm/test/CodeGen/WebAssembly/simd-offset.ll @@ -1550,7 +1550,6 @@ define void @store_v4i32_to_global_address(<4 x i32> %v) { ; ============================================================================== ; CHECK-LABEL: load_v2i64: ; NO-SIMD128-NOT: v128 -; SIMD128-VM-NOT: v128 ; SIMD128-NEXT: .functype load_v2i64 (i32) -> (v128){{$}} ; SIMD128-NEXT: v128.load $push[[R:[0-9]+]]=, 0($0){{$}} ; SIMD128-NEXT: return $pop[[R]]{{$}} @@ -1561,7 +1560,6 @@ define <2 x i64> @load_v2i64(<2 x i64>* %p) { ; CHECK-LABEL: load_splat_v2i64: ; NO-SIMD128-NOT: v128 -; SIMD128-VM-NOT: v128 ; SIMD128-NEXT: .functype load_splat_v2i64 (i32) -> (v128){{$}} ; SIMD128-NEXT: v64x2.load_splat $push[[R:[0-9]+]]=, 0($0){{$}} ; SIMD128-NEXT: return $pop[[R]]{{$}} @@ -1574,7 +1572,6 @@ define <2 x i64> @load_splat_v2i64(i64* %p) { ; CHECK-LABEL: load_sext_v2i64: ; NO-SIMD128-NOT: v128 -; SIMD128-VM-NOT: v128 ; SIMD128-NEXT: .functype load_sext_v2i64 (i32) -> (v128){{$}} ; SIMD128-NEXT: i64x2.load32x2_s $push[[R:[0-9]+]]=, 0($0){{$}} ; SIMD128-NEXT: return $pop[[R]]{{$}} @@ -1586,7 +1583,6 @@ define <2 x i64> @load_sext_v2i64(<2 x i32>* %p) { ; CHECK-LABEL: load_zext_v2i64: ; NO-SIMD128-NOT: v128 -; SIMD128-VM-NOT: v128 ; SIMD128-NEXT: .functype load_zext_v2i64 (i32) -> (v128){{$}} ; SIMD128-NEXT: i64x2.load32x2_u $push[[R:[0-9]+]]=, 0($0){{$}} ; SIMD128-NEXT: return $pop[[R]]{{$}} @@ -1609,7 +1605,6 @@ define <2 x i32> @load_ext_v2i64(<2 x i32>* %p) { ; CHECK-LABEL: load_v2i64_with_folded_offset: ; NO-SIMD128-NOT: v128 -; SIMD128-VM-NOT: v128 ; SIMD128-NEXT: .functype load_v2i64_with_folded_offset (i32) -> (v128){{$}} ; SIMD128-NEXT: v128.load $push[[R:[0-9]+]]=, 16($0){{$}} ; 
SIMD128-NEXT: return $pop[[R]]{{$}} @@ -1623,7 +1618,6 @@ define <2 x i64> @load_v2i64_with_folded_offset(<2 x i64>* %p) { ; CHECK-LABEL: load_splat_v2i64_with_folded_offset: ; NO-SIMD128-NOT: v128 -; SIMD128-VM-NOT: v128 ; SIMD128-NEXT: .functype load_splat_v2i64_with_folded_offset (i32) -> (v128){{$}} ; SIMD128-NEXT: v64x2.load_splat $push[[R:[0-9]+]]=, 16($0){{$}} ; SIMD128-NEXT: return $pop[[R]]{{$}} @@ -1639,7 +1633,6 @@ define <2 x i64> @load_splat_v2i64_with_folded_offset(i64* %p) { ; CHECK-LABEL: load_sext_v2i64_with_folded_offset: ; NO-SIMD128-NOT: v128 -; SIMD128-VM-NOT: v128 ; SIMD128-NEXT: .functype load_sext_v2i64_with_folded_offset (i32) -> (v128){{$}} ; SIMD128-NEXT: i64x2.load32x2_s $push[[R:[0-9]+]]=, 16($0){{$}} ; SIMD128-NEXT: return $pop[[R]]{{$}} @@ -1654,7 +1647,6 @@ define <2 x i64> @load_sext_v2i64_with_folded_offset(<2 x i32>* %p) { ; CHECK-LABEL: load_zext_v2i64_with_folded_offset: ; NO-SIMD128-NOT: v128 -; SIMD128-VM-NOT: v128 ; SIMD128-NEXT: .functype load_zext_v2i64_with_folded_offset (i32) -> (v128){{$}} ; SIMD128-NEXT: i64x2.load32x2_u $push[[R:[0-9]+]]=, 16($0){{$}} ; SIMD128-NEXT: return $pop[[R]]{{$}} @@ -1683,7 +1675,6 @@ define <2 x i32> @load_ext_v2i64_with_folded_offset(<2 x i32>* %p) { ; CHECK-LABEL: load_v2i64_with_folded_gep_offset: ; NO-SIMD128-NOT: v128 -; SIMD128-VM-NOT: v128 ; SIMD128-NEXT: .functype load_v2i64_with_folded_gep_offset (i32) -> (v128){{$}} ; SIMD128-NEXT: v128.load $push[[R:[0-9]+]]=, 16($0){{$}} ; SIMD128-NEXT: return $pop[[R]]{{$}} @@ -1695,7 +1686,6 @@ define <2 x i64> @load_v2i64_with_folded_gep_offset(<2 x i64>* %p) { ; CHECK-LABEL: load_splat_v2i64_with_folded_gep_offset: ; NO-SIMD128-NOT: v128 -; SIMD128-VM-NOT: v128 ; SIMD128-NEXT: .functype load_splat_v2i64_with_folded_gep_offset (i32) -> (v128){{$}} ; SIMD128-NEXT: v64x2.load_splat $push[[R:[0-9]+]]=, 8($0){{$}} ; SIMD128-NEXT: return $pop[[R]]{{$}} @@ -1709,7 +1699,6 @@ define <2 x i64> @load_splat_v2i64_with_folded_gep_offset(i64* %p) { ; CHECK-LABEL: load_sext_v2i64_with_folded_gep_offset: ; NO-SIMD128-NOT: v128 -; SIMD128-VM-NOT: v128 ; SIMD128-NEXT: .functype load_sext_v2i64_with_folded_gep_offset (i32) -> (v128){{$}} ; SIMD128-NEXT: i64x2.load32x2_s $push[[R:[0-9]+]]=, 8($0){{$}} ; SIMD128-NEXT: return $pop[[R]]{{$}} @@ -1722,7 +1711,6 @@ define <2 x i64> @load_sext_v2i64_with_folded_gep_offset(<2 x i32>* %p) { ; CHECK-LABEL: load_zext_v2i64_with_folded_gep_offset: ; NO-SIMD128-NOT: v128 -; SIMD128-VM-NOT: v128 ; SIMD128-NEXT: .functype load_zext_v2i64_with_folded_gep_offset (i32) -> (v128){{$}} ; SIMD128-NEXT: i64x2.load32x2_u $push[[R:[0-9]+]]=, 8($0){{$}} ; SIMD128-NEXT: return $pop[[R]]{{$}} @@ -1747,7 +1735,6 @@ define <2 x i32> @load_ext_v2i64_with_folded_gep_offset(<2 x i32>* %p) { ; CHECK-LABEL: load_v2i64_with_unfolded_gep_negative_offset: ; NO-SIMD128-NOT: v128 -; SIMD128-VM-NOT: v128 ; SIMD128-NEXT: .functype load_v2i64_with_unfolded_gep_negative_offset (i32) -> (v128){{$}} ; SIMD128-NEXT: i32.const $push[[L0:[0-9]+]]=, -16{{$}} ; SIMD128-NEXT: i32.add $push[[L1:[0-9]+]]=, $0, $pop[[L0]]{{$}} @@ -1761,7 +1748,6 @@ define <2 x i64> @load_v2i64_with_unfolded_gep_negative_offset(<2 x i64>* %p) { ; CHECK-LABEL: load_splat_v2i64_with_unfolded_gep_negative_offset: ; NO-SIMD128-NOT: v128 -; SIMD128-VM-NOT: v128 ; SIMD128-NEXT: .functype load_splat_v2i64_with_unfolded_gep_negative_offset (i32) -> (v128){{$}} ; SIMD128-NEXT: i32.const $push[[L0:[0-9]+]]=, -8{{$}} ; SIMD128-NEXT: i32.add $push[[L1:[0-9]+]]=, $0, $pop[[L0]]{{$}} @@ -1777,7 +1763,6 @@ define <2 x 
i64> @load_splat_v2i64_with_unfolded_gep_negative_offset(i64* %p) { ; CHECK-LABEL: load_sext_v2i64_with_unfolded_gep_negative_offset: ; NO-SIMD128-NOT: v128 -; SIMD128-VM-NOT: v128 ; SIMD128-NEXT: .functype load_sext_v2i64_with_unfolded_gep_negative_offset (i32) -> (v128){{$}} ; SIMD128-NEXT: i32.const $push[[L0:[0-9]+]]=, -8{{$}} ; SIMD128-NEXT: i32.add $push[[L1:[0-9]+]]=, $0, $pop[[L0]]{{$}} @@ -1792,7 +1777,6 @@ define <2 x i64> @load_sext_v2i64_with_unfolded_gep_negative_offset(<2 x i32>* % ; CHECK-LABEL: load_zext_v2i64_with_unfolded_gep_negative_offset: ; NO-SIMD128-NOT: v128 -; SIMD128-VM-NOT: v128 ; SIMD128-NEXT: .functype load_zext_v2i64_with_unfolded_gep_negative_offset (i32) -> (v128){{$}} ; SIMD128-NEXT: i32.const $push[[L0:[0-9]+]]=, -8{{$}} ; SIMD128-NEXT: i32.add $push[[L1:[0-9]+]]=, $0, $pop[[L0]]{{$}} @@ -1821,7 +1805,6 @@ define <2 x i32> @load_ext_v2i64_with_unfolded_gep_negative_offset(<2 x i32>* %p ; CHECK-LABEL: load_v2i64_with_unfolded_offset: ; NO-SIMD128-NOT: v128 -; SIMD128-VM-NOT: v128 ; SIMD128-NEXT: .functype load_v2i64_with_unfolded_offset (i32) -> (v128){{$}} ; SIMD128-NEXT: i32.const $push[[L0:[0-9]+]]=, 16{{$}} ; SIMD128-NEXT: i32.add $push[[L1:[0-9]+]]=, $0, $pop[[L0]]{{$}} @@ -1837,7 +1820,6 @@ define <2 x i64> @load_v2i64_with_unfolded_offset(<2 x i64>* %p) { ; CHECK-LABEL: load_splat_v2i64_with_unfolded_offset: ; NO-SIMD128-NOT: v128 -; SIMD128-VM-NOT: v128 ; SIMD128-NEXT: .functype load_splat_v2i64_with_unfolded_offset (i32) -> (v128){{$}} ; SIMD128-NEXT: i32.const $push[[L0:[0-9]+]]=, 16{{$}} ; SIMD128-NEXT: i32.add $push[[L1:[0-9]+]]=, $0, $pop[[L0]]{{$}} @@ -1855,7 +1837,6 @@ define <2 x i64> @load_splat_v2i64_with_unfolded_offset(i64* %p) { ; CHECK-LABEL: load_sext_v2i64_with_unfolded_offset: ; NO-SIMD128-NOT: v128 -; SIMD128-VM-NOT: v128 ; SIMD128-NEXT: .functype load_sext_v2i64_with_unfolded_offset (i32) -> (v128){{$}} ; SIMD128-NEXT: i32.const $push[[L0:[0-9]+]]=, 16{{$}} ; SIMD128-NEXT: i32.add $push[[L1:[0-9]+]]=, $0, $pop[[L0]]{{$}} @@ -1872,7 +1853,6 @@ define <2 x i64> @load_sext_v2i64_with_unfolded_offset(<2 x i32>* %p) { ; CHECK-LABEL: load_zext_v2i64_with_unfolded_offset: ; NO-SIMD128-NOT: v128 -; SIMD128-VM-NOT: v128 ; SIMD128-NEXT: .functype load_zext_v2i64_with_unfolded_offset (i32) -> (v128){{$}} ; SIMD128-NEXT: i32.const $push[[L0:[0-9]+]]=, 16{{$}} ; SIMD128-NEXT: i32.add $push[[L1:[0-9]+]]=, $0, $pop[[L0]]{{$}} @@ -1905,7 +1885,6 @@ define <2 x i32> @load_ext_v2i64_with_unfolded_offset(<2 x i32>* %p) { ; CHECK-LABEL: load_v2i64_with_unfolded_gep_offset: ; NO-SIMD128-NOT: v128 -; SIMD128-VM-NOT: v128 ; SIMD128-NEXT: .functype load_v2i64_with_unfolded_gep_offset (i32) -> (v128){{$}} ; SIMD128-NEXT: i32.const $push[[L0:[0-9]+]]=, 16{{$}} ; SIMD128-NEXT: i32.add $push[[L1:[0-9]+]]=, $0, $pop[[L0]]{{$}} @@ -1919,7 +1898,6 @@ define <2 x i64> @load_v2i64_with_unfolded_gep_offset(<2 x i64>* %p) { ; CHECK-LABEL: load_splat_v2i64_with_unfolded_gep_offset: ; NO-SIMD128-NOT: v128 -; SIMD128-VM-NOT: v128 ; SIMD128-NEXT: .functype load_splat_v2i64_with_unfolded_gep_offset (i32) -> (v128){{$}} ; SIMD128-NEXT: i32.const $push[[L0:[0-9]+]]=, 8{{$}} ; SIMD128-NEXT: i32.add $push[[L1:[0-9]+]]=, $0, $pop[[L0]]{{$}} @@ -1935,7 +1913,6 @@ define <2 x i64> @load_splat_v2i64_with_unfolded_gep_offset(i64* %p) { ; CHECK-LABEL: load_sext_v2i64_with_unfolded_gep_offset: ; NO-SIMD128-NOT: v128 -; SIMD128-VM-NOT: v128 ; SIMD128-NEXT: .functype load_sext_v2i64_with_unfolded_gep_offset (i32) -> (v128){{$}} ; SIMD128-NEXT: i32.const $push[[L0:[0-9]+]]=, 8{{$}} 
; SIMD128-NEXT: i32.add $push[[L1:[0-9]+]]=, $0, $pop[[L0]]{{$}} @@ -1950,7 +1927,6 @@ define <2 x i64> @load_sext_v2i64_with_unfolded_gep_offset(<2 x i32>* %p) { ; CHECK-LABEL: load_zext_v2i64_with_unfolded_gep_offset: ; NO-SIMD128-NOT: v128 -; SIMD128-VM-NOT: v128 ; SIMD128-NEXT: .functype load_zext_v2i64_with_unfolded_gep_offset (i32) -> (v128){{$}} ; SIMD128-NEXT: i32.const $push[[L0:[0-9]+]]=, 8{{$}} ; SIMD128-NEXT: i32.add $push[[L1:[0-9]+]]=, $0, $pop[[L0]]{{$}} @@ -1979,7 +1955,6 @@ define <2 x i32> @load_ext_v2i64_with_unfolded_gep_offset(<2 x i32>* %p) { ; CHECK-LABEL: load_v2i64_from_numeric_address: ; NO-SIMD128-NOT: v128 -; SIMD128-VM-NOT: v128 ; SIMD128-NEXT: .functype load_v2i64_from_numeric_address () -> (v128){{$}} ; SIMD128-NEXT: i32.const $push[[L0:[0-9]+]]=, 0{{$}} ; SIMD128-NEXT: v128.load $push[[R:[0-9]+]]=, 32($pop[[L0]]){{$}} @@ -1992,7 +1967,6 @@ define <2 x i64> @load_v2i64_from_numeric_address() { ; CHECK-LABEL: load_splat_v2i64_from_numeric_address: ; NO-SIMD128-NOT: v128 -; SIMD128-VM-NOT: v128 ; SIMD128-NEXT: .functype load_splat_v2i64_from_numeric_address () -> (v128){{$}} ; SIMD128-NEXT: i32.const $push[[L0:[0-9]+]]=, 0{{$}} ; SIMD128-NEXT: v64x2.load_splat $push[[R:[0-9]+]]=, 32($pop[[L0]]){{$}} @@ -2007,7 +1981,6 @@ define <2 x i64> @load_splat_v2i64_from_numeric_address() { ; CHECK-LABEL: load_sext_v2i64_from_numeric_address: ; NO-SIMD128-NOT: v128 -; SIMD128-VM-NOT: v128 ; SIMD128-NEXT: .functype load_sext_v2i64_from_numeric_address () -> (v128){{$}} ; SIMD128-NEXT: i32.const $push[[L0:[0-9]+]]=, 0{{$}} ; SIMD128-NEXT: i64x2.load32x2_s $push[[R:[0-9]+]]=, 32($pop[[L0]]){{$}} @@ -2021,7 +1994,6 @@ define <2 x i64> @load_sext_v2i64_from_numeric_address() { ; CHECK-LABEL: load_zext_v2i64_from_numeric_address: ; NO-SIMD128-NOT: v128 -; SIMD128-VM-NOT: v128 ; SIMD128-NEXT: .functype load_zext_v2i64_from_numeric_address () -> (v128){{$}} ; SIMD128-NEXT: i32.const $push[[L0:[0-9]+]]=, 0{{$}} ; SIMD128-NEXT: i64x2.load32x2_u $push[[R:[0-9]+]]=, 32($pop[[L0]]){{$}} @@ -2048,7 +2020,6 @@ define <2 x i32> @load_ext_v2i64_from_numeric_address() { ; CHECK-LABEL: load_v2i64_from_global_address: ; NO-SIMD128-NOT: v128 -; SIMD128-VM-NOT: v128 ; SIMD128-NEXT: .functype load_v2i64_from_global_address () -> (v128){{$}} ; SIMD128-NEXT: i32.const $push[[L0:[0-9]+]]=, 0{{$}} ; SIMD128-NEXT: v128.load $push[[R:[0-9]+]]=, gv_v2i64($pop[[L0]]){{$}} @@ -2061,7 +2032,6 @@ define <2 x i64> @load_v2i64_from_global_address() { ; CHECK-LABEL: load_splat_v2i64_from_global_address: ; NO-SIMD128-NOT: v128 -; SIMD128-VM-NOT: v128 ; SIMD128-NEXT: .functype load_splat_v2i64_from_global_address () -> (v128){{$}} ; SIMD128-NEXT: i32.const $push[[L0:[0-9]+]]=, 0{{$}} ; SIMD128-NEXT: v64x2.load_splat $push[[R:[0-9]+]]=, gv_i64($pop[[L0]]){{$}} @@ -2076,7 +2046,6 @@ define <2 x i64> @load_splat_v2i64_from_global_address() { ; CHECK-LABEL: load_sext_v2i64_from_global_address: ; NO-SIMD128-NOT: v128 -; SIMD128-VM-NOT: v128 ; SIMD128-NEXT: .functype load_sext_v2i64_from_global_address () -> (v128){{$}} ; SIMD128-NEXT: i32.const $push[[L0:[0-9]+]]=, 0{{$}} ; SIMD128-NEXT: i64x2.load32x2_s $push[[R:[0-9]+]]=, gv_v2i32($pop[[L0]]){{$}} @@ -2090,7 +2059,6 @@ define <2 x i64> @load_sext_v2i64_from_global_address() { ; CHECK-LABEL: load_zext_v2i64_from_global_address: ; NO-SIMD128-NOT: v128 -; SIMD128-VM-NOT: v128 ; SIMD128-NEXT: .functype load_zext_v2i64_from_global_address () -> (v128){{$}} ; SIMD128-NEXT: i32.const $push[[L0:[0-9]+]]=, 0{{$}} ; SIMD128-NEXT: i64x2.load32x2_u $push[[R:[0-9]+]]=, 
gv_v2i32($pop[[L0]]){{$}} @@ -2115,7 +2083,6 @@ define <2 x i32> @load_ext_v2i64_from_global_address() { ; CHECK-LABEL: store_v2i64: ; NO-SIMD128-NOT: v128 -; SIMD128-VM-NOT: v128 ; SIMD128-NEXT: .functype store_v2i64 (v128, i32) -> (){{$}} ; SIMD128-NEXT: v128.store 0($1), $0{{$}} define void @store_v2i64(<2 x i64> %v, <2 x i64>* %p) { @@ -2125,7 +2092,6 @@ define void @store_v2i64(<2 x i64> %v, <2 x i64>* %p) { ; CHECK-LABEL: store_v2i64_with_folded_offset: ; NO-SIMD128-NOT: v128 -; SIMD128-VM-NOT: v128 ; SIMD128-NEXT: .functype store_v2i64_with_folded_offset (v128, i32) -> (){{$}} ; SIMD128-NEXT: v128.store 16($1), $0{{$}} define void @store_v2i64_with_folded_offset(<2 x i64> %v, <2 x i64>* %p) { @@ -2138,7 +2104,6 @@ define void @store_v2i64_with_folded_offset(<2 x i64> %v, <2 x i64>* %p) { ; CHECK-LABEL: store_v2i64_with_folded_gep_offset: ; NO-SIMD128-NOT: v128 -; SIMD128-VM-NOT: v128 ; SIMD128-NEXT: .functype store_v2i64_with_folded_gep_offset (v128, i32) -> (){{$}} ; SIMD128-NEXT: v128.store 16($1), $0{{$}} define void @store_v2i64_with_folded_gep_offset(<2 x i64> %v, <2 x i64>* %p) { @@ -2149,7 +2114,6 @@ define void @store_v2i64_with_folded_gep_offset(<2 x i64> %v, <2 x i64>* %p) { ; CHECK-LABEL: store_v2i64_with_unfolded_gep_negative_offset: ; NO-SIMD128-NOT: v128 -; SIMD128-VM-NOT: v128 ; SIMD128-NEXT: .functype store_v2i64_with_unfolded_gep_negative_offset (v128, i32) -> (){{$}} ; SIMD128-NEXT: i32.const $push[[L0:[0-9]+]]=, -16{{$}} ; SIMD128-NEXT: i32.add $push[[R:[0-9]+]]=, $1, $pop[[L0]]{{$}} @@ -2162,7 +2126,6 @@ define void @store_v2i64_with_unfolded_gep_negative_offset(<2 x i64> %v, <2 x i6 ; CHECK-LABEL: store_v2i64_with_unfolded_offset: ; NO-SIMD128-NOT: v128 -; SIMD128-VM-NOT: v128 ; SIMD128-NEXT: .functype store_v2i64_with_unfolded_offset (v128, i32) -> (){{$}} ; SIMD128-NEXT: i32.const $push[[L0:[0-9]+]]=, -16{{$}} ; SIMD128-NEXT: i32.add $push[[R:[0-9]+]]=, $1, $pop[[L0]]{{$}} @@ -2175,7 +2138,6 @@ define void @store_v2i64_with_unfolded_offset(<2 x i64> %v, <2 x i64>* %p) { ; CHECK-LABEL: store_v2i64_with_unfolded_gep_offset: ; NO-SIMD128-NOT: v128 -; SIMD128-VM-NOT: v128 ; SIMD128-NEXT: .functype store_v2i64_with_unfolded_gep_offset (v128, i32) -> (){{$}} ; SIMD128-NEXT: i32.const $push[[L0:[0-9]+]]=, 16{{$}} ; SIMD128-NEXT: i32.add $push[[R:[0-9]+]]=, $1, $pop[[L0]]{{$}} @@ -2188,7 +2150,6 @@ define void @store_v2i64_with_unfolded_gep_offset(<2 x i64> %v, <2 x i64>* %p) { ; CHECK-LABEL: store_v2i64_to_numeric_address: ; NO-SIMD128-NOT: v128 -; SIMD128-VM-NOT: v128 ; SIMD128-NEXT: .functype store_v2i64_to_numeric_address (v128) -> (){{$}} ; SIMD128-NEXT: i32.const $push[[L0:[0-9]+]]=, 0{{$}} ; SIMD128-NEXT: v128.store 32($pop[[L0]]), $0{{$}} @@ -2200,7 +2161,6 @@ define void @store_v2i64_to_numeric_address(<2 x i64> %v) { ; CHECK-LABEL: store_v2i64_to_global_address: ; NO-SIMD128-NOT: v128 -; SIMD128-VM-NOT: v128 ; SIMD128-NEXT: .functype store_v2i64_to_global_address (v128) -> (){{$}} ; SIMD128-NEXT: i32.const $push[[R:[0-9]+]]=, 0{{$}} ; SIMD128-NEXT: v128.store gv_v2i64($pop[[R]]), $0{{$}} @@ -2519,7 +2479,6 @@ define void @store_v4f32_to_global_address(<4 x float> %v) { ; ============================================================================== ; CHECK-LABEL: load_v2f64: ; NO-SIMD128-NOT: v128 -; SIMD128-VM-NOT: v128 ; SIMD128-NEXT: .functype load_v2f64 (i32) -> (v128){{$}} ; SIMD128-NEXT: v128.load $push[[R:[0-9]+]]=, 0($0){{$}} ; SIMD128-NEXT: return $pop[[R]]{{$}} @@ -2530,7 +2489,6 @@ define <2 x double> @load_v2f64(<2 x double>* %p) { ; CHECK-LABEL: 
load_splat_v2f64: ; NO-SIMD128-NOT: v128 -; SIMD128-VM-NOT: v128 ; SIMD128-NEXT: .functype load_splat_v2f64 (i32) -> (v128){{$}} ; SIMD128-NEXT: v64x2.load_splat $push[[R:[0-9]+]]=, 0($0){{$}} ; SIMD128-NEXT: return $pop[[R]]{{$}} @@ -2543,7 +2501,6 @@ define <2 x double> @load_splat_v2f64(double* %p) { ; CHECK-LABEL: load_v2f64_with_folded_offset: ; NO-SIMD128-NOT: v128 -; SIMD128-VM-NOT: v128 ; SIMD128-NEXT: .functype load_v2f64_with_folded_offset (i32) -> (v128){{$}} ; SIMD128-NEXT: v128.load $push[[R:[0-9]+]]=, 16($0){{$}} ; SIMD128-NEXT: return $pop[[R]]{{$}} @@ -2557,7 +2514,6 @@ define <2 x double> @load_v2f64_with_folded_offset(<2 x double>* %p) { ; CHECK-LABEL: load_splat_v2f64_with_folded_offset: ; NO-SIMD128-NOT: v128 -; SIMD128-VM-NOT: v128 ; SIMD128-NEXT: .functype load_splat_v2f64_with_folded_offset (i32) -> (v128){{$}} ; SIMD128-NEXT: v64x2.load_splat $push[[R:[0-9]+]]=, 16($0){{$}} ; SIMD128-NEXT: return $pop[[R]]{{$}} @@ -2573,7 +2529,6 @@ define <2 x double> @load_splat_v2f64_with_folded_offset(double* %p) { ; CHECK-LABEL: load_v2f64_with_folded_gep_offset: ; NO-SIMD128-NOT: v128 -; SIMD128-VM-NOT: v128 ; SIMD128-NEXT: .functype load_v2f64_with_folded_gep_offset (i32) -> (v128){{$}} ; SIMD128-NEXT: v128.load $push[[R:[0-9]+]]=, 16($0){{$}} ; SIMD128-NEXT: return $pop[[R]]{{$}} @@ -2585,7 +2540,6 @@ define <2 x double> @load_v2f64_with_folded_gep_offset(<2 x double>* %p) { ; CHECK-LABEL: load_splat_v2f64_with_folded_gep_offset: ; NO-SIMD128-NOT: v128 -; SIMD128-VM-NOT: v128 ; SIMD128-NEXT: .functype load_splat_v2f64_with_folded_gep_offset (i32) -> (v128){{$}} ; SIMD128-NEXT: v64x2.load_splat $push[[R:[0-9]+]]=, 8($0){{$}} ; SIMD128-NEXT: return $pop[[R]]{{$}} @@ -2599,7 +2553,6 @@ define <2 x double> @load_splat_v2f64_with_folded_gep_offset(double* %p) { ; CHECK-LABEL: load_v2f64_with_unfolded_gep_negative_offset: ; NO-SIMD128-NOT: v128 -; SIMD128-VM-NOT: v128 ; SIMD128-NEXT: .functype load_v2f64_with_unfolded_gep_negative_offset (i32) -> (v128){{$}} ; SIMD128-NEXT: i32.const $push[[L0:[0-9]+]]=, -16{{$}} ; SIMD128-NEXT: i32.add $push[[L1:[0-9]+]]=, $0, $pop[[L0]]{{$}} @@ -2613,7 +2566,6 @@ define <2 x double> @load_v2f64_with_unfolded_gep_negative_offset(<2 x double>* ; CHECK-LABEL: load_splat_v2f64_with_unfolded_gep_negative_offset: ; NO-SIMD128-NOT: v128 -; SIMD128-VM-NOT: v128 ; SIMD128-NEXT: .functype load_splat_v2f64_with_unfolded_gep_negative_offset (i32) -> (v128){{$}} ; SIMD128-NEXT: i32.const $push[[L0:[0-9]+]]=, -8{{$}} ; SIMD128-NEXT: i32.add $push[[L1:[0-9]+]]=, $0, $pop[[L0]]{{$}} @@ -2629,7 +2581,6 @@ define <2 x double> @load_splat_v2f64_with_unfolded_gep_negative_offset(double* ; CHECK-LABEL: load_v2f64_with_unfolded_offset: ; NO-SIMD128-NOT: v128 -; SIMD128-VM-NOT: v128 ; SIMD128-NEXT: .functype load_v2f64_with_unfolded_offset (i32) -> (v128){{$}} ; SIMD128-NEXT: i32.const $push[[L0:[0-9]+]]=, 16{{$}} ; SIMD128-NEXT: i32.add $push[[L1:[0-9]+]]=, $0, $pop[[L0]]{{$}} @@ -2645,7 +2596,6 @@ define <2 x double> @load_v2f64_with_unfolded_offset(<2 x double>* %p) { ; CHECK-LABEL: load_splat_v2f64_with_unfolded_offset: ; NO-SIMD128-NOT: v128 -; SIMD128-VM-NOT: v128 ; SIMD128-NEXT: .functype load_splat_v2f64_with_unfolded_offset (i32) -> (v128){{$}} ; SIMD128-NEXT: i32.const $push[[L0:[0-9]+]]=, 16{{$}} ; SIMD128-NEXT: i32.add $push[[L1:[0-9]+]]=, $0, $pop[[L0]]{{$}} @@ -2663,7 +2613,6 @@ define <2 x double> @load_splat_v2f64_with_unfolded_offset(double* %p) { ; CHECK-LABEL: load_v2f64_with_unfolded_gep_offset: ; NO-SIMD128-NOT: v128 -; SIMD128-VM-NOT: v128 ; 
SIMD128-NEXT: .functype load_v2f64_with_unfolded_gep_offset (i32) -> (v128){{$}} ; SIMD128-NEXT: i32.const $push[[L0:[0-9]+]]=, 16{{$}} ; SIMD128-NEXT: i32.add $push[[L1:[0-9]+]]=, $0, $pop[[L0]]{{$}} @@ -2677,7 +2626,6 @@ define <2 x double> @load_v2f64_with_unfolded_gep_offset(<2 x double>* %p) { ; CHECK-LABEL: load_splat_v2f64_with_unfolded_gep_offset: ; NO-SIMD128-NOT: v128 -; SIMD128-VM-NOT: v128 ; SIMD128-NEXT: .functype load_splat_v2f64_with_unfolded_gep_offset (i32) -> (v128){{$}} ; SIMD128-NEXT: i32.const $push[[L0:[0-9]+]]=, 8{{$}} ; SIMD128-NEXT: i32.add $push[[L1:[0-9]+]]=, $0, $pop[[L0]]{{$}} @@ -2693,7 +2641,6 @@ define <2 x double> @load_splat_v2f64_with_unfolded_gep_offset(double* %p) { ; CHECK-LABEL: load_v2f64_from_numeric_address: ; NO-SIMD128-NOT: v128 -; SIMD128-VM-NOT: v128 ; SIMD128-NEXT: .functype load_v2f64_from_numeric_address () -> (v128){{$}} ; SIMD128-NEXT: i32.const $push[[L0:[0-9]+]]=, 0{{$}} ; SIMD128-NEXT: v128.load $push[[R:[0-9]+]]=, 32($pop[[L0]]){{$}} @@ -2706,7 +2653,6 @@ define <2 x double> @load_v2f64_from_numeric_address() { ; CHECK-LABEL: load_splat_v2f64_from_numeric_address: ; NO-SIMD128-NOT: v128 -; SIMD128-VM-NOT: v128 ; SIMD128-NEXT: .functype load_splat_v2f64_from_numeric_address () -> (v128){{$}} ; SIMD128-NEXT: i32.const $push[[L0:[0-9]+]]=, 0{{$}} ; SIMD128-NEXT: v64x2.load_splat $push[[R:[0-9]+]]=, 32($pop[[L0]]){{$}} @@ -2721,7 +2667,6 @@ define <2 x double> @load_splat_v2f64_from_numeric_address() { ; CHECK-LABEL: load_v2f64_from_global_address: ; NO-SIMD128-NOT: v128 -; SIMD128-VM-NOT: v128 ; SIMD128-NEXT: .functype load_v2f64_from_global_address () -> (v128){{$}} ; SIMD128-NEXT: i32.const $push[[L0:[0-9]+]]=, 0{{$}} ; SIMD128-NEXT: v128.load $push[[R:[0-9]+]]=, gv_v2f64($pop[[L0]]){{$}} @@ -2734,7 +2679,6 @@ define <2 x double> @load_v2f64_from_global_address() { ; CHECK-LABEL: load_splat_v2f64_from_global_address: ; NO-SIMD128-NOT: v128 -; SIMD128-VM-NOT: v128 ; SIMD128-NEXT: .functype load_splat_v2f64_from_global_address () -> (v128){{$}} ; SIMD128-NEXT: i32.const $push[[L0:[0-9]+]]=, 0{{$}} ; SIMD128-NEXT: v64x2.load_splat $push[[R:[0-9]+]]=, gv_f64($pop[[L0]]){{$}} @@ -2749,7 +2693,6 @@ define <2 x double> @load_splat_v2f64_from_global_address() { ; CHECK-LABEL: store_v2f64: ; NO-SIMD128-NOT: v128 -; SIMD128-VM-NOT: v128 ; SIMD128-NEXT: .functype store_v2f64 (v128, i32) -> (){{$}} ; SIMD128-NEXT: v128.store 0($1), $0{{$}} define void @store_v2f64(<2 x double> %v, <2 x double>* %p) { @@ -2759,7 +2702,6 @@ define void @store_v2f64(<2 x double> %v, <2 x double>* %p) { ; CHECK-LABEL: store_v2f64_with_folded_offset: ; NO-SIMD128-NOT: v128 -; SIMD128-VM-NOT: v128 ; SIMD128-NEXT: .functype store_v2f64_with_folded_offset (v128, i32) -> (){{$}} ; SIMD128-NEXT: v128.store 16($1), $0{{$}} define void @store_v2f64_with_folded_offset(<2 x double> %v, <2 x double>* %p) { @@ -2772,7 +2714,6 @@ define void @store_v2f64_with_folded_offset(<2 x double> %v, <2 x double>* %p) { ; CHECK-LABEL: store_v2f64_with_folded_gep_offset: ; NO-SIMD128-NOT: v128 -; SIMD128-VM-NOT: v128 ; SIMD128-NEXT: .functype store_v2f64_with_folded_gep_offset (v128, i32) -> (){{$}} ; SIMD128-NEXT: v128.store 16($1), $0{{$}} define void @store_v2f64_with_folded_gep_offset(<2 x double> %v, <2 x double>* %p) { @@ -2783,7 +2724,6 @@ define void @store_v2f64_with_folded_gep_offset(<2 x double> %v, <2 x double>* % ; CHECK-LABEL: store_v2f64_with_unfolded_gep_negative_offset: ; NO-SIMD128-NOT: v128 -; SIMD128-VM-NOT: v128 ; SIMD128-NEXT: .functype 
store_v2f64_with_unfolded_gep_negative_offset (v128, i32) -> (){{$}}
 ; SIMD128-NEXT: i32.const $push[[L0:[0-9]+]]=, -16{{$}}
 ; SIMD128-NEXT: i32.add $push[[R:[0-9]+]]=, $1, $pop[[L0]]{{$}}
@@ -2796,7 +2736,6 @@ define void @store_v2f64_with_unfolded_gep_negative_offset(<2 x double> %v, <2 x
 ; CHECK-LABEL: store_v2f64_with_unfolded_offset:
 ; NO-SIMD128-NOT: v128
-; SIMD128-VM-NOT: v128
 ; SIMD128-NEXT: .functype store_v2f64_with_unfolded_offset (v128, i32) -> (){{$}}
 ; SIMD128-NEXT: i32.const $push[[L0:[0-9]+]]=, -16{{$}}
 ; SIMD128-NEXT: i32.add $push[[R:[0-9]+]]=, $1, $pop[[L0]]{{$}}
@@ -2809,7 +2748,6 @@ define void @store_v2f64_with_unfolded_offset(<2 x double> %v, <2 x double>* %p)
 ; CHECK-LABEL: store_v2f64_with_unfolded_gep_offset:
 ; NO-SIMD128-NOT: v128
-; SIMD128-VM-NOT: v128
 ; SIMD128-NEXT: .functype store_v2f64_with_unfolded_gep_offset (v128, i32) -> (){{$}}
 ; SIMD128-NEXT: i32.const $push[[L0:[0-9]+]]=, 16{{$}}
 ; SIMD128-NEXT: i32.add $push[[R:[0-9]+]]=, $1, $pop[[L0]]{{$}}
@@ -2822,7 +2760,6 @@ define void @store_v2f64_with_unfolded_gep_offset(<2 x double> %v, <2 x double>*
 ; CHECK-LABEL: store_v2f64_to_numeric_address:
 ; NO-SIMD128-NOT: v128
-; SIMD128-VM-NOT: v128
 ; SIMD128-NEXT: .functype store_v2f64_to_numeric_address (v128) -> (){{$}}
 ; SIMD128-NEXT: i32.const $push[[L0:[0-9]+]]=, 0{{$}}
 ; SIMD128-NEXT: v128.store 32($pop[[L0]]), $0{{$}}
@@ -2834,7 +2771,6 @@ define void @store_v2f64_to_numeric_address(<2 x double> %v) {
 ; CHECK-LABEL: store_v2f64_to_global_address:
 ; NO-SIMD128-NOT: v128
-; SIMD128-VM-NOT: v128
 ; SIMD128-NEXT: .functype store_v2f64_to_global_address (v128) -> (){{$}}
 ; SIMD128-NEXT: i32.const $push[[R:[0-9]+]]=, 0{{$}}
 ; SIMD128-NEXT: v128.store gv_v2f64($pop[[R]]), $0{{$}}
diff --git a/llvm/test/CodeGen/WebAssembly/simd-scalar-to-vector.ll b/llvm/test/CodeGen/WebAssembly/simd-scalar-to-vector.ll
index b867bd6a31b90..506e7f113c56f 100644
--- a/llvm/test/CodeGen/WebAssembly/simd-scalar-to-vector.ll
+++ b/llvm/test/CodeGen/WebAssembly/simd-scalar-to-vector.ll
@@ -24,7 +24,7 @@ target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
 target triple = "wasm32-unknown-unknown"
 ; CHECK-LABEL: foo:
-; CHECK: i32x4.splat
+; CHECK: i64x2.splat
 define void @foo() {
 entry:
 %a = load <2 x i16>, <2 x i16>* undef, align 1
diff --git a/llvm/test/CodeGen/WebAssembly/simd-sext-inreg.ll b/llvm/test/CodeGen/WebAssembly/simd-sext-inreg.ll
index 0c375827c0f58..13f9ca14812de 100644
--- a/llvm/test/CodeGen/WebAssembly/simd-sext-inreg.ll
+++ b/llvm/test/CodeGen/WebAssembly/simd-sext-inreg.ll
@@ -1,5 +1,4 @@
 ; RUN: llc < %s -asm-verbose=false -verify-machineinstrs -wasm-keep-registers -disable-wasm-fallthrough-return-opt -wasm-disable-explicit-locals -mattr=+unimplemented-simd128 | FileCheck %s --check-prefixes CHECK,SIMD128
-; RUN: llc < %s -asm-verbose=false -verify-machineinstrs -wasm-keep-registers -disable-wasm-fallthrough-return-opt -wasm-disable-explicit-locals -mattr=+simd128 | FileCheck %s --check-prefixes CHECK,SIMD128-VM
 ; RUN: llc < %s -asm-verbose=false -verify-machineinstrs -wasm-keep-registers -disable-wasm-fallthrough-return-opt -wasm-disable-explicit-locals | FileCheck %s --check-prefixes CHECK,NO-SIMD128
 ; Test that vector sign extensions lower to shifts
@@ -7,55 +6,133 @@ target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
 target triple = "wasm32-unknown-unknown"
-; CHECK-LABEL: sext_inreg_v16i8:
+; CHECK-LABEL: sext_v16i8:
 ; NO-SIMD128-NOT: i8x16
-; SIMD128-NEXT: .functype sext_inreg_v16i8 (v128) -> (v128){{$}}
+; SIMD128-NEXT: .functype sext_v16i8 (v128) -> (v128){{$}}
 ; SIMD128-NEXT: i32.const $push[[T0:[0-9]+]]=, 7{{$}}
 ; SIMD128-NEXT: i8x16.shl $push[[T1:[0-9]+]]=, $0, $pop[[T0]]{{$}}
 ; SIMD128-NEXT: i32.const $push[[T2:[0-9]+]]=, 7{{$}}
 ; SIMD128-NEXT: i8x16.shr_s $push[[R:[0-9]+]]=, $pop[[T1]], $pop[[T2]]{{$}}
 ; SIMD128-NEXT: return $pop[[R]]{{$}}
-define <16 x i8> @sext_inreg_v16i8(<16 x i1> %x) {
+define <16 x i8> @sext_v16i8(<16 x i1> %x) {
 %res = sext <16 x i1> %x to <16 x i8>
 ret <16 x i8> %res
 }
-; CHECK-LABEL: sext_inreg_v8i16:
+; CHECK-LABEL: sext_v8i16:
 ; NO-SIMD128-NOT: i16x8
-; SIMD128-NEXT: .functype sext_inreg_v8i16 (v128) -> (v128){{$}}
+; SIMD128-NEXT: .functype sext_v8i16 (v128) -> (v128){{$}}
 ; SIMD128-NEXT: i32.const $push[[T0:[0-9]+]]=, 15{{$}}
 ; SIMD128-NEXT: i16x8.shl $push[[T1:[0-9]+]]=, $0, $pop[[T0]]{{$}}
 ; SIMD128-NEXT: i32.const $push[[T2:[0-9]+]]=, 15{{$}}
 ; SIMD128-NEXT: i16x8.shr_s $push[[R:[0-9]+]]=, $pop[[T1]], $pop[[T2]]{{$}}
 ; SIMD128-NEXT: return $pop[[R]]{{$}}
-define <8 x i16> @sext_inreg_v8i16(<8 x i1> %x) {
+define <8 x i16> @sext_v8i16(<8 x i1> %x) {
 %res = sext <8 x i1> %x to <8 x i16>
 ret <8 x i16> %res
 }
-; CHECK-LABEL: sext_inreg_v4i32:
+; CHECK-LABEL: sext_v4i32:
 ; NO-SIMD128-NOT: i32x4
-; SIMD128-NEXT: .functype sext_inreg_v4i32 (v128) -> (v128){{$}}
+; SIMD128-NEXT: .functype sext_v4i32 (v128) -> (v128){{$}}
 ; SIMD128-NEXT: i32.const $push[[T0:[0-9]+]]=, 31{{$}}
 ; SIMD128-NEXT: i32x4.shl $push[[T1:[0-9]+]]=, $0, $pop[[T0]]{{$}}
 ; SIMD128-NEXT: i32.const $push[[T2:[0-9]+]]=, 31{{$}}
 ; SIMD128-NEXT: i32x4.shr_s $push[[R:[0-9]+]]=, $pop[[T1]], $pop[[T2]]{{$}}
 ; SIMD128-NEXT: return $pop[[R]]{{$}}
-define <4 x i32> @sext_inreg_v4i32(<4 x i1> %x) {
+define <4 x i32> @sext_v4i32(<4 x i1> %x) {
 %res = sext <4 x i1> %x to <4 x i32>
 ret <4 x i32> %res
 }
-; CHECK-LABEL: sext_inreg_v2i64:
+; CHECK-LABEL: sext_v2i64:
 ; NO-SIMD128-NOT: i64x2
-; SDIM128-VM-NOT: i64x2
-; SIMD128-NEXT: .functype sext_inreg_v2i64 (v128) -> (v128){{$}}
+; SIMD128-NEXT: .functype sext_v2i64 (v128) -> (v128){{$}}
 ; SIMD128-NEXT: i32.const $push[[T0:[0-9]+]]=, 63{{$}}
 ; SIMD128-NEXT: i64x2.shl $push[[T1:[0-9]+]]=, $0, $pop[[T0]]{{$}}
 ; SIMD128-NEXT: i32.const $push[[T2:[0-9]+]]=, 63{{$}}
 ; SIMD128-NEXT: i64x2.shr_s $push[[R:[0-9]+]]=, $pop[[T1]], $pop[[T2]]{{$}}
 ; SIMD128-NEXT: return $pop[[R]]{{$}}
-define <2 x i64> @sext_inreg_v2i64(<2 x i1> %x) {
+define <2 x i64> @sext_v2i64(<2 x i1> %x) {
 %res = sext <2 x i1> %x to <2 x i64>
 ret <2 x i64> %res
 }
+
+; CHECK-LABEL: sext_inreg_i8_to_i16:
+; SIMD128-NEXT: .functype sext_inreg_i8_to_i16 (v128) -> (i32){{$}}
+; SIMD128-NEXT: i8x16.extract_lane_s $push[[R:[0-9]+]]=, $0, 2{{$}}
+; SIMD128-NEXT: return $pop[[R]]{{$}}
+define i16 @sext_inreg_i8_to_i16(<8 x i16> %x) {
+ %lane = extractelement <8 x i16> %x, i32 1
+ %a = shl i16 %lane, 8
+ %res = ashr i16 %a, 8
+ ret i16 %res
+}
+
+; CHECK-LABEL: sext_inreg_i8_to_i32:
+; SIMD128-NEXT: .functype sext_inreg_i8_to_i32 (v128) -> (i32){{$}}
+; SIMD128-NEXT: i8x16.extract_lane_s $push[[R:[0-9]+]]=, $0, 4{{$}}
+; SIMD128-NEXT: return $pop[[R]]{{$}}
+define i32 @sext_inreg_i8_to_i32(<4 x i32> %x) {
+ %lane = extractelement <4 x i32> %x, i32 1
+ %a = shl i32 %lane, 24
+ %res = ashr i32 %a, 24
+ ret i32 %res
+}
+
+; CHECK-LABEL: sext_inreg_i16_to_i32:
+; SIMD128-NEXT: .functype sext_inreg_i16_to_i32 (v128) -> (i32){{$}}
+; SIMD128-NEXT: i16x8.extract_lane_s $push[[R:[0-9]+]]=, $0, 2{{$}}
+; SIMD128-NEXT: return $pop[[R]]{{$}}
+define i32 @sext_inreg_i16_to_i32(<4 x i32> %x) {
+ %lane = extractelement <4 x i32> %x, i32 1
+ %a = shl i32 %lane, 16
+ %res = ashr i32 %a, 16
+ ret i32 %res
+}
+
+; CHECK-LABEL: sext_inreg_i8_to_i64:
+; SIMD128-NEXT: .functype sext_inreg_i8_to_i64 (v128) -> (i64){{$}}
+; SIMD128-NEXT: i64x2.extract_lane $push[[T0:[0-9]+]]=, $0, 1{{$}}
+; SIMD128-NEXT: i64.const $push[[T1:[0-9]+]]=, 56{{$}}
+; SIMD128-NEXT: i64.shl $push[[T2:[0-9]+]]=, $pop[[T0]], $pop[[T1]]{{$}}
+; SIMD128-NEXT: i64.const $push[[T3:[0-9]+]]=, 56{{$}}
+; SIMD128-NEXT: i64.shr_s $push[[R:[0-9]+]]=, $pop[[T2]], $pop[[T3]]{{$}}
+; SIMD128-NEXT: return $pop[[R]]{{$}}
+define i64 @sext_inreg_i8_to_i64(<2 x i64> %x) {
+ %lane = extractelement <2 x i64> %x, i32 1
+ %a = shl i64 %lane, 56
+ %res = ashr i64 %a, 56
+ ret i64 %res
+}
+
+; CHECK-LABEL: sext_inreg_i16_to_i64:
+; SIMD128-NEXT: .functype sext_inreg_i16_to_i64 (v128) -> (i64){{$}}
+; SIMD128-NEXT: i64x2.extract_lane $push[[T0:[0-9]+]]=, $0, 1{{$}}
+; SIMD128-NEXT: i64.const $push[[T1:[0-9]+]]=, 48{{$}}
+; SIMD128-NEXT: i64.shl $push[[T2:[0-9]+]]=, $pop[[T0]], $pop[[T1]]{{$}}
+; SIMD128-NEXT: i64.const $push[[T3:[0-9]+]]=, 48{{$}}
+; SIMD128-NEXT: i64.shr_s $push[[R:[0-9]+]]=, $pop[[T2]], $pop[[T3]]{{$}}
+; SIMD128-NEXT: return $pop[[R]]{{$}}
+define i64 @sext_inreg_i16_to_i64(<2 x i64> %x) {
+ %lane = extractelement <2 x i64> %x, i32 1
+ %a = shl i64 %lane, 48
+ %res = ashr i64 %a, 48
+ ret i64 %res
+}
+
+; CHECK-LABEL: sext_inreg_i32_to_i64:
+; NO-SIMD128-NOT: i64x2
+; SIMD128-NEXT: .functype sext_inreg_i32_to_i64 (v128) -> (i64){{$}}
+; SIMD128-NEXT: i64x2.extract_lane $push[[T0:[0-9]+]]=, $0, 1{{$}}
+; SIMD128-NEXT: i64.const $push[[T1:[0-9]+]]=, 32{{$}}
+; SIMD128-NEXT: i64.shl $push[[T2:[0-9]+]]=, $pop[[T0]], $pop[[T1]]{{$}}
+; SIMD128-NEXT: i64.const $push[[T3:[0-9]+]]=, 32{{$}}
+; SIMD128-NEXT: i64.shr_s $push[[R:[0-9]+]]=, $pop[[T2]], $pop[[T3]]{{$}}
+; SIMD128-NEXT: return $pop[[R]]{{$}}
+define i64 @sext_inreg_i32_to_i64(<2 x i64> %x) {
+ %lane = extractelement <2 x i64> %x, i32 1
+ %a = shl i64 %lane, 32
+ %res = ashr i64 %a, 32
+ ret i64 %res
+}
diff --git a/llvm/test/CodeGen/WebAssembly/simd.ll b/llvm/test/CodeGen/WebAssembly/simd.ll
index b2d0638065177..2934d2c9beaca 100644
--- a/llvm/test/CodeGen/WebAssembly/simd.ll
+++ b/llvm/test/CodeGen/WebAssembly/simd.ll
@@ -85,7 +85,6 @@ define i32 @extract_undef_v16i8_s(<16 x i8> %v) {
 ; CHECK-LABEL: extract_v16i8_u:
 ; NO-SIMD128-NOT: i8x16
-; SIMD128-VM-NOT: i8x16.extract_lane_u
 ; SIMD128-NEXT: .functype extract_v16i8_u (v128) -> (i32){{$}}
 ; SIMD128-NEXT: i8x16.extract_lane_u $push[[R:[0-9]+]]=, $0, 13{{$}}
 ; SIMD128-NEXT: return $pop[[R]]{{$}}
@@ -116,7 +115,6 @@ define i32 @extract_var_v16i8_u(<16 x i8> %v, i32 %i) {
 ; CHECK-LABEL: extract_undef_v16i8_u:
 ; NO-SIMD128-NOT: i8x16
-; SIMD128-VM-NOT: i8x16.extract_lane_u
 ; SIMD128-NEXT: .functype extract_undef_v16i8_u (v128) -> (i32){{$}}
 ; SIMD128-NEXT: i8x16.extract_lane_u $push[[R:[0-9]+]]=, $0, 0{{$}}
 ; SIMD128-NEXT: return $pop[[R]]{{$}}
@@ -129,7 +127,7 @@ define i32 @extract_undef_v16i8_u(<16 x i8> %v) {
 ; CHECK-LABEL: extract_v16i8:
 ; NO-SIMD128-NOT: i8x16
 ; SIMD128-NEXT: .functype extract_v16i8 (v128) -> (i32){{$}}
-; SIMD128-NEXT: i8x16.extract_lane_s $push[[R:[0-9]+]]=, $0, 13{{$}}
+; SIMD128-NEXT: i8x16.extract_lane_u $push[[R:[0-9]+]]=, $0, 13{{$}}
 ; SIMD128-NEXT: return $pop[[R]]{{$}}
 define i8 @extract_v16i8(<16 x i8> %v) {
 %elem = extractelement <16 x i8> %v, i8 13
@@ -157,7 +155,7 @@ define i8 @extract_var_v16i8(<16 x i8> %v, i32 %i) {
 ; CHECK-LABEL: extract_undef_v16i8:
 ; NO-SIMD128-NOT: i8x16
 ; SIMD128-NEXT: .functype extract_undef_v16i8 (v128) -> (i32){{$}}
-;
SIMD128-NEXT: i8x16.extract_lane_s $push[[R:[0-9]+]]=, $0, 0{{$}} +; SIMD128-NEXT: i8x16.extract_lane_u $push[[R:[0-9]+]]=, $0, 0{{$}} ; SIMD128-NEXT: return $pop[[R]]{{$}} define i8 @extract_undef_v16i8(<16 x i8> %v) { %elem = extractelement <16 x i8> %v, i8 undef @@ -279,6 +277,7 @@ define <16 x i8> @build_v16i8(i8 %x0, i8 %x1, i8 %x2, i8 %x3, ; ============================================================================== ; CHECK-LABEL: const_v8i16: ; NO-SIMD128-NOT: i16x8 +; SIMD128-VM-NOT: v128.const ; SIMD128-NEXT: .functype const_v8i16 () -> (v128){{$}} ; SIMD128-NEXT: v128.const $push[[R:[0-9]+]]=, 256, 770, 1284, 1798, 2312, 2826, 3340, 3854{{$}} ; SIMD128-NEXT: return $pop[[R]]{{$}} @@ -350,7 +349,6 @@ define i32 @extract_undef_v8i16_s(<8 x i16> %v) { ; CHECK-LABEL: extract_v8i16_u: ; NO-SIMD128-NOT: i16x8 -; SIMD128-VM-NOT: i16x8.extract_lane_u ; SIMD128-NEXT: .functype extract_v8i16_u (v128) -> (i32){{$}} ; SIMD128-NEXT: i16x8.extract_lane_u $push[[R:[0-9]+]]=, $0, 5{{$}} ; SIMD128-NEXT: return $pop[[R]]{{$}} @@ -383,7 +381,6 @@ define i32 @extract_var_v8i16_u(<8 x i16> %v, i32 %i) { ; CHECK-LABEL: extract_undef_v8i16_u: ; NO-SIMD128-NOT: i16x8 -; SIMD128-VM-NOT: i16x8.extract_lane_u ; SIMD128-NEXT: .functype extract_undef_v8i16_u (v128) -> (i32){{$}} ; SIMD128-NEXT: i16x8.extract_lane_u $push[[R:[0-9]+]]=, $0, 0{{$}} ; SIMD128-NEXT: return $pop[[R]]{{$}} @@ -396,7 +393,7 @@ define i32 @extract_undef_v8i16_u(<8 x i16> %v) { ; CHECK-LABEL: extract_v8i16: ; NO-SIMD128-NOT: i16x8 ; SIMD128-NEXT: .functype extract_v8i16 (v128) -> (i32){{$}} -; SIMD128-NEXT: i16x8.extract_lane_s $push[[R:[0-9]+]]=, $0, 5{{$}} +; SIMD128-NEXT: i16x8.extract_lane_u $push[[R:[0-9]+]]=, $0, 5{{$}} ; SIMD128-NEXT: return $pop[[R]]{{$}} define i16 @extract_v8i16(<8 x i16> %v) { %elem = extractelement <8 x i16> %v, i16 5 @@ -426,7 +423,7 @@ define i16 @extract_var_v8i16(<8 x i16> %v, i32 %i) { ; CHECK-LABEL: extract_undef_v8i16: ; NO-SIMD128-NOT: i16x8 ; SIMD128-NEXT: .functype extract_undef_v8i16 (v128) -> (i32){{$}} -; SIMD128-NEXT: i16x8.extract_lane_s $push[[R:[0-9]+]]=, $0, 0{{$}} +; SIMD128-NEXT: i16x8.extract_lane_u $push[[R:[0-9]+]]=, $0, 0{{$}} ; SIMD128-NEXT: return $pop[[R]]{{$}} define i16 @extract_undef_v8i16(<8 x i16> %v) { %elem = extractelement <8 x i16> %v, i16 undef @@ -529,6 +526,7 @@ define <8 x i16> @build_v8i16(i16 %x0, i16 %x1, i16 %x2, i16 %x3, ; ============================================================================== ; CHECK-LABEL: const_v4i32: ; NO-SIMD128-NOT: i32x4 +; SIMD128-VM-NOT: v128.const ; SIMD128-NEXT: .functype const_v4i32 () -> (v128){{$}} ; SIMD128-NEXT: v128.const $push[[R:[0-9]+]]=, 50462976, 117835012, 185207048, 252579084{{$}} ; SIMD128-NEXT: return $pop[[R]]{{$}} @@ -680,7 +678,7 @@ define <4 x i32> @build_v4i32(i32 %x0, i32 %x1, i32 %x2, i32 %x3) { ; ============================================================================== ; CHECK-LABEL: const_v2i64: ; NO-SIMD128-NOT: i64x2 -; SIMD128-VM-NOT: i64x2 +; SIMD128-VM-NOT: v128.const ; SIMD128-NEXT: .functype const_v2i64 () -> (v128){{$}} ; SIMD128-NEXT: v128.const $push[[R:[0-9]+]]=, 506097522914230528, 1084818905618843912{{$}} ; SIMD128-NEXT: return $pop[[R]]{{$}} @@ -690,7 +688,6 @@ define <2 x i64> @const_v2i64() { ; CHECK-LABEL: splat_v2i64: ; NO-SIMD128-NOT: i64x2 -; SIMD128-VM-NOT: i64x2 ; SIMD128-NEXT: .functype splat_v2i64 (i64) -> (v128){{$}} ; SIMD128-NEXT: i64x2.splat $push[[R:[0-9]+]]=, $0{{$}} ; SIMD128-NEXT: return $pop[[R]]{{$}} @@ -708,7 +705,6 @@ define <2 x i64> @const_splat_v2i64() { ; 
CHECK-LABEL: extract_v2i64: ; NO-SIMD128-NOT: i64x2 -; SIMD128-VM-NOT: i64x2 ; SIMD128-NEXT: .functype extract_v2i64 (v128) -> (i64){{$}} ; SIMD128-NEXT: i64x2.extract_lane $push[[R:[0-9]+]]=, $0, 1{{$}} ; SIMD128-NEXT: return $pop[[R]]{{$}} @@ -739,7 +735,6 @@ define i64 @extract_var_v2i64(<2 x i64> %v, i32 %i) { ; CHECK-LABEL: extract_zero_v2i64: ; NO-SIMD128-NOT: i64x2 -; SIMD128-VM-NOT: i64x2 ; SIMD128-NEXT: .functype extract_zero_v2i64 (v128) -> (i64){{$}} ; SIMD128-NEXT: i64x2.extract_lane $push[[R:[0-9]+]]=, $0, 0{{$}} ; SIMD128-NEXT: return $pop[[R]]{{$}} @@ -750,7 +745,6 @@ define i64 @extract_zero_v2i64(<2 x i64> %v) { ; CHECK-LABEL: replace_v2i64: ; NO-SIMD128-NOT: i64x2 -; SIMD128-VM-NOT: i64x2 ; SIMD128-NEXT: .functype replace_v2i64 (v128, i64) -> (v128){{$}} ; SIMD128-NEXT: i64x2.replace_lane $push[[R:[0-9]+]]=, $0, 0, $1{{$}} ; SIMD128-NEXT: return $pop[[R]]{{$}} @@ -761,7 +755,6 @@ define <2 x i64> @replace_v2i64(<2 x i64> %v, i64 %x) { ; CHECK-LABEL: replace_var_v2i64: ; NO-SIMD128-NOT: i64x2 -; SIMD128-VM-NOT: i64x2 ; SIMD128-NEXT: .functype replace_var_v2i64 (v128, i32, i64) -> (v128){{$}} ; SIMD128-NEXT: global.get $push[[L0:[0-9]+]]=, __stack_pointer{{$}} ; SIMD128-NEXT: i32.const $push[[L1:[0-9]+]]=, 16{{$}} @@ -783,7 +776,6 @@ define <2 x i64> @replace_var_v2i64(<2 x i64> %v, i32 %i, i64 %x) { ; CHECK-LABEL: replace_zero_v2i64: ; NO-SIMD128-NOT: i64x2 -; SIMD128-VM-NOT: i64x2 ; SIMD128-NEXT: .functype replace_zero_v2i64 (v128, i64) -> (v128){{$}} ; SIMD128-NEXT: i64x2.replace_lane $push[[R:[0-9]+]]=, $0, 0, $1{{$}} ; SIMD128-NEXT: return $pop[[R]]{{$}} @@ -817,7 +809,6 @@ define <2 x i64> @shuffle_undef_v2i64(<2 x i64> %x, <2 x i64> %y) { ; CHECK-LABEL: build_v2i64: ; NO-SIMD128-NOT: i64x2 -; SIMD128-VM-NOT: i64x2 ; SIMD128-NEXT: .functype build_v2i64 (i64, i64) -> (v128){{$}} ; SIMD128-NEXT: i64x2.splat $push[[L0:[0-9]+]]=, $0{{$}} ; SIMD128-NEXT: i64x2.replace_lane $push[[R:[0-9]+]]=, $pop[[L0]], 1, $1{{$}} @@ -833,6 +824,7 @@ define <2 x i64> @build_v2i64(i64 %x0, i64 %x1) { ; ============================================================================== ; CHECK-LABEL: const_v4f32: ; NO-SIMD128-NOT: f32x4 +; SIMD128-VM-NOT: v128.const ; SIMD128-NEXT: .functype const_v4f32 () -> (v128){{$}} ; SIMD128-NEXT: v128.const $push[[R:[0-9]+]]=, ; SIMD128-SAME: 0x1.0402p-121, 0x1.0c0a08p-113, 0x1.14121p-105, 0x1.1c1a18p-97{{$}} @@ -986,6 +978,7 @@ define <4 x float> @build_v4f32(float %x0, float %x1, float %x2, float %x3) { ; ============================================================================== ; CHECK-LABEL: const_v2f64: ; NO-SIMD128-NOT: f64x2 +; SIMD128-VM-NOT: v128.const ; SIMD128-NEXT: .functype const_v2f64 () -> (v128){{$}} ; SIMD128-NEXT: v128.const $push[[R:[0-9]+]]=, 0x1.60504030201p-911, 0x1.e0d0c0b0a0908p-783{{$}} ; SIMD128-NEXT: return $pop[[R]]{{$}} @@ -995,7 +988,6 @@ define <2 x double> @const_v2f64() { ; CHECK-LABEL: splat_v2f64: ; NO-SIMD128-NOT: f64x2 -; SIMD128-VM-NOT: f64x2 ; SIMD128-NEXT: .functype splat_v2f64 (f64) -> (v128){{$}} ; SIMD128-NEXT: f64x2.splat $push[[R:[0-9]+]]=, $0{{$}} ; SIMD128-NEXT: return $pop[[R]]{{$}} @@ -1013,7 +1005,6 @@ define <2 x double> @const_splat_v2f64() { ; CHECK-LABEL: extract_v2f64: ; NO-SIMD128-NOT: f64x2 -; SIMD128-VM-NOT: f64x2 ; SIMD128-NEXT: .functype extract_v2f64 (v128) -> (f64){{$}} ; SIMD128-NEXT: f64x2.extract_lane $push[[R:[0-9]+]]=, $0, 1{{$}} ; SIMD128-NEXT: return $pop[[R]]{{$}} @@ -1044,7 +1035,6 @@ define double @extract_var_v2f64(<2 x double> %v, i32 %i) { ; CHECK-LABEL: extract_zero_v2f64: ; 
NO-SIMD128-NOT: f64x2 -; SIMD128-VM-NOT: f64x2 ; SIMD128-NEXT: .functype extract_zero_v2f64 (v128) -> (f64){{$}} ; SIMD128-NEXT: f64x2.extract_lane $push[[R:[0-9]+]]=, $0, 0{{$}} ; SIMD128-NEXT: return $pop[[R]]{{$}} @@ -1055,7 +1045,6 @@ define double @extract_zero_v2f64(<2 x double> %v) { ; CHECK-LABEL: replace_v2f64: ; NO-SIMD128-NOT: f64x2 -; SIMD128-VM-NOT: f64x2 ; SIMD128-NEXT: .functype replace_v2f64 (v128, f64) -> (v128){{$}} ; SIMD128-NEXT: f64x2.replace_lane $push[[R:[0-9]+]]=, $0, 0, $1{{$}} ; SIMD128-NEXT: return $pop[[R]]{{$}} @@ -1066,7 +1055,6 @@ define <2 x double> @replace_v2f64(<2 x double> %v, double %x) { ; CHECK-LABEL: replace_var_v2f64: ; NO-SIMD128-NOT: f64x2 -; SIMD128-VM-NOT: f64x2 ; SIMD128-NEXT: .functype replace_var_v2f64 (v128, i32, f64) -> (v128){{$}} ; SIMD128-NEXT: global.get $push[[L0:[0-9]+]]=, __stack_pointer{{$}} ; SIMD128-NEXT: i32.const $push[[L1:[0-9]+]]=, 16{{$}} @@ -1088,7 +1076,6 @@ define <2 x double> @replace_var_v2f64(<2 x double> %v, i32 %i, double %x) { ; CHECK-LABEL: replace_zero_v2f64: ; NO-SIMD128-NOT: f64x2 -; SIMD128-VM-NOT: f64x2 ; SIMD128-NEXT: .functype replace_zero_v2f64 (v128, f64) -> (v128){{$}} ; SIMD128-NEXT: f64x2.replace_lane $push[[R:[0-9]+]]=, $0, 0, $1{{$}} ; SIMD128-NEXT: return $pop[[R]]{{$}} @@ -1123,7 +1110,6 @@ define <2 x double> @shuffle_undef_v2f64(<2 x double> %x, <2 x double> %y) { ; CHECK-LABEL: build_v2f64: ; NO-SIMD128-NOT: f64x2 -; SIMD128-VM-NOT: f64x2 ; SIMD128-NEXT: .functype build_v2f64 (f64, f64) -> (v128){{$}} ; SIMD128-NEXT: f64x2.splat $push[[L0:[0-9]+]]=, $0{{$}} ; SIMD128-NEXT: f64x2.replace_lane $push[[R:[0-9]+]]=, $pop[[L0]], 1, $1{{$}} diff --git a/llvm/test/MC/Disassembler/WebAssembly/wasm.txt b/llvm/test/MC/Disassembler/WebAssembly/wasm.txt index 08cc95434c16d..bb50b646ab549 100644 --- a/llvm/test/MC/Disassembler/WebAssembly/wasm.txt +++ b/llvm/test/MC/Disassembler/WebAssembly/wasm.txt @@ -34,19 +34,19 @@ 0xFC 0x07 # CHECK: v128.const 50462976, 117835012, 185207048, 252579084 -0xFD 0x02 0x00 0x01 0x02 0x03 0x04 0x05 0x06 0x07 0x08 0x09 0x0A 0x0B 0x0C 0x0D 0x0E 0x0F +0xFD 0x0C 0x00 0x01 0x02 0x03 0x04 0x05 0x06 0x07 0x08 0x09 0x0A 0x0B 0x0C 0x0D 0x0E 0x0F # CHECK: v8x16.shuffle 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 -0xFD 0x03 0x00 0x01 0x02 0x03 0x04 0x05 0x06 0x07 0x08 0x09 0x0A 0x0B 0x0C 0x0D 0x0E 0x0F +0xFD 0x0D 0x00 0x01 0x02 0x03 0x04 0x05 0x06 0x07 0x08 0x09 0x0A 0x0B 0x0C 0x0D 0x0E 0x0F # Check LEB128 encoding of SIMD instructions -# CHECK: i64x2.all_true -0xFD 0x86 0x01 +# CHECK: i16x8.all_true +0xFD 0x83 0x01 # Including non-canonical LEB128 encodings -# CHECK: i64x2.any_true -# CHECK-NOT: i64.div_u -0xFD 0x85 0x81 0x80 0x80 0x80 0x80 0x00 +# CHECK: i16x8.any_true +# CHECK-NOT: i16x8.neg +0xFD 0x82 0x81 0x80 0x80 0x80 0x80 0x00 # Check br_table, which has its own operand type. 
# CHECK: br_table {0, 1, 2} diff --git a/llvm/test/MC/WebAssembly/simd-encodings.s b/llvm/test/MC/WebAssembly/simd-encodings.s index e40de53354704..cd5a002de412a 100644 --- a/llvm/test/MC/WebAssembly/simd-encodings.s +++ b/llvm/test/MC/WebAssembly/simd-encodings.s @@ -6,17 +6,47 @@ main: # CHECK: v128.load 48 # encoding: [0xfd,0x00,0x04,0x30] v128.load 48 - # CHECK: v128.store 48 # encoding: [0xfd,0x01,0x04,0x30] + # CHECK: i16x8.load8x8_s 32 # encoding: [0xfd,0x01,0x03,0x20] + i16x8.load8x8_s 32 + + # CHECK: i16x8.load8x8_u 32 # encoding: [0xfd,0x02,0x03,0x20] + i16x8.load8x8_u 32 + + # CHECK: i32x4.load16x4_s 32 # encoding: [0xfd,0x03,0x03,0x20] + i32x4.load16x4_s 32 + + # CHECK: i32x4.load16x4_u 32 # encoding: [0xfd,0x04,0x03,0x20] + i32x4.load16x4_u 32 + + # CHECK: i64x2.load32x2_s 32 # encoding: [0xfd,0x05,0x03,0x20] + i64x2.load32x2_s 32 + + # CHECK: i64x2.load32x2_u 32 # encoding: [0xfd,0x06,0x03,0x20] + i64x2.load32x2_u 32 + + # CHECK: v8x16.load_splat 48 # encoding: [0xfd,0x07,0x00,0x30] + v8x16.load_splat 48 + + # CHECK: v16x8.load_splat 48 # encoding: [0xfd,0x08,0x01,0x30] + v16x8.load_splat 48 + + # CHECK: v32x4.load_splat 48 # encoding: [0xfd,0x09,0x02,0x30] + v32x4.load_splat 48 + + # CHECK: v64x2.load_splat 48 # encoding: [0xfd,0x0a,0x03,0x30] + v64x2.load_splat 48 + + # CHECK: v128.store 48 # encoding: [0xfd,0x0b,0x04,0x30] v128.store 48 # CHECK: v128.const 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 - # CHECK-SAME: # encoding: [0xfd,0x02, + # CHECK-SAME: # encoding: [0xfd,0x0c, # CHECK-SAME: 0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07, # CHECK-SAME: 0x08,0x09,0x0a,0x0b,0x0c,0x0d,0x0e,0x0f] v128.const 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 # CHECK: v128.const 256, 770, 1284, 1798, 2312, 2826, 3340, 3854 - # CHECK-SAME: # encoding: [0xfd,0x02, + # CHECK-SAME: # encoding: [0xfd,0x0c, # CHECK-SAME: 0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07, # CHECK-SAME: 0x08,0x09,0x0a,0x0b,0x0c,0x0d,0x0e,0x0f] v128.const 256, 770, 1284, 1798, 2312, 2826, 3340, 3854 @@ -25,559 +55,526 @@ main: # CHECK: v128.const 0x1.0402p-121, 0x1.0c0a08p-113, # CHECK-SAME: 0x1.14121p-105, 0x1.1c1a18p-97 - # CHECK-SAME: # encoding: [0xfd,0x02, + # CHECK-SAME: # encoding: [0xfd,0x0c, # CHECK-SAME: 0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07, # CHECK-SAME: 0x08,0x09,0x0a,0x0b,0x0c,0x0d,0x0e,0x0f] v128.const 0x1.0402p-121, 0x1.0c0a08p-113, 0x1.14121p-105, 0x1.1c1a18p-97 # CHECK: v128.const 0x1.60504030201p-911, 0x1.e0d0c0b0a0908p-783 - # CHECK-SAME: # encoding: [0xfd,0x02, + # CHECK-SAME: # encoding: [0xfd,0x0c, # CHECK-SAME: 0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07, # CHECK-SAME: 0x08,0x09,0x0a,0x0b,0x0c,0x0d,0x0e,0x0f] v128.const 0x1.60504030201p-911, 0x1.e0d0c0b0a0908p-783 # CHECK: v8x16.shuffle 0, 17, 2, 19, 4, 21, 6, 23, # CHECK-SAME: 8, 25, 10, 27, 12, 29, 14, 31 - # CHECK-SAME: # encoding: [0xfd,0x03, + # CHECK-SAME: # encoding: [0xfd,0x0d, # CHECK-SAME: 0x00,0x11,0x02,0x13,0x04,0x15,0x06,0x17, # CHECK-SAME: 0x08,0x19,0x0a,0x1b,0x0c,0x1d,0x0e,0x1f] v8x16.shuffle 0, 17, 2, 19, 4, 21, 6, 23, 8, 25, 10, 27, 12, 29, 14, 31 - # CHECK: i8x16.splat # encoding: [0xfd,0x04] + # CHECK: v8x16.swizzle # encoding: [0xfd,0x0e] + v8x16.swizzle + + # CHECK: i8x16.splat # encoding: [0xfd,0x0f] i8x16.splat - # CHECK: i8x16.extract_lane_s 15 # encoding: [0xfd,0x05,0x0f] + # CHECK: i16x8.splat # encoding: [0xfd,0x10] + i16x8.splat + + # CHECK: i32x4.splat # encoding: [0xfd,0x11] + i32x4.splat + + # CHECK: i64x2.splat # encoding: [0xfd,0x12] + i64x2.splat + + # CHECK: f32x4.splat # encoding: [0xfd,0x13] + 
f32x4.splat + + # CHECK: f64x2.splat # encoding: [0xfd,0x14] + f64x2.splat + + # CHECK: i8x16.extract_lane_s 15 # encoding: [0xfd,0x15,0x0f] i8x16.extract_lane_s 15 - # CHECK: i8x16.extract_lane_u 15 # encoding: [0xfd,0x06,0x0f] + # CHECK: i8x16.extract_lane_u 15 # encoding: [0xfd,0x16,0x0f] i8x16.extract_lane_u 15 - # CHECK: i8x16.replace_lane 15 # encoding: [0xfd,0x07,0x0f] + # CHECK: i8x16.replace_lane 15 # encoding: [0xfd,0x17,0x0f] i8x16.replace_lane 15 - # CHECK: i16x8.splat # encoding: [0xfd,0x08] - i16x8.splat - - # CHECK: i16x8.extract_lane_s 7 # encoding: [0xfd,0x09,0x07] + # CHECK: i16x8.extract_lane_s 7 # encoding: [0xfd,0x18,0x07] i16x8.extract_lane_s 7 - # CHECK: i16x8.extract_lane_u 7 # encoding: [0xfd,0x0a,0x07] + # CHECK: i16x8.extract_lane_u 7 # encoding: [0xfd,0x19,0x07] i16x8.extract_lane_u 7 - # CHECK: i16x8.replace_lane 7 # encoding: [0xfd,0x0b,0x07] + # CHECK: i16x8.replace_lane 7 # encoding: [0xfd,0x1a,0x07] i16x8.replace_lane 7 - # CHECK: i32x4.splat # encoding: [0xfd,0x0c] - i32x4.splat - - # CHECK: i32x4.extract_lane 3 # encoding: [0xfd,0x0d,0x03] + # CHECK: i32x4.extract_lane 3 # encoding: [0xfd,0x1b,0x03] i32x4.extract_lane 3 - # CHECK: i32x4.replace_lane 3 # encoding: [0xfd,0x0e,0x03] + # CHECK: i32x4.replace_lane 3 # encoding: [0xfd,0x1c,0x03] i32x4.replace_lane 3 - # CHECK: i64x2.splat # encoding: [0xfd,0x0f] - i64x2.splat - - # CHECK: i64x2.extract_lane 1 # encoding: [0xfd,0x10,0x01] + # CHECK: i64x2.extract_lane 1 # encoding: [0xfd,0x1d,0x01] i64x2.extract_lane 1 - # CHECK: i64x2.replace_lane 1 # encoding: [0xfd,0x11,0x01] + # CHECK: i64x2.replace_lane 1 # encoding: [0xfd,0x1e,0x01] i64x2.replace_lane 1 - # CHECK: f32x4.splat # encoding: [0xfd,0x12] - f32x4.splat - - # CHECK: f32x4.extract_lane 3 # encoding: [0xfd,0x13,0x03] + # CHECK: f32x4.extract_lane 3 # encoding: [0xfd,0x1f,0x03] f32x4.extract_lane 3 - # CHECK: f32x4.replace_lane 3 # encoding: [0xfd,0x14,0x03] + # CHECK: f32x4.replace_lane 3 # encoding: [0xfd,0x20,0x03] f32x4.replace_lane 3 - # CHECK: f64x2.splat # encoding: [0xfd,0x15] - f64x2.splat - - # CHECK: f64x2.extract_lane 1 # encoding: [0xfd,0x16,0x01] + # CHECK: f64x2.extract_lane 1 # encoding: [0xfd,0x21,0x01] f64x2.extract_lane 1 - # CHECK: f64x2.replace_lane 1 # encoding: [0xfd,0x17,0x01] + # CHECK: f64x2.replace_lane 1 # encoding: [0xfd,0x22,0x01] f64x2.replace_lane 1 - # CHECK: i8x16.eq # encoding: [0xfd,0x18] + # CHECK: i8x16.eq # encoding: [0xfd,0x23] i8x16.eq - # CHECK: i8x16.ne # encoding: [0xfd,0x19] + # CHECK: i8x16.ne # encoding: [0xfd,0x24] i8x16.ne - # CHECK: i8x16.lt_s # encoding: [0xfd,0x1a] + # CHECK: i8x16.lt_s # encoding: [0xfd,0x25] i8x16.lt_s - # CHECK: i8x16.lt_u # encoding: [0xfd,0x1b] + # CHECK: i8x16.lt_u # encoding: [0xfd,0x26] i8x16.lt_u - # CHECK: i8x16.gt_s # encoding: [0xfd,0x1c] + # CHECK: i8x16.gt_s # encoding: [0xfd,0x27] i8x16.gt_s - # CHECK: i8x16.gt_u # encoding: [0xfd,0x1d] + # CHECK: i8x16.gt_u # encoding: [0xfd,0x28] i8x16.gt_u - # CHECK: i8x16.le_s # encoding: [0xfd,0x1e] + # CHECK: i8x16.le_s # encoding: [0xfd,0x29] i8x16.le_s - # CHECK: i8x16.le_u # encoding: [0xfd,0x1f] + # CHECK: i8x16.le_u # encoding: [0xfd,0x2a] i8x16.le_u - # CHECK: i8x16.ge_s # encoding: [0xfd,0x20] + # CHECK: i8x16.ge_s # encoding: [0xfd,0x2b] i8x16.ge_s - # CHECK: i8x16.ge_u # encoding: [0xfd,0x21] + # CHECK: i8x16.ge_u # encoding: [0xfd,0x2c] i8x16.ge_u - # CHECK: i16x8.eq # encoding: [0xfd,0x22] + # CHECK: i16x8.eq # encoding: [0xfd,0x2d] i16x8.eq - # CHECK: i16x8.ne # encoding: [0xfd,0x23] + # CHECK: i16x8.ne # encoding: 
[0xfd,0x2e] i16x8.ne - # CHECK: i16x8.lt_s # encoding: [0xfd,0x24] + # CHECK: i16x8.lt_s # encoding: [0xfd,0x2f] i16x8.lt_s - # CHECK: i16x8.lt_u # encoding: [0xfd,0x25] + # CHECK: i16x8.lt_u # encoding: [0xfd,0x30] i16x8.lt_u - # CHECK: i16x8.gt_s # encoding: [0xfd,0x26] + # CHECK: i16x8.gt_s # encoding: [0xfd,0x31] i16x8.gt_s - # CHECK: i16x8.gt_u # encoding: [0xfd,0x27] + # CHECK: i16x8.gt_u # encoding: [0xfd,0x32] i16x8.gt_u - # CHECK: i16x8.le_s # encoding: [0xfd,0x28] + # CHECK: i16x8.le_s # encoding: [0xfd,0x33] i16x8.le_s - # CHECK: i16x8.le_u # encoding: [0xfd,0x29] + # CHECK: i16x8.le_u # encoding: [0xfd,0x34] i16x8.le_u - # CHECK: i16x8.ge_s # encoding: [0xfd,0x2a] + # CHECK: i16x8.ge_s # encoding: [0xfd,0x35] i16x8.ge_s - # CHECK: i16x8.ge_u # encoding: [0xfd,0x2b] + # CHECK: i16x8.ge_u # encoding: [0xfd,0x36] i16x8.ge_u - # CHECK: i32x4.eq # encoding: [0xfd,0x2c] + # CHECK: i32x4.eq # encoding: [0xfd,0x37] i32x4.eq - # CHECK: i32x4.ne # encoding: [0xfd,0x2d] + # CHECK: i32x4.ne # encoding: [0xfd,0x38] i32x4.ne - # CHECK: i32x4.lt_s # encoding: [0xfd,0x2e] + # CHECK: i32x4.lt_s # encoding: [0xfd,0x39] i32x4.lt_s - # CHECK: i32x4.lt_u # encoding: [0xfd,0x2f] + # CHECK: i32x4.lt_u # encoding: [0xfd,0x3a] i32x4.lt_u - # CHECK: i32x4.gt_s # encoding: [0xfd,0x30] + # CHECK: i32x4.gt_s # encoding: [0xfd,0x3b] i32x4.gt_s - # CHECK: i32x4.gt_u # encoding: [0xfd,0x31] + # CHECK: i32x4.gt_u # encoding: [0xfd,0x3c] i32x4.gt_u - # CHECK: i32x4.le_s # encoding: [0xfd,0x32] + # CHECK: i32x4.le_s # encoding: [0xfd,0x3d] i32x4.le_s - # CHECK: i32x4.le_u # encoding: [0xfd,0x33] + # CHECK: i32x4.le_u # encoding: [0xfd,0x3e] i32x4.le_u - # CHECK: i32x4.ge_s # encoding: [0xfd,0x34] + # CHECK: i32x4.ge_s # encoding: [0xfd,0x3f] i32x4.ge_s - # CHECK: i32x4.ge_u # encoding: [0xfd,0x35] + # CHECK: i32x4.ge_u # encoding: [0xfd,0x40] i32x4.ge_u - # CHECK: f32x4.eq # encoding: [0xfd,0x40] + # CHECK: f32x4.eq # encoding: [0xfd,0x41] f32x4.eq - # CHECK: f32x4.ne # encoding: [0xfd,0x41] + # CHECK: f32x4.ne # encoding: [0xfd,0x42] f32x4.ne - # CHECK: f32x4.lt # encoding: [0xfd,0x42] + # CHECK: f32x4.lt # encoding: [0xfd,0x43] f32x4.lt - # CHECK: f32x4.gt # encoding: [0xfd,0x43] + # CHECK: f32x4.gt # encoding: [0xfd,0x44] f32x4.gt - # CHECK: f32x4.le # encoding: [0xfd,0x44] + # CHECK: f32x4.le # encoding: [0xfd,0x45] f32x4.le - # CHECK: f32x4.ge # encoding: [0xfd,0x45] + # CHECK: f32x4.ge # encoding: [0xfd,0x46] f32x4.ge - # CHECK: f64x2.eq # encoding: [0xfd,0x46] + # CHECK: f64x2.eq # encoding: [0xfd,0x47] f64x2.eq - # CHECK: f64x2.ne # encoding: [0xfd,0x47] + # CHECK: f64x2.ne # encoding: [0xfd,0x48] f64x2.ne - # CHECK: f64x2.lt # encoding: [0xfd,0x48] + # CHECK: f64x2.lt # encoding: [0xfd,0x49] f64x2.lt - # CHECK: f64x2.gt # encoding: [0xfd,0x49] + # CHECK: f64x2.gt # encoding: [0xfd,0x4a] f64x2.gt - # CHECK: f64x2.le # encoding: [0xfd,0x4a] + # CHECK: f64x2.le # encoding: [0xfd,0x4b] f64x2.le - # CHECK: f64x2.ge # encoding: [0xfd,0x4b] + # CHECK: f64x2.ge # encoding: [0xfd,0x4c] f64x2.ge - # CHECK: v128.not # encoding: [0xfd,0x4c] + # CHECK: v128.not # encoding: [0xfd,0x4d] v128.not - # CHECK: v128.and # encoding: [0xfd,0x4d] + # CHECK: v128.and # encoding: [0xfd,0x4e] v128.and - # CHECK: v128.or # encoding: [0xfd,0x4e] + # CHECK: v128.andnot # encoding: [0xfd,0x4f] + v128.andnot + + # CHECK: v128.or # encoding: [0xfd,0x50] v128.or - # CHECK: v128.xor # encoding: [0xfd,0x4f] + # CHECK: v128.xor # encoding: [0xfd,0x51] v128.xor - # CHECK: v128.bitselect # encoding: [0xfd,0x50] + # CHECK: v128.bitselect # 
encoding: [0xfd,0x52] v128.bitselect - # CHECK: i8x16.neg # encoding: [0xfd,0x51] + # CHECK: i8x16.abs # encoding: [0xfd,0x60] + i8x16.abs + + # CHECK: i8x16.neg # encoding: [0xfd,0x61] i8x16.neg - # CHECK: i8x16.any_true # encoding: [0xfd,0x52] + # CHECK: i8x16.any_true # encoding: [0xfd,0x62] i8x16.any_true - # CHECK: i8x16.all_true # encoding: [0xfd,0x53] + # CHECK: i8x16.all_true # encoding: [0xfd,0x63] i8x16.all_true - # CHECK: i8x16.shl # encoding: [0xfd,0x54] + # CHECK: i8x16.narrow_i16x8_s # encoding: [0xfd,0x65] + i8x16.narrow_i16x8_s + + # CHECK: i8x16.narrow_i16x8_u # encoding: [0xfd,0x66] + i8x16.narrow_i16x8_u + + # CHECK: i8x16.shl # encoding: [0xfd,0x6b] i8x16.shl - # CHECK: i8x16.shr_s # encoding: [0xfd,0x55] + # CHECK: i8x16.shr_s # encoding: [0xfd,0x6c] i8x16.shr_s - # CHECK: i8x16.shr_u # encoding: [0xfd,0x56] + # CHECK: i8x16.shr_u # encoding: [0xfd,0x6d] i8x16.shr_u - # CHECK: i8x16.add # encoding: [0xfd,0x57] + # CHECK: i8x16.add # encoding: [0xfd,0x6e] i8x16.add - # CHECK: i8x16.add_saturate_s # encoding: [0xfd,0x58] + # CHECK: i8x16.add_saturate_s # encoding: [0xfd,0x6f] i8x16.add_saturate_s - # CHECK: i8x16.add_saturate_u # encoding: [0xfd,0x59] + # CHECK: i8x16.add_saturate_u # encoding: [0xfd,0x70] i8x16.add_saturate_u - # CHECK: i8x16.sub # encoding: [0xfd,0x5a] + # CHECK: i8x16.sub # encoding: [0xfd,0x71] i8x16.sub - # CHECK: i8x16.sub_saturate_s # encoding: [0xfd,0x5b] + # CHECK: i8x16.sub_saturate_s # encoding: [0xfd,0x72] i8x16.sub_saturate_s - # CHECK: i8x16.sub_saturate_u # encoding: [0xfd,0x5c] + # CHECK: i8x16.sub_saturate_u # encoding: [0xfd,0x73] i8x16.sub_saturate_u - # CHECK: i8x16.mul # encoding: [0xfd,0x5d] + # CHECK: i8x16.mul # encoding: [0xfd,0x75] i8x16.mul - # CHECK: i8x16.min_s # encoding: [0xfd,0x5e] + # CHECK: i8x16.min_s # encoding: [0xfd,0x76] i8x16.min_s - # CHECK: i8x16.min_u # encoding: [0xfd,0x5f] + # CHECK: i8x16.min_u # encoding: [0xfd,0x77] i8x16.min_u - # CHECK: i8x16.max_s # encoding: [0xfd,0x60] + # CHECK: i8x16.max_s # encoding: [0xfd,0x78] i8x16.max_s - # CHECK: i8x16.max_u # encoding: [0xfd,0x61] + # CHECK: i8x16.max_u # encoding: [0xfd,0x79] i8x16.max_u - # CHECK: i16x8.neg # encoding: [0xfd,0x62] + # CHECK: i8x16.avgr_u # encoding: [0xfd,0x7b] + i8x16.avgr_u + + # CHECK: i16x8.abs # encoding: [0xfd,0x80,0x01] + i16x8.abs + + # CHECK: i16x8.neg # encoding: [0xfd,0x81,0x01] i16x8.neg - # CHECK: i16x8.any_true # encoding: [0xfd,0x63] + # CHECK: i16x8.any_true # encoding: [0xfd,0x82,0x01] i16x8.any_true - # CHECK: i16x8.all_true # encoding: [0xfd,0x64] + # CHECK: i16x8.all_true # encoding: [0xfd,0x83,0x01] i16x8.all_true - # CHECK: i16x8.shl # encoding: [0xfd,0x65] + # CHECK: i16x8.narrow_i32x4_s # encoding: [0xfd,0x85,0x01] + i16x8.narrow_i32x4_s + + # CHECK: i16x8.narrow_i32x4_u # encoding: [0xfd,0x86,0x01] + i16x8.narrow_i32x4_u + + # CHECK: i16x8.widen_low_i8x16_s # encoding: [0xfd,0x87,0x01] + i16x8.widen_low_i8x16_s + + # CHECK: i16x8.widen_high_i8x16_s # encoding: [0xfd,0x88,0x01] + i16x8.widen_high_i8x16_s + + # CHECK: i16x8.widen_low_i8x16_u # encoding: [0xfd,0x89,0x01] + i16x8.widen_low_i8x16_u + + # CHECK: i16x8.widen_high_i8x16_u # encoding: [0xfd,0x8a,0x01] + i16x8.widen_high_i8x16_u + + # CHECK: i16x8.shl # encoding: [0xfd,0x8b,0x01] i16x8.shl - # CHECK: i16x8.shr_s # encoding: [0xfd,0x66] + # CHECK: i16x8.shr_s # encoding: [0xfd,0x8c,0x01] i16x8.shr_s - # CHECK: i16x8.shr_u # encoding: [0xfd,0x67] + # CHECK: i16x8.shr_u # encoding: [0xfd,0x8d,0x01] i16x8.shr_u - # CHECK: i16x8.add # encoding: [0xfd,0x68] + # 
CHECK: i16x8.add # encoding: [0xfd,0x8e,0x01] i16x8.add - # CHECK: i16x8.add_saturate_s # encoding: [0xfd,0x69] + # CHECK: i16x8.add_saturate_s # encoding: [0xfd,0x8f,0x01] i16x8.add_saturate_s - # CHECK: i16x8.add_saturate_u # encoding: [0xfd,0x6a] + # CHECK: i16x8.add_saturate_u # encoding: [0xfd,0x90,0x01] i16x8.add_saturate_u - # CHECK: i16x8.sub # encoding: [0xfd,0x6b] + # CHECK: i16x8.sub # encoding: [0xfd,0x91,0x01] i16x8.sub - # CHECK: i16x8.sub_saturate_s # encoding: [0xfd,0x6c] + # CHECK: i16x8.sub_saturate_s # encoding: [0xfd,0x92,0x01] i16x8.sub_saturate_s - # CHECK: i16x8.sub_saturate_u # encoding: [0xfd,0x6d] + # CHECK: i16x8.sub_saturate_u # encoding: [0xfd,0x93,0x01] i16x8.sub_saturate_u - # CHECK: i16x8.mul # encoding: [0xfd,0x6e] + # CHECK: i16x8.mul # encoding: [0xfd,0x95,0x01] i16x8.mul - # CHECK: i16x8.min_s # encoding: [0xfd,0x6f] + # CHECK: i16x8.min_s # encoding: [0xfd,0x96,0x01] i16x8.min_s - # CHECK: i16x8.min_u # encoding: [0xfd,0x70] + # CHECK: i16x8.min_u # encoding: [0xfd,0x97,0x01] i16x8.min_u - # CHECK: i16x8.max_s # encoding: [0xfd,0x71] + # CHECK: i16x8.max_s # encoding: [0xfd,0x98,0x01] i16x8.max_s - # CHECK: i16x8.max_u # encoding: [0xfd,0x72] + # CHECK: i16x8.max_u # encoding: [0xfd,0x99,0x01] i16x8.max_u - # CHECK: i32x4.neg # encoding: [0xfd,0x73] + # CHECK: i16x8.avgr_u # encoding: [0xfd,0x9b,0x01] + i16x8.avgr_u + + # CHECK: i32x4.abs # encoding: [0xfd,0xa0,0x01] + i32x4.abs + + # CHECK: i32x4.neg # encoding: [0xfd,0xa1,0x01] i32x4.neg - # CHECK: i32x4.any_true # encoding: [0xfd,0x74] + # CHECK: i32x4.any_true # encoding: [0xfd,0xa2,0x01] i32x4.any_true - # CHECK: i32x4.all_true # encoding: [0xfd,0x75] + # CHECK: i32x4.all_true # encoding: [0xfd,0xa3,0x01] i32x4.all_true - # CHECK: i32x4.shl # encoding: [0xfd,0x76] + # CHECK: i32x4.widen_low_i16x8_s # encoding: [0xfd,0xa7,0x01] + i32x4.widen_low_i16x8_s + + # CHECK: i32x4.widen_high_i16x8_s # encoding: [0xfd,0xa8,0x01] + i32x4.widen_high_i16x8_s + + # CHECK: i32x4.widen_low_i16x8_u # encoding: [0xfd,0xa9,0x01] + i32x4.widen_low_i16x8_u + + # CHECK: i32x4.widen_high_i16x8_u # encoding: [0xfd,0xaa,0x01] + i32x4.widen_high_i16x8_u + + # CHECK: i32x4.shl # encoding: [0xfd,0xab,0x01] i32x4.shl - # CHECK: i32x4.shr_s # encoding: [0xfd,0x77] + # CHECK: i32x4.shr_s # encoding: [0xfd,0xac,0x01] i32x4.shr_s - # CHECK: i32x4.shr_u # encoding: [0xfd,0x78] + # CHECK: i32x4.shr_u # encoding: [0xfd,0xad,0x01] i32x4.shr_u - # CHECK: i32x4.add # encoding: [0xfd,0x79] + # CHECK: i32x4.add # encoding: [0xfd,0xae,0x01] i32x4.add - # CHECK: i32x4.sub # encoding: [0xfd,0x7c] + # CHECK: i32x4.sub # encoding: [0xfd,0xb1,0x01] i32x4.sub - # CHECK: i32x4.mul # encoding: [0xfd,0x7f] + # CHECK: i32x4.dot_i16x8_s # encoding: [0xfd,0xb4,0x01] + i32x4.dot_i16x8_s + + # CHECK: i32x4.mul # encoding: [0xfd,0xb5,0x01] i32x4.mul - # CHECK: i32x4.min_s # encoding: [0xfd,0x80,0x01] + # CHECK: i32x4.min_s # encoding: [0xfd,0xb6,0x01] i32x4.min_s - # CHECK: i32x4.min_u # encoding: [0xfd,0x81,0x01] + # CHECK: i32x4.min_u # encoding: [0xfd,0xb7,0x01] i32x4.min_u - # CHECK: i32x4.max_s # encoding: [0xfd,0x82,0x01] + # CHECK: i32x4.max_s # encoding: [0xfd,0xb8,0x01] i32x4.max_s - # CHECK: i32x4.max_u # encoding: [0xfd,0x83,0x01] + # CHECK: i32x4.max_u # encoding: [0xfd,0xb9,0x01] i32x4.max_u - # CHECK: i64x2.neg # encoding: [0xfd,0x84,0x01] + # CHECK: i64x2.neg # encoding: [0xfd,0xc1,0x01] i64x2.neg - # CHECK: i64x2.any_true # encoding: [0xfd,0x85,0x01] + # CHECK: i64x2.any_true # encoding: [0xfd,0xc2,0x01] i64x2.any_true - # CHECK: 
i64x2.all_true # encoding: [0xfd,0x86,0x01] + # CHECK: i64x2.all_true # encoding: [0xfd,0xc3,0x01] i64x2.all_true - # CHECK: i64x2.shl # encoding: [0xfd,0x87,0x01] + # CHECK: i64x2.shl # encoding: [0xfd,0xcb,0x01] i64x2.shl - # CHECK: i64x2.shr_s # encoding: [0xfd,0x88,0x01] + # CHECK: i64x2.shr_s # encoding: [0xfd,0xcc,0x01] i64x2.shr_s - # CHECK: i64x2.shr_u # encoding: [0xfd,0x89,0x01] + # CHECK: i64x2.shr_u # encoding: [0xfd,0xcd,0x01] i64x2.shr_u - # CHECK: i64x2.add # encoding: [0xfd,0x8a,0x01] + # CHECK: i64x2.add # encoding: [0xfd,0xce,0x01] i64x2.add - # CHECK: i64x2.sub # encoding: [0xfd,0x8d,0x01] + # CHECK: i64x2.sub # encoding: [0xfd,0xd1,0x01] i64x2.sub - # CHECK: f32x4.abs # encoding: [0xfd,0x95,0x01] + # CHECK: f32x4.abs # encoding: [0xfd,0xe0,0x01] f32x4.abs - # CHECK: f32x4.neg # encoding: [0xfd,0x96,0x01] + # CHECK: f32x4.neg # encoding: [0xfd,0xe1,0x01] f32x4.neg - # CHECK: f32x4.sqrt # encoding: [0xfd,0x97,0x01] + # CHECK: f32x4.sqrt # encoding: [0xfd,0xe3,0x01] f32x4.sqrt - # CHECK: f32x4.qfma # encoding: [0xfd,0x98,0x01] - f32x4.qfma - - # CHECK: f32x4.qfms # encoding: [0xfd,0x99,0x01] - f32x4.qfms - - # CHECK: f32x4.add # encoding: [0xfd,0x9a,0x01] + # CHECK: f32x4.add # encoding: [0xfd,0xe4,0x01] f32x4.add - # CHECK: f32x4.sub # encoding: [0xfd,0x9b,0x01] + # CHECK: f32x4.sub # encoding: [0xfd,0xe5,0x01] f32x4.sub - # CHECK: f32x4.mul # encoding: [0xfd,0x9c,0x01] + # CHECK: f32x4.mul # encoding: [0xfd,0xe6,0x01] f32x4.mul - # CHECK: f32x4.div # encoding: [0xfd,0x9d,0x01] + # CHECK: f32x4.div # encoding: [0xfd,0xe7,0x01] f32x4.div - # CHECK: f32x4.min # encoding: [0xfd,0x9e,0x01] + # CHECK: f32x4.min # encoding: [0xfd,0xe8,0x01] f32x4.min - # CHECK: f32x4.max # encoding: [0xfd,0x9f,0x01] + # CHECK: f32x4.max # encoding: [0xfd,0xe9,0x01] f32x4.max - # CHECK: f64x2.abs # encoding: [0xfd,0xa0,0x01] + # CHECK: f64x2.abs # encoding: [0xfd,0xec,0x01] f64x2.abs - # CHECK: f64x2.neg # encoding: [0xfd,0xa1,0x01] + # CHECK: f64x2.neg # encoding: [0xfd,0xed,0x01] f64x2.neg - # CHECK: f64x2.sqrt # encoding: [0xfd,0xa2,0x01] + # CHECK: f64x2.sqrt # encoding: [0xfd,0xef,0x01] f64x2.sqrt - # CHECK: f64x2.qfma # encoding: [0xfd,0xa3,0x01] - f64x2.qfma - - # CHECK: f64x2.qfms # encoding: [0xfd,0xa4,0x01] - f64x2.qfms - - # CHECK: f64x2.add # encoding: [0xfd,0xa5,0x01] + # CHECK: f64x2.add # encoding: [0xfd,0xf0,0x01] f64x2.add - # CHECK: f64x2.sub # encoding: [0xfd,0xa6,0x01] + # CHECK: f64x2.sub # encoding: [0xfd,0xf1,0x01] f64x2.sub - # CHECK: f64x2.mul # encoding: [0xfd,0xa7,0x01] + # CHECK: f64x2.mul # encoding: [0xfd,0xf2,0x01] f64x2.mul - # CHECK: f64x2.div # encoding: [0xfd,0xa8,0x01] + # CHECK: f64x2.div # encoding: [0xfd,0xf3,0x01] f64x2.div - # CHECK: f64x2.min # encoding: [0xfd,0xa9,0x01] + # CHECK: f64x2.min # encoding: [0xfd,0xf4,0x01] f64x2.min - # CHECK: f64x2.max # encoding: [0xfd,0xaa,0x01] + # CHECK: f64x2.max # encoding: [0xfd,0xf5,0x01] f64x2.max - # CHECK: i32x4.trunc_sat_f32x4_s # encoding: [0xfd,0xab,0x01] + # CHECK: i32x4.trunc_sat_f32x4_s # encoding: [0xfd,0xf8,0x01] i32x4.trunc_sat_f32x4_s - # CHECK: i32x4.trunc_sat_f32x4_u # encoding: [0xfd,0xac,0x01] + # CHECK: i32x4.trunc_sat_f32x4_u # encoding: [0xfd,0xf9,0x01] i32x4.trunc_sat_f32x4_u - # CHECK: i64x2.trunc_sat_f64x2_s # encoding: [0xfd,0xad,0x01] - i64x2.trunc_sat_f64x2_s - - # CHECK: i64x2.trunc_sat_f64x2_u # encoding: [0xfd,0xae,0x01] - i64x2.trunc_sat_f64x2_u - - # CHECK: f32x4.convert_i32x4_s # encoding: [0xfd,0xaf,0x01] + # CHECK: f32x4.convert_i32x4_s # encoding: [0xfd,0xfa,0x01] 
f32x4.convert_i32x4_s - # CHECK: f32x4.convert_i32x4_u # encoding: [0xfd,0xb0,0x01] + # CHECK: f32x4.convert_i32x4_u # encoding: [0xfd,0xfb,0x01] f32x4.convert_i32x4_u - # CHECK: f64x2.convert_i64x2_s # encoding: [0xfd,0xb1,0x01] - f64x2.convert_i64x2_s - - # CHECK: f64x2.convert_i64x2_u # encoding: [0xfd,0xb2,0x01] - f64x2.convert_i64x2_u - - # CHECK: v8x16.swizzle # encoding: [0xfd,0xc0,0x01] - v8x16.swizzle - - # CHECK: v8x16.load_splat 48 # encoding: [0xfd,0xc2,0x01,0x00,0x30] - v8x16.load_splat 48 - - # CHECK: v16x8.load_splat 48 # encoding: [0xfd,0xc3,0x01,0x01,0x30] - v16x8.load_splat 48 - - # CHECK: v32x4.load_splat 48 # encoding: [0xfd,0xc4,0x01,0x02,0x30] - v32x4.load_splat 48 - - # CHECK: v64x2.load_splat 48 # encoding: [0xfd,0xc5,0x01,0x03,0x30] - v64x2.load_splat 48 - - # CHECK: i8x16.narrow_i16x8_s # encoding: [0xfd,0xc6,0x01] - i8x16.narrow_i16x8_s - - # CHECK: i8x16.narrow_i16x8_u # encoding: [0xfd,0xc7,0x01] - i8x16.narrow_i16x8_u - - # CHECK: i16x8.narrow_i32x4_s # encoding: [0xfd,0xc8,0x01] - i16x8.narrow_i32x4_s - - # CHECK: i16x8.narrow_i32x4_u # encoding: [0xfd,0xc9,0x01] - i16x8.narrow_i32x4_u - - # CHECK: i16x8.widen_low_i8x16_s # encoding: [0xfd,0xca,0x01] - i16x8.widen_low_i8x16_s - - # CHECK: i16x8.widen_high_i8x16_s # encoding: [0xfd,0xcb,0x01] - i16x8.widen_high_i8x16_s - - # CHECK: i16x8.widen_low_i8x16_u # encoding: [0xfd,0xcc,0x01] - i16x8.widen_low_i8x16_u - - # CHECK: i16x8.widen_high_i8x16_u # encoding: [0xfd,0xcd,0x01] - i16x8.widen_high_i8x16_u - - # CHECK: i32x4.widen_low_i16x8_s # encoding: [0xfd,0xce,0x01] - i32x4.widen_low_i16x8_s - - # CHECK: i32x4.widen_high_i16x8_s # encoding: [0xfd,0xcf,0x01] - i32x4.widen_high_i16x8_s - - # CHECK: i32x4.widen_low_i16x8_u # encoding: [0xfd,0xd0,0x01] - i32x4.widen_low_i16x8_u - - # CHECK: i32x4.widen_high_i16x8_u # encoding: [0xfd,0xd1,0x01] - i32x4.widen_high_i16x8_u - - # CHECK: i16x8.load8x8_s 32 # encoding: [0xfd,0xd2,0x01,0x03,0x20] - i16x8.load8x8_s 32 - - # CHECK: i16x8.load8x8_u 32 # encoding: [0xfd,0xd3,0x01,0x03,0x20] - i16x8.load8x8_u 32 - - # CHECK: i32x4.load16x4_s 32 # encoding: [0xfd,0xd4,0x01,0x03,0x20] - i32x4.load16x4_s 32 - - # CHECK: i32x4.load16x4_u 32 # encoding: [0xfd,0xd5,0x01,0x03,0x20] - i32x4.load16x4_u 32 - - # CHECK: i64x2.load32x2_s 32 # encoding: [0xfd,0xd6,0x01,0x03,0x20] - i64x2.load32x2_s 32 - - # CHECK: i64x2.load32x2_u 32 # encoding: [0xfd,0xd7,0x01,0x03,0x20] - i64x2.load32x2_u 32 - - # CHECK: v128.andnot # encoding: [0xfd,0xd8,0x01] - v128.andnot + # CHECK: f32x4.qfma # encoding: [0xfd,0xfc,0x01] + f32x4.qfma - # CHECK: i8x16.avgr_u # encoding: [0xfd,0xd9,0x01] - i8x16.avgr_u + # CHECK: f32x4.qfms # encoding: [0xfd,0xfd,0x01] + f32x4.qfms - # CHECK: i16x8.avgr_u # encoding: [0xfd,0xda,0x01] - i16x8.avgr_u + # CHECK: f64x2.qfma # encoding: [0xfd,0xfe,0x01] + f64x2.qfma - # CHECK: i32x4.dot_i16x8_s # encoding: [0xfd,0xdb,0x01] - i32x4.dot_i16x8_s + # CHECK: f64x2.qfms # encoding: [0xfd,0xff,0x01] + f64x2.qfms end_function
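Note on the opcode bytes checked in simd-encodings.s and wasm.txt above: the index that follows the 0xfd SIMD prefix is an unsigned LEB128 value, which is why renumbered opcodes at 0x80 and above occupy two bytes (for example i16x8.abs at [0xfd,0x80,0x01] and f64x2.qfms at [0xfd,0xff,0x01]) while indices below 0x80 such as v128.store stay a single byte. The following is a minimal C sketch for cross-checking those byte sequences; encode_uleb128 and the spot-checked values are illustrative only and are not part of this patch.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Encode an unsigned 32-bit value as unsigned LEB128: 7 payload bits per
   byte, high bit set on every byte except the last. Returns the byte count. */
static size_t encode_uleb128(uint32_t value, uint8_t *out) {
  size_t n = 0;
  do {
    uint8_t byte = value & 0x7f;
    value >>= 7;
    if (value != 0)
      byte |= 0x80; /* more bytes follow */
    out[n++] = byte;
  } while (value != 0);
  return n;
}

int main(void) {
  uint8_t buf[5];

  /* Indices below 0x80 stay a single byte: v128.store is 0x0b. */
  assert(encode_uleb128(0x0b, buf) == 1 && buf[0] == 0x0b);

  /* Indices of 0x80 and above take two bytes: i16x8.abs is 0x80 -> 0x80,0x01. */
  assert(encode_uleb128(0x80, buf) == 2 && buf[0] == 0x80 && buf[1] == 0x01);

  /* f64x2.qfms is 0xff -> 0xff,0x01, matching the final check above. */
  assert(encode_uleb128(0xff, buf) == 2 && buf[0] == 0xff && buf[1] == 0x01);

  printf("LEB128 spot checks passed\n");
  return 0;
}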
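A related note on the expectations added to simd-sext-inreg.ll earlier in this patch: each lane is sign-extended with a shift-left/arithmetic-shift-right pair whose shift amount is the lane width minus the number of significant bits (e.g. 7 for i8 lanes holding an i1, 15 for i16 lanes, and so on). A scalar C sketch of that per-lane arithmetic follows; sext_inreg_i8 is a hypothetical helper, and the sketch assumes the usual two's-complement arithmetic right shift on signed values.

#include <assert.h>
#include <stdint.h>

/* Sign-extend the low `bits` bits of an i8 value: the scalar analogue of the
   i8x16.shl / i8x16.shr_s pair checked above (shift = lane width - bits). */
static int8_t sext_inreg_i8(int8_t v, int bits) {
  int shift = 8 - bits;
  return (int8_t)((int8_t)(v << shift) >> shift);
}

int main(void) {
  /* sext i1 -> i8: shl 7 then shr_s 7 turns bit 0 into 0 or -1. */
  assert(sext_inreg_i8(1, 1) == -1);
  assert(sext_inreg_i8(0, 1) == 0);
  return 0;
}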