[CLANG][AArch64] Add SVE tuple types for mfloat8_t #112687

Open

CarolineConcatto wants to merge 2 commits into main

Conversation

CarolineConcatto
Contributor

This patch adds the scalable vector tuple types for mfloat8_t, according to the ACLE [1].

[1] https://github.com/ARM-software/acle.git
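
For context, here is a minimal usage sketch of the new tuple types. It is not part of this patch: it assumes a compiler with SVE and FP8 ACLE support and that the generic svcreate2/svget2 intrinsic forms accept mfloat8 tuples (the intrinsics are specified by the ACLE but may be wired up separately; this change only adds the types).

// Hypothetical usage sketch (C++), assuming arm_sve.h exposes mfloat8 support.
#include <arm_sve.h>

svmfloat8x2_t pack_two(svmfloat8_t lo, svmfloat8_t hi) {
  return svcreate2(lo, hi);   // bundle two scalable mfloat8 vectors into an x2 tuple
}

svmfloat8_t first_half(svmfloat8x2_t t) {
  return svget2(t, 0);        // read element 0 of the tuple
}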

@llvmbot added the clang (Clang issues not falling into any other category), clang:frontend (Language frontend issues, e.g. anything involving "Sema"), and clang:modules (C++20 modules and Clang Header Modules) labels on Oct 17, 2024
@llvmbot
Collaborator

llvmbot commented Oct 17, 2024

@llvm/pr-subscribers-clang

Author: None (CarolineConcatto)

Changes

This patch adds the scalable vector tuple types for mfloat8_t, according to the ACLE [1].

[1] https://github.com/ARM-software/acle.git


Patch is 27.58 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/112687.diff

5 Files Affected:

  • (modified) clang/include/clang/Basic/AArch64SVEACLETypes.def (+6)
  • (modified) clang/include/clang/Serialization/ASTBitCodes.h (+1-1)
  • (modified) clang/test/CodeGenCXX/aarch64-mangle-sve-vectors.cpp (+85-1)
  • (modified) clang/test/CodeGenCXX/aarch64-sve-vector-init.cpp (+70)
  • (modified) clang/utils/TableGen/SveEmitter.cpp (+4)
diff --git a/clang/include/clang/Basic/AArch64SVEACLETypes.def b/clang/include/clang/Basic/AArch64SVEACLETypes.def
index 72df1e35aaec20..2b80e43b506384 100644
--- a/clang/include/clang/Basic/AArch64SVEACLETypes.def
+++ b/clang/include/clang/Basic/AArch64SVEACLETypes.def
@@ -138,6 +138,8 @@ SVE_VECTOR_TYPE_FLOAT("__clang_svfloat64x2_t", "svfloat64x2_t", SveFloat64x2, Sv
 
 SVE_VECTOR_TYPE_BFLOAT("__clang_svbfloat16x2_t", "svbfloat16x2_t", SveBFloat16x2, SveBFloat16x2Ty, 8, 16, 2)
 
+SVE_VECTOR_TYPE_INT("__clang_svmfloat8x2_t", "svmfloat8x2_t", SveMFloat8x2, SveMFloat8x2Ty, 16, 8, 2, false)
+
 //
 // x3
 //
@@ -158,6 +160,8 @@ SVE_VECTOR_TYPE_FLOAT("__clang_svfloat64x3_t", "svfloat64x3_t", SveFloat64x3, Sv
 
 SVE_VECTOR_TYPE_BFLOAT("__clang_svbfloat16x3_t", "svbfloat16x3_t", SveBFloat16x3, SveBFloat16x3Ty, 8, 16, 3)
 
+SVE_VECTOR_TYPE_INT("__clang_svmfloat8x3_t", "svmfloat8x3_t", SveMFloat8x3, SveMFloat8x3Ty, 16, 8, 3, false)
+
 //
 // x4
 //
@@ -178,6 +182,8 @@ SVE_VECTOR_TYPE_FLOAT("__clang_svfloat64x4_t", "svfloat64x4_t", SveFloat64x4, Sv
 
 SVE_VECTOR_TYPE_BFLOAT("__clang_svbfloat16x4_t", "svbfloat16x4_t", SveBFloat16x4, SveBFloat16x4Ty, 8, 16, 4)
 
+SVE_VECTOR_TYPE_INT("__clang_svmfloat8x4_t", "svmfloat8x4_t", SveMFloat8x4, SveMFloat8x4Ty, 16, 8, 4, false)
+
 SVE_PREDICATE_TYPE_ALL("__SVBool_t", "__SVBool_t", SveBool, SveBoolTy, 16, 1)
 SVE_PREDICATE_TYPE_ALL("__clang_svboolx2_t", "svboolx2_t", SveBoolx2, SveBoolx2Ty, 16, 2)
 SVE_PREDICATE_TYPE_ALL("__clang_svboolx4_t", "svboolx4_t", SveBoolx4, SveBoolx4Ty, 16, 4)
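To show how the .def entries above are consumed (an illustrative X-macro sketch only, not clang's actual macro definitions): a client defines the SVE_VECTOR_TYPE_* macros and each entry expands in place. The toy program below hard-codes the three new entries instead of including the real file.

// Illustrative only: the identifiers SveMFloat8x2 etc. are never expanded here,
// so they need no declarations; the macro just prints the geometry fields.
#include <cstdio>

#define SVE_VECTOR_TYPE_INT(Name, MangledName, Id, SingletonId, NumEls, ElBits, NF, IsSigned) \
  std::printf("%-24s %d x %d-bit elements, tuple of %d vectors\n", Name, NumEls, ElBits, NF);

int main() {
  SVE_VECTOR_TYPE_INT("__clang_svmfloat8x2_t", "svmfloat8x2_t", SveMFloat8x2, SveMFloat8x2Ty, 16, 8, 2, false)
  SVE_VECTOR_TYPE_INT("__clang_svmfloat8x3_t", "svmfloat8x3_t", SveMFloat8x3, SveMFloat8x3Ty, 16, 8, 3, false)
  SVE_VECTOR_TYPE_INT("__clang_svmfloat8x4_t", "svmfloat8x4_t", SveMFloat8x4, SveMFloat8x4Ty, 16, 8, 4, false)
}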
diff --git a/clang/include/clang/Serialization/ASTBitCodes.h b/clang/include/clang/Serialization/ASTBitCodes.h
index d735e2dcaa8c56..e397dff097652b 100644
--- a/clang/include/clang/Serialization/ASTBitCodes.h
+++ b/clang/include/clang/Serialization/ASTBitCodes.h
@@ -1149,7 +1149,7 @@ enum PredefinedTypeIDs {
 ///
 /// Type IDs for non-predefined types will start at
 /// NUM_PREDEF_TYPE_IDs.
-const unsigned NUM_PREDEF_TYPE_IDS = 506;
+const unsigned NUM_PREDEF_TYPE_IDS = 509;
 
 // Ensure we do not overrun the predefined types we reserved
 // in the enum PredefinedTypeIDs above.
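As a quick arithmetic check (illustrative C++ only, not clang source): the patch introduces three new predefined types, the x2, x3, and x4 mfloat8 tuples, so the reserved count grows from 506 to 509.

// Illustrative constants mirroring the hunk above.
constexpr unsigned OldNumPredefTypeIds = 506;
constexpr unsigned NewMFloat8TupleTypes = 3;   // svmfloat8x2_t, svmfloat8x3_t, svmfloat8x4_t
static_assert(OldNumPredefTypeIds + NewMFloat8TupleTypes == 509,
              "NUM_PREDEF_TYPE_IDS grows by one per new predefined type");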
diff --git a/clang/test/CodeGenCXX/aarch64-mangle-sve-vectors.cpp b/clang/test/CodeGenCXX/aarch64-mangle-sve-vectors.cpp
index 3f2b0622d55162..9f481e1f0f0857 100644
--- a/clang/test/CodeGenCXX/aarch64-mangle-sve-vectors.cpp
+++ b/clang/test/CodeGenCXX/aarch64-mangle-sve-vectors.cpp
@@ -59,6 +59,9 @@ void f(__clang_svbfloat16x3_t, __clang_svbfloat16x3_t);
 void f(__clang_svbfloat16x4_t, __clang_svbfloat16x4_t);
 void f(__clang_svboolx2_t, __clang_svboolx2_t);
 void f(__clang_svboolx4_t, __clang_svboolx4_t);
+void f(__clang_svmfloat8x2_t, __clang_svmfloat8x2_t);
+void f(__clang_svmfloat8x3_t, __clang_svmfloat8x3_t);
+void f(__clang_svmfloat8x4_t, __clang_svmfloat8x4_t);
 
 // CHECK-LABEL: define dso_local void @_Z3foov(
 // CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
@@ -139,6 +142,12 @@ void f(__clang_svboolx4_t, __clang_svboolx4_t);
 // CHECK-NEXT:    [[COERCE73:%.*]] = alloca { <vscale x 16 x i1>, <vscale x 16 x i1> }, align 2
 // CHECK-NEXT:    [[COERCE74:%.*]] = alloca { <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1> }, align 2
 // CHECK-NEXT:    [[COERCE75:%.*]] = alloca { <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1> }, align 2
+// CHECK-NEXT:    [[COERCE76:%.*]] = alloca { <vscale x 16 x i8>, <vscale x 16 x i8> }, align 16
+// CHECK-NEXT:    [[COERCE77:%.*]] = alloca { <vscale x 16 x i8>, <vscale x 16 x i8> }, align 16
+// CHECK-NEXT:    [[COERCE78:%.*]] = alloca { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> }, align 16
+// CHECK-NEXT:    [[COERCE79:%.*]] = alloca { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> }, align 16
+// CHECK-NEXT:    [[COERCE80:%.*]] = alloca { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> }, align 16
+// CHECK-NEXT:    [[COERCE81:%.*]] = alloca { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> }, align 16
 // CHECK-NEXT:    call void @_Z1fu10__SVInt8_tS_(<vscale x 16 x i8> zeroinitializer, <vscale x 16 x i8> zeroinitializer)
 // CHECK-NEXT:    call void @_Z1fu11__SVInt16_tS_(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x i16> zeroinitializer)
 // CHECK-NEXT:    call void @_Z1fu11__SVInt16_tS_(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x i16> zeroinitializer)
@@ -151,7 +160,7 @@ void f(__clang_svboolx4_t, __clang_svboolx4_t);
 // CHECK-NEXT:    call void @_Z1fu13__SVFloat16_tS_(<vscale x 8 x half> zeroinitializer, <vscale x 8 x half> zeroinitializer)
 // CHECK-NEXT:    call void @_Z1fu13__SVFloat32_tS_(<vscale x 4 x float> zeroinitializer, <vscale x 4 x float> zeroinitializer)
 // CHECK-NEXT:    call void @_Z1fu13__SVFloat64_tS_(<vscale x 2 x double> zeroinitializer, <vscale x 2 x double> zeroinitializer)
-// CHECK-NEXT:     call void @_Z1fu13__SVMfloat8_tS_(<vscale x 16 x i8> zeroinitializer, <vscale x 16 x i8> zeroinitializer)
+// CHECK-NEXT:    call void @_Z1fu13__SVMfloat8_tS_(<vscale x 16 x i8> zeroinitializer, <vscale x 16 x i8> zeroinitializer)
 // CHECK-NEXT:    call void @_Z1fu14__SVBfloat16_tS_(<vscale x 8 x bfloat> zeroinitializer, <vscale x 8 x bfloat> zeroinitializer)
 // CHECK-NEXT:    call void @_Z1fu10__SVBool_tS_(<vscale x 16 x i1> zeroinitializer, <vscale x 16 x i1> zeroinitializer)
 // CHECK-NEXT:    call void @_Z1fu11__SVCount_tS_(target("aarch64.svcount") zeroinitializer, target("aarch64.svcount") zeroinitializer)
@@ -573,6 +582,39 @@ void f(__clang_svboolx4_t, __clang_svboolx4_t);
 // CHECK-NEXT:    [[COERCE75_EXTRACT2:%.*]] = extractvalue { <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1> } [[COERCE75_TUPLE]], 2
 // CHECK-NEXT:    [[COERCE75_EXTRACT3:%.*]] = extractvalue { <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1> } [[COERCE75_TUPLE]], 3
 // CHECK-NEXT:    call void @_Z1f10svboolx4_tS_(<vscale x 16 x i1> [[COERCE74_EXTRACT0]], <vscale x 16 x i1> [[COERCE74_EXTRACT1]], <vscale x 16 x i1> [[COERCE74_EXTRACT2]], <vscale x 16 x i1> [[COERCE74_EXTRACT3]], <vscale x 16 x i1> [[COERCE75_EXTRACT0]], <vscale x 16 x i1> [[COERCE75_EXTRACT1]], <vscale x 16 x i1> [[COERCE75_EXTRACT2]], <vscale x 16 x i1> [[COERCE75_EXTRACT3]])
+// CHECK-NEXT:    store { <vscale x 16 x i8>, <vscale x 16 x i8> } zeroinitializer, ptr [[COERCE76]], align 16
+// CHECK-NEXT:    [[COERCE76_TUPLE:%.*]] = load { <vscale x 16 x i8>, <vscale x 16 x i8> }, ptr [[COERCE76]], align 16
+// CHECK-NEXT:    [[COERCE76_EXTRACT0:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE76_TUPLE]], 0
+// CHECK-NEXT:    [[COERCE76_EXTRACT1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE76_TUPLE]], 1
+// CHECK-NEXT:    store { <vscale x 16 x i8>, <vscale x 16 x i8> } zeroinitializer, ptr [[COERCE77]], align 16
+// CHECK-NEXT:    [[COERCE77_TUPLE:%.*]] = load { <vscale x 16 x i8>, <vscale x 16 x i8> }, ptr [[COERCE77]], align 16
+// CHECK-NEXT:    [[COERCE77_EXTRACT0:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE77_TUPLE]], 0
+// CHECK-NEXT:    [[COERCE77_EXTRACT1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE77_TUPLE]], 1
+// CHECK-NEXT:    call void @_Z1f13svmfloat8x2_tS_(<vscale x 16 x i8> [[COERCE76_EXTRACT0]], <vscale x 16 x i8> [[COERCE76_EXTRACT1]], <vscale x 16 x i8> [[COERCE77_EXTRACT0]], <vscale x 16 x i8> [[COERCE77_EXTRACT1]])
+// CHECK-NEXT:    store { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } zeroinitializer, ptr [[COERCE78]], align 16
+// CHECK-NEXT:    [[COERCE78_TUPLE:%.*]] = load { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> }, ptr [[COERCE78]], align 16
+// CHECK-NEXT:    [[COERCE78_EXTRACT0:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE78_TUPLE]], 0
+// CHECK-NEXT:    [[COERCE78_EXTRACT1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE78_TUPLE]], 1
+// CHECK-NEXT:    [[COERCE78_EXTRACT2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE78_TUPLE]], 2
+// CHECK-NEXT:    store { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } zeroinitializer, ptr [[COERCE79]], align 16
+// CHECK-NEXT:    [[COERCE79_TUPLE:%.*]] = load { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> }, ptr [[COERCE79]], align 16
+// CHECK-NEXT:    [[COERCE79_EXTRACT0:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE79_TUPLE]], 0
+// CHECK-NEXT:    [[COERCE79_EXTRACT1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE79_TUPLE]], 1
+// CHECK-NEXT:    [[COERCE79_EXTRACT2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE79_TUPLE]], 2
+// CHECK-NEXT:    call void @_Z1f13svmfloat8x3_tS_(<vscale x 16 x i8> [[COERCE78_EXTRACT0]], <vscale x 16 x i8> [[COERCE78_EXTRACT1]], <vscale x 16 x i8> [[COERCE78_EXTRACT2]], <vscale x 16 x i8> [[COERCE79_EXTRACT0]], <vscale x 16 x i8> [[COERCE79_EXTRACT1]], <vscale x 16 x i8> [[COERCE79_EXTRACT2]])
+// CHECK-NEXT:    store { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } zeroinitializer, ptr [[COERCE80]], align 16
+// CHECK-NEXT:    [[COERCE80_TUPLE:%.*]] = load { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> }, ptr [[COERCE80]], align 16
+// CHECK-NEXT:    [[COERCE80_EXTRACT0:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE80_TUPLE]], 0
+// CHECK-NEXT:    [[COERCE80_EXTRACT1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE80_TUPLE]], 1
+// CHECK-NEXT:    [[COERCE80_EXTRACT2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE80_TUPLE]], 2
+// CHECK-NEXT:    [[COERCE80_EXTRACT3:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE80_TUPLE]], 3
+// CHECK-NEXT:    store { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } zeroinitializer, ptr [[COERCE81]], align 16
+// CHECK-NEXT:    [[COERCE81_TUPLE:%.*]] = load { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> }, ptr [[COERCE81]], align 16
+// CHECK-NEXT:    [[COERCE81_EXTRACT0:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE81_TUPLE]], 0
+// CHECK-NEXT:    [[COERCE81_EXTRACT1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE81_TUPLE]], 1
+// CHECK-NEXT:    [[COERCE81_EXTRACT2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE81_TUPLE]], 2
+// CHECK-NEXT:    [[COERCE81_EXTRACT3:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE81_TUPLE]], 3
+// CHECK-NEXT:    call void @_Z1f13svmfloat8x4_tS_(<vscale x 16 x i8> [[COERCE80_EXTRACT0]], <vscale x 16 x i8> [[COERCE80_EXTRACT1]], <vscale x 16 x i8> [[COERCE80_EXTRACT2]], <vscale x 16 x i8> [[COERCE80_EXTRACT3]], <vscale x 16 x i8> [[COERCE81_EXTRACT0]], <vscale x 16 x i8> [[COERCE81_EXTRACT1]], <vscale x 16 x i8> [[COERCE81_EXTRACT2]], <vscale x 16 x i8> [[COERCE81_EXTRACT3]])
 // CHECK-NEXT:    ret void
 //
 // COMPAT_17-LABEL: define dso_local void @_Z3foov(
@@ -654,6 +696,12 @@ void f(__clang_svboolx4_t, __clang_svboolx4_t);
 // COMPAT_17-NEXT:    [[COERCE73:%.*]] = alloca { <vscale x 16 x i1>, <vscale x 16 x i1> }, align 2
 // COMPAT_17-NEXT:    [[COERCE74:%.*]] = alloca { <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1> }, align 2
 // COMPAT_17-NEXT:    [[COERCE75:%.*]] = alloca { <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1> }, align 2
+// COMPAT_17-NEXT:    [[COERCE76:%.*]] = alloca { <vscale x 16 x i8>, <vscale x 16 x i8> }, align 16
+// COMPAT_17-NEXT:    [[COERCE77:%.*]] = alloca { <vscale x 16 x i8>, <vscale x 16 x i8> }, align 16
+// COMPAT_17-NEXT:    [[COERCE78:%.*]] = alloca { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> }, align 16
+// COMPAT_17-NEXT:    [[COERCE79:%.*]] = alloca { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> }, align 16
+// COMPAT_17-NEXT:    [[COERCE80:%.*]] = alloca { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> }, align 16
+// COMPAT_17-NEXT:    [[COERCE81:%.*]] = alloca { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> }, align 16
 // COMPAT_17-NEXT:    call void @_Z1fu10__SVInt8_tu10__SVInt8_t(<vscale x 16 x i8> zeroinitializer, <vscale x 16 x i8> zeroinitializer)
 // COMPAT_17-NEXT:    call void @_Z1fu11__SVInt16_tu11__SVInt16_t(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x i16> zeroinitializer)
 // COMPAT_17-NEXT:    call void @_Z1fu11__SVInt16_tu11__SVInt16_t(<vscale x 8 x i16> zeroinitializer, <vscale x 8 x i16> zeroinitializer)
@@ -1088,6 +1136,39 @@ void f(__clang_svboolx4_t, __clang_svboolx4_t);
 // COMPAT_17-NEXT:    [[COERCE75_EXTRACT2:%.*]] = extractvalue { <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1> } [[COERCE75_TUPLE]], 2
 // COMPAT_17-NEXT:    [[COERCE75_EXTRACT3:%.*]] = extractvalue { <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1> } [[COERCE75_TUPLE]], 3
 // COMPAT_17-NEXT:    call void @_Z1f10svboolx4_t10svboolx4_t(<vscale x 16 x i1> [[COERCE74_EXTRACT0]], <vscale x 16 x i1> [[COERCE74_EXTRACT1]], <vscale x 16 x i1> [[COERCE74_EXTRACT2]], <vscale x 16 x i1> [[COERCE74_EXTRACT3]], <vscale x 16 x i1> [[COERCE75_EXTRACT0]], <vscale x 16 x i1> [[COERCE75_EXTRACT1]], <vscale x 16 x i1> [[COERCE75_EXTRACT2]], <vscale x 16 x i1> [[COERCE75_EXTRACT3]])
+// COMPAT_17-NEXT:    store { <vscale x 16 x i8>, <vscale x 16 x i8> } zeroinitializer, ptr [[COERCE76]], align 16
+// COMPAT_17-NEXT:    [[COERCE76_TUPLE:%.*]] = load { <vscale x 16 x i8>, <vscale x 16 x i8> }, ptr [[COERCE76]], align 16
+// COMPAT_17-NEXT:    [[COERCE76_EXTRACT0:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE76_TUPLE]], 0
+// COMPAT_17-NEXT:    [[COERCE76_EXTRACT1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE76_TUPLE]], 1
+// COMPAT_17-NEXT:    store { <vscale x 16 x i8>, <vscale x 16 x i8> } zeroinitializer, ptr [[COERCE77]], align 16
+// COMPAT_17-NEXT:    [[COERCE77_TUPLE:%.*]] = load { <vscale x 16 x i8>, <vscale x 16 x i8> }, ptr [[COERCE77]], align 16
+// COMPAT_17-NEXT:    [[COERCE77_EXTRACT0:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE77_TUPLE]], 0
+// COMPAT_17-NEXT:    [[COERCE77_EXTRACT1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE77_TUPLE]], 1
+// COMPAT_17-NEXT:    call void @_Z1f13svmfloat8x2_t13svmfloat8x2_t(<vscale x 16 x i8> [[COERCE76_EXTRACT0]], <vscale x 16 x i8> [[COERCE76_EXTRACT1]], <vscale x 16 x i8> [[COERCE77_EXTRACT0]], <vscale x 16 x i8> [[COERCE77_EXTRACT1]])
+// COMPAT_17-NEXT:    store { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } zeroinitializer, ptr [[COERCE78]], align 16
+// COMPAT_17-NEXT:    [[COERCE78_TUPLE:%.*]] = load { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> }, ptr [[COERCE78]], align 16
+// COMPAT_17-NEXT:    [[COERCE78_EXTRACT0:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE78_TUPLE]], 0
+// COMPAT_17-NEXT:    [[COERCE78_EXTRACT1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE78_TUPLE]], 1
+// COMPAT_17-NEXT:    [[COERCE78_EXTRACT2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE78_TUPLE]], 2
+// COMPAT_17-NEXT:    store { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } zeroinitializer, ptr [[COERCE79]], align 16
+// COMPAT_17-NEXT:    [[COERCE79_TUPLE:%.*]] = load { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> }, ptr [[COERCE79]], align 16
+// COMPAT_17-NEXT:    [[COERCE79_EXTRACT0:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE79_TUPLE]], 0
+// COMPAT_17-NEXT:    [[COERCE79_EXTRACT1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE79_TUPLE]], 1
+// COMPAT_17-NEXT:    [[COERCE79_EXTRACT2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE79_TUPLE]], 2
+// COMPAT_17-NEXT:    call void @_Z1f13svmfloat8x3_t13svmfloat8x3_t(<vscale x 16 x i8> [[COERCE78_EXTRACT0]], <vscale x 16 x i8> [[COERCE78_EXTRACT1]], <vscale x 16 x i8> [[COERCE78_EXTRACT2]], <vscale x 16 x i8> [[COERCE79_EXTRACT0]], <vscale x 16 x i8> [[COERCE79_EXTRACT1]], <vscale x 16 x i8> [[COERCE79_EXTRACT2]])
+// COMPAT_17-NEXT:    store { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } zeroinitializer, ptr [[COERCE80]], align 16
+// COMPAT_17-NEXT:    [[COERCE80_TUPLE:%.*]] = load { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> }, ptr [[COERCE80]], align 16
+// COMPAT_17-NEXT:    [[COERCE80_EXTRACT0:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE80_TUPLE]], 0
+// COMPAT_17-NEXT:    [[COERCE80_EXTRACT1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE80_TUPLE]], 1
+// COMPAT_17-NEXT:    [[COERCE80_EXTRACT2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE80_TUPLE]], 2
+// COMPAT_17-NEXT:    [[COERCE80_EXTRACT3:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE80_TUPLE]], 3
+// COMPAT_17-NEXT:    store { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } zeroinitializer, ptr [[COERCE81]], align 16
+// COMPAT_17-NEXT:    [[COERCE81_TUPLE:%.*]] = load { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> }, ptr [[COERCE81]], align 16
+// COMPAT_17-NEXT:    [[COERCE81_EXTRACT0:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE81_TUPLE]], 0
+// COMPAT_17-NEXT:    [[COERCE81_EXTRACT1:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE81_TUPLE]], 1
+// COMPAT_17-NEXT:    [[COERCE81_EXTRACT2:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE81_TUPLE]], 2
+// COMPAT_17-NEXT:    [[COERCE81_EXTRACT3:%.*]] = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } [[COERCE81_TUPLE]], 3
+// COMPAT_17-NEXT:    call void @_Z1f13svmfloat8x4_t13svmfloat8x4_t(<vscale x 16 x i8> [[COERCE80_EXTRACT0]], <vscale x 16 x i8> [[COERCE80_EXTRACT1]], <vscale x 16 x i8> [[COERCE80_EXTRACT2]], <vscale x 16 x i8> [[COERCE80_EXTRACT3]], <vscale x 16 x i8> [[COERCE81_EXTRACT0]], <vscale x 16 x i8> [[COERCE81_EXTRACT1]], <vscale x 16 x i8> [[COERCE81_EXTRACT2]], <vscale x 16 x i8> [[COERCE81_EXTRACT3]])
 // COMPAT_17-NEXT:    ret void
 //
 void foo() {
@@ -1146,4 +1227,7 @@ void foo() {
   f(__clang_svbfloat16x4_t(), __clang_svbfloat16x4_t());
   f(__clang_svboolx2_t(), __clang_svboolx2_t());
   f(__clang_svboolx4_t(), __clang_svboolx4_t());
+  f(__clang_svmfloat8x2_t(), __clang_svmfloat8x2_t());
+  f(__clang_svmfloat8x3_t(), __clang_svmfloat8x3_t());
+  f(__clang_svmfloat8x4_t(), __clang_svmfloat8x4_t());
 }
diff --git a/clang/test/CodeGenCXX/aarch64-sve-vector-init.cpp b/clang/test/CodeGenCXX/aarch64-sve-vector-init.cpp
index 45cf8081eb3a4d..f9068364d0dcbb 100644
--- a/clang/test/CodeGenCXX/aarch64-sve-vector-init.cpp
+++ b/clang/test/CodeGenCXX/aarch64-sve-vector-init.cpp
@@ -57,6 +57,9 @@
 // CHECK-NEXT:    [[B8X2:%.*]] = alloca { <vscale x 16 x i1>, <vscale x 16 x i1> }, align 2
 // CHECK-NEXT:    [[B8X4:%.*]] = alloca { <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1> }, align 2
 // CHECK-NEXT:    [[CNT:%.*]] = alloca target("aarch64.svcount"), align 2
+// CHECK-NEXT:    [[MF8X2:%.*]] = alloca { <vscale x ...
[truncated]

@llvmbot
Collaborator

llvmbot commented Oct 17, 2024

@llvm/pr-subscribers-clang-modules


Labels
  • clang:frontend (Language frontend issues, e.g. anything involving "Sema")
  • clang:modules (C++20 modules and Clang Header Modules)
  • clang (Clang issues not falling into any other category)
4 participants