From 805f5836d1bbd03dcc9de78881bd81dc8cb09cc4 Mon Sep 17 00:00:00 2001 From: Alex Brachet Date: Wed, 26 Feb 2020 12:19:09 -0500 Subject: [PATCH 01/11] [libc] Fix build when -DBUILD_SHARED_LIBS=On Reviewers: gchatelet, sivachandra Reviewed By: gchatelet, sivachandra Subscribers: libc-commits, mgorny, MaskRay, tschuett Differential Revision: https://reviews.llvm.org/D75136 --- libc/utils/UnitTest/CMakeLists.txt | 1 + libc/utils/testutils/CMakeLists.txt | 12 +++++++----- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/libc/utils/UnitTest/CMakeLists.txt b/libc/utils/UnitTest/CMakeLists.txt index c6e5d9a11824a..5c15e3f7fa4b2 100644 --- a/libc/utils/UnitTest/CMakeLists.txt +++ b/libc/utils/UnitTest/CMakeLists.txt @@ -6,3 +6,4 @@ add_llvm_library( ) target_include_directories(LibcUnitTest PUBLIC ${LIBC_SOURCE_DIR}) add_dependencies(LibcUnitTest standalone_cpp) +target_link_libraries(LibcUnitTest PUBLIC libc_test_utils) diff --git a/libc/utils/testutils/CMakeLists.txt b/libc/utils/testutils/CMakeLists.txt index 966c44802c187..aab8d34dc2db8 100644 --- a/libc/utils/testutils/CMakeLists.txt +++ b/libc/utils/testutils/CMakeLists.txt @@ -1,8 +1,10 @@ -add_library( +if(CMAKE_HOST_UNIX) + set(EFFile ExecuteFunctionUnix.cpp) +endif() + +add_llvm_library( libc_test_utils + ${EFFile} ExecuteFunction.h + LINK_COMPONENTS Support ) - -if(CMAKE_HOST_UNIX) - target_sources(libc_test_utils PRIVATE ExecuteFunctionUnix.cpp) -endif() From 590dc8d02cd781b110a87b82476c3557cb5957c3 Mon Sep 17 00:00:00 2001 From: John Brawn Date: Wed, 26 Feb 2020 16:31:24 +0000 Subject: [PATCH 02/11] Use virtual functions in ParsedAttrInfo instead of function pointers This doesn't do anything on its own, but it's the first step towards allowing plugins to define attributes. It does simplify the ParsedAttrInfo generation in ClangAttrEmitter a little though. 
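To illustrate the direction this enables, a hypothetical sketch (not part of this patch; only ParsedAttrInfo, ParsedAttr, Sema and the diagnostic name come from the patch and existing Clang code — the ExampleAttrInfo subclass and its "functions" restriction are illustrative only) of an attribute description that overrides the new virtual hooks instead of supplying function pointers:

struct ExampleAttrInfo : public ParsedAttrInfo {
  // Hypothetical subclass: only allow the attribute on function declarations.
  bool diagAppertainsToDecl(Sema &S, const ParsedAttr &Attr,
                            const Decl *D) const override {
    if (!isa<FunctionDecl>(D)) {
      S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type_str)
          << Attr << "functions";
      return false;
    }
    return true;
  }
};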
Differential Revision: https://reviews.llvm.org/D31337 --- clang/lib/Sema/ParsedAttr.cpp | 45 ++-- clang/utils/TableGen/ClangAttrEmitter.cpp | 264 +++++++++------------- 2 files changed, 133 insertions(+), 176 deletions(-) diff --git a/clang/lib/Sema/ParsedAttr.cpp b/clang/lib/Sema/ParsedAttr.cpp index 5d0a734f237ae..c814639f00ead 100644 --- a/clang/lib/Sema/ParsedAttr.cpp +++ b/clang/lib/Sema/ParsedAttr.cpp @@ -110,13 +110,26 @@ struct ParsedAttrInfo { unsigned IsKnownToGCC : 1; unsigned IsSupportedByPragmaAttribute : 1; - bool (*DiagAppertainsToDecl)(Sema &S, const ParsedAttr &Attr, const Decl *); - bool (*DiagLangOpts)(Sema &S, const ParsedAttr &Attr); - bool (*ExistsInTarget)(const TargetInfo &Target); - unsigned (*SpellingIndexToSemanticSpelling)(const ParsedAttr &Attr); - void (*GetPragmaAttributeMatchRules)( - llvm::SmallVectorImpl> &Rules, - const LangOptions &LangOpts); + virtual ~ParsedAttrInfo() = default; + + virtual bool diagAppertainsToDecl(Sema &S, const ParsedAttr &Attr, + const Decl *) const { + return true; + } + virtual bool diagLangOpts(Sema &S, const ParsedAttr &Attr) const { + return true; + } + virtual bool existsInTarget(const TargetInfo &Target) const { + return true; + } + virtual unsigned + spellingIndexToSemanticSpelling(const ParsedAttr &Attr) const { + return UINT_MAX; + } + virtual void getPragmaAttributeMatchRules( + llvm::SmallVectorImpl> &Rules, + const LangOptions &LangOpts) const { + } }; namespace { @@ -126,7 +139,13 @@ namespace { } // namespace static const ParsedAttrInfo &getInfo(const ParsedAttr &A) { - return AttrInfoMap[A.getKind()]; + // If we have a ParsedAttrInfo for this ParsedAttr then return that, + // otherwise return a default ParsedAttrInfo. + if (A.getKind() < llvm::array_lengthof(AttrInfoMap)) + return *AttrInfoMap[A.getKind()]; + + static ParsedAttrInfo DefaultParsedAttrInfo; + return DefaultParsedAttrInfo; } unsigned ParsedAttr::getMinArgs() const { return getInfo(*this).NumArgs; } @@ -140,7 +159,7 @@ bool ParsedAttr::hasCustomParsing() const { } bool ParsedAttr::diagnoseAppertainsTo(Sema &S, const Decl *D) const { - return getInfo(*this).DiagAppertainsToDecl(S, *this, D); + return getInfo(*this).diagAppertainsToDecl(S, *this, D); } bool ParsedAttr::appliesToDecl(const Decl *D, @@ -152,11 +171,11 @@ void ParsedAttr::getMatchRules( const LangOptions &LangOpts, SmallVectorImpl> &MatchRules) const { - return getInfo(*this).GetPragmaAttributeMatchRules(MatchRules, LangOpts); + return getInfo(*this).getPragmaAttributeMatchRules(MatchRules, LangOpts); } bool ParsedAttr::diagnoseLangOpts(Sema &S) const { - return getInfo(*this).DiagLangOpts(S, *this); + return getInfo(*this).diagLangOpts(S, *this); } bool ParsedAttr::isTargetSpecificAttr() const { @@ -168,7 +187,7 @@ bool ParsedAttr::isTypeAttr() const { return getInfo(*this).IsType; } bool ParsedAttr::isStmtAttr() const { return getInfo(*this).IsStmt; } bool ParsedAttr::existsInTarget(const TargetInfo &Target) const { - return getInfo(*this).ExistsInTarget(Target); + return getInfo(*this).existsInTarget(Target); } bool ParsedAttr::isKnownToGCC() const { return getInfo(*this).IsKnownToGCC; } @@ -178,7 +197,7 @@ bool ParsedAttr::isSupportedByPragmaAttribute() const { } unsigned ParsedAttr::getSemanticSpelling() const { - return getInfo(*this).SpellingIndexToSemanticSpelling(*this); + return getInfo(*this).spellingIndexToSemanticSpelling(*this); } bool ParsedAttr::hasVariadicArg() const { diff --git a/clang/utils/TableGen/ClangAttrEmitter.cpp b/clang/utils/TableGen/ClangAttrEmitter.cpp index 
116c382c1e8f7..251cbecd1c457 100644 --- a/clang/utils/TableGen/ClangAttrEmitter.cpp +++ b/clang/utils/TableGen/ClangAttrEmitter.cpp @@ -1814,7 +1814,7 @@ struct PragmaClangAttributeSupport { void emitMatchRuleList(raw_ostream &OS); - std::string generateStrictConformsTo(const Record &Attr, raw_ostream &OS); + void generateStrictConformsTo(const Record &Attr, raw_ostream &OS); void generateParsingHelpers(raw_ostream &OS); }; @@ -1975,21 +1975,17 @@ static std::string GenerateTestExpression(ArrayRef LangOpts) { return Test; } -std::string +void PragmaClangAttributeSupport::generateStrictConformsTo(const Record &Attr, raw_ostream &OS) { - if (!isAttributedSupported(Attr)) - return "nullptr"; + if (!isAttributedSupported(Attr) || Attr.isValueUnset("Subjects")) + return; // Generate a function that constructs a set of matching rules that describe // to which declarations the attribute should apply to. - std::string FnName = "matchRulesFor" + Attr.getName().str(); - OS << "static void " << FnName << "(llvm::SmallVectorImpl> &MatchRules, const LangOptions &LangOpts) {\n"; - if (Attr.isValueUnset("Subjects")) { - OS << "}\n\n"; - return FnName; - } + << ", bool>> &MatchRules, const LangOptions &LangOpts) const {\n"; const Record *SubjectObj = Attr.getValueAsDef("Subjects"); std::vector Subjects = SubjectObj->getValueAsListOfDefs("Subjects"); for (const auto *Subject : Subjects) { @@ -2006,7 +2002,6 @@ PragmaClangAttributeSupport::generateStrictConformsTo(const Record &Attr, } } OS << "}\n\n"; - return FnName; } void PragmaClangAttributeSupport::generateParsingHelpers(raw_ostream &OS) { @@ -3300,14 +3295,8 @@ static void emitArgInfo(const Record &R, raw_ostream &OS) { // If there is a variadic argument, we will set the optional argument count // to its largest value. Since it's currently a 4-bit number, we set it to 15. - OS << ArgCount << ", " << (HasVariadic ? 15 : OptCount); -} - -static void GenerateDefaultAppertainsTo(raw_ostream &OS) { - OS << "static bool defaultAppertainsTo(Sema &, const ParsedAttr &,"; - OS << "const Decl *) {\n"; - OS << " return true;\n"; - OS << "}\n\n"; + OS << " NumArgs = " << ArgCount << ";\n"; + OS << " OptArgs = " << (HasVariadic ? 15 : OptCount) << ";\n"; } static std::string GetDiagnosticSpelling(const Record &R) { @@ -3388,16 +3377,14 @@ static std::string functionNameForCustomAppertainsTo(const Record &Subject) { return "is" + Subject.getName().str(); } -static std::string GenerateCustomAppertainsTo(const Record &Subject, - raw_ostream &OS) { +static void GenerateCustomAppertainsTo(const Record &Subject, raw_ostream &OS) { std::string FnName = functionNameForCustomAppertainsTo(Subject); - // If this code has already been generated, simply return the previous - // instance of it. + // If this code has already been generated, we don't need to do anything. static std::set CustomSubjectSet; auto I = CustomSubjectSet.find(FnName); if (I != CustomSubjectSet.end()) - return *I; + return; // This only works with non-root Decls. 
Record *Base = Subject.getValueAsDef(BaseFieldName); @@ -3406,7 +3393,7 @@ static std::string GenerateCustomAppertainsTo(const Record &Subject, if (Base->isSubClassOf("SubsetSubject")) { PrintFatalError(Subject.getLoc(), "SubsetSubjects within SubsetSubjects is not supported"); - return ""; + return; } OS << "static bool " << FnName << "(const Decl *D) {\n"; @@ -3418,14 +3405,13 @@ static std::string GenerateCustomAppertainsTo(const Record &Subject, OS << "}\n\n"; CustomSubjectSet.insert(FnName); - return FnName; } -static std::string GenerateAppertainsTo(const Record &Attr, raw_ostream &OS) { +static void GenerateAppertainsTo(const Record &Attr, raw_ostream &OS) { // If the attribute does not contain a Subjects definition, then use the // default appertainsTo logic. if (Attr.isValueUnset("Subjects")) - return "defaultAppertainsTo"; + return; const Record *SubjectObj = Attr.getValueAsDef("Subjects"); std::vector Subjects = SubjectObj->getValueAsListOfDefs("Subjects"); @@ -3433,52 +3419,46 @@ static std::string GenerateAppertainsTo(const Record &Attr, raw_ostream &OS) { // If the list of subjects is empty, it is assumed that the attribute // appertains to everything. if (Subjects.empty()) - return "defaultAppertainsTo"; + return; bool Warn = SubjectObj->getValueAsDef("Diag")->getValueAsBit("Warn"); // Otherwise, generate an appertainsTo check specific to this attribute which - // checks all of the given subjects against the Decl passed in. Return the - // name of that check to the caller. + // checks all of the given subjects against the Decl passed in. // // If D is null, that means the attribute was not applied to a declaration // at all (for instance because it was applied to a type), or that the caller // has determined that the check should fail (perhaps prior to the creation // of the declaration). - std::string FnName = "check" + Attr.getName().str() + "AppertainsTo"; - std::stringstream SS; - SS << "static bool " << FnName << "(Sema &S, const ParsedAttr &Attr, "; - SS << "const Decl *D) {\n"; - SS << " if (!D || ("; + OS << "virtual bool diagAppertainsToDecl(Sema &S, "; + OS << "const ParsedAttr &Attr, const Decl *D) const {\n"; + OS << " if (!D || ("; for (auto I = Subjects.begin(), E = Subjects.end(); I != E; ++I) { - // If the subject has custom code associated with it, generate a function - // for it. The function cannot be inlined into this check (yet) because it - // requires the subject to be of a specific type, and were that information - // inlined here, it would not support an attribute with multiple custom - // subjects. + // If the subject has custom code associated with it, use the generated + // function for it. The function cannot be inlined into this check (yet) + // because it requires the subject to be of a specific type, and were that + // information inlined here, it would not support an attribute with multiple + // custom subjects. if ((*I)->isSubClassOf("SubsetSubject")) { - SS << "!" << GenerateCustomAppertainsTo(**I, OS) << "(D)"; + OS << "!" << functionNameForCustomAppertainsTo(**I) << "(D)"; } else { - SS << "!isa<" << GetSubjectWithSuffix(*I) << ">(D)"; + OS << "!isa<" << GetSubjectWithSuffix(*I) << ">(D)"; } if (I + 1 != E) - SS << " && "; + OS << " && "; } - SS << ")) {\n"; - SS << " S.Diag(Attr.getLoc(), diag::"; - SS << (Warn ? "warn_attribute_wrong_decl_type_str" : + OS << ")) {\n"; + OS << " S.Diag(Attr.getLoc(), diag::"; + OS << (Warn ? 
"warn_attribute_wrong_decl_type_str" : "err_attribute_wrong_decl_type_str"); - SS << ")\n"; - SS << " << Attr << "; - SS << CalculateDiagnostic(*SubjectObj) << ";\n"; - SS << " return false;\n"; - SS << " }\n"; - SS << " return true;\n"; - SS << "}\n\n"; - - OS << SS.str(); - return FnName; + OS << ")\n"; + OS << " << Attr << "; + OS << CalculateDiagnostic(*SubjectObj) << ";\n"; + OS << " return false;\n"; + OS << " }\n"; + OS << " return true;\n"; + OS << "}\n\n"; } static void @@ -3517,37 +3497,16 @@ emitAttributeMatchRules(PragmaClangAttributeSupport &PragmaAttributeSupport, OS << "}\n\n"; } -static void GenerateDefaultLangOptRequirements(raw_ostream &OS) { - OS << "static bool defaultDiagnoseLangOpts(Sema &, "; - OS << "const ParsedAttr &) {\n"; - OS << " return true;\n"; - OS << "}\n\n"; -} - -static std::string GenerateLangOptRequirements(const Record &R, - raw_ostream &OS) { +static void GenerateLangOptRequirements(const Record &R, + raw_ostream &OS) { // If the attribute has an empty or unset list of language requirements, - // return the default handler. + // use the default handler. std::vector LangOpts = R.getValueAsListOfDefs("LangOpts"); if (LangOpts.empty()) - return "defaultDiagnoseLangOpts"; - - // Generate a unique function name for the diagnostic test. The list of - // options should usually be short (one or two options), and the - // uniqueness isn't strictly necessary (it is just for codegen efficiency). - std::string FnName = "check"; - for (auto I = LangOpts.begin(), E = LangOpts.end(); I != E; ++I) - FnName += (*I)->getValueAsString("Name"); - FnName += "LangOpts"; - - // If this code has already been generated, simply return the previous - // instance of it. - static std::set CustomLangOptsSet; - auto I = CustomLangOptsSet.find(FnName); - if (I != CustomLangOptsSet.end()) - return *I; - - OS << "static bool " << FnName << "(Sema &S, const ParsedAttr &Attr) {\n"; + return; + + OS << "virtual bool diagLangOpts(Sema &S, const ParsedAttr &Attr) "; + OS << "const {\n"; OS << " auto &LangOpts = S.LangOpts;\n"; OS << " if (" << GenerateTestExpression(LangOpts) << ")\n"; OS << " return true;\n\n"; @@ -3555,24 +3514,15 @@ static std::string GenerateLangOptRequirements(const Record &R, OS << "<< Attr;\n"; OS << " return false;\n"; OS << "}\n\n"; - - CustomLangOptsSet.insert(FnName); - return FnName; -} - -static void GenerateDefaultTargetRequirements(raw_ostream &OS) { - OS << "static bool defaultTargetRequirements(const TargetInfo &) {\n"; - OS << " return true;\n"; - OS << "}\n\n"; } -static std::string GenerateTargetRequirements(const Record &Attr, - const ParsedAttrMap &Dupes, - raw_ostream &OS) { - // If the attribute is not a target specific attribute, return the default +static void GenerateTargetRequirements(const Record &Attr, + const ParsedAttrMap &Dupes, + raw_ostream &OS) { + // If the attribute is not a target specific attribute, use the default // target handler. if (!Attr.isSubClassOf("TargetSpecificAttr")) - return "defaultTargetRequirements"; + return; // Get the list of architectures to be tested for. const Record *R = Attr.getValueAsDef("Target"); @@ -3600,55 +3550,37 @@ static std::string GenerateTargetRequirements(const Record &Attr, std::string Test; bool UsesT = GenerateTargetSpecificAttrChecks(R, Arches, Test, &FnName); - // If this code has already been generated, simply return the previous - // instance of it. 
- static std::set CustomTargetSet; - auto I = CustomTargetSet.find(FnName); - if (I != CustomTargetSet.end()) - return *I; - - OS << "static bool " << FnName << "(const TargetInfo &Target) {\n"; + OS << "virtual bool existsInTarget(const TargetInfo &Target) const {\n"; if (UsesT) OS << " const llvm::Triple &T = Target.getTriple(); (void)T;\n"; OS << " return " << Test << ";\n"; OS << "}\n\n"; - - CustomTargetSet.insert(FnName); - return FnName; -} - -static void GenerateDefaultSpellingIndexToSemanticSpelling(raw_ostream &OS) { - OS << "static unsigned defaultSpellingIndexToSemanticSpelling(" - << "const ParsedAttr &Attr) {\n"; - OS << " return UINT_MAX;\n"; - OS << "}\n\n"; } -static std::string GenerateSpellingIndexToSemanticSpelling(const Record &Attr, - raw_ostream &OS) { +static void GenerateSpellingIndexToSemanticSpelling(const Record &Attr, + raw_ostream &OS) { // If the attribute does not have a semantic form, we can bail out early. if (!Attr.getValueAsBit("ASTNode")) - return "defaultSpellingIndexToSemanticSpelling"; + return; std::vector Spellings = GetFlattenedSpellings(Attr); // If there are zero or one spellings, or all of the spellings share the same // name, we can also bail out early. if (Spellings.size() <= 1 || SpellingNamesAreCommon(Spellings)) - return "defaultSpellingIndexToSemanticSpelling"; + return; // Generate the enumeration we will use for the mapping. SemanticSpellingMap SemanticToSyntacticMap; std::string Enum = CreateSemanticSpellings(Spellings, SemanticToSyntacticMap); std::string Name = Attr.getName().str() + "AttrSpellingMap"; - OS << "static unsigned " << Name << "(const ParsedAttr &Attr) {\n"; + OS << "virtual unsigned spellingIndexToSemanticSpelling("; + OS << "const ParsedAttr &Attr) const {\n"; OS << Enum; OS << " unsigned Idx = Attr.getAttributeSpellingListIndex();\n"; WriteSemanticSpellingSwitch("Idx", SemanticToSyntacticMap, OS); OS << "}\n\n"; - - return Name; } static bool IsKnownToGCC(const Record &Attr) { @@ -3671,19 +3603,19 @@ void EmitClangAttrParsedAttrImpl(RecordKeeper &Records, raw_ostream &OS) { ParsedAttrMap Dupes; ParsedAttrMap Attrs = getParsedAttrList(Records, &Dupes); - // Generate the default appertainsTo, target and language option diagnostic, - // and spelling list index mapping methods. - GenerateDefaultAppertainsTo(OS); - GenerateDefaultLangOptRequirements(OS); - GenerateDefaultTargetRequirements(OS); - GenerateDefaultSpellingIndexToSemanticSpelling(OS); - - // Generate the appertainsTo diagnostic methods and write their names into - // another mapping. At the same time, generate the AttrInfoMap object - // contents. Due to the reliance on generated code, use separate streams so - // that code will not be interleaved. - std::string Buffer; - raw_string_ostream SS {Buffer}; + // Generate all of the custom appertainsTo functions that the attributes + // will be using. + for (auto I : Attrs) { + const Record &Attr = *I.second; + if (Attr.isValueUnset("Subjects")) + continue; + const Record *SubjectObj = Attr.getValueAsDef("Subjects"); + for (auto Subject : SubjectObj->getValueAsListOfDefs("Subjects")) + if (Subject->isSubClassOf("SubsetSubject")) + GenerateCustomAppertainsTo(*Subject, OS); + } + + // Generate a ParsedAttrInfo struct for each of the attributes. for (auto I = Attrs.begin(), E = Attrs.end(); I != E; ++I) { // TODO: If the attribute's kind appears in the list of duplicates, that is // because it is a target-specific attribute that appears multiple times. 
@@ -3693,33 +3625,39 @@ void EmitClangAttrParsedAttrImpl(RecordKeeper &Records, raw_ostream &OS) { // We need to generate struct instances based off ParsedAttrInfo from // ParsedAttr.cpp. - SS << " { "; - emitArgInfo(*I->second, SS); - SS << ", " << I->second->getValueAsBit("HasCustomParsing"); - SS << ", " << I->second->isSubClassOf("TargetSpecificAttr"); - SS << ", " - << (I->second->isSubClassOf("TypeAttr") || - I->second->isSubClassOf("DeclOrTypeAttr")); - SS << ", " << I->second->isSubClassOf("StmtAttr"); - SS << ", " << IsKnownToGCC(*I->second); - SS << ", " << PragmaAttributeSupport.isAttributedSupported(*I->second); - SS << ", " << GenerateAppertainsTo(*I->second, OS); - SS << ", " << GenerateLangOptRequirements(*I->second, OS); - SS << ", " << GenerateTargetRequirements(*I->second, Dupes, OS); - SS << ", " << GenerateSpellingIndexToSemanticSpelling(*I->second, OS); - SS << ", " - << PragmaAttributeSupport.generateStrictConformsTo(*I->second, OS); - SS << " }"; - - if (I + 1 != E) - SS << ","; - - SS << " // AT_" << I->first << "\n"; + const Record &Attr = *I->second; + OS << "struct ParsedAttrInfo" << I->first << " : public ParsedAttrInfo {\n"; + OS << " ParsedAttrInfo" << I->first << "() {\n"; + emitArgInfo(Attr, OS); + OS << " HasCustomParsing = "; + OS << Attr.getValueAsBit("HasCustomParsing") << ";\n"; + OS << " IsTargetSpecific = "; + OS << Attr.isSubClassOf("TargetSpecificAttr") << ";\n"; + OS << " IsType = "; + OS << (Attr.isSubClassOf("TypeAttr") || + Attr.isSubClassOf("DeclOrTypeAttr")) << ";\n"; + OS << " IsStmt = "; + OS << Attr.isSubClassOf("StmtAttr") << ";\n"; + OS << " IsKnownToGCC = "; + OS << IsKnownToGCC(Attr) << ";\n"; + OS << " IsSupportedByPragmaAttribute = "; + OS << PragmaAttributeSupport.isAttributedSupported(*I->second) << ";\n"; + OS << " }\n"; + GenerateAppertainsTo(Attr, OS); + GenerateLangOptRequirements(Attr, OS); + GenerateTargetRequirements(Attr, Dupes, OS); + GenerateSpellingIndexToSemanticSpelling(Attr, OS); + PragmaAttributeSupport.generateStrictConformsTo(*I->second, OS); + OS << "static const ParsedAttrInfo" << I->first << " Instance;\n"; + OS << "};\n"; + OS << "const ParsedAttrInfo" << I->first << " ParsedAttrInfo" << I->first + << "::Instance;\n"; } - OS << "static const ParsedAttrInfo AttrInfoMap[ParsedAttr::UnknownAttribute " - "+ 1] = {\n"; - OS << SS.str(); + OS << "static const ParsedAttrInfo *AttrInfoMap[] = {\n"; + for (auto I = Attrs.begin(), E = Attrs.end(); I != E; ++I) { + OS << "&ParsedAttrInfo" << I->first << "::Instance,\n"; + } OS << "};\n\n"; // Generate the attribute match rules. From 73c3b52676a1ec2418429665bd3724df4e695340 Mon Sep 17 00:00:00 2001 From: Sean Fertile Date: Wed, 26 Feb 2020 09:52:43 -0500 Subject: [PATCH 03/11] [PowerPC][NFC] Convert grep usage to FileCheck in lit test. 
--- llvm/test/CodeGen/PowerPC/vec_vrsave.ll | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/llvm/test/CodeGen/PowerPC/vec_vrsave.ll b/llvm/test/CodeGen/PowerPC/vec_vrsave.ll index 4d48c332fb100..784da55518cbb 100644 --- a/llvm/test/CodeGen/PowerPC/vec_vrsave.ll +++ b/llvm/test/CodeGen/PowerPC/vec_vrsave.ll @@ -1,12 +1,21 @@ -; RUN: llc -verify-machineinstrs < %s -mtriple=ppc32-- -mcpu=g5 -o %t -; RUN: grep vrlw %t -; RUN: not grep spr %t -; RUN: not grep vrsave %t +; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc-unknown-linux \ +; RUN: -mcpu=g5 < %s | FileCheck %s +; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc-unknown-freebsd \ +; RUN: -mcpu=g5 < %s | FileCheck %s + +; CHECK-LABEL: test_rol +; CHECK-NOT: spr +; CHECK-NOT: vrsave +; CHECK: vrlw +; CHECK-NEXT: blr define <4 x i32> @test_rol() { ret <4 x i32> < i32 -11534337, i32 -11534337, i32 -11534337, i32 -11534337 > } +; CHECK-LABEL: test_arg +; CHECK-NOT: spr +; CHECK-NOT: vrsave define <4 x i32> @test_arg(<4 x i32> %A, <4 x i32> %B) { %C = add <4 x i32> %A, %B ; <<4 x i32>> [#uses=1] ret <4 x i32> %C From 387c3f74fd8efdc0be464b0e1a8033cc1eeb739c Mon Sep 17 00:00:00 2001 From: Steven Wu Date: Wed, 26 Feb 2020 09:17:03 -0800 Subject: [PATCH 04/11] [compiler-rt] Build all alias in builtin as private external on Darwin Summary: For builtin compiler-rt, it is built with visibility hidden by default to avoid the client exporting symbols from the libclang static library. The compiler option -fvisibility=hidden doesn't work on the aliases in C files because they are created with inline assembly. On the Darwin platform, those aliases are exported by default if they are referenced by the client. Fix the issue by adding ".private_extern" to all the aliases if the library is built with visibility hidden. rdar://problem/58960296 Reviewers: dexonsmith, arphaman, delcypher, kledzik Reviewed By: delcypher Subscribers: dberris, jkorous, ributzka, #sanitizers, llvm-commits Tags: #sanitizers, #llvm Differential Revision: https://reviews.llvm.org/D73577 --- compiler-rt/lib/builtins/int_lib.h | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/compiler-rt/lib/builtins/int_lib.h b/compiler-rt/lib/builtins/int_lib.h index 3092f68c084a6..7f5eb79903b20 100644 --- a/compiler-rt/lib/builtins/int_lib.h +++ b/compiler-rt/lib/builtins/int_lib.h @@ -52,8 +52,15 @@ #define COMPILER_RT_ALIAS(name, aliasname) \ COMPILER_RT_ABI __typeof(name) aliasname __attribute__((__alias__(#name))); #elif defined(__APPLE__) +#if defined(VISIBILITY_HIDDEN) +#define COMPILER_RT_ALIAS_VISIBILITY(name) \ + __asm__(".private_extern " SYMBOL_NAME(name)); +#else +#define COMPILER_RT_ALIAS_VISIBILITY(name) +#endif #define COMPILER_RT_ALIAS(name, aliasname) \ __asm__(".globl " SYMBOL_NAME(aliasname)); \ + COMPILER_RT_ALIAS_VISIBILITY(aliasname) \ __asm__(SYMBOL_NAME(aliasname) " = " SYMBOL_NAME(name)); \ COMPILER_RT_ABI __typeof(name) aliasname; #elif defined(_WIN32) From 1e9321e97aba43e41ccd7ab2f1bef41d5bcf65af Mon Sep 17 00:00:00 2001 From: Lei Zhang Date: Wed, 26 Feb 2020 09:12:56 -0500 Subject: [PATCH 05/11] [mlir][spirv] NFC: move folders and canonicalizers in a separate file This gives us better file organization and faster compilation time by avoiding a gigantic SPIRVOps.cpp file.
--- mlir/lib/Dialect/SPIRV/CMakeLists.txt | 1 + .../Dialect/SPIRV/SPIRVCanonicalization.cpp | 367 ++++++++++++++++++ mlir/lib/Dialect/SPIRV/SPIRVOps.cpp | 326 ---------------- 3 files changed, 368 insertions(+), 326 deletions(-) create mode 100644 mlir/lib/Dialect/SPIRV/SPIRVCanonicalization.cpp diff --git a/mlir/lib/Dialect/SPIRV/CMakeLists.txt b/mlir/lib/Dialect/SPIRV/CMakeLists.txt index d0ff25ef68f01..85bb7390b7163 100644 --- a/mlir/lib/Dialect/SPIRV/CMakeLists.txt +++ b/mlir/lib/Dialect/SPIRV/CMakeLists.txt @@ -4,6 +4,7 @@ add_public_tablegen_target(MLIRSPIRVCanonicalizationIncGen) add_llvm_library(MLIRSPIRV LayoutUtils.cpp + SPIRVCanonicalization.cpp SPIRVDialect.cpp SPIRVOps.cpp SPIRVLowering.cpp diff --git a/mlir/lib/Dialect/SPIRV/SPIRVCanonicalization.cpp b/mlir/lib/Dialect/SPIRV/SPIRVCanonicalization.cpp new file mode 100644 index 0000000000000..32090f3d1ec0d --- /dev/null +++ b/mlir/lib/Dialect/SPIRV/SPIRVCanonicalization.cpp @@ -0,0 +1,367 @@ +//===- SPIRVCanonicalization.cpp - MLIR SPIR-V canonicalization patterns --===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file defines the folders and canonicalization patterns for SPIR-V ops. +// +//===----------------------------------------------------------------------===// + +#include "mlir/Dialect/SPIRV/SPIRVOps.h" + +#include "mlir/Dialect/CommonFolders.h" +#include "mlir/Dialect/SPIRV/SPIRVDialect.h" +#include "mlir/IR/Matchers.h" +#include "mlir/IR/PatternMatch.h" +#include "mlir/Support/Functional.h" + +using namespace mlir; + +//===----------------------------------------------------------------------===// +// Common utility functions +//===----------------------------------------------------------------------===// + +// Extracts an element from the given `composite` by following the given +// `indices`. Returns a null Attribute if error happens. +static Attribute extractCompositeElement(Attribute composite, + ArrayRef indices) { + // Check that given composite is a constant. + if (!composite) + return {}; + // Return composite itself if we reach the end of the index chain. + if (indices.empty()) + return composite; + + if (auto vector = composite.dyn_cast()) { + assert(indices.size() == 1 && "must have exactly one index for a vector"); + return vector.getValue({indices[0]}); + } + + if (auto array = composite.dyn_cast()) { + assert(!indices.empty() && "must have at least one index for an array"); + return extractCompositeElement(array.getValue()[indices[0]], + indices.drop_front()); + } + + return {}; +} + +//===----------------------------------------------------------------------===// +// TableGen'erated canonicalizers +//===----------------------------------------------------------------------===// + +namespace { +#include "SPIRVCanonicalization.inc" +} + +//===----------------------------------------------------------------------===// +// spv.AccessChainOp +//===----------------------------------------------------------------------===// + +namespace { + +/// Combines chained `spirv::AccessChainOp` operations into one +/// `spirv::AccessChainOp` operation. 
+struct CombineChainedAccessChain + : public OpRewritePattern { + using OpRewritePattern::OpRewritePattern; + + PatternMatchResult matchAndRewrite(spirv::AccessChainOp accessChainOp, + PatternRewriter &rewriter) const override { + auto parentAccessChainOp = dyn_cast_or_null( + accessChainOp.base_ptr().getDefiningOp()); + + if (!parentAccessChainOp) { + return matchFailure(); + } + + // Combine indices. + SmallVector indices(parentAccessChainOp.indices()); + indices.append(accessChainOp.indices().begin(), + accessChainOp.indices().end()); + + rewriter.replaceOpWithNewOp( + accessChainOp, parentAccessChainOp.base_ptr(), indices); + + return matchSuccess(); + } +}; +} // end anonymous namespace + +void spirv::AccessChainOp::getCanonicalizationPatterns( + OwningRewritePatternList &results, MLIRContext *context) { + results.insert(context); +} + +//===----------------------------------------------------------------------===// +// spv.BitcastOp +//===----------------------------------------------------------------------===// + +void spirv::BitcastOp::getCanonicalizationPatterns( + OwningRewritePatternList &results, MLIRContext *context) { + results.insert(context); +} + +//===----------------------------------------------------------------------===// +// spv.CompositeExtractOp +//===----------------------------------------------------------------------===// + +OpFoldResult spirv::CompositeExtractOp::fold(ArrayRef operands) { + assert(operands.size() == 1 && "spv.CompositeExtract expects one operand"); + auto indexVector = functional::map( + [](Attribute attr) { + return static_cast(attr.cast().getInt()); + }, + indices()); + return extractCompositeElement(operands[0], indexVector); +} + +//===----------------------------------------------------------------------===// +// spv.constant +//===----------------------------------------------------------------------===// + +OpFoldResult spirv::ConstantOp::fold(ArrayRef operands) { + assert(operands.empty() && "spv.constant has no operands"); + return value(); +} + +//===----------------------------------------------------------------------===// +// spv.IAdd +//===----------------------------------------------------------------------===// + +OpFoldResult spirv::IAddOp::fold(ArrayRef operands) { + assert(operands.size() == 2 && "spv.IAdd expects two operands"); + // x + 0 = x + if (matchPattern(operand2(), m_Zero())) + return operand1(); + + // According to the SPIR-V spec: + // + // The resulting value will equal the low-order N bits of the correct result + // R, where N is the component width and R is computed with enough precision + // to avoid overflow and underflow. + return constFoldBinaryOp(operands, + [](APInt a, APInt b) { return a + b; }); +} + +//===----------------------------------------------------------------------===// +// spv.IMul +//===----------------------------------------------------------------------===// + +OpFoldResult spirv::IMulOp::fold(ArrayRef operands) { + assert(operands.size() == 2 && "spv.IMul expects two operands"); + // x * 0 == 0 + if (matchPattern(operand2(), m_Zero())) + return operand2(); + // x * 1 = x + if (matchPattern(operand2(), m_One())) + return operand1(); + + // According to the SPIR-V spec: + // + // The resulting value will equal the low-order N bits of the correct result + // R, where N is the component width and R is computed with enough precision + // to avoid overflow and underflow. 
+ return constFoldBinaryOp(operands, + [](APInt a, APInt b) { return a * b; }); +} + +//===----------------------------------------------------------------------===// +// spv.ISub +//===----------------------------------------------------------------------===// + +OpFoldResult spirv::ISubOp::fold(ArrayRef operands) { + // x - x = 0 + if (operand1() == operand2()) + return Builder(getContext()).getIntegerAttr(getType(), 0); + + // According to the SPIR-V spec: + // + // The resulting value will equal the low-order N bits of the correct result + // R, where N is the component width and R is computed with enough precision + // to avoid overflow and underflow. + return constFoldBinaryOp(operands, + [](APInt a, APInt b) { return a - b; }); +} + +//===----------------------------------------------------------------------===// +// spv.LogicalNot +//===----------------------------------------------------------------------===// + +void spirv::LogicalNotOp::getCanonicalizationPatterns( + OwningRewritePatternList &results, MLIRContext *context) { + results.insert(context); +} + +//===----------------------------------------------------------------------===// +// spv.selection +//===----------------------------------------------------------------------===// + +namespace { +// Blocks from the given `spv.selection` operation must satisfy the following +// layout: +// +// +-----------------------------------------------+ +// | header block | +// | spv.BranchConditionalOp %cond, ^case0, ^case1 | +// +-----------------------------------------------+ +// / \ +// ... +// +// +// +------------------------+ +------------------------+ +// | case #0 | | case #1 | +// | spv.Store %ptr %value0 | | spv.Store %ptr %value1 | +// | spv.Branch ^merge | | spv.Branch ^merge | +// +------------------------+ +------------------------+ +// +// +// ... +// \ / +// v +// +-------------+ +// | merge block | +// +-------------+ +// +struct ConvertSelectionOpToSelect + : public OpRewritePattern { + using OpRewritePattern::OpRewritePattern; + + PatternMatchResult matchAndRewrite(spirv::SelectionOp selectionOp, + PatternRewriter &rewriter) const override { + auto *op = selectionOp.getOperation(); + auto &body = op->getRegion(0); + // Verifier allows an empty region for `spv.selection`. + if (body.empty()) { + return matchFailure(); + } + + // Check that region consists of 4 blocks: + // header block, `true` block, `false` block and merge block. + if (std::distance(body.begin(), body.end()) != 4) { + return matchFailure(); + } + + auto *headerBlock = selectionOp.getHeaderBlock(); + if (!onlyContainsBranchConditionalOp(headerBlock)) { + return matchFailure(); + } + + auto brConditionalOp = + cast(headerBlock->front()); + + auto *trueBlock = brConditionalOp.getSuccessor(0); + auto *falseBlock = brConditionalOp.getSuccessor(1); + auto *mergeBlock = selectionOp.getMergeBlock(); + + if (!canCanonicalizeSelection(trueBlock, falseBlock, mergeBlock)) { + return matchFailure(); + } + + auto trueValue = getSrcValue(trueBlock); + auto falseValue = getSrcValue(falseBlock); + auto ptrValue = getDstPtr(trueBlock); + auto storeOpAttributes = + cast(trueBlock->front()).getOperation()->getAttrs(); + + auto selectOp = rewriter.create( + selectionOp.getLoc(), trueValue.getType(), brConditionalOp.condition(), + trueValue, falseValue); + rewriter.create(selectOp.getLoc(), ptrValue, + selectOp.getResult(), storeOpAttributes); + + // `spv.selection` is not needed anymore. 
+ rewriter.eraseOp(op); + return matchSuccess(); + } + +private: + // Checks that given blocks follow the following rules: + // 1. Each conditional block consists of two operations, the first operation + // is a `spv.Store` and the last operation is a `spv.Branch`. + // 2. Each `spv.Store` uses the same pointer and the same memory attributes. + // 3. A control flow goes into the given merge block from the given + // conditional blocks. + PatternMatchResult canCanonicalizeSelection(Block *trueBlock, + Block *falseBlock, + Block *mergeBlock) const; + + bool onlyContainsBranchConditionalOp(Block *block) const { + return std::next(block->begin()) == block->end() && + isa(block->front()); + } + + bool isSameAttrList(spirv::StoreOp lhs, spirv::StoreOp rhs) const { + return lhs.getOperation()->getAttrList().getDictionary() == + rhs.getOperation()->getAttrList().getDictionary(); + } + + // Checks that given type is valid for `spv.SelectOp`. + // According to SPIR-V spec: + // "Before version 1.4, Result Type must be a pointer, scalar, or vector. + // Starting with version 1.4, Result Type can additionally be a composite type + // other than a vector." + bool isValidType(Type type) const { + return spirv::SPIRVDialect::isValidScalarType(type) || + type.isa(); + } + + // Returns a source value for the given block. + Value getSrcValue(Block *block) const { + auto storeOp = cast(block->front()); + return storeOp.value(); + } + + // Returns a destination value for the given block. + Value getDstPtr(Block *block) const { + auto storeOp = cast(block->front()); + return storeOp.ptr(); + } +}; + +PatternMatchResult ConvertSelectionOpToSelect::canCanonicalizeSelection( + Block *trueBlock, Block *falseBlock, Block *mergeBlock) const { + // Each block must consists of 2 operations. + if ((std::distance(trueBlock->begin(), trueBlock->end()) != 2) || + (std::distance(falseBlock->begin(), falseBlock->end()) != 2)) { + return matchFailure(); + } + + auto trueBrStoreOp = dyn_cast(trueBlock->front()); + auto trueBrBranchOp = + dyn_cast(*std::next(trueBlock->begin())); + auto falseBrStoreOp = dyn_cast(falseBlock->front()); + auto falseBrBranchOp = + dyn_cast(*std::next(falseBlock->begin())); + + if (!trueBrStoreOp || !trueBrBranchOp || !falseBrStoreOp || + !falseBrBranchOp) { + return matchFailure(); + } + + // Check that each `spv.Store` uses the same pointer, memory access + // attributes and a valid type of the value. 
+ if ((trueBrStoreOp.ptr() != falseBrStoreOp.ptr()) || + !isSameAttrList(trueBrStoreOp, falseBrStoreOp) || + !isValidType(trueBrStoreOp.value().getType())) { + return matchFailure(); + } + + if ((trueBrBranchOp.getOperation()->getSuccessor(0) != mergeBlock) || + (falseBrBranchOp.getOperation()->getSuccessor(0) != mergeBlock)) { + return matchFailure(); + } + + return matchSuccess(); +} +} // end anonymous namespace + +void spirv::SelectionOp::getCanonicalizationPatterns( + OwningRewritePatternList &results, MLIRContext *context) { + results.insert(context); +} diff --git a/mlir/lib/Dialect/SPIRV/SPIRVOps.cpp b/mlir/lib/Dialect/SPIRV/SPIRVOps.cpp index 01197498a7041..1dc4dd9aee0a3 100644 --- a/mlir/lib/Dialect/SPIRV/SPIRVOps.cpp +++ b/mlir/lib/Dialect/SPIRV/SPIRVOps.cpp @@ -13,17 +13,13 @@ #include "mlir/Dialect/SPIRV/SPIRVOps.h" #include "mlir/Analysis/CallInterfaces.h" -#include "mlir/Dialect/CommonFolders.h" #include "mlir/Dialect/SPIRV/SPIRVDialect.h" #include "mlir/Dialect/SPIRV/SPIRVTypes.h" #include "mlir/IR/Builders.h" #include "mlir/IR/Function.h" #include "mlir/IR/FunctionImplementation.h" -#include "mlir/IR/Matchers.h" #include "mlir/IR/OpImplementation.h" -#include "mlir/IR/PatternMatch.h" #include "mlir/IR/StandardTypes.h" -#include "mlir/Support/Functional.h" #include "mlir/Support/StringExtras.h" #include "llvm/ADT/bit.h" @@ -360,31 +356,6 @@ static void printVariableDecorations(Operation *op, OpAsmPrinter &printer, printer.printOptionalAttrDict(op->getAttrs(), elidedAttrs); } -// Extracts an element from the given `composite` by following the given -// `indices`. Returns a null Attribute if error happens. -static Attribute extractCompositeElement(Attribute composite, - ArrayRef indices) { - // Check that given composite is a constant. - if (!composite) - return {}; - // Return composite itself if we reach the end of the index chain. - if (indices.empty()) - return composite; - - if (auto vector = composite.dyn_cast()) { - assert(indices.size() == 1 && "must have exactly one index for a vector"); - return vector.getValue({indices[0]}); - } - - if (auto array = composite.dyn_cast()) { - assert(!indices.empty() && "must have at least one index for an array"); - return extractCompositeElement(array.getValue()[indices[0]], - indices.drop_front()); - } - - return {}; -} - // Get bit width of types. static unsigned getBitWidth(Type type) { if (type.isa()) { @@ -477,14 +448,6 @@ static inline bool isMergeBlock(Block &block) { isa(block.front()); } -//===----------------------------------------------------------------------===// -// TableGen'erated canonicalizers -//===----------------------------------------------------------------------===// - -namespace { -#include "SPIRVCanonicalization.inc" -} - //===----------------------------------------------------------------------===// // Common parsers and printers //===----------------------------------------------------------------------===// @@ -848,41 +811,6 @@ static LogicalResult verify(spirv::AccessChainOp accessChainOp) { return success(); } -namespace { - -/// Combines chained `spirv::AccessChainOp` operations into one -/// `spirv::AccessChainOp` operation. 
-struct CombineChainedAccessChain - : public OpRewritePattern { - using OpRewritePattern::OpRewritePattern; - - PatternMatchResult matchAndRewrite(spirv::AccessChainOp accessChainOp, - PatternRewriter &rewriter) const override { - auto parentAccessChainOp = dyn_cast_or_null( - accessChainOp.base_ptr().getDefiningOp()); - - if (!parentAccessChainOp) { - return matchFailure(); - } - - // Combine indices. - SmallVector indices(parentAccessChainOp.indices()); - indices.append(accessChainOp.indices().begin(), - accessChainOp.indices().end()); - - rewriter.replaceOpWithNewOp( - accessChainOp, parentAccessChainOp.base_ptr(), indices); - - return matchSuccess(); - } -}; -} // end anonymous namespace - -void spirv::AccessChainOp::getCanonicalizationPatterns( - OwningRewritePatternList &results, MLIRContext *context) { - results.insert(context); -} - //===----------------------------------------------------------------------===// // spv._address_of //===----------------------------------------------------------------------===// @@ -1013,11 +941,6 @@ static LogicalResult verify(spirv::BitcastOp bitcastOp) { return success(); } -void spirv::BitcastOp::getCanonicalizationPatterns( - OwningRewritePatternList &results, MLIRContext *context) { - results.insert(context); -} - //===----------------------------------------------------------------------===// // spv.BranchConditionalOp //===----------------------------------------------------------------------===// @@ -1230,16 +1153,6 @@ static LogicalResult verify(spirv::CompositeExtractOp compExOp) { return success(); } -OpFoldResult spirv::CompositeExtractOp::fold(ArrayRef operands) { - assert(operands.size() == 1 && "spv.CompositeExtract expects one operand"); - auto indexVector = functional::map( - [](Attribute attr) { - return static_cast(attr.cast().getInt()); - }, - indices()); - return extractCompositeElement(operands[0], indexVector); -} - //===----------------------------------------------------------------------===// // spv.CompositeInsert //===----------------------------------------------------------------------===// @@ -1390,11 +1303,6 @@ static LogicalResult verify(spirv::ConstantOp constOp) { return success(); } -OpFoldResult spirv::ConstantOp::fold(ArrayRef operands) { - assert(operands.empty() && "spv.constant has no operands"); - return value(); -} - bool spirv::ConstantOp::isBuildableWith(Type type) { // Must be valid SPIR-V type first. if (!SPIRVDialect::isValidType(type)) @@ -1890,65 +1798,6 @@ static LogicalResult verify(spirv::GroupNonUniformElectOp groupOp) { return success(); } -//===----------------------------------------------------------------------===// -// spv.IAdd -//===----------------------------------------------------------------------===// - -OpFoldResult spirv::IAddOp::fold(ArrayRef operands) { - assert(operands.size() == 2 && "spv.IAdd expects two operands"); - // x + 0 = x - if (matchPattern(operand2(), m_Zero())) - return operand1(); - - // According to the SPIR-V spec: - // - // The resulting value will equal the low-order N bits of the correct result - // R, where N is the component width and R is computed with enough precision - // to avoid overflow and underflow. 
- return constFoldBinaryOp(operands, - [](APInt a, APInt b) { return a + b; }); -} - -//===----------------------------------------------------------------------===// -// spv.IMul -//===----------------------------------------------------------------------===// - -OpFoldResult spirv::IMulOp::fold(ArrayRef operands) { - assert(operands.size() == 2 && "spv.IMul expects two operands"); - // x * 0 == 0 - if (matchPattern(operand2(), m_Zero())) - return operand2(); - // x * 1 = x - if (matchPattern(operand2(), m_One())) - return operand1(); - - // According to the SPIR-V spec: - // - // The resulting value will equal the low-order N bits of the correct result - // R, where N is the component width and R is computed with enough precision - // to avoid overflow and underflow. - return constFoldBinaryOp(operands, - [](APInt a, APInt b) { return a * b; }); -} - -//===----------------------------------------------------------------------===// -// spv.ISub -//===----------------------------------------------------------------------===// - -OpFoldResult spirv::ISubOp::fold(ArrayRef operands) { - // x - x = 0 - if (operand1() == operand2()) - return Builder(getContext()).getIntegerAttr(getType(), 0); - - // According to the SPIR-V spec: - // - // The resulting value will equal the low-order N bits of the correct result - // R, where N is the component width and R is computed with enough precision - // to avoid overflow and underflow. - return constFoldBinaryOp(operands, - [](APInt a, APInt b) { return a - b; }); -} - //===----------------------------------------------------------------------===// // spv.LoadOp //===----------------------------------------------------------------------===// @@ -2008,17 +1857,6 @@ static LogicalResult verify(spirv::LoadOp loadOp) { return verifyMemoryAccessAttribute(loadOp); } -//===----------------------------------------------------------------------===// -// spv.LogicalNot -//===----------------------------------------------------------------------===// - -void spirv::LogicalNotOp::getCanonicalizationPatterns( - OwningRewritePatternList &results, MLIRContext *context) { - results.insert(context); -} - //===----------------------------------------------------------------------===// // spv.loop //===----------------------------------------------------------------------===// @@ -2547,170 +2385,6 @@ spirv::SelectionOp spirv::SelectionOp::createIfThen( return selectionOp; } -namespace { -// Blocks from the given `spv.selection` operation must satisfy the following -// layout: -// -// +-----------------------------------------------+ -// | header block | -// | spv.BranchConditionalOp %cond, ^case0, ^case1 | -// +-----------------------------------------------+ -// / \ -// ... -// -// -// +------------------------+ +------------------------+ -// | case #0 | | case #1 | -// | spv.Store %ptr %value0 | | spv.Store %ptr %value1 | -// | spv.Branch ^merge | | spv.Branch ^merge | -// +------------------------+ +------------------------+ -// -// -// ... -// \ / -// v -// +-------------+ -// | merge block | -// +-------------+ -// -struct ConvertSelectionOpToSelect - : public OpRewritePattern { - using OpRewritePattern::OpRewritePattern; - - PatternMatchResult matchAndRewrite(spirv::SelectionOp selectionOp, - PatternRewriter &rewriter) const override { - auto *op = selectionOp.getOperation(); - auto &body = op->getRegion(0); - // Verifier allows an empty region for `spv.selection`. 
- if (body.empty()) { - return matchFailure(); - } - - // Check that region consists of 4 blocks: - // header block, `true` block, `false` block and merge block. - if (std::distance(body.begin(), body.end()) != 4) { - return matchFailure(); - } - - auto *headerBlock = selectionOp.getHeaderBlock(); - if (!onlyContainsBranchConditionalOp(headerBlock)) { - return matchFailure(); - } - - auto brConditionalOp = - cast(headerBlock->front()); - - auto *trueBlock = brConditionalOp.getSuccessor(0); - auto *falseBlock = brConditionalOp.getSuccessor(1); - auto *mergeBlock = selectionOp.getMergeBlock(); - - if (!canCanonicalizeSelection(trueBlock, falseBlock, mergeBlock)) { - return matchFailure(); - } - - auto trueValue = getSrcValue(trueBlock); - auto falseValue = getSrcValue(falseBlock); - auto ptrValue = getDstPtr(trueBlock); - auto storeOpAttributes = - cast(trueBlock->front()).getOperation()->getAttrs(); - - auto selectOp = rewriter.create( - selectionOp.getLoc(), trueValue.getType(), brConditionalOp.condition(), - trueValue, falseValue); - rewriter.create(selectOp.getLoc(), ptrValue, - selectOp.getResult(), storeOpAttributes); - - // `spv.selection` is not needed anymore. - rewriter.eraseOp(op); - return matchSuccess(); - } - -private: - // Checks that given blocks follow the following rules: - // 1. Each conditional block consists of two operations, the first operation - // is a `spv.Store` and the last operation is a `spv.Branch`. - // 2. Each `spv.Store` uses the same pointer and the same memory attributes. - // 3. A control flow goes into the given merge block from the given - // conditional blocks. - PatternMatchResult canCanonicalizeSelection(Block *trueBlock, - Block *falseBlock, - Block *mergeBlock) const; - - bool onlyContainsBranchConditionalOp(Block *block) const { - return std::next(block->begin()) == block->end() && - isa(block->front()); - } - - bool isSameAttrList(spirv::StoreOp lhs, spirv::StoreOp rhs) const { - return lhs.getOperation()->getAttrList().getDictionary() == - rhs.getOperation()->getAttrList().getDictionary(); - } - - // Checks that given type is valid for `spv.SelectOp`. - // According to SPIR-V spec: - // "Before version 1.4, Result Type must be a pointer, scalar, or vector. - // Starting with version 1.4, Result Type can additionally be a composite type - // other than a vector." - bool isValidType(Type type) const { - return spirv::SPIRVDialect::isValidScalarType(type) || - type.isa(); - } - - // Returns a source value for the given block. - Value getSrcValue(Block *block) const { - auto storeOp = cast(block->front()); - return storeOp.value(); - } - - // Returns a destination value for the given block. - Value getDstPtr(Block *block) const { - auto storeOp = cast(block->front()); - return storeOp.ptr(); - } -}; - -PatternMatchResult ConvertSelectionOpToSelect::canCanonicalizeSelection( - Block *trueBlock, Block *falseBlock, Block *mergeBlock) const { - // Each block must consists of 2 operations. 
- if ((std::distance(trueBlock->begin(), trueBlock->end()) != 2) || - (std::distance(falseBlock->begin(), falseBlock->end()) != 2)) { - return matchFailure(); - } - - auto trueBrStoreOp = dyn_cast(trueBlock->front()); - auto trueBrBranchOp = - dyn_cast(*std::next(trueBlock->begin())); - auto falseBrStoreOp = dyn_cast(falseBlock->front()); - auto falseBrBranchOp = - dyn_cast(*std::next(falseBlock->begin())); - - if (!trueBrStoreOp || !trueBrBranchOp || !falseBrStoreOp || - !falseBrBranchOp) { - return matchFailure(); - } - - // Check that each `spv.Store` uses the same pointer, memory access - // attributes and a valid type of the value. - if ((trueBrStoreOp.ptr() != falseBrStoreOp.ptr()) || - !isSameAttrList(trueBrStoreOp, falseBrStoreOp) || - !isValidType(trueBrStoreOp.value().getType())) { - return matchFailure(); - } - - if ((trueBrBranchOp.getOperation()->getSuccessor(0) != mergeBlock) || - (falseBrBranchOp.getOperation()->getSuccessor(0) != mergeBlock)) { - return matchFailure(); - } - - return matchSuccess(); -} -} // end anonymous namespace - -void spirv::SelectionOp::getCanonicalizationPatterns( - OwningRewritePatternList &results, MLIRContext *context) { - results.insert(context); -} - //===----------------------------------------------------------------------===// // spv.specConstant //===----------------------------------------------------------------------===// From 59fb9cde7a4a96fe8485a80d9010e4420ffdca82 Mon Sep 17 00:00:00 2001 From: Hiroshi Yamauchi Date: Fri, 31 Jan 2020 16:13:44 -0800 Subject: [PATCH 06/11] Devirtualize a call on alloca without waiting for post inline cleanup and next DevirtSCCRepeatedPass iteration. This aims to fix a missed inlining case. If there's a virtual call in the callee on an alloca (stack-allocated object) in the caller, and the callee is inlined into the caller, the post-inline cleanup would devirtualize the virtual call; but if the next iteration of DevirtSCCRepeatedPass doesn't happen (under the new pass manager, reiteration is based on a heuristic), we may miss inlining the devirtualized call. This enables inlining in clang/test/CodeGenCXX/member-function-pointer-calls.cpp.
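For reference, the C++ pattern this targets, adapted from the source comments in the new test devirtualize-4.ll below (Run() is simplified to an empty body here; the rest mirrors the test):

class Interface {
public:
  virtual void Run() = 0;
};

class Impl : public Interface {
public:
  void Run() override {} // the test's version calls DoNotOptimize(this)
};

// The call in IndirectRun is indirect (virtual) until IndirectRun is inlined
// into Test; once inlined, it targets a known vtable slot of a stack object
// (an alloca), so it can be promoted to Impl::Run() and inlined right away.
static void IndirectRun(Interface &o) { o.Run(); }

void Test() {
  Impl o;
  IndirectRun(o);
}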
--- .../member-function-pointer-calls.cpp | 9 +- llvm/lib/Transforms/IPO/Inliner.cpp | 15 +- llvm/test/Transforms/Inline/devirtualize-4.ll | 214 ++++++++++++++++++ 3 files changed, 230 insertions(+), 8 deletions(-) create mode 100644 llvm/test/Transforms/Inline/devirtualize-4.ll diff --git a/clang/test/CodeGenCXX/member-function-pointer-calls.cpp b/clang/test/CodeGenCXX/member-function-pointer-calls.cpp index 0e98b12e6e9da..232b1f06df890 100644 --- a/clang/test/CodeGenCXX/member-function-pointer-calls.cpp +++ b/clang/test/CodeGenCXX/member-function-pointer-calls.cpp @@ -11,12 +11,8 @@ int f(A* a, int (A::*fp)()) { } // CHECK-LABEL: define i32 @_Z2g1v() -// CHECK-LEGACY: ret i32 1 -// CHECK-NEWPM: [[A:%.*]] = alloca %struct.A, align 8 -// CHECK-NEWPM: [[TMP:%.*]] = getelementptr inbounds %struct.A, %struct.A* %a, i64 0, i32 0 -// CHECK-NEWPM: store i32 (...)** bitcast (i8** getelementptr inbounds ({ [4 x i8*] }, { [4 x i8*] }* @_ZTV1A, i64 0, inrange i32 0, i64 2) to i32 (...)**), i32 (...)*** [[TMP]], align 8 -// CHECK-NEWPM: [[RET:%.*]] = call i32 @_ZN1A3vf1Ev(%struct.A* nonnull %a) #2 -// CHECK-NEWPM: ret i32 [[RET]] +// CHECK-NOT: } +// CHECK: ret i32 1 // MINGW64-LABEL: define dso_local i32 @_Z2g1v() // MINGW64: call i32 @_Z1fP1AMS_FivE(%struct.A* %{{.*}}, { i64, i64 }* %{{.*}}) int g1() { @@ -25,6 +21,7 @@ int g1() { } // CHECK-LABEL: define i32 @_Z2g2v() +// CHECK-NOT: } // CHECK: ret i32 2 // MINGW64-LABEL: define dso_local i32 @_Z2g2v() // MINGW64: call i32 @_Z1fP1AMS_FivE(%struct.A* %{{.*}}, { i64, i64 }* %{{.*}}) diff --git a/llvm/lib/Transforms/IPO/Inliner.cpp b/llvm/lib/Transforms/IPO/Inliner.cpp index 4f9f4bd1cd043..55753d979b2c1 100644 --- a/llvm/lib/Transforms/IPO/Inliner.cpp +++ b/llvm/lib/Transforms/IPO/Inliner.cpp @@ -35,6 +35,7 @@ #include "llvm/Analysis/TargetLibraryInfo.h" #include "llvm/Analysis/TargetTransformInfo.h" #include "llvm/Transforms/Utils/Local.h" +#include "llvm/Transforms/Utils/CallPromotionUtils.h" #include "llvm/IR/Attributes.h" #include "llvm/IR/BasicBlock.h" #include "llvm/IR/CallSite.h" @@ -1100,10 +1101,20 @@ PreservedAnalyses InlinerPass::run(LazyCallGraph::SCC &InitialC, if (!IFI.InlinedCallSites.empty()) { int NewHistoryID = InlineHistory.size(); InlineHistory.push_back({&Callee, InlineHistoryID}); - for (CallSite &CS : reverse(IFI.InlinedCallSites)) - if (Function *NewCallee = CS.getCalledFunction()) + for (CallSite &CS : reverse(IFI.InlinedCallSites)) { + Function *NewCallee = CS.getCalledFunction(); + if (!NewCallee) { + // Try to promote an indirect (virtual) call without waiting for the + // post-inline cleanup and the next DevirtSCCRepeatedPass iteration + // because the next iteration may not happen and we may miss + // inlining it. + if (tryPromoteCall(CS)) + NewCallee = CS.getCalledFunction(); + } + if (NewCallee) if (!NewCallee->isDeclaration()) Calls.push_back({CS, NewHistoryID}); + } } if (InlinerFunctionImportStats != InlinerFunctionImportStatsOpts::No) diff --git a/llvm/test/Transforms/Inline/devirtualize-4.ll b/llvm/test/Transforms/Inline/devirtualize-4.ll new file mode 100644 index 0000000000000..2205dae7aa238 --- /dev/null +++ b/llvm/test/Transforms/Inline/devirtualize-4.ll @@ -0,0 +1,214 @@ +; RUN: opt < %s -passes='cgscc(devirt<4>(inline)),function(sroa,early-cse)' -S | FileCheck %s +; RUN: opt < %s -passes='default' -S | FileCheck %s + +; Check that DoNotOptimize is inlined into Test. 
+; CHECK: @_Z4Testv() +; CHECK-NOT: ret void +; CHECK: call void asm +; CHECK: ret void + +;template +;void DoNotOptimize(const T& var) { +; asm volatile("" : "+m"(const_cast(var))); +;} +; +;class Interface { +; public: +; virtual void Run() = 0; +;}; +; +;class Impl : public Interface { +; public: +; Impl() : f(3) {} +; void Run() { DoNotOptimize(this); } +; +; private: +; int f; +;}; +; +;static void IndirectRun(Interface& o) { o.Run(); } +; +;void Test() { +; Impl o; +; IndirectRun(o); +;} + +%class.Impl = type <{ %class.Interface, i32, [4 x i8] }> +%class.Interface = type { i32 (...)** } + +@_ZTV4Impl = linkonce_odr dso_local unnamed_addr constant { [3 x i8*] } { [3 x i8*] [i8* null, i8* bitcast ({ i8*, i8*, i8* }* @_ZTI4Impl to i8*), i8* bitcast (void (%class.Impl*)* @_ZN4Impl3RunEv to i8*)] }, align 8 +@_ZTVN10__cxxabiv120__si_class_type_infoE = external dso_local global i8* +@_ZTS4Impl = linkonce_odr dso_local constant [6 x i8] c"4Impl\00", align 1 +@_ZTVN10__cxxabiv117__class_type_infoE = external dso_local global i8* +@_ZTS9Interface = linkonce_odr dso_local constant [11 x i8] c"9Interface\00", align 1 +@_ZTI9Interface = linkonce_odr dso_local constant { i8*, i8* } { i8* bitcast (i8** getelementptr inbounds (i8*, i8** @_ZTVN10__cxxabiv117__class_type_infoE, i64 2) to i8*), i8* getelementptr inbounds ([11 x i8], [11 x i8]* @_ZTS9Interface, i32 0, i32 0) }, align 8 +@_ZTI4Impl = linkonce_odr dso_local constant { i8*, i8*, i8* } { i8* bitcast (i8** getelementptr inbounds (i8*, i8** @_ZTVN10__cxxabiv120__si_class_type_infoE, i64 2) to i8*), i8* getelementptr inbounds ([6 x i8], [6 x i8]* @_ZTS4Impl, i32 0, i32 0), i8* bitcast ({ i8*, i8* }* @_ZTI9Interface to i8*) }, align 8 +@_ZTV9Interface = linkonce_odr dso_local unnamed_addr constant { [3 x i8*] } { [3 x i8*] [i8* null, i8* bitcast ({ i8*, i8* }* @_ZTI9Interface to i8*), i8* bitcast (void ()* @__cxa_pure_virtual to i8*)] }, align 8 + +define dso_local void @_Z4Testv() local_unnamed_addr { +entry: + %o = alloca %class.Impl, align 8 + %0 = bitcast %class.Impl* %o to i8* + call void @llvm.lifetime.start.p0i8(i64 16, i8* nonnull %0) + call void @_ZN4ImplC2Ev(%class.Impl* nonnull %o) + %1 = getelementptr inbounds %class.Impl, %class.Impl* %o, i64 0, i32 0 + call fastcc void @_ZL11IndirectRunR9Interface(%class.Interface* nonnull dereferenceable(8) %1) + call void @llvm.lifetime.end.p0i8(i64 16, i8* nonnull %0) + ret void +} + +declare void @llvm.lifetime.start.p0i8(i64 immarg, i8* nocapture) + +define linkonce_odr dso_local void @_ZN4ImplC2Ev(%class.Impl* %this) unnamed_addr align 2 { +entry: + %0 = getelementptr %class.Impl, %class.Impl* %this, i64 0, i32 0 + call void @_ZN9InterfaceC2Ev(%class.Interface* %0) + %1 = getelementptr %class.Impl, %class.Impl* %this, i64 0, i32 0, i32 0 + store i32 (...)** bitcast (i8** getelementptr inbounds ({ [3 x i8*] }, { [3 x i8*] }* @_ZTV4Impl, i64 0, inrange i32 0, i64 2) to i32 (...)**), i32 (...)*** %1, align 8 + %f = getelementptr inbounds %class.Impl, %class.Impl* %this, i64 0, i32 1 + store i32 3, i32* %f, align 8 + ret void +} + +define internal fastcc void @_ZL11IndirectRunR9Interface(%class.Interface* dereferenceable(8) %o) unnamed_addr { +entry: + %0 = bitcast %class.Interface* %o to void (%class.Interface*)*** + %vtable = load void (%class.Interface*)**, void (%class.Interface*)*** %0, align 8 + %1 = load void (%class.Interface*)*, void (%class.Interface*)** %vtable, align 8 + call void %1(%class.Interface* nonnull %o) + ret void +} + +declare void @llvm.lifetime.end.p0i8(i64 immarg, i8* 
nocapture) + +define linkonce_odr dso_local void @_ZN9InterfaceC2Ev(%class.Interface* %this) unnamed_addr align 2 { +entry: + %0 = getelementptr %class.Interface, %class.Interface* %this, i64 0, i32 0 + store i32 (...)** bitcast (i8** getelementptr inbounds ({ [3 x i8*] }, { [3 x i8*] }* @_ZTV9Interface, i64 0, inrange i32 0, i64 2) to i32 (...)**), i32 (...)*** %0, align 8 + ret void +} + +define linkonce_odr dso_local void @_ZN4Impl3RunEv(%class.Impl* %this) unnamed_addr align 2 { +entry: + %ref.tmp = alloca %class.Impl*, align 8 + %0 = bitcast %class.Impl** %ref.tmp to i8* + call void @llvm.lifetime.start.p0i8(i64 8, i8* nonnull %0) + store %class.Impl* %this, %class.Impl** %ref.tmp, align 8 + call void @_Z13DoNotOptimizeIP4ImplEvRKT_(%class.Impl** nonnull dereferenceable(8) %ref.tmp) + call void @llvm.lifetime.end.p0i8(i64 8, i8* nonnull %0) + ret void +} + +declare dso_local void @__cxa_pure_virtual() unnamed_addr + +define linkonce_odr dso_local void @_Z13DoNotOptimizeIP4ImplEvRKT_(%class.Impl** dereferenceable(8) %var) local_unnamed_addr { +entry: + call void asm sideeffect "", "=*m,*m,~{dirflag},~{fpsr},~{flags}"(%class.Impl** nonnull %var, %class.Impl** nonnull %var) + ret void +} + + +; Based on clang/test/CodeGenCXX/member-function-pointer-calls.cpp. +; Check that vf1 and vf2 are inlined into g1 and g2. +; CHECK: @_Z2g1v() +; CHECK-NOT: } +; CHECK: ret i32 1 +; CHECK: @_Z2g2v() +; CHECK-NOT: } +; CHECK: ret i32 2 +; +;struct A { +; virtual int vf1() { return 1; } +; virtual int vf2() { return 2; } +;}; +; +;int f(A* a, int (A::*fp)()) { +; return (a->*fp)(); +;} +;int g1() { +; A a; +; return f(&a, &A::vf1); +;} +;int g2() { +; A a; +; return f(&a, &A::vf2); +;} + +%struct.A = type { i32 (...)** } + +@_ZTV1A = linkonce_odr unnamed_addr constant { [4 x i8*] } { [4 x i8*] [i8* null, i8* bitcast ({ i8*, i8* }* @_ZTI1A to i8*), i8* bitcast (i32 (%struct.A*)* @_ZN1A3vf1Ev to i8*), i8* bitcast (i32 (%struct.A*)* @_ZN1A3vf2Ev to i8*)] }, align 8 +@_ZTS1A = linkonce_odr constant [3 x i8] c"1A\00", align 1 +@_ZTI1A = linkonce_odr constant { i8*, i8* } { i8* bitcast (i8** getelementptr inbounds (i8*, i8** @_ZTVN10__cxxabiv117__class_type_infoE, i64 2) to i8*), i8* getelementptr inbounds ([3 x i8], [3 x i8]* @_ZTS1A, i32 0, i32 0) }, align 8 + +define i32 @_Z1fP1AMS_FivE(%struct.A* %a, i64 %fp.coerce0, i64 %fp.coerce1) { +entry: + %0 = bitcast %struct.A* %a to i8* + %1 = getelementptr inbounds i8, i8* %0, i64 %fp.coerce1 + %this.adjusted = bitcast i8* %1 to %struct.A* + %2 = and i64 %fp.coerce0, 1 + %memptr.isvirtual = icmp eq i64 %2, 0 + br i1 %memptr.isvirtual, label %memptr.nonvirtual, label %memptr.virtual + +memptr.virtual: ; preds = %entry + %3 = bitcast i8* %1 to i8** + %vtable = load i8*, i8** %3, align 8 + %4 = add i64 %fp.coerce0, -1 + %5 = getelementptr i8, i8* %vtable, i64 %4 + %6 = bitcast i8* %5 to i32 (%struct.A*)** + %memptr.virtualfn = load i32 (%struct.A*)*, i32 (%struct.A*)** %6, align 8 + br label %memptr.end + +memptr.nonvirtual: ; preds = %entry + %memptr.nonvirtualfn = inttoptr i64 %fp.coerce0 to i32 (%struct.A*)* + br label %memptr.end + +memptr.end: ; preds = %memptr.nonvirtual, %memptr.virtual + %7 = phi i32 (%struct.A*)* [ %memptr.virtualfn, %memptr.virtual ], [ %memptr.nonvirtualfn, %memptr.nonvirtual ] + %call = call i32 %7(%struct.A* %this.adjusted) + ret i32 %call +} + +define i32 @_Z2g1v() { +entry: + %a = alloca %struct.A, align 8 + %0 = bitcast %struct.A* %a to i8* + call void @llvm.lifetime.start.p0i8(i64 8, i8* nonnull %0) + call void 
@_ZN1AC1Ev(%struct.A* nonnull %a) + %call = call i32 @_Z1fP1AMS_FivE(%struct.A* nonnull %a, i64 1, i64 0) + call void @llvm.lifetime.end.p0i8(i64 8, i8* nonnull %0) + ret i32 %call +} + +define linkonce_odr void @_ZN1AC1Ev(%struct.A* %this) align 2 { +entry: + call void @_ZN1AC2Ev(%struct.A* %this) + ret void +} + +define i32 @_Z2g2v() { +entry: + %a = alloca %struct.A, align 8 + %0 = bitcast %struct.A* %a to i8* + call void @llvm.lifetime.start.p0i8(i64 8, i8* nonnull %0) + call void @_ZN1AC1Ev(%struct.A* nonnull %a) + %call = call i32 @_Z1fP1AMS_FivE(%struct.A* nonnull %a, i64 9, i64 0) + call void @llvm.lifetime.end.p0i8(i64 8, i8* nonnull %0) + ret i32 %call +} + +define linkonce_odr void @_ZN1AC2Ev(%struct.A* %this) align 2 { +entry: + %0 = getelementptr %struct.A, %struct.A* %this, i64 0, i32 0 + store i32 (...)** bitcast (i8** getelementptr inbounds ({ [4 x i8*] }, { [4 x i8*] }* @_ZTV1A, i64 0, inrange i32 0, i64 2) to i32 (...)**), i32 (...)*** %0, align 8 + ret void +} + +define linkonce_odr i32 @_ZN1A3vf1Ev(%struct.A* %this) align 2 { +entry: + ret i32 1 +} + +define linkonce_odr i32 @_ZN1A3vf2Ev(%struct.A* %this) align 2 { +entry: + ret i32 2 +} From 14aef5367d0dfc2ec10ffdbbc3fb8abbc530f4d1 Mon Sep 17 00:00:00 2001 From: Mikhail Maltsev Date: Wed, 26 Feb 2020 17:54:43 +0000 Subject: [PATCH 07/11] [libcxx] Fix _LIBCPP_HAS_THREAD_API_EXTERNAL build Summary: The definition of `__libcpp_timed_backoff_policy` and the declaration of `__libcpp_thread_poll_with_backoff` must not be guarded by #if !defined(_LIBCPP_HAS_THREAD_API_EXTERNAL) because the definitions of `__libcpp_timed_backoff_policy::operator()` and `__libcpp_thread_poll_with_backoff` aren't guarded by this macro (and this is correct because these two functions are implemented in terms of other libc++ functions and don't interact with the host threading library). 
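The failure mode, reduced to a standalone sketch (the macro and type names below are placeholders, not the actual libc++ spellings): a declaration sits behind a guard that its definition does not share, so configurations that define the macro compile a member-function definition for a type that was never declared.

    #include <chrono>

    #if !defined(HAS_EXTERNAL_API)            // declaration compiled out
    struct backoff_policy {
      bool operator()(std::chrono::nanoseconds elapsed) const;
    };
    #endif

    // The definition is always compiled, so building with -DHAS_EXTERNAL_API
    // fails: 'backoff_policy' has not been declared.
    bool backoff_policy::operator()(std::chrono::nanoseconds elapsed) const {
      return elapsed > std::chrono::microseconds(64);
    }

Moving the declarations under the guard that already covers their definitions, as the diff below does, keeps both visible in the same configurations.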
Reviewers: ldionne, __simt__, EricWF, mclow.lists Reviewed By: ldionne Subscribers: dexonsmith, libcxx-commits Tags: #libc Differential Revision: https://reviews.llvm.org/D75191 --- libcxx/include/__threading_support | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/libcxx/include/__threading_support b/libcxx/include/__threading_support index 8dc6f11782d72..50f65fe278b78 100644 --- a/libcxx/include/__threading_support +++ b/libcxx/include/__threading_support @@ -264,16 +264,6 @@ void __libcpp_thread_yield(); _LIBCPP_THREAD_ABI_VISIBILITY void __libcpp_thread_sleep_for(const chrono::nanoseconds& __ns); -struct __libcpp_timed_backoff_policy { - _LIBCPP_THREAD_ABI_VISIBILITY - bool operator()(chrono::nanoseconds __elapsed) const; -}; - -template -_LIBCPP_INLINE_VISIBILITY -bool __libcpp_thread_poll_with_backoff( - _Fn && __f, _BFn && __bf, chrono::nanoseconds __max_elapsed = chrono::nanoseconds::zero()); - // Thread local storage _LIBCPP_THREAD_ABI_VISIBILITY int __libcpp_tls_create(__libcpp_tls_key* __key, @@ -290,6 +280,16 @@ int __libcpp_tls_set(__libcpp_tls_key __key, void *__p); #if (!defined(_LIBCPP_HAS_THREAD_LIBRARY_EXTERNAL) || \ defined(_LIBCPP_BUILDING_THREAD_LIBRARY_EXTERNAL)) +struct __libcpp_timed_backoff_policy { + _LIBCPP_THREAD_ABI_VISIBILITY + bool operator()(chrono::nanoseconds __elapsed) const; +}; + +template +_LIBCPP_INLINE_VISIBILITY +bool __libcpp_thread_poll_with_backoff( + _Fn && __f, _BFn && __bf, chrono::nanoseconds __max_elapsed = chrono::nanoseconds::zero()); + namespace __thread_detail { inline __libcpp_timespec_t __convert_to_timespec(const chrono::nanoseconds& __ns) From 4f71252cf8430e60837e0e030c3d40db4c79eb99 Mon Sep 17 00:00:00 2001 From: Juneyoung Lee Date: Thu, 27 Feb 2020 02:56:01 +0900 Subject: [PATCH 08/11] [TTI] Let getOperationCost assume that Freeze is free --- llvm/include/llvm/Analysis/TargetTransformInfoImpl.h | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h b/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h index 09bd13aea26ae..5c51d30384b73 100644 --- a/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h +++ b/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h @@ -62,6 +62,11 @@ class TargetTransformInfoImplBase { // Otherwise, the default basic cost is used. return TTI::TCC_Basic; + case Instruction::Freeze: + // Freeze operation is free because it should be lowered into a register + // use without any register copy in assembly code. + return TTI::TCC_Free; + case Instruction::FDiv: case Instruction::FRem: case Instruction::SDiv: From fd7c2e24c1c2ae7d0e251a86cb026710c576eaac Mon Sep 17 00:00:00 2001 From: Krzysztof Parzyszek Date: Wed, 26 Feb 2020 10:54:18 -0600 Subject: [PATCH 09/11] [SDAG] Add SDNode::values() = make_range(values_begin(), values_end()) Also use it in a few places to simplify code a little bit. 
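A sketch of the call sites this enables (illustrative only; assumes N is an SDNode* and that llvm/ADT/STLExtras.h is included for llvm::any_of):

    // Range-for over the node's result types instead of iterating
    // value_begin()/value_end() by hand:
    for (EVT VT : N->values())
      (void)VT;

    // The range also composes directly with STL-style algorithms:
    bool HasVectorResult =
        llvm::any_of(N->values(), [](EVT VT) { return VT.isVector(); });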
NFC --- llvm/include/llvm/CodeGen/SelectionDAGNodes.h | 3 +++ .../lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp | 14 +++++--------- llvm/lib/Target/Mips/MipsSEISelLowering.cpp | 5 ++--- 3 files changed, 10 insertions(+), 12 deletions(-) diff --git a/llvm/include/llvm/CodeGen/SelectionDAGNodes.h b/llvm/include/llvm/CodeGen/SelectionDAGNodes.h index 086a54c0b078c..faee8e8ffa171 100644 --- a/llvm/include/llvm/CodeGen/SelectionDAGNodes.h +++ b/llvm/include/llvm/CodeGen/SelectionDAGNodes.h @@ -1017,6 +1017,9 @@ END_TWO_BYTE_PACK() value_iterator value_begin() const { return ValueList; } value_iterator value_end() const { return ValueList+NumValues; } + iterator_range values() const { + return llvm::make_range(value_begin(), value_end()); + } /// Return the opcode of this operation for printing. std::string getOperationName(const SelectionDAG *G = nullptr) const; diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp index a624228dac0d4..4d923a3c84dc9 100644 --- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp @@ -182,9 +182,7 @@ bool VectorLegalizer::Run() { E = std::prev(DAG.allnodes_end()); I != std::next(E); ++I) { // Check if the values of the nodes contain vectors. We don't need to check // the operands because we are going to check their values at some point. - for (SDNode::value_iterator J = I->value_begin(), E = I->value_end(); - J != E; ++J) - HasVectors |= J->isVector(); + HasVectors = llvm::any_of(I->values(), [](EVT T) { return T.isVector(); }); // If we found a vector node we can start the legalization. if (HasVectors) @@ -318,12 +316,10 @@ SDValue VectorLegalizer::LegalizeOp(SDValue Op) { } } - bool HasVectorValueOrOp = false; - for (auto J = Node->value_begin(), E = Node->value_end(); J != E; ++J) - HasVectorValueOrOp |= J->isVector(); - for (const SDValue &Oper : Node->op_values()) - HasVectorValueOrOp |= Oper.getValueType().isVector(); - + bool HasVectorValueOrOp = + llvm::any_of(Node->values(), [](EVT T) { return T.isVector(); }) || + llvm::any_of(Node->op_values(), + [](SDValue O) { return O.getValueType().isVector(); }); if (!HasVectorValueOrOp) return TranslateLegalizeResults(Op, Node); diff --git a/llvm/lib/Target/Mips/MipsSEISelLowering.cpp b/llvm/lib/Target/Mips/MipsSEISelLowering.cpp index 798e8784405f7..bdf29c53cbd54 100644 --- a/llvm/lib/Target/Mips/MipsSEISelLowering.cpp +++ b/llvm/lib/Target/Mips/MipsSEISelLowering.cpp @@ -1342,9 +1342,8 @@ static SDValue lowerDSPIntr(SDValue Op, SelectionDAG &DAG, unsigned Opc) { // Scan output. SmallVector ResTys; - for (SDNode::value_iterator I = Op->value_begin(), E = Op->value_end(); - I != E; ++I) - ResTys.push_back((*I == MVT::i64) ? MVT::Untyped : *I); + for (EVT Ty : Op->values()) + ResTys.push_back((Ty == MVT::i64) ? MVT::Untyped : Ty); // Create node. 
SDValue Val = DAG.getNode(Opc, DL, ResTys, Ops); From 7822c8c03e9fe8c857da21c4ccbe28396b43130d Mon Sep 17 00:00:00 2001 From: Vedant Kumar Date: Wed, 26 Feb 2020 10:11:39 -0800 Subject: [PATCH 10/11] [lldb/test] Skip running a test under ASan, it intentionally double-frees --- .../functionalities/process_crash_info/TestProcessCrashInfo.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lldb/test/API/functionalities/process_crash_info/TestProcessCrashInfo.py b/lldb/test/API/functionalities/process_crash_info/TestProcessCrashInfo.py index 3caa7c5d905a9..d0f47de83eea4 100644 --- a/lldb/test/API/functionalities/process_crash_info/TestProcessCrashInfo.py +++ b/lldb/test/API/functionalities/process_crash_info/TestProcessCrashInfo.py @@ -25,6 +25,7 @@ def tearDown(self): self.runCmd("settings clear auto-confirm") TestBase.tearDown(self) + @skipIfAsan # The test process intentionally double-frees. @skipUnlessDarwin def test_cli(self): """Test that `process status --verbose` fetches the extended crash @@ -41,6 +42,7 @@ def test_cli(self): patterns=["\"message\".*pointer being freed was not allocated"]) + @skipIfAsan # The test process intentionally hits a memory bug. @skipUnlessDarwin def test_api(self): """Test that lldb can fetch a crashed process' extended crash information From e4af56db27e5007ae6f6095a0ba0421211de9ba3 Mon Sep 17 00:00:00 2001 From: Greg Clayton Date: Wed, 26 Feb 2020 10:30:04 -0800 Subject: [PATCH 11/11] Fix buildbots after recent GSYM commit. Added llvm-gsymutil to LLVM_TEST_DEPENDS. --- llvm/test/CMakeLists.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/llvm/test/CMakeLists.txt b/llvm/test/CMakeLists.txt index 2f19963fe1b6e..9433fd1a31b0a 100644 --- a/llvm/test/CMakeLists.txt +++ b/llvm/test/CMakeLists.txt @@ -64,6 +64,7 @@ set(LLVM_TEST_DEPENDS llvm-elfabi llvm-exegesis llvm-extract + llvm-gsymutil llvm-isel-fuzzer llvm-ifs llvm-install-name-tool