Skip to content

Commit

Permalink
[Alignment][NFC] Use Align with CreateMaskedStore
Browse files Browse the repository at this point in the history
Summary:
This patch is part of a series to introduce an Alignment type.
See this thread for context: http://lists.llvm.org/pipermail/llvm-dev/2019-July/133851.html
See this patch for the introduction of the type: https://reviews.llvm.org/D64790

Reviewers: courbet

Subscribers: hiraditya, cfe-commits, llvm-commits

Tags: #clang, #llvm

Differential Revision: https://reviews.llvm.org/D73106
  • Loading branch information
gchatelet committed Jan 22, 2020
1 parent 889a4f5 commit 0957233
Show file tree
Hide file tree
Showing 9 changed files with 63 additions and 50 deletions.
25 changes: 12 additions & 13 deletions clang/lib/CodeGen/CGBuiltin.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -9714,17 +9714,16 @@ static Value *getMaskVecValue(CodeGenFunction &CGF, Value *Mask,
return MaskVec;
}

static Value *EmitX86MaskedStore(CodeGenFunction &CGF,
ArrayRef<Value *> Ops,
unsigned Align) {
static Value *EmitX86MaskedStore(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
Align Alignment) {
// Cast the pointer to right type.
Value *Ptr = CGF.Builder.CreateBitCast(Ops[0],
llvm::PointerType::getUnqual(Ops[1]->getType()));

Value *MaskVec = getMaskVecValue(CGF, Ops[2],
Ops[1]->getType()->getVectorNumElements());

return CGF.Builder.CreateMaskedStore(Ops[1], Ptr, Align, MaskVec);
return CGF.Builder.CreateMaskedStore(Ops[1], Ptr, Alignment, MaskVec);
}

static Value *EmitX86MaskedLoad(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
Expand Down Expand Up @@ -10592,12 +10591,12 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_storedquqi512_mask:
case X86::BI__builtin_ia32_storeupd512_mask:
case X86::BI__builtin_ia32_storeups512_mask:
return EmitX86MaskedStore(*this, Ops, 1);
return EmitX86MaskedStore(*this, Ops, Align::None());

case X86::BI__builtin_ia32_storess128_mask:
case X86::BI__builtin_ia32_storesd128_mask: {
return EmitX86MaskedStore(*this, Ops, 1);
}
case X86::BI__builtin_ia32_storesd128_mask:
return EmitX86MaskedStore(*this, Ops, Align::None());

case X86::BI__builtin_ia32_vpopcntb_128:
case X86::BI__builtin_ia32_vpopcntd_128:
case X86::BI__builtin_ia32_vpopcntq_128:
Expand Down Expand Up @@ -10708,11 +10707,11 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_movdqa32store512_mask:
case X86::BI__builtin_ia32_movdqa64store512_mask:
case X86::BI__builtin_ia32_storeaps512_mask:
case X86::BI__builtin_ia32_storeapd512_mask: {
unsigned Align =
getContext().getTypeAlignInChars(E->getArg(1)->getType()).getQuantity();
return EmitX86MaskedStore(*this, Ops, Align);
}
case X86::BI__builtin_ia32_storeapd512_mask:
return EmitX86MaskedStore(
*this, Ops,
getContext().getTypeAlignInChars(E->getArg(1)->getType()).getAsAlign());

case X86::BI__builtin_ia32_loadups128_mask:
case X86::BI__builtin_ia32_loadups256_mask:
case X86::BI__builtin_ia32_loadups512_mask:
Expand Down
4 changes: 4 additions & 0 deletions llvm/include/llvm/IR/Constants.h
Original file line number Diff line number Diff line change
Expand Up @@ -157,6 +157,10 @@ class ConstantInt final : public ConstantData {
return Val.getSExtValue();
}

/// Return the constant's value interpreted as an alignment (llvm::Align),
/// built from the zero-extended value. Note that this method can assert if
/// the value does not fit in 64 bits or is not a power of two (the Align
/// constructor enforces the power-of-two invariant).
inline Align getAlignValue() const { return Align(getZExtValue()); }

/// A helper method that can be used to determine if the constant contained
/// within is equal to a constant. This only works for very small values,
/// because this is all that can be represented with all types.
Expand Down
12 changes: 10 additions & 2 deletions llvm/include/llvm/IR/IRBuilder.h
Original file line number Diff line number Diff line change
Expand Up @@ -752,13 +752,21 @@ class IRBuilderBase {
Value *PassThru = nullptr,
const Twine &Name = ""),
"Use the version that takes Align instead") {
return CreateMaskedLoad(Ptr, Align(Alignment), Mask, PassThru, Name);
return CreateMaskedLoad(Ptr, assumeAligned(Alignment), Mask, PassThru,
Name);
}
CallInst *CreateMaskedLoad(Value *Ptr, Align Alignment, Value *Mask,
Value *PassThru = nullptr, const Twine &Name = "");

/// Create a call to Masked Store intrinsic
CallInst *CreateMaskedStore(Value *Val, Value *Ptr, unsigned Align,
/// Deprecated overload taking a raw unsigned alignment: it forwards to the
/// Align-based overload, converting the integer via assumeAligned
/// (presumably mapping 0 to the minimum alignment — confirm against
/// llvm/Support/Alignment.h).
LLVM_ATTRIBUTE_DEPRECATED(CallInst *CreateMaskedStore(Value *Val, Value *Ptr,
                                                      unsigned Alignment,
                                                      Value *Mask),
                          "Use the version that takes Align instead") {
  return CreateMaskedStore(Val, Ptr, assumeAligned(Alignment), Mask);
}

CallInst *CreateMaskedStore(Value *Val, Value *Ptr, Align Alignment,
Value *Mask);

/// Create a call to Masked Gather intrinsic
Expand Down
30 changes: 16 additions & 14 deletions llvm/lib/CodeGen/ScalarizeMaskedMemIntrin.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -849,39 +849,41 @@ bool ScalarizeMaskedMemIntrin::optimizeCallInst(CallInst *CI,
bool &ModifiedDT) {
IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
if (II) {
unsigned Alignment;
switch (II->getIntrinsicID()) {
default:
break;
case Intrinsic::masked_load: {
case Intrinsic::masked_load:
// Scalarize unsupported vector masked load
Alignment = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
if (TTI->isLegalMaskedLoad(CI->getType(), MaybeAlign(Alignment)))
if (TTI->isLegalMaskedLoad(
CI->getType(),
cast<ConstantInt>(CI->getArgOperand(1))->getAlignValue()))
return false;
scalarizeMaskedLoad(CI, ModifiedDT);
return true;
}
case Intrinsic::masked_store: {
Alignment = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
if (TTI->isLegalMaskedStore(CI->getArgOperand(0)->getType(),
MaybeAlign(Alignment)))
case Intrinsic::masked_store:
if (TTI->isLegalMaskedStore(
CI->getArgOperand(0)->getType(),
cast<ConstantInt>(CI->getArgOperand(2))->getAlignValue()))
return false;
scalarizeMaskedStore(CI, ModifiedDT);
return true;
}
case Intrinsic::masked_gather:
Alignment = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
case Intrinsic::masked_gather: {
unsigned Alignment =
cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
if (TTI->isLegalMaskedGather(CI->getType(), MaybeAlign(Alignment)))
return false;
scalarizeMaskedGather(CI, ModifiedDT);
return true;
case Intrinsic::masked_scatter:
Alignment = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
}
case Intrinsic::masked_scatter: {
unsigned Alignment =
cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
if (TTI->isLegalMaskedScatter(CI->getArgOperand(0)->getType(),
MaybeAlign(Alignment)))
return false;
scalarizeMaskedScatter(CI, ModifiedDT);
return true;
}
case Intrinsic::masked_expandload:
if (TTI->isLegalMaskedExpandLoad(CI->getType()))
return false;
Expand Down
9 changes: 5 additions & 4 deletions llvm/lib/IR/AutoUpgrade.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -1237,18 +1237,19 @@ static Value *UpgradeMaskedStore(IRBuilder<> &Builder,
// Cast the pointer to the right type.
Ptr = Builder.CreateBitCast(Ptr,
llvm::PointerType::getUnqual(Data->getType()));
unsigned Align =
Aligned ? cast<VectorType>(Data->getType())->getBitWidth() / 8 : 1;
const Align Alignment =
Aligned ? Align(cast<VectorType>(Data->getType())->getBitWidth() / 8)
: Align::None();

// If the mask is all ones just emit a regular store.
if (const auto *C = dyn_cast<Constant>(Mask))
if (C->isAllOnesValue())
return Builder.CreateAlignedStore(Data, Ptr, Align);
return Builder.CreateAlignedStore(Data, Ptr, Alignment);

// Convert the mask from an integer type to a vector of i1.
unsigned NumElts = Data->getType()->getVectorNumElements();
Mask = getX86MaskVec(Builder, Mask, NumElts);
return Builder.CreateMaskedStore(Data, Ptr, Align, Mask);
return Builder.CreateMaskedStore(Data, Ptr, Alignment, Mask);
}

static Value *UpgradeMaskedLoad(IRBuilder<> &Builder,
Expand Down
14 changes: 7 additions & 7 deletions llvm/lib/IR/IRBuilder.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -487,19 +487,19 @@ CallInst *IRBuilderBase::CreateMaskedLoad(Value *Ptr, Align Alignment,
}

/// Create a call to a Masked Store intrinsic.
/// \p Val - data to be stored,
/// \p Ptr - base pointer for the store
/// \p Align - alignment of the destination location
/// \p Mask - vector of booleans which indicates what vector lanes should
/// be accessed in memory
/// \p Val - data to be stored,
/// \p Ptr - base pointer for the store
/// \p Alignment - alignment of the destination location
/// \p Mask - vector of booleans which indicates what vector lanes should
/// be accessed in memory
CallInst *IRBuilderBase::CreateMaskedStore(Value *Val, Value *Ptr,
unsigned Align, Value *Mask) {
Align Alignment, Value *Mask) {
auto *PtrTy = cast<PointerType>(Ptr->getType());
Type *DataTy = PtrTy->getElementType();
assert(DataTy->isVectorTy() && "Ptr should point to a vector");
assert(Mask && "Mask should not be all-ones (null)");
Type *OverloadedTypes[] = { DataTy, PtrTy };
Value *Ops[] = { Val, Ptr, getInt32(Align), Mask };
Value *Ops[] = {Val, Ptr, getInt32(Alignment.value()), Mask};
return CreateMaskedIntrinsic(Intrinsic::masked_store, Ops, OverloadedTypes);
}

Expand Down
2 changes: 1 addition & 1 deletion llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -1372,7 +1372,7 @@ static bool simplifyX86MaskedStore(IntrinsicInst &II, InstCombiner &IC) {
// on each element's most significant bit (the sign bit).
Constant *BoolMask = getNegativeIsTrueBoolVec(ConstMask);

IC.Builder.CreateMaskedStore(Vec, PtrCast, 1, BoolMask);
IC.Builder.CreateMaskedStore(Vec, PtrCast, Align::None(), BoolMask);

// 'Replace uses' doesn't work for stores. Erase the original masked store.
IC.eraseInstFromFunction(II);
Expand Down
11 changes: 5 additions & 6 deletions llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -2904,7 +2904,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
IRBuilder<> IRB(&I);
Value *V = I.getArgOperand(0);
Value *Addr = I.getArgOperand(1);
const MaybeAlign Alignment(
const Align Alignment(
cast<ConstantInt>(I.getArgOperand(2))->getZExtValue());
Value *Mask = I.getArgOperand(3);
Value *Shadow = getShadow(V);
Expand All @@ -2921,21 +2921,20 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
insertShadowCheck(Mask, &I);
}

IRB.CreateMaskedStore(Shadow, ShadowPtr, Alignment ? Alignment->value() : 0,
Mask);
IRB.CreateMaskedStore(Shadow, ShadowPtr, Alignment, Mask);

if (MS.TrackOrigins) {
auto &DL = F.getParent()->getDataLayout();
paintOrigin(IRB, getOrigin(V), OriginPtr,
DL.getTypeStoreSize(Shadow->getType()),
llvm::max(Alignment, kMinOriginAlignment));
std::max(Alignment, kMinOriginAlignment));
}
}

bool handleMaskedLoad(IntrinsicInst &I) {
IRBuilder<> IRB(&I);
Value *Addr = I.getArgOperand(0);
const MaybeAlign Alignment(
const Align Alignment(
cast<ConstantInt>(I.getArgOperand(1))->getZExtValue());
Value *Mask = I.getArgOperand(2);
Value *PassThru = I.getArgOperand(3);
Expand All @@ -2945,7 +2944,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
if (PropagateShadow) {
std::tie(ShadowPtr, OriginPtr) =
getShadowOriginPtr(Addr, IRB, ShadowTy, Alignment, /*isStore*/ false);
setShadow(&I, IRB.CreateMaskedLoad(ShadowPtr, *Alignment, Mask,
setShadow(&I, IRB.CreateMaskedLoad(ShadowPtr, Alignment, Mask,
getShadow(PassThru), "_msmaskedld"));
} else {
setShadow(&I, getCleanShadow(&I));
Expand Down
6 changes: 3 additions & 3 deletions llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -2343,7 +2343,7 @@ void InnerLoopVectorizer::vectorizeInterleaveGroup(Instruction *Instr,
Value *ShuffledMask = Builder.CreateShuffleVector(
BlockInMaskPart, Undefs, RepMask, "interleaved.mask");
NewStoreInstr = Builder.CreateMaskedStore(
IVec, AddrParts[Part], Group->getAlignment(), ShuffledMask);
IVec, AddrParts[Part], Group->getAlign(), ShuffledMask);
}
else
NewStoreInstr = Builder.CreateAlignedStore(IVec, AddrParts[Part],
Expand Down Expand Up @@ -2449,8 +2449,8 @@ void InnerLoopVectorizer::vectorizeMemoryInstruction(Instruction *Instr,
}
auto *VecPtr = CreateVecPtr(Part, State.get(Addr, {0, 0}));
if (isMaskRequired)
NewSI = Builder.CreateMaskedStore(
StoredVal, VecPtr, Alignment.value(), BlockInMaskParts[Part]);
NewSI = Builder.CreateMaskedStore(StoredVal, VecPtr, Alignment,
BlockInMaskParts[Part]);
else
NewSI =
Builder.CreateAlignedStore(StoredVal, VecPtr, Alignment.value());
Expand Down

0 comments on commit 0957233

Please sign in to comment.