[NFC] Update from deprecated llvm::Optional #12734

Merged: 1 commit, Mar 23, 2023
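Background for this rename: recent LLVM releases migrate llvm::Optional to std::optional and deprecate the old spelling, so the substitution is purely mechanical and carries no functional change (hence the NFC tag). A minimal sketch of the before/after mapping follows; the parsePositive helper is illustrative, not from this PR:

#include <optional>

// Before (deprecated):                After:
//   llvm::Optional<int> x;             std::optional<int> x;
//   llvm::None                         std::nullopt
//   x.hasValue(), x.getValue()         x.has_value(), x.value()

// A toy function in the new spelling.
std::optional<int> parsePositive(int raw) {
  if (raw <= 0) return std::nullopt;  // signal "no value"
  return raw;
}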
22 changes: 11 additions & 11 deletions compiler/src/iree/compiler/API/Internal/Diagnostics.cpp
@@ -21,7 +21,7 @@ namespace mlir::iree_compiler::embed {

 namespace {
 /// Return a processable CallSiteLoc from the given location.
-Optional<CallSiteLoc> getCallSiteLoc(Location loc) {
+std::optional<CallSiteLoc> getCallSiteLoc(Location loc) {
   if (auto callLoc = dyn_cast<CallSiteLoc>(loc)) return callLoc;
   if (auto nameLoc = dyn_cast<NameLoc>(loc))
     return getCallSiteLoc(cast<NameLoc>(loc).getChildLoc());
@@ -36,31 +36,31 @@ Optional<CallSiteLoc> getCallSiteLoc(Location loc) {
   return std::nullopt;
 }

-Optional<Location> findLocToShow(Location loc) {
+std::optional<Location> findLocToShow(Location loc) {
   // Recurse into the child locations of some of location types.
-  return TypeSwitch<LocationAttr, Optional<Location>>(loc)
-      .Case([&](CallSiteLoc callLoc) -> Optional<Location> {
+  return TypeSwitch<LocationAttr, std::optional<Location>>(loc)
+      .Case([&](CallSiteLoc callLoc) -> std::optional<Location> {
         // We recurse into the callee of a call site, as the caller will be
         // emitted in a different note on the main diagnostic.
         return findLocToShow(callLoc.getCallee());
       })
-      .Case([&](FileLineColLoc) -> Optional<Location> { return loc; })
-      .Case([&](FusedLoc fusedLoc) -> Optional<Location> {
+      .Case([&](FileLineColLoc) -> std::optional<Location> { return loc; })
+      .Case([&](FusedLoc fusedLoc) -> std::optional<Location> {
         // Fused location is unique in that we try to find a sub-location to
         // show, rather than the top-level location itself.
         for (Location childLoc : fusedLoc.getLocations())
-          if (Optional<Location> showableLoc = findLocToShow(childLoc))
+          if (std::optional<Location> showableLoc = findLocToShow(childLoc))
             return showableLoc;
         return std::nullopt;
       })
-      .Case([&](NameLoc nameLoc) -> Optional<Location> {
+      .Case([&](NameLoc nameLoc) -> std::optional<Location> {
         return findLocToShow(nameLoc.getChildLoc());
       })
-      .Case([&](OpaqueLoc opaqueLoc) -> Optional<Location> {
+      .Case([&](OpaqueLoc opaqueLoc) -> std::optional<Location> {
         // OpaqueLoc always falls back to a different source location.
         return findLocToShow(opaqueLoc.getFallbackLocation());
       })
-      .Case([](UnknownLoc) -> Optional<Location> {
+      .Case([](UnknownLoc) -> std::optional<Location> {
         // Prefer not to show unknown locations.
         return std::nullopt;
       });
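The hunk above is a good illustration of the TypeSwitch-with-std::optional idiom: each Case returns either a value or std::nullopt, and recursion drills into child locations. A self-contained sketch of the same idiom against the MLIR location API (the getLine helper is hypothetical, not part of this PR):

#include <optional>

#include "llvm/ADT/TypeSwitch.h"
#include "mlir/IR/Location.h"

// Recover a source line number from a location, if one exists.
static std::optional<unsigned> getLine(mlir::Location loc) {
  return llvm::TypeSwitch<mlir::LocationAttr, std::optional<unsigned>>(loc)
      .Case([](mlir::FileLineColLoc fileLoc) -> std::optional<unsigned> {
        return fileLoc.getLine();
      })
      .Case([](mlir::NameLoc nameLoc) -> std::optional<unsigned> {
        return getLine(nameLoc.getChildLoc());  // recurse into the child
      })
      .Default([](mlir::LocationAttr) -> std::optional<unsigned> {
        return std::nullopt;  // no usable line information
      });
}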
@@ -104,7 +104,7 @@ LogicalResult FormattingDiagnosticHandler::emit(Diagnostic &diag) {
   // Assemble location fragments.
   SmallVector<std::pair<Location, StringRef>> locationStack;
   auto addLocToStack = [&](Location loc, StringRef locContext) {
-    if (Optional<Location> showableLoc = findLocToShow(loc))
+    if (std::optional<Location> showableLoc = findLocToShow(loc))
       locationStack.emplace_back(*showableLoc, locContext);
   };

7 changes: 4 additions & 3 deletions compiler/src/iree/compiler/Codegen/Common/EncodingInfo.h
@@ -27,12 +27,13 @@ struct MatmulTileParams {
 };

 /// Extracts encoding from the `tensorType` if specified.
-Optional<IREE::LinalgExt::TensorEncoding> getEncoding(
+std::optional<IREE::LinalgExt::TensorEncoding> getEncoding(
     RankedTensorType tensorType);

-Optional<MatmulType> getMatmulType(IREE::LinalgExt::TensorEncoding encoding);
+std::optional<MatmulType> getMatmulType(
+    IREE::LinalgExt::TensorEncoding encoding);

-Optional<MatmulOperandRole> getMatmulOperandRole(
+std::optional<MatmulOperandRole> getMatmulOperandRole(
     IREE::LinalgExt::TensorEncoding encoding);

 void adjustTileSizesToNarrowStaticShape(
@@ -35,7 +35,7 @@ struct MemRefTypeConverter final : public TypeConverter {
     // Pass through for all other types.
     addConversion([](Type type) { return type; });

-    addConversion([](BaseMemRefType memRefType) -> Optional<Type> {
+    addConversion([](BaseMemRefType memRefType) -> std::optional<Type> {
       // Expect #hal.descriptor_type memory spaces.
       Attribute spaceAttr = memRefType.getMemorySpace();
       if (!spaceAttr) return std::nullopt;
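For readers unfamiliar with MLIR's TypeConverter: conversion callbacks are attempted most-recently-registered first, and returning std::nullopt means "this callback does not apply, try the next one" — which is why the catch-all pass-through is registered before the specialized lambda. A small sketch of those semantics (the index-to-i64 rule is purely illustrative):

#include <optional>

#include "mlir/Transforms/DialectConversion.h"

void buildExampleConverter(mlir::TypeConverter &converter) {
  // Registered first, so consulted last: leave every type unchanged.
  converter.addConversion([](mlir::Type type) { return type; });

  // Consulted first: rewrite index to i64, defer everything else.
  converter.addConversion([](mlir::Type type) -> std::optional<mlir::Type> {
    if (type.isIndex())
      return mlir::IntegerType::get(type.getContext(), 64);
    return std::nullopt;  // fall through to the pass-through above
  });
}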
Expand Down
@@ -88,10 +88,10 @@ static bool isRankZeroOrOneMemRef(Type type) {
 struct FlattenMemRefTypeConverter final : public TypeConverter {
   FlattenMemRefTypeConverter() {
     // Allow all other types.
-    addConversion([](Type type) -> Optional<Type> { return type; });
+    addConversion([](Type type) -> std::optional<Type> { return type; });

     // Convert n-D MemRef to 1-D MemRef.
-    addConversion([](MemRefType type) -> Optional<Type> {
+    addConversion([](MemRefType type) -> std::optional<Type> {
       int64_t offset;
       SmallVector<int64_t> strides;
       if (failed(getStridesAndOffset(type, strides, offset))) {
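The conversion body is cut off in this view; as a rough illustration only (not the pass's actual logic), flattening a statically shaped memref to rank 1 can look like this:

#include <optional>

#include "mlir/Dialect/MemRef/IR/MemRef.h"

// Illustrative only: collapse a static n-D memref into a 1-D memref with
// the same element count, deferring dynamic shapes to other callbacks.
static std::optional<mlir::Type> flattenToRank1(mlir::MemRefType type) {
  if (!type.hasStaticShape()) return std::nullopt;
  return mlir::MemRefType::get({type.getNumElements()},
                               type.getElementType(),
                               mlir::MemRefLayoutAttrInterface(),
                               type.getMemorySpace());
}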
@@ -663,7 +663,7 @@ struct FoldMemRefReshape final : public OpConversionPattern<ReshapeOpTy> {
 ///
 /// Note that this should be kept consistent with how the byte offset was
 /// calculated in the subspan ops!
-Optional<int64_t> getNumBytes(Type type) {
+std::optional<int64_t> getNumBytes(Type type) {
   if (type.isIntOrFloat()) return IREE::Util::getRoundedElementByteWidth(type);
   if (auto vectorType = type.dyn_cast<VectorType>()) {
     auto elementBytes = getNumBytes(vectorType.getElementType());
@@ -736,28 +736,30 @@ struct FlattenMemRefSubspanPass
     // uniform buffers and dynamic for storage buffers. This matches how IREE
     // models runtime buffers nicely.
     FlattenMemRefTypeConverter interfaceTypeConverter;
-    interfaceTypeConverter.addConversion([](MemRefType type) -> Optional<Type> {
-      // 0-D MemRef types can be used to represent raw pointers for micro-kernel
-      // ABI purposes. Specially allow it.
-      if (isRankZeroMemRef(type)) return type;
-
-      // Fall back to the default conversion flow.
-      return std::nullopt;
-    });
+    interfaceTypeConverter.addConversion(
+        [](MemRefType type) -> std::optional<Type> {
+          // 0-D MemRef types can be used to represent raw pointers for
+          // micro-kernel ABI purposes. Specially allow it.
+          if (isRankZeroMemRef(type)) return type;
+
+          // Fall back to the default conversion flow.
+          return std::nullopt;
+        });
     flattenPatterns.add<FlattenBindingSubspan>(interfaceTypeConverter, context);

     // Other ops generate MemRef values representing internal allocations (e.g.,
     // on stack for GPU, in shared memory for GPU) or data embedded in the
     // kernel. We may not be able to go fully dynamic (e.g., memref::GlobalOp).
     // Still convert everything to 1-D though.
     FlattenMemRefTypeConverter internalTypeConverter;
-    internalTypeConverter.addConversion([](MemRefType type) -> Optional<Type> {
-      // 0-D or 1-D MemRef types are okay.
-      if (isRankZeroOrOneMemRef(type)) return type;
-
-      // Fall back to the default conversion flow.
-      return std::nullopt;
-    });
+    internalTypeConverter.addConversion(
+        [](MemRefType type) -> std::optional<Type> {
+          // 0-D or 1-D MemRef types are okay.
+          if (isRankZeroOrOneMemRef(type)) return type;
+
+          // Fall back to the default conversion flow.
+          return std::nullopt;
+        });
     flattenPatterns
         .add<FlattenAlloc<memref::AllocaOp>, FlattenAlloc<memref::AllocOp>,
              FlattenGlobal, FlattenGetGlobal, LinearizeLoadIndices,
@@ -88,7 +88,7 @@ struct FoldAffineMinOverDistributedLoopInductionVariable final
     if (!loopInfo) return failure();
     LLVM_DEBUG(llvm::dbgs() << *loopInfo);

-    Optional<int64_t> untiledStep =
+    std::optional<int64_t> untiledStep =
         getConstantIntValue(loopInfo->untiledStep);
     // For IREE right now the original untiled loop should have step 1..
     if (!untiledStep || *untiledStep != 1) return failure();
@@ -112,7 +112,7 @@ static void populateTilingCopyToWorkgroupMemPatterns(

 /// Compute a tile size so that the numer of iteraton is equal to the flat
 /// workgroup size.
-static Optional<SmallVector<int64_t>> getTileToDistributableSize(
+static std::optional<SmallVector<int64_t>> getTileToDistributableSize(
     linalg::GenericOp copyOp, int64_t flatWorkgroupSize) {
   SmallVector<int64_t, 4> shape = copyOp.getStaticLoopRanges();
   unsigned bitWidth = copyOp.getDpsInitOperand(0)
@@ -148,7 +148,7 @@ static void populateTileToUnroll(RewritePatternSet &patterns,
         SmallVector<Value, 4> tileSizesVal;
         auto copyOp = dyn_cast<linalg::GenericOp>(operation);
         if (!copyOp) return tileSizesVal;
-        Optional<SmallVector<int64_t>> staticSize =
+        std::optional<SmallVector<int64_t>> staticSize =
            getTileToDistributableSize(copyOp, flatWorkgroupSize);
         for (int64_t dim : *staticSize) {
           tileSizesVal.push_back(
@@ -60,10 +60,11 @@ class IREEComprehensiveBufferizePass
     : public IREEComprehensiveBufferizeBase<IREEComprehensiveBufferizePass> {
  public:
   explicit IREEComprehensiveBufferizePass(
-      Optional<BufferizationOptions::AllocationFn> allocationFn = std::nullopt,
-      Optional<BufferizationOptions::DeallocationFn> deallocationFn =
+      std::optional<BufferizationOptions::AllocationFn> allocationFn =
+          std::nullopt,
+      std::optional<BufferizationOptions::DeallocationFn> deallocationFn =
           std::nullopt,
-      Optional<BufferizationOptions::MemCpyFn> memCpyFn = std::nullopt)
+      std::optional<BufferizationOptions::MemCpyFn> memCpyFn = std::nullopt)
       : allocationFn(allocationFn),
         deallocationFn(deallocationFn),
         memCpyFn(memCpyFn) {}
@@ -89,9 +90,9 @@ class IREEComprehensiveBufferizePass
   void runOnOperation() override;

  private:
-  const Optional<BufferizationOptions::AllocationFn> allocationFn;
-  const Optional<BufferizationOptions::DeallocationFn> deallocationFn;
-  const Optional<BufferizationOptions::MemCpyFn> memCpyFn;
+  const std::optional<BufferizationOptions::AllocationFn> allocationFn;
+  const std::optional<BufferizationOptions::DeallocationFn> deallocationFn;
+  const std::optional<BufferizationOptions::MemCpyFn> memCpyFn;
 };
 }  // namespace

@@ -218,9 +219,9 @@ std::unique_ptr<OperationPass<ModuleOp>> createEliminateEmptyTensorsPass() {
 }

 std::unique_ptr<OperationPass<ModuleOp>> createIREEComprehensiveBufferizePass(
-    Optional<BufferizationOptions::AllocationFn> allocationFn,
-    Optional<BufferizationOptions::DeallocationFn> deallocationFn,
-    Optional<BufferizationOptions::MemCpyFn> memCpyFn) {
+    std::optional<BufferizationOptions::AllocationFn> allocationFn,
+    std::optional<BufferizationOptions::DeallocationFn> deallocationFn,
+    std::optional<BufferizationOptions::MemCpyFn> memCpyFn) {
   if (!allocationFn) allocationFn = defaultAllocationFn;
   if (!deallocationFn) deallocationFn = defaultDeallocationFn;
   if (!memCpyFn) memCpyFn = defaultMemCpyFn;
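The factory above shows the optional-hook pattern this file uses throughout: each hook parameter defaults to std::nullopt and is replaced by a library default before use. A standalone sketch of the same pattern (all names hypothetical):

#include <cstdio>
#include <functional>
#include <optional>

using AllocFn = std::function<void(int)>;

static void defaultAlloc(int bytes) { std::printf("alloc %d\n", bytes); }

// Callers may omit the hook entirely; the default is substituted here.
void runPipeline(std::optional<AllocFn> allocFn = std::nullopt) {
  if (!allocFn) allocFn = defaultAlloc;
  (*allocFn)(64);  // invoke through the engaged optional
}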
@@ -241,9 +242,9 @@ void addIREEPostBufferizationPasses(OpPassManager &passManager) {

 void addIREEComprehensiveBufferizePasses(
     OpPassManager &passManager,
-    Optional<BufferizationOptions::AllocationFn> allocationFn,
-    Optional<BufferizationOptions::DeallocationFn> deallocationFn,
-    Optional<BufferizationOptions::MemCpyFn> memCpyFn) {
+    std::optional<BufferizationOptions::AllocationFn> allocationFn,
+    std::optional<BufferizationOptions::DeallocationFn> deallocationFn,
+    std::optional<BufferizationOptions::MemCpyFn> memCpyFn) {
   passManager.addPass(createEliminateEmptyTensorsPass());
   passManager.addPass(bufferization::createEmptyTensorToAllocTensorPass());
   passManager.addPass(createIREEComprehensiveBufferizePass(
@@ -271,13 +271,13 @@ IREE::LinalgExt::MaterializeEncodingInfo chooseEncodingInfoForMatmul(
   return encodingInfo;
 }

-Optional<TensorEncoding> getEncoding(RankedTensorType tensorType) {
+std::optional<TensorEncoding> getEncoding(RankedTensorType tensorType) {
   auto encodingAttr = tensorType.getEncoding().dyn_cast_or_null<EncodingAttr>();
   if (!encodingAttr) return std::nullopt;
   return encodingAttr.getEncoding().getValue();
 }

-Optional<MatmulType> getMatmulType(TensorEncoding encoding) {
+std::optional<MatmulType> getMatmulType(TensorEncoding encoding) {
   switch (encoding) {
     case TensorEncoding::MATMUL_F32F32F32_LHS:
     case TensorEncoding::MATMUL_F32F32F32_RHS:
@@ -292,7 +292,7 @@ Optional<MatmulType> getMatmulType(TensorEncoding encoding) {
   }
 }

-Optional<MatmulOperandRole> getMatmulOperandRole(TensorEncoding encoding) {
+std::optional<MatmulOperandRole> getMatmulOperandRole(TensorEncoding encoding) {
   switch (encoding) {
     case TensorEncoding::MATMUL_F32F32F32_LHS:
     case TensorEncoding::MATMUL_I8I8I32_LHS:
@@ -334,7 +334,7 @@ void adjustTileSizesToNarrowStaticShape(MaterializeEncodingInfo &encodingInfo,
 FailureOr<MaterializeEncodingValueInfo>
 chooseDynamicEncodingInfoVMVXMicrokernels(RankedTensorType tensorType,
                                           OpBuilder &builder, Location loc) {
-  Optional<TensorEncoding> encoding = getEncoding(tensorType);
+  std::optional<TensorEncoding> encoding = getEncoding(tensorType);
   if (!encoding) return failure();
   auto matmulType = getMatmulType(*encoding);
   auto matmulOperandRole = getMatmulOperandRole(*encoding);
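chooseDynamicEncodingInfoVMVXMicrokernels also shows the standard bridge from std::optional into MLIR's FailureOr: an absent optional becomes a match failure. A minimal sketch (pickTile is hypothetical):

#include <optional>

#include "mlir/Support/LogicalResult.h"

// Absent input -> failure(); present input -> a real result.
mlir::FailureOr<int> pickTile(std::optional<int> encoding) {
  if (!encoding) return mlir::failure();
  return *encoding + 1;
}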
@@ -41,7 +41,7 @@ static unsigned dimToIndex(gpu::Dimension dim) {
 /// If the value is a threadID return the range [0, workgroupSize-1].
 /// If the number of workgroup is known also return the range of workgroupId ad
 /// workgroupCount.
-static Optional<std::pair<AffineExpr, AffineExpr>> getWorkgroupRange(
+static std::optional<std::pair<AffineExpr, AffineExpr>> getWorkgroupRange(
     Value processorValue, SmallVectorImpl<Value> & /*dims*/,
     SmallVectorImpl<Value> & /*symbols*/, ArrayRef<int64_t> workgroupCount,
     ArrayRef<int64_t> workgroupSize) {
@@ -280,7 +280,7 @@ static void addSwappingPatterns(RewritePatternSet &patterns,
                                 bool swapPaddingElideCornerCase) {
   patterns.add<linalg::ExtractSliceOfPadTensorSwapPattern>(
       patterns.getContext(),
-      [&](tensor::ExtractSliceOp) -> llvm::Optional<bool> {
+      [&](tensor::ExtractSliceOp) -> std::optional<bool> {
         return !swapPaddingElideCornerCase;
       });
 }
@@ -290,13 +290,13 @@ static void addTilingCanonicalizationPatterns(RewritePatternSet &patterns) {
   scf::populateSCFForLoopCanonicalizationPatterns(patterns);
 }

-static Optional<SmallVector<int64_t>> getGPUTensorCoreNativeMmaSyncVectorSize(
-    Operation *op) {
+static std::optional<SmallVector<int64_t>>
+getGPUTensorCoreNativeMmaSyncVectorSize(Operation *op) {
   return getMmaNativeVectorSize(op);
 }

 static void addUnrollVectorsGpuMmaSyncPatterns(RewritePatternSet &patterns) {
-  auto unrollOrder = [](Operation *op) -> Optional<SmallVector<int64_t>> {
+  auto unrollOrder = [](Operation *op) -> std::optional<SmallVector<int64_t>> {
     auto contract = dyn_cast<vector::ContractionOp>(op);
     if (!contract) return std::nullopt;
     return mlir::iree_compiler::gpuMmaUnrollOrder(contract);
@@ -307,13 +307,13 @@ static void addUnrollVectorsGpuMmaSyncPatterns(RewritePatternSet &patterns) {
           .setUnrollTraversalOrderFn(unrollOrder));
 }

-static Optional<SmallVector<int64_t>> getGPUTensorCoreNativeWmmaVectorSize(
+static std::optional<SmallVector<int64_t>> getGPUTensorCoreNativeWmmaVectorSize(
     Operation *op) {
   return getWmmaNativeVectorSize(op);
 }

 static void addUnrollVectorsGpuWmmaPatterns(RewritePatternSet &patterns) {
-  auto unrollOrder = [](Operation *op) -> Optional<SmallVector<int64_t>> {
+  auto unrollOrder = [](Operation *op) -> std::optional<SmallVector<int64_t>> {
     auto contract = dyn_cast<vector::ContractionOp>(op);
     if (!contract) return std::nullopt;
     return mlir::iree_compiler::gpuMmaUnrollOrder(contract);
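Both unrollOrder lambdas feed vector::UnrollVectorOptions, whose callbacks share this std::optional-returning shape. A sketch of wiring one up (the 4x4x4 native shape is an arbitrary illustration, not IREE's choice):

#include <optional>

#include "llvm/ADT/SmallVector.h"
#include "mlir/Dialect/Vector/Transforms/VectorRewritePatterns.h"

using namespace mlir;

void addExampleUnrollPatterns(RewritePatternSet &patterns) {
  auto nativeShape =
      [](Operation *op) -> std::optional<SmallVector<int64_t>> {
    if (isa<vector::ContractionOp>(op))
      return SmallVector<int64_t>{4, 4, 4};  // unroll to 4x4x4 tiles
    return std::nullopt;  // leave other ops alone
  };
  vector::populateVectorUnrollPatterns(
      patterns, vector::UnrollVectorOptions().setNativeShapeFn(nativeShape));
}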
@@ -851,7 +851,7 @@ void transform_dialect::TileToForallAndWorkgroupCountRegionOp::build(
 static LogicalResult lowerWorkgroupCountComputingRegion(
     transform::TransformState &state, RewriterBase &rewriter, Location loc,
     HAL::ExecutableExportOp exportOp, ArrayRef<OpFoldResult> numThreads,
-    ArrayRef<OpFoldResult> tileSizes, Optional<ArrayAttr> mapping) {
+    ArrayRef<OpFoldResult> tileSizes, std::optional<ArrayAttr> mapping) {
   Region &r = exportOp.getWorkgroupCount();
   if (!r.hasOneBlock()) {
     return rewriter.notifyMatchFailure(exportOp,
@@ -39,7 +39,7 @@ namespace iree_compiler {

 /// Returns the legal element type to use instead of the passed in element type.
 /// If the type is already legal, returns std::nullopt.
-static Optional<Type> getLegalizedElementType(Type elementType) {
+static std::optional<Type> getLegalizedElementType(Type elementType) {
   if (auto intType = elementType.dyn_cast<IntegerType>()) {
     unsigned bitWidth = intType.getWidth();
     unsigned byteAlignedBitWidth =
@@ -69,10 +69,11 @@ static Value convertElementType(OpBuilder &b, Location loc, Type targetType,

 /// Legalizes the given type. If the type is already legal, returns
 /// std::nullopt.
-static Optional<Type> getLegalizedType(Type t) {
+static std::optional<Type> getLegalizedType(Type t) {
   if (auto shapedType = t.dyn_cast<RankedTensorType>()) {
     Type elementType = shapedType.getElementType();
-    Optional<Type> legalizedElementType = getLegalizedElementType(elementType);
+    std::optional<Type> legalizedElementType =
+        getLegalizedElementType(elementType);
     if (!legalizedElementType) return std::nullopt;
     return RankedTensorType::get(shapedType.getShape(),
                                  legalizedElementType.value(),
@@ -116,7 +117,7 @@ struct ConstantOpTypeConversion
       return rewriter.notifyMatchFailure(
           constantOp, "expected attribute type to be shaped type");
     }
-    Optional<Type> legalizedElementType =
+    std::optional<Type> legalizedElementType =
         getLegalizedElementType(attrType.getElementType());
     if (!legalizedElementType) {
       return rewriter.notifyMatchFailure(constantOp,
@@ -223,7 +224,7 @@ struct GenericOpTypePropagation
         signatureConverter.addInputs(index, argType);
         continue;
       }
-      Optional<Type> legalizedArgType = getLegalizedElementType(argType);
+      std::optional<Type> legalizedArgType = getLegalizedElementType(argType);
       if (!legalizedArgType) {
         return genericOp.emitOpError("failed to get legalized type for arg ")
                << index;
@@ -268,7 +269,7 @@ struct GenericOpTypePropagation
           modifyYield = true;
           OpOperand *yieldOperand =
               modifiedOp.getMatchingYieldValue(modifiedOpOperand);
-          Optional<Type> legalizedType =
+          std::optional<Type> legalizedType =
              getLegalizedElementType(yieldOperand->get().getType());
           if (!legalizedType) {
             return genericOp.emitOpError(
@@ -298,7 +299,7 @@ struct LinalgFillTypePropagation
       linalg::FillOp fillOp, OpAdaptor adaptor,
       ConversionPatternRewriter &rewriter) const final {
     Value value = adaptor.getInputs().front();
-    Optional<Type> legalizedElementType =
+    std::optional<Type> legalizedElementType =
         getLegalizedElementType(value.getType());
     if (!legalizedElementType) {
       return fillOp.emitOpError("failed to get legalized type for value");
2 changes: 1 addition & 1 deletion compiler/src/iree/compiler/Codegen/Common/UserConfig.cpp
@@ -23,7 +23,7 @@ LogicalResult setUserConfig(
   if (failed(setTranslationInfo(entryPointFn, info))) return failure();

   SmallVector<int64_t> workgroupSize = compilationInfo.getWorkgroupSizeVals();
-  llvm::Optional<int64_t> subgroupSize = compilationInfo.getSubgroupSize();
+  std::optional<int64_t> subgroupSize = compilationInfo.getSubgroupSize();
   if (failed(setDispatchConfig(entryPointFn, workgroupSize, subgroupSize))) {
     return failure();
   }