Skip to content

Commit

Permalink
Integrate llvm-project and bump dependencies. (iree-org#12562)
Browse files Browse the repository at this point in the history
* llvm-project: e510d0bda0876c4baa3a270dca39b95da7ec6d9e
* mlir-hlo: e86610442f58b889a57bf814d75c4b50c769c2a3
* tensorflow: 67ba341c869e30ee4a89e040cd875d12b9bc666e

Cherry-picked from LLVM:
```
commit 80074d5fc0ab3f165865b15f5bf55ffac0917bcd (HEAD -> integrate-3-8-2023, fork/integrate-3-8-2023)
Author: Matthias Springer <me@m-sp.org>
Date:   Fri Mar 10 11:25:15 2023 +0100

    [mlir][NFC] reifyResultShapes: Add extra error checking
    
    This change adds a new helper function `mlir::reifyResultShapes` that calls the corresponding interface method and also checks the result produced by the implementation when running in debug mode. Bugs due to incorrect interface implementations can be difficult to debug.
    
    This helper function also reduces the amount of code needed at call sites: the cast to `ReifyRankedShapedTypeOpInterface` is done in the helper function.
    
    Differential Revision: https://reviews.llvm.org/D145777

commit 32b15f601de173e9511f470f7423108d3154e582
Author: Matthias Springer <me@m-sp.org>
Date:   Fri Mar 10 11:24:43 2023 +0100

    [mlir][tensor/linalg] Fix bug in reifyResultShapes
    
    `reifyResultShapes` should return an IntegerAttr if and only if the corresponding dimension is static.
    
    Differential Revision: https://reviews.llvm.org/D145702

commit 894555cd6adf2e0faffe713373a266650b40bb4e
Author: David Green <david.green@arm.com>
Date:   Wed Mar 8 12:48:21 2023 +0000

    [AArch64] Fix load-insert-zero patterns with i8 and negative offsets.
    
    These should have been using the LDURBi instructions where the offset is
    negative, as reported from the reproducer in D144086.
```

Created a new commit on the iree-mhlo fork:


https://github.com/iree-org/iree-mhlo-fork/commit/b14e9d9b06255e4476f5698e3bfc531dec793ded
  • Loading branch information
vmurali authored and qedawkins committed Apr 2, 2023
1 parent 55e9fd3 commit bdfd3bb
Show file tree
Hide file tree
Showing 22 changed files with 66 additions and 57 deletions.
10 changes: 4 additions & 6 deletions compiler/src/iree/compiler/Codegen/LLVMGPU/LLVMGPUTensorPad.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -85,7 +85,7 @@ static FailureOr<SmallVector<Value>> rewriteAsPaddedOp(
// Slice out the original shape from the padded result to pass on to
// consumers. The original linalg op is used to provide the dims for the reify
// result shapes.
SmallVector<SmallVector<Value>> reifiedResultShapes;
SmallVector<SmallVector<OpFoldResult>> reifiedResultShapes;
if (failed(cast<ReifyRankedShapedTypeOpInterface>(linalgOp.getOperation())
.reifyResultShapes(rewriter, reifiedResultShapes))) {
return failure();
Expand All @@ -98,8 +98,7 @@ static FailureOr<SmallVector<Value>> rewriteAsPaddedOp(
int64_t rank = paddedResult.getType().cast<RankedTensorType>().getRank();
SmallVector<OpFoldResult> offsets(rank, rewriter.getIndexAttr(0));
SmallVector<OpFoldResult> sizes;
for (Value v : reifiedResultShapes[resultNumber])
sizes.push_back(getAsOpFoldResult(v));
for (OpFoldResult v : reifiedResultShapes[resultNumber]) sizes.push_back(v);
SmallVector<OpFoldResult> strides(rank, rewriter.getIndexAttr(1));
paddedSubviewResults.push_back(rewriter.create<tensor::ExtractSliceOp>(
loc, paddedResult, offsets, sizes, strides));
Expand Down Expand Up @@ -148,16 +147,15 @@ static FailureOr<Value> rewriteAsPaddedOp(IRRewriter &rewriter,

// Slice out the original shape from the padded result to pass on to
// consumers.
SmallVector<SmallVector<Value>> reifiedResultShapes;
SmallVector<SmallVector<OpFoldResult>> reifiedResultShapes;
if (failed(op.reifyResultShapes(rewriter, reifiedResultShapes))) {
return failure();
}

Value paddedSubviewResults;
int64_t rank = paddedOp.getDestRank();
SmallVector<OpFoldResult> offsets(rank, rewriter.getIndexAttr(0));
SmallVector<OpFoldResult> sizes =
getAsOpFoldResult(ValueRange(reifiedResultShapes[0]));
SmallVector<OpFoldResult> sizes = reifiedResultShapes[0];
SmallVector<OpFoldResult> strides(rank, rewriter.getIndexAttr(1));
paddedSubviewResults = rewriter.create<tensor::ExtractSliceOp>(
loc, paddedOp.getResult(), offsets, sizes, strides);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -106,7 +106,7 @@ static void replaceOpWithRegion(PatternRewriter &rewriter, Operation *op,
Block *block = &region.front();
Operation *terminator = block->getTerminator();
ValueRange results = terminator->getOperands();
rewriter.mergeBlockBefore(block, op, blockArgs);
rewriter.inlineBlockBefore(block, op, blockArgs);
rewriter.replaceOp(op, results);
rewriter.eraseOp(terminator);
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@
#include "iree/compiler/Dialect/Flow/IR/FlowDialect.h"
#include "iree/compiler/Dialect/Flow/IR/FlowOps.h"
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Arith/Utils/Utils.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/Linalg/IR/Linalg.h"
#include "mlir/Dialect/MemRef/Transforms/Passes.h"
Expand Down Expand Up @@ -177,18 +178,19 @@ struct ConvertTensorReshapePattern : public OpRewritePattern<TensorReshapeOp> {
if (reshapeOp->template getParentOfType<Flow::DispatchWorkgroupsOp>()) {
return failure();
}
SmallVector<SmallVector<Value>> outputShape;
SmallVector<SmallVector<OpFoldResult>> outputShape;
ReifyRankedShapedTypeOpInterface reifyShapedTypeInterface =
cast<ReifyRankedShapedTypeOpInterface>(reshapeOp.getOperation());
if (failed(reifyShapedTypeInterface.reifyResultShapes(rewriter,
outputShape))) {
return failure();
}
SmallVector<Value> outputDynamicShapes;
for (auto [resultShape, outputShape] : llvm::zip_equal(
for (auto [resultShape, outputShp] : llvm::zip_equal(
reshapeOp.getResultType().getShape(), outputShape[0])) {
if (resultShape != ShapedType::kDynamic) continue;
outputDynamicShapes.push_back(outputShape);
outputDynamicShapes.push_back(getValueOrCreateConstantIndexOp(
rewriter, reshapeOp.getLoc(), outputShp));
}
rewriter.replaceOpWithNewOp<IREE::Flow::TensorReshapeOp>(
reshapeOp, reshapeOp.getResultType(), reshapeOp.getSrc(),
Expand Down
15 changes: 7 additions & 8 deletions compiler/src/iree/compiler/Dialect/Flow/IR/FlowOps.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -495,15 +495,15 @@ LogicalResult DispatchTieShapeOp::verify() {

LogicalResult DispatchTieShapeOp::reifyResultShapes(
OpBuilder &b, ReifiedRankedShapedTypeDims &reifiedReturnShapes) {
SmallVector<Value> shape;
SmallVector<OpFoldResult> shape;
unsigned dynamicIdx = 0;
auto tensorType =
getResult().getType().cast<IREE::Flow::DispatchTensorType>();
for (int64_t dim : tensorType.getShape()) {
if (dim == ShapedType::kDynamic) {
shape.push_back(getDynamicDims()[dynamicIdx++]);
} else {
shape.push_back(b.create<arith::ConstantIndexOp>(getLoc(), dim));
shape.push_back(b.getIndexAttr(dim));
}
}
reifiedReturnShapes.push_back(shape);
Expand Down Expand Up @@ -635,7 +635,7 @@ void DispatchTensorLoadOp::build(OpBuilder &builder, OperationState &state,
LogicalResult DispatchTensorLoadOp::reifyResultShapes(
OpBuilder &b, ReifiedRankedShapedTypeDims &reifiedReturnShapes) {
auto mixedSizes = getMixedSizes();
SmallVector<Value> shape;
SmallVector<OpFoldResult> shape;
if (!mixedSizes.empty()) {
// Slicing out a tile; return the size sliced.
shape.reserve(mixedSizes.size());
Expand All @@ -644,8 +644,7 @@ LogicalResult DispatchTensorLoadOp::reifyResultShapes(
if (droppedDims.test(mixedSize.index())) {
continue;
}
shape.push_back(
getValueOrCreateConstantIndexOp(b, getLoc(), mixedSize.value()));
shape.push_back(mixedSize.value());
}
} else {
// Result size matches the source size (no slicing).
Expand All @@ -654,7 +653,7 @@ LogicalResult DispatchTensorLoadOp::reifyResultShapes(
if (dim == ShapedType::kDynamic) {
shape.push_back(getSourceDims()[dynamicIdx++]);
} else {
shape.push_back(b.create<arith::ConstantIndexOp>(getLoc(), dim));
shape.push_back(b.getIndexAttr(dim));
}
}
}
Expand Down Expand Up @@ -1380,14 +1379,14 @@ LogicalResult TensorTieShapeOp::verify() {

LogicalResult TensorTieShapeOp::reifyResultShapes(
OpBuilder &b, ReifiedRankedShapedTypeDims &reifiedReturnShapes) {
SmallVector<Value> shape;
SmallVector<OpFoldResult> shape;
unsigned dynamicIdx = 0;
auto tensorType = getResult().getType().cast<RankedTensorType>();
for (int64_t dim : tensorType.getShape()) {
if (dim == ShapedType::kDynamic) {
shape.push_back(getDynamicDims()[dynamicIdx++]);
} else {
shape.push_back(b.create<arith::ConstantIndexOp>(getLoc(), dim));
shape.push_back(b.getIndexAttr(dim));
}
}
reifiedReturnShapes.push_back(shape);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -69,7 +69,7 @@ static SmallVector<Range> getLoopRangesImpl(tensor::ExtractSliceOp sliceOp,
LogicalResult status = sliceOp.reifyResultShapes(builder, resultDims);
(void)status;
assert(succeeded(status) && "reifyResultShapes failed");
return llvm::to_vector(llvm::map_range(resultDims[0], [&](Value v) {
return llvm::to_vector(llvm::map_range(resultDims[0], [&](OpFoldResult v) {
return Range{zero, v, one};
}));
}
Expand Down Expand Up @@ -158,7 +158,7 @@ LogicalResult Flow::reifyDynamicResultDims(OpBuilder &b, Value value,
if (failed(reifyShapeOp.reifyResultShapes(b, dims))) return failure();
for (int64_t i = 0; i < shapedType.getRank(); ++i)
if (shapedType.isDynamicDim(i))
dynamicDims.push_back(dims[opResult.getResultNumber()][i]);
dynamicDims.push_back(dims[opResult.getResultNumber()][i].get<Value>());
return success();
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -998,8 +998,8 @@ struct CmdExecuteOpPattern
// Begin/end recording and inline the execution region between them.
auto endOp =
rewriter.create<IREE::HAL::CommandBufferFinalizeOp>(loc, commandBuffer);
rewriter.mergeBlockBefore(&executeOp.getBody().front(), endOp,
adaptor.getResourceOperands());
rewriter.inlineBlockBefore(&executeOp.getBody().front(), endOp,
adaptor.getResourceOperands());

// Gather wait/signal fence, which are optional.
Value waitFence =
Expand Down Expand Up @@ -1032,7 +1032,7 @@ struct CmdSerialOpPattern
OpBuilder::atBlockBegin(&bodyBlock));

// Inline the serial execution region.
rewriter.mergeBlockBefore(&serialOp.getBody().front(), serialOp);
rewriter.inlineBlockBefore(&serialOp.getBody().front(), serialOp);
rewriter.eraseOp(serialOp);
return success();
}
Expand All @@ -1046,7 +1046,7 @@ struct CmdConcurrentOpPattern
ConversionPatternRewriter &rewriter) const override {
// Inline the concurrent execution region.
// TODO(benvanik): split barriers (event set/wait) when nesting.
rewriter.mergeBlockBefore(&concurrentOp.getBody().front(), concurrentOp);
rewriter.inlineBlockBefore(&concurrentOp.getBody().front(), concurrentOp);
rewriter.eraseOp(concurrentOp);
return success();
}
Expand Down
3 changes: 2 additions & 1 deletion compiler/src/iree/compiler/Dialect/VM/IR/VMOpFolders.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -2829,8 +2829,9 @@ struct SimplifyBrToBlockWithSinglePred : public OpRewritePattern<BranchOp> {
}

// Merge the successor into the current block and erase the branch.
rewriter.mergeBlocks(succ, opParent, op.getOperands());
SmallVector<Value> operands(op.getOperands());
rewriter.eraseOp(op);
rewriter.mergeBlocks(succ, opParent, operands);
return success();
}
};
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -251,7 +251,7 @@ class GlobalInitializationPass
std::pair<LogicalResult, Value> createConst(Location loc, Attribute value,
OpBuilder &builder) {
if (auto integerAttr = value.dyn_cast<IntegerAttr>()) {
if (integerAttr.getValue().isNullValue()) {
if (integerAttr.getValue().isZero()) {
// Globals are zero-initialized by default.
return {success(), {}};
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -456,8 +456,8 @@ struct CmdExecuteOpPattern
IREE::Stream::CmdExecuteOp executeOp, OpAdaptor adaptor,
ConversionPatternRewriter &rewriter) const override {
// Inline the serial execution region.
rewriter.mergeBlockBefore(&executeOp.getBody().front(), executeOp,
adaptor.getResourceOperands());
rewriter.inlineBlockBefore(&executeOp.getBody().front(), executeOp,
adaptor.getResourceOperands());
// Immediately resolve the timepoint.
auto resolvedTimepoint =
rewriter.create<arith::ConstantIntOp>(executeOp.getLoc(), 0, 64)
Expand All @@ -474,7 +474,7 @@ struct CmdSerialOpPattern
IREE::Stream::CmdSerialOp serialOp, OpAdaptor adaptor,
ConversionPatternRewriter &rewriter) const override {
// Inline the serial execution region.
rewriter.mergeBlockBefore(&serialOp.getBody().front(), serialOp);
rewriter.inlineBlockBefore(&serialOp.getBody().front(), serialOp);
rewriter.eraseOp(serialOp);
return success();
}
Expand All @@ -487,7 +487,7 @@ struct CmdConcurrentOpPattern
IREE::Stream::CmdConcurrentOp concurrentOp, OpAdaptor adaptor,
ConversionPatternRewriter &rewriter) const override {
// Inline the concurrent execution region.
rewriter.mergeBlockBefore(&concurrentOp.getBody().front(), concurrentOp);
rewriter.inlineBlockBefore(&concurrentOp.getBody().front(), concurrentOp);
rewriter.eraseOp(concurrentOp);
return success();
}
Expand Down
2 changes: 1 addition & 1 deletion integrations/tensorflow/WORKSPACE
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@

load("@bazel_tools//tools/build_defs/repo:git.bzl", "git_repository")

TENSORFLOW_COMMIT = "eece4dba1fc65f7977085581852b0d6e6d42f04e"
TENSORFLOW_COMMIT = "67ba341c869e30ee4a89e040cd875d12b9bc666e"

git_repository(
name = "org_tensorflow",
Expand Down
2 changes: 1 addition & 1 deletion integrations/tensorflow/iree_tf_compiler/BUILD
Original file line number Diff line number Diff line change
Expand Up @@ -41,10 +41,10 @@ cc_binary(
"@llvm-project//mlir:Transforms",
"@org_tensorflow//tensorflow/compiler/mlir/tensorflow",
"@org_tensorflow//tensorflow/compiler/mlir/tensorflow:tensorflow_passes",
"@org_tensorflow//tensorflow/compiler/mlir/tf2xla:xla_legalize_tf",
"@org_tensorflow//tensorflow/compiler/mlir/tosa:tf_passes",
"@org_tensorflow//tensorflow/compiler/mlir/tosa:tf_tfl_passes",
"@org_tensorflow//tensorflow/compiler/mlir/tosa:tfl_passes",
"@org_tensorflow//tensorflow/compiler/mlir/xla:xla_legalize_tf",
"@org_tensorflow//tensorflow/compiler/xla/mlir_hlo",
"@stablehlo//:chlo_ops",
],
Expand Down
2 changes: 1 addition & 1 deletion integrations/tensorflow/iree_tf_compiler/TF/BUILD
Original file line number Diff line number Diff line change
Expand Up @@ -56,8 +56,8 @@ cc_library(
"@org_tensorflow//tensorflow/compiler/mlir/tensorflow:tensorflow_passes",
"@org_tensorflow//tensorflow/compiler/mlir/tensorflow:tensorflow_types",
"@org_tensorflow//tensorflow/compiler/mlir/tensorflow:tf_saved_model_passes",
"@org_tensorflow//tensorflow/compiler/mlir/tf2xla:xla_legalize_tf",
"@org_tensorflow//tensorflow/compiler/mlir/tosa:tf_passes",
"@org_tensorflow//tensorflow/compiler/mlir/xla:xla_legalize_tf",
"@org_tensorflow//tensorflow/compiler/xla/mlir_hlo",
"@org_tensorflow//tensorflow/compiler/xla/mlir_hlo:all_passes",
"@org_tensorflow//tensorflow/compiler/xla/mlir_hlo:chlo_legalize_to_hlo",
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,7 @@
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_saved_model.h"
#include "tensorflow/compiler/mlir/tensorflow/transforms/lower_tf.h"
#include "tensorflow/compiler/mlir/xla/transforms/passes.h"
#include "tensorflow/compiler/mlir/tf2xla/transforms/passes.h"

namespace mlir {
namespace iree_integrations {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -154,8 +154,8 @@ void LowerGlobalTensors::runOnOperation() {
return;
}
auto global = globalBuilder.create<mlir::ml_program::GlobalOp>(
globalTensor.getLoc(), name, globalTensor.getValue().getType(),
globalTensor.getIsMutable(), globalTensor.getValue(), nullptr);
globalTensor.getLoc(), name, globalTensor.getValue()->getType(),
globalTensor.getIsMutable(), *globalTensor.getValue(), nullptr);
global.setPrivate();
symbolRefMap[globalTensor] = global;
}
Expand Down
18 changes: 12 additions & 6 deletions integrations/tensorflow/test/iree_tfl_tests/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -6,13 +6,19 @@ update_tflite_model_documentation.py to update this table.

| Model | Status |
| ------------------ | ------------------ |
person_detect | PASS ✓
east_text_detector | PASS ✓
vulkan_posenet_i8 | FAIL ✗
mobilenet_v3 | PASS ✓
llvmcpu_resnet_50_int8 | PASS ✓
vulkan_mobilebert_tf2_quant | FAIL ✗
cartoon_gan | PASS ✓
llvmcpu_mobilebert_tf2_quant | PASS ✓
mnasnet | PASS ✓
person_detect | PASS ✓
vulkan_posenet_i8 | FAIL ✗
east_text_detector | PASS ✓
gpt2 | PASS ✓
llvmcpu_posenet_i8 | PASS ✓
mobilenet_v3 | PASS ✓
llvmcpu_mobilenet_v1 | PASS ✓
vulkan_mobilenet_v1 | FAIL ✗
llvmcpu_mobilenet_v3-large_uint8 | PASS ✓
vulkan_mobilenet_v1 | PASS ✓
vulkan_mobilenet_v3-large_uint8 | FAIL ✗
llvmcpu_posenet_i8 | FAIL ✗
vulkan_resnet_50_int8 | FAIL ✗
Original file line number Diff line number Diff line change
@@ -1,2 +1,3 @@
# REQUIRES: llvmcpu
# RUN: %PYTHON -m iree_tfl_tests.posenet_i8_test --target_backend=llvmcpu --artifacts_dir=%t
# XFAIL: *
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
#!/bin/python3
# Copyright 2022 The IREE Authors
#
# Licensed under the Apache License v2.0 with LLVM Exceptions.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -37,12 +37,12 @@ IREE::LinalgExt::detail::verifyLinalgExtOpInterface(Operation *op) {

template <typename Ty, typename DimOpTy>
static void getDimValues(OpBuilder &b, Location loc, Value v, Ty t,
SmallVector<Value> &dimVals) {
SmallVector<OpFoldResult> &dimVals) {
for (auto dim : llvm::enumerate(t.getShape())) {
if (ShapedType::isDynamic(dim.value())) {
dimVals.push_back(b.create<DimOpTy>(loc, v, dim.index()));
dimVals.push_back(b.create<DimOpTy>(loc, v, dim.index()).getResult());
} else {
dimVals.push_back(b.create<arith::ConstantIndexOp>(loc, dim.value()));
dimVals.push_back(b.getIndexAttr(dim.value()));
}
}
}
Expand All @@ -51,7 +51,7 @@ LogicalResult LinalgExtOp::reifyResultShapes(
OpBuilder &b, ReifiedRankedShapedTypeDims &reifiedReturnShapes) {
Operation *op = getOperation();
for (auto output : getOutputs()) {
SmallVector<Value> dims;
SmallVector<OpFoldResult> dims;
Type outputType = output.getType();
if (auto rankedTensorType = outputType.dyn_cast<RankedTensorType>()) {
getDimValues<RankedTensorType, tensor::DimOp>(b, op->getLoc(), output,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -1937,14 +1937,17 @@ LogicalResult PackOp::generateScalarImplementation(OpBuilder &builder,
// over the tile dimensions.
for (auto dataTileDim :
llvm::seq<unsigned>(getInputRank(), getOutputRank() - 1)) {
Value ub = outputShape[0][dataTileDim];
Value ub = getValueOrCreateConstantIndexOp(builder, loc,
outputShape[0][dataTileDim]);
scf::ForOp loop = builder.create<scf::ForOp>(loc, zero, ub, one);
builder.setInsertionPointToStart(loop.getBody());
ivVec.push_back(loop.getInductionVar());
}
// The body of the innermost loops does the actual data movement.
builder.create<scf::ForOp>(loc, zero, outputShape[0].back(), one,
ValueRange{},
builder.create<scf::ForOp>(loc, zero,
getValueOrCreateConstantIndexOp(
builder, loc, outputShape[0].back()),
one, ValueRange{},
[&](OpBuilder &bodyBuilder, Location bodyLoc,
Value iv, ValueRange regionIterArgs) {
ivVec.push_back(iv);
Expand Down Expand Up @@ -2681,8 +2684,7 @@ LogicalResult SetEncodingOp::reifyResultShapes(
OpBuilder::InsertionGuard g(builder);
builder.setInsertionPoint(getOperation());
reifiedReturnShapes.resize(1);
reifiedReturnShapes[0] = getValueOrCreateConstantIndexOp(
builder, getLoc(), getDims(builder, getLoc(), getSource()));
reifiedReturnShapes[0] = getDims(builder, getLoc(), getSource());
return success();
}

Expand Down Expand Up @@ -2720,8 +2722,7 @@ LogicalResult UnsetEncodingOp::reifyResultShapes(
OpBuilder::InsertionGuard g(builder);
builder.setInsertionPoint(getOperation());
reifiedReturnShapes.resize(1);
reifiedReturnShapes[0] = getValueOrCreateConstantIndexOp(
builder, getLoc(), getDims(builder, getLoc(), getSource()));
reifiedReturnShapes[0] = getDims(builder, getLoc(), getSource());
return success();
}

Expand Down
Loading

0 comments on commit bdfd3bb

Please sign in to comment.