Replace '> >' in templates with >>, NFC (#12615)
The problem with greedy lexing of >> as an operator was solved in
C++11, and now templates no longer require spaces between >'s.
Krzysztof Parzyszek authored Aug 26, 2022
1 parent 2e83e03 commit 23e7944
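
For readers unfamiliar with the rationale: "NFC" conventionally tags a commit with no functional change. Under C++98/03's maximal-munch lexing rule, two adjacent closing angle brackets were always tokenized as the right-shift operator >>, so nested template argument lists required a space between them. C++11 amended the grammar so that >> can also close two template argument lists. A minimal sketch illustrating this (the variable names are placeholders, not part of this commit):

    #include <vector>

    // C++98/03: the lexer's maximal-munch rule tokenized the adjacent
    // closing brackets as operator>>, so the space was mandatory.
    std::vector<std::vector<int> > spaced;    // required pre-C++11, still legal

    // C++11 and later: >> at the end of nested template argument lists
    // is parsed as two closing brackets, so the space can be dropped.
    std::vector<std::vector<int>> unspaced;   // ill-formed in C++03, fine in C++11

    int main() { return 0; }

Every hunk below is this same mechanical substitution of '> >' with '>>', plus, where a signature got one character shorter, the resulting re-alignment of continuation lines.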
Showing 46 changed files with 128 additions and 130 deletions.
10 changes: 5 additions & 5 deletions docs/arch/convert_layout.rst
@@ -150,10 +150,10 @@ First example is for layout agnostic operators. These operators do not have any
// .set_attr<FInferCorrectLayout>("FInferCorrectLayout", ElemwiseArbitraryLayout);

// Take arbitrary input layouts and copy to outputs.
- inline Array<Array<Layout> > ElemwiseArbitraryLayout(const Attrs& attrs,
- const Array<Layout>& new_in_layouts,
- const Array<Layout>& old_in_layouts,
- const Array<Array<IndexExpr>> &old_in_shapes) {
+ inline Array<Array<Layout>> ElemwiseArbitraryLayout(const Attrs& attrs,
+ const Array<Layout>& new_in_layouts,
+ const Array<Layout>& old_in_layouts,
+ const Array<Array<IndexExpr>> &old_in_shapes) {
Layout ret;

if (new_in_layouts.defined()) {
@@ -168,7 +168,7 @@ First example is for layout agnostic operators. These operators do not have any
}
}

- return Array<Array<Layout> >{Array<Layout>(old_in_layouts.size(), ret), {ret}};
+ return Array<Array<Layout>>{Array<Layout>(old_in_layouts.size(), ret), {ret}};
}


4 changes: 2 additions & 2 deletions docs/arch/inferbound.rst
@@ -280,7 +280,7 @@ Phase 3: Propagate IntSets to consumer's input tensors
/*
* Input: Map<IterVar, IntSet> dom_map: consumer root -> IntSet
- * Output: Map<Tensor, TensorDom> tmap: output tensor -> vector<vector<IntSet> >
+ * Output: Map<Tensor, TensorDom> tmap: output tensor -> vector<vector<IntSet>>
*/
Note that the consumer's input tensors are output tensors of the stage InferBound is working on. So by establishing information about the consumer's input tensors, we actually obtain information about the stage's output tensors too: the consumers require certain regions of these tensors to be computed. This information can then be propagated through the rest of the stage, eventually obtaining Ranges for the stage's root_iter_vars by the end of Phase 4.
@@ -306,7 +306,7 @@ Phase 4: Consolidate across all consumers
.. code:: cpp
/*
- * Input: Map<Tensor, TensorDom> tmap: output tensor -> vector<vector<IntSet> >
+ * Input: Map<Tensor, TensorDom> tmap: output tensor -> vector<vector<IntSet>>
* Output: Map<IterVar, Range> rmap: rmap is populated for all of the stage's root_iter_vars
*/
2 changes: 1 addition & 1 deletion docs/dev/how_to/relay_bring_your_own_codegen.rst
@@ -676,7 +676,7 @@ Again, we first define a customized runtime class as follows. The class has to b
/* \brief The subgraph that being processed. */
std::string curr_subgraph_;
/*! \brief A simple graph from subgraph id to node entries. */
- std::map<std::string, std::vector<NodeEntry> > graph_;
+ std::map<std::string, std::vector<NodeEntry>> graph_;
/* \brief A simple pool to contain the tensor for each node in the graph. */
std::vector<NDArray> data_entry_;
/* \brief A mapping from node id to op name. */
8 changes: 4 additions & 4 deletions include/tvm/auto_scheduler/feature.h
@@ -70,7 +70,7 @@ void GetPerStoreFeatureName(int max_n_bufs, std::vector<std::string>* ret);
*/
void GetPerStoreFeaturesFromStates(const Array<State>& states, const SearchTask& task,
int skip_first_n_feature_extraction, int max_n_bufs,
- std::vector<std::vector<float> >* features);
+ std::vector<std::vector<float>>* features);

/*!
* \brief Get per-store feature from states of different tasks
@@ -83,7 +83,7 @@ void GetPerStoreFeaturesFromStates(const Array<State>& states, const SearchTask&
*/
void GetPerStoreFeaturesFromStates(const Array<State>& states, const std::vector<SearchTask>& tasks,
int skip_first_n_feature_extraction, int max_n_bufs,
- std::vector<std::vector<float> >* features);
+ std::vector<std::vector<float>>* features);

/*!
* \brief Get per-store features from a log file
@@ -96,7 +96,7 @@ void GetPerStoreFeaturesFromStates(const Array<State>& states, const std::vector
* \param task_ids The task ids for all states
*/
void GetPerStoreFeaturesFromFile(const std::string& filename, int max_lines, int max_n_bufs,
- std::vector<std::vector<float> >* features,
+ std::vector<std::vector<float>>* features,
std::vector<float>* normalized_throughputs,
std::vector<int>* task_ids);

@@ -114,7 +114,7 @@ void GetPerStoreFeaturesFromFile(const std::string& filename, int max_lines, int
void GetPerStoreFeaturesFromMeasurePairs(const Array<MeasureInput>& inputs,
const Array<MeasureResult>& results,
int skip_first_n_feature_extraction, int max_n_bufs,
- std::vector<std::vector<float> >* features,
+ std::vector<std::vector<float>>* features,
std::vector<float>* normalized_throughputs,
std::vector<int>* task_ids);

14 changes: 7 additions & 7 deletions include/tvm/relay/attrs/image.h
@@ -46,9 +46,9 @@ struct Resize1DAttrs : public tvm::AttrsNode<Resize1DAttrs> {
DataType out_dtype;

TVM_DECLARE_ATTRS(Resize1DAttrs, "relay.attrs.Resize1DAttrs") {
- TVM_ATTR_FIELD(size).set_default(NullValue<Array<IndexExpr> >()).describe("Output Size.");
+ TVM_ATTR_FIELD(size).set_default(NullValue<Array<IndexExpr>>()).describe("Output Size.");
TVM_ATTR_FIELD(roi)
- .set_default(NullValue<Array<FloatImm> >())
+ .set_default(NullValue<Array<FloatImm>>())
.describe("Region of Interest for coordinate transformation mode 'tf_crop_and_resize'");
TVM_ATTR_FIELD(layout).set_default("NCW").describe(
"Dimension ordering of input data. Can be 'NCW', 'NWC', etc."
@@ -99,9 +99,9 @@ struct Resize2DAttrs : public tvm::AttrsNode<Resize2DAttrs> {
DataType out_dtype;

TVM_DECLARE_ATTRS(Resize2DAttrs, "relay.attrs.Resize2DAttrs") {
- TVM_ATTR_FIELD(size).set_default(NullValue<Array<IndexExpr> >()).describe("Output Size.");
+ TVM_ATTR_FIELD(size).set_default(NullValue<Array<IndexExpr>>()).describe("Output Size.");
TVM_ATTR_FIELD(roi)
- .set_default(NullValue<Array<FloatImm> >())
+ .set_default(NullValue<Array<FloatImm>>())
.describe("Region of Interest for coordinate transformation mode 'tf_crop_and_resize'");
TVM_ATTR_FIELD(layout).set_default("NCHW").describe(
"Dimension ordering of input data. Can be 'NCHW', 'NHWC', etc."
@@ -152,9 +152,9 @@ struct Resize3DAttrs : public tvm::AttrsNode<Resize3DAttrs> {
DataType out_dtype;

TVM_DECLARE_ATTRS(Resize3DAttrs, "relay.attrs.Resize3DAttrs") {
- TVM_ATTR_FIELD(size).set_default(NullValue<Array<IndexExpr> >()).describe("Output Size.");
+ TVM_ATTR_FIELD(size).set_default(NullValue<Array<IndexExpr>>()).describe("Output Size.");
TVM_ATTR_FIELD(roi)
- .set_default(NullValue<Array<FloatImm> >())
+ .set_default(NullValue<Array<FloatImm>>())
.describe("Region of Interest for coordinate transformation mode 'tf_crop_and_resize'");
TVM_ATTR_FIELD(layout).set_default("NCDHW").describe(
"Dimension ordering of input data. Can be 'NCDHW', 'NDHWC', etc."
@@ -200,7 +200,7 @@ struct CropAndResizeAttrs : public tvm::AttrsNode<CropAndResizeAttrs> {
DataType out_dtype;

TVM_DECLARE_ATTRS(CropAndResizeAttrs, "relay.attrs.CropAndResizeAttrs") {
- TVM_ATTR_FIELD(crop_size).set_default(NullValue<Array<IndexExpr> >()).describe("Target Size.");
+ TVM_ATTR_FIELD(crop_size).set_default(NullValue<Array<IndexExpr>>()).describe("Target Size.");
TVM_ATTR_FIELD(layout).set_default("NCHW").describe(
"Dimension ordering of input data. Can be 'NCHW', 'NHWC', etc."
"'N', 'C', 'H', 'W' stands for batch, channel, height, and width"
2 changes: 1 addition & 1 deletion include/tvm/runtime/module.h
@@ -234,7 +234,7 @@ class TVM_DLL ModuleNode : public Object {

private:
/*! \brief Cache used by GetImport */
- std::unordered_map<std::string, std::shared_ptr<PackedFunc> > import_cache_;
+ std::unordered_map<std::string, std::shared_ptr<PackedFunc>> import_cache_;
std::mutex mutex_;
};

2 changes: 1 addition & 1 deletion include/tvm/support/span.h
@@ -68,7 +68,7 @@ class Span {

inline bool operator!=(iterator_base<W1> other) { return !(*this == other); }

- template <class X = W1, typename = std::enable_if_t<!std::is_const<X>::value> >
+ template <class X = W1, typename = std::enable_if_t<!std::is_const<X>::value>>
inline operator iterator_base<const_W>() const {
return iterator_base<const_W>(ptr_, end_);
}
2 changes: 1 addition & 1 deletion include/tvm/te/operation.h
@@ -47,7 +47,7 @@ struct TensorDom {
// constructor
explicit TensorDom(int ndim) : data(ndim) {}
/*! \brief The domain data */
- std::vector<std::vector<IntSet> > data;
+ std::vector<std::vector<IntSet>> data;
};

/*!
2 changes: 1 addition & 1 deletion include/tvm/topi/detail/extern.h
@@ -75,7 +75,7 @@ using FExtern = std::function<PrimExpr(Array<Buffer>, Array<Buffer>)>;
* be one output Tensor for each element of out_shapes, with dtype equal to the corresponding
* element of out_types.
*/
- inline Array<Tensor> make_extern(const Array<Array<PrimExpr> >& out_shapes,
+ inline Array<Tensor> make_extern(const Array<Array<PrimExpr>>& out_shapes,
const std::vector<DataType>& out_types,
const Array<Tensor>& inputs, FExtern fextern, std::string name,
std::string tag, ::tvm::Map<String, ObjectRef> attrs) {
2 changes: 1 addition & 1 deletion include/tvm/topi/transform.h
@@ -592,7 +592,7 @@ inline Array<Tensor> split(const Tensor& x, Array<PrimExpr> split_indices, int a
begin_ids.push_back(idx);
}

- Array<Array<PrimExpr> > out_shapes;
+ Array<Array<PrimExpr>> out_shapes;
for (size_t i = 0; i < begin_ids.size(); ++i) {
PrimExpr out_axis_size;
if (i == begin_ids.size() - 1) {
4 changes: 2 additions & 2 deletions jvm/native/src/main/native/org_apache_tvm_native_c_api.cc
@@ -42,8 +42,8 @@ struct TVMFuncArgsThreadLocalEntry {
std::vector<TVMValue> tvmFuncArgValues;
std::vector<int> tvmFuncArgTypes;
// for later release
- std::vector<std::pair<jstring, const char*> > tvmFuncArgPushedStrs;
- std::vector<std::pair<jbyteArray, TVMByteArray*> > tvmFuncArgPushedBytes;
+ std::vector<std::pair<jstring, const char*>> tvmFuncArgPushedStrs;
+ std::vector<std::pair<jbyteArray, TVMByteArray*>> tvmFuncArgPushedBytes;
};
typedef dmlc::ThreadLocalStore<TVMFuncArgsThreadLocalEntry> TVMFuncArgsThreadLocalStore;

2 changes: 1 addition & 1 deletion src/arith/analyzer.cc
@@ -186,7 +186,7 @@ TVM_REGISTER_GLOBAL("arith.CreateAnalyzer").set_body([](TVMArgs args, TVMRetValu
return PackedFunc([self](TVMArgs args, TVMRetValue* ret) {
// can't use make_shared due to noexcept(false) decl in destructor,
// see https://stackoverflow.com/a/43907314
- auto ctx = std::shared_ptr<With<ConstraintContext> >(
+ auto ctx = std::shared_ptr<With<ConstraintContext>>(
new With<ConstraintContext>(self.get(), args[0]));
auto fexit = [ctx](TVMArgs, TVMRetValue*) mutable { ctx.reset(); };
*ret = PackedFunc(fexit);
14 changes: 7 additions & 7 deletions src/autotvm/touch_extractor.cc
@@ -220,7 +220,7 @@ void TouchExtractor::ExitMem_() {}
* \note If you want to flatten these features as the input of your model,
* You can use the faster one GetItervarFeatureFlatten below.
*/
- void GetItervarFeature(Stmt stmt, bool take_log, Array<Array<Array<PrimExpr> > >* ret_feature) {
+ void GetItervarFeature(Stmt stmt, bool take_log, Array<Array<Array<PrimExpr>>>* ret_feature) {
// extract
TouchExtractor touch_analyzer;
touch_analyzer.Analyze(stmt);
@@ -248,7 +248,7 @@ void GetItervarFeature(Stmt stmt, bool take_log, Array<Array<Array<PrimExpr> > >

// serialize for front end
for (auto var : vars) {
- Array<Array<PrimExpr> > feature_row;
+ Array<Array<PrimExpr>> feature_row;
ItervarFeature& fea = touch_analyzer.itervar_map[var];
feature_row.push_back(Array<PrimExpr>{tvm::tir::StringImm("_itervar_"), var});

@@ -389,10 +389,10 @@ void GetCurveSampleFeatureFlatten(Stmt stmt, int sample_n, std::vector<float>* r
});

int max_depth = 0;
- std::map<TouchedBuffer, std::vector<double> > reuse_curve;
- std::map<TouchedBuffer, std::vector<double> > count_curve;
- std::map<TouchedBuffer, std::vector<double> > topdown_curve;
- std::map<TouchedBuffer, std::vector<double> > bottomup_curve;
+ std::map<TouchedBuffer, std::vector<double>> reuse_curve;
+ std::map<TouchedBuffer, std::vector<double>> count_curve;
+ std::map<TouchedBuffer, std::vector<double>> topdown_curve;
+ std::map<TouchedBuffer, std::vector<double>> bottomup_curve;
std::set<TouchedBuffer> innermost_buffers;
std::set<std::string> added;

@@ -485,7 +485,7 @@ TVM_REGISTER_GLOBAL("autotvm.feature.GetItervarFeature")
.set_body([](TVMArgs args, TVMRetValue* ret) {
Stmt stmt = args[0];
bool take_log = args[1];
- Array<Array<Array<PrimExpr> > > ret_feature;
+ Array<Array<Array<PrimExpr>>> ret_feature;

GetItervarFeature(stmt, take_log, &ret_feature);

8 changes: 4 additions & 4 deletions src/contrib/ethosu/cascader/propagator.cc
@@ -34,7 +34,7 @@ namespace ethosu {
namespace cascader {

void PropagatorNode::VisitAttrs(AttrVisitor* v) {
- Array<Array<FloatImm> > tmp_transform;
+ Array<Array<FloatImm>> tmp_transform;
for (const auto& vec : transform_) {
tmp_transform.push_back(make_array(vec));
}
@@ -43,7 +43,7 @@ void PropagatorNode::VisitAttrs(AttrVisitor* v) {
v->Visit("_offset", &tmp_arr);
}

- Propagator::Propagator(const std::vector<std::vector<float> >& transform,
+ Propagator::Propagator(const std::vector<std::vector<float>>& transform,
const std::vector<int>& offset) {
auto n = make_object<PropagatorNode>();
size_t rows = transform.size();
@@ -102,8 +102,8 @@ StripeConfig PropagatorNode::propagate(const StripeConfig& stripe_config) const
}

TVM_REGISTER_GLOBAL("contrib.ethosu.cascader.Propagator")
- .set_body_typed([](Array<Array<FloatImm> > transform, Array<Integer> offset) {
- std::vector<std::vector<float> > vtransform;
+ .set_body_typed([](Array<Array<FloatImm>> transform, Array<Integer> offset) {
+ std::vector<std::vector<float>> vtransform;
for (const auto& vec : transform) {
vtransform.push_back(make_vector<float, FloatImm>(vec));
}
6 changes: 3 additions & 3 deletions src/contrib/ethosu/cascader/propagator.h
@@ -43,7 +43,7 @@ class PropagatorNode : public Object {
void VisitAttrs(AttrVisitor* v);

/*! \return The transform matrix to apply to the StripeConfigs */
- const std::vector<std::vector<float> > GetTransform() const { return transform_; }
+ const std::vector<std::vector<float>> GetTransform() const { return transform_; }
/*! \return The offset vector to apply to the StripeConfigs */
const std::vector<int> GetOffset() const { return offset_; }
/*! \return The number of input dimensions */
@@ -92,7 +92,7 @@ class PropagatorNode : public Object {
friend class Propagator;

/*! \brief The transform matrix to apply to the StripeConfigs */
- std::vector<std::vector<float> > transform_;
+ std::vector<std::vector<float>> transform_;
/*! \brief The offset vector to apply to the StripeConfigs */
std::vector<int> offset_;
};
@@ -124,7 +124,7 @@ class PropagatorNode : public Object {
*/
class Propagator : public ObjectRef {
public:
- Propagator(const std::vector<std::vector<float> >& transform, const std::vector<int>& offset);
+ Propagator(const std::vector<std::vector<float>>& transform, const std::vector<int>& offset);

TVM_DEFINE_OBJECT_REF_METHODS(Propagator, ObjectRef, PropagatorNode);
};
2 changes: 1 addition & 1 deletion src/ir/span.cc
@@ -30,7 +30,7 @@ namespace tvm {
ObjectPtr<Object> GetSourceNameNode(const String& name) {
// always return pointer as the reference can change as map re-allocate.
// or use another level of indirection by creating a unique_ptr
- static std::unordered_map<String, ObjectPtr<SourceNameNode> > source_map;
+ static std::unordered_map<String, ObjectPtr<SourceNameNode>> source_map;

auto sn = source_map.find(name);
if (sn == source_map.end()) {
2 changes: 1 addition & 1 deletion src/node/reflection.cc
@@ -254,7 +254,7 @@ void NodeListAttrNames(TVMArgs args, TVMRetValue* ret) {
Object* self = static_cast<Object*>(args[0].value().v_handle);

auto names =
- std::make_shared<std::vector<std::string> >(ReflectionVTable::Global()->ListAttrNames(self));
+ std::make_shared<std::vector<std::string>>(ReflectionVTable::Global()->ListAttrNames(self));

*ret = PackedFunc([names](TVMArgs args, TVMRetValue* rv) {
int64_t i = args[0];
2 changes: 1 addition & 1 deletion src/printer/meta_data.h
@@ -136,7 +136,7 @@ class TextMetaDataContext {

private:
/*! \brief additional metadata stored in TVM json format */
- std::unordered_map<String, Array<ObjectRef> > meta_data_;
+ std::unordered_map<String, Array<ObjectRef>> meta_data_;
/*! \brief map from meta data into its string representation */
std::unordered_map<ObjectRef, Doc, ObjectPtrHash, ObjectPtrEqual> meta_repr_;
};
4 changes: 2 additions & 2 deletions src/relay/analysis/dependency_graph.cc
@@ -56,11 +56,11 @@ class DependencyGraph::Creator : private MixedModeVisitor {
}

void Depend(DependencyGraph::Node* parent, DependencyGraph::Node* child) {
- auto* parent_link = arena_->make<LinkNode<DependencyGraph::Node*> >();
+ auto* parent_link = arena_->make<LinkNode<DependencyGraph::Node*>>();
parent_link->value = parent;
child->parents.Push(parent_link);

- auto* child_link = arena_->make<LinkNode<DependencyGraph::Node*> >();
+ auto* child_link = arena_->make<LinkNode<DependencyGraph::Node*>>();
child_link->value = child;
parent->children.Push(child_link);
}
2 changes: 1 addition & 1 deletion src/relay/ir/transform.cc
@@ -126,7 +126,7 @@ IRModule FunctionPassNode::operator()(IRModule mod, const PassContext& pass_ctx)

IRModule updated_mod = mod->ShallowCopy();

- std::vector<std::pair<GlobalVar, Function> > updates;
+ std::vector<std::pair<GlobalVar, Function>> updates;
for (const auto& kv : mod->functions) {
// only process optimizable Relay Functions
if (const auto* function_node = AsOptimizableFunctionNode(kv.second)) {
8 changes: 4 additions & 4 deletions src/relay/transforms/convert_sparse_dense.cc
@@ -73,7 +73,7 @@ TVM_REGISTER_GLOBAL("relay.analysis.search_dense_op_weight").set_body_typed(Sear
class DenseToSparseDenseMutator : public ExprRewriter {
public:
DenseToSparseDenseMutator(const Array<ObjectRef>& weight_name,
- const Array<Array<PrimExpr> >& weight_shape)
+ const Array<Array<PrimExpr>>& weight_shape)
: dense_op_(Op::Get("nn.dense")), sparse_dense_op_(Op::Get("nn.sparse_dense")) {
ICHECK_EQ(weight_name.size(), weight_shape.size());
for (size_t i = 0; i < weight_name.size(); ++i) {
@@ -117,19 +117,19 @@ class DenseToSparseDenseMutator : public ExprRewriter {
// Cached op
const Op& dense_op_;
const Op& sparse_dense_op_;
- std::unordered_map<std::string, std::vector<int> > target_weights_;
+ std::unordered_map<std::string, std::vector<int>> target_weights_;
}; // class DenseToSparseDenseAlter

Expr DenseToSparse(const Expr& e, const Array<ObjectRef>& weight_name,
- const Array<Array<PrimExpr> >& weight_shape) {
+ const Array<Array<PrimExpr>>& weight_shape) {
auto rewriter = DenseToSparseDenseMutator(weight_name, weight_shape);
return PostOrderRewrite(e, &rewriter);
}

namespace transform {

Pass DenseToSparse(const Array<ObjectRef>& weight_name,
- const Array<Array<PrimExpr> >& weight_shape) {
+ const Array<Array<PrimExpr>>& weight_shape) {
runtime::TypedPackedFunc<Function(Function, IRModule, PassContext)> pass_func =
[=](Function f, IRModule m, PassContext pc) {
// Remove FreeVar warnings
2 changes: 1 addition & 1 deletion src/relay/transforms/fuse_ops.cc
@@ -180,7 +180,7 @@ class IndexedForwardGraph::Creator : private ExprVisitor {
graph_.node_map[key] = current;
}
if (parent != nullptr) {
- auto* link = arena_->make<LinkNode<IndexedForwardGraph::Edge> >();
+ auto* link = arena_->make<LinkNode<IndexedForwardGraph::Edge>>();
link->value.node = parent;
link->value.pattern = pattern;
current->outputs.Push(link);
2 changes: 1 addition & 1 deletion src/relay/transforms/let_list.h
@@ -145,7 +145,7 @@ class LetList {
}

private:
- std::vector<std::pair<Var, Expr> > lets_;
+ std::vector<std::pair<Var, Expr>> lets_;
bool used_ = false;
};

(Diffs for the remaining 23 of the 46 changed files are not shown here.)
