diff --git a/include/tvm/ir/attrs.h b/include/tvm/ir/attrs.h index d2eda659a5d1e..35afed7dd267f 100644 --- a/include/tvm/ir/attrs.h +++ b/include/tvm/ir/attrs.h @@ -296,7 +296,7 @@ class DictAttrs : public Attrs { * \endcode */ bool HasNonzeroAttr(const std::string& attr_key) const { - return GetAttr<Integer>(attr_key, 0) != 0; + return GetAttr<Integer>(attr_key, 0).value_or(0).IntValue() != 0; } TVM_DEFINE_OBJECT_REF_METHODS(DictAttrs, Attrs, DictAttrsNode); diff --git a/include/tvm/ir/expr.h b/include/tvm/ir/expr.h index b54a067e1c941..b2cfc295b6b52 100644 --- a/include/tvm/ir/expr.h +++ b/include/tvm/ir/expr.h @@ -438,7 +438,7 @@ class Integer : public IntImm { /*! * \brief convert to int64_t */ - operator int64_t() const { + int64_t IntValue() const { ICHECK(data_ != nullptr) << " Trying to reference a null Integer"; return (*this)->value; } diff --git a/include/tvm/relay/feature.h b/include/tvm/relay/feature.h index 751593f94cc0b..136dcfa87c682 100644 --- a/include/tvm/relay/feature.h +++ b/include/tvm/relay/feature.h @@ -68,7 +68,7 @@ class FeatureSet { explicit FeatureSet(Feature ft) { bs_.set(static_cast<size_t>(ft)); } explicit FeatureSet(const tvm::Array<Integer>& ft) { for (Integer i : ft) { - (*this) += Feature(static_cast<int>(i)); + *this += Feature(i.IntValue()); } } explicit operator Array<Integer>() const { diff --git a/include/tvm/topi/cuda/injective.h b/include/tvm/topi/cuda/injective.h index 010fa2ce85671..79ec338aae0e5 100644 --- a/include/tvm/topi/cuda/injective.h +++ b/include/tvm/topi/cuda/injective.h @@ -48,7 +48,7 @@ namespace cuda { inline Schedule schedule_injective_from_existing(Schedule sch, const Tensor& out) { auto fused = detail::Fuse(sch[out], sch[out]->op.as<ComputeOpNode>()->axis); auto target = Target::Current(false); - int num_thread = target->GetAttr<Integer>("max_num_threads").value(); + int num_thread = target->GetAttr<Integer>("max_num_threads").value().IntValue(); IterVar bx, tx; sch[out].split(fused, num_thread, &bx, &tx); sch[out].bind(bx, thread_axis(Range(), "blockIdx.x")); diff --git a/include/tvm/topi/cuda/pooling.h b/include/tvm/topi/cuda/pooling.h index 0bb9df4a35d18..92be03123602d 100644 --- a/include/tvm/topi/cuda/pooling.h +++ b/include/tvm/topi/cuda/pooling.h @@ -57,7 +57,7 @@ inline Schedule schedule_pool(const Target& target, const Array<Tensor>& outs) { if (padded_input->op->IsInstance<ComputeOpNode>()) { s[padded_input].compute_inline(); } - int num_thread = target->GetAttr<Integer>("max_num_threads").value(); + int num_thread = target->GetAttr<Integer>("max_num_threads").value().IntValue(); Tensor out; Tensor OL; if (detail::contains(s->outputs, pool->op)) { diff --git a/include/tvm/topi/cuda/reduction.h b/include/tvm/topi/cuda/reduction.h index 51f35ed8dc25c..b1905d844250a 100644 --- a/include/tvm/topi/cuda/reduction.h +++ b/include/tvm/topi/cuda/reduction.h @@ -80,7 +80,7 @@ Schedule ScheduleReduce(const Target& target, Operation op, Schedule sch, thread_y = tvm::te::thread_axis(Range(0, num_thread), "threadIdx.y"); } else { all_reduce = true; - num_thread = target->GetAttr<Integer>("max_num_threads").value(); + num_thread = target->GetAttr<Integer>("max_num_threads").value().IntValue(); thread_x = tvm::te::thread_axis(Range(0, num_thread), "threadIdx.x"); } diff --git a/include/tvm/topi/detail/strided_slice.h b/include/tvm/topi/detail/strided_slice.h index da76022c552bc..a69f8f99ae38b 100644 --- a/include/tvm/topi/detail/strided_slice.h +++ b/include/tvm/topi/detail/strided_slice.h @@ -95,12 +95,12 @@ inline Array<PrimExpr> StridedSliceCanonicalizeBegin(const Array<PrimExpr>& isha std::string slice_mode = "end") { Array<PrimExpr> begin_expr; for (size_t i = 0; i < axes.size(); ++i) { - if 
(ishape[axes[i]]->IsInstance<tvm::IntImmNode>()) { - int64_t dim_i = GetConstInt(ishape[axes[i]]); + if (ishape[axes[i].IntValue()]->IsInstance<tvm::IntImmNode>()) { + int64_t dim_i = GetConstInt(ishape[axes[i].IntValue()]); int64_t begin_i = CanonicalizeIndex(begin[i], dim_i, strides[i]); begin_expr.push_back(make_const(dtype, begin_i)); } else { - auto idim = ishape[axes[i]]; + auto idim = ishape[axes[i].IntValue()]; auto b_expr = make_const(dtype, begin[i]); PrimExpr b = begin[i] < 0 ? b_expr + idim : b_expr; auto s = strides[i]; @@ -129,8 +129,8 @@ inline Array<PrimExpr> StridedSliceOutputShape(const Array<PrimExpr>& ishape, } for (size_t i = 0; i < axes.size(); ++i) { - if (ishape[axes[i]]->IsInstance<tvm::IntImmNode>()) { - const int64_t dim_i = GetConstInt(ishape[axes[i]]); + if (ishape[axes[i].IntValue()]->IsInstance<tvm::IntImmNode>()) { + const int64_t dim_i = GetConstInt(ishape[axes[i].IntValue()]); ICHECK(begin_canonicalized[i]->IsInstance<tvm::IntImmNode>()); int64_t begin_i = GetConstInt(begin_canonicalized[i]); int64_t end_i = CanonicalizeIndex(end[i], dim_i, strides[i]); @@ -139,11 +139,11 @@ inline Array<PrimExpr> StridedSliceOutputShape(const Array<PrimExpr>& ishape, static_cast<int>((interval + std::abs(strides[i]) - 1) / std::abs(strides[i])); ICHECK(strides[i] < 0 ? (end_i <= begin_i) : (begin_i <= end_i)) << ": Input [Begin=" << begin[i] << ", End=" << end[i] << "] is invalid for axis=" << i; - out_shape.Set(axes[i], cast(out_shape[i].dtype(), PrimExpr(slice_size))); + out_shape.Set(axes[i].IntValue(), cast(out_shape[i].dtype(), PrimExpr(slice_size))); } else if (use_any) { - out_shape.Set(axes[i], tvm::tir::Any()); + out_shape.Set(axes[i].IntValue(), tvm::tir::Any()); } else { - out_shape.Set(axes[i], tvm::tir::Var("dim", out_shape[i]->dtype)); + out_shape.Set(axes[i].IntValue(), tvm::tir::Var("dim", out_shape[i]->dtype)); } } diff --git a/include/tvm/topi/transform.h b/include/tvm/topi/transform.h index 75070e119f1f6..86a8856469519 100644 --- a/include/tvm/topi/transform.h +++ b/include/tvm/topi/transform.h @@ -790,8 +790,8 @@ inline Tensor strided_slice_with_axes(const Tensor& x, const Array<Integer>& beg for (size_t i = 0; i < out_shape.size(); ++i) real_indices.push_back(indices[i]); for (size_t i = 0; i < axes.size(); ++i) { auto stride = make_const(strides[i].dtype(), strides_vec[i]); - PrimExpr ind = indices[axes[i]] * stride + begin_expr[i]; - real_indices.Set(axes[i], ind); + PrimExpr ind = indices[axes[i].IntValue()] * stride + begin_expr[i]; + real_indices.Set(axes[i].IntValue(), ind); } return x(real_indices); }, diff --git a/src/auto_scheduler/transform_step.cc b/src/auto_scheduler/transform_step.cc index b67d5cdd7bd93..b821cf892aa78 100644 --- a/src/auto_scheduler/transform_step.cc +++ b/src/auto_scheduler/transform_step.cc @@ -501,10 +501,9 @@ Iterator FuseStepNode::ApplyToState(State* state) const { if (i > 0) { ICHECK_EQ(fused_ids[i]->value, fused_ids[i - 1]->value + 1); } - if (i != fused_ids.size() - 1) { const auto& iter_to_attached_stage = (*state)->attach_map->iter_to_attached_stages; - if (iter_to_attached_stage.find(std::make_pair(stage_id, fused_ids[i])) != + if (iter_to_attached_stage.find(std::make_pair(stage_id, fused_ids[i].IntValue())) != iter_to_attached_stage.end()) { LOG(FATAL) << "Invalid Fuse. Trying to fuse iterators that have been attached by some " << "stages. 
State before fusion:\n" @@ -512,7 +511,7 @@ Iterator FuseStepNode::ApplyToState(State* state) const { } } - const Iterator& it = stage->iters[fused_ids[i]]; + const Iterator& it = stage->iters[fused_ids[i].IntValue()]; orig_iters.push_back(it); new_name = new_name + it->name + "@"; @@ -543,9 +542,9 @@ Iterator FuseStepNode::ApplyToState(State* state) const { new_iters.push_back(new_it); } else { new_iters.insert(new_iters.end(), stage->iters.begin(), - stage->iters.begin() + fused_ids.front()); + stage->iters.begin() + fused_ids.front().IntValue()); new_iters.push_back(new_it); - new_iters.insert(new_iters.end(), stage->iters.begin() + fused_ids.back() + 1, + new_iters.insert(new_iters.end(), stage->iters.begin() + fused_ids.back().IntValue() + 1, stage->iters.end()); } @@ -561,7 +560,7 @@ Iterator FuseStepNode::ApplyToState(State* state) const { // The original iterators in AttachMap will be updated with the new iterators std::vector from_iters; std::vector to_iters; - const size_t begin_id = fused_ids.front(), end_id = fused_ids.back(); + const size_t begin_id = fused_ids.front().IntValue(), end_id = fused_ids.back().IntValue(); for (size_t i = 0; i < old_iter_size; ++i) { if (i <= begin_id) { continue; } @@ -587,7 +586,7 @@ IterVar FuseStepNode::ApplyToSchedule(Array<te::Stage>* stages, Array<IterVar> to_fuse; for (const auto& i : fused_ids) { - to_fuse.push_back(axes[i]); + to_fuse.push_back(axes[i.IntValue()]); } IterVar fused_axis; stage.fuse(to_fuse, &fused_axis); @@ -596,9 +595,9 @@ IterVar FuseStepNode::ApplyToSchedule(Array<te::Stage>* stages, if (fused_ids.empty()) { new_axes.push_back(fused_axis); } else { - new_axes.insert(new_axes.end(), axes.begin(), axes.begin() + fused_ids.front()); + new_axes.insert(new_axes.end(), axes.begin(), axes.begin() + fused_ids.front().IntValue()); new_axes.push_back(fused_axis); - new_axes.insert(new_axes.end(), axes.begin() + fused_ids.back() + 1, axes.end()); + new_axes.insert(new_axes.end(), axes.begin() + fused_ids.back().IntValue() + 1, axes.end()); } stage_to_axes->Set(stage, std::move(new_axes)); @@ -613,7 +612,8 @@ String FuseStepNode::PrintAsPythonAPI(Array<te::Stage>* stages, std::stringstream to_fuse; for (size_t i = 0; i < fused_ids.size(); ++i) { - to_fuse << CleanName(stage_to_axes->at(stage)[fused_ids[i]]->var->name_hint, op_name); + to_fuse << CleanName(stage_to_axes->at(stage)[fused_ids[i].IntValue()]->var->name_hint, + op_name); if (i != fused_ids.size() - 1) { to_fuse << ", "; } @@ -773,7 +773,7 @@ void ReorderStepNode::ApplyToState(State* state) const { const Stage& stage = (*state)->stages[stage_id]; Array<Iterator> iters; for (auto x : after_ids) { - iters.push_back(stage->iters[x]); + iters.push_back(stage->iters[x.IntValue()]); } state->CopyOnWrite()->stages.Set( stage_id, Stage(stage->op, stage->op_type, iters, stage->compute_at, stage->attrs)); @@ -788,7 +788,7 @@ void ReorderStepNode::ApplyToSchedule(Array<te::Stage>* stages, Array<IterVar> new_axes; new_axes.reserve(axes.size()); for (auto i : after_ids) { - new_axes.push_back(axes[i]); + new_axes.push_back(axes[i.IntValue()]); } stage.reorder(new_axes); @@ -804,7 +804,7 @@ String ReorderStepNode::PrintAsPythonAPI(Array<te::Stage>* stages, ss << "s[" << op_name << "].reorder("; for (size_t i = 0; i < after_ids.size(); ++i) { - ss << CleanName((*stage_to_axes)[stage][after_ids[i]]->var->name_hint, op_name); + ss << CleanName((*stage_to_axes)[stage][after_ids[i].IntValue()]->var->name_hint, op_name); if (i != after_ids.size() - 1) { ss << ", "; } @@ -1180,10 +1180,10 @@ Optional<Integer> FollowFusedSplitStepNode::ExtractSplitLength( const Array<Step>& transform_steps) 
const { PrimExpr ret(1); - for (int src_step_id : src_step_ids) { + for (auto src_step_id : src_step_ids) { // Make sure the src_step_id is within the range of transform_steps. - ICHECK_LT(src_step_id, transform_steps.size()); - auto ps = transform_steps[src_step_id].as<SplitStepNode>(); + ICHECK_LT(src_step_id.IntValue(), transform_steps.size()); + auto ps = transform_steps[src_step_id.IntValue()].as<SplitStepNode>(); ICHECK(ps != nullptr); // Multiple the splitting factor on corresponding splitting level of src_steps. if (ps->lengths[level] && ret.defined()) { @@ -1572,7 +1572,7 @@ te::Tensor CacheReadStepNode::ApplyToSchedule(Array<te::Stage>* stages, const te::Stage& stage = (*stages)[stage_id]; Array<te::Operation> readers; for (const auto& i : reader_stage_ids) { - readers.push_back((*stages)[i]->origin_op); + readers.push_back((*stages)[i.IntValue()]->origin_op); } auto out = schedule->cache_read(stage->origin_op.output(0), scope_name, readers); @@ -1591,7 +1591,7 @@ String CacheReadStepNode::PrintAsPythonAPI(Array<te::Stage>* stages, StageToAxes auto stage = (*stages)[stage_id]; Array<te::Stage> reader_stages; for (size_t i = 0; i < reader_stage_ids.size(); ++i) { - reader_stages.push_back((*stages)[reader_stage_ids[i]]); + reader_stages.push_back((*stages)[reader_stage_ids[i].IntValue()]); } auto out = ApplyToSchedule(stages, stage_to_axes, schedule); diff --git a/src/contrib/ethosu/cascader/parts/ethosu.cc b/src/contrib/ethosu/cascader/parts/ethosu.cc index 33d9b3b452dff..4fb6dbd052033 100644 --- a/src/contrib/ethosu/cascader/parts/ethosu.cc +++ b/src/contrib/ethosu/cascader/parts/ethosu.cc @@ -181,7 +181,10 @@ TVM_REGISTER_GLOBAL("contrib.ethosu.cascader.EthosuPart") Array<BlockConfig> valid_block_configs, int weight_tensor_idx) { std::vector<te::Tensor> vsubgraph_inputs(subgraph_inputs.begin(), subgraph_inputs.end()); std::vector<Propagator> vpropagators(propagators.begin(), propagators.end()); - std::vector<int> voutput_quantum(output_quantum.begin(), output_quantum.end()); + std::vector<int> voutput_quantum; + std::transform(output_quantum.begin(), output_quantum.end(), + std::back_inserter(voutput_quantum), + [](auto&& val) { return val.IntValue(); }); TESubgraph subgraph; subgraph.input_tensors = vsubgraph_inputs; subgraph.output_tensor = subgraph_output; diff --git a/src/meta_schedule/arg_info.cc b/src/meta_schedule/arg_info.cc index 21de9d719d00d..84d861cb59c3d 100644 --- a/src/meta_schedule/arg_info.cc +++ b/src/meta_schedule/arg_info.cc @@ -142,7 +142,10 @@ TensorInfo TensorInfo::FromJSON(const ObjectRef& json_obj) { LOG(FATAL) << "ValueError: Unable to parse the JSON object: " << json_obj << "\nThe error is: " << e.what(); } - return TensorInfo(DataType(dtype), ShapeTuple(shape.begin(), shape.end())); + std::vector<int64_t> s; + std::transform(shape.begin(), shape.end(), std::back_inserter(s), + [](Integer i) { return i.IntValue(); }); + return TensorInfo(DataType(dtype), ShapeTuple(s.begin(), s.end())); } /******** Repr ********/ diff --git a/src/meta_schedule/database/json_database.cc b/src/meta_schedule/database/json_database.cc index 5e7c9119c95ac..a55ffa8b283af 100644 --- a/src/meta_schedule/database/json_database.cc +++ b/src/meta_schedule/database/json_database.cc @@ -198,7 +198,7 @@ Database Database::JSONDatabase(String path_workload, String path_tuning_record, try { const ArrayNode* arr = json_obj.as<ArrayNode>(); ICHECK_EQ(arr->size(), 2); - workload = workloads[Downcast<Integer>(arr->at(0))]; + workload = workloads[Downcast<Integer>(arr->at(0)).IntValue()]; records[task_id] = TuningRecord::FromJSON(arr->at(1), workload); } catch (std::runtime_error& e) { LOG(FATAL) << "ValueError: Unable to parse TuningRecord, on 
line " << (task_id + 1) diff --git a/src/meta_schedule/postproc/rewrite_unbound_block.cc b/src/meta_schedule/postproc/rewrite_unbound_block.cc index 183f04e7ba239..eb57e90f82f66 100644 --- a/src/meta_schedule/postproc/rewrite_unbound_block.cc +++ b/src/meta_schedule/postproc/rewrite_unbound_block.cc @@ -91,7 +91,7 @@ class RewriteUnboundBlockNode : public PostprocNode { context->target.value()->GetAttr("max_threads_per_block"); CHECK(max_threads_per_block.defined()) << "ValueError: missing attribute `max_threads_per_block` in the target"; - this->max_threads_per_block_ = max_threads_per_block.value(); + this->max_threads_per_block_ = max_threads_per_block.value().IntValue(); } // Inherited from PostprocNode diff --git a/src/meta_schedule/postproc/verify_gpu_code.cc b/src/meta_schedule/postproc/verify_gpu_code.cc index 57e58e6a79ffb..857b732c98047 100644 --- a/src/meta_schedule/postproc/verify_gpu_code.cc +++ b/src/meta_schedule/postproc/verify_gpu_code.cc @@ -125,7 +125,7 @@ class VerifyGPUCodeNode : public PostprocNode { {"max_vthread", Integer(8)}, {"max_vector_bytes", Integer(16)}, }; - thread_warp_size_ = Extract(target, "thread_warp_size"); + thread_warp_size_ = Extract(target, "thread_warp_size").IntValue(); } bool Verify(const IRModule& mod) const { diff --git a/src/meta_schedule/schedule_rule/auto_bind.cc b/src/meta_schedule/schedule_rule/auto_bind.cc index 2bc90f3c2e5cf..a67432ebc5da6 100644 --- a/src/meta_schedule/schedule_rule/auto_bind.cc +++ b/src/meta_schedule/schedule_rule/auto_bind.cc @@ -168,7 +168,7 @@ class AutoBindNode : public ScheduleRuleNode { context->target.value()->GetAttr("max_threads_per_block"); CHECK(max_threads_per_block.defined()) << "ValueError: missing attribute `max_threads_per_block` in the target"; - this->max_threads_per_block_ = max_threads_per_block.value(); + this->max_threads_per_block_ = max_threads_per_block.value().IntValue(); } // Inherited from ScheduleRuleNode diff --git a/src/meta_schedule/schedule_rule/multi_level_tiling.cc b/src/meta_schedule/schedule_rule/multi_level_tiling.cc index 28c1a0fdb66e2..2f2eb219e8c79 100644 --- a/src/meta_schedule/schedule_rule/multi_level_tiling.cc +++ b/src/meta_schedule/schedule_rule/multi_level_tiling.cc @@ -118,7 +118,9 @@ std::vector MultiLevelTilingNode::AddWriteReuse(State state) const { if (Optional> ann = tir::GetAnn>( state->sch->GetSRef(state->block_rv), "meta_schedule.write_cache_level")) { req = ReuseType::kMustReuse; - levels = std::vector(ann.value().begin(), ann.value().end()); + levels.clear(); + std::transform(ann.value().begin(), ann.value().end(), std::back_inserter(levels), + [](auto&& v) { return v.IntValue(); }); } std::vector results; if (req == ReuseType::kMayReuse) { diff --git a/src/meta_schedule/utils.h b/src/meta_schedule/utils.h index b5cb73c26e001..e3d726652e0b2 100644 --- a/src/meta_schedule/utils.h +++ b/src/meta_schedule/utils.h @@ -328,7 +328,7 @@ struct ThreadedTraceApply { * \return The number of cores. 
*/ inline int GetTargetNumCores(const Target& target) { - int num_cores = target->GetAttr<Integer>("num-cores").value_or(-1); + int num_cores = target->GetAttr<Integer>("num-cores").value_or(-1).IntValue(); if (num_cores == -1) { static const auto* f_cpu_count = runtime::Registry::Get("meta_schedule.cpu_count"); ICHECK(f_cpu_count) diff --git a/src/parser/parser.cc b/src/parser/parser.cc index f51e3e5c9737f..cd208eea5d5c9 100644 --- a/src/parser/parser.cc +++ b/src/parser/parser.cc @@ -1540,7 +1540,7 @@ class Parser { } case TokenType::kBoolean: { Consume(TokenType::kBoolean); - int64_t value = Downcast<tvm::Integer>(next->data); + int64_t value = Downcast<tvm::Integer>(next->data).IntValue(); Expr e = Constant(support::BoolToNDArray(value), next->span); ICHECK(e->span.defined()) << "constant spans must be defined"; return e; diff --git a/src/parser/token.h b/src/parser/token.h index 31e974355e4b8..14e553d358f4f 100644 --- a/src/parser/token.h +++ b/src/parser/token.h @@ -387,7 +387,9 @@ Token::Token(Span span, TokenType token_type, ObjectRef data) { Token Token::Null() { return Token(Span(SourceName(), 0, 0, 0, 0), TokenType::kNull); } -int64_t Token::ToNumber() const { return Downcast<tvm::Integer>(this->operator->()->data); } +int64_t Token::ToNumber() const { + return Downcast<tvm::Integer>(this->operator->()->data).IntValue(); +} std::string Token::ToString() const { return Downcast<tvm::String>(this->operator->()->data); } diff --git a/src/relay/analysis/extract_fake_quantized_ops.cc b/src/relay/analysis/extract_fake_quantized_ops.cc index 68cee85f4305c..d66bbd635480b 100644 --- a/src/relay/analysis/extract_fake_quantized_ops.cc +++ b/src/relay/analysis/extract_fake_quantized_ops.cc @@ -55,7 +55,7 @@ class ExtractFakeQuantizedOpsWrapper : private MixedModeVisitor { if (op != dequantize_op_) { if (fake_quantized_op_freqs_.find(op->name) != fake_quantized_op_freqs_.end()) { fake_quantized_op_freqs_.Set(op->name, - int64_t(fake_quantized_op_freqs_.at(op->name)) + 1); + fake_quantized_op_freqs_.at(op->name).IntValue() + 1); } else { fake_quantized_op_freqs_.Set(op->name, 1); } diff --git a/src/relay/analysis/extract_operators.cc b/src/relay/analysis/extract_operators.cc index f150453ba0b66..051c1971f20e3 100644 --- a/src/relay/analysis/extract_operators.cc +++ b/src/relay/analysis/extract_operators.cc @@ -54,7 +54,7 @@ class OperatorExtractorWrapper : private MixedModeVisitor { auto it = operator_freqs_.find(op->name); ICHECK(it != operator_freqs_.end()) << "Call's OpNode must be visited and registered before access"; - operator_freqs_.Set(op->name, 1 + operator_freqs_.at(op->name)); + operator_freqs_.Set(op->name, 1 + operator_freqs_.at(op->name).IntValue()); } MixedModeVisitor::VisitExpr_(n); diff --git a/src/relay/backend/build_module.cc b/src/relay/backend/build_module.cc index 9a68b567305d1..39f2e7761a428 100644 --- a/src/relay/backend/build_module.cc +++ b/src/relay/backend/build_module.cc @@ -334,7 +334,9 @@ class RelayBuildModule : public runtime::ModuleNode { if (config_->optional_homogeneous_target.defined()) { // This pass currently only supports the homogeneous case. 
pass_seqs.push_back(transform::SplitArgs( - config_->optional_homogeneous_target->GetAttr<Integer>("max_function_args", -1).value())); + config_->optional_homogeneous_target->GetAttr<Integer>("max_function_args", -1) + .value() + .IntValue())); } // Always plan devices so the remaining passes don't need to distinguish homogeneous vs diff --git a/src/relay/backend/contrib/ethosu/source_module.cc b/src/relay/backend/contrib/ethosu/source_module.cc index eb4b779ecd815..f66ebd5ed2b29 100644 --- a/src/relay/backend/contrib/ethosu/source_module.cc +++ b/src/relay/backend/contrib/ethosu/source_module.cc @@ -199,7 +199,7 @@ class EthosUModuleNode : public ModuleNode { std::unordered_map<int, relay::contrib::ethosu::BaseAddress> param_idx_to_base_address; for (const relay::contrib::ethosu::BaseAddress& base_address : artifact->base_addresses) { if (base_address->primfunc_param_idx.defined()) { - param_idx_to_base_address[base_address->primfunc_param_idx] = base_address; + param_idx_to_base_address[base_address->primfunc_param_idx.IntValue()] = base_address; } } for (unsigned int i = 0; i < param_idx_to_base_address.size(); i++) { diff --git a/src/relay/backend/contrib/tensorrt/codegen.cc b/src/relay/backend/contrib/tensorrt/codegen.cc index 1c4a8d78062e7..f4babad50a3ec 100644 --- a/src/relay/backend/contrib/tensorrt/codegen.cc +++ b/src/relay/backend/contrib/tensorrt/codegen.cc @@ -291,8 +291,8 @@ class TensorRTJSONSerializer : public JSONSerializer { } ICHECK_EQ(target_attr.size(), 3); SetAttr(node, "tensorrt_version", - {std::to_string(target_attr[0]), std::to_string(target_attr[1]), - std::to_string(target_attr[2])}); + {std::to_string(target_attr[0]->value), std::to_string(target_attr[1]->value), + std::to_string(target_attr[2]->value)}); } { diff --git a/src/relay/backend/utils.cc b/src/relay/backend/utils.cc index bd3047e2862c1..fe8127d60dc9a 100644 --- a/src/relay/backend/utils.cc +++ b/src/relay/backend/utils.cc @@ -73,7 +73,7 @@ TVM_REGISTER_GLOBAL("relay.ir.StorageInfo") std::vector<int64_t> sids_v; sids_v.reserve(sids.size()); for (auto s : sids) { - sids_v.push_back(s); + sids_v.push_back(s.IntValue()); } std::vector<VirtualDevice> virtual_devices_v; virtual_devices_v.reserve(device_types.size()); @@ -83,7 +83,7 @@ TVM_REGISTER_GLOBAL("relay.ir.StorageInfo") std::vector<int64_t> size_in_bytes_v; size_in_bytes_v.reserve(sizes_in_bytes.size()); for (auto s : sizes_in_bytes) { - size_in_bytes_v.push_back(s); + size_in_bytes_v.push_back(s.IntValue()); } return StorageInfo(std::move(sids_v), std::move(virtual_devices_v), std::move(size_in_bytes_v)); diff --git a/src/relay/ir/expr.cc b/src/relay/ir/expr.cc index 85892e8223af1..5c85b3b29df79 100644 --- a/src/relay/ir/expr.cc +++ b/src/relay/ir/expr.cc @@ -401,7 +401,7 @@ TupleGetItem WithFields(TupleGetItem tuple_get_item, Optional<Expr> opt_tuple, if (!unchanged) { TupleGetItemNode* cow_tuple_get_item_node = tuple_get_item.CopyOnWrite(); cow_tuple_get_item_node->tuple = tuple; - cow_tuple_get_item_node->index = index; + cow_tuple_get_item_node->index = index.IntValue(); cow_tuple_get_item_node->span = span; cow_tuple_get_item_node->virtual_device_ = virtual_device; } diff --git a/src/relay/op/tensor/transform.cc b/src/relay/op/tensor/transform.cc index 4d5f52e61cf0d..989ab2ad25d36 100644 --- a/src/relay/op/tensor/transform.cc +++ b/src/relay/op/tensor/transform.cc @@ -370,7 +370,7 @@ bool StackRel(const Array<Type>& types, int num_inputs, const Attrs& attrs, const int ndim = static_cast<int>(first->shape.size()); // Sanity check: axis - int axis = param->axis; + int axis = param->axis.IntValue(); ICHECK(-(ndim + 1) <= axis && axis < ndim + 1) << 
"stack only accepts `axis` in [-(ndim+1), ndim+1)" << ", but got axis = " << axis << ", and ndim = " << ndim; @@ -414,7 +414,7 @@ Array StackCompute(const Attrs& attrs, const Array& inpu const Type& out_type) { const StackAttrs* param = attrs.as(); ICHECK(param != nullptr); - return {topi::stack(inputs, param->axis)}; + return {topi::stack(inputs, param->axis.IntValue())}; } Expr MakeStack(Expr data, int axis) { @@ -473,7 +473,7 @@ bool TransposeRel(const Array& types, int num_inputs, const Attrs& attrs, } else { std::vector axis_used(ndim, 0); for (const Integer& e : axes) { - int64_t axis = e; + int64_t axis = e.IntValue(); // sanity check for axis and ndim ICHECK(-ndim <= axis && axis < ndim) << "transpose only allows each `axis` in `axes` in range [-data.ndim, data.ndim)" @@ -1337,10 +1337,11 @@ Array TakeCompute(const Attrs& attrs, const Array& input const auto* param = attrs.as(); ICHECK(param != nullptr); if (!param->axis.defined()) { - return Array{topi::take(inputs[0], inputs[1], param->batch_dims, param->mode)}; - } else { return Array{ - topi::take(inputs[0], inputs[1], param->batch_dims, param->axis, param->mode)}; + topi::take(inputs[0], inputs[1], param->batch_dims.IntValue(), param->mode)}; + } else { + return Array{topi::take(inputs[0], inputs[1], param->batch_dims.IntValue(), + param->axis.IntValue(), param->mode)}; } } @@ -1658,8 +1659,8 @@ bool RepeatRel(const Array& types, int num_inputs, const Attrs& attrs, } const auto* param = attrs.as(); const int ndim = static_cast(data->shape.size()); - const int repeats = param->repeats; - const int axis = param->axis; + const int repeats = param->repeats.IntValue(); + const int axis = param->axis.IntValue(); ICHECK(repeats >= 1) << "repeat only accepts `repeats >= 1`" << ", but got repeats = " << repeats; ICHECK(-ndim - 1 <= axis && axis <= ndim) @@ -1687,7 +1688,7 @@ Array RepeatCompute(const Attrs& attrs, const Array& inp const Type& out_type) { const RepeatAttrs* param = attrs.as(); ICHECK(param != nullptr); - return {topi::repeat(inputs[0], param->repeats, param->axis)}; + return {topi::repeat(inputs[0], param->repeats.IntValue(), param->axis.IntValue())}; } Expr MakeRepeat(Expr data, int repeats, int axis) { @@ -2068,7 +2069,7 @@ bool ReverseRel(const Array& types, int num_inputs, const Attrs& attrs, } const auto* param = attrs.as(); const int ndim = static_cast(data->shape.size()); - const int axis = param->axis; + const int axis = param->axis.IntValue(); ICHECK(-ndim <= axis && axis < ndim) << "reverse only accepts `axis` in [-data.ndim, data.ndim - 1]" << ", but got axis = " << axis << ", and data.ndim = " << ndim; @@ -2081,7 +2082,7 @@ Array ReverseCompute(const Attrs& attrs, const Array& in const ReverseAttrs* param = attrs.as(); ICHECK(param != nullptr); // pass empty seq_length tensor to reverse_sequence - return {topi::reverse_sequence(inputs[0], te::Tensor(), param->axis)}; + return {topi::reverse_sequence(inputs[0], te::Tensor(), param->axis.IntValue())}; } Expr MakeReverse(Expr data, int axis) { @@ -2136,7 +2137,7 @@ bool ReverseSequenceRel(const Array& types, int num_inputs, const Attrs& a const auto* param = attrs.as(); const int ndim = static_cast(data->shape.size()); - int batch_axis = param->batch_axis; + int batch_axis = param->batch_axis.IntValue(); ICHECK(-ndim <= batch_axis && batch_axis < ndim) << "reverse_sequence only accepts `batch_axis` in [-data.ndim, data.ndim - 1]" << ", but got batch_axis = " << batch_axis << ", and data.ndim = " << ndim; @@ -2149,7 +2150,7 @@ bool ReverseSequenceRel(const 
Array<Type>& types, int num_inputs, const Attrs& a << ", but got dimension of batch_axis = " << data->shape[batch_axis] << ", and seq_length size = " << seq_lengths->shape[0]; - const int seq_axis = param->seq_axis; + const int seq_axis = param->seq_axis.IntValue(); ICHECK(-ndim <= seq_axis && seq_axis < ndim) << "reverse_sequnece only accepts `seq_axis` in [-data.ndim, data.ndim - 1]" << ", but got seq_axis = " << seq_axis << ", and data.ndim = " << ndim; @@ -2162,7 +2163,8 @@ Array<te::Tensor> ReverseSequenceCompute(const Attrs& attrs, const Array<te const auto* param = attrs.as<ReverseSequenceAttrs>(); ICHECK(param != nullptr); - return {topi::reverse_sequence(inputs[0], inputs[1], param->seq_axis, param->batch_axis)}; + return {topi::reverse_sequence(inputs[0], inputs[1], param->seq_axis.IntValue(), + param->batch_axis.IntValue())}; } Expr MakeReverseSequence(Expr data, Expr seq_lengths, int seq_axis, int batch_axis) { @@ -2374,7 +2376,7 @@ InferCorrectLayoutOutput SqueezeInferCorrectLayout(const Attrs& attrs, if (new_in_layouts.defined() && old_in_layouts.defined()) { Array<Integer> new_axis; for (const auto& e : axis) { - const auto& dim = old_in_layouts[0][e]; + const auto& dim = old_in_layouts[0][e.IntValue()]; new_axis.push_back((new_in_layouts[0]).IndexOf(dim)); } params->axis = new_axis; @@ -2714,7 +2716,7 @@ InferCorrectLayoutOutput StridedSliceInferCorrectLayout( Array<Integer> new_axes; for (size_t i = 0; i < axes.size(); ++i) { - auto old_idx = axes[i]; + auto old_idx = axes[i].IntValue(); auto new_idx = new_layout.IndexOf(layout[old_idx]); new_begin.push_back(begin[i]); new_end.push_back(end[i]); @@ -2765,7 +2767,7 @@ InferCorrectLayoutOutput StridedSliceInferCorrectLayout( auto axes = params->axes.value(); Array<Integer> new_axes; for (size_t i = 0; i < axes.size(); ++i) { - auto old_idx = axes[i]; + auto old_idx = axes[i].IntValue(); auto new_idx = new_layout.IndexOf(layout[old_idx]); new_axes.push_back(new_idx); @@ -2783,8 +2785,8 @@ InferCorrectLayoutOutput StridedSliceInferCorrectLayout( return out_default; } } - int64_t bg = begin[i]; - int64_t ed = end[i]; + int64_t bg = begin[i].IntValue(); + int64_t ed = end[i].IntValue(); if (bg % factor || ed % factor) { // transform to original layout return out_default; @@ -2801,8 +2803,8 @@ InferCorrectLayoutOutput StridedSliceInferCorrectLayout( ICHECK(axis.IsPrimal()); auto factor = new_layout.FactorOf(axis); if (factor == -1) { - new_begin.push_back(IntImm(begin[i]->dtype, begin[i])); - new_end.push_back(IntImm(end[i]->dtype, end[i])); + new_begin.push_back(IntImm(begin[i]->dtype, begin[i].IntValue())); + new_end.push_back(IntImm(end[i]->dtype, end[i].IntValue())); } else { if (strides.defined() && i < strides.size()) { auto stride = strides[i]; @@ -3251,17 +3253,17 @@ Array<te::Tensor> SliceLikeCompute(const Attrs& attrs, const Array<te::Tensor>& } } } else { - for (int axis : param->axes) { - if (axis < 0) { - axis = static_cast<int>(src_shape.size()) + axis; + for (Integer axis : param->axes) { + int a = axis.IntValue(); + if (a < 0) { + a = static_cast<int>(src_shape.size()) + a; } - ICHECK(target_shape[axis]->IsInstance<IntImmNode>()) + ICHECK(target_shape[a]->IsInstance<IntImmNode>()) << "slice_like does not support dynamic output shape"; - end_idx.Set(axis, topi::GetConstInt(target_shape[axis])); - ICHECK_LE(topi::GetConstInt(end_idx[axis]), topi::GetConstInt(src_shape[axis])) << "End index of axis " << axis << " exceeds input shape: " << topi::GetConstInt(end_idx[axis]) << " vs " << topi::GetConstInt(src_shape[axis]); + end_idx.Set(a, topi::GetConstInt(target_shape[a])); + ICHECK_LE(topi::GetConstInt(end_idx[a]), topi::GetConstInt(src_shape[a])) + << "End index of 
axis " << a << " exceeds input shape: " << topi::GetConstInt(end_idx[a]) + << " vs " << topi::GetConstInt(src_shape[a]); } } return Array{topi::strided_slice(inputs[0], begin_idx, end_idx, strides, "end")}; @@ -3515,7 +3517,7 @@ bool GatherRel(const Array& types, int num_inputs, const Attrs& attrs, Array GatherCompute(const Attrs& attrs, const Array& inputs, const Type& out_type) { const auto* param = attrs.as(); - return {topi::gather(inputs[0], param->axis, inputs[1])}; + return {topi::gather(inputs[0], param->axis.IntValue(), inputs[1])}; } Expr MakeGather(Expr data, Integer axis, Expr indices) { @@ -3594,7 +3596,7 @@ Array GatherNDCompute(const Attrs& attrs, const Array& i const Type& out_type) { const auto* param = attrs.as(); ICHECK(param); - return {topi::gather_nd(inputs[0], inputs[1], param->batch_dims)}; + return {topi::gather_nd(inputs[0], inputs[1], param->batch_dims.IntValue())}; } Expr MakeGatherND(Expr data, Expr indices, int batch_dims = 0, diff --git a/src/relay/op/vision/yolo.cc b/src/relay/op/vision/yolo.cc index 70d8820612995..8979f939c32ed 100644 --- a/src/relay/op/vision/yolo.cc +++ b/src/relay/op/vision/yolo.cc @@ -81,7 +81,7 @@ Its function is mostly shape transform.")doc" TVM_ADD_FILELINE) const Type& out_type) { const auto* params = attrs.as(); ICHECK(params != nullptr); - return Array{topi::vision::reorg(inputs[0], params->stride)}; + return Array{topi::vision::reorg(inputs[0], params->stride.IntValue())}; }); } // namespace relay diff --git a/src/relay/qnn/op/requantize.cc b/src/relay/qnn/op/requantize.cc index 8601264f53130..2a6153e810963 100644 --- a/src/relay/qnn/op/requantize.cc +++ b/src/relay/qnn/op/requantize.cc @@ -91,7 +91,7 @@ InferCorrectLayoutOutput RequantizeInferCorrectLayout(const Attrs& attrs, Layout channel_layout = Layout("C"); input_layouts = {new_layout, channel_layout, channel_layout, channel_layout, channel_layout}; output_layouts = {new_layout}; - param->axis = new_axis; + param->axis = new_axis.IntValue(); } else if (old_in_layouts.defined()) { // If the new layout is undefined, set the old layout as the inferred layout. ICHECK_EQ(old_in_layouts.size(), 5); diff --git a/src/relay/transforms/fuse_ops.cc b/src/relay/transforms/fuse_ops.cc index e25b8db152c49..1ced0883a14cb 100644 --- a/src/relay/transforms/fuse_ops.cc +++ b/src/relay/transforms/fuse_ops.cc @@ -1057,7 +1057,8 @@ Pass FuseOps(int fuse_opt_level) { link_params = pc->GetConfig("relay.FuseOps.link_params", Bool(link_params)).value(); int opt_level = fuse_opt_level == -1 ? pc->opt_level : fuse_opt_level; auto max_fuse_depth = pc->GetConfig("relay.FuseOps.max_depth", Integer(kMaxFusedOps)); - return Downcast(FuseOps(f, opt_level, max_fuse_depth.value(), link_params, m)); + return Downcast( + FuseOps(f, opt_level, max_fuse_depth.value().IntValue(), link_params, m)); }; return CreateFunctionPass(pass_func, 0, "FuseOps", {"InferType"}); } diff --git a/src/relay/transforms/simplify_expr.cc b/src/relay/transforms/simplify_expr.cc index 209639dd8f83a..04d0edb26d753 100644 --- a/src/relay/transforms/simplify_expr.cc +++ b/src/relay/transforms/simplify_expr.cc @@ -30,6 +30,7 @@ #include #include +#include #include #include #include @@ -334,7 +335,7 @@ class SimplifyTranspose : public DFPatternRewrite { if (auto attr = call->attrs.as()) { if (attr->axes.defined()) { for (int i = 0; i < ndim; ++i) { - int64_t axis = attr->axes[i]; + int64_t axis = attr->axes[i].IntValue(); axis += (axis < 0) ? 
ndim : 0; attr_axes.push_back(axis); } } @@ -546,8 +547,10 @@ class ConcretizeCollapseSumLikeRewrite : public ConcretizeLikeRewrite { static const Op& op = Op::Get("collapse_sum_to"); auto attrs = make_object<InitOpAttrs>(); attrs->shape = shape; - auto cshape = - MakeConstantTensor(DataType::Int(32), {static_cast<int64_t>(shape.size())}, shape); + std::vector<int64_t> s; + std::transform(shape.begin(), shape.end(), std::back_inserter(s), + [](Integer i) { return i.IntValue(); }); + auto cshape = MakeConstantTensor(DataType::Int(32), {static_cast<int64_t>(shape.size())}, s); return Call(op, {node_map[data_pat_][0], cshape}, Attrs(attrs)); } }; diff --git a/src/target/build_common.h b/src/target/build_common.h index 6c94ec8703b73..35b3d92eb8149 100644 --- a/src/target/build_common.h +++ b/src/target/build_common.h @@ -57,7 +57,7 @@ inline std::unordered_map<std::string, runtime::FunctionInfo> ExtractFuncInfo(co } } if (auto opt = f->GetAttr<Integer>(tir::attr::kDeviceUseDynSharedMemory)) { - if (opt.value()) { + if (opt.value().IntValue() != 0) { info.launch_param_tags.push_back(runtime::launch_param::kUseDynamicSharedMemoryTag); } } diff --git a/src/target/llvm/llvm_common.cc b/src/target/llvm/llvm_common.cc index 3d9ac835dc50f..83de839a926e1 100644 --- a/src/target/llvm/llvm_common.cc +++ b/src/target/llvm/llvm_common.cc @@ -159,7 +159,7 @@ std::unique_ptr<llvm::TargetMachine> GetLLVMTargetMachine(const Target& target, return nullptr; } - Integer llvm_opt_level = target->GetAttr<Integer>("opt-level").value_or(Integer(3)); + int llvm_opt_level = target->GetAttr<Integer>("opt-level").value_or(Integer(3)).IntValue(); llvm::CodeGenOpt::Level llvm_opt; if (llvm_opt_level <= 0) { llvm_opt = llvm::CodeGenOpt::None; diff --git a/src/target/metadata.h b/src/target/metadata.h index 7551592ac5ab4..b761f7ff2bbb8 100644 --- a/src/target/metadata.h +++ b/src/target/metadata.h @@ -154,7 +154,7 @@ class InMemoryMetadataNode : public ::tvm::target::metadata::VisitableMetadataNo storage_.num_constant_pools = constant_pools.size(); for (size_t i = 0; i < constant_pools.size(); ++i) { constant_pools_.get()[i].name_hint = constant_pools[i]->name_hint.c_str(); - constant_pools_.get()[i].byte_offset = constant_pools[i]->byte_offset; + constant_pools_.get()[i].byte_offset = constant_pools[i]->byte_offset.IntValue(); std::string bytes; dmlc::MemoryStringStream stream(&bytes); diff --git a/src/target/metadata_module.cc b/src/target/metadata_module.cc index e5ca82d5c0996..c8c099171c967 100644 --- a/src/target/metadata_module.cc +++ b/src/target/metadata_module.cc @@ -118,7 +118,7 @@ static runtime::metadata::Metadata ConvertMetaData( if (api->pool_info.as()) { pools.push_back( runtime::metadata::TensorInfo(make_object( - var->name_hint, std::vector<int64_t>{api->allocated_size}, + var->name_hint, std::vector<int64_t>{api->allocated_size.IntValue()}, tvm::runtime::DataType{kDLUInt, 8, 1}))); } } diff --git a/src/target/source/codegen_metal.cc b/src/target/source/codegen_metal.cc index a76da36ea7250..0ec6179115195 100644 --- a/src/target/source/codegen_metal.cc +++ b/src/target/source/codegen_metal.cc @@ -67,7 +67,7 @@ void CodeGenMetal::AddFunction(const PrimFunc& f) { // Buffer arguments size_t num_buffer = 0; - int limit = target_->GetAttr<Integer>("max_function_args").value(); + int limit = target_->GetAttr<Integer>("max_function_args").value().IntValue(); if (static_cast<int>(f->params.size()) > limit) { LOG(WARNING) << "Probably you won't be able to execute your kernel due to high number of " "buffers in the kernel"; diff --git a/src/target/source/interface_c.cc b/src/target/source/interface_c.cc index fef81c9bd69fe..fa38d9b9f4d15 100644 --- 
a/src/target/source/interface_c.cc +++ b/src/target/source/interface_c.cc @@ -177,14 +177,14 @@ class InterfaceCNode : public runtime::ModuleNode { return a->byte_offset->value < b->byte_offset->value; }); int64_t accumulated_pool_len = - const_info_vec.back()->byte_offset + + const_info_vec.back()->byte_offset.IntValue() + runtime::GetDataSize(*const_info_vec.back()->data.operator->()); const auto& accumulated_pool = runtime::NDArray::Empty( {accumulated_pool_len}, DataType::UInt(8), const_info_vec.back()->data->device); for (const auto& const_info : const_info_vec) { const auto& data = const_info->data; const auto& offs = const_info->byte_offset; - data.CopyToBytes(static_cast<uint8_t*>(accumulated_pool->data) + offs, + data.CopyToBytes(static_cast<uint8_t*>(accumulated_pool->data) + offs.IntValue(), runtime::GetDataSize(*data.operator->())); } diff --git a/src/target/source/source_module.cc b/src/target/source/source_module.cc index 6495c39ef1400..88a7a99b4c255 100644 --- a/src/target/source/source_module.cc +++ b/src/target/source/source_module.cc @@ -469,8 +469,8 @@ class CSourceCrtMetadataModuleNode : public runtime::ModuleNode { String pool_name_tvmv = GenerateDLTensorStructWrapper(pool_name); code_ << "tensors[" << i << "] = " << pool_name_tvmv << ";\n"; } else { - code_ << "tensors[" << i << "] = ((TVMValue*)args)[" - << run_func_to_entry_point_args[Integer(i)] << "];\n"; + code_ << "tensors[" << i << "] = ((TVMValue*)args)[" << run_func_to_entry_point_args[i] + << "];\n"; } } } @@ -733,7 +733,7 @@ class MetadataSerializer : public AttrVisitor { switch (array->kind) { case MetadataKind::kUint64: { - int64_t i = Downcast<Integer>(o); + int64_t i = Downcast<Integer>(o).IntValue(); CHECK_GT(i, 0) << "Metadata is of type uint64_t, but array type contains a negative number"; uint64_t ui = static_cast<uint64_t>(i); continue; } case MetadataKind::kInt64: { - int64_t i = Downcast<Integer>(o); + int64_t i = Downcast<Integer>(o).IntValue(); Visit(nullptr, &i); continue; } diff --git a/src/target/spirv/build_vulkan.cc b/src/target/spirv/build_vulkan.cc index e922942e8acff..94f1bf16a25e7 100644 --- a/src/target/spirv/build_vulkan.cc +++ b/src/target/spirv/build_vulkan.cc @@ -42,8 +42,9 @@ class SPIRVTools { public: explicit SPIRVTools(Target target) { uint32_t vulkan_version = - target->GetAttr<Integer>("vulkan_api_version").value_or(VK_API_VERSION_1_0); + target->GetAttr<Integer>("vulkan_api_version").value_or(VK_API_VERSION_1_0).IntValue(); - uint32_t spirv_version = target->GetAttr<Integer>("max_spirv_version").value_or(0x10000); + uint32_t spirv_version = + target->GetAttr<Integer>("max_spirv_version").value_or(0x10000).IntValue(); spv_target_env validation_version; if (vulkan_version >= VK_API_VERSION_1_2) { diff --git a/src/target/spirv/spirv_support.cc b/src/target/spirv/spirv_support.cc index 33055e7399d56..a91a2a3384e0e 100644 --- a/src/target/spirv/spirv_support.cc +++ b/src/target/spirv/spirv_support.cc @@ -36,28 +36,32 @@ SPIRVSupport::SPIRVSupport(tvm::Target target) { << "SPIRVSupport can only be checked for vulkan device type"; if (target->GetAttr<Integer>("vulkan_api_version")) { - vulkan_api_version = target->GetAttr<Integer>("vulkan_api_version").value(); + vulkan_api_version = target->GetAttr<Integer>("vulkan_api_version").value().IntValue(); } if (target->GetAttr<Integer>("supported_subgroup_operations")) { supported_subgroup_operations = - target->GetAttr<Integer>("supported_subgroup_operations").value(); + target->GetAttr<Integer>("supported_subgroup_operations").value().IntValue(); } if (target->GetAttr<Integer>("max_push_constants_size")) { - 
max_push_constants_size = target->GetAttr<Integer>("max_push_constants_size").value(); + max_push_constants_size = + target->GetAttr<Integer>("max_push_constants_size").value().IntValue(); } if (target->GetAttr<Integer>("max_uniform_buffer_range")) { - max_uniform_buffer_range = target->GetAttr<Integer>("max_uniform_buffer_range").value(); + max_uniform_buffer_range = + target->GetAttr<Integer>("max_uniform_buffer_range").value().IntValue(); } if (target->GetAttr<Integer>("max_storage_buffer_range")) { - max_storage_buffer_range = target->GetAttr<Integer>("max_storage_buffer_range").value(); + max_storage_buffer_range = + target->GetAttr<Integer>("max_storage_buffer_range").value().IntValue(); } if (target->GetAttr<Integer>("max_shared_memory_per_block")) { - max_shared_memory_per_block = target->GetAttr<Integer>("max_shared_memory_per_block").value(); + max_shared_memory_per_block = + target->GetAttr<Integer>("max_shared_memory_per_block").value().IntValue(); } if (target->GetAttr<Integer>("max_per_stage_descriptor_storage_buffer")) { max_per_stage_descriptor_storage_buffers = - target->GetAttr<Integer>("max_per_stage_descriptor_storage_buffer").value(); + target->GetAttr<Integer>("max_per_stage_descriptor_storage_buffer").value().IntValue(); } if (target->GetAttr<Bool>("supports_storage_buffer_storage_class")) { supports_storage_buffer_storage_class = diff --git a/src/target/target.cc b/src/target/target.cc index 3cdfa0cc0d5e8..afdfad9b76b91 100644 --- a/src/target/target.cc +++ b/src/target/target.cc @@ -804,7 +804,7 @@ ObjectPtr<Object> TargetInternal::FromConfig(std::unordered_map - int device_id = Downcast<Integer>(attrs.at("from_device")); + int device_id = Downcast<Integer>(attrs.at("from_device")).IntValue(); attrs.erase("from_device"); auto device_params = QueryDevice(device_id, target.get()); diff --git a/src/tir/analysis/calculate_workspace.cc b/src/tir/analysis/calculate_workspace.cc index 11593bb443a75..a667e2354b9b6 100644 --- a/src/tir/analysis/calculate_workspace.cc +++ b/src/tir/analysis/calculate_workspace.cc @@ -55,7 +55,8 @@ size_t WorkspaceCalculator::operator()(const PrimFunc& func) { template size_t WorkspaceCalculator::GetByteAlignedSize(Integer non_aligned_size) { return non_aligned_size.defined() - ? ((non_aligned_size + byte_alignment - 1) / byte_alignment) * byte_alignment + ? 
((non_aligned_size.IntValue() + byte_alignment - 1) / byte_alignment) * + byte_alignment : 0; } diff --git a/src/tir/contrib/ethosu/passes.cc b/src/tir/contrib/ethosu/passes.cc index 09c359c55abba..609d986dbb84f 100644 --- a/src/tir/contrib/ethosu/passes.cc +++ b/src/tir/contrib/ethosu/passes.cc @@ -214,7 +214,7 @@ tvm::transform::Pass CopyComputeReordering(Optional<Integer> max_copy_movements) "pass in conjunction with the LowerToTIR() pass."; auto value = max_copy_movements.value_or( ctx->GetConfig<Integer>(kCopyComputeReorderingMaxCopyMovements, Integer(1)).value()); - return CopyComputeReorderingMutator(value)(f); + return CopyComputeReorderingMutator(value.IntValue())(f); }; return tvm::tir::transform::CreatePrimFuncPass(pass_func, 0, "tir.contrib.ethos-u.CopyComputeReordering", {}); diff --git a/src/tir/schedule/primitive/cache_read_write.cc b/src/tir/schedule/primitive/cache_read_write.cc index 5a8d452f14b85..6a7b59cfec96b 100644 --- a/src/tir/schedule/primitive/cache_read_write.cc +++ b/src/tir/schedule/primitive/cache_read_write.cc @@ -1233,7 +1233,7 @@ struct ReIndexTraits : public UnpackedInstTraits<ReIndexTraits> { static BlockRV UnpackedApplyToSchedule(Schedule sch, BlockRV block, Integer buffer_index, Integer buffer_index_type) { - return sch->ReIndex(block, buffer_index, + return sch->ReIndex(block, buffer_index.IntValue(), static_cast<BufferIndexType>(buffer_index_type->value)); } diff --git a/src/tir/schedule/primitive/layout_transformation.cc b/src/tir/schedule/primitive/layout_transformation.cc index 692f68a600ae9..639593ab3e74c 100644 --- a/src/tir/schedule/primitive/layout_transformation.cc +++ b/src/tir/schedule/primitive/layout_transformation.cc @@ -548,7 +548,7 @@ struct TransformLayoutTraits : public UnpackedInstTraits<TransformLayoutTraits> static void UnpackedApplyToSchedule(Schedule sch, BlockRV block_rv, Integer buffer_index, Integer buffer_index_type, IndexMap index_map) { - return sch->TransformLayout(block_rv, buffer_index, + return sch->TransformLayout(block_rv, buffer_index.IntValue(), static_cast<BufferIndexType>(buffer_index_type->value), index_map); } @@ -639,7 +639,7 @@ struct SetAxisSeparatorTraits : public UnpackedInstTraits<SetAxisSeparatorTraits> static void UnpackedApplyToSchedule(Schedule sch, BlockRV block_rv, Integer buffer_index, Integer buffer_index_type, Array<IntImm> axis_separators) { - return sch->SetAxisSeparator(block_rv, buffer_index, + return sch->SetAxisSeparator(block_rv, buffer_index.IntValue(), static_cast<BufferIndexType>(buffer_index_type->value), axis_separators); } diff --git a/src/tir/schedule/primitive/sampling.cc b/src/tir/schedule/primitive/sampling.cc index b7ea3f539bce9..1961565aac75e 100644 --- a/src/tir/schedule/primitive/sampling.cc +++ b/src/tir/schedule/primitive/sampling.cc @@ -184,7 +184,7 @@ int64_t SampleCategorical(support::LinearCongruentialEngine::TRandState* rand_st } *decision = Integer(i); // decision is guaranteed not to be nullptr. 
- return candidates[i]; + return candidates[i].IntValue(); } std::function MakeMultinomialSampler( diff --git a/src/tir/schedule/transform.cc b/src/tir/schedule/transform.cc index 67d0f55f20b9f..436d529abdc55 100644 --- a/src/tir/schedule/transform.cc +++ b/src/tir/schedule/transform.cc @@ -284,7 +284,7 @@ Optional<LoopRV> TileWithTensorIntrin(const tir::Schedule& sch, const tir::Block ICHECK_EQ(split.size(), 2); inner_loops.insert(sch->GetSRef(split[1]).operator->()); // The inner split will be reordered to the loop domain that is tensorized - int desc_loop_index = info->desc_loop_indexer.at(GetRef<tir::For>(desc_loop)); + int desc_loop_index = info->desc_loop_indexer.at(GetRef<tir::For>(desc_loop)).IntValue(); reorder_suffix[desc_loop_index] = split[1]; } // Reorder the loops diff --git a/src/tir/transforms/inject_software_pipeline.cc b/src/tir/transforms/inject_software_pipeline.cc index de9aa79583b46..b4a597fe97d86 100644 --- a/src/tir/transforms/inject_software_pipeline.cc +++ b/src/tir/transforms/inject_software_pipeline.cc @@ -772,7 +772,7 @@ class PipelineInjector : private StmtExprMutator { auto it = op->annotations.find(attr::double_buffer_scope); if (it != op->annotations.end()) { - int buffer_index = Downcast<Integer>((*it).second); + int buffer_index = Downcast<Integer>((*it).second).IntValue(); CHECK(buffer_index >= 0 && static_cast<size_t>(buffer_index) < op->writes.size()) << "ValueError: Index of the buffer exceeds the size of the write regions of the block. (" << buffer_index << " vs. " << op->writes.size() << ")"; diff --git a/src/tir/transforms/lower_thread_allreduce.cc b/src/tir/transforms/lower_thread_allreduce.cc index 7e09943d01858..aeb819c5168d3 100644 --- a/src/tir/transforms/lower_thread_allreduce.cc +++ b/src/tir/transforms/lower_thread_allreduce.cc @@ -62,7 +62,8 @@ class UpdatePointerStorageScopeAllReduce final : public UpdatePointerStorageScop class ThreadAllreduceBuilder final : public StmtExprMutator { public: explicit ThreadAllreduceBuilder(const TargetNode* target) - : target_(target), warp_size_(target->GetAttr<Integer>("thread_warp_size", 1).value()) {} + : target_(target), + warp_size_(target->GetAttr<Integer>("thread_warp_size", 1).value().IntValue()) {} Stmt VisitStmt_(const AttrStmtNode* op) final { if (op->attr_key == attr::thread_extent) { diff --git a/src/tir/transforms/lower_warp_memory.cc b/src/tir/transforms/lower_warp_memory.cc index d8250cd09888e..408cdbd04ec77 100644 --- a/src/tir/transforms/lower_warp_memory.cc +++ b/src/tir/transforms/lower_warp_memory.cc @@ -472,7 +472,7 @@ Pass LowerWarpMemory() { auto* n = f.CopyOnWrite(); auto target = f->GetAttr<Target>(tvm::attr::kTarget); ICHECK(target.defined()) << "LowerWarpMemory: Require the target attribute"; - int warp_size = target.value()->GetAttr<Integer>("thread_warp_size", 1).value(); + int warp_size = target.value()->GetAttr<Integer>("thread_warp_size", 1).value().IntValue(); WarpMemoryRewriter warp_memory_rewriter(warp_size); auto stmt = warp_memory_rewriter.Rewrite(std::move(n->body)); n->body = UpdatePointerStorageScope(warp_memory_rewriter.new_storage_scopes_)(stmt); diff --git a/src/tir/usmp/algo/greedy.cc b/src/tir/usmp/algo/greedy.cc index cae01ee859696..ec4f5a5d7215a 100644 --- a/src/tir/usmp/algo/greedy.cc +++ b/src/tir/usmp/algo/greedy.cc @@ -74,7 +74,7 @@ bool GreedyBase::IsValidPlacement(const PoolInfo& candidate_pool, const size_t& // this means pool is not bounded return true; } - auto pool_size = static_cast<size_t>(size_hint_bytes); + auto pool_size = static_cast<size_t>(size_hint_bytes.IntValue()); auto max_address = next_offset + size_bytes; if (max_address <= pool_size) { return 
true; @@ -124,7 +124,8 @@ Map GreedyBase::PostSortAllocation( // We only look at already allocated BufferInfo in-terms of conflicts. if (pool_allocations.count(conflict_buf_info)) { auto pool_allocation = pool_allocations[conflict_buf_info]; - next_offset = pool_allocation->byte_offset + conflict_buf_info->size_bytes; + next_offset = + pool_allocation->byte_offset.IntValue() + conflict_buf_info->size_bytes.IntValue(); next_offset = round_up_to_byte_alignment(next_offset, conflict_buf_info->alignment->value); // Checks whether the next offset in the same pool as the conflicting BufferInfo is valid. if (IsValidPlacement(pool_allocation->pool_info, next_offset, @@ -169,7 +170,7 @@ class GreedySize : public GreedyBase { return a->conflicts.size() > b->conflicts.size(); } } - return a->size_bytes > b->size_bytes; + return a->size_bytes.IntValue() > b->size_bytes.IntValue(); }); return PostSortAllocation(buffer_info_vec); } diff --git a/src/tir/usmp/algo/hill_climb.cc b/src/tir/usmp/algo/hill_climb.cc index c4ed73eb2feb2..8234074f9c892 100644 --- a/src/tir/usmp/algo/hill_climb.cc +++ b/src/tir/usmp/algo/hill_climb.cc @@ -105,7 +105,8 @@ class HillClimbAllocator : public GreedyBase { for (const auto* conflict_buf_info : buf_conf) { size_t next_offset = 0; auto pool_allocation = pool_allocations[conflict_buf_info]; - next_offset = pool_allocation->byte_offset + conflict_buf_info->size_bytes; + next_offset = + pool_allocation->byte_offset.IntValue() + conflict_buf_info->size_bytes.IntValue(); next_offset = round_up_to_byte_alignment(next_offset, conflict_buf_info->alignment->value); if (!pool_offset_candidates.count(pool_allocation->pool_info)) { continue; @@ -114,8 +115,8 @@ class HillClimbAllocator : public GreedyBase { buf_info->size_bytes->value)) { if (next_offset > pool_offset_candidates[pool_allocation->pool_info] && pool_offset_candidates[pool_allocation->pool_info] + - static_cast(buf_info->size_bytes) > - static_cast(pool_allocation->byte_offset)) { + static_cast(buf_info->size_bytes.IntValue()) > + static_cast(pool_allocation->byte_offset.IntValue())) { pool_offset_candidates[pool_allocation->pool_info] = next_offset; } } else { @@ -138,7 +139,7 @@ class HillClimbAllocator : public GreedyBase { for (const auto& it : *pool_allocations) { const BufferInfoNode* buf = it.first; const PoolAllocation& pa = it.second; - size_t high_sz = pa->byte_offset + buf->size_bytes; + size_t high_sz = pa->byte_offset.IntValue() + buf->size_bytes.IntValue(); if (pool_sizes[pa->pool_info] <= high_sz) { pool_sizes[pa->pool_info] = high_sz; } @@ -277,7 +278,7 @@ class HillClimbAllocator : public GreedyBase { for (const auto& it : pool_allocations) { const auto* buf = it.first; const auto pa = it.second; - size_t high_sz = pa->byte_offset + buf->size_bytes; + size_t high_sz = pa->byte_offset.IntValue() + buf->size_bytes.IntValue(); if (pool_sizes[pa->pool_info] == high_sz) { max_pool_buf.push_back(buf); } @@ -325,7 +326,7 @@ class HillClimbAllocator : public GreedyBase { Map HillClimb(const Array& buffer_info_arr, const Integer& memory_pressure) { - return HillClimbAllocator(memory_pressure).PlanMemory(buffer_info_arr); + return HillClimbAllocator(memory_pressure.IntValue()).PlanMemory(buffer_info_arr); } TVM_REGISTER_GLOBAL("tir.usmp.algo.hill_climb") diff --git a/src/tir/usmp/analysis/extract_buffer_info.cc b/src/tir/usmp/analysis/extract_buffer_info.cc index 4e98116f8a17a..ba8f6aa911f14 100644 --- a/src/tir/usmp/analysis/extract_buffer_info.cc +++ b/src/tir/usmp/analysis/extract_buffer_info.cc @@ -369,11 
+369,11 @@ void BufferInfoExtractor::VisitStmt_(const ForNode* op) { update_call = ai.call; } if (scope_stack_.top().initial_stmt_of_the_nested_loops->value < - buffer_info_start_stmt_idx_[update_call][allocate]) { + buffer_info_start_stmt_idx_[update_call][allocate].IntValue()) { buffer_info_start_stmt_idx_[update_call].Set( allocate, scope_stack_.top().initial_stmt_of_the_nested_loops->value); } - if (current_stmt_idx_ > buffer_info_end_stmt_idx_[update_call][allocate]) { + if (current_stmt_idx_ > buffer_info_end_stmt_idx_[update_call][allocate].IntValue()) { buffer_info_end_stmt_idx_[update_call].Set(allocate, current_stmt_idx_); } } @@ -518,7 +518,7 @@ BufferInfoAnalysis BufferInfoExtractor::operator()(const PrimFunc& main_func) { LivenessEvent le_event_start; le_event_start.buffer_info = buffer_info; le_event_start.le_type = START; - le_event_start.tick = buffer_info_starts[allocate]; + le_event_start.tick = buffer_info_starts[allocate].IntValue(); le_events_timeline.push_back(le_event_start); } } @@ -529,7 +529,7 @@ BufferInfoAnalysis BufferInfoExtractor::operator()(const PrimFunc& main_func) { LivenessEvent le_event_end; le_event_end.buffer_info = buffer_info; le_event_end.le_type = END; - le_event_end.tick = buffer_info_ends[allocate]; + le_event_end.tick = buffer_info_ends[allocate].IntValue(); le_events_timeline.push_back(le_event_end); } } @@ -562,13 +562,13 @@ BufferInfoAnalysis BufferInfoExtractor::operator()(const PrimFunc& main_func) { le_event.buffer_info->conflicts.push_back(open_buffer_info); } } - open_set_size += le_event.buffer_info->size_bytes; + open_set_size += le_event.buffer_info->size_bytes.IntValue(); if (open_set_size > max_open_set_size) { max_open_set_size = open_set_size; } open_set.insert(le_event.buffer_info); } else { - open_set_size -= le_event.buffer_info->size_bytes; + open_set_size -= le_event.buffer_info->size_bytes.IntValue(); open_set.erase(le_event.buffer_info); } } diff --git a/src/tir/usmp/transform/convert_pool_allocations_to_offsets.cc b/src/tir/usmp/transform/convert_pool_allocations_to_offsets.cc index 24a55190d326e..601e347196327 100644 --- a/src/tir/usmp/transform/convert_pool_allocations_to_offsets.cc +++ b/src/tir/usmp/transform/convert_pool_allocations_to_offsets.cc @@ -57,10 +57,10 @@ class PoolAllocationToOffsetConverter : public StmtExprMutator { size_t extent_size = -1; if (kv.first->IsInstance<AllocateNode>()) { Allocate allocate_node = Downcast<Allocate>(kv.first); - extent_size = CalculateExtentsSize(allocate_node.operator->()); + extent_size = CalculateExtentsSize(allocate_node.operator->()).IntValue(); } else if (kv.first->IsInstance<AllocateConstNode>()) { AllocateConst allocate_const_node = Downcast<AllocateConst>(kv.first); - extent_size = CalculateExtentsSize(allocate_const_node.operator->()); + extent_size = CalculateExtentsSize(allocate_const_node.operator->()).IntValue(); } else { ICHECK(false) << "Not supported node type " << kv.first->GetTypeKey(); } diff --git a/src/tir/usmp/utils.cc b/src/tir/usmp/utils.cc index 6f95c7cbaf66c..3350ecc5d47f1 100644 --- a/src/tir/usmp/utils.cc +++ b/src/tir/usmp/utils.cc @@ -228,14 +228,14 @@ class ModuleWorkspaceSizeCalculator : public StmtExprVisitor { Integer workspace_byte_alignment = tgt->GetAttr<Integer>("workspace-byte-alignment").value_or(16); Integer workspace_req = CalculateWorkspaceBytes(func, workspace_byte_alignment); - if (workspace_req) { + if (workspace_req.IntValue() != 0) { current_workspace_size_ += workspace_req->value; } if (max_workspace_size < current_workspace_size_) { max_workspace_size = current_workspace_size_; } 
this->VisitStmt(func->body); - if (workspace_req) { + if (workspace_req.IntValue() != 0) { current_workspace_size_ -= workspace_req->value; } } diff --git a/tests/cpp/container_test.cc b/tests/cpp/container_test.cc index 32ec346c8796a..f6c4fb4b67d64 100644 --- a/tests/cpp/container_test.cc +++ b/tests/cpp/container_test.cc @@ -26,8 +26,14 @@ #include #include +#include +#include +#include +#include #include +#include #include +#include #include using namespace tvm; @@ -342,7 +348,7 @@ TEST(Map, Insert) { ICHECK_EQ(result.size(), expected.size()); for (const auto& kv : result) { ICHECK(expected.count(kv.first)); - ICHECK_EQ(expected[kv.first], kv.second.operator int64_t()); + ICHECK_EQ(expected[kv.first], kv.second.IntValue()); expected.erase(kv.first); } }; @@ -364,12 +370,14 @@ TEST(Map, Erase) { ICHECK_EQ(result.size(), expected.size()); for (const auto& kv : result) { ICHECK(expected.count(kv.first)); - ICHECK_EQ(expected[kv.first], kv.second.operator int64_t()); + ICHECK_EQ(expected[kv.first], kv.second.IntValue()); expected.erase(kv.first); } }; Map<String, Integer> map{{"a", 1}, {"b", 2}, {"c", 3}, {"d", 4}, {"e", 5}}; - std::unordered_map<std::string, int64_t> stl(map.begin(), map.end()); + std::unordered_map<std::string, int64_t> stl; + std::transform(map.begin(), map.end(), std::inserter(stl, stl.begin()), + [](auto&& p) { return std::make_pair(p.first, p.second.IntValue()); }); for (char c = 'a'; c <= 'e'; ++c) { Map<String, Integer> result = map; std::unordered_map<std::string, int64_t> expected(stl);
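Taken together, the hunks above apply one mechanical rule: with the implicit operator int64_t() removed from tvm::Integer, every silent narrowing must become an explicit IntValue() call, or a direct ->value read where the handle is known to be defined. A minimal before/after sketch of a call site, assuming a TVM build that includes this change; the function and variable names here are illustrative only:

#include <tvm/ir/expr.h>

void MigrationExample() {
  tvm::Integer x(42);
  // Before this patch: int64_t v = x;  // implicit conversion, now a compile error
  int64_t v = x.IntValue();  // explicit; ICHECKs that x is non-null first
  int64_t w = x->value;      // equivalent when x is already known to be defined
  (void)v;
  (void)w;
}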