diff --git a/python/tvm/relay/op/annotation/annotation.py b/python/tvm/relay/op/annotation/annotation.py index 685a8807f744..71a434917e72 100644 --- a/python/tvm/relay/op/annotation/annotation.py +++ b/python/tvm/relay/op/annotation/annotation.py @@ -30,7 +30,7 @@ def _make_virtual_device(device): return target.VirtualDevice(_nd.device(device)) if isinstance(device, target.VirtualDevice): return device - raise ValueError("expecting a Device or device name, but received a %s" % (type(device))) + raise ValueError(f"expecting a Device or device name, but received a {type(device)}") def on_device(body, device, constrain_result=False, constrain_body=True): diff --git a/python/tvm/relay/op/contrib/cutlass.py b/python/tvm/relay/op/contrib/cutlass.py index 6fce020a6694..40fc22e9e82f 100644 --- a/python/tvm/relay/op/contrib/cutlass.py +++ b/python/tvm/relay/op/contrib/cutlass.py @@ -88,7 +88,7 @@ def make_conv2d_pattern(with_bias=False, with_act=None): ) return is_op("multiply")(conv2d_out, rhs) - raise ValueError("Unknown activation %s." % with_act) + raise ValueError(f"Unknown activation {with_act}.") return conv2d_out diff --git a/python/tvm/relay/op/contrib/dnnl.py b/python/tvm/relay/op/contrib/dnnl.py index cc8848b23637..71a126ae8f26 100644 --- a/python/tvm/relay/op/contrib/dnnl.py +++ b/python/tvm/relay/op/contrib/dnnl.py @@ -45,14 +45,7 @@ from tvm.relay.expr_functor import ExprMutator, ExprVisitor from ... import _ffi_api -from ...dataflow_pattern import ( - DFPatternCallback, - is_constant, - is_expr, - is_op, - rewrite, - wildcard, -) +from ...dataflow_pattern import DFPatternCallback, is_constant, is_expr, is_op, rewrite, wildcard from .register import register_pattern_table logger = logging.getLogger("DNNL") @@ -172,7 +165,7 @@ def make_conv_pattern(conv_name, with_bias=True, with_eltwise=None): Call node sequence. """ if with_eltwise not in supported_post_elts: - raise ValueError("Unsupported eltwise post-op: %s" % with_eltwise) + raise ValueError(f"Unsupported eltwise post-op: {with_eltwise}") data = wildcard() weight = wildcard() bias = wildcard() @@ -335,7 +328,7 @@ def make_dense_pattern(with_bias=True, with_eltwise=None): Call node sequence. 
""" if with_eltwise not in supported_post_elts: - raise ValueError("Unsupported eltwise post-op: %s" % with_eltwise) + raise ValueError(f"Unsupported eltwise post-op: {with_eltwise}") data = wildcard() weight = wildcard() bias = wildcard() @@ -579,7 +572,7 @@ def get_shape(tensor): if tensor.op.name == "multiply": return tensor.type_args[0].shape return tensor.checked_type.shape - raise TypeError("Unsupport data type: %s" % type(tensor)) + raise TypeError(f"Unsupport data type: {type(tensor)}") def get_dtype(tensor): @@ -596,7 +589,7 @@ def get_dtype(tensor): if tensor.op.name == "multiply": return tensor.type_args[0].dtype return tensor.checked_type.dtype - raise TypeError("Unsupport data type: %s" % type(tensor)) + raise TypeError(f"Unsupport data type: {type(tensor)}") def tag2layout(input_data, is_weight=False, conv_type="Conv1D"): @@ -627,7 +620,7 @@ def tag2layout(input_data, is_weight=False, conv_type="Conv1D"): elif i.isdigit(): res += i else: - raise ValueError("Unsupport layout format: %s" % input_data) + raise ValueError(f"Unsupport layout format: {input_data}") return res diff --git a/python/tvm/relay/op/nn/_nn.py b/python/tvm/relay/op/nn/_nn.py index b93285aed86e..c68685f0ae09 100644 --- a/python/tvm/relay/op/nn/_nn.py +++ b/python/tvm/relay/op/nn/_nn.py @@ -307,7 +307,7 @@ def convert_conv2d(attrs, inputs, tinfos, desired_layouts): new_attrs["kernel_layout"] = desired_kernel_layout return relay.nn.contrib_conv2d_nchwc(data, weight, **new_attrs) - raise ValueError("Layout %s is not yet supported." % desired_data_layout) + raise ValueError(f"Layout {desired_data_layout} is not yet supported.") # conv2d_transpose @@ -375,7 +375,7 @@ def convert_conv2d_transpose(attrs, inputs, tinfos, desired_layouts): new_attrs["kernel_layout"] = "HWIO" return relay.nn.conv2d_transpose(data, weight, **new_attrs) - raise ValueError("Layout %s is not yet supported." % desired_data_layout) + raise ValueError(f"Layout {desired_data_layout} is not yet supported.") # conv3d_transpose @@ -424,7 +424,7 @@ def convert_conv3d_transpose(attrs, inputs, tinfos, desired_layouts): new_attrs["kernel_layout"] = "DHWOI" return relay.nn.conv3d_transpose(data, weight, **new_attrs) - raise ValueError("Layout %s is not yet supported" % desired_data_layout) + raise ValueError(f"Layout {desired_data_layout} is not yet supported") @reg.register_legalize("nn.conv3d_transpose") @@ -498,7 +498,7 @@ def convert_conv3d(attrs, inputs, tinfos, desired_layouts): new_attrs["kernel_layout"] = "DHWIO" return relay.nn.conv3d(data, weight, **new_attrs) - raise ValueError("Layout %s is not yet supported" % desired_data_layout) + raise ValueError(f"Layout {desired_data_layout} is not yet supported") # conv3d_winograd related operators @@ -917,7 +917,7 @@ def convert_deformable_conv2d(attrs, inputs, tinfos, desired_layouts): elif desired_data_layout == "NHWC": new_attrs["kernel_layout"] = "HWIO" else: - raise ValueError("Layout %s is not yet supported." 
% desired_data_layout) + raise ValueError(f"Layout {desired_data_layout} is not yet supported.") return relay.nn.deformable_conv2d(data, offset, weight, **new_attrs) @@ -1457,10 +1457,7 @@ def dense_shape_func(attrs, inputs, _): """ ret = [ _matmul_shape_func( - inputs[0], - inputs[1], - expr.IntImm("bool", False), - expr.IntImm("bool", True), + inputs[0], inputs[1], expr.IntImm("bool", False), expr.IntImm("bool", True) ) ] return ret diff --git a/python/tvm/relay/op/nn/utils.py b/python/tvm/relay/op/nn/utils.py index fc687cfe070e..0286f0a8f4fb 100644 --- a/python/tvm/relay/op/nn/utils.py +++ b/python/tvm/relay/op/nn/utils.py @@ -45,7 +45,7 @@ def get_pad_tuple1d(padding): elif isinstance(padding, int): pad_w = padding * 2 else: - raise ValueError("Unknown padding option %s" % padding) + raise ValueError(f"Unknown padding option {padding}") pad_left = (pad_w + 1) // 2 return pad_left, pad_w - pad_left @@ -81,7 +81,7 @@ def get_pad_tuple2d(padding): elif isinstance(padding, int): pad_h = pad_w = padding * 2 else: - raise ValueError("Unknown padding option %s" % padding) + raise ValueError(f"Unknown padding option {padding}") pad_top = (pad_h + 1) // 2 pad_left = (pad_w + 1) // 2 return pad_top, pad_left, pad_h - pad_top, pad_w - pad_left @@ -123,7 +123,7 @@ def get_pad_tuple3d(padding): elif isinstance(padding, int): pad_d = pad_h = pad_w = padding * 2 else: - raise ValueError("Unknown padding option %s" % padding) + raise ValueError(f"Unknown padding option {padding}") pad_front = (pad_d + 1) // 2 pad_top = (pad_h + 1) // 2 pad_left = (pad_w + 1) // 2 diff --git a/python/tvm/relay/op/op.py b/python/tvm/relay/op/op.py index ec48ea175f30..5f37845cebf0 100644 --- a/python/tvm/relay/op/op.py +++ b/python/tvm/relay/op/op.py @@ -192,15 +192,15 @@ def _fstrategy(attrs, inputs, out_type, target): def _create_fstrategy_from_schedule(op_name, schedule): assert hasattr(schedule, "dispatch_dict") compute = get(op_name).get_attr("FTVMCompute") - assert compute is not None, "FTVMCompute is not registered for op %s" % op_name - fstrategy = get_native_generic_func("{}_strategy".format(op_name)) + assert compute is not None, f"FTVMCompute is not registered for op {op_name}" + fstrategy = get_native_generic_func(f"{op_name}_strategy") name_pfx = schedule.__name__ name_pfx = name_pfx[name_pfx.index("_") + 1 :] fstrategy.set_default( - _wrap_default_fstrategy(compute, schedule.fdefault, "%s.generic" % name_pfx) + _wrap_default_fstrategy(compute, schedule.fdefault, f"{name_pfx}.generic") ) for key, sch in schedule.dispatch_dict.items(): - fstrategy.register(_wrap_default_fstrategy(compute, sch, "%s.%s" % (name_pfx, key)), [key]) + fstrategy.register(_wrap_default_fstrategy(compute, sch, f"{name_pfx}.{key}"), [key]) return fstrategy @@ -522,7 +522,7 @@ def debug(expr, debug_func=None): global __DEBUG_COUNTER__ if debug_func: - name = "debugger_func{}".format(__DEBUG_COUNTER__) + name = f"debugger_func{__DEBUG_COUNTER__}" tvm._ffi.register_func(name, debug_func) __DEBUG_COUNTER__ += 1 else: diff --git a/python/tvm/relay/op/strategy/adreno.py b/python/tvm/relay/op/strategy/adreno.py index b606ab05d701..712b66e2469d 100644 --- a/python/tvm/relay/op/strategy/adreno.py +++ b/python/tvm/relay/op/strategy/adreno.py @@ -109,7 +109,7 @@ def conv2d_strategy_adreno(attrs, inputs, out_type, target): elif data_layout == "NHWC4c": ic = data.shape[3] * data.shape[4] else: - raise RuntimeError("Unsupported depthwise_conv2d data layout {}".format(data_layout)) + raise RuntimeError(f"Unsupported depthwise_conv2d data layout 
{data_layout}") if kernel_layout == "OIHW": oc = kernel.shape[0] elif kernel_layout == "OIHW4o": @@ -119,9 +119,7 @@ def conv2d_strategy_adreno(attrs, inputs, out_type, target): elif kernel_layout == "HWOI4o": oc = kernel.shape[2] * kernel.shape[4] else: - raise RuntimeError( - "Unsupported depthwise_conv2d kernel layout {}".format(kernel_layout) - ) + raise RuntimeError(f"Unsupported depthwise_conv2d kernel layout {kernel_layout}") if ic == oc == groups: if (data_layout == "NCHW" and kernel_layout == "OIHW") or ( @@ -186,9 +184,7 @@ def conv2d_winograd_without_weight_transform_strategy_adreno(attrs, inputs, out_ plevel=5, ) else: - raise RuntimeError( - "Unsupported conv2d_winograd_without_weight_transform layout {}".format(layout) - ) + raise RuntimeError(f"Unsupported conv2d_winograd_without_weight_transform layout {layout}") return strategy diff --git a/python/tvm/relay/op/strategy/arm_cpu.py b/python/tvm/relay/op/strategy/arm_cpu.py index 6e6c1bf03b5d..dc3b16aa82c2 100644 --- a/python/tvm/relay/op/strategy/arm_cpu.py +++ b/python/tvm/relay/op/strategy/arm_cpu.py @@ -180,9 +180,7 @@ def conv2d_strategy_arm_cpu(attrs, inputs, out_type, target): name="conv2d_nchw_spatial_pack.arm_cpu", ) else: - raise RuntimeError( - "Unsupported weight layout {} for conv2d NCHW".format(kernel_layout) - ) + raise RuntimeError(f"Unsupported weight layout {kernel_layout} for conv2d NCHW") elif layout == "HWCN": assert kernel_layout == "HWIO" logger.warning("conv2d_hwcn is not optimized for arm cpu.") @@ -237,12 +235,10 @@ def conv2d_strategy_arm_cpu(attrs, inputs, out_type, target): name="conv2d_nhwc_spatial_pack.arm_cpu", ) else: - raise RuntimeError( - "Unsupported kernel layout {} for conv2d NHWC".format(kernel_layout) - ) + raise RuntimeError(f"Unsupported kernel layout {kernel_layout} for conv2d NHWC") else: - raise RuntimeError("Unsupported conv2d layout {} for arm cpu".format(layout)) + raise RuntimeError(f"Unsupported conv2d layout {layout} for arm cpu") elif is_depthwise_conv2d(data.shape, layout, kernel.shape, kernel_layout, groups): if layout == "NCHW": assert kernel_layout == "OIHW" or re.match(r"OIHW\d*o", kernel_layout) @@ -329,7 +325,7 @@ def conv2d_strategy_arm_cpu(attrs, inputs, out_type, target): name="depthwise_conv2d_nhwc.generic", ) else: - raise RuntimeError("Unsupported depthwise_conv2d layout {} for arm cpu".format(layout)) + raise RuntimeError(f"Unsupported depthwise_conv2d layout {layout} for arm cpu") else: # group_conv2d if layout == "NCHW": assert kernel_layout == "OIHW" @@ -347,7 +343,7 @@ def conv2d_strategy_arm_cpu(attrs, inputs, out_type, target): name="group_conv2d_nhwc.generic", ) else: - raise RuntimeError("Unsupported group_conv2d layout {} for arm cpu".format(layout)) + raise RuntimeError(f"Unsupported group_conv2d layout {layout} for arm cpu") return strategy @@ -439,11 +435,9 @@ def conv2d_winograd_without_weight_transform_strategy_arm_cpu(attrs, inputs, out plevel=15, ) else: - raise RuntimeError("Unsupported kernel shape: {}".format(kernel.shape)) + raise RuntimeError(f"Unsupported kernel shape: {kernel.shape}") else: - raise RuntimeError( - "Unsupported conv2d_winograd_without_weight_transform layout {}".format(layout) - ) + raise RuntimeError(f"Unsupported conv2d_winograd_without_weight_transform layout {layout}") return strategy @@ -493,8 +487,8 @@ def conv2d_gemm_without_weight_transform_strategy_arm_cpu(attrs, inputs, out_typ ) else: raise RuntimeError( - "Unsupported conv2d_NHWC_quantized_without_transform layout {0}" - "with datatype {1}".format(layout, 
data.dtype) + f"Unsupported conv2d_NHWC_quantized_without_transform layout {layout}" + f"with datatype {data.dtype}" ) return strategy @@ -535,7 +529,7 @@ def bitserial_conv2d_strategy_arm_cpu(attrs, inputs, out_type, target): name="bitserial_conv2d_nhwc.arm_cpu", ) else: - raise ValueError("Data layout {} not supported.".format(layout)) + raise ValueError(f"Data layout {layout} not supported.") return strategy @@ -612,9 +606,7 @@ def conv1d_strategy_arm_cpu(attrs, inputs, out_type, target): ) else: raise RuntimeError( - "Unsupported kernel layout {} for conv1d {} for arm cpu.".format( - kernel_layout, layout - ) + f"Unsupported kernel layout {kernel_layout} for conv1d {layout} for arm cpu." ) elif layout == "NCW": logger.warning("conv1d with layout %s is not optimized for arm cpu.", layout) @@ -632,6 +624,6 @@ def conv1d_strategy_arm_cpu(attrs, inputs, out_type, target): ) else: raise RuntimeError( - "Unsupported kernel layout {} for conv1d {} for arm cpu.".format(kernel_layout, layout) + f"Unsupported kernel layout {kernel_layout} for conv1d {layout} for arm cpu." ) return strategy diff --git a/python/tvm/relay/op/strategy/bifrost.py b/python/tvm/relay/op/strategy/bifrost.py index 46ebb6048c2d..f437aa15f6a0 100644 --- a/python/tvm/relay/op/strategy/bifrost.py +++ b/python/tvm/relay/op/strategy/bifrost.py @@ -74,7 +74,7 @@ def conv2d_strategy_bifrost(attrs, inputs, out_type, target): name="conv2d_nhwc_spatial_pack.bifrost", ) else: - raise RuntimeError("Unsupported conv2d layout {} for Mali(Bifrost)".format(layout)) + raise RuntimeError(f"Unsupported conv2d layout {layout} for Mali(Bifrost)") elif is_depthwise_conv2d(data.shape, layout, kernel.shape, kernel_layout, groups): if layout == "NCHW": assert kernel_layout == "OIHW" @@ -92,9 +92,7 @@ def conv2d_strategy_bifrost(attrs, inputs, out_type, target): name="depthwise_conv2d_nchw.bifrost", ) else: - raise RuntimeError( - "Unsupported depthwise_conv2d layout {} for Mali(Bifrost)".format(layout) - ) + raise RuntimeError(f"Unsupported depthwise_conv2d layout {layout} for Mali(Bifrost)") else: # group_conv2d raise RuntimeError("group_conv2d is not supported for Mali(Bifrost)") return strategy @@ -118,9 +116,7 @@ def conv2d_winograd_without_weight_transform_strategy_bifrost(attrs, inputs, out name="conv2d_nchw_winograd.bifrost", ) else: - raise RuntimeError( - "Unsupported conv2d_winograd_without_weight_transform layout {}".format(layout) - ) + raise RuntimeError(f"Unsupported conv2d_winograd_without_weight_transform layout {layout}") return strategy diff --git a/python/tvm/relay/op/strategy/cuda.py b/python/tvm/relay/op/strategy/cuda.py index 65573321f76c..1fd806b7cf5c 100644 --- a/python/tvm/relay/op/strategy/cuda.py +++ b/python/tvm/relay/op/strategy/cuda.py @@ -357,7 +357,7 @@ def conv2d_strategy_cuda(attrs, inputs, out_type, target): ) elif target.kind.name == "cuda" and "cudnn" not in target.libs: # No TVM native kernel applicable - raise RuntimeError("Unsupported conv2d layout {} for CUDA".format(layout)) + raise RuntimeError(f"Unsupported conv2d layout {layout} for CUDA") if ( target.kind.name == "cuda" @@ -395,7 +395,7 @@ def conv2d_strategy_cuda(attrs, inputs, out_type, target): name="depthwise_conv2d_nhwc.cuda", ) else: - raise RuntimeError("Unsupported depthwise_conv2d layout {}".format(layout)) + raise RuntimeError(f"Unsupported depthwise_conv2d layout {layout}") else: # group_conv2d # add cudnn implementation, if any cudnn_impl = False @@ -453,7 +453,7 @@ def conv2d_strategy_cuda(attrs, inputs, out_type, target): 
name="group_conv2d_NCHWc_int8.cuda", ) elif not cudnn_impl: - raise RuntimeError("Unsupported group_conv2d layout {}".format(layout)) + raise RuntimeError(f"Unsupported group_conv2d layout {layout}") return strategy @@ -603,9 +603,7 @@ def conv2d_winograd_without_weight_transform_strategy_cuda(attrs, inputs, out_ty plevel=15, ) else: - raise RuntimeError( - "Unsupported conv2d_winograd_without_weight_transform layout {}".format(layout) - ) + raise RuntimeError(f"Unsupported conv2d_winograd_without_weight_transform layout {layout}") return strategy @@ -629,7 +627,7 @@ def deformable_conv2d_strategy_cuda(attrs, inputs, out_type, target): name="deformable_conv2d_nhwc.cuda", ) else: - raise RuntimeError("Layout %s is not supported in deformable conv2d on CUDA" % layout) + raise RuntimeError(f"Layout {layout} is not supported in deformable conv2d on CUDA") return strategy @@ -689,10 +687,9 @@ def conv2d_transpose_strategy_cuda(attrs, inputs, out_type, target): num_strategies += 1 # TODO(masahi): Support conv2d_transpose NHWC for non-cudnn path. - assert num_strategies > 0, "Unsupported conv2d_transpose workload, layout = %s, groups = %d" % ( - layout, - groups, - ) + assert ( + num_strategies > 0 + ), f"Unsupported conv2d_transpose workload, layout = {layout}, groups = {groups}" return strategy @@ -722,7 +719,7 @@ def conv3d_strategy_cuda(attrs, inputs, out_type, target): layout = attrs.data_layout _, stride_h, stride_w = attrs.get_int_tuple("strides") _, dilation_h, dilation_w = attrs.get_int_tuple("dilation") - assert layout in ["NCDHW", "NDHWC"], "Not support this layout {} yet".format(layout) + assert layout in ["NCDHW", "NDHWC"], f"Not support this layout {layout} yet" if layout == "NCDHW": strategy.add_implementation( wrap_compute_conv3d(topi.cuda.conv3d_ncdhw), @@ -796,9 +793,7 @@ def conv3d_winograd_without_weight_transform_strategy_cuda(attrs, inputs, out_ty name="conv3d_ncdhw_winograd_without_weight_transform.cuda", ) else: - raise RuntimeError( - "Unsupported conv3d_winograd_without_weight_transform layout {}".format(layout) - ) + raise RuntimeError(f"Unsupported conv3d_winograd_without_weight_transform layout {layout}") return strategy @@ -824,7 +819,7 @@ def conv1d_strategy_cuda(attrs, inputs, out_type, target): name="conv1d_nwc.cuda", ) else: - raise ValueError("Unsupported conv1d layout {}".format(layout)) + raise ValueError(f"Unsupported conv1d layout {layout}") else: if layout == "NCW": strategy.add_implementation( @@ -839,7 +834,7 @@ def conv1d_strategy_cuda(attrs, inputs, out_type, target): name="group_conv1d_nwc.cuda", ) else: - raise ValueError("Unsupported conv1d layout {}".format(layout)) + raise ValueError(f"Unsupported conv1d layout {layout}") return strategy @@ -868,15 +863,11 @@ def matmul_strategy_cuda(attrs, inputs, out_type, target): if is_auto_scheduler_enabled(): strategy.add_implementation( - wrap_compute_matmul(topi.nn.matmul), - naive_schedule, - name="matmul.cuda", + wrap_compute_matmul(topi.nn.matmul), naive_schedule, name="matmul.cuda" ) elif is_meta_schedule_enabled(): strategy.add_implementation( - wrap_compute_matmul(topi.nn.matmul), - naive_schedule, - name="matmul.cuda", + wrap_compute_matmul(topi.nn.matmul), naive_schedule, name="matmul.cuda" ) else: logger.warning( diff --git a/python/tvm/relay/op/strategy/generic.py b/python/tvm/relay/op/strategy/generic.py index 2883e5e1fb77..533d65ead727 100644 --- a/python/tvm/relay/op/strategy/generic.py +++ b/python/tvm/relay/op/strategy/generic.py @@ -67,12 +67,12 @@ def get_conv2d_in_channels(data_shape, 
data_layout): data_shape = get_const_tuple(data_shape) if len(data_shape) == 4: idx = data_layout.find("C") - assert idx >= 0, "Invalid conv2d data layout {}".format(data_layout) + assert idx >= 0, f"Invalid conv2d data layout {data_layout}" return data_shape[idx] if re.match(r"NCHW\d*c", data_layout): # NCHW[8]c return data_shape[1] * data_shape[4] - raise ValueError("Unknown conv2d data layout {}".format(data_layout)) + raise ValueError(f"Unknown conv2d data layout {data_layout}") def get_conv2d_out_channels(kernel_shape, kernel_layout): @@ -80,13 +80,13 @@ def get_conv2d_out_channels(kernel_shape, kernel_layout): kernel_shape = get_const_tuple(kernel_shape) if len(kernel_shape) == 4: idx = kernel_layout.find("O") - assert idx >= 0, "Invalid conv2d kernel layout {}".format(kernel_layout) + assert idx >= 0, f"Invalid conv2d kernel layout {kernel_layout}" return kernel_shape[idx] if re.match(r"OIHW\d*i\d*o", kernel_layout): return kernel_shape[0] * kernel_shape[5] if re.match(r"OIHW\d*o", kernel_layout): return kernel_shape[0] * kernel_shape[4] - raise ValueError("Unknown conv2d kernel layout {}".format(kernel_layout)) + raise ValueError(f"Unknown conv2d kernel layout {kernel_layout}") def is_depthwise_conv2d(data_shape, data_layout, kernel_shape, kernel_layout, groups): @@ -302,7 +302,7 @@ def conv2d_strategy(attrs, inputs, out_type, target): name="conv2d_hwcn.generic", ) else: - raise RuntimeError("Unsupported conv2d layout {}".format(layout)) + raise RuntimeError(f"Unsupported conv2d layout {layout}") elif is_depthwise_conv2d(data.shape, layout, kernel.shape, kernel_layout, groups): if layout == "NCHW": assert kernel_layout == "OIHW" @@ -319,7 +319,7 @@ def conv2d_strategy(attrs, inputs, out_type, target): name="depthwise_conv2d_nhwc.generic", ) else: - raise RuntimeError("Unsupported depthwise_conv2d layout {}".format(layout)) + raise RuntimeError(f"Unsupported depthwise_conv2d layout {layout}") else: # group_conv2d if layout == "NCHW": assert kernel_layout == "OIHW" @@ -336,7 +336,7 @@ def conv2d_strategy(attrs, inputs, out_type, target): name="group_conv2d_nhwc.generic", ) else: - raise RuntimeError("Unsupported group_conv2d layout {}".format(layout)) + raise RuntimeError(f"Unsupported group_conv2d layout {layout}") return strategy @@ -465,7 +465,7 @@ def deformable_conv2d_strategy(attrs, inputs, out_type, target): name="deformable_conv2d_nhwc.generic", ) else: - raise RuntimeError("Layout %s is not supported in deformable conv2d" % layout) + raise RuntimeError(f"Layout {layout} is not supported in deformable conv2d") return strategy @@ -608,7 +608,7 @@ def conv3d_strategy(attrs, inputs, out_type, target): name="conv3d_ndhwc.generic", ) else: - raise ValueError("Not support this layout {} yet".format(layout)) + raise ValueError(f"Not support this layout {layout} yet") return strategy @@ -665,7 +665,7 @@ def conv1d_strategy(attrs, inputs, out_type, target): name="conv1d_nwc.generic", ) else: - raise ValueError("Unsupported conv1d layout {}".format(layout)) + raise ValueError(f"Unsupported conv1d layout {layout}") return strategy @@ -708,7 +708,7 @@ def group_conv1d_strategy(attrs, inputs, out_type, target): name="group_conv1d_nwc.generic", ) else: - raise ValueError("Unsupported conv1d layout {}".format(layout)) + raise ValueError(f"Unsupported conv1d layout {layout}") return strategy @@ -796,7 +796,7 @@ def dilation2d_strategy(attrs, inputs, out_type, target): name="dilation2d_nhwc.generic", ) else: - raise RuntimeError("Unsupported dilation2d layout {}".format(layout)) + raise 
RuntimeError(f"Unsupported dilation2d layout {layout}") return strategy @@ -815,9 +815,7 @@ def copy_if_identical(tensor_a, tensor_b): # matmul def wrap_compute_matmul( - topi_compute, - need_auto_scheduler_layout=False, - need_meta_schedule_layout=False, + topi_compute, need_auto_scheduler_layout=False, need_meta_schedule_layout=False ): """wrap matmul topi compute""" @@ -825,14 +823,7 @@ def _compute_matmul(attrs, inputs, out_type): """Compute definition of matmul""" out_dtype = attrs.out_dtype out_dtype = inputs[0].dtype if out_dtype == "" else out_dtype - args = [ - inputs[0], - inputs[1], - None, - out_dtype, - attrs.transpose_a, - attrs.transpose_b, - ] + args = [inputs[0], inputs[1], None, out_dtype, attrs.transpose_a, attrs.transpose_b] if need_auto_scheduler_layout: args.append(get_auto_scheduler_rewritten_layout(attrs)) elif need_meta_schedule_layout: @@ -859,9 +850,7 @@ def matmul_strategy(attrs, inputs, out_type, target): # dense def wrap_compute_dense( - topi_compute, - need_auto_scheduler_layout=False, - need_meta_schedule_layout=False, + topi_compute, need_auto_scheduler_layout=False, need_meta_schedule_layout=False ): """wrap dense topi compute""" @@ -1309,12 +1298,7 @@ def _compute_nms(attrs, inputs, out_type): score_threshold = inputs[4] output_format = attrs.output_format return topi_compute( - inputs[0], - inputs[1], - max_output_size, - iou_threshold, - score_threshold, - output_format, + inputs[0], inputs[1], max_output_size, iou_threshold, score_threshold, output_format ) return _compute_nms @@ -1480,11 +1464,7 @@ def wrap_compute_dft(topi_compute): """Wrap DFT compute""" def _compute_dft(attrs, inputs, _): - return topi_compute( - inputs[0], - inputs[1], - attrs.inverse, - ) + return topi_compute(inputs[0], inputs[1], attrs.inverse) return _compute_dft @@ -1506,13 +1486,7 @@ def wrap_compute_trilu(topi_compute): """Wrap trilu compute""" def _compute_trilu(attrs, inputs, output_type): - return [ - topi_compute( - inputs[0], - inputs[1], - attrs.upper, - ) - ] + return [topi_compute(inputs[0], inputs[1], attrs.upper)] return _compute_trilu @@ -1663,7 +1637,7 @@ def bitserial_conv2d_strategy(attrs, inputs, out_type, target): name="bitserial_conv2d_nhwc.generic", ) else: - raise ValueError("Data layout {} not supported.".format(layout)) + raise ValueError(f"Data layout {layout} not supported.") return strategy @@ -2033,15 +2007,7 @@ def _compute_conv2d_backward_weight(attrs, inputs, out_dtype): layout = attrs.data_layout out_dtype = inputs[0].dtype if out_dtype in ("same", "") else out_dtype out = topi_compute( - inputs[0], - inputs[1], - kernel_size, - padding, - strides, - dilation, - groups, - layout, - out_dtype, + inputs[0], inputs[1], kernel_size, padding, strides, dilation, groups, layout, out_dtype ) return [out] @@ -2074,13 +2040,6 @@ def wrap_compute_layout_transform(topi_compute, schedule_rule="None"): """Wrap layout transform compute""" def _compute_layout_transform(attrs, inputs, output_type): - return [ - topi_compute( - inputs[0], - attrs.src_layout, - attrs.dst_layout, - schedule_rule, - ) - ] + return [topi_compute(inputs[0], attrs.src_layout, attrs.dst_layout, schedule_rule)] return _compute_layout_transform diff --git a/python/tvm/relay/op/strategy/hexagon.py b/python/tvm/relay/op/strategy/hexagon.py index f42503a1477c..2db3b2c886f5 100644 --- a/python/tvm/relay/op/strategy/hexagon.py +++ b/python/tvm/relay/op/strategy/hexagon.py @@ -92,7 +92,7 @@ def conv2d_strategy_hexagon(attrs, inputs, out_type, target): name="depthwise_conv2d_nhwc.hexagon", ) 
else: - raise RuntimeError("Unsupported depthwise_conv2d layout {}".format(layout)) + raise RuntimeError(f"Unsupported depthwise_conv2d layout {layout}") else: # group_conv2d raise RuntimeError(f"Unsupported group_conv2d layout {layout}") @@ -139,7 +139,7 @@ def conv2d_transpose_strategy_hexagon(attrs, inputs, out_type, target): name="conv2d_transpose_nchw.generic", ) else: - raise RuntimeError("Unsupported conv2d_transpose layout {}".format(layout)) + raise RuntimeError(f"Unsupported conv2d_transpose layout {layout}") return strategy diff --git a/python/tvm/relay/op/strategy/hls.py b/python/tvm/relay/op/strategy/hls.py index 4a682066ca2e..61f5a18e9ce9 100644 --- a/python/tvm/relay/op/strategy/hls.py +++ b/python/tvm/relay/op/strategy/hls.py @@ -109,7 +109,7 @@ def conv2d_strategy_hls(attrs, inputs, out_type, target): name="conv2d_nhwc.hls", ) else: - raise RuntimeError("Unsupported conv2d layout {}".format(layout)) + raise RuntimeError(f"Unsupported conv2d layout {layout}") elif is_depthwise_conv2d(data.shape, layout, kernel.shape, kernel_layout, groups): if layout == "NCHW": assert kernel_layout == "OIHW" @@ -126,7 +126,7 @@ def conv2d_strategy_hls(attrs, inputs, out_type, target): name="depthwise_nhwc.hls", ) else: - raise RuntimeError("Unsupported depthwise_conv2d layout {}".format(layout)) + raise RuntimeError(f"Unsupported depthwise_conv2d layout {layout}") else: # group_conv2d raise RuntimeError("group_conv2d is not supported for hls") return strategy @@ -192,5 +192,5 @@ def bitserial_conv2d_strategy_hls(attrs, inputs, out_type, target): name="bitserial_conv2d_nhwc.hls", ) else: - raise ValueError("Data layout {} not supported.".format(layout)) + raise ValueError(f"Data layout {layout} not supported.") return strategy diff --git a/python/tvm/relay/op/strategy/intel_graphics.py b/python/tvm/relay/op/strategy/intel_graphics.py index 115a71114468..4bbafb62f2f2 100644 --- a/python/tvm/relay/op/strategy/intel_graphics.py +++ b/python/tvm/relay/op/strategy/intel_graphics.py @@ -52,7 +52,7 @@ def conv2d_strategy_intel_graphics(attrs, inputs, out_type, target): plevel=5, ) else: - raise RuntimeError("Unsupported conv2d layout {} for intel graphics".format(layout)) + raise RuntimeError(f"Unsupported conv2d layout {layout} for intel graphics") elif is_depthwise_conv2d(data.shape, layout, kernel.shape, kernel_layout, groups): if layout == "NCHW": assert kernel_layout == "OIHW" @@ -62,7 +62,7 @@ def conv2d_strategy_intel_graphics(attrs, inputs, out_type, target): name="depthwise_conv2d_nchw.intel_graphics", ) else: - raise RuntimeError("Unsupported depthwise_conv2d layout {}".format(layout)) + raise RuntimeError(f"Unsupported depthwise_conv2d layout {layout}") else: # group_conv2d raise RuntimeError("group_conv2d is not supported for intel graphics") return strategy diff --git a/python/tvm/relay/op/strategy/mali.py b/python/tvm/relay/op/strategy/mali.py index c39487b16d55..f37071c9fcbd 100644 --- a/python/tvm/relay/op/strategy/mali.py +++ b/python/tvm/relay/op/strategy/mali.py @@ -70,9 +70,7 @@ def conv2d_strategy_mali(attrs, inputs, out_type, target): name="conv2d_nchw_spatial_pack.mali", ) else: - raise RuntimeError( - "Unsupported weight layout {} for conv2d NCHW".format(kernel_layout) - ) + raise RuntimeError(f"Unsupported weight layout {kernel_layout} for conv2d NCHW") elif layout == "NHWC": assert kernel_layout == "HWIO" need_auto_scheduler_layout = is_auto_scheduler_enabled() @@ -133,7 +131,7 @@ def conv2d_strategy_mali(attrs, inputs, out_type, target): ) else: - raise 
RuntimeError("Unsupported conv2d layout {} for mali".format(layout)) + raise RuntimeError(f"Unsupported conv2d layout {layout} for mali") elif is_depthwise_conv2d(data.shape, layout, kernel.shape, kernel_layout, groups): if layout == "NCHW": assert kernel_layout == "OIHW" @@ -163,7 +161,7 @@ def conv2d_strategy_mali(attrs, inputs, out_type, target): name="depthwise_conv2d_nhwc.mali", ) else: - raise RuntimeError("Unsupported depthwise_conv2d layout {} for mali".format(layout)) + raise RuntimeError(f"Unsupported depthwise_conv2d layout {layout} for mali") else: # group_conv2d raise RuntimeError("group_conv2d is not supported for mali") return strategy @@ -207,9 +205,7 @@ def conv2d_winograd_without_weight_transform_strategy_mali(attrs, inputs, out_ty "Winograd conv2d NHWC is not enabled for mali without auto_scheduler." ) else: - raise RuntimeError( - "Unsupported conv2d_winograd_without_weight_transform layout {}".format(layout) - ) + raise RuntimeError(f"Unsupported conv2d_winograd_without_weight_transform layout {layout}") return strategy diff --git a/python/tvm/relay/op/strategy/x86.py b/python/tvm/relay/op/strategy/x86.py index bcc9ca4e206b..1b69c7a6ca42 100644 --- a/python/tvm/relay/op/strategy/x86.py +++ b/python/tvm/relay/op/strategy/x86.py @@ -201,7 +201,7 @@ def conv2d_strategy_cpu(attrs, inputs, out_type, target): name="conv2d_hwcn.generic", ) else: - raise RuntimeError("Unsupported conv2d layout {} for x86".format(layout)) + raise RuntimeError(f"Unsupported conv2d layout {layout} for x86") elif is_depthwise_conv2d(data.shape, layout, kernel.shape, kernel_layout, groups): if layout == "NCHW": assert kernel_layout == "OIHW" @@ -236,7 +236,7 @@ def conv2d_strategy_cpu(attrs, inputs, out_type, target): name="depthwise_conv2d_nhwc.generic", ) else: - raise RuntimeError("Unsupported depthwise_conv2d layout {}".format(layout)) + raise RuntimeError(f"Unsupported depthwise_conv2d layout {layout}") else: # group_conv2d if layout == "NCHW": assert kernel_layout == "OIHW" @@ -258,7 +258,7 @@ def conv2d_strategy_cpu(attrs, inputs, out_type, target): assert _OIHWio_matcher.match(kernel_layout) # check if kernel is OIHWio return conv2d_NCHWc_strategy_cpu(attrs, inputs, out_type, target) else: - raise RuntimeError("Unsupported group_conv2d layout {}".format(layout)) + raise RuntimeError(f"Unsupported group_conv2d layout {layout}") return strategy @@ -352,9 +352,7 @@ def conv3d_strategy_cpu(attrs, inputs, out_type, target): # or packed layouts. 
if layout == "NCDHW": strategy.add_implementation( - wrap_compute_conv3d(topi.nn.conv3d_ncdhw), - naive_schedule, - name="conv3d_ncdhw.x86", + wrap_compute_conv3d(topi.nn.conv3d_ncdhw), naive_schedule, name="conv3d_ncdhw.x86" ) elif layout == "NDHWC": strategy.add_implementation( @@ -367,7 +365,7 @@ def conv3d_strategy_cpu(attrs, inputs, out_type, target): name="conv3d_ndhwc.x86", ) else: - raise ValueError("Not support this layout {} yet".format(layout)) + raise ValueError(f"Not support this layout {layout} yet") else: # Use autotvm templates if layout == "NCDHW": @@ -383,7 +381,7 @@ def conv3d_strategy_cpu(attrs, inputs, out_type, target): name="conv3d_ndhwc.x86", ) else: - raise ValueError("Not support this layout {} yet".format(layout)) + raise ValueError(f"Not support this layout {layout} yet") return strategy @@ -410,7 +408,7 @@ def conv1d_strategy_cpu(attrs, inputs, out_type, target): name="conv1d_nwc.x86", ) else: - raise ValueError("Unsupported conv1d layout {}".format(layout)) + raise ValueError(f"Unsupported conv1d layout {layout}") else: if layout == "NCW": strategy.add_implementation( @@ -425,7 +423,7 @@ def conv1d_strategy_cpu(attrs, inputs, out_type, target): name="group_conv1d_nwc.x86", ) else: - raise ValueError("Unsupported conv1d layout {}".format(layout)) + raise ValueError(f"Unsupported conv1d layout {layout}") return strategy @@ -500,9 +498,7 @@ def matmul_strategy_cpu(attrs, inputs, out_type, target): "Recommend to use cblas/mkl/dnnl for better performance." ) strategy.add_implementation( - wrap_compute_matmul(topi.nn.matmul), - naive_schedule, - name="matmul.generic", + wrap_compute_matmul(topi.nn.matmul), naive_schedule, name="matmul.generic" ) return strategy @@ -750,7 +746,7 @@ def bitserial_conv2d_strategy_cpu(attrs, inputs, out_type, target): name="bitserial_conv2d_nhwc.x86", ) else: - raise ValueError("Data layout {} not supported.".format(layout)) + raise ValueError(f"Data layout {layout} not supported.") return strategy @@ -816,9 +812,7 @@ def conv2d_winograd_without_weight_transform_strategy_cpu(attrs, inputs, out_typ else: raise RuntimeError("Both AutoScheduler and MetaSchedule are not enabled") else: - raise RuntimeError( - "Unsupported conv2d_winograd_without_weight_transform layout {}".format(layout) - ) + raise RuntimeError(f"Unsupported conv2d_winograd_without_weight_transform layout {layout}") return strategy diff --git a/python/tvm/relay/op/tensor.py b/python/tvm/relay/op/tensor.py index aa3ede5a07dc..6b488719eb84 100644 --- a/python/tvm/relay/op/tensor.py +++ b/python/tvm/relay/op/tensor.py @@ -32,7 +32,7 @@ def _make_virtual_device(device): return target.VirtualDevice(device) if isinstance(device, str): return target.VirtualDevice(_nd.device(device)) - raise ValueError("expecting a Device or device name, but received a %s" % (type(device))) + raise ValueError(f"expecting a Device or device name, but received a {type(device)}") # We create a wrapper function for each operator in the diff --git a/python/tvm/relay/op/transform.py b/python/tvm/relay/op/transform.py index c8e4879a6181..ef1cdb3afdd8 100644 --- a/python/tvm/relay/op/transform.py +++ b/python/tvm/relay/op/transform.py @@ -235,7 +235,7 @@ def squeeze(data, axis=None): try: tempaxis.append(int(tmpax)) except ValueError as err: - raise RuntimeError("Unrecognized axis type: %s" % err) + raise RuntimeError(f"Unrecognized axis type: {err}") axis = tempaxis return _make.squeeze(data, axis) @@ -324,7 +324,7 @@ def reshape(data, newshape, allowzero=False): try: tempshape.append(int(shape)) except 
ValueError as err: - raise RuntimeError("Unrecognized shape type: %s" % err) + raise RuntimeError(f"Unrecognized shape type: {err}") newshape = tempshape return _make.reshape(data, list(newshape), allowzero) diff --git a/python/tvm/relay/op/vision/_rcnn.py b/python/tvm/relay/op/vision/_rcnn.py index 4686974059b4..a3f749236d3f 100644 --- a/python/tvm/relay/op/vision/_rcnn.py +++ b/python/tvm/relay/op/vision/_rcnn.py @@ -66,7 +66,7 @@ def convert_roi_align(attrs, inputs, tinfos, desired_layouts): if desired_data_layout in ["NCHW", "NHWC"]: return relay.vision.roi_align(data, rois, **new_attrs) - raise ValueError("Layout %s is not yet supported." % desired_data_layout) + raise ValueError(f"Layout {desired_data_layout} is not yet supported.") @reg.register_convert_op_layout("vision.roi_pool") @@ -108,7 +108,7 @@ def convert_roi_pool(attrs, inputs, tinfos, desired_layouts): if desired_data_layout in ["NCHW", "NHWC"]: return relay.vision.roi_pool(data, rois, **new_attrs) - raise ValueError("Layout %s is not yet supported." % desired_data_layout) + raise ValueError(f"Layout {desired_data_layout} is not yet supported.") # roi_pool
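
The patch above is mechanical: every `%`-interpolation and `str.format()` call used to build error and assertion messages is rewritten as an f-string, and a few argument lists are re-wrapped onto single lines with no behavioral change. A minimal sketch of the before/after pattern follows; the `layout` variable and `old_*`/`new_*` names are illustrative only and do not appear in the diff.

```python
# Illustrative sketch of the conversion applied throughout the patch above.
# Variable names here are hypothetical; only the formatting style changes.
layout = "NCHW"

# Old styles being removed:
old_percent = "Unsupported conv2d layout %s" % layout
old_format = "Unsupported conv2d layout {}".format(layout)

# New style being introduced:
new_fstring = f"Unsupported conv2d layout {layout}"

# All three render the same message, so the change is purely stylistic.
assert old_percent == old_format == new_fstring
```

Because the rendered strings are identical, the change carries no functional risk; it only standardizes message construction on f-strings.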