diff --git a/docs/reference/langref/relay_op.rst b/docs/reference/langref/relay_op.rst
index 8788eb52ae0d..8bc24b9ab865 100644
--- a/docs/reference/langref/relay_op.rst
+++ b/docs/reference/langref/relay_op.rst
@@ -245,4 +245,3 @@ This level supports dialect operators.
    tvm.relay.qnn.op.simulated_dequantize
    tvm.relay.qnn.op.simulated_quantize
    tvm.relay.qnn.op.subtract
-   tvm.relay.qnn.op.transpose_conv2d
diff --git a/python/tvm/relay/op/strategy/generic.py b/python/tvm/relay/op/strategy/generic.py
index abd3e28bc3eb..b8a31370658e 100644
--- a/python/tvm/relay/op/strategy/generic.py
+++ b/python/tvm/relay/op/strategy/generic.py
@@ -482,7 +482,7 @@ def conv2d_transpose_strategy(attrs, inputs, out_type, target):
             wrap_topi_schedule(topi.generic.schedule_conv2d_transpose_nchw),
             name="conv2d_transpose_nchw.generic",
         )
-    else:  # group_transpose_conv2d
+    else:  # group_conv2d_transpose
         strategy.add_implementation(
             wrap_compute_conv2d_transpose(topi.nn.group_conv2d_transpose_nchw, has_groups=True),
             wrap_topi_schedule(topi.generic.schedule_group_conv2d_transpose_nchw),
diff --git a/python/tvm/relay/qnn/op/qnn.py b/python/tvm/relay/qnn/op/qnn.py
index aef514d81cc1..85629a9b5a5a 100644
--- a/python/tvm/relay/qnn/op/qnn.py
+++ b/python/tvm/relay/qnn/op/qnn.py
@@ -527,8 +527,8 @@ def conv2d_transpose(
     kernel_scale: tvm.relay.Expr
         The scale for the weight tensor. The scale for the weight tensor is
         stored for access to this during relay. This information is not
-        needed in the pass pipeline after qnn.transpose_conv2d is lowered to the
-        sequence of steps as in nn.transpose_conv2d. See also input_scale in Requantize.
+        needed in the pass pipeline after qnn.conv2d_transpose is lowered to the
+        sequence of steps as in nn.conv2d_transpose. See also input_scale in Requantize.

     strides : Tuple[int], optional
         The strides of convolution.
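
For context beyond the patch itself: a minimal sketch of how the operator named in the corrected docstring is typically constructed from Python, assuming the standard relay.qnn.op.conv2d_transpose signature. The shapes, layouts, and quantization parameters below are illustrative only and are not taken from this change.

from tvm import relay

# Hypothetical quantized tensors; NCHW data and IOHW weights are assumed here.
data = relay.var("data", shape=(1, 8, 16, 16), dtype="int8")
weight = relay.var("weight", shape=(8, 4, 3, 3), dtype="int8")

out = relay.qnn.op.conv2d_transpose(
    data,
    weight,
    input_zero_point=relay.const(0, "int32"),
    kernel_zero_point=relay.const(0, "int32"),
    input_scale=relay.const(0.15, "float32"),
    # kernel_scale is carried on the op so later passes can requantize; as the
    # docstring notes, it is not needed once qnn.conv2d_transpose is lowered
    # to the nn.conv2d_transpose sequence of steps.
    kernel_scale=relay.const(0.04, "float32"),
    kernel_size=(3, 3),
    channels=4,
    strides=(2, 2),
    padding=(1, 1),
    output_padding=(1, 1),
    data_layout="NCHW",
    kernel_layout="IOHW",
    out_dtype="int32",
)
print(relay.Function([data, weight], out))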