lint
icemelon committed Feb 5, 2020
1 parent b233005 commit efcddbb
Showing 33 changed files with 94 additions and 53 deletions.
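
Most of the edits below follow two recurring pylint-driven patterns: adding a one-line docstring to each registered TOPI compute/schedule function (missing-docstring) and renaming a parameter the function never reads to "_" (unused-argument), plus a few widened "# pylint: disable=..." lines and removed unused imports. A minimal, self-contained Python sketch of the two fixes on a toy function (names and body are illustrative only, not taken from the commit):

# Before the lint pass: pylint reports missing-docstring and unused-argument ("cfg" is never read).
def example_compute_before(cfg, data, kernel, strides, padding, dilation, out_dtype):
    return (data, kernel, strides, padding, dilation, out_dtype)

# After the lint pass: a one-line docstring is added and the unused argument becomes "_".
def example_compute_after(_, data, kernel, strides, padding, dilation, out_dtype):
    """Compute example with the given layout parameters"""
    return (data, kernel, strides, padding, dilation, out_dtype)
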
2 changes: 1 addition & 1 deletion topi/python/topi/arm_cpu/__init__.py
@@ -14,7 +14,7 @@
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

# pylint: disable=wildcard-import
"""Schedule for ARM CPU"""

from .conv2d import *
2 changes: 1 addition & 1 deletion topi/python/topi/arm_cpu/bitserial_conv2d.py
@@ -22,7 +22,7 @@
from tvm import relay
from .. import tag
from ..nn.pad import pad
from ..nn.bitserial_conv2d import bitserial_conv2d_nhwc, bitserial_conv2d_legalize
from ..nn.bitserial_conv2d import bitserial_conv2d_legalize
from ..nn.bitserial_util import bitpack, binary_op_multiplier
from ..nn.util import get_pad_tuple
from ..util import get_const_int, get_const_tuple
9 changes: 9 additions & 0 deletions topi/python/topi/arm_cpu/conv2d.py
@@ -34,12 +34,14 @@

@autotvm.register_topi_compute("conv2d_nchw_spatial_pack.arm_cpu")
def conv2d_nchw_spatial_pack(cfg, data, kernel, strides, padding, dilation, out_dtype):
"""Compute conv2d with NCHW layout"""
return conv2d_spatial_pack_nchw(cfg, data, kernel, strides, padding,
dilation, out_dtype, num_tile=2)


@autotvm.register_topi_schedule("conv2d_nchw_spatial_pack.arm_cpu")
def schedule_conv2d_nchw_spatial_pack(cfg, outs):
"""Create schedule for conv2d_nchw"""
s = tvm.create_schedule([x.op for x in outs])

def _callback(op):
@@ -69,12 +71,14 @@ def _callback(op):

@autotvm.register_topi_compute("conv2d_nhwc_spatial_pack.arm_cpu")
def conv2d_nhwc_spatial_pack(cfg, data, kernel, strides, padding, dilation, out_dtype):
"""Compute conv2d with NHWC layout"""
return conv2d_spatial_pack_nhwc(cfg, data, kernel, strides, padding,
dilation, out_dtype)


@autotvm.register_topi_schedule("conv2d_nhwc_spatial_pack.arm_cpu")
def schedule_conv2d_nhwc_spatial_pack(cfg, outs):
"""Create schedule for conv2d_nhwc"""
s = tvm.create_schedule([x.op for x in outs])

def _callback(op):
@@ -87,13 +91,15 @@ def _callback(op):

@autotvm.register_topi_compute("conv2d_nchw_winograd.arm_cpu")
def conv2d_nchw_winograd(cfg, data, kernel, strides, padding, dilation, out_dtype):
"""Compute conv2d_nchw layout using Winograd with weight transform"""
tile_size = 4
return _decl_winograd(cfg, data, kernel, strides, padding, dilation,
out_dtype, tile_size)


@autotvm.register_topi_schedule("conv2d_nchw_winograd.arm_cpu")
def schedule_conv2d_nchw_winograd(cfg, outs):
"""Create schedule for conv2d_nchw_winograd"""
s = tvm.create_schedule([x.op for x in outs])

def _callback(op):
@@ -286,6 +292,7 @@ def _schedule_winograd(cfg, s, output, last):

@autotvm.register_topi_compute("conv2d_nchw_winograd_nnpack.arm_cpu")
def conv2d_nchw_winograd_nnpack(cfg, data, kernel, strides, padding, dilation, out_dtype):
"""Compute conv2d_nchw using nnpack Winograd implementation"""
dtype = data.dtype
if dtype == "float32":
return _conv2d_arm_cpu_winograd_nnpack(
@@ -302,6 +309,7 @@ def conv2d_nchw_winograd_nnpack(cfg, data, kernel, strides, padding, dilation, o

@autotvm.register_topi_schedule("conv2d_nchw_winograd_nnpack.arm_cpu")
def schedule_conv2d_nchw_winograd_nnpack(cfg, outs):
"""Create schedule for conv2d_nchw_winograd_nnpack"""
s = tvm.create_schedule([x.op for x in outs])

def _callback(op):
@@ -371,6 +379,7 @@ def _schedule_winograd_nnpack(cfg, s, output, last):
@autotvm.register_topi_compute("conv2d_nchw_winograd_nnpack_without_weight_transform.arm_cpu")
def conv2d_nchw_winograd_nnpack_without_weight_transform(
cfg, data, transformed_kernel, bias, strides, padding, dilation, out_dtype):
"""Compute conv2d_nchw using NNPack winograd without weight transform"""
N, CI, IH, IW = get_const_tuple(data.shape)
if isinstance(dilation, int):
dilation_h = dilation_w = dilation
6 changes: 3 additions & 3 deletions topi/python/topi/arm_cpu/conv2d_alter_op.py
@@ -14,7 +14,7 @@
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable,unused-argument,no-member
# pylint: disable=invalid-name,unused-variable,unused-argument,no-member,no-else-return
"""Conv2D alter op and legalize functions for arm cpu"""

import logging
@@ -104,8 +104,8 @@ def _alter_conv2d_layout(attrs, inputs, tinfos, out_type):
weight_expr, tile_size=tile_size)
weight_expr = relay.reshape(weight_expr,
newshape=(KH + tile_size - 1,
KW + tile_size - 1,
idxd(CO, VC), VC, CI))
KW + tile_size - 1,
idxd(CO, VC), VC, CI))
weight_expr = relay.transpose(weight_expr, axes=[0, 1, 2, 4, 3])

new_attrs['tile_size'] = tile_size
3 changes: 2 additions & 1 deletion topi/python/topi/arm_cpu/conv2d_int8.py
@@ -19,7 +19,7 @@

import tvm
from tvm import autotvm
from .. import generic, tag
from .. import tag
from ..util import get_const_tuple
from ..generic import conv2d as conv2d_generic
from .. import nn
@@ -44,6 +44,7 @@ def _get_default_config(cfg, data, kernel, strides, padding, out_dtype):
@autotvm.register_topi_compute("conv2d_NCHWc_int8.arm_cpu")
def conv2d_NCHWc_int8(cfg, data, kernel, strides,
padding, dilation, layout, out_layout, out_dtype):
"""Compute conv2d int8 with NCHWc layout"""
# layout and out_layout are not used here,
# we keep them for debug convenience when dumping autotvm workload
n, ic_chunk, ih, iw, ic_bn = get_const_tuple(data.shape)
4 changes: 3 additions & 1 deletion topi/python/topi/arm_cpu/depthwise_conv2d.py
@@ -26,7 +26,8 @@


@autotvm.register_topi_compute("depthwise_conv2d_nchw.arm_cpu")
def depthwise_conv2d_nchw(cfg, data, kernel, strides, padding, dilation, out_dtype):
def depthwise_conv2d_nchw(_, data, kernel, strides, padding, dilation, out_dtype):
"""Compute depthwise_conv2d with NCHW layout"""
return nn.depthwise_conv2d_nchw(data, kernel, strides, padding, dilation, out_dtype)


@@ -177,6 +178,7 @@ def depthwise_conv2d_nchw_spatial_pack(cfg, data, kernel, strides, padding, dila

@autotvm.register_topi_schedule("depthwise_conv2d_nchw_spatial_pack.arm_cpu")
def schedule_depthwise_conv2d_nchw_spatial_pack(cfg, outs):
"""Create the schedule for depthwise_conv2d_nchw_spatial_pack"""
outs = [outs] if isinstance(outs, tvm.tensor.Tensor) else outs
s = tvm.create_schedule([x.op for x in outs])

2 changes: 1 addition & 1 deletion topi/python/topi/bifrost/conv2d.py
@@ -15,7 +15,7 @@
# specific language governing permissions and limitations
# under the License.

# pylint: disable=invalid-name,unused-variable,unused-argument
# pylint: disable=invalid-name,unused-variable,unused-argument,no-else-return
"""conv2d schedule on ARM Mali (Bifrost) GPU"""

import tvm
24 changes: 12 additions & 12 deletions topi/python/topi/bifrost/dense.py
@@ -55,11 +55,11 @@ def _callback(op):
vec_size = [1, 2, 4, 8, 16]
max_unroll = 32

dense = op.output(0)
dense_out = op.output(0)
output = outs[0]

y, x = s[output].op.axis
c = s[dense].op.reduce_axis[0]
c = s[dense_out].op.reduce_axis[0]

##### space definition begin #####
cfg.define_split('tile_y', y, num_outputs=3)
@@ -73,8 +73,8 @@ def _callback(op):
cfg.fallback_with_reference_log(ref_log)
##### space definition end #####

if dense.op in s.outputs:
dense = s.cache_write(output, 'local')
if dense_out.op in s.outputs:
dense_out = s.cache_write(output, 'local')

by, ty, yi = cfg['tile_y'].apply(s, output, y)
bx, tx, xi = cfg['tile_x'].apply(s, output, x)
@@ -88,17 +88,17 @@ def _callback(op):
s[output].unroll(yi)
if cfg['tile_x'].size[-1] in vec_size:
s[output].vectorize(xi)
s[dense].compute_at(s[output], tx)
s[dense_out].compute_at(s[output], tx)

k = s[dense].op.reduce_axis[0]
y, x = s[dense].op.axis
k, k_unroll = cfg['c_unroll'].apply(s, dense, k)
s[dense].reorder(k, k_unroll, y, x)
s[dense].unroll(k_unroll)
k = s[dense_out].op.reduce_axis[0]
y, x = s[dense_out].op.axis
k, k_unroll = cfg['c_unroll'].apply(s, dense_out, k)
s[dense_out].reorder(k, k_unroll, y, x)
s[dense_out].unroll(k_unroll)
if cfg['tile_y'].size[-1] < max_unroll:
s[dense].unroll(y)
s[dense_out].unroll(y)
if cfg['tile_x'].size[-1] in vec_size:
s[dense].vectorize(x)
s[dense_out].vectorize(x)

traverse_inline(s, outs[0].op, _callback)
return s
6 changes: 5 additions & 1 deletion topi/python/topi/cuda/conv2d.py
@@ -14,7 +14,7 @@
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
# pylint: disable=invalid-name, unused-argument
"""Compute definition for conv2d with cuda backend"""
import tvm
from tvm import autotvm
@@ -28,11 +28,13 @@

@autotvm.register_topi_compute("conv2d_nchw.cuda")
def conv2d_nchw(cfg, data, kernel, strides, padding, dilation, out_dtype='float32'):
"""Compute conv2d with NCHW layout"""
return nn.conv2d_nchw(data, kernel, strides, padding, dilation, out_dtype)


@autotvm.register_topi_schedule("conv2d_nchw.cuda")
def schedule_conv2d_nchw(cfg, outs):
"""Create the schedule for conv2d_nchw"""
outs = [outs] if isinstance(outs, tvm.tensor.Tensor) else outs
s = tvm.create_schedule([x.op for x in outs])

@@ -67,6 +69,7 @@ def _callback(op):
@autotvm.register_topi_compute("conv2d_cudnn.cuda")
def conv2d_cudnn(cfg, data, kernel, strides, padding, dilation, layout='NCHW',
out_dtype='float32'):
"""Compute conv2d using CuDNN library"""
if layout == 'NCHW':
tensor_format = 0 # CUDNN_TENSOR_NCHW
N, _, H, W = get_const_tuple(data.shape)
@@ -109,4 +112,5 @@ def conv2d_cudnn(cfg, data, kernel, strides, padding, dilation, layout='NCHW',

@autotvm.register_topi_schedule("conv2d_cudnn.cuda")
def schedule_conv2d_cudnn(cfg, outs):
"""Create the schedule for conv2d_cudnn"""
return generic.schedule_extern(outs)
4 changes: 3 additions & 1 deletion topi/python/topi/cuda/conv2d_hwcn.py
@@ -14,16 +14,18 @@
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, too-many-locals, too-many-statements
# pylint: disable=invalid-name, too-many-locals, too-many-statements, unused-argument
"""Schedule for conv2d_hwcn with auto fusion"""
import tvm
from tvm import autotvm

from tvm.autotvm.task.space import SplitEntity

from .. import nn, tag

@autotvm.register_topi_compute("conv2d_hwcn.cuda")
def conv2d_hwcn(cfg, data, kernel, strides, padding, dilation, out_dtype='float32'):
"""Compute conv2d with HWCN layout on CUDA"""
return nn.conv2d_hwcn(data, kernel, strides, padding, dilation, out_dtype)


2 changes: 1 addition & 1 deletion topi/python/topi/cuda/conv2d_int8.py
@@ -155,6 +155,7 @@ def conv2d_NCHWc_int8(cfg, data, kernel, stride, padding, dilation, layout, out_

@autotvm.register_topi_schedule("conv2d_NCHWc_int8.cuda")
def schedule_conv2d_NCHWc_int8(cfg, outs):
"""Schedule conv2d int8 NCHWc template"""
outs = [outs] if isinstance(outs, tvm.tensor.Tensor) else outs
s = tvm.create_schedule([x.op for x in outs])

@@ -167,7 +168,6 @@ def _callback(op):


def _schedule_conv2d_NCHWc_int8(cfg, s, output):
"""Schedule conv2d int8 NCHWc template"""
conv = output.op.input_tensors[0]
packed_data, packed_kernel = conv.op.input_tensors

2 changes: 1 addition & 1 deletion topi/python/topi/cuda/conv3d.py
@@ -14,7 +14,7 @@
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
# pylint: disable=invalid-name, unused-argument
"""Compute definition for conv3d with cuda backend"""
import tvm
from tvm import autotvm
2 changes: 1 addition & 1 deletion topi/python/topi/cuda/deformable_conv2d.py
@@ -14,7 +14,7 @@
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
# pylint: disable=invalid-name,unused-argument
"""Schedule template of deformable conv2d with cuda backend"""
import tvm
from tvm import autotvm
5 changes: 3 additions & 2 deletions topi/python/topi/cuda/dense.py
@@ -14,7 +14,7 @@
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-variable
# pylint: disable=invalid-name, unused-argument
"""Schedule for dense operator"""
from __future__ import absolute_import as _abs
import logging
@@ -65,6 +65,7 @@ def dense_small_batch(cfg, data, weight, bias=None, out_dtype=None):

@autotvm.register_topi_schedule("dense_small_batch.cuda")
def schedule_dense_small_batch(cfg, outs):
"""Schedule float32/64 dense with small batch size"""
outs = [outs] if isinstance(outs, tvm.tensor.Tensor) else outs
s = tvm.create_schedule([x.op for x in outs])

@@ -76,7 +77,6 @@ def _callback(op):
return s

def _schedule_dense_small_batch(cfg, s, C):
"""Schedule float32/64 dense with small batch size"""
A, _ = C.op.input_tensors
_, in_dim = get_const_tuple(A.shape)
cfg.define_split('tile_k', in_dim, num_outputs=2)
@@ -110,6 +110,7 @@ def dense_large_batch(cfg, data, weight, bias=None, out_dtype=None):

@autotvm.register_topi_schedule("dense_large_batch.cuda")
def schedule_dense_large_batch(cfg, outs):
"""Schedule float32/64 dense with large batch size"""
outs = [outs] if isinstance(outs, tvm.tensor.Tensor) else outs
s = tvm.create_schedule([x.op for x in outs])

3 changes: 2 additions & 1 deletion topi/python/topi/cuda/depthwise_conv2d.py
@@ -14,7 +14,7 @@
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
# pylint: disable=invalid-name, unused-argument
"""Schedule for depthwise_conv2d with auto fusion"""
import tvm
from tvm import autotvm
@@ -25,6 +25,7 @@
# register original implementation of depthwise_conv2d_nchw since we don't need to change this part
@autotvm.register_topi_compute("depthwise_conv2d_nchw.cuda")
def depthwise_conv2d_nchw(cfg, data, kernel, strides, padding, dilation, out_dtype):
"""Compute depthwise_conv2d with NCHW layout."""
return nn.depthwise_conv2d_nchw(data, kernel, strides, padding, dilation, out_dtype)

@autotvm.register_topi_schedule("depthwise_conv2d_nchw.cuda")
17 changes: 8 additions & 9 deletions topi/python/topi/cuda/group_conv2d_nchw.py
@@ -28,7 +28,7 @@


@autotvm.register_topi_compute("group_conv2d_nchw.cuda")
def group_conv2d_nchw_cuda(cfg, data, kernel, stride, padding, dilation, groups,
def group_conv2d_nchw_cuda(_, data, kernel, stride, padding, dilation, groups,
out_dtype='float32'):
return nn.group_conv2d_nchw(data, kernel, stride, padding, dilation, groups, out_dtype)

@@ -302,14 +302,13 @@ def group_conv2d_NCHWc_int8(cfg, data, kernel, stride, padding, dilation, groups
#
# Compared with a normal convolution, group convolution only sums
# input channels from the group that an output channel resides in.
conv = tvm.compute(oshape, lambda n, occ, oh, ow, ocb:
tvm.sum(pad_data[n, occ//(oc_chunk//groups)*(ic_chunk//groups)+icc,
oh*stride_h+kh*dilation_h, ow*stride_w+kw*dilation_w, icb]
.astype('int32') *
packed_kernel[occ, icc,
kh, kw, ocb, icb]
.astype('int32'),
axis=[icc, kh, kw, icb]))
conv = tvm.compute(
oshape, lambda n, occ, oh, ow, ocb:
tvm.sum(pad_data[n, occ//(oc_chunk//groups)*(ic_chunk//groups)+icc,
oh*stride_h+kh*dilation_h, ow*stride_w+kw*dilation_w, icb]
.astype('int32') *
packed_kernel[occ, icc, kh, kw, ocb, icb].astype('int32'),
axis=[icc, kh, kw, icb]))

# Type conversion
output = tvm.compute(oshape, lambda *index: conv(*index).astype(out_dtype),
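
The comment in the group_conv2d_NCHWc_int8 hunk above notes that group convolution only sums input channels from the group that an output channel resides in; the packed index occ//(oc_chunk//groups)*(ic_chunk//groups)+icc is what restricts the reduction to that group. A small stand-alone Python sketch of the same index arithmetic, with toy sizes that are assumptions rather than values from the commit:

# Toy sizes (assumed): 8 output-channel chunks, 8 input-channel chunks, 4 groups.
oc_chunk, ic_chunk, groups = 8, 8, 4
oc_chunk_per_group = oc_chunk // groups   # output-channel chunks owned by each group
ic_chunk_per_group = ic_chunk // groups   # input-channel chunks reduced within a group

for occ in range(oc_chunk):               # packed output-channel chunk index
    group = occ // oc_chunk_per_group     # the group this output channel belongs to
    # The reduction axis icc walks only this group's slice of input-channel chunks,
    # mirroring occ//(oc_chunk//groups)*(ic_chunk//groups)+icc in the compute above.
    in_chunks = [group * ic_chunk_per_group + icc for icc in range(ic_chunk_per_group)]
    assert all(ic // ic_chunk_per_group == group for ic in in_chunks)

With these sizes, each output channel reduces over only 2 of the 8 input-channel chunks, which is the narrower reduction the comment describes.
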
1 change: 0 additions & 1 deletion topi/python/topi/cuda/nn.py
@@ -18,7 +18,6 @@
"""scheduler functions for cuda backend"""
from __future__ import absolute_import as _abs

import tvm
from .. import cpp

def schedule_lrn(outs):
2 changes: 1 addition & 1 deletion topi/python/topi/cuda/rcnn/proposal.py
@@ -18,7 +18,7 @@
"""Proposal operator"""
import math
import tvm
from ...vision.rcnn import proposal, generate_anchor, reg_bbox, reg_iou
from ...vision.rcnn import generate_anchor, reg_bbox, reg_iou
from ...util import get_const_tuple, get_const_int


1 change: 0 additions & 1 deletion topi/python/topi/cuda/vision.py
@@ -18,7 +18,6 @@
"""Schedule for vision operators"""
from __future__ import absolute_import as _abs
import tvm
from .. import generic
from .. import cpp
from .. import tag
from .pooling import schedule_pool