fix lint
icemelon committed Jan 8, 2020
1 parent 5f11005 commit 3d3984d
Showing 7 changed files with 52 additions and 53 deletions.
1 change: 0 additions & 1 deletion python/tvm/autotvm/graph_tuner/base_graph_tuner.py
@@ -54,7 +54,6 @@ def get_infer_layout(task_name):
 @autotvm.register_customized_task("layout_transform")
 def layout_transform(*args):
     """Autotvm layout transform template."""
-    args = deserialize_args(args)
     cfg = get_config()
     cfg.add_flop(-1)
     data = args[0]
4 changes: 2 additions & 2 deletions python/tvm/relay/op/strategy/arm_cpu.py
@@ -23,11 +23,11 @@
 from .. import op as _op

 @schedule_injective.register("arm_cpu")
-def schedule_injective(_, outs, target):
+def schedule_injective_arm_cpu(_, outs, target):
     with target:
         return topi.arm_cpu.schedule_injective(outs)

 @schedule_concatenate.register("arm_cpu")
-def schedule_concatenate(_, outs, target):
+def schedule_concatenate_arm_cpu(_, outs, target):
     with target:
         return topi.arm_cpu.schedule_concatenate(outs)
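
Note: the pattern in this commit is to rename each target-specific function (schedule_injective → schedule_injective_arm_cpu, and so on) so that every def gets a unique module-level name while its .register(...) decorator still attaches it to the shared generic entry point, which presumably lives in python/tvm/relay/op/strategy/generic.py. A minimal, self-contained sketch of that decorator pattern, using a toy dict-backed registry rather than TVM's actual implementation:

def generic_func(default):
    """Toy stand-in for a generic strategy/schedule function with per-target overrides."""
    table = {}

    def dispatch(target, *args):
        # Fall back to the generic implementation when no override is registered.
        return table.get(target, default)(*args)

    def register(key):
        def wrap(func):
            table[key] = func
            return func
        return wrap

    dispatch.register = register
    return dispatch

@generic_func
def schedule_injective(outs):
    return "generic schedule"

@schedule_injective.register("arm_cpu")
def schedule_injective_arm_cpu(outs):
    # Unique name: the decorator captures the function object, and the
    # module-level name schedule_injective stays bound to the generic entry
    # point, which is what the renames in this commit preserve.
    return "arm_cpu schedule"

In this toy version, schedule_injective("arm_cpu", outs) picks the arm_cpu override and falls back to the generic implementation for any unregistered target.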
50 changes: 25 additions & 25 deletions python/tvm/relay/op/strategy/cuda.py
@@ -24,65 +24,65 @@
 from ....schedule import SpecializedCondition

 @schedule_injective.register(["cuda", "gpu"])
-def schedule_injective(attrs, outs, target):
+def schedule_injective_cuda(attrs, outs, target):
     with target:
         return topi.cuda.schedule_injective(outs)

 @schedule_reduce.register(["cuda", "gpu"])
-def schedule_reduce(attrs, outs, target):
+def schedule_reduce_cuda(attrs, outs, target):
     with target:
         return topi.cuda.schedule_reduce(outs)

 @schedule_concatenate.register(["cuda", "gpu"])
-def schedule_concatenate(attrs, outs, target):
+def schedule_concatenate_cuda(attrs, outs, target):
     with target:
         return topi.cuda.schedule_injective(outs)

 @schedule_pool.register(["cuda", "gpu"])
-def schedule_pool(attrs, outs, target):
+def schedule_pool_cuda(attrs, outs, target):
     with target:
         return topi.cuda.schedule_pool(outs, attrs.layout)

 @schedule_pool_grad.register(["cuda", "gpu"])
-def schedule_pool_grad(attrs, outs, target):
+def schedule_pool_grad_cuda(attrs, outs, target):
     with target:
         return topi.cuda.schedule_pool_grad(outs)

 @schedule_adaptive_pool.register(["cuda", "gpu"])
-def schedule_adaptive_pool(attrs, outs, target):
+def schedule_adaptive_pool_cuda(attrs, outs, target):
     with target:
         return topi.cuda.schedule_adaptive_pool(outs)

 @schedule_softmax.register(["cuda", "gpu"])
-def schedule_softmax(attrs, outs, target):
+def schedule_softmax_cuda(attrs, outs, target):
     with target:
         return topi.cuda.schedule_softmax(outs)

 @schedule_lrn.register(["cuda", "gpu"])
-def schedule_lrn(attrs, outs, target):
+def schedule_lrn_cuda(attrs, outs, target):
     with target:
         return topi.cuda.schedule_lrn(outs)

 @schedule_l2_normalize.register(["cuda", "gpu"])
-def schedule_l2_normalize(attrs, outs, target):
+def schedule_l2_normalize_cuda(attrs, outs, target):
     with target:
         return topi.cuda.schedule_l2_normalize(outs)

 @deformable_conv2d_strategy.register(["cuda", "gpu"])
-def deformable_conv2d_strategy(attrs, inputs, out_type, target):
+def deformable_conv2d_strategy_cuda(attrs, inputs, out_type, target):
     strategy = _op.OpStrategy()
     strategy.add_implement(wrap_compute_deformable_conv2d(topi.cuda.deformable_conv2d_nchw),
                            wrap_topi_schedule(topi.cuda.schedule_deformable_conv2d_nchw))
     return strategy

 @conv3d_strategy.register(["cuda", "gpu"])
-def conv3d_strategy(attrs, inputs, out_type, target):
+def conv3d_strategy_cuda(attrs, inputs, out_type, target):
     strategy = _op.OpStrategy()
     layout = attrs.data_layout
     assert layout in ["NCDHW", "NDHWC"], "Not support this layout {} yet".format(layout)
     if layout == "NCDHW":
         strategy.add_implement(wrap_compute_conv3d(topi.cuda.conv3d_ncdhw),
-                               wrap_topi_schedule(topi.cuda.schedule_conv3d_ncdhw),
+                               _reg._wrap_topi_schedule(topi.cuda.schedule_conv3d_ncdhw),
                                10)
     else: # layout == "NDHWC":
         strategy.add_implement(wrap_compute_conv3d(topi.cuda.conv3d_ndhwc),
@@ -95,7 +95,7 @@ def conv3d_strategy(attrs, inputs, out_type, target):
     return strategy

 @conv1d_transpose_strategy.register(["cuda", "gpu"])
-def conv1d_transpose_strategy(attrs, inputs, out_type, target):
+def conv1d_transpose_strategy_cuda(attrs, inputs, out_type, target):
     strategy = _op.OpStrategy()
     layout = attrs.data_layout
     dilation = get_const_tuple(attrs.dilation)
@@ -108,7 +108,7 @@ def conv1d_transpose_strategy(attrs, inputs, out_type, target):
     return strategy

 @dense_strategy.register(["cuda", "gpu"])
-def dense_strategy(attrs, inputs, out_type, target):
+def dense_strategy_cuda(attrs, inputs, out_type, target):
     # Todo(@icemelon9): update dense strategy
     strategy = _op.OpStrategy()
     if out_type.dtype == "int8":
@@ -127,8 +127,8 @@ def dense_strategy(attrs, inputs, out_type, target):
     return strategy

 @batch_matmul_strategy.register(["cuda", "gpu"])
-def batch_matmul_strategy(attrs, inputs, out_type, target):
-    strategy =_op.OpStrategy()
+def batch_matmul_strategy_cuda(attrs, inputs, out_type, target):
+    strategy = _op.OpStrategy()
     strategy.add_implement(wrap_compute_batch_matmul(topi.nn.batch_matmul),
                            wrap_topi_schedule(topi.cuda.schedule_batch_matmul),
                            10)
@@ -139,57 +139,57 @@ def batch_matmul_strategy(attrs, inputs, out_type, target):
     return strategy

 @argsort_strategy.register(["cuda", "gpu"])
-def argsort_strategy(attrs, inputs, out_type, target):
+def argsort_strategy_cuda(attrs, inputs, out_type, target):
     strategy = _op.OpStrategy()
     strategy.add_implement(wrap_compute_argsort(topi.cuda.argsort_gpu),
                            wrap_topi_schedule(topi.cuda.schedule_argsort))
     return strategy

 @topk_strategy.register(["cuda", "gpu"])
-def topk_strategy(attrs, inputs, out_type, target):
+def topk_strategy_cuda(attrs, inputs, out_type, target):
     strategy = _op.OpStrategy()
     strategy.add_implement(wrap_compute_topk(topi.cuda.topk_gpu),
                            wrap_topi_schedule(topi.cuda.schedule_topk))
     return strategy

 @schedule_multibox_prior.register(["cuda", "gpu"])
-def schedule_multibox_prior(attrs, outs, target):
+def schedule_multibox_prior_cuda(attrs, outs, target):
     with target:
         return topi.cuda.schedule_multibox_prior(outs)

 @schedule_multibox_transform_loc.register(["cuda", "gpu"])
-def schedule_multibox_transform_loc(attrs, outs, target):
+def schedule_multibox_transform_loc_cuda(attrs, outs, target):
     with target:
         return topi.cuda.schedule_multibox_transform_loc(outs)

 @get_valid_counts_strategy.register(["cuda", "gpu"])
-def get_valid_counts_strategy(attrs, inputs, out_type, target):
+def get_valid_counts_strategy_cuda(attrs, inputs, out_type, target):
     strategy = _op.OpStrategy()
     strategy.add_implement(wrap_compute_get_valid_counts(topi.cuda.get_valid_counts),
                            wrap_topi_schedule(topi.cuda.schedule_get_valid_counts))
     return strategy

 @nms_strategy.register(["cuda", "gpu"])
-def nms_strategy(attrs, inputs, out_type, target):
+def nms_strategy_cuda(attrs, inputs, out_type, target):
     strategy = _op.OpStrategy()
     strategy.add_implement(wrap_compute_nms(topi.cuda.non_max_suppression),
                            wrap_topi_schedule(topi.cuda.schedule_nms))
     return strategy

 @roi_align_strategy.register(["cuda", "gpu"])
-def roi_align_strategy(attrs, inputs, out_type, target):
+def roi_align_strategy_cuda(attrs, inputs, out_type, target):
     strategy = _op.OpStrategy()
     strategy.add_implement(wrap_compute_roi_align(topi.vision.rcnn.roi_align_nchw),
                            wrap_topi_schedule(topi.cuda.schedule_roi_align))
     return strategy

 @schedule_roi_pool.register(["cuda", "gpu"])
-def schedule_roi_pool(attrs, outs, target):
+def schedule_roi_pool_cuda(attrs, outs, target):
     with target:
         return topi.cuda.schedule_roi_pool(outs)

 @proposal_strategy.register(["cuda", "gpu"])
-def proposal_strategy(attrs, inputs, out_type, target):
+def proposal_strategy_cuda(attrs, inputs, out_type, target):
     strategy = _op.OpStrategy()
     strategy.add_implement(wrap_compute_proposal(topi.cuda.proposal),
                            wrap_topi_schedule(topi.cuda.schedule_proposal))
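
Note: every strategy function above follows the same recipe: create an _op.OpStrategy(), call strategy.add_implement(compute_wrapper, schedule_wrapper[, priority]) once per implementation, and return the strategy. A plausible sketch of what a schedule wrapper such as wrap_topi_schedule does, inferred only from the `with target: return topi.cuda.schedule_*(outs)` bodies in this file (an assumption, not code from this commit):

def wrap_topi_schedule(topi_schedule):
    """Adapt a plain TOPI schedule function to the (attrs, outs, target)
    signature that OpStrategy implementations appear to expect (sketch only)."""
    def wrapper(attrs, outs, target):
        # Entering the target context mirrors the `with target:` blocks above,
        # making target-specific scheduling available to the TOPI function.
        with target:
            return topi_schedule(outs)
    return wrapper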
12 changes: 6 additions & 6 deletions python/tvm/relay/op/strategy/hls.py
@@ -23,31 +23,31 @@
 from .. import op as _op

 @schedule_injective.register("hls")
-def schedule_injective(attrs, outs, target):
+def schedule_injective_hls(attrs, outs, target):
     with target:
         return topi.hls.schedule_injective(outs)

 @schedule_reduce.register("hls")
-def schedule_reduce(attrs, outs, target):
+def schedule_reduce_hls(attrs, outs, target):
     with target:
         return topi.hls.schedule_reduce(outs)

 @schedule_concatenate.register("hls")
-def schedule_concatenate(attrs, outs, target):
+def schedule_concatenate_hls(attrs, outs, target):
     with target:
         return topi.hls.schedule_injective(outs)

 @schedule_pool.register("hls")
-def schedule_pool(attrs, outs, target):
+def schedule_pool_hls(attrs, outs, target):
     with target:
         return topi.hls.schedule_pool(outs, attrs.layout)

 @schedule_adaptive_pool.register("hls")
-def schedule_adaptive_pool(attrs, outs, target):
+def schedule_adaptive_pool_hls(attrs, outs, target):
     with target:
         return topi.hls.schedule_adaptive_pool(outs)

 @schedule_softmax.register("hls")
-def schedule_softmax(attrs, outs, target):
+def schedule_softmax_hls(attrs, outs, target):
     with target:
         return topi.hls.schedule_softmax(outs)
10 changes: 5 additions & 5 deletions python/tvm/relay/op/strategy/opengl.py
@@ -23,26 +23,26 @@
 from .. import op as _op

 @schedule_injective.register("opengl")
-def schedule_injective(attrs, outs, target):
+def schedule_injective_opengl(attrs, outs, target):
     with target:
         return topi.opengl.schedule_injective(outs)

 @schedule_concatenate.register("opengl")
-def schedule_injective(attrs, outs, target):
+def schedule_concatenate_opengl(attrs, outs, target):
     with target:
         return topi.opengl.schedule_injective(outs)

 @schedule_pool.register("opengl")
-def schedule_pool(attrs, outs, target):
+def schedule_pool_opengl(attrs, outs, target):
     with target:
         return topi.opengl.schedule_pool(outs, attrs.layout)

 @schedule_adaptive_pool.register("opengl")
-def schedule_adaptive_pool(attrs, outs, target):
+def schedule_adaptive_pool_opengl(attrs, outs, target):
     with target:
         return topi.opengl.schedule_adaptive_pool(outs)

 @schedule_softmax.register("opengl")
-def schedule_softmax(attrs, outs, target):
+def schedule_softmax_opengl(attrs, outs, target):
     with target:
         return topi.opengl.schedule_softmax(outs)
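
Note: before this change, the opengl concatenate schedule was registered from a second function that reused the name schedule_injective. At runtime this still registered the right object, because the decorator receives the function when the def statement executes and only afterwards is the module-level name rebound; the problem is the duplicate definition itself, which lint flags and which the rename to schedule_concatenate_opengl removes. A small self-contained demonstration of why the duplicated name was harmless at runtime (toy registry, illustrative only):

table = {}

def register(key):
    def wrap(fn):
        table[key] = fn  # the function object is captured immediately
        return fn
    return wrap

@register("injective")
def handler():
    return "injective schedule"

@register("concatenate")
def handler():  # same name: a lint error, but the registry is still correct
    return "concatenate schedule"

assert table["injective"]() == "injective schedule"
assert table["concatenate"]() == "concatenate schedule"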
4 changes: 2 additions & 2 deletions python/tvm/relay/op/strategy/rocm.py
@@ -23,11 +23,11 @@
 from .. import op as _op

 @schedule_lrn.register("rocm")
-def schedule_lrn(attrs, outs, target):
+def schedule_lrn_rocm(attrs, outs, target):
     with target:
         return topi.rocm.schedule_lrn(outs)

 @schedule_l2_normalize.register("rocm")
-def schedule_l2_normalize(attrs, outs, target):
+def schedule_l2_normalize_rocm(attrs, outs, target):
     with target:
         return topi.rocm.schedule_l2_normalize(outs)
24 changes: 12 additions & 12 deletions python/tvm/relay/op/strategy/x86.py
@@ -24,37 +24,37 @@
 from ....schedule import SpecializedCondition

 @schedule_injective.register("cpu")
-def schedule_injective(attrs, outs, target):
+def schedule_injective_cpu(attrs, outs, target):
     with target:
         return topi.x86.schedule_injective(outs)

 @schedule_reduce.register("cpu")
-def schedule_reduce(attrs, outs, target):
+def schedule_reduce_cpu(attrs, outs, target):
     with target:
         return topi.x86.schedule_reduce(outs)

 @schedule_concatenate.register("cpu")
-def schedule_concatenate(attrs, outs, target):
+def schedule_concatenate_cpu(attrs, outs, target):
     with target:
         return topi.x86.schedule_concatenate(outs)

 @schedule_pool.register("cpu")
-def schedule_pool(attrs, outs, target):
+def schedule_pool_cpu(attrs, outs, target):
     with target:
         return topi.x86.schedule_pool(outs, attrs.layout)

 @schedule_adaptive_pool.register("cpu")
-def schedule_adaptive_pool(attrs, outs, target):
+def schedule_adaptive_pool_cpu(attrs, outs, target):
     with target:
         return topi.x86.schedule_adaptive_pool(outs)

 @schedule_softmax.register("cpu")
-def schedule_softmax(attrs, outs, target):
+def schedule_softmax_cpu(attrs, outs, target):
     with target:
         return topi.x86.schedule_softmax(outs)

 @conv2d_strategy.register("cpu")
-def conv2d_strategy(attrs, inputs, out_type, target):
+def conv2d_strategy_cpu(attrs, inputs, out_type, target):
     strategy = _op.OpStrategy()
     layout = attrs.data_layout
     dtype = out_type.dtype
@@ -68,15 +68,15 @@ def conv2d_strategy(attrs, inputs, out_type, target):
     return strategy

 @conv2d_NCHWc_strategy.register("cpu")
-def conv2d_NCHWc_strategy(attrs, inputs, out_type, target):
+def conv2d_NCHWc_strategy_cpu(attrs, inputs, out_type, target):
     print('inside x86 conv2d_NCHWc_strategy')
     strategy = _op.OpStrategy()
     strategy.add_implement(wrap_compute_conv2d_NCHWc(topi.x86.conv2d_NCHWc),
                            wrap_topi_schedule(topi.x86.schedule_conv2d_NCHWc))
     return strategy

 @dense_strategy.register("cpu")
-def dense_strategy(attrs, inputs, out_type, target):
+def dense_strategy_cpu(attrs, inputs, out_type, target):
     strategy = _op.OpStrategy()
     m, k = inputs[0].shape
     strategy.add_implement(wrap_compute_dense(topi.x86.dense_nopack),
@@ -92,7 +92,7 @@ def dense_strategy(attrs, inputs, out_type, target):
     return strategy

 @batch_matmul_strategy.register("cpu")
-def batch_matmul_strategy(attrs, inputs, out_type, target):
+def batch_matmul_strategy_cpu(attrs, inputs, out_type, target):
     strategy = _op.OpStrategy()
     strategy.add_implement(wrap_compute_batch_matmul(topi.x86.batch_matmul),
                            wrap_topi_schedule(topi.x86.schedule_batch_matmul),
@@ -104,12 +104,12 @@ def batch_matmul_strategy(attrs, inputs, out_type, target):
     return strategy

 @schedule_sparse_dense.register("cpu")
-def schedule_sparse_dense(attrs, outs, target):
+def schedule_sparse_dense_cpu(attrs, outs, target):
     with target:
         return topi.x86.schedule_sparse_dense(outs)

 @roi_align_strategy.register("cpu")
-def roi_align_strategy(attrs, inputs, out_type, target):
+def roi_align_strategy_cpu(attrs, inputs, out_type, target):
     strategy = _op.OpStrategy()
     strategy.add_implement(wrap_compute_roi_align(topi.x86.roi_align_nchw),
                            wrap_topi_schedule(topi.generic.schedule_roi_align))
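
Note: the x86 hunk imports SpecializedCondition, which lets a strategy attach an implementation that should only be considered when some shape condition holds. The sketch below shows one hypothetical way dense_strategy_cpu could use it; the shape condition and the *_pack/*_nopack schedule names are illustrative assumptions rather than code from this commit (only topi.x86.dense_nopack is visible in the diff), and the snippet assumes the module-level imports already present in strategy/x86.py:

@dense_strategy.register("cpu")
def dense_strategy_cpu(attrs, inputs, out_type, target):
    strategy = _op.OpStrategy()
    m, k = inputs[0].shape
    # Always-available implementation, as in the diff above (schedule name assumed).
    strategy.add_implement(wrap_compute_dense(topi.x86.dense_nopack),
                           wrap_topi_schedule(topi.x86.schedule_dense_nopack),
                           10)
    # Hypothetical extra implementation, gated on a shape condition.
    with SpecializedCondition(k > 16):
        strategy.add_implement(wrap_compute_dense(topi.x86.dense_pack),
                               wrap_topi_schedule(topi.x86.schedule_dense_pack),
                               5)
    return strategy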
