From 3d3984db25277d30075d3a964af5e8d68fcc8667 Mon Sep 17 00:00:00 2001
From: Haichen Shen
Date: Wed, 8 Jan 2020 12:17:37 -0800
Subject: [PATCH] fix lint

---
 .../autotvm/graph_tuner/base_graph_tuner.py |  1 -
 python/tvm/relay/op/strategy/arm_cpu.py     |  4 +-
 python/tvm/relay/op/strategy/cuda.py        | 50 +++++++++----------
 python/tvm/relay/op/strategy/hls.py         | 12 ++---
 python/tvm/relay/op/strategy/opengl.py      | 10 ++--
 python/tvm/relay/op/strategy/rocm.py        |  4 +-
 python/tvm/relay/op/strategy/x86.py         | 24 ++++-----
 7 files changed, 52 insertions(+), 53 deletions(-)

diff --git a/python/tvm/autotvm/graph_tuner/base_graph_tuner.py b/python/tvm/autotvm/graph_tuner/base_graph_tuner.py
index 53d91d2b878c2..90fdcd2d8d51f 100644
--- a/python/tvm/autotvm/graph_tuner/base_graph_tuner.py
+++ b/python/tvm/autotvm/graph_tuner/base_graph_tuner.py
@@ -54,7 +54,6 @@ def get_infer_layout(task_name):
 @autotvm.register_customized_task("layout_transform")
 def layout_transform(*args):
     """Autotvm layout transform template."""
-    args = deserialize_args(args)
     cfg = get_config()
     cfg.add_flop(-1)
     data = args[0]
diff --git a/python/tvm/relay/op/strategy/arm_cpu.py b/python/tvm/relay/op/strategy/arm_cpu.py
index a2468cc12c4f4..a7ba234dceb67 100644
--- a/python/tvm/relay/op/strategy/arm_cpu.py
+++ b/python/tvm/relay/op/strategy/arm_cpu.py
@@ -23,11 +23,11 @@ from .. import op as _op

 @schedule_injective.register("arm_cpu")
-def schedule_injective(_, outs, target):
+def schedule_injective_arm_cpu(_, outs, target):
     with target:
         return topi.arm_cpu.schedule_injective(outs)

 @schedule_concatenate.register("arm_cpu")
-def schedule_concatenate(_, outs, target):
+def schedule_concatenate_arm_cpu(_, outs, target):
     with target:
         return topi.arm_cpu.schedule_concatenate(outs)
diff --git a/python/tvm/relay/op/strategy/cuda.py b/python/tvm/relay/op/strategy/cuda.py
index 97546567c0d4e..430647becc2e5 100644
--- a/python/tvm/relay/op/strategy/cuda.py
+++ b/python/tvm/relay/op/strategy/cuda.py
@@ -24,65 +24,65 @@ from ....schedule import SpecializedCondition

 @schedule_injective.register(["cuda", "gpu"])
-def schedule_injective(attrs, outs, target):
+def schedule_injective_cuda(attrs, outs, target):
     with target:
         return topi.cuda.schedule_injective(outs)

 @schedule_reduce.register(["cuda", "gpu"])
-def schedule_reduce(attrs, outs, target):
+def schedule_reduce_cuda(attrs, outs, target):
     with target:
         return topi.cuda.schedule_reduce(outs)

 @schedule_concatenate.register(["cuda", "gpu"])
-def schedule_concatenate(attrs, outs, target):
+def schedule_concatenate_cuda(attrs, outs, target):
     with target:
         return topi.cuda.schedule_injective(outs)

 @schedule_pool.register(["cuda", "gpu"])
-def schedule_pool(attrs, outs, target):
+def schedule_pool_cuda(attrs, outs, target):
     with target:
         return topi.cuda.schedule_pool(outs, attrs.layout)

 @schedule_pool_grad.register(["cuda", "gpu"])
-def schedule_pool_grad(attrs, outs, target):
+def schedule_pool_grad_cuda(attrs, outs, target):
     with target:
         return topi.cuda.schedule_pool_grad(outs)

 @schedule_adaptive_pool.register(["cuda", "gpu"])
-def schedule_adaptive_pool(attrs, outs, target):
+def schedule_adaptive_pool_cuda(attrs, outs, target):
     with target:
         return topi.cuda.schedule_adaptive_pool(outs)

 @schedule_softmax.register(["cuda", "gpu"])
-def schedule_softmax(attrs, outs, target):
+def schedule_softmax_cuda(attrs, outs, target):
     with target:
         return topi.cuda.schedule_softmax(outs)

 @schedule_lrn.register(["cuda", "gpu"])
-def schedule_lrn(attrs, outs, target):
+def schedule_lrn_cuda(attrs, outs, target):
     with target:
         return topi.cuda.schedule_lrn(outs)

 @schedule_l2_normalize.register(["cuda", "gpu"])
-def schedule_l2_normalize(attrs, outs, target):
+def schedule_l2_normalize_cuda(attrs, outs, target):
     with target:
         return topi.cuda.schedule_l2_normalize(outs)

 @deformable_conv2d_strategy.register(["cuda", "gpu"])
-def deformable_conv2d_strategy(attrs, inputs, out_type, target):
+def deformable_conv2d_strategy_cuda(attrs, inputs, out_type, target):
     strategy = _op.OpStrategy()
     strategy.add_implement(wrap_compute_deformable_conv2d(topi.cuda.deformable_conv2d_nchw),
                            wrap_topi_schedule(topi.cuda.schedule_deformable_conv2d_nchw))
     return strategy

 @conv3d_strategy.register(["cuda", "gpu"])
-def conv3d_strategy(attrs, inputs, out_type, target):
+def conv3d_strategy_cuda(attrs, inputs, out_type, target):
     strategy = _op.OpStrategy()
     layout = attrs.data_layout
     assert layout in ["NCDHW", "NDHWC"], "Not support this layout {} yet".format(layout)
     if layout == "NCDHW":
         strategy.add_implement(wrap_compute_conv3d(topi.cuda.conv3d_ncdhw),
-                               wrap_topi_schedule(topi.cuda.schedule_conv3d_ncdhw),
+                               _reg._wrap_topi_schedule(topi.cuda.schedule_conv3d_ncdhw),
                                10)
     else: # layout == "NDHWC":
         strategy.add_implement(wrap_compute_conv3d(topi.cuda.conv3d_ndhwc),
@@ -95,7 +95,7 @@ def conv3d_strategy(attrs, inputs, out_type, target):
     return strategy

 @conv1d_transpose_strategy.register(["cuda", "gpu"])
-def conv1d_transpose_strategy(attrs, inputs, out_type, target):
+def conv1d_transpose_strategy_cuda(attrs, inputs, out_type, target):
     strategy = _op.OpStrategy()
     layout = attrs.data_layout
     dilation = get_const_tuple(attrs.dilation)
@@ -108,7 +108,7 @@ def conv1d_transpose_strategy(attrs, inputs, out_type, target):
     return strategy

 @dense_strategy.register(["cuda", "gpu"])
-def dense_strategy(attrs, inputs, out_type, target):
+def dense_strategy_cuda(attrs, inputs, out_type, target):
     # Todo(@icemelon9): update dense strategy
     strategy = _op.OpStrategy()
     if out_type.dtype == "int8":
@@ -127,8 +127,8 @@ def dense_strategy(attrs, inputs, out_type, target):
     return strategy

 @batch_matmul_strategy.register(["cuda", "gpu"])
-def batch_matmul_strategy(attrs, inputs, out_type, target):
-    strategy =_op.OpStrategy()
+def batch_matmul_strategy_cuda(attrs, inputs, out_type, target):
+    strategy = _op.OpStrategy()
     strategy.add_implement(wrap_compute_batch_matmul(topi.nn.batch_matmul),
                            wrap_topi_schedule(topi.cuda.schedule_batch_matmul),
                            10)
@@ -139,57 +139,57 @@ def batch_matmul_strategy(attrs, inputs, out_type, target):
     return strategy

 @argsort_strategy.register(["cuda", "gpu"])
-def argsort_strategy(attrs, inputs, out_type, target):
+def argsort_strategy_cuda(attrs, inputs, out_type, target):
     strategy = _op.OpStrategy()
     strategy.add_implement(wrap_compute_argsort(topi.cuda.argsort_gpu),
                            wrap_topi_schedule(topi.cuda.schedule_argsort))
     return strategy

 @topk_strategy.register(["cuda", "gpu"])
-def topk_strategy(attrs, inputs, out_type, target):
+def topk_strategy_cuda(attrs, inputs, out_type, target):
     strategy = _op.OpStrategy()
     strategy.add_implement(wrap_compute_topk(topi.cuda.topk_gpu),
                            wrap_topi_schedule(topi.cuda.schedule_topk))
     return strategy

 @schedule_multibox_prior.register(["cuda", "gpu"])
-def schedule_multibox_prior(attrs, outs, target):
+def schedule_multibox_prior_cuda(attrs, outs, target):
     with target:
         return topi.cuda.schedule_multibox_prior(outs)

 @schedule_multibox_transform_loc.register(["cuda", "gpu"])
-def schedule_multibox_transform_loc(attrs, outs, target):
+def schedule_multibox_transform_loc_cuda(attrs, outs, target):
     with target:
         return topi.cuda.schedule_multibox_transform_loc(outs)

 @get_valid_counts_strategy.register(["cuda", "gpu"])
-def get_valid_counts_strategy(attrs, inputs, out_type, target):
+def get_valid_counts_strategy_cuda(attrs, inputs, out_type, target):
     strategy = _op.OpStrategy()
     strategy.add_implement(wrap_compute_get_valid_counts(topi.cuda.get_valid_counts),
                            wrap_topi_schedule(topi.cuda.schedule_get_valid_counts))
     return strategy

 @nms_strategy.register(["cuda", "gpu"])
-def nms_strategy(attrs, inputs, out_type, target):
+def nms_strategy_cuda(attrs, inputs, out_type, target):
     strategy = _op.OpStrategy()
     strategy.add_implement(wrap_compute_nms(topi.cuda.non_max_suppression),
                            wrap_topi_schedule(topi.cuda.schedule_nms))
     return strategy

 @roi_align_strategy.register(["cuda", "gpu"])
-def roi_align_strategy(attrs, inputs, out_type, target):
+def roi_align_strategy_cuda(attrs, inputs, out_type, target):
     strategy = _op.OpStrategy()
     strategy.add_implement(wrap_compute_roi_align(topi.vision.rcnn.roi_align_nchw),
                            wrap_topi_schedule(topi.cuda.schedule_roi_align))
     return strategy

 @schedule_roi_pool.register(["cuda", "gpu"])
-def schedule_roi_pool(attrs, outs, target):
+def schedule_roi_pool_cuda(attrs, outs, target):
     with target:
         return topi.cuda.schedule_roi_pool(outs)

 @proposal_strategy.register(["cuda", "gpu"])
-def proposal_strategy(attrs, inputs, out_type, target):
+def proposal_strategy_cuda(attrs, inputs, out_type, target):
     strategy = _op.OpStrategy()
     strategy.add_implement(wrap_compute_proposal(topi.cuda.proposal),
                            wrap_topi_schedule(topi.cuda.schedule_proposal))
diff --git a/python/tvm/relay/op/strategy/hls.py b/python/tvm/relay/op/strategy/hls.py
index fa78c7abe3b62..c943ad43aeac1 100644
--- a/python/tvm/relay/op/strategy/hls.py
+++ b/python/tvm/relay/op/strategy/hls.py
@@ -23,31 +23,31 @@ from .. import op as _op

 @schedule_injective.register("hls")
-def schedule_injective(attrs, outs, target):
+def schedule_injective_hls(attrs, outs, target):
     with target:
         return topi.hls.schedule_injective(outs)

 @schedule_reduce.register("hls")
-def schedule_reduce(attrs, outs, target):
+def schedule_reduce_hls(attrs, outs, target):
     with target:
         return topi.hls.schedule_reduce(outs)

 @schedule_concatenate.register("hls")
-def schedule_concatenate(attrs, outs, target):
+def schedule_concatenate_hls(attrs, outs, target):
     with target:
         return topi.hls.schedule_injective(outs)

 @schedule_pool.register("hls")
-def schedule_pool(attrs, outs, target):
+def schedule_pool_hls(attrs, outs, target):
     with target:
         return topi.hls.schedule_pool(outs, attrs.layout)

 @schedule_adaptive_pool.register("hls")
-def schedule_adaptive_pool(attrs, outs, target):
+def schedule_adaptive_pool_hls(attrs, outs, target):
     with target:
         return topi.hls.schedule_adaptive_pool(outs)

 @schedule_softmax.register("hls")
-def schedule_softmax(attrs, outs, target):
+def schedule_softmax_hls(attrs, outs, target):
     with target:
         return topi.hls.schedule_softmax(outs)
\ No newline at end of file
diff --git a/python/tvm/relay/op/strategy/opengl.py b/python/tvm/relay/op/strategy/opengl.py
index 726bd4fc2d4e8..1e2c521733387 100644
--- a/python/tvm/relay/op/strategy/opengl.py
+++ b/python/tvm/relay/op/strategy/opengl.py
@@ -23,26 +23,26 @@ from .. import op as _op

 @schedule_injective.register("opengl")
-def schedule_injective(attrs, outs, target):
+def schedule_injective_opengl(attrs, outs, target):
     with target:
         return topi.opengl.schedule_injective(outs)

 @schedule_concatenate.register("opengl")
-def schedule_injective(attrs, outs, target):
+def schedule_concatenate_opengl(attrs, outs, target):
     with target:
         return topi.opengl.schedule_injective(outs)

 @schedule_pool.register("opengl")
-def schedule_pool(attrs, outs, target):
+def schedule_pool_opengl(attrs, outs, target):
     with target:
         return topi.opengl.schedule_pool(outs, attrs.layout)

 @schedule_adaptive_pool.register("opengl")
-def schedule_adaptive_pool(attrs, outs, target):
+def schedule_adaptive_pool_opengl(attrs, outs, target):
     with target:
         return topi.opengl.schedule_adaptive_pool(outs)

 @schedule_softmax.register("opengl")
-def schedule_softmax(attrs, outs, target):
+def schedule_softmax_opengl(attrs, outs, target):
     with target:
         return topi.opengl.schedule_softmax(outs)
\ No newline at end of file
diff --git a/python/tvm/relay/op/strategy/rocm.py b/python/tvm/relay/op/strategy/rocm.py
index c878de70ad5dd..8901b42485164 100644
--- a/python/tvm/relay/op/strategy/rocm.py
+++ b/python/tvm/relay/op/strategy/rocm.py
@@ -23,11 +23,11 @@ from .. import op as _op

 @schedule_lrn.register("rocm")
-def schedule_lrn(attrs, outs, target):
+def schedule_lrn_rocm(attrs, outs, target):
     with target:
         return topi.rocm.schedule_lrn(outs)

 @schedule_l2_normalize.register("rocm")
-def schedule_l2_normalize(attrs, outs, target):
+def schedule_l2_normalize_rocm(attrs, outs, target):
     with target:
         return topi.rocm.schedule_l2_normalize(outs)
diff --git a/python/tvm/relay/op/strategy/x86.py b/python/tvm/relay/op/strategy/x86.py
index f1e75e63b7a9a..717194400e3eb 100644
--- a/python/tvm/relay/op/strategy/x86.py
+++ b/python/tvm/relay/op/strategy/x86.py
@@ -24,37 +24,37 @@ from ....schedule import SpecializedCondition

 @schedule_injective.register("cpu")
-def schedule_injective(attrs, outs, target):
+def schedule_injective_cpu(attrs, outs, target):
     with target:
         return topi.x86.schedule_injective(outs)

 @schedule_reduce.register("cpu")
-def schedule_reduce(attrs, outs, target):
+def schedule_reduce_cpu(attrs, outs, target):
     with target:
         return topi.x86.schedule_reduce(outs)

 @schedule_concatenate.register("cpu")
-def schedule_concatenate(attrs, outs, target):
+def schedule_concatenate_cpu(attrs, outs, target):
     with target:
         return topi.x86.schedule_concatenate(outs)

 @schedule_pool.register("cpu")
-def schedule_pool(attrs, outs, target):
+def schedule_pool_cpu(attrs, outs, target):
     with target:
         return topi.x86.schedule_pool(outs, attrs.layout)

 @schedule_adaptive_pool.register("cpu")
-def schedule_adaptive_pool(attrs, outs, target):
+def schedule_adaptive_pool_cpu(attrs, outs, target):
     with target:
         return topi.x86.schedule_adaptive_pool(outs)

 @schedule_softmax.register("cpu")
-def schedule_softmax(attrs, outs, target):
+def schedule_softmax_cpu(attrs, outs, target):
     with target:
         return topi.x86.schedule_softmax(outs)

 @conv2d_strategy.register("cpu")
-def conv2d_strategy(attrs, inputs, out_type, target):
+def conv2d_strategy_cpu(attrs, inputs, out_type, target):
     strategy = _op.OpStrategy()
     layout = attrs.data_layout
     dtype = out_type.dtype
@@ -68,7 +68,7 @@ def conv2d_strategy(attrs, inputs, out_type, target):
     return strategy

 @conv2d_NCHWc_strategy.register("cpu")
-def conv2d_NCHWc_strategy(attrs, inputs, out_type, target):
+def conv2d_NCHWc_strategy_cpu(attrs, inputs, out_type, target):
     print('inside x86 conv2d_NCHWc_strategy')
     strategy = _op.OpStrategy()
     strategy.add_implement(wrap_compute_conv2d_NCHWc(topi.x86.conv2d_NCHWc),
@@ -76,7 +76,7 @@ def conv2d_NCHWc_strategy(attrs, inputs, out_type, target):
     return strategy

 @dense_strategy.register("cpu")
-def dense_strategy(attrs, inputs, out_type, target):
+def dense_strategy_cpu(attrs, inputs, out_type, target):
     strategy = _op.OpStrategy()
     m, k = inputs[0].shape
     strategy.add_implement(wrap_compute_dense(topi.x86.dense_nopack),
@@ -92,7 +92,7 @@ def dense_strategy(attrs, inputs, out_type, target):
     return strategy

 @batch_matmul_strategy.register("cpu")
-def batch_matmul_strategy(attrs, inputs, out_type, target):
+def batch_matmul_strategy_cpu(attrs, inputs, out_type, target):
     strategy = _op.OpStrategy()
     strategy.add_implement(wrap_compute_batch_matmul(topi.x86.batch_matmul),
                            wrap_topi_schedule(topi.x86.schedule_batch_matmul),
@@ -104,12 +104,12 @@ def batch_matmul_strategy(attrs, inputs, out_type, target):
     return strategy

 @schedule_sparse_dense.register("cpu")
-def schedule_sparse_dense(attrs, outs, target):
+def schedule_sparse_dense_cpu(attrs, outs, target):
     with target:
         return topi.x86.schedule_sparse_dense(outs)

 @roi_align_strategy.register("cpu")
-def roi_align_strategy(attrs, inputs, out_type, target):
+def roi_align_strategy_cpu(attrs, inputs, out_type, target):
     strategy = _op.OpStrategy()
     strategy.add_implement(wrap_compute_roi_align(topi.x86.roi_align_nchw),
                            wrap_topi_schedule(topi.generic.schedule_roi_align))
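
Note on the renames: every per-target override above previously reused the name of the generic function it registers against (the generics come in through "from .generic import *"), which is what the lint run flagged; appending the target suffix (schedule_injective -> schedule_injective_cuda) removes the shadowing without changing behaviour, because register() keys dispatch on the target string, not on the override's function name. The sketch below is a minimal, self-contained illustration of that registry pattern, not TVM's actual generic_func implementation; the generic_func helper and its internals here are made up for the example.

    # Illustrative only: a tiny per-target dispatch registry in the spirit of
    # the strategy/schedule registration used above (not TVM's real API).
    def generic_func(default):
        """Wrap a default implementation so per-target overrides can be registered."""
        registry = {}

        def dispatcher(target, *args, **kwargs):
            # Dispatch on the target string; the override's __name__ is irrelevant.
            return registry.get(target, default)(*args, **kwargs)

        def register(target):
            def _do_register(func):
                registry[target] = func
                return func
            return _do_register

        dispatcher.register = register
        return dispatcher

    @generic_func
    def schedule_injective(outs):
        return "generic schedule for {}".format(outs)

    @schedule_injective.register("cuda")
    def schedule_injective_cuda(outs):
        # Target-suffixed name: avoids redefining the generic name that the
        # strategy modules import with a wildcard, while dispatch is unchanged.
        return "cuda schedule for {}".format(outs)

    print(schedule_injective("cuda", "conv2d"))  # -> cuda schedule for conv2d
    print(schedule_injective("x86", "conv2d"))   # -> generic schedule for conv2d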