Rename "MetaTileRewritePolicy" to "SketchPolicy". (apache#36)
* Rename "MetaTileRewritePolicy" to "SketchPolicy".

* Add a new class for auto_unroll_max_step, storage_offset in StageNode

* fix tune_op_subgraph.py
merrymercy committed Jun 20, 2020
1 parent 0794875 commit a4c4548
Showing 22 changed files with 386 additions and 398 deletions.
6 changes: 3 additions & 3 deletions python/tvm/ansor/__init__.py
@@ -29,14 +29,14 @@

# Shortcut
from .compute_dag import ComputeDAG, LayoutRewriteLevel
from .auto_schedule import SearchTask, MetaTileRewritePolicy, TuneOption, HardwareParams, \
PreloadMeasuredStates, PreAddCustomRule, auto_schedule
from .auto_schedule import SearchTask, SketchSearchPolicy, TuneOption, HardwareParams, \
PreloadMeasuredStates, PreloadCustomSketchRule, auto_schedule
from .measure import MeasureInput, LocalBuilder, LocalRunner, RPCRunner, LocalRPCMeasureContext
from .cost_model import RandomModel
from .cost_model.xgb_model import XGBModel
from .serialization import LogToFile, LogReader, best_measure_pair_in_file, \
load_from_file, write_measure_records_to_file
from .workload_registry import register_auto_scheduler_workload_func, \
from .workload_registry import register_workload_func, \
workload_key_to_dag, make_workload_key_func
from .task_scheduler import TaskScheduler, SimpleTaskScheduler
from .dispatcher import DispatchContext, ApplyConfig, ApplyHistoryBest as apply_history_best, \
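For orientation, a minimal sketch (not part of the diff) of how a downstream import line changes with this rename, using only names exported by the updated __init__.py shown above:

# Before this commit:
#   from tvm.ansor import MetaTileRewritePolicy, PreAddCustomRule, register_auto_scheduler_workload_func
# After this commit:
from tvm.ansor import SketchSearchPolicy, PreloadCustomSketchRule, register_workload_func
from tvm.ansor import TuneOption, RandomModel, auto_schedule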
28 changes: 15 additions & 13 deletions python/tvm/ansor/auto_schedule.py
@@ -83,17 +83,19 @@ def run_callbacks(self, callbacks):
_ffi_api.SearchPolicyRunCallbacks(self, callbacks)


@tvm._ffi.register_object("ansor.MetaTileRewritePolicy")
class MetaTileRewritePolicy(SearchPolicy):
""" The search policy that searches with meta tiling and random rewrite
@tvm._ffi.register_object("ansor.SketchSearchPolicy")
class SketchSearchPolicy(SearchPolicy):
""" The search policy that searches in a hierarchical search space defined by sketches.
The policy randomly samples programs from this space
and uses evolutionary search to fine-tune them.
Parameters
----------
program_cost_model: CostModel
Cost model for programs
params: dict
Parameters of the search policy, go meta_tile_rewrite_policy.h to find the
definitions. See code below to find the default values
Parameters of the search policy. See `src/ansor/search_policy/sketch_search_policy.h`
for the definitions, and the code below for the default values.
seed: int
Random seed
"""
@@ -124,7 +126,7 @@ def __init__(self,
params[key] = value

self.__init_handle_by_constructor__(
_ffi_api.MetaTileRewritePolicy, program_cost_model, params,
_ffi_api.SketchSearchPolicy, program_cost_model, params,
seed or random.randint(1, 1 << 30))
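As an illustration (not from the diff), a hedged sketch of constructing the renamed policy; the `params` keyword and the 'use_beam_search' key are taken from task_scheduler.py further down, and the full set of defaults lives in src/ansor/search_policy/sketch_search_policy.h:

from tvm.ansor import SketchSearchPolicy, XGBModel, RandomModel

policy = SketchSearchPolicy(XGBModel())              # learned cost model, default params
debug_policy = SketchSearchPolicy(RandomModel(),     # random cost model, one knob overridden
                                  params={'use_beam_search': 1})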


@@ -148,16 +150,16 @@ def __init__(self, filename: str):
_ffi_api.PreloadMeasuredStates, filename)


@tvm._ffi.register_object("ansor.PreAddCustomRule")
class PreAddCustomRule(SearchCallback):
@tvm._ffi.register_object("ansor.PreloadCustomSketchRule")
class PreloadCustomSketchRule(SearchCallback):
"""
A SearchCallback for MetaTileRewritePolicy that allowing users to add
A SearchCallback for SketchSearchPolicy that allows users to add
custom sketch rules.
Notes
-----
This is an advanced feature. Make sure you understand how it
works and this should only be used in MetaTileRewritePolicy.
works; it should only be used with SketchSearchPolicy.
Parameters
----------
@@ -168,7 +170,7 @@
"""
def __init__(self, meet_condition_func, apply_func):
self.__init_handle_by_constructor__(
_ffi_api.PreAddCustomRule, meet_condition_func, apply_func)
_ffi_api.PreloadCustomSketchRule, meet_condition_func, apply_func)
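A hedged sketch of how a custom sketch rule might be attached; the exact callback signatures are defined on the C++ side and are not shown in this hunk, so the stubs below use *args placeholders rather than real signatures:

def meet_condition(*args):
    # Decide whether the custom rule applies at this point of sketch generation (assumed contract).
    raise NotImplementedError

def apply_rule(*args):
    # Produce the custom sketch steps when the condition is met (assumed contract).
    raise NotImplementedError

custom_rule = PreloadCustomSketchRule(meet_condition, apply_rule)
# The callback is later handed to the search via TuneOption(pre_search_callbacks=[custom_rule]).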


@tvm._ffi.register_object("ansor.TuneOption")
@@ -197,7 +199,7 @@ class TuneOption(Object):
Callback functions called before the search process
Candidates:
- ansor.PreloadMeasuredStates
- ansor.PreAddCustomRule
- ansor.PreloadCustomSketchRule
"""
def __init__(self, n_trials=0, early_stopping=-1, num_measure_per_iter=64,
verbose=1, builder='local', runner='local', measure_callbacks=None,
@@ -249,7 +251,7 @@ def auto_schedule(workload, target=None,
"""
if isinstance(search_policy, str):
if search_policy == 'default':
search_policy = MetaTileRewritePolicy(RandomModel())
search_policy = SketchSearchPolicy(RandomModel())
else:
raise ValueError("Invalid search policy: " + search_policy)
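Putting the renamed pieces together, a hedged end-to-end sketch: only `workload`, `target`, and `search_policy` are visible in this hunk of auto_schedule(), so the keyword used for the tuning options and the return value are assumptions:

from tvm import ansor

policy = ansor.SketchSearchPolicy(ansor.RandomModel())
tune_option = ansor.TuneOption(
    n_trials=200,
    pre_search_callbacks=[ansor.PreloadMeasuredStates("measured_states.json")])

# `workload` stands for a registered workload key (see workload_registry.py below);
# the `tune_option=` keyword is assumed.
result = ansor.auto_schedule(workload, search_policy=policy, tune_option=tune_option)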

7 changes: 5 additions & 2 deletions python/tvm/ansor/relay_integration.py
@@ -28,7 +28,7 @@
from tvm import target, te, transform
from tvm.te.tensor import PlaceholderOp, ComputeOp
from .dispatcher import DispatchContext
from .workload_registry import register_auto_scheduler_workload_bufs, compute_dag_hash
from .workload_registry import register_workload_bufs, compute_dag_hash
from .compute_dag import ComputeDAG, LayoutRewriteLevel
from .env import GLOBAL_SCOPE

@@ -203,11 +203,14 @@ def traverse(t):
def auto_schedule_topi(outs):
""" Use ansor to auto-schedule a topi compute declaration """
io_tensors, has_layout_free = traverse_to_get_io_tensors(outs)
key = register_auto_scheduler_workload_bufs(io_tensors)
key = register_workload_bufs(io_tensors)

env = TracingEnvironment.current
if env is None: # in the final build mode
state = DispatchContext.current.query(target.Target.current(), key)
if state is None:
return te.create_schedule([x.op for x in outs])

dag = ComputeDAG(io_tensors)
# Only update the compute body (layout_rewrite_level = LayoutRewriteLevel.COMPUTE_REWRITE),
# since the kernel layout has already been rewritten by the Relay pass
18 changes: 9 additions & 9 deletions python/tvm/ansor/task_scheduler.py
@@ -21,7 +21,7 @@

import numpy as np

from .auto_schedule import SearchTask, SearchPolicy, MetaTileRewritePolicy, TuneOption
from .auto_schedule import SearchTask, SearchPolicy, SketchSearchPolicy, TuneOption
from .cost_model import RandomModel, XGBModel
from .measure import ProgramMeasurer
from .utils import array_mean, to_str_round
@@ -42,7 +42,7 @@ def compute_score(self, costs: List[float]) -> float:
def get_search_policies(search_policy: Union[str, List[SearchPolicy]], tasks: List[SearchTask],
num_measure_per_iter, load_model_file=None, load_log_file=None):
if search_policy == 'default':
search_policy = 'meta-rewrite.xgb'
search_policy = 'sketch.xgb'

if isinstance(search_policy, str):
policy_type, model_type = search_policy.split('.')
@@ -58,16 +58,16 @@ def get_search_policies(search_policy: Union[str, List[SearchPolicy]], tasks: Li
else:
raise ValueError("Invalid search policy: " + search_policy)

if policy_type == 'meta-rewrite':
search_policies = [MetaTileRewritePolicy(cost_model) for _ in range(len(tasks))]
if policy_type == 'sketch':
search_policies = [SketchSearchPolicy(cost_model) for _ in range(len(tasks))]
elif policy_type == 'limit-space':
search_policies = [MetaTileRewritePolicy(cost_model,
params={'cpu_multi_level_tiling_structure': 'SRS',
'disable_change_compute_location': 1})
search_policies = [SketchSearchPolicy(cost_model,
params={'cpu_multi_level_tiling_structure': 'SRS',
'disable_change_compute_location': 1})
for _ in range(len(tasks))]
elif policy_type == 'beam-search':
search_policies = [MetaTileRewritePolicy(cost_model,
params={'use_beam_search': 1})
search_policies = [SketchSearchPolicy(cost_model,
params={'use_beam_search': 1})
for _ in range(len(tasks))]
else:
raise ValueError("Invalid search policy: " + search_policy)
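For reference, a hedged sketch of the policy strings this helper accepts after the rename; the 'default' alias now expands to 'sketch.xgb', and the '<policy>.<model>' split shown above is the assumed format for the other variants:

# One policy object per task (`tasks` is a list of SearchTask, assumed defined elsewhere).
policies = get_search_policies('sketch.xgb', tasks, num_measure_per_iter=64)

# Variants visible above:
#   'limit-space.xgb' -> SketchSearchPolicy with cpu_multi_level_tiling_structure='SRS'
#                        and disable_change_compute_location=1
#   'beam-search.xgb' -> SketchSearchPolicy with use_beam_search=1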
14 changes: 7 additions & 7 deletions python/tvm/ansor/workload_registry.py
@@ -42,19 +42,19 @@
WORKLOAD_FUNC_REGISTRY = {}


def register_auto_scheduler_workload_func(func: Callable):
def register_workload_func(func: Callable):
"""Register a workload generation function
The input function should take hashable and JSON-serializable arguments
(int, float, tuple of int, tvm.tensor.Tensor, ...) and return a list of tvm.tensor.Tensor.
Examples
--------
@register_auto_scheduler_workload_func
@register_workload_func
def matmul(N, M, K):
A = tvm.placeholder((N, K), name='A')
B = tvm.placeholder((K, M), name='B')
k = tvm.reduce_axis((0, K), name='k')
C = tvm.compute((N, M), lambda i, j: tvm.sum(A[i][k] * B[k][j], axis=[k]), name='C')
A = te.placeholder((N, K), name='A')
B = te.placeholder((K, M), name='B')
k = te.reduce_axis((0, K), name='k')
C = te.compute((N, M), lambda i, j: tvm.sum(A[i][k] * B[k][j], axis=[k]), name='C')
return [A, B, C]
"""
func_name = func.__name__
@@ -84,7 +84,7 @@ def compute_dag_hash(dag: ComputeDAG):
return hashlib.md5(str_key).hexdigest()


def register_auto_scheduler_workload_bufs(bufs: List[Tensor]) -> str:
def register_workload_bufs(bufs: List[Tensor]) -> str:
"""Directly register buffers of a workload and return the workload_key
The buffers can later be looked up from the workload_key with workload_key_to_tensors.
"""
38 changes: 19 additions & 19 deletions scripts/common.py
@@ -14,36 +14,36 @@
import tvm
from tvm import te
from tvm.ansor import (LogReader, make_workload_key_func,
register_auto_scheduler_workload_func,
register_workload_func,
write_measure_records_to_file)
from tvm.contrib import ndk, util

############################################################
###################### Test Workloads ####################
############################################################

@register_auto_scheduler_workload_func
@register_workload_func
def min_mn(M, N):
A = te.placeholder((M, N), name='A')
B = topi.min(A, axis=1)

return [A, B]

@register_auto_scheduler_workload_func
@register_workload_func
def argmin_mn(M, N):
A = te.placeholder((M, N), name='A')
B = topi.argmin(A, axis=1)

return [A, B]

@register_auto_scheduler_workload_func
@register_workload_func
def softmax_mn(M, N):
A = te.placeholder((M, N), name='A')
B = topi.nn.softmax(A, axis=1)

return [A, B]

@register_auto_scheduler_workload_func
@register_workload_func
def norm_bmn(B, M, N):
A = te.placeholder((B, M, N), name='A')
i = te.reduce_axis((0, M))
@@ -53,15 +53,15 @@ def norm_bmn(B, M, N):

return [A, D]

@register_auto_scheduler_workload_func
@register_workload_func
def add_mn(M, N):
A = te.placeholder((M, N), name='A')
B = te.placeholder((M, N), name='B')
C = te.compute((M, N), lambda i, j: A[i][j] + B[i][j], name='C')

return [A, B, C]

@register_auto_scheduler_workload_func
@register_workload_func
def matmul_nkkm(N, M, K, in_type='float32', out_type='float32',
tensor_core_support=False):
A = te.placeholder((N, K), name='A', dtype=in_type)
@@ -73,7 +73,7 @@ def matmul_nkkm(N, M, K, in_type='float32', out_type='float32',
C = te.compute((N, M),
lambda i, j: te.sum(A[i][k] * B[k][j], axis=[k]),
name='C',
attrs={"auto_scheduler_tensor_core_support": "True" if tensor_core_support else "False"})
attrs={"ansor_tensor_core_support": "True" if tensor_core_support else "False"})
else:
if not ((in_type == 'float16' and out_type == 'float32') or \
(in_type == 'int8' and out_type == 'int32')):
@@ -82,11 +82,11 @@
lambda i, j: te.sum(A[i][k].astype(out_type) * B[k][j].astype(out_type),
axis=[k]),
name='C',
attrs={"auto_scheduler_tensor_core_support": "True" if tensor_core_support else "False"})
attrs={"ansor_tensor_core_support": "True" if tensor_core_support else "False"})

return [A, B, C]

@register_auto_scheduler_workload_func
@register_workload_func
def dense_layer(batch, in_dim, out_dim):
A = te.placeholder((batch, in_dim), name='A')
B = te.placeholder((out_dim, in_dim), name='B')
@@ -95,15 +95,15 @@ def max_pool_2d_nchw(N, C, H, W):

return [A, B, C]

@register_auto_scheduler_workload_func
@register_workload_func
def max_pool_2d_nchw(N, C, H, W):
data = te.placeholder((N, C, H, W), name='data')
out = topi.nn.pool(data, (2, 2), (1, 1), (0, 0, 0, 0), pool_type='max', ceil_mode=True,
layout="NCHW", count_include_pad=True)

return [data, out]

@register_auto_scheduler_workload_func
@register_workload_func
def add_min_relu(M, N):
A = te.placeholder((M, N), name='A')
B = te.placeholder((M, N), name='B')
@@ -112,7 +112,7 @@ def add_min_relu(M, N):
out = topi.nn.relu(D)
return [A, B, out]

@register_auto_scheduler_workload_func
@register_workload_func
def conv2d_relu_softmax_min(N, H, W, CI, CO, KH, KW, strides, padding, dilation):
data = te.placeholder((N, CI, H, W), name='data')
kernel = te.placeholder((CO, CI, KH, KW), name='kernel')
@@ -123,7 +123,7 @@ def conv2d_relu_softmax_min(N, H, W, CI, CO, KH, KW, strides, padding, dilation)

return [data, kernel, out]

@register_auto_scheduler_workload_func
@register_workload_func
def conv2d_nchw_bias(N, H, W, CI, CO, KH, KW, strides, padding, dilation):
data = te.placeholder((N, CI, H, W), name='data')
kernel = te.placeholder((CO, CI, KH, KW), name='kernel')
@@ -190,7 +190,7 @@ def conv2d_nhwc_without_layout_rewrite(Input, Filter, stride, padding, dilation,
return Output


@register_auto_scheduler_workload_func
@register_workload_func
def conv2d_nhwc_bias_with_rewrite(N, H, W, CI, CO, KH, KW, strides, padding, dilation):
data = te.placeholder((N, H, W, CI), name='data')
kernel = te.placeholder((KH, KW, CI, CO), name='kernel')
@@ -199,7 +199,7 @@ def conv2d_nhwc_bias_with_rewrite(N, H, W, CI, CO, KH, KW, strides, padding, dil
out = topi.add(conv, bias)
return [data, kernel, bias, out]

@register_auto_scheduler_workload_func
@register_workload_func
def depthwise_conv2d_nhwc_bias_with_rewrite(N, H, W, CI, CO, KH, KW, strides, padding, dilation):
data = te.placeholder((N, H, W, CI), name='data')
kernel = te.placeholder((KH, KW, CI, 1), name='kernel')
@@ -208,7 +208,7 @@ def depthwise_conv2d_nhwc_bias_with_rewrite(N, H, W, CI, CO, KH, KW, strides, pa
out = topi.add(conv, bias)
return [data, kernel, bias, out]

@register_auto_scheduler_workload_func
@register_workload_func
def conv2d_nhwc_bias(N, H, W, CI, CO, KH, KW, strides, padding, dilation):
data = te.placeholder((N, H, W, CI), name='data')
kernel = te.placeholder((KH, KW, CI, CO), name='kernel')
@@ -218,7 +218,7 @@ def conv2d_nhwc_bias(N, H, W, CI, CO, KH, KW, strides, padding, dilation):
return [data, kernel, bias, out]


@register_auto_scheduler_workload_func
@register_workload_func
def conv2d_nchw_bn_relu(N, H, W, CI, CO, kernel_size, strides, padding, dilation=1):
data = te.placeholder((N, CI, H, W), name='data')
kernel = te.placeholder((CO, CI, kernel_size, kernel_size), name='kernel')
@@ -243,7 +243,7 @@ def conv2d_nchw_bn_relu(N, H, W, CI, CO, kernel_size, strides, padding, dilation

return [data, kernel, bias, bn_offset, bn_scale, out]

@register_auto_scheduler_workload_func
@register_workload_func
def conv2d_nhwc_bn_relu(N, H, W, CI, CO, kernel_size, strides, padding, dilation=1):
data = te.placeholder((N, H, W, CI), name='data')
kernel = te.placeholder((kernel_size, kernel_size, CI, CO), name='kernel')