Commit

fix bugs

icemelon committed Feb 5, 2020
1 parent da924be commit b233005
Showing 18 changed files with 68 additions and 159 deletions.
2 changes: 1 addition & 1 deletion python/tvm/autotvm/graph_tuner/utils/traverse_graph.py
@@ -83,7 +83,7 @@ def _traverse_expr(node):
             return
         node_index = len(node_list)
         node_entry = {"node": node, "inputs": [], "types": [],
-                      "op": "null", "name": None}
+                      "op": None, "name": None}

         if isinstance(node, Call):
             op = node.op
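Note: the sentinel for "no operator" changes from the string "null" to None here, and the check in graph_tuner/utils/utils.py below changes in lockstep. A minimal sketch (hypothetical node entries) of the invariant the two files now share:

# Parameter nodes carry op=None; call nodes carry a real operator.
param_node = {"op": None, "name": "data"}
call_node = {"op": "nn.conv2d", "name": None}

def is_real_op(entry):
    # mirrors the updated test in has_multiple_inputs
    return entry["op"] is not None

assert not is_real_op(param_node)
assert is_real_op(call_node)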
13 changes: 6 additions & 7 deletions python/tvm/autotvm/graph_tuner/utils/utils.py
@@ -46,7 +46,7 @@ def has_multiple_inputs(node_list, node_idx, input_names):
         in_idx = in_idx[0]
         in_node = node_list[in_idx]
         # Exclude parameter nodes
-        if in_node["op"] != "null" or \
+        if in_node["op"] is not None or \
                 ("name" in in_node and in_node["name"] in input_names):
             num_inputs += 1
     return num_inputs > 1
@@ -71,9 +71,10 @@ def is_boundary_node(node_entry, input_names):
         whether node is a boundary node.
     """
     # Operators dependent on original layouts.
-    _LAYOUT_FIXED_OP = ["batch_flatten", "transpose", "reshape",
-                        "multibox_prior", "multibox_transform_loc", "where",
-                        "non_max_suppression", "strided_slice"]
+    _LAYOUT_FIXED_OP = [relay.op.get(name) for name in (
+        "nn.batch_flatten", "transpose", "reshape", "vision.multibox_prior",
+        "vision.multibox_transform_loc", "where", "vision.non_max_suppression",
+        "strided_slice")]

     out = node_entry["op"] in _LAYOUT_FIXED_OP or \
           ("name" in node_entry and node_entry["name"] in input_names)
@@ -94,9 +95,7 @@ def is_skipped_node(node_entry):
         whether node is skipped.
     """
     # Operators not counted in graph tuner.
-    _SKIPPED_OP = ["Tuple"]
-
-    return node_entry["op"] in _SKIPPED_OP
+    return isinstance(node_entry["node"], relay.Tuple)


 def bind_inputs(expr, input_shapes=None, input_dtypes="float32"):
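Note: boundary operators are now compared as canonical relay Op objects instead of bare name strings, which also fixes the unqualified names ("batch_flatten" vs. the registered "nn.batch_flatten"). A minimal sketch, assuming these operators are registered as in TVM of this era:

import tvm.relay as relay

# relay.op.get returns the single registered Op object for a name, so
# membership tests compare identity rather than fragile strings.
reshape_op = relay.op.get("reshape")
assert reshape_op is relay.op.get("reshape")

fixed_ops = [relay.op.get(n) for n in ("reshape", "transpose")]
assert relay.op.get("reshape") in fixed_ops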
4 changes: 2 additions & 2 deletions python/tvm/autotvm/record.py
@@ -161,12 +161,12 @@ def clean_json_to_python(x):
         tgt = _target.create(items[0])
         task_tuple = pickle.loads(base64.b64decode(items[1].encode()))
         config = pickle.loads(base64.b64decode(items[2].encode()))
-        result = pickle.loads(base64.b64decode(items[3].encode()))
+        result = MeasureResult(*pickle.loads(base64.b64decode(items[3].encode())))
         config.cost = np.mean(result.costs)

         tsk = task.Task(task_tuple[0], task_tuple[1])
         tsk.workload = task_tuple[3]
-        return MeasureInput(tgt, tsk, config), MeasureResult(*result)
+        return MeasureInput(tgt, tsk, config), result

     raise RuntimeError("Invalid log protocol: " + protocol)
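Note: the old code read result.costs on the raw unpickled tuple before the MeasureResult namedtuple was rebuilt, which raises AttributeError. A standalone sketch of the fix with a stand-in namedtuple (field names assumed from autotvm):

import base64
import pickle
from collections import namedtuple

MeasureResult = namedtuple("MeasureResult",
                           ["costs", "error_no", "all_cost", "timestamp"])

encoded = base64.b64encode(
    pickle.dumps(tuple(MeasureResult((0.01, 0.02), 0, 1.0, 0.0))))
fields = pickle.loads(base64.b64decode(encoded))  # a plain tuple: no .costs
result = MeasureResult(*fields)                   # rebuild first, then use
assert result.costs == (0.01, 0.02)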
3 changes: 0 additions & 3 deletions python/tvm/autotvm/task/dispatcher.py
@@ -33,9 +33,6 @@
 import logging

 import numpy as np
-from decorator import decorate
-
-from tvm import target as _target

 from .space import FallbackConfigEntity
71 changes: 2 additions & 69 deletions python/tvm/autotvm/task/task.py
@@ -342,74 +342,6 @@ def args_to_workload(x, task_name=None):
                 'primitive types or tvm.expr.Var only' % type(x))
     return tuple((task_name, ) + workload) if task_name else workload

-# def template(func):
-#     """
-#     Decorate a function as a tunable schedule template
-#
-#     Parameters
-#     ----------
-#     func: callable
-#         A callable template function.
-#         Its argument should be hashable values.
-#         Its return value should be a Tuple(Schedule, Array of Tensor)
-#
-#     Returns
-#     -------
-#     func: callable
-#         The decorated function
-#
-#     Examples
-#     --------
-#     The following code is a tunable template for a blocked matrix multiplication
-#
-#     .. code-block:: python
-#
-#     @autotvm.template
-#     def matmul(N, L, M, dtype):
-#         A = tvm.placeholder((N, L), name='A', dtype=dtype)
-#         B = tvm.placeholder((L, M), name='B', dtype=dtype)
-#
-#         k = tvm.reduce_axis((0, L), name='k')
-#         C = tvm.compute((N, M), lambda i, j: tvm.sum(A[i, k] * B[k, j], axis=k), name='C')
-#         s = tvm.create_schedule(C.op)
-#
-#         # schedule
-#         y, x = s[C].op.axis
-#         k = s[C].op.reduce_axis[0]
-#
-#         ##### define space begin #####
-#         cfg = autotvm.get_config()
-#         cfg.define_split("tile_y", y, num_outputs=2)
-#         cfg.define_split("tile_x", x, num_outputs=2)
-#         ##### define space end #####
-#
-#         # schedule according to config
-#         yo, yi = cfg["tile_y"].apply(s, C, y)
-#         xo, xi = cfg["tile_x"].apply(s, C, x)
-#
-#         s[C].reorder(yo, xo, k, yi, xi)
-#
-#         return s, [A, B, C]
-#     """
-#     # pylint: disable=unused-variable
-#
-#     fname = get_func_name(func)
-#
-#     @register(fname)
-#     @dispatcher
-#     def config_dispatcher(*args, **kwargs):
-#         assert not kwargs, "Do not support kwargs in template function call"
-#         return (fname, ) + args_to_workload(args)
-#
-#     @config_dispatcher.register("")
-#     def template_call(cfg, *args, **kwargs):
-#         assert not kwargs, "Do not support kwargs in template function call"
-#         with ApplyConfig(cfg):
-#             return func(*args, **kwargs)
-#
-#     config_dispatcher.func_name = fname
-#     return config_dispatcher

 def get_config():
     """Get current config object
@@ -418,7 +350,8 @@ def get_config():
     cfg: ConfigSpace or ConfigEntity
         The current config
     """
-    return DispatchContext.current.query(None, None)
+    tgt = _target.current_target(allow_none=True)
+    return DispatchContext.current.query(tgt, None)

 class FlopCalculationError(RuntimeError):
     """Error happens when estimating FLOP for a compute op"""
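Note: get_config() previously queried the dispatch context with target=None; it now resolves the target active in the enclosing scope, so target-keyed contexts (e.g. ApplyHistoryBest) can find their entries. A minimal sketch, assuming the autotvm API at this commit:

import tvm
from tvm import autotvm

with tvm.target.create("llvm"):
    # what query() now receives as its first argument
    tgt = tvm.target.current_target(allow_none=True)
    assert str(tgt).startswith("llvm")
    cfg = autotvm.get_config()  # a fallback config outside any tuning context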
11 changes: 6 additions & 5 deletions python/tvm/relay/backend/compile_engine.py
@@ -256,7 +256,7 @@ def create_tensors(typ, tensors):
         self.func_name = "fused"
         outputs = self.visit(prim_func.body)
         if len(self.func_name) > ScheduleGetter.MAX_FUNC_NAME_LENGTH:
-            hash_digest = int(hashlib.sha1(self.func_name).hexdigest(), 16)
+            hash_digest = int(hashlib.sha1(self.func_name.encode("utf-8")).hexdigest(), 16)
             self.func_name = "%s_%s" % (
                 self.func_name[:ScheduleGetter.MAX_FUNC_NAME_LENGTH], hash_digest)

@@ -270,7 +270,8 @@ def create_tensors(typ, tensors):
         # print('master op:', self.master_op.name)
         sch = self.master_implement.schedule(self.master_attrs, tensor_outs, self.target)
         for scalar in self.scalars:
-            sch[scalar].compute_inline()
+            if scalar in sch.stage_map:
+                sch[scalar].compute_inline()
         return CachedFunc(self.target, self.func_name, inputs, outputs, sch)

     def visit_var(self, var):
@@ -381,10 +382,10 @@ def visit_tuple(self, tup):
         return fields

     def visit_tuple_getitem(self, t):
-        tup = self.visit(t.tuple)
-        assert len(tup) == len(t.tuple.checked_type.fields)
+        tup = self.visit(t.tuple_value)
+        assert len(tup) == len(t.tuple_value.checked_type.fields)
         assert t.index >= 0
-        assert t.index < tup.size()
+        assert t.index < len(tup)
         return [tup[t.index]]
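Note: the sha1 fix is a Python 3 requirement -- hashlib accepts only bytes, never str -- and is easy to check standalone:

import hashlib

func_name = "fused_nn_conv2d_add_nn_relu" * 10  # stand-in for an overlong fused name
try:
    hashlib.sha1(func_name)  # old code path: TypeError under Python 3
except TypeError:
    pass

digest = int(hashlib.sha1(func_name.encode("utf-8")).hexdigest(), 16)
short_name = "%s_%s" % (func_name[:80], digest)  # 80 stands in for MAX_FUNC_NAME_LENGTH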
4 changes: 3 additions & 1 deletion python/tvm/relay/frontend/tensorflow.py
@@ -310,6 +310,7 @@ def _impl(inputs, attr, params):
             flip_layout = True

         if attr['data_format'] == 'NHWC':
+            in_channels = input_shape[3]
             kernel_h, kernel_w, _, depth_mult = weights_shape
             attr['kernel_shape'] = (weights_shape[0], weights_shape[1])
             if opname == 'conv':
@@ -323,6 +324,7 @@ def _impl(inputs, attr, params):
                 attr['dilations'] = (attr['dilations'][1], attr['dilations'][2])
                 attr['strides'] = (attr['strides'][1], attr['strides'][2])
         elif attr['data_format'] == 'NCHW':
+            in_channels = input_shape[1]
             _, depth_mult, kernel_h, kernel_w = weights_shape
             attr['kernel_shape'] = (weights_shape[2], weights_shape[3])
             if opname == 'conv':
@@ -343,7 +345,7 @@ def _impl(inputs, attr, params):
             raise tvm.error.OpAttributeInvalid(msg.format(attr['data_format']))

         if opname == 'depthwise':
-            attr['groups'] = attr['channels']
+            attr['groups'] = in_channels

         # Fix padding
         attr['padding'] = attr['padding'].decode("utf-8")
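Note: for a TensorFlow depthwise convolution the 'channels' attribute is in_channels * depth_multiplier, so using it as the group count was wrong whenever the multiplier exceeded 1. A worked example with a hypothetical NHWC DepthwiseConv2dNative:

input_shape = (1, 32, 32, 8)   # NHWC
weights_shape = (3, 3, 8, 2)   # (kh, kw, in_channels, depth_multiplier)

in_channels = input_shape[3]
kernel_h, kernel_w, _, depth_mult = weights_shape
channels = in_channels * depth_mult

assert channels == 16          # what the old code passed as groups
assert in_channels == 8        # what Relay actually needs: groups == C_in
groups = in_channels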
2 changes: 1 addition & 1 deletion python/tvm/relay/op/nn/_nn.py
@@ -307,7 +307,7 @@ def compute_upsampling3d(attrs, inputs, out_dtype):

 # mirror_pad
 @reg.register_compute("nn.mirror_pad")
-def compute_mirror_pad(attrs, inputs, out_dtype, target):
+def compute_mirror_pad(attrs, inputs, out_dtype):
     pad_before, pad_after = list(zip(*attrs.pad_width))
     mode = attrs.mode
     out = topi.nn.mirror_pad(inputs[0], pad_before=pad_before, pad_after=pad_after, mode=mode)
3 changes: 2 additions & 1 deletion python/tvm/relay/op/strategy/x86.py
@@ -116,7 +116,8 @@ def conv2d_strategy_cpu(attrs, inputs, out_type, target):
                 wrap_topi_schedule(topi.generic.schedule_depthwise_conv2d_nchw))
         elif layout == "NHWC":
             assert kernel_layout == "HWOI"
-            logger.warning("For x86 target, NCHW layout is recommended for depthwise_conv2d.")
+            logger.warning("For x86 target, depthwise_conv2d with NCHW layout is "
+                           "not optimized.")
             strategy.add_implement(
                 wrap_compute_conv2d(topi.nn.depthwise_conv2d_nhwc),
                 wrap_topi_schedule(topi.generic.schedule_depthwise_conv2d_nhwc))
14 changes: 12 additions & 2 deletions src/relay/op/nn/convolution.h
@@ -153,6 +153,16 @@ bool Conv2DRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
       << " But got " << out_layout;

   Array<IndexExpr> dshape_nchw = trans_in_layout.ForwardShape(data->shape);
+  bool is_depthwise = false;
+  if (param->groups > 1) {
+    CHECK(weight->shape.defined()) << "Weight shape must be specified " <<
+        "when groups is greater than 1.";
+    Array<IndexExpr> wshape_oihw = trans_kernel_layout.ForwardShape(weight->shape);
+    if (tvm::tir::Equal(param->groups, dshape_nchw[1]) &&
+        tvm::tir::Equal(param->groups, wshape_oihw[0])) {
+      is_depthwise = true;
+    }
+  }

   IndexExpr channels, dilated_ksize_y, dilated_ksize_x;
   // infer weight if the kernel_size and channels are defined
@@ -161,9 +171,9 @@ bool Conv2DRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
     CHECK_EQ(param->dilation.size(), 2);
     Array<IndexExpr> wshape;

-    if (tvm::tir::Equal(param->channels, param->groups) && !tvm::tir::Equal(param->channels, 1)) {
+    if (is_depthwise) {
       // infer weight's shape for depthwise convolution
-      wshape = {{dshape_nchw[1], indexdiv(param->groups, dshape_nchw[1]), param->kernel_size[0],
+      wshape = {{dshape_nchw[1], indexdiv(param->channels, dshape_nchw[1]), param->kernel_size[0],
                  param->kernel_size[1]}};
     } else {
       wshape = {{param->channels, indexdiv(dshape_nchw[1], param->groups), param->kernel_size[0],
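Note: the old depthwise test (channels == groups != 1) misfires when channels is unset or merely coincides with groups; the new relation also requires the weight's output-channel dimension to equal the group count. A minimal Python sketch of the predicate, assuming static integer shapes:

def is_depthwise(groups, dshape_nchw, wshape_oihw):
    # depthwise: group count equals the input channels (NCHW) and the
    # weight's output channels (OIHW); anything else is an ordinary
    # grouped convolution
    return groups > 1 and groups == dshape_nchw[1] and groups == wshape_oihw[0]

assert is_depthwise(8, (1, 8, 32, 32), (8, 2, 3, 3))       # depth_mult == 2
assert not is_depthwise(4, (1, 8, 32, 32), (16, 2, 3, 3))  # grouped, not depthwise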
15 changes: 11 additions & 4 deletions tests/python/frontend/mxnet/test_forward.py
@@ -852,17 +852,22 @@ def verify(data_shape, out_shape, begin, end):


 def test_forward_convolution():
-    def verify(data_shape, kernel_size, stride, pad, num_filter):
-        weight_shape=(num_filter, data_shape[1],) + kernel_size
+    def verify(data_shape, kernel_size, stride, pad, num_filter, is_depthwise=False):
+        if is_depthwise:
+            groups = data_shape[1]
+            weight_shape=(data_shape[1], num_filter // groups,) + kernel_size
+        else:
+            groups = 1
+            weight_shape=(num_filter, data_shape[1],) + kernel_size
         x = np.random.uniform(size=data_shape).astype("float32")
         weight = np.random.uniform(size=weight_shape).astype("float32")
         bias = np.random.uniform(size=num_filter).astype("float32")
         ref_res = mx.nd.Convolution(data=mx.nd.array(x), weight=mx.nd.array(weight),
                                     bias=mx.nd.array(bias), kernel=kernel_size, stride=stride,
-                                    pad=pad, num_filter=num_filter)
+                                    pad=pad, num_filter=num_filter, num_group=groups)
         mx_sym = mx.sym.Convolution(mx.sym.var("x"), mx.sym.var("weight"), mx.sym.var("bias"),
                                     kernel=kernel_size, stride=stride,
-                                    pad=pad, num_filter=num_filter)
+                                    pad=pad, num_filter=num_filter, num_group=groups)
         shape_dict = {"x": x.shape, "weight": weight.shape, "bias": bias.shape}
         mod, _ = relay.frontend.from_mxnet(mx_sym, shape_dict)
         for target, ctx in ctx_list():
@@ -879,6 +884,8 @@ def verify(data_shape, kernel_size, stride, pad, num_filter):
     verify(data_shape=(20, 1, 32, 32), kernel_size=(3, 3), stride=(1, 1), pad=(1, 1), num_filter=2)
     verify(data_shape=(1, 8, 32, 32), kernel_size=(3, 3), stride=(1, 1), pad=(1, 1), num_filter=2)
     verify(data_shape=(20, 8, 32, 32), kernel_size=(3, 3), stride=(1, 1), pad=(1, 1), num_filter=2)
+    verify(data_shape=(1, 8, 32, 32), kernel_size=(3, 3), stride=(1, 1), pad=(1, 1), num_filter=8,
+           is_depthwise=True)

 def test_forward_deconvolution():
     def verify(data_shape, kernel_size, stride, pad, num_filter):
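Note: a quick check of the weight shape the new depthwise case exercises. MXNet grouped-convolution weights are (num_filter, in_channels // num_group, kh, kw), which coincides with the test's (data_shape[1], num_filter // groups) tuple here because num_filter == in_channels:

data_shape, num_filter = (1, 8, 32, 32), 8
groups = data_shape[1]                                 # 8: one group per channel
weight_shape = (data_shape[1], num_filter // groups) + (3, 3)
assert weight_shape == (8, 1, 3, 3)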
4 changes: 2 additions & 2 deletions tests/python/relay/test_op_level2.py
@@ -199,7 +199,7 @@ def run_test_conv2d(dtype, out_dtype, scale, dshape, kshape,
         except_targets = []

     x = relay.var("x", shape=dshape, dtype=dtype)
-    w = relay.var("w", dtype=dtype)
+    w = relay.var("w", shape=kshape, dtype=dtype)
     y = relay.nn.conv2d(x, w,
                         padding=padding,
                         dilation=dilation,
@@ -230,7 +230,7 @@ def compile_test_conv2d_arm_cpu(dtype, out_dtype, scale, dshape, kshape,
                                 dilation=(1, 1),
                                 **attrs):
     x = relay.var("x", shape=dshape, dtype=dtype)
-    w = relay.var("w", dtype=dtype)
+    w = relay.var("w", shape=kshape, dtype=dtype)
     y = relay.nn.conv2d(x, w,
                         padding=padding,
                         dilation=dilation,
6 changes: 3 additions & 3 deletions tests/python/unittest/test_autotvm_common.py
@@ -21,7 +21,7 @@
 from tvm import autotvm
 from tvm.autotvm import MeasureInput, MeasureResult

-@autotvm.template
+@autotvm.register_customized_task("testing/matmul")
 def matmul(N, L, M, dtype):
     A = tvm.placeholder((N, L), name='A', dtype=dtype)
     B = tvm.placeholder((L, M), name='B', dtype=dtype)
@@ -48,7 +48,7 @@ def matmul(N, L, M, dtype):

     return s, [A, B, C]

-@autotvm.template
+@autotvm.register_customized_task("testing/bad_matmul")
 def bad_matmul(N, L, M, dtype):
     if 'bad_device' in tvm.target.current_target().keys:
         A = tvm.placeholder((N, L), name='A', dtype=dtype)
@@ -70,7 +70,7 @@ def bad_matmul(N, L, M, dtype):
 def get_sample_task(n=128):
     """return a sample task for testing"""
     target = tvm.target.create("llvm")
-    task = autotvm.task.create(matmul, args=(n, n, n, 'float32'), target=target)
+    task = autotvm.task.create("testing/matmul", args=(n, n, n, 'float32'), target=target)
     return task, target

 def get_sample_records(n):
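Note: templates are now registered under a string key and looked up by that key, rather than passed around as function objects. A minimal sketch of the new flow, assuming the autotvm API at this commit ("testing/vecadd" is a hypothetical key):

import tvm
from tvm import autotvm

@autotvm.register_customized_task("testing/vecadd")
def vecadd(n):
    A = tvm.placeholder((n,), name='A')
    B = tvm.placeholder((n,), name='B')
    C = tvm.compute((n,), lambda i: A[i] + B[i], name='C')
    s = tvm.create_schedule(C.op)
    cfg = autotvm.get_config()
    cfg.define_split("tile_i", s[C].op.axis[0], num_outputs=2)
    return s, [A, B, C]

task = autotvm.task.create("testing/vecadd", args=(1024,),
                           target=tvm.target.create("llvm"))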
34 changes: 1 addition & 33 deletions tests/python/unittest/test_autotvm_dispatch_context.py
@@ -18,42 +18,11 @@
 The dispatcher can choose which template to use according
 to the parameters of workload"""

-from collections import namedtuple
 from tvm import autotvm
-from tvm.autotvm.task import dispatcher, DispatchContext
-
-SimpleConfig = namedtuple('SimpleConfig', ('template_key', 'is_fallback'))
-
-def test_dispatch():
-    @dispatcher
-    def my_dispatcher(a, b):
-        return (a, b)
-
-    @my_dispatcher.register("im2col")
-    def _im2col(cfg, a, b):
-        return a
-
-    @my_dispatcher.register("spatial_pack")
-    def _spatial_pack(cfg, a, b):
-        return b
-
-    class SimpleDispatcher(DispatchContext):
-        def query(self, target, workload):
-            a, b = workload
-            tkey = "spatial_pack" if a + b > 2 else "im2col"
-            cfg = SimpleConfig(tkey, False)
-            return cfg
-
-    with SimpleDispatcher():
-        # this will call im2col
-        assert my_dispatcher(1, 0) == 1
-
-        # this will call spatial pack
-        assert my_dispatcher(1, 100) == 100
-
 def test_fallback():

-    @autotvm.template
+    @autotvm.register_customized_task("testing/dispatch/fallback")
     def simple_template(a, b):
         cfg = autotvm.get_config()
         assert cfg.is_fallback
@@ -62,5 +31,4 @@ def simple_template(a, b):


 if __name__ == "__main__":
-    test_dispatch()
     test_fallback()
2 changes: 1 addition & 1 deletion tests/python/unittest/test_autotvm_measure.py
@@ -74,7 +74,7 @@ def _callback_correct(tuner, measure_inputs, measure_results):
     # a bad template
     n = 128
     target = tvm.target.create("llvm -device=bad_device")
-    task = autotvm.task.create(bad_matmul, args=(n, n, n, 'float32'), target=target)
+    task = autotvm.task.create("testing/bad_matmul", args=(n, n, n, 'float32'), target=target)

     def _callback_wrong(tuner, measure_inputs, measure_results):
         for inp, res in zip(measure_inputs, measure_results):