Remove unnecessary utility code #5

Closed
wants to merge 3 commits
30 changes: 0 additions & 30 deletions CMakeLists.txt
@@ -29,36 +29,6 @@ tvm_option(USE_ROCM "Build with ROCM" OFF)
tvm_option(ROCM_PATH "The path to rocm" /opt/rocm)
tvm_option(USE_RPC "Build with RPC" ON)
tvm_option(USE_LLVM "Build with LLVM, can be set to specific llvm-config path" OFF)
if(USE_LLVM STREQUAL "ON")
getFromList(BUILD_PATH_SRC ".*LLVM.*" LLVM_PATH)
endif()
if(USE_CUDA STREQUAL "ON")
getFromList(BUILD_PATH_SRC ".*\/Cuda.*" USE_CUDA)
execute_process(COMMAND ls ${USE_CUDA} OUTPUT_VARIABLE CUDA_SRC_DIR)
if (NOT ${CUDA_SRC_DIR} MATCHES "build[^-]")
set(USE_CUDA ${USE_CUDA}/../../DEV.STD.PTHREAD/build)
else()
set(USE_CUDA ${USE_CUDA}/build)
endif()
endif()
if(USE_CUDNN STREQUAL "ON")
getFromList(BUILD_PATH_SRC ".*Cudnn.*" USE_CUDNN)
execute_process(COMMAND ls ${USE_CUDNN} OUTPUT_VARIABLE CUDNN_SRC_DIR)
if (NOT ${CUDNN_SRC_DIR} MATCHES "build[^-]")
set(USE_CUDNN ${USE_CUDNN}/../../DEV.STD.PTHREAD/build)
else()
set(USE_CUDNN ${USE_CUDNN}/build)
endif()
endif()
if(USE_TENSORRT STREQUAL "ON")
getFromList(BUILD_PATH_SRC ".*DLC_TENSORRT.*" USE_TENSORRT)
execute_process(COMMAND ls ${USE_TENSORRT} OUTPUT_VARIABLE TENSORRT_SRC_DIR)
if (NOT ${TENSORRT_SRC_DIR} MATCHES "Config")
set(USE_TENSORRT ${USE_TENSORRT}/../../DEV.STD.PTHREAD/build)
else()
set(USE_TENSORRT ${USE_TENSORRT}/build)
endif()
endif()
tvm_option(USE_STACKVM_RUNTIME "Include stackvm into the runtime" OFF)
tvm_option(USE_GRAPH_RUNTIME "Build with tiny graph runtime" ON)
tvm_option(USE_GRAPH_RUNTIME_DEBUG "Build with tiny graph runtime debug mode" OFF)
2 changes: 1 addition & 1 deletion nnvm/include/nnvm/op_attr_types.h
@@ -224,7 +224,7 @@ using FCorrectLayout = std::function<bool(
* \return success flag.
*/
using FCorrectLayoutEx = std::function<bool(
NodeAttrs& attrs,
const NodeAttrs& attrs,
std::vector<TShape>* ishapes,
std::vector<Layout>* ilayouts,
const std::vector<Layout>* last_ilayouts,
7 changes: 3 additions & 4 deletions nnvm/python/nnvm/frontend/mxnet.py
@@ -274,8 +274,8 @@ def _lrn(inputs, attrs):
return _get_nnvm_op(op_name)(*inputs, **new_attrs)

def _symbol_ring_buffer(inputs, attrs):
output = _get_nnvm_op('ring_buffer')(*inputs, **attrs)
return _sym._assign(inputs[1], output)
output = _get_nnvm_op('ring_buffer')(*inputs, **attrs)
return _sym._assign(inputs[1], output)


def _copy(inputs, _):
@@ -339,8 +339,7 @@ def _argmax(inputs, attrs):
'expand_dims' : _expand_dims,
'LRN' : _lrn,
'ring_buffer' : _symbol_ring_buffer,
'LinearRegressionOutput' : _copy,
'argmax' : _argmax
'LinearRegressionOutput' : _copy
}

def _convert_symbol(op_name, inputs, attrs,
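Reviewer note: the two hunks above make two small fixes. The _symbol_ring_buffer body changes only in indentation (the old and new lines read identically here because the copied diff does not preserve leading whitespace), and the 'argmax' entry is dropped from the frontend's converter table. For context, the MXNet frontend converts a graph by looking each operator name up in a dict of converter callables and invoking the match. A minimal sketch of that dispatch pattern follows; the dict name _convert_map and the simplified bodies are assumptions for illustration, only the lookup pattern mirrors the file above.

def _copy(inputs, _):
    # identity-style converter: forward the single input unchanged
    return inputs[0]

# name assumed for illustration; the real table maps MXNet op names to converter functions
_convert_map = {
    'LinearRegressionOutput': _copy,
}

def _convert_symbol(op_name, inputs, attrs):
    # dispatch: fail loudly for operators without a registered converter
    if op_name not in _convert_map:
        raise NotImplementedError("Operator %s is not supported." % op_name)
    return _convert_map[op_name](inputs, attrs)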
30 changes: 15 additions & 15 deletions nnvm/python/nnvm/top/nn.py
@@ -127,12 +127,12 @@ def compute_conv2d(attrs, inputs, _):
else:
raise ValueError("not support arbitrary group number for now")

if attrs.get_bool("use_bias"):
bias = inputs[2]
expand_axis = 1 if layout == "NCHW" else 0
bias = topi.expand_dims(bias, axis=expand_axis, num_newaxis=2)
out = topi.add(out, bias)
return out
if attrs.get_bool("use_bias"):
bias = inputs[2]
expand_axis = 1 if layout == "NCHW" else 0
bias = topi.expand_dims(bias, axis=expand_axis, num_newaxis=2)
out = topi.add(out, bias)
return out

@reg.register_schedule("conv2d")
def schedule_conv2d(attrs, outs, target):
@@ -245,11 +245,11 @@ def compute_contrib_conv2d_winograd_without_weight_transform(attrs, inputs, _):
inputs[0], inputs[1], strides, padding, layout, out_dtype,
tile_size)

if attrs.get_bool("use_bias"):
bias = inputs[2]
bias = topi.expand_dims(bias, axis=1, num_newaxis=2)
out = topi.add(out, bias)
return out
if attrs.get_bool("use_bias"):
bias = inputs[2]
bias = topi.expand_dims(bias, axis=1, num_newaxis=2)
out = topi.add(out, bias)
return out

@reg.register_schedule("_contrib_conv2d_winograd_without_weight_transform")
def schedule_contrib_conv2d_winograd_without_weight_transform(attrs, outs, target):
@@ -297,22 +297,22 @@ def schedule_conv2d_transpose(attrs, outs, target):
reg.register_pattern("conv2d_transpose", OpPattern.OUT_ELEMWISE_FUSABLE)

@reg.register_alter_op_layout("max_pool2d")
def alter_pooling_layout(attrs, inputs, tinfos):
def alter_pooling_layout_max_pool2d(attrs, inputs, tinfos):
with tvm.target.create(attrs.get_string("target")):
return topi.nn.max_pool2d_alter_layout(attrs, inputs, tinfos)

@reg.register_alter_op_layout("avg_pool2d")
def alter_pooling_layout(attrs, inputs, tinfos):
def alter_pooling_layout_avg_pool2d(attrs, inputs, tinfos):
with tvm.target.create(attrs.get_string("target")):
return topi.nn.avg_pool2d_alter_layout(attrs, inputs, tinfos)

@reg.register_alter_op_layout("global_max_pool2d")
def alter_pooling_layout(attrs, inputs, tinfos):
def alter_pooling_layout_global_max_pool2d(attrs, inputs, tinfos):
with tvm.target.create(attrs.get_string("target")):
return topi.nn.global_max_pool2d_alter_layout(attrs, inputs, tinfos)

@reg.register_alter_op_layout("global_avg_pool2d")
def alter_pooling_layout(attrs, inputs, tinfos):
def alter_pooling_layout_global_avg_pool2d(attrs, inputs, tinfos):
with tvm.target.create(attrs.get_string("target")):
return topi.nn.global_avg_pool2d_alter_layout(attrs, inputs, tinfos)

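Reviewer note: two kinds of fixes are bundled in this file. The use_bias blocks in compute_conv2d and in the Winograd variant change only in indentation (again, the copied diff drops leading whitespace, so the old and new lines look identical here). Separately, the four pooling handlers were all named alter_pooling_layout; each @reg.register_alter_op_layout registration still ran at decoration time, but every later definition rebound the same module-level name, which trips pylint's function-redefined warning and makes the handlers impossible to refer to individually. A toy sketch of that shadowing effect, not TVM code:

_registry = {}

def register_alter_op_layout(op_name):
    # toy stand-in for reg.register_alter_op_layout: store the handler under op_name
    def _register(fn):
        _registry[op_name] = fn
        return fn
    return _register

@register_alter_op_layout("max_pool2d")
def alter_pooling_layout(attrs, inputs, tinfos):
    return "max_pool2d layout"

@register_alter_op_layout("avg_pool2d")
def alter_pooling_layout(attrs, inputs, tinfos):  # rebinds the same module-level name
    return "avg_pool2d layout"

print(_registry["max_pool2d"](None, None, None))  # "max_pool2d layout": the registry kept both handlers
print(alter_pooling_layout(None, None, None))     # "avg_pool2d layout": the name now refers only to the last def

Giving each handler a unique name, as this hunk does, keeps the registry behaviour unchanged while removing the shadowing.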
7 changes: 5 additions & 2 deletions nnvm/src/compiler/alter_op_layout.cc
@@ -102,8 +102,11 @@ Graph AlterOpLayout(const Graph& src) {
Symbol op;
bool do_alter =
fn_alter_op_layout(n->attrs, Symbol::CreateGroup(op_inputs), tensor_infos, &op);
if (do_alter) *ret = op.outputs;
else new_nodes[n.get()] = nid;
if (do_alter) {
*ret = op.outputs;
} else {
new_nodes[n.get()] = nid;
}
return do_alter;
};

64 changes: 32 additions & 32 deletions nnvm/src/pass/graph_annotate.h
@@ -3,38 +3,38 @@
* \file graph_annotate.h
* \brief Define rules to annotate a graph.
*/
#ifndef NNVM_PASS_GRAPH_ANNOTATE_H_
#define NNVM_PASS_GRAPH_ANNOTATE_H_
#ifndef NNVM_PASS_GRAPH_ANNOTATE_H_
#define NNVM_PASS_GRAPH_ANNOTATE_H_

#include <nnvm/graph.h>
#include <nnvm/graph.h>

#include <string>
#include <unordered_map>
#include <string>
#include <unordered_map>

namespace nnvm {
namespace nnvm {

class ManualAnnotator;
class ManualAnnotator;
/*
* This class is an abstract class that can be derived by other classes to
* implement how a node should be selected.
*/
class GraphAnnotator {
public:
explicit GraphAnnotator(int fallback_device)
class GraphAnnotator {
public:
explicit GraphAnnotator(int fallback_device)
: fallback_device_(fallback_device) {}
virtual ~GraphAnnotator() = default;
// A virtual function that is implemented by different annotation methods.
virtual int AnnotateNode(const nnvm::Node* n) const = 0;
virtual ~GraphAnnotator() = default;
// A virtual function that is implemented by different annotation methods.
virtual int AnnotateNode(const nnvm::Node* n) const = 0;

int GetFallbackDevice() const {
int GetFallbackDevice() const {
return fallback_device_;
}
}

private:
friend ManualAnnotator;
/* The fallback device. */
int fallback_device_;
};
private:
friend ManualAnnotator;
/* The fallback device. */
int fallback_device_;
};

/*
* This class defines a manual way to annotate a graph node. In this method,
@@ -43,28 +43,28 @@
* is registered with a fallback property or the operator name has not been
* saved, this node will be annotated with the fallback device.
*/
class ManualAnnotator : public GraphAnnotator {
using OpNameDeviceMap = std::unordered_map<std::string, int>;
public:
explicit ManualAnnotator(const OpNameDeviceMap& op_name_dev_map,
class ManualAnnotator : public GraphAnnotator {
using OpNameDeviceMap = std::unordered_map<std::string, int>;
public:
explicit ManualAnnotator(const OpNameDeviceMap& op_name_dev_map,
int fallback_device)
: GraphAnnotator(fallback_device),
op_name_dev_map_(new OpNameDeviceMap(op_name_dev_map)) {}

int AnnotateNode(const nnvm::Node* n) const final {
int AnnotateNode(const nnvm::Node* n) const final {
if (n->is_variable()) return 0;
if (n->op()->fallback) return fallback_device_;

return op_name_dev_map_->count(n->op()->name)
? op_name_dev_map_->at(n->op()->name)
: fallback_device_;
}
}

private:
std::unique_ptr<const OpNameDeviceMap> op_name_dev_map_;
};
private:
std::unique_ptr<const OpNameDeviceMap> op_name_dev_map_;
};

using ManualAnnotatorPtr = std::shared_ptr<ManualAnnotator>;
using ManualAnnotatorPtr = std::shared_ptr<ManualAnnotator>;

} // namespace nnvm
#endif // NNVM_PASS_GRAPH_ANNOTATE_H_
} // namespace nnvm
#endif // NNVM_PASS_GRAPH_ANNOTATE_H_
4 changes: 2 additions & 2 deletions nnvm/src/top/nn/nn.cc
@@ -194,7 +194,7 @@ inline bool BatchNormInferShape(const nnvm::NodeAttrs& attrs,
return true;
}

inline bool BatchNormCorrectLayout(NodeAttrs& attrs,
inline bool BatchNormCorrectLayout(const NodeAttrs& attrs,
std::vector<TShape>* ishapes,
std::vector<Layout> *in_layouts,
const std::vector<Layout> *last_in_layouts,
@@ -593,7 +593,7 @@ inline bool PadInferShape(const nnvm::NodeAttrs& attrs,
return true;
}

inline bool PadCorrectLayout(NodeAttrs& attrs,
inline bool PadCorrectLayout(const NodeAttrs& attrs,
std::vector<TShape>* ishapes,
std::vector<Layout>* ilayouts,
const std::vector<Layout>* last_ilayouts,
2 changes: 1 addition & 1 deletion nnvm/src/top/tensor/broadcast.cc
@@ -129,7 +129,7 @@ inline bool BinaryBroadcastShape(const nnvm::NodeAttrs& attrs,
return true;
}

inline bool BinaryBroadcastCorrectLayout(NodeAttrs& attrs,
inline bool BinaryBroadcastCorrectLayout(const NodeAttrs& attrs,
std::vector<TShape>* ishapes,
std::vector<Layout>* ilayouts,
const std::vector<Layout>* last_ilayouts,
2 changes: 1 addition & 1 deletion python/tvm/ndarray.py
@@ -7,6 +7,7 @@
from __future__ import absolute_import as _abs
import numpy as _np

from ._ffi.function import register_func
from ._ffi.ndarray import TVMContext, TVMType, NDArrayBase
from ._ffi.ndarray import context, empty, from_dlpack
from ._ffi.ndarray import _set_class_ndarray
@@ -199,7 +200,6 @@ def array(arr, ctx=cpu(0)):
return empty(arr.shape, arr.dtype, ctx).copyfrom(arr)


from ._ffi.function import register_func
@register_func("tvm.nd.random_uniform")
def random_uniform(size, dtype, target):
size = [int(x) for x in size.split()]
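Reviewer note: the only change here is that register_func is now imported with the other ._ffi imports at the top of the module instead of immediately above the decorated function. For reference, the decorator publishes a Python callable into TVM's global function registry so that compiled code and other runtimes can call it by name. A small self-contained sketch of that pattern (assumes a TVM installation; the name demo.add_one is made up for illustration):

import tvm

@tvm.register_func("demo.add_one")
def add_one(x):
    # plain Python callback exposed through TVM's global function registry
    return x + 1

f = tvm.get_global_func("demo.add_one")  # look the callback up by its registered name
print(f(41))  # 42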
2 changes: 1 addition & 1 deletion src/contrib/subgraph/tensorrt_executor.cc
@@ -9,13 +9,13 @@
#include <dmlc/parameter.h>
#include <dmlc/timer.h>
#include <unordered_set>
#include <cmath>
#include <functional>
#include <iostream>
#include <sstream>
#include "./subgraph.h"
#include "./tensorrt_executor.h"
#include "../../runtime/cuda/cuda_common.h"
#include <cmath>

namespace tvm {
namespace contrib {
1 change: 0 additions & 1 deletion topi/python/topi/cuda/pooling.py
@@ -3,7 +3,6 @@
import tvm
from .. import tag
from .. import generic
from ..nn.pooling import *

@generic.schedule_global_pool.register(["cuda", "gpu"])
def schedule_global_pool(outs):
6 changes: 5 additions & 1 deletion topi/python/topi/nn/pooling.py
@@ -1,8 +1,8 @@
"""TVM operator pooling compute."""
from __future__ import absolute_import
import tvm
from .. import cpp

import tvm

POOL_TYPE_CODE = {
"avg": 0,
@@ -102,6 +102,7 @@ def pool(data,

@tvm.target.generic_func
def max_pool2d_alter_layout(attrs, inputs, tinfos):
#pylint: disable=unused-argument
"""Change max pool2d layout.

Parameters
@@ -119,6 +120,7 @@ def max_pool2d_alter_layout(attrs, inputs, tinfos):

@tvm.target.generic_func
def avg_pool2d_alter_layout(attrs, inputs, tinfos):
#pylint: disable=unused-argument
"""Change average pool2d layout.

Parameters
@@ -136,6 +138,7 @@ def avg_pool2d_alter_layout(attrs, inputs, tinfos):

@tvm.target.generic_func
def global_max_pool2d_alter_layout(attrs, inputs, tinfos):
#pylint: disable=unused-argument
"""Change global max pool2d layout.

Parameters
@@ -153,6 +156,7 @@ def global_max_pool2d_alter_layout(attrs, inputs, tinfos):

@tvm.target.generic_func
def global_avg_pool2d_alter_layout(attrs, inputs, tinfos):
#pylint: disable=unused-argument
"""Change global average pool2d layout.

Parameters
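Reviewer note: the *_alter_layout functions above are @tvm.target.generic_func fallbacks, so their arguments are intentionally unused and the added pylint disables simply silence the resulting warnings; the related cuda/pooling.py hunk drops a wildcard import that pylint also flags. For reference, a generic function dispatches on whichever target is currently in scope, which is why the nn.py handlers wrap the call in tvm.target.create(...). A short sketch of the override pattern (assumes a TVM installation; my_alter_layout is an illustrative name, not part of topi):

import tvm

@tvm.target.generic_func
def my_alter_layout(attrs, inputs, tinfos):  # pylint: disable=unused-argument
    """Generic fallback: leave the layout unchanged."""
    return None

@my_alter_layout.register(["cuda", "gpu"])
def _my_alter_layout_cuda(attrs, inputs, tinfos):  # pylint: disable=unused-argument
    # a target-specific override would rewrite the op here
    return None

with tvm.target.create("llvm"):
    my_alter_layout(None, None, None)  # no "llvm" override registered, so the generic fallback runs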