
Commit 212df81
Merge pull request #13 from MichaelJKlaiber/uma
UMAv1.0 updates according to comments and suggestions from PR apache#12087
MichaelJKlaiber authored Jul 27, 2022
2 parents bc211fd + 0b5b6b0 commit 212df81
Showing 24 changed files with 489 additions and 315 deletions.
9 changes: 4 additions & 5 deletions tests/scripts/task_python_uma.sh → apps/uma/_template/__init__.py
100755 → 100644
@@ -1,4 +1,3 @@
-#!/usr/bin/env bash
 # Licensed to the Apache Software Foundation (ASF) under one
 # or more contributor license agreements. See the NOTICE file
 # distributed with this work for additional information
@@ -15,10 +14,10 @@
 # KIND, either express or implied. See the License for the
 # specific language governing permissions and limitations
 # under the License.
-set -euxo pipefail
-source tests/scripts/setup-pytest-env.sh
-run_pytest ctypes test_uma tests/python/contrib/test_uma
-run_pytest cython3 test_uma tests/python/contrib/test_uma
+"""
+Template files for UMA tutorial
+Do not import
+"""
apps/uma/_template/backend.py

@@ -15,11 +15,11 @@
 # specific language governing permissions and limitations
 # under the License.
 """UMA backend for the my_ai_hw accelerator"""
-from .passes import MyAiHwConv2dPass
-from ..api.utils import PassPhase
-from ..backend import UMABackend
-from .codegen import gen_includes, gen_replace_call_extern
-from .patterns import conv2d_pattern
+from passes import MyAiHwConv2dPass
+from tvm.relay.backend.contrib.uma.api.utils import PassPhase
+from tvm.relay.backend.contrib.uma.backend import UMABackend
+from codegen import gen_includes
+from patterns import conv2d_pattern
 
 
 class MyAiHwBackend(UMABackend):
@@ -28,24 +28,16 @@ class MyAiHwBackend(UMABackend):
     def __init__(self):
         super().__init__()
 
         #######################################################################
         # Target configuration
         #######################################################################
         self._register_target_attr("dimension")
 
-        #######################################################################
-        # Relay to Relay function registration
-        #######################################################################
+        # Relay Pattern registration
         self._register_pattern("conv2d", conv2d_pattern())
 
-        #######################################################################
-        # Relay to TIR function registration
-        #######################################################################
         self._register_tir_pass(PassPhase.TIR_PHASE_0, MyAiHwConv2dPass())
 
-        #######################################################################
-        # TIR to runtime function registration
-        #######################################################################
         self._register_codegen(fmt="c", includes=gen_includes)
 
     @property
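For orientation, here is a minimal sketch of how such a backend is driven end to end. The Relay module built here is illustrative, the register/partition calls follow the usage visible in run.py below, and the target name "my_ai_hw" is an assumption based on this backend's naming:

import tvm
from tvm import relay
from backend import MyAiHwBackend  # flat import; assumes cwd is apps/uma/_template

# Build a small Relay module with a stride-(1, 1) conv2d for the pattern to find.
data = relay.var("data", shape=(1, 32, 14, 14), dtype="float32")
weight = relay.var("weight", shape=(32, 32, 3, 3), dtype="float32")
out = relay.nn.conv2d(data, weight, padding=(1, 1))
mod = tvm.IRModule.from_expr(relay.Function([data, weight], out))

uma_backend = MyAiHwBackend()
uma_backend.register()            # hooks in the target attr, pattern, TIR pass, and codegen
mod = uma_backend.partition(mod)  # outlines matched conv2d regions for the accelerator
target = tvm.target.Target("my_ai_hw", host=tvm.target.Target("c"))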
apps/uma/_template/codegen.py

@@ -26,7 +26,3 @@ def gen_includes() -> str:
     includes = ""
     includes += f'#include "{topdir}/conv2dnchw.cc"'
     return includes
-
-
-def gen_replace_call_extern(args: tvm.ir.container.Array) -> str:
-    return "my_custom_api_function({}, {}, {})".format(*args)
apps/uma/_template/conv2dnchw.cc

@@ -21,6 +21,20 @@
 #ifdef __cplusplus
 extern "C"
 #endif
+
+/*!
+ * \brief Conv2D function for mock-accelerator examples. Limited to same-padded
+ * Conv2D with stride (1,1) and datatype float.
+ * \param ifmap Pointer to input feature map data of size iw*ih*ic*sizeof(float).
+ * \param weights Pointer to weight data of size kh*kw*ic*oc*sizeof(float).
+ * \param result Pointer to output feature map data of size iw*ih*oc*sizeof(float).
+ * \param oc Number of channels of output feature map.
+ * \param iw Width of input feature map, ifmap.
+ * \param ih Height of input feature map, ifmap.
+ * \param ic Number of channels of input feature map.
+ * \param kh Height of convolution kernels.
+ * \param kw Width of convolution kernels.
+ *
+ * \return error code
+ */
 int
 my_ai_hw_conv2dnchw(float* ifmap, float* weights, float* result, int oc, int iw, int ih, int ic,
                     int kh, int kw) {
@@ -33,6 +47,8 @@ extern "C"
   int padded_iw = iw + 2 * kw_low;
   int padded_ih = ih + 2 * kh_low;
 
+  // This is only example code. A real hardware accelerator would call a
+  // device-specific malloc function.
   float* pad_temp = (float*)malloc(
       (((ic * padded_iw * padded_ih) + (padded_ih * padded_iw)) + padded_iw) * sizeof(float));

@@ -71,6 +87,9 @@ extern "C"
       }
     }
   }
+
+  // This is only example code. A real hardware accelerator would call a
+  // device-specific free function.
   free(pad_temp);
   return 0;
 }
 }
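As a cross-check of the documented semantics (same padding, stride (1,1), float NCHW data), here is a NumPy reference with the same parameter meanings. It is an illustration only, assuming odd kernel sizes as in the 3x3 tutorial case:

import numpy as np

def conv2dnchw_ref(ifmap, weights, oc, iw, ih, ic, kh, kw):
    # ifmap: (ic, ih, iw), weights: (oc, ic, kh, kw); returns (oc, ih, iw).
    pad_h, pad_w = kh // 2, kw // 2
    padded = np.zeros((ic, ih + 2 * pad_h, iw + 2 * pad_w), dtype=np.float32)
    padded[:, pad_h:pad_h + ih, pad_w:pad_w + iw] = ifmap
    result = np.zeros((oc, ih, iw), dtype=np.float32)
    for o in range(oc):
        for y in range(ih):
            for x in range(iw):
                # Dot product of one kernel window against all input channels.
                result[o, y, x] = np.sum(padded[:, y:y + kh, x:x + kw] * weights[o])
    return result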
apps/uma/_template/passes.py

@@ -17,7 +17,7 @@
 """Transform passes for the my_ai_hw accelerator"""
 
 import tvm
-from tvm import relay, tir
+from tvm import tir
 from tvm.relay.backend.contrib.uma.api.utils import add_llvm_to_block
 
 
@@ -30,14 +30,13 @@ def transform_function(
 
     @staticmethod
     def _my_ai_hw_conv2d_pass(func, mod, ctx):
-        _found_blocks = []
         _loops = dict()
         _handles = []
        _entry_node = None
         _external_function_name = "my_ai_hw_conv2dnchw"
         _tvm_block_match_name = "conv2d_nchw"
 
-        def _has_block(name: str, func) -> bool:
+        def _has_block(name: str, func: tvm.tir.PrimFunc) -> bool:
             """
             Determine if a tir.block with `name` exists in `func`
             """
@@ -50,7 +49,7 @@ def _hb(op):
             tvm.tir.stmt_functor.post_order_visit(func.body, _hb)
             return name in _found_blocks
 
-        def _transform_function(
+        def _detect_and_replace_conv2d(
             func: tvm.tir.PrimFunc, mod: tvm.ir.IRModule, ctx: tvm.ir.transform.PassContext
         ) -> tvm.tir.PrimFunc:
             def _replace_conv2d(op):
@@ -67,10 +66,13 @@ def _replace_conv2d(op):
                 external_call = tvm.tir.Evaluate(
                     tir_call(irb, True, _external_function_name, *args)
                 )
-                mac_calls = tvm.tir.SeqStmt([external_call])
-                irb.emit(mac_calls)
+                ext_calls = tvm.tir.SeqStmt([external_call])
+                irb.emit(ext_calls)
                 irb_result = irb.get()
                 return irb_result
+            elif isinstance(op, tvm.tir.SeqStmt):
+                # Remove the pad block of TOPI's conv2d NCHW by returning only the 2nd statement
+                return op.seq[1]
             return op
 
             sch = tir.Schedule(func)
@@ -92,12 +94,14 @@ def _replace_conv2d(op):
                 _loops = {k: sch.get(v) for k, v in loops.items()}
                 _handles = func.buffer_map.items()
 
-                x = tvm.tir.stmt_functor.ir_transform(func.body, None, _replace_conv2d, ["tir.For"])
+                x = tvm.tir.stmt_functor.ir_transform(
+                    func.body, None, _replace_conv2d, ["tir.For", "tir.SeqStmt"]
+                )
                 return func.with_body(x)
             else:
                 return func
 
-        r = _transform_function(func, mod, ctx)
+        r = _detect_and_replace_conv2d(func, mod, ctx)
         return r


@@ -118,7 +122,7 @@ def buf_from_array(ib, arr, dtype):
     var = ib.allocate("int32", (len(arr),), scope="global")
     for i, v in enumerate(arr):
         var[i] = v
-    # Declare a buffer, which is basically a view on the chunk of memory that we allocated previously
+    # Declare a buffer, which is basically a view on the chunk of memory that we allocated
     buf = tvm.tir.decl_buffer((len(arr),), dtype, data=var, scope="global")
     return buf

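The body of _has_block is collapsed in this view; a self-contained sketch of the same idea (reconstructed from the visible fragment, not copied from the commit):

import tvm

def has_block(name: str, func: tvm.tir.PrimFunc) -> bool:
    """Determine if a tir.Block with `name` exists in `func` (sketch)."""
    found_blocks = []

    def _hb(op):
        # post_order_visit walks every node of the TIR body once; collect block names.
        if isinstance(op, tvm.tir.Block):
            found_blocks.append(op.name_hint)

    tvm.tir.stmt_functor.post_order_visit(func.body, _hb)
    return name in found_blocks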
apps/uma/_template/patterns.py

@@ -16,19 +16,10 @@
 # under the License.
 """Relay graph patterns for the my_ai_hw accelerator"""
 
-from tvm.relay.dataflow_pattern import is_op, wildcard, has_attr
+from tvm.relay.dataflow_pattern import is_op, wildcard
 
 
 def conv2d_pattern():
     pattern = is_op("nn.conv2d")(wildcard(), wildcard())
-    pattern = pattern.has_attr({"strides": [1, 1]})
-    return pattern
-
-
-def dense_pattern():
-    pattern = is_op("nn.dense")(wildcard(), wildcard())
-    pattern = pattern.optional(
-        lambda x: is_op("nn.bias_add")(x, wildcard()) | is_op("add")(x, wildcard())
-    )
-    pattern = pattern.optional(lambda x: is_op("nn.relu")(x))
+    pattern = pattern.has_attr({"strides": [1, 1], "groups": 1})
     return pattern
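A quick way to see what the tightened pattern accepts and rejects (a sketch using stock Relay; the shapes are arbitrary):

from tvm import relay
from tvm.relay.dataflow_pattern import is_op, wildcard

pattern = is_op("nn.conv2d")(wildcard(), wildcard())
pattern = pattern.has_attr({"strides": [1, 1], "groups": 1})

data = relay.var("data", shape=(1, 32, 14, 14), dtype="float32")
weight = relay.var("weight", shape=(32, 32, 3, 3), dtype="float32")

stride1 = relay.nn.conv2d(data, weight, strides=(1, 1), padding=(1, 1))
stride2 = relay.nn.conv2d(data, weight, strides=(2, 2), padding=(1, 1))

print(pattern.match(stride1))  # True: stride (1, 1) and groups == 1
print(pattern.match(stride2))  # False: stride (2, 2) is rejected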
apps/uma/_template/run.py

@@ -15,39 +15,33 @@
 # specific language governing permissions and limitations
 # under the License.
 from tvm.micro.testing.aot_test_utils import AOT_DEFAULT_RUNNER
 
-from tvm.testing.aot import compile_and_run, AOTTestModel, AOTTestRunner
 
 import tvm
 from tvm import relay
-from tvm.relay.backend.contrib.uma._template.backend import MyAiHwBackend
+from backend import MyAiHwBackend
 from tvm.relay import transform
 from collections import OrderedDict
 
 import numpy as np
-import tarfile
-from pathlib import Path
-import onnx
-
-
 from tvm.testing.aot import (
-    AOTTestModel,
-    AOTTestRunner,
+    AOTTestModel as AOTModel,
+    AOTTestRunner as AOTRunner,
     generate_ref_data,
     compile_and_run,
 )
 
 
-def create_conv2d(groups=1, test_runner=AOT_DEFAULT_RUNNER, weight_shape=32):
+def create_conv2d(groups=1, runner=AOT_DEFAULT_RUNNER, weight_shape=32):
     dtype = "float32"
     ishape = (1, 32, 14, 14)
     wshape = (32, weight_shape, 3, 3)
     pass_config = {"tir.usmp.enable": True}
-    test_runner = AOTTestRunner(
-        makefile=test_runner.makefile,
-        prologue=test_runner.prologue,
-        epilogue=test_runner.epilogue,
-        includes=test_runner.includes,
-        parameters=test_runner.parameters,
+    runner = AOTRunner(
+        makefile=runner.makefile,
+        prologue=runner.prologue,
+        epilogue=runner.epilogue,
+        includes=runner.includes,
+        parameters=runner.parameters,
         pass_config=pass_config,
     )
     data0 = relay.var("data", shape=ishape, dtype=dtype)
@@ -61,11 +55,11 @@ def create_conv2d(groups=1, test_runner=AOT_DEFAULT_RUNNER, weight_shape=32):
     w1_data = np.random.uniform(0, 1, wshape).astype(dtype)
     inputs = OrderedDict([("data", i_data), ("weight", w1_data)])
     output_list = generate_ref_data(mod, inputs)
-    return mod, inputs, output_list, test_runner
+    return mod, inputs, output_list, runner
 
 
 def main():
-    mod, inputs, output_list, test_runner = create_conv2d()
+    mod, inputs, output_list, runner = create_conv2d()
 
     uma_backend = MyAiHwBackend()
     uma_backend.register()
@@ -75,8 +69,8 @@ def main():
     export_directory = tvm.contrib.utils.tempdir(keep_for_debug=True).path
     print(f"Generated files are in {export_directory}")
     compile_and_run(
-        AOTTestModel(module=mod, inputs=inputs, outputs=output_list),
-        test_runner,
+        AOTModel(module=mod, inputs=inputs, outputs=output_list),
+        runner,
         interface_api="c",
         use_unpacked_api=True,
         target=target,
apps/uma/_template/strategies.py

@@ -15,12 +15,3 @@
 # specific language governing permissions and limitations
 # under the License.
 """Strategies for the my_ai_hw accelerator"""
-
-
-from tvm import relay, te
-from tvm.relay.op import op as _op
-from tvm.topi.utils import get_const_tuple
-from tvm.topi.nn.utils import get_pad_tuple1d
-from tvm.relay.op.strategy.generic import wrap_compute_conv1d, wrap_topi_schedule
-
-import logging
apps/uma/uma_cli.py

@@ -26,6 +26,7 @@
 import os
 import shutil
 import sys
+import pathlib
 from inflection import camelize, underscore
 
 
@@ -66,14 +67,17 @@ def main():
     """
     args = _parse_args()
     add_hw_name = args.add_hardware
-    add_hw_path = os.path.join(os.getcwd(), add_hw_name)
+    uma_template_path = pathlib.Path(os.getcwd(), "_template").absolute()
+
+    add_hw_path = os.path.join(uma_template_path.parent, add_hw_name)
     if os.path.exists(add_hw_path):
-        print(f"Hardware with name {add_hw_name} already exists in UMA file structure")
+        print(
+            f"Hardware with name {add_hw_name} already exists in UMA file structure: {add_hw_path}"
+        )
         sys.exit(-1)
     else:
-        os.mkdir(add_hw_name)
+        os.mkdir(add_hw_path)
 
-    uma_template_path = "_template"
     uma_files = ["backend.py", "codegen.py", "passes.py", "patterns.py", "run.py", "strategies.py"]
     if args.tutorial == "vanilla":
         uma_files.append("conv2dnchw.cc")
@@ -87,6 +91,8 @@ def main():
     template_name = "my_ai_hw"
     replace_template_name(destination_files, template_name, add_hw_name)
 
+    print(f"Success: added {add_hw_name} to {add_hw_path}")
+
 
 if __name__ == "__main__":
     main()
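With this change the CLI is meant to be run from apps/uma, e.g. python uma_cli.py --add_hardware my_accelerator --tutorial vanilla (flag names inferred from the args.add_hardware and args.tutorial accesses above). The new hardware directory is now created next to _template rather than in the caller's working directory, and the script reports the created path on success.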
3 changes: 3 additions & 0 deletions cmake/config.cmake
@@ -296,6 +296,9 @@ set(USE_VTA_FPGA OFF)
 # Whether use Thrust
 set(USE_THRUST OFF)
 
+# Whether use cuRAND
+set(USE_CURAND OFF)
+
 # Whether to build the TensorFlow TVMDSOOp module
 set(USE_TF_TVMDSOOP OFF)
 
1 change: 1 addition & 0 deletions docs/conf.py
@@ -264,6 +264,7 @@ def git_describe_version(original_version):
         "topi.pi",
         "cross_compilation_and_rpc.py",
         "relay_quick_start.py",
+        "uma.py",
     ],
     "compile_models": [
         "from_pytorch.py",