Fix remaining linter errors
cgerum authored and MichaelJKlaiber committed Jul 18, 2022
1 parent 8992eaa commit ca6796d
Showing 6 changed files with 98 additions and 49 deletions.
16 changes: 16 additions & 0 deletions python/tvm/relay/backend/contrib/uma/__init__.py
@@ -0,0 +1,16 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
22 changes: 14 additions & 8 deletions python/tvm/relay/backend/contrib/uma/_template/conv2dnchw.cc
@@ -21,8 +21,9 @@
#ifdef __cplusplus
extern "C"
#endif
int my_ai_hw_conv2dnchw(float* ifmap, float* weights, float* result,
int oc, int iw, int ih, int ic, int kh, int kw) {
int
my_ai_hw_conv2dnchw(float* ifmap, float* weights, float* result, int oc, int iw, int ih, int ic,
int kh, int kw) {

int kw_low = kw / 2;
int kh_low = kh / 2;
@@ -32,7 +33,8 @@ int my_ai_hw_conv2dnchw(float* ifmap, float* weights, float* result,
int padded_iw = iw + 2 * kw_low;
int padded_ih = ih + 2 * kh_low;

float* pad_temp = (float*) malloc((((ic * padded_iw * padded_ih) + (padded_ih * padded_iw)) + padded_iw) * sizeof(float));
float* pad_temp = (float*)malloc(
(((ic * padded_iw * padded_ih) + (padded_ih * padded_iw)) + padded_iw) * sizeof(float));

if (pad_temp == NULL) {
return -1;
@@ -42,7 +44,9 @@ int my_ai_hw_conv2dnchw(float* ifmap, float* weights, float* result,
for (int i2 = 0; i2 < padded_ih; ++i2) {
for (int i3 = 0; i3 < padded_iw; ++i3) {
((float*)pad_temp)[(((i1 * padded_iw * padded_ih) + (i2 * padded_iw)) + i3)] =
(((((kh_low <= i2) && (i2 < kh_high)) && (kw_low <= i3)) && (i3 < kw_high)) ? ifmap[((((i1 * iw * ih) + ((i2-kh_low) * iw)) + i3 - kw_low) )] : 0.000000e+00f);
(((((kh_low <= i2) && (i2 < kh_high)) && (kw_low <= i3)) && (i3 < kw_high))
? ifmap[((((i1 * iw * ih) + ((i2 - kh_low) * iw)) + i3 - kw_low))]
: 0.000000e+00f);
}
}
}
@@ -52,13 +56,15 @@ int my_ai_hw_conv2dnchw(float* ifmap, float* weights, float* result,
for (int i4 = 0; i4 < ic; ++i4) {
for (int i5 = 0; i5 < kh; ++i5) {
for (int i6 = 0; i6 < kw; ++i6) {
int cse_var_1 = (((i11 * iw*ih) + (i21 * iw)) + i31);
int cse_var_1 = (((i11 * iw * ih) + (i21 * iw)) + i31);
if (((i4 == 0) && (i5 == 0)) && (i6 == 0)) {
result[cse_var_1] = 0.000000e+00f;
}
result[cse_var_1] = (result[cse_var_1]
+ (((float*)pad_temp)[i4 * padded_iw * padded_ih + (i21+i5) * padded_iw + i31 + i6]
* weights[((((i11 * ic * kh * kw) + (i4 * kh * kw)) + (i5 * kw)) + i6)]));
result[cse_var_1] =
(result[cse_var_1] +
(((float*)
pad_temp)[i4 * padded_iw * padded_ih + (i21 + i5) * padded_iw + i31 + i6] *
weights[((((i11 * ic * kh * kw) + (i4 * kh * kw)) + (i5 * kw)) + i6)]));
}
}
}
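
For context, my_ai_hw_conv2dnchw (the kernel whose formatting this commit fixes) is a zero-padded NCHW convolution in plain C that returns -1 when the temporary padding buffer cannot be allocated. The snippet below is a hypothetical smoke test, not part of the commit; the shapes, the all-ones data, and the assumption that the output keeps the input's spatial size are illustrative only.

// Hypothetical smoke test; compile and link together with conv2dnchw.cc.
#include <cstdio>
#include <vector>

extern "C" int my_ai_hw_conv2dnchw(float* ifmap, float* weights, float* result, int oc, int iw,
                                   int ih, int ic, int kh, int kw);

int main() {
  const int oc = 1, ic = 1, ih = 5, iw = 5, kh = 3, kw = 3;
  std::vector<float> ifmap(ic * ih * iw, 1.0f);
  std::vector<float> weights(oc * ic * kh * kw, 1.0f);
  std::vector<float> result(oc * ih * iw, 0.0f);  // assumes output keeps the input H x W
  if (my_ai_hw_conv2dnchw(ifmap.data(), weights.data(), result.data(), oc, iw, ih, ic, kh, kw) != 0) {
    std::printf("padding buffer allocation failed\n");
    return 1;
  }
  // With all-ones data and zero padding, the top-left output sees 4 of the 9 taps,
  // while a fully interior output sees all 9.
  std::printf("corner = %.1f (expect 4.0), center = %.1f (expect 9.0)\n", result[0],
              result[2 * iw + 2]);
  return 0;
}
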
17 changes: 17 additions & 0 deletions python/tvm/relay/backend/contrib/uma/tutorial.md
@@ -1,3 +1,20 @@
<!--- Licensed to the Apache Software Foundation (ASF) under one -->
<!--- or more contributor license agreements. See the NOTICE file -->
<!--- distributed with this work for additional information -->
<!--- regarding copyright ownership. The ASF licenses this file -->
<!--- to you under the Apache License, Version 2.0 (the -->
<!--- "License"); you may not use this file except in compliance -->
<!--- with the License. You may obtain a copy of the License at -->

<!--- http://www.apache.org/licenses/LICENSE-2.0 -->

<!--- Unless required by applicable law or agreed to in writing, -->
<!--- software distributed under the License is distributed on an -->
<!--- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -->
<!--- KIND, either express or implied. See the License for the -->
<!--- specific language governing permissions and limitations -->
<!--- under the License. -->

Making your hardware accelerator TVM-ready with UMA
=============================================

7 changes: 4 additions & 3 deletions src/relay/backend/contrib/uma/relay_to_tir.cc
@@ -146,8 +146,8 @@ tvm::transform::Pass OutlineCompilerFunctions(const std::string& compiler_name)
}
return mod;
};
return tvm::transform::CreateModulePass(
pass_func, 0, "relay.backend.contrib.uma.OutlineCompilerFunctions", {});
return tvm::transform::CreateModulePass(pass_func, 0,
"relay.backend.contrib.uma.OutlineCompilerFunctions", {});
}

TVM_REGISTER_GLOBAL("relay.ext.uma.OutlineCompilerFunctions")
@@ -159,7 +159,8 @@ TVM_REGISTER_GLOBAL("relay.ext.uma.OutlineCompilerFunctions")
tvm::transform::Pass RelayToTIR(String target_name) {
runtime::TypedPackedFunc<IRModule(IRModule, transform::PassContext)> pass_func =
[=](IRModule ir_module, transform::PassContext pass_context) {
auto relay_to_tir_pf = tvm::runtime::Registry::Get("relay.ext.uma." + target_name + ".relay_to_tir");
auto relay_to_tir_pf =
tvm::runtime::Registry::Get("relay.ext.uma." + target_name + ".relay_to_tir");
ICHECK(relay_to_tir_pf);
ir_module = (*relay_to_tir_pf)(ir_module);
return ir_module;
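
The RelayToTIR pass above only looks up a packed function named relay.ext.uma.<target_name>.relay_to_tir and applies it to the IRModule. The following sketch, not part of the commit, shows one way such a hook could be provided from C++; the target name my_target is invented, and in the UMA flow this function is normally registered from the Python side.

// Sketch only: providing the hook that RelayToTIR resolves through the registry.
#include <tvm/ir/module.h>
#include <tvm/runtime/registry.h>

TVM_REGISTER_GLOBAL("relay.ext.uma.my_target.relay_to_tir")
    .set_body_typed([](tvm::IRModule mod) -> tvm::IRModule {
      // A real hook would lower the partitioned Relay functions to TIR here;
      // returning the module unchanged keeps the sketch minimal.
      return mod;
    });
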
68 changes: 37 additions & 31 deletions src/relay/backend/contrib/uma/targets.cc
@@ -31,44 +31,50 @@ namespace tvm {
namespace relay {
namespace contrib {
namespace uma {
tvm::transform::Pass RelayToTIR(String target_name);
runtime::Module TIRToRuntime(IRModule mod, Target target);
tvm::transform::Pass RelayToTIR(String target_name);
runtime::Module TIRToRuntime(IRModule mod, Target target);
} // namespace uma
} // namespace contrib
} // namespace relay

TVM_REGISTER_GLOBAL("relay.backend.contrib.uma.RegisterTarget")
.set_body_typed([](String target_name, Map<String, ObjectRef> attr_options){
auto target_kind = ::tvm::TargetKindRegEntry::RegisterOrGet(target_name)
.set_name()
.set_device_type(kDLCPU)
.add_attr_option<Array<String>>("keys")
.add_attr_option<String>("tag")
.add_attr_option<String>("device")
.add_attr_option<String>("model")
.add_attr_option<Array<String>>("libs")
.add_attr_option<Target>("host")
.add_attr_option<Integer>("from_device")
.set_attr<FTVMRelayToTIR>("RelayToTIR", relay::contrib::uma::RelayToTIR(target_name))
.set_attr<FTVMTIRToRuntime>("TIRToRuntime", relay::contrib::uma::TIRToRuntime);
.set_body_typed([](String target_name, Map<String, ObjectRef> attr_options) {
auto target_kind =
::tvm::TargetKindRegEntry::RegisterOrGet(target_name)
.set_name()
.set_device_type(kDLCPU)
.add_attr_option<Array<String>>("keys")
.add_attr_option<String>("tag")
.add_attr_option<String>("device")
.add_attr_option<String>("model")
.add_attr_option<Array<String>>("libs")
.add_attr_option<Target>("host")
.add_attr_option<Integer>("from_device")
.set_attr<FTVMRelayToTIR>(tvm::attr::kRelayToTIR,
relay::contrib::uma::RelayToTIR(target_name))
.set_attr<FTVMTIRToRuntime>("TIRToRuntime", relay::contrib::uma::TIRToRuntime);

for (auto &attr_option : attr_options) {
try {
target_kind.add_attr_option<String>(attr_option.first, Downcast<String>(attr_option.second));
continue;
} catch (...) {}
try {
target_kind.add_attr_option<Bool>(attr_option.first, Downcast<Bool>(attr_option.second));
continue;
} catch (...) {}
try {
target_kind.add_attr_option<Integer>(attr_option.first, Downcast<Integer>(attr_option.second));
continue;
} catch (...) {
LOG(FATAL) << "Attribute option of type " << attr_option.second->GetTypeKey()
<< " can not be added. Only String, Integer, or Bool are supported.";
}
for (auto& attr_option : attr_options) {
try {
target_kind.add_attr_option<String>(attr_option.first,
Downcast<String>(attr_option.second));
continue;
} catch (...) {
}
try {
target_kind.add_attr_option<Bool>(attr_option.first, Downcast<Bool>(attr_option.second));
continue;
} catch (...) {
}
try {
target_kind.add_attr_option<Integer>(attr_option.first,
Downcast<Integer>(attr_option.second));
continue;
} catch (...) {
LOG(FATAL) << "Attribute option of type " << attr_option.second->GetTypeKey()
<< " can not be added. Only String, Integer, or Bool are supported.";
}
}
});

} // namespace tvm
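
RegisterTarget creates a new target kind at runtime and forwards user-supplied attribute options, rejecting anything that is not a String, Integer, or Bool. Below is a hedged sketch of invoking it through the global registry from C++; the target name and both options are invented, and in practice the UMA Python backend performs this call.

// Sketch only: calling the RegisterTarget global registered above.
#include <tvm/ir/expr.h>  // tvm::Integer
#include <tvm/runtime/container/map.h>
#include <tvm/runtime/container/string.h>
#include <tvm/runtime/logging.h>
#include <tvm/runtime/registry.h>

void RegisterMyAcceleratorTarget() {
  const tvm::runtime::PackedFunc* register_target =
      tvm::runtime::Registry::Get("relay.backend.contrib.uma.RegisterTarget");
  ICHECK(register_target != nullptr);
  tvm::Map<tvm::String, tvm::ObjectRef> attr_options;
  attr_options.Set("variant", tvm::String("conv2d-only"));  // forwarded as a String option
  attr_options.Set("vector_width", tvm::Integer(8));        // forwarded as an Integer option
  (*register_target)("my_ai_hw_accelerator", attr_options);
}
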
17 changes: 10 additions & 7 deletions src/relay/backend/contrib/uma/tir_to_runtime.cc
@@ -35,10 +35,11 @@ namespace uma {

class UMACodegen : public codegen::CodeGenCHost {
public:
UMACodegen(String target_str) : target_str_(target_str) {}
explicit UMACodegen(String target_str) : target_str_(target_str) {}

void Init(bool output_ssa, bool emit_asserts) {
auto includes_pf = tvm::runtime::Registry::Get("relay.ext.uma.codegen_c_includes_" + target_str_);
auto includes_pf =
tvm::runtime::Registry::Get("relay.ext.uma.codegen_c_includes_" + target_str_);
ICHECK(includes_pf);
String includes = (*includes_pf)();
decl_stream << includes;
@@ -65,13 +66,15 @@ class UMACodegen : public codegen::CodeGenCHost {
CodeGenCHost::VisitExpr_(op, os);
return;
}
auto replace_call_extern_pf = tvm::runtime::Registry::Get("relay.ext.uma.codegen_c_replace_call_extern_" + target_str_);
auto replace_call_extern_pf =
tvm::runtime::Registry::Get("relay.ext.uma.codegen_c_replace_call_extern_" + target_str_);
if (replace_call_extern_pf == nullptr) {
CodeGenCHost::VisitExpr_(op, os);
} else {
// TODO:
// - function type (void) still gets printed before CallNode if extern call is wrapped in EvaluateNode
// - VarNode arguments might have "wrong" name_hints. The correct variable name is determined in C++ through GetVarID
// - function type (void) still gets printed before CallNode if extern call is wrapped in
// EvaluateNode
// - VarNode arguments might have "wrong" name_hints. The correct variable name is determined
// in C++ through GetVarID
String api_string = (*replace_call_extern_pf)(op->args);
os << api_string;
}
@@ -82,7 +85,7 @@ class UMACodegen : public codegen::CodeGenCHost {
runtime::Module TIRToRuntime(IRModule mod, Target target) {
bool output_ssa = false;
bool emit_asserts = false;
UMACodegen codegen (target->kind->name);
UMACodegen codegen(target->kind->name);
Array<String> function_names;
codegen.Init(output_ssa, emit_asserts);
for (auto kv : mod->functions) {
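
UMACodegen extends CodeGenCHost and pulls target-specific pieces from the registry: an include block via relay.ext.uma.codegen_c_includes_<target> and an optional extern-call rewriter via relay.ext.uma.codegen_c_replace_call_extern_<target>. A minimal sketch of the includes hook follows; it is not part of the commit, and the target suffix is invented.

// Sketch only: the include block that UMACodegen::Init splices into decl_stream.
#include <tvm/runtime/container/string.h>
#include <tvm/runtime/registry.h>

TVM_REGISTER_GLOBAL("relay.ext.uma.codegen_c_includes_my_ai_hw_accelerator")
    .set_body_typed([]() -> tvm::runtime::String {
      return tvm::runtime::String("#include \"conv2dnchw.cc\"\n");
    });
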
