From b9b74d5d91f3beb5e1730eb14bea01e2a682daca Mon Sep 17 00:00:00 2001 From: Anirudh Sundar Date: Tue, 26 Jul 2022 20:51:08 +0530 Subject: [PATCH] [Hexagon] Add Hand written HVX conv2d --- cmake/modules/Hexagon.cmake | 13 + src/runtime/hexagon/ops/conv2d.h | 144 ++++++ src/runtime/hexagon/ops/conv2d_hvx.cc | 468 ++++++++++++++++++ src/runtime/hexagon/ops/conv_utils.cc | 191 +++++++ .../topi/test_conv2d_fp16_intrin.py | 256 ++++++++++ 5 files changed, 1072 insertions(+) create mode 100644 src/runtime/hexagon/ops/conv2d.h create mode 100644 src/runtime/hexagon/ops/conv2d_hvx.cc create mode 100644 src/runtime/hexagon/ops/conv_utils.cc create mode 100644 tests/python/contrib/test_hexagon/topi/test_conv2d_fp16_intrin.py diff --git a/cmake/modules/Hexagon.cmake b/cmake/modules/Hexagon.cmake index c08ea5eb1df14..884c2d5c42652 100644 --- a/cmake/modules/Hexagon.cmake +++ b/cmake/modules/Hexagon.cmake @@ -172,6 +172,19 @@ if(BUILD_FOR_HEXAGON) list(APPEND TVM_RUNTIME_LINKER_LIBS -Wl,--whole-archive ${USE_HEXAGON_SDK}/libs/qhl/prebuilt/hexagon_toolv84_v68/libqhmath.a -Wl,--no-whole-archive) endif() + + # Hand-written ops + file_glob_append(RUNTIME_HEXAGON_SRCS + "${TVMRT_SOURCE_DIR}/hexagon/ops/*.cc" + ) + include_directories( + "${TVMRT_SOURCE_DIR}/hexagon/ops" + ) + + set_source_files_properties( + "${TVMRT_SOURCE_DIR}/hexagon/ops/conv2d_hvx.cc" + PROPERTIES COMPILE_FLAGS "-mhvx" + ) endif() if(USE_HEXAGON_RPC) diff --git a/src/runtime/hexagon/ops/conv2d.h b/src/runtime/hexagon/ops/conv2d.h new file mode 100644 index 0000000000000..ba524df24a7a3 --- /dev/null +++ b/src/runtime/hexagon/ops/conv2d.h @@ -0,0 +1,144 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +#include +#include +#include + +#include + +#ifndef TVM_RUNTIME_HEXAGON_OPS_CONV2D_H_ +#define TVM_RUNTIME_HEXAGON_OPS_CONV2D_H_ + +#ifdef DEBUG_CONV +#define DEBUG_BLOCK(X) \ + { X } +#define debug(...) FARF(ALWAYS, ##__VA_ARGS__) +#else +#define DEBUG_BLOCK(X) +#define debug(...) +#endif + +#define HAP_CALL(hap_fn, ...) \ + { \ + int rc = hap_fn(__VA_ARGS__); \ + if (rc != 0) { \ + debug("%s failed: rc=%x", #hap_fn, rc); \ + } \ + } + +namespace detail { +static constexpr auto hexagon_device = DLDevice{static_cast(kDLHexagon), 0}; + +// Standalone DLTensor: the standalone-ness means that this object owns the shape +// (as opposed to a DLTensor). 
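+//
+// A sketch of typical usage, mirroring prepare_nhwc()/release() declared later in this header
+// (the shape values below are illustrative only):
+//
+//   // The 2-d VTCM allocation is treated as a table of block pointers
+//   // (see blockize_hwc_16b and nhwc_at).
+//   void* vtcm = device_api->AllocDataSpace(hexagon_device, 2, shape_2d, dtype, vtcm_scope);
+//   auto blocks = SDLTensor<4>(vtcm, dtype, vtcm, {n, h / 8, w / 4, c / 32});
+//   ...
+//   release(device_api, blocks);  // frees the VTCM data space owned by the tensor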
+template +class SDLTensor : public DLTensor { + public: + SDLTensor(void* data_ptr, DLDataType data_type, void* data_space, const int64_t* data_dims) + : SDLTensor(data_ptr, data_type, data_space) { + for (size_t i = 0; i != N; ++i) dims[i] = data_dims[i]; + } + + SDLTensor(void* data_ptr, DLDataType data_type, void* data_space, + std::initializer_list data_dims) + : SDLTensor(data_ptr, data_type, data_space, data_dims.begin()) {} + + void* GetDataSpace() const { return data_space; } + + private: + SDLTensor(void* data_ptr, DLDataType data_type, void* data_space) : data_space(data_space) { + data = data_ptr; + device = hexagon_device; + ndim = N; + dtype = data_type; + shape = dims; + strides = nullptr; + byte_offset = 0; + } + + void* data_space = nullptr; + int64_t dims[N]; +}; + +inline void* to_ptr(uintptr_t v) { return reinterpret_cast(v); } + +inline uintptr_t to_uint(void* ptr) { return reinterpret_cast(ptr); } + +inline constexpr int xyc_to_sm_16b(int y, int x, int c) { + // Map y,x,c coordinates within a block to the offset (in 16-bit elements) + // from the beginning of the block in spatial-major layout. + // 10-bit spatial mask: yyyxcccccx + return y << 7 | (x & 2) << 5 | c << 1 | (x & 1); +} + +inline constexpr int hwio_to_sm_16b(int width, int y, int x, int i, int o) { + // Map y,x,i,o coordinates within a chunk (assuming the origin at the + // top-left spatial corner) to the offset (in 16-bit elements) from the + // beginning of the chunk in spatial-major layout. + // Spatial mask: p..piiiioooooi, where p..p are position bits. + int p = y * width + (width - 1 - x); + return p << 10 | (i & 0x1e) << 5 | o << 1 | (i & 1); +} + +inline constexpr int round_up(int v, int p2) { return (v + p2 - 1) & -p2; } + +constexpr uintptr_t nhwc_at(const DLTensor& a, int n, int y, int x, int c) { + if (y < 0 || y >= a.shape[1]) return uintptr_t(0); + auto p = static_cast(a.data); + assert(n == 0); + return p[y * a.shape[2] * a.shape[3] + x * a.shape[3] + c]; +} + +constexpr uintptr_t hwio_at(const DLTensor& f, int y, int x, int i, int o) { + auto p = static_cast(f.data); + return p[y * f.shape[1] * f.shape[2] * f.shape[3] + x * f.shape[2] * f.shape[3] + i * f.shape[3] + + o]; +} + +constexpr uint32_t* bias_at(const DLTensor& b, int d) { + auto p = static_cast(b.data); + return p + d; +} + +void blockize_hwc_16b(void* out, void* inp_flat, int height, int width, int depth); + +void deblockize_hwc_16b(void* out_flat, void* inp, int height, int width, int depth); + +void chunkify_hwio_16b(void** out_ptr, int out_ptr_size, void* out, void* inp, int height, + int width, int idepth, int odepth); + +SDLTensor<4> prepare_nhwc(tvm::runtime::DeviceAPI* device_api, const DLTensor* nhwc_flat, + bool copy_data); + +int calculate_num_weight_chunks(int64_t* shape_hwio); + +SDLTensor<4> prepare_hwio(tvm::runtime::DeviceAPI* device_api, const DLTensor* hwio_flat, + int num_chunks, void** ptr_table); + +template +void release(tvm::runtime::DeviceAPI* device_api, const SDLTensor& tensor) { + if (auto* data_space = tensor.GetDataSpace()) { + device_api->FreeDataSpace(hexagon_device, data_space); + } +} + +} // namespace detail + +#endif // TVM_RUNTIME_HEXAGON_OPS_CONV2D_H_ diff --git a/src/runtime/hexagon/ops/conv2d_hvx.cc b/src/runtime/hexagon/ops/conv2d_hvx.cc new file mode 100644 index 0000000000000..69bfe17557e85 --- /dev/null +++ b/src/runtime/hexagon/ops/conv2d_hvx.cc @@ -0,0 +1,468 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+
+#include "conv2d.h"
+
+// Current limitations:
+// - N in NHWC must be 1
+// - dilated convolutions are not supported
+// - Bias is not accepted
+// - Optional "relu" is not performed
+
+// Packed arguments:
+// 0: DLTensor activations (NHWC)
+// 1: DLTensor weights (HWIO)
+// 2: int offset_top
+// 3: int offset_left
+// 4: int stride_h
+// 5: int stride_w
+// 6: DLTensor output (NHWC)
+extern "C" int conv2d_packed(TVMValue* args, int* type_codes, int num_args, TVMValue* out_val,
+                             int out_code, void* res_handle);
+
+namespace detail {
+
+inline uint16_t* getElementPtr(int block_out_y, int block_out_x, int block_out_c, int yi, int xio,
+                               int ci, int xii, const DLTensor& block) {
+  auto block_ptr = nhwc_at(block, 0, block_out_y, block_out_x, block_out_c);
+  auto block_offset = yi * 128 + xio * 64 + ci * 2 + xii;
+  auto first_element_ptr = reinterpret_cast<uint16_t*>(block_ptr);
+  return first_element_ptr + block_offset;
+}
+
+/**
+ * @brief Compute 2 vectors with ones in the even and odd lanes
+ *
+ * Output vectors are:
+ * vector 1 =   [0xFFFF,0x0000,0xFFFF,0x0000,...,0xFFFF,0x0000]
+ * vector lanes = [  0  ,   1  ,   2  ,   3  ,...,  62  ,  63  ]
+ *
+ * vector 2 =   [0x0000,0xFFFF,0x0000,0xFFFF,...,0x0000,0xFFFF]
+ * vector lanes = [  0  ,   1  ,   2  ,   3  ,...,  62  ,  63  ]
+ *
+ * @return The 2 vectors
+ */
+inline std::pair<HVX_Vector, HVX_Vector> getOddEvenOnes() {
+  HVX_Vector v0 = Q6_V_vzero();
+  HVX_Vector v1 = Q6_Vh_vsplat_R(0xFFFF);
+
+  HVX_Vector v1e = Q6_Vh_vshuffe_VhVh(v0, v1);
+  HVX_Vector v1o = Q6_V_vnot_V(v1e);
+  return {v1e, v1o};
+}
+
+/**
+ * @brief Return an input vector in which the 2 channel elements (the 1st and 3rd 16-bit
+ * elements at base_ptr) are repeated 32 times to fill all 64 lanes
+ *
+ * 1. It's generated by first creating 2 vectors "splatted" with the 2 required elements
+ * 2. Then we AND them with vectors containing all ones (0xFFFF) in the even and odd lanes
+ * 3. Finally those 2 vectors are OR'ed together
+ *
+ * @param base_ptr pointer to the first of the 2 channel elements to be filled
+ *
+ * @return input vector
+ */
+inline HVX_Vector getInputVector(uint16_t* base_ptr) {
+  HVX_Vector v1 = Q6_Vh_vsplat_R(base_ptr[0]);
+  HVX_Vector v2 = Q6_Vh_vsplat_R(base_ptr[2]);
+
+  auto oddEvenOnes = getOddEvenOnes();
+  auto v1e = oddEvenOnes.first;
+  auto v1o = oddEvenOnes.second;
+
+  HVX_Vector v_even_vals = Q6_V_vand_VV(v1, v1e);
+  HVX_Vector v_odd_vals = Q6_V_vand_VV(v2, v1o);
+
+  return Q6_V_vor_VV(v_even_vals, v_odd_vals);
+}
+
+/**
+ * @brief Return the output vector, which contains the 32 output channels in the even lanes
+ *
+ * The output vector is computed as:
+ * 1. Vector multiply (vmpy) of the input and the weights
+ * 2. Rotate the vector right by 1 element and add it to the unrotated result to reduce across
+ *    the 2 input channels
+ * 3.
Then convert the results back from qfloat16 to IEEE half-precision float + * 4. The added values are in even lanes, so zero out the odd lanes by anding with ones in even + * lanes and return + * + * @param act_vec Input activations vector + * @param wgt_vec Weights vector + * + * @return output vector with 32 output channels even lanes + */ +inline HVX_Vector computeOuputVector(HVX_Vector act_vec, HVX_Vector wgt_vec) { + HVX_Vector v_res = Q6_Vqf16_vmpy_VhfVhf(act_vec, wgt_vec); // result is in qf16 + HVX_Vector v_rot = Q6_V_vror_VR(v_res, 2); + HVX_Vector v_reduced = Q6_Vqf16_vadd_Vqf16Vqf16(v_res, v_rot); + HVX_Vector v_hf = Q6_Vhf_equals_Vqf16(v_reduced); + HVX_Vector v1e = getOddEvenOnes().first; + HVX_Vector v_reduced_even_lanes = Q6_V_vand_VV(v_hf, v1e); + return v_reduced_even_lanes; +} + +static int round_down(int v, int base) { return v - (v % base); } + +/** + * @brief Compute the convolution of inputs from cr_act, and weights from + * cr_filt to update the output to cr_out. The goal is to have an efficient + * HVX implementation + * + * Assumptions: + * ----------- + * - This implementation right now assumes that the dilation is 1 + * - there is zero padding or the input was already pre-padded. + * - block specific spatial padding is only expected at the end and hence + * pad_top and pad_left are not yet used + * - Relu activation is not used + * - Bias add is not done + * + * @param cr_out blockized output tensor with zeros already filled in + * @param cr_act blockized activations + * @param cr_filt Chunkified weights as returned from output of prepare_hwio + * @param out_shape Original output shape of the tensor before blockization + * @param act_shape Original input shape + * @param bias_flat Flat bias values and are not used right now + * @param filt_shape Original filter shape + * @param pad_shape Pad top and pad left shape + * @param relu Whether to apply relu after convolution, not done right now + * @param zero_block A block filled with zeros + * + * @return + */ +void conv_layer_fp16_hvx(DLTensor& cr_out, const DLTensor& cr_act, // NOLINT(*) + const DLTensor& cr_filt, const DLTensor& out_shape, + const DLTensor& act_shape, const DLTensor& bias_flat, + const DLTensor& filt_shape, const DLTensor& pad_shape, bool relu, + int stride_h, int stride_w, uintptr_t zero_block) { + int64_t filt_height = filt_shape.shape[0]; + int64_t filt_width = filt_shape.shape[1]; + int64_t filt_idepth = filt_shape.shape[2]; + (void)filt_idepth; + + DEBUG_BLOCK(int pad_top = pad_shape.shape[0]; int pad_left = pad_shape.shape[1];) + + debug("filt_height=%" PRId64 ", filt_width=%" PRId64 ", filt_idepth=%" PRId64 + ", pad_top=%d, pad_left=%d\n", + filt_height, filt_width, filt_idepth, pad_top, pad_left); + + assert(pad_top < 8 && pad_left < 4); + + DEBUG_BLOCK(int a_height = cr_act.shape[1]; int a_width = cr_act.shape[2]; + int a_depth = cr_act.shape[3]; + + int w_height = cr_filt.shape[0]; int w_width = cr_filt.shape[1]; + + int o_depth = cr_out.shape[3]; int b_depth = bias_flat.shape[0];) + + int o_height = cr_out.shape[1]; + int o_width = cr_out.shape[2]; + + int out_height = out_shape.shape[1]; + int out_width = out_shape.shape[2]; + + debug("a: %dx%dx%dx%d, w: %dx%dx%dx%d, o: %dx%dx%dx%d, b: %d, out_shape: %dx%d\n", 1, a_height, + a_width, a_depth, w_height, w_width, static_cast(cr_filt.shape[2]), + static_cast(cr_filt.shape[3]), 1, o_height, o_width, o_depth, b_depth, out_height, + out_width); + + assert(a_depth == cr_filt.shape[2]); + assert(o_depth == cr_filt.shape[3]); + + int rd = 
round_down(filt_width, 4);
+  int wgt_chunk_thin_width = filt_width - rd;
+
+  /*
+   * Compute an output vector of either 1 or 2 elements along the width and at most 32 elements
+   * along the depth, which together constitute a maximum of 64 elements
+   *
+   * The weights are loaded directly in the order they're stored, which results
+   * in 2 input channels and 32 output channels
+   *
+   * Weights vector illustration:
+   * ------- ------ ------------
+   * weights_vec = [0-0,0-1,1-0,1-1,2-0,2-1,3-0,3-1,4-0,4-1,...,31-0,31-1] -> This is the
+   * vector representation of weights, where the elements are represented as
+   * "out_channel-input_channel"
+   *
+   *
+   * The same 2 input channels have to be multiplied across all output channels in the weights.
+   *
+   * The activations vector would thus be:
+   * ----------- ------ ----- ---- --
+   * act_vec = [i0,i1,i0,i1,i0,i1,...,i0,i1] - the 2 input-channel elements broadcast 32 times
+   * to fill the 64 elements of the vector
+   *
+   *
+   * Thus the computation is just a vmpy(act_vec,weights_vec) followed by some rearrangement to
+   * add every pair of 16b lanes in the vector to reduce along the input channels
+   *
+   * This result is added to the result of the next pair of input channels, and so on until we
+   * have reduced across all of the input channels.
+   *
+   * Then the same vector is added to the results of the following elements along the width and
+   * height to finally get 32 elements representing 32 output channels.
+   *
+   * Since the output block also has the 8h2w32c2w format, the 32 elements of the next element
+   * along the width are also added into the same vector such that the first 32 channel elements
+   * occupy the even lanes and the next 32 occupy the odd lanes to form a single 64-element vector
+   * which is then stored
+   */
+  auto computeConv = [filt_height, filt_width, wgt_chunk_thin_width, filt_idepth, stride_h,
+                      stride_w, &cr_out, &cr_act, &cr_filt](int out_act_y, int out_act_x, int out_c,
+                                                            int h, int wo, bool skip_wi_1 = false) {
+    auto out_element_ptr = getElementPtr(out_act_y, out_act_x, out_c, h, wo, 0, 0, cr_out);
+
+    debug(
+        "out_act_y: %d, out_act_x: %d, out_c: %d, h: %d, wo: %d, out_block: %x, "
+        "out_block_offset: %d, out_base_ptr: %x, out_element_ptr: %x",
+        out_act_y, out_act_x, out_c, h, wo, out_block, out_block_offset, out_base_ptr,
+        out_element_ptr);
+
+    HVX_Vector* out_vector = reinterpret_cast<HVX_Vector*>(out_element_ptr);
+    HVX_Vector existing_out_vec = *out_vector;
+
+    for (int fh = 0; fh < filt_height; ++fh) {
+      for (int fw = 0; fw < filt_width; ++fw) {
+        int fch = fh / 8;
+        int fcw = 0;
+        if (fw >= wgt_chunk_thin_width) {
+          fcw = (fw - wgt_chunk_thin_width) / 4 + 1;
+        }
+        int fx = (fw < wgt_chunk_thin_width) ? fw : ((fw - wgt_chunk_thin_width) % 4);
+        int fy = fh % 8;
+        for (int c = 0; c < round_up(filt_idepth, 2); c += 2) {
+          int out_act_cc = c / 32;
+          int ci = c % 32;
+          auto wgt_chunk = hwio_at(cr_filt, fch, fcw, out_act_cc, out_c);
+
+          // Find weight chunk offset ptr
+          int max_x = (fcw == 0) ?
wgt_chunk_thin_width : 4; + + int wi = 0; + + int out_width_idx = out_act_x * 4 + wo * 2 + wi; + int act_width_access_idx = out_width_idx * stride_w + fw; + int true_out_act_x = act_width_access_idx / 4; + int true_wo = (act_width_access_idx % 4) / 2; + int true_wi = act_width_access_idx % 2; + + int out_height_idx = out_act_y * 8 + h; + int act_height_access_idx = out_height_idx * stride_h + fh; + int true_out_act_y = act_height_access_idx / 8; + int true_h = act_height_access_idx % 8; + + DEBUG_BLOCK(int act_channel_idx = out_act_cc * 32 + ci;); + + auto act_element_ptr = getElementPtr(true_out_act_y, true_out_act_x, out_act_cc, true_h, + true_wo, ci, true_wi, cr_act); + HVX_Vector act_vec = getInputVector(act_element_ptr); + + auto wgt_chunk_offset = hwio_to_sm_16b(max_x, fy, fx, ci, 0); + auto base_chunk_ptr = reinterpret_cast(wgt_chunk); + auto chunk_ptr = base_chunk_ptr + wgt_chunk_offset; + + debug( + "act: %dx%dx%dx%d, wgt: %dx%dx%dx%d, out: %dx%dx%dx%d, " + "act_block_offset: %d, wgt_block_offset: %d", + 0, act_height_access_idx, act_width_access_idx, act_channel_idx, fh, fw, + act_channel_idx, out_c * 32, 0, out_height_idx, out_width_idx, out_c * 32, + block_offset, wgt_chunk_offset); + + const HVX_Vector* weights_vec_ptr = reinterpret_cast(chunk_ptr); + HVX_Vector weights_vec = *weights_vec_ptr; + + HVX_Vector reduced_vec_even_elements = computeOuputVector(act_vec, weights_vec); + + if (!skip_wi_1) { + wi = 1; + + out_width_idx = out_act_x * 4 + wo * 2 + wi; + act_width_access_idx = out_width_idx * stride_w + fw; + true_out_act_x = act_width_access_idx / 4; + true_wo = (act_width_access_idx % 4) / 2; + true_wi = act_width_access_idx % 2; + + act_element_ptr = getElementPtr(true_out_act_y, true_out_act_x, out_act_cc, true_h, + true_wo, ci, true_wi, cr_act); + act_vec = getInputVector(act_element_ptr); + + debug( + "act: %dx%dx%dx%d, wgt: %dx%dx%dx%d, out: %dx%dx%dx%d, " + "act_block_offset: %d, wgt_block_offset: %d", + 0, act_height_access_idx, act_width_access_idx, act_channel_idx, fh, fw, + act_channel_idx, out_c * 32, 0, out_height_idx, out_width_idx, out_c * 32, + block_offset, wgt_chunk_offset); + + HVX_Vector reduced_vec_odd_elements = computeOuputVector(act_vec, weights_vec); + reduced_vec_odd_elements = Q6_V_vror_VR(reduced_vec_odd_elements, -2); + HVX_Vector out_final = Q6_V_vor_VV(reduced_vec_even_elements, reduced_vec_odd_elements); + + HVX_Vector out_vec_qf16 = Q6_Vqf16_vadd_VhfVhf(out_final, existing_out_vec); + existing_out_vec = Q6_Vhf_equals_Vqf16(out_vec_qf16); + } else { + debug("skipped wi=1"); + HVX_Vector out_vec_qf16 = + Q6_Vqf16_vadd_VhfVhf(reduced_vec_even_elements, existing_out_vec); + existing_out_vec = Q6_Vhf_equals_Vqf16(out_vec_qf16); + } + } + } + } + *out_vector = existing_out_vec; + }; + + auto computeFullWidth = [&computeConv](int out_y, int out_x, int out_c, int h) { + for (int wo = 0; wo < 2; ++wo) { + computeConv(out_y, out_x, out_c, h, wo); + } + }; + + auto computePartialWidth = [out_width, o_width, &computeConv](int out_y, int out_c, int h) { + int out_x = o_width - 1; + int wo = 0; + for (; wo < (out_width % 4) / 2; ++wo) { + computeConv(out_y, out_x, out_c, h, wo); + } + + if (out_width % 2) { + computeConv(out_y, out_x, out_c, h, wo, true /* skip_wi_1 */); + } + }; + + for (int out_c = 0; out_c < cr_filt.shape[3]; ++out_c) { + for (int out_act_y = 0; out_act_y < out_height / 8; ++out_act_y) { + int out_y = out_act_y; + for (int out_act_x = 0; out_act_x < out_width / 4; ++out_act_x) { + int out_x = out_act_x; + for (int h = 0; h < 8; ++h) 
{ + computeFullWidth(out_y, out_x, out_c, h); + } + } + + for (int h = 0; h < 8; ++h) { + computePartialWidth(out_y, out_c, h); + } + } + + int out_y = o_height - 1; + for (int h = 0; h < out_height % 8; ++h) { + for (int out_act_x = 0; out_act_x < out_width / 4; ++out_act_x) { + int out_x = out_act_x; + computeFullWidth(out_y, out_x, out_c, h); + } + computePartialWidth(out_y, out_c, h); + } + } +} +} // namespace detail + +int conv2d_packed(TVMValue* args, int* type_codes, int num_args, TVMValue* out_val, int out_code, + void* res_handle) { + assert(num_args == 7); + assert(type_codes[0] == kTVMDLTensorHandle); // Input activations + assert(type_codes[1] == kTVMDLTensorHandle); // Weights + assert(type_codes[2] == kDLInt); // pad_top offset + assert(type_codes[3] == kDLInt); // pad_left offset + assert(type_codes[4] == kDLInt); // stride_h + assert(type_codes[5] == kDLInt); // stride_w + assert(type_codes[6] == kTVMDLTensorHandle); // output + + auto* act_flat = static_cast(args[0].v_handle); + auto* wgt_flat = static_cast(args[1].v_handle); + auto* out_flat = static_cast(args[6].v_handle); + + // Temporary assertion until multiple batches are supported + assert(act_flat->shape[0] == 1); + + // Temporary assertion until multiple batches are supported + assert(out_flat->shape[0] == 1); + + int pad_top = args[2].v_int64; + int pad_left = args[3].v_int64; + int stride_h = args[4].v_int64; + int stride_w = args[5].v_int64; + + debug("act.shape=%" PRId64 "x%" PRId64 "x%" PRId64 "x%" PRId64 ", wgt.shape=%" PRId64 "x%" PRId64 + "x%" PRId64 "x%" PRId64 ", pad_top=%d, pad_left=%d", + act_flat->shape[0], act_flat->shape[1], act_flat->shape[2], act_flat->shape[3], + wgt_flat->shape[0], wgt_flat->shape[1], wgt_flat->shape[2], wgt_flat->shape[3], pad_top, + pad_left); + + auto* device_api = tvm::runtime::DeviceAPI::Get(detail::hexagon_device, false); + ICHECK(device_api != nullptr); + tvm::runtime::String vtcm_scope = "global.vtcm"; + + auto act_vtcm = detail::prepare_nhwc(device_api, act_flat, /*copy_data=*/true); + + assert(wgt_flat->shape[0] != 0); + assert(wgt_flat->shape[1] != 0); + assert(wgt_flat->shape[2] != 0); + assert(wgt_flat->shape[3] != 0); + int num_wgt_chunks = detail::calculate_num_weight_chunks(wgt_flat->shape); + debug("num_wgt_chunks: %d", num_wgt_chunks); + auto wgt_ptr_table = + reinterpret_cast(__builtin_alloca(num_wgt_chunks * sizeof(uintptr_t))); + auto wgt_vtcm = detail::prepare_hwio(device_api, wgt_flat, num_wgt_chunks, wgt_ptr_table); + + auto out_vtcm = detail::prepare_nhwc(device_api, out_flat, /*copy_data=*/false); + + // Prepare zero_block + int64_t block_nbytes = 2048; + void* zero_block = device_api->AllocDataSpace(detail::hexagon_device, 1, &block_nbytes, + tvm::runtime::DataType::UInt(8), vtcm_scope); + memset(zero_block, 0, 2048); + + debug("act_vtcm=%p, wgt_vtcm=%p, out_vtcm=%p, zero_block=%p, num_wgt_chunks=%d", act_vtcm.data, + wgt_vtcm.data, out_vtcm.data, zero_block, num_wgt_chunks); + + // FIXME: Setting bias to zero_block: this works for up to 256 output channels. 
+ auto bias_flat = + detail::SDLTensor<1>(zero_block, wgt_flat->dtype, zero_block, &wgt_flat->shape[3]); + auto act_shape = detail::SDLTensor<4>(nullptr, act_flat->dtype, nullptr, act_flat->shape); + auto filt_shape = detail::SDLTensor<4>(nullptr, wgt_flat->dtype, nullptr, wgt_flat->shape); + auto pad_shape = detail::SDLTensor<2>(nullptr, act_flat->dtype, nullptr, {pad_top, pad_left}); + auto out_shape = detail::SDLTensor<4>(nullptr, out_flat->dtype, nullptr, out_flat->shape); + bool relu = false; + + detail::conv_layer_fp16_hvx(out_vtcm, act_vtcm, wgt_vtcm, out_shape, act_shape, bias_flat, + filt_shape, pad_shape, relu, stride_h, stride_w, + detail::to_uint(zero_block)); + + detail::deblockize_hwc_16b(out_flat->data, out_vtcm.data, out_flat->shape[1], out_flat->shape[2], + out_flat->shape[3]); + + device_api->FreeDataSpace(detail::hexagon_device, zero_block); + detail::release(device_api, out_vtcm); + detail::release(device_api, wgt_vtcm); + detail::release(device_api, act_vtcm); + + return 0; +} diff --git a/src/runtime/hexagon/ops/conv_utils.cc b/src/runtime/hexagon/ops/conv_utils.cc new file mode 100644 index 0000000000000..e69b794878859 --- /dev/null +++ b/src/runtime/hexagon/ops/conv_utils.cc @@ -0,0 +1,191 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +#include "conv2d.h" + +namespace detail { + +void blockize_hwc_16b(void* out, void* inp_flat, int height, int width, int depth) { + auto inp_data = static_cast(inp_flat); + auto out_data = static_cast(out); + const int stride_x = depth; + const int stride_y = stride_x * width; + + for (int cy = 0; cy < height; cy += 8) { + for (int cx = 0; cx < width; cx += 4) { + for (int cc = 0; cc < depth; cc += 32) { + auto block = reinterpret_cast(*out_data++); + int max_y = std::min(8, height - cy); + int max_x = std::min(4, width - cx); + int max_c = std::min(32, depth - cc); + for (int y = 0; y < max_y; ++y) { + for (int x = 0; x < max_x; ++x) { + for (int c = 0; c < max_c; ++c) { + block[xyc_to_sm_16b(y, x, c)] = + inp_data[(cy + y) * stride_y + (cx + x) * stride_x + (cc + c)]; + } + for (int c = max_c; c < 32; ++c) block[xyc_to_sm_16b(y, x, c)] = 0; + } + for (int x = max_x; x < 4; ++x) { + for (int c = 0; c < 32; ++c) block[xyc_to_sm_16b(y, x, c)] = 0; + } + } + + for (int y = max_y; y < 8; ++y) + for (int x = 0; x < 4; ++x) + for (int c = 0; c < 32; ++c) block[xyc_to_sm_16b(y, x, c)] = 0; + } // cc + } // cx + } // cy +} + +void deblockize_hwc_16b(void* out_flat, void* inp, int height, int width, int depth) { + uintptr_t* inp_data = static_cast(inp); + uint16_t* out_data = static_cast(out_flat); + const int stride_x = depth; + const int stride_y = stride_x * width; + + for (int cy = 0; cy < height; cy += 8) { + for (int cx = 0; cx < width; cx += 4) { + for (int cc = 0; cc < depth; cc += 32) { + auto block = reinterpret_cast(*inp_data); + int max_y = std::min(8, height - cy); + int max_x = std::min(4, width - cx); + int max_c = std::min(32, depth - cc); + for (int y = 0; y < max_y; ++y) { + for (int x = 0; x < max_x; ++x) { + for (int c = 0; c < max_c; ++c) { + out_data[(cy + y) * stride_y + (cx + x) * stride_x + (cc + c)] = + block[xyc_to_sm_16b(y, x, c)]; + } + } + } + + inp_data++; + } + } + } +} + +void chunkify_hwio_16b(void** out_ptr, int out_ptr_size, void* out, void* inp, int height, + int width, int idepth, int odepth) { + auto inp_data = static_cast(inp); + auto out_data = static_cast(out); + const int stride_i = odepth; + const int stride_x = stride_i * idepth; + const int stride_y = stride_x * width; + + for (int cy = 0; cy < height; cy += 8) { + // In the chunkified tensor, the chunks are ordered in increasing + // x order, but they start from the thin one. 
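+    // For example, with width = 6: round_up(6, 4) = 8, so cx starts at -2; the first chunk has
+    // cx0 = 0 and max_x = min(4, cx + 4 - cx0) = 2 (the thin chunk covering x = 0..1), and the
+    // second has cx0 = 2 and max_x = 4 (covering x = 2..5).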
+ for (int cx = width - round_up(width, 4); cx < width; cx += 4) { + int cx0 = std::max(0, cx); + for (int ci = 0; ci < idepth; ci += 32) { + for (int co = 0; co < odepth; co += 32) { + int max_y = std::min(8, height - cy); + int max_x = std::min(4, cx + 4 - cx0); + int max_i = std::min(32, idepth - ci); + int max_o = std::min(32, odepth - co); + + auto chunk = reinterpret_cast(out_data); + for (int y = 0; y < max_y; ++y) { + for (int x = max_x - 1; x >= 0; --x) { + for (int i = 0; i < max_i; ++i) { + for (int o = 0; o < max_o; ++o) { + debug( + "cy: %d, cx: %d, cx0: %d, ci: %d, co: %d, max_x: %d, y: %d, x: %d, i: %d, o: " + "%d, index: %d", + cy, cx, cx0, ci, co, max_x, y, x, i, o, hwio_to_sm_16b(max_x, y, x, i, o)); + chunk[hwio_to_sm_16b(max_x, y, x, i, o)] = + inp_data[(cy + y) * stride_y + (cx0 + x) * stride_x + (ci + i) * stride_i + + (co + o)]; + } + for (int o = max_o; o < 32; ++o) chunk[hwio_to_sm_16b(max_x, y, x, i, o)] = 0; + } + for (int i = max_i; i < 32; ++i) + for (int o = 0; o < 32; ++o) chunk[hwio_to_sm_16b(max_x, y, x, i, o)] = 0; + } + } + + *out_ptr++ = chunk; + out_data += max_y * max_x * 32 * 32; + out_ptr_size--; + assert(out_ptr_size >= 0); + } + } + } + } +} + +SDLTensor<4> prepare_nhwc(tvm::runtime::DeviceAPI* device_api, const DLTensor* nhwc_flat, + bool copy_data) { + tvm::runtime::String vtcm_scope = "global.vtcm"; + + // Allocate blocks for activations. We will use the block pointers + // directly from the allocated area. + int n = nhwc_flat->shape[0]; + int h = round_up(nhwc_flat->shape[1], 8); + int w = round_up(nhwc_flat->shape[2], 4); + int c = round_up(nhwc_flat->shape[3], 32); + int64_t shape_2d[2] = {(n * h * w * c) / (8 * 4 * 32), 8 * 4 * 32}; + void* nhwc_vtcm = + device_api->AllocDataSpace(hexagon_device, 2, shape_2d, nhwc_flat->dtype, vtcm_scope); + if (copy_data) { + blockize_hwc_16b(nhwc_vtcm, nhwc_flat->data, nhwc_flat->shape[1], nhwc_flat->shape[2], + nhwc_flat->shape[3]); + } + + return SDLTensor<4>(nhwc_vtcm, nhwc_flat->dtype, nhwc_vtcm, {n, h / 8, w / 4, c / 32}); +} + +SDLTensor<4> prepare_hwio(tvm::runtime::DeviceAPI* device_api, const DLTensor* hwio_flat, + int num_chunks, void** ptr_table) { + tvm::runtime::String vtcm_scope = "global.vtcm"; + + // Allocate one block for filter data. We will need to create our own + // pointer table. The reason is that filter chunks cannot be padded + // height- or width-wise, so filter chunks may have different sizes. + // A filter chunk is a block of size HxWx32x32, where H, W are at most + // height and width of a block respectively. 
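+  // For example, a 5x5x3x3 (HWIO) filter is padded to 32 input and 32 output channels and is
+  // split along the width into a thin 5x1x32x32 chunk followed by a full 5x4x32x32 chunk, so
+  // calculate_num_weight_chunks returns 2 and ptr_table holds 2 entries.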
+ int h = hwio_flat->shape[0]; + int w = hwio_flat->shape[1]; + int i = round_up(hwio_flat->shape[2], 32); + int o = round_up(hwio_flat->shape[3], 32); + int64_t shape_1d[] = {h * w * i * o}; + void* hwio_vtcm = + device_api->AllocDataSpace(hexagon_device, 1, shape_1d, hwio_flat->dtype, vtcm_scope); + + chunkify_hwio_16b(ptr_table, num_chunks, hwio_vtcm, hwio_flat->data, hwio_flat->shape[0], + hwio_flat->shape[1], hwio_flat->shape[2], hwio_flat->shape[3]); + + return SDLTensor<4>(ptr_table, hwio_flat->dtype, hwio_vtcm, + {round_up(h, 8) / 8, round_up(w, 4) / 4, i / 32, o / 32}); +} + +int calculate_num_weight_chunks(int64_t* shape_hwio) { + int h = round_up(shape_hwio[0], 8); + int w = round_up(shape_hwio[1], 4); + int i = round_up(shape_hwio[2], 32); + int o = round_up(shape_hwio[3], 32); + + debug("h: %d, w: %d, i: %d, o: %d", h, w, i, o); + return (h * w * i * o) / (8 * 4 * 32 * 32); +} + +} // namespace detail diff --git a/tests/python/contrib/test_hexagon/topi/test_conv2d_fp16_intrin.py b/tests/python/contrib/test_hexagon/topi/test_conv2d_fp16_intrin.py new file mode 100644 index 0000000000000..a1eac2dfebe07 --- /dev/null +++ b/tests/python/contrib/test_hexagon/topi/test_conv2d_fp16_intrin.py @@ -0,0 +1,256 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +import numpy as np +import pytest +import sys + +import tvm +import tvm.contrib.hexagon +from tvm.topi.testing import conv2d_nhwc_python + + +def build_conv2d(target): + an, ah, aw, ac = ( + tvm.te.var("an"), + tvm.te.var("ah"), + tvm.te.var("aw"), + tvm.te.var("ac"), + ) + fh, fw, fo = tvm.te.var("fh"), tvm.te.var("fw"), tvm.te.var("fo") + off_l, off_t = tvm.te.var("off_l"), tvm.te.var("off_t") + stride_h, stride_w = tvm.te.var("stride_h"), tvm.te.var("stride_w") + + act_flat = tvm.te.placeholder(shape=(an, ah, aw, ac), dtype="float16", name="act_flat") + wgt_flat = tvm.te.placeholder(shape=(fh, fw, ac, fo), dtype="float16", name="wgt_flat") + + out_flat = tvm.te.extern( + shape=(an, (ah - fh) // stride_h + 1, (aw - fw) // stride_w + 1, fo), + inputs=[act_flat, wgt_flat], + fcompute=lambda ins, outs: tvm.tir.call_cpacked( + "conv2d_packed", # Function from TVM runtime + ins[0], + ins[1], + off_t, + off_l, + stride_h, + stride_w, + outs[0], + tvm.runtime.const(0), # resource_handle (unused) + ), + dtype="float16", + ) + + s = tvm.te.create_schedule(out_flat.op) + + func_name = "extern_conv" + with tvm.transform.PassContext(opt_level=3): + module = tvm.build( + s, + [act_flat, wgt_flat, off_t, off_l, stride_h, stride_w, out_flat], + target=target, + name=func_name, + ) + + return module + + +shape_parameters = [ + { + "act_shape": (1, 8, 4, 3), + "wgt_shape": (3, 3, 3, 3), + "inp_offset": (0, 0), + "inp_stride": (1, 1), + }, + { + "act_shape": (1, 10, 14, 3), + "wgt_shape": (3, 3, 3, 3), + "inp_offset": (0, 0), + "inp_stride": (1, 1), + }, + { + "act_shape": (1, 14, 6, 3), + "wgt_shape": (3, 3, 3, 3), + "inp_offset": (0, 0), + "inp_stride": (1, 1), + }, + { + "act_shape": (1, 14, 6, 3), + "wgt_shape": (3, 3, 3, 64), + "inp_offset": (0, 0), + "inp_stride": (1, 1), + }, + { + "act_shape": (1, 14, 6, 3), + "wgt_shape": (5, 5, 3, 3), + "inp_offset": (0, 0), + "inp_stride": (1, 1), + }, + { + "act_shape": (1, 8, 8, 3), + "wgt_shape": (2, 2, 3, 3), + "inp_offset": (0, 0), + "inp_stride": (1, 1), + }, + { + "act_shape": (1, 14, 6, 64), + "wgt_shape": (3, 3, 64, 3), + "inp_offset": (0, 0), + "inp_stride": (1, 1), + }, + { + "act_shape": (1, 4, 4, 40), + "wgt_shape": (3, 3, 40, 3), + "inp_offset": (0, 0), + "inp_stride": (1, 1), + }, + { + "act_shape": (1, 4, 4, 3), + "wgt_shape": (3, 3, 3, 3), + "inp_offset": (0, 0), + "inp_stride": (1, 1), + }, + { + "act_shape": (1, 5, 5, 3), + "wgt_shape": (3, 3, 3, 3), + "inp_offset": (0, 0), + "inp_stride": (1, 1), + }, + { + "act_shape": (1, 6, 6, 3), + "wgt_shape": (3, 3, 3, 3), + "inp_offset": (0, 0), + "inp_stride": (1, 1), + }, + { + "act_shape": (1, 7, 7, 3), + "wgt_shape": (3, 3, 3, 3), + "inp_offset": (0, 0), + "inp_stride": (1, 1), + }, + { + "act_shape": (1, 8, 8, 3), + "wgt_shape": (3, 3, 3, 3), + "inp_offset": (0, 0), + "inp_stride": (1, 1), + }, + { + "act_shape": (1, 8, 8, 3), + "wgt_shape": (5, 5, 3, 3), + "inp_offset": (0, 0), + "inp_stride": (1, 1), + }, + { + "act_shape": (1, 8, 8, 64), + "wgt_shape": (2, 2, 64, 64), + "inp_offset": (0, 0), + "inp_stride": (1, 1), + }, + { + "act_shape": (1, 8, 4, 3), + "wgt_shape": (3, 3, 3, 3), + "inp_offset": (0, 0), + "inp_stride": (2, 2), + }, + { + "act_shape": (1, 14, 6, 3), + "wgt_shape": (3, 3, 3, 64), + "inp_offset": (0, 0), + "inp_stride": (2, 2), + }, + { + "act_shape": (1, 14, 6, 3), + "wgt_shape": (5, 5, 3, 3), + "inp_offset": (0, 0), + "inp_stride": (2, 2), + }, + { + "act_shape": (1, 8, 8, 3), + "wgt_shape": (2, 2, 3, 3), + "inp_offset": (0, 0), + "inp_stride": (2, 2), + }, +] + + +def 
gen_id(param): + """Utility function to generate useful ids for shape_parameters""" + + dims = lambda vals: "x".join(map(str, vals)) + + act_shape = param["act_shape"] + wgt_shape = param["wgt_shape"] + inp_stride = param["inp_stride"] + return f"nhwc{dims(act_shape)}-hwio{dims(wgt_shape)}-stride{dims(inp_stride)}" + + +@tvm.testing.requires_hexagon +@pytest.mark.parametrize("shapes", shape_parameters, ids=map(gen_id, shape_parameters)) +def test_conv2d(shapes, hexagon_session): + act_shape = shapes["act_shape"] + wgt_shape = shapes["wgt_shape"] + inp_offset = shapes["inp_offset"] + inp_stride = shapes["inp_stride"] + assert act_shape[3] == wgt_shape[2] + + target_hexagon = tvm.target.hexagon("v69") + target = tvm.target.Target(target_hexagon, host=target_hexagon) + + # Currently, input offset does not affect the output shape + def get_out_shape(ash, wsh, inp_stride): + assert ash[3] == wsh[2] + osh = ( + ash[0], + (ash[1] - wsh[0]) // inp_stride[0] + 1, + (ash[2] - wsh[1]) // inp_stride[1] + 1, + wsh[3], + ) + assert tvm.tir.all([x > 0 for x in osh]) + return osh + + act = np.random.rand(*act_shape).astype("float16") + wgt = np.random.rand(*wgt_shape).astype("float16") + + module = build_conv2d(target) + + mod = hexagon_session.load_module(module) + output = tvm.nd.array( + np.zeros(get_out_shape(act_shape, wgt_shape, inp_stride), dtype="float16"), + device=hexagon_session.device, + ) + mod( + tvm.nd.array(act, device=hexagon_session.device), + tvm.nd.array(wgt, device=hexagon_session.device), + inp_offset[0], # off_t + inp_offset[1], # off_l + inp_stride[0], # stride_height + inp_stride[1], # stride_width + output, + ) + + out = output.numpy() + + # Generate reference output and compare: + ref_out = conv2d_nhwc_python( + act.astype("float32"), wgt.astype("float32"), stride=inp_stride, padding="VALID" + ).astype("float16") + + tvm.testing.assert_allclose(out, ref_out, rtol=5e-2, atol=5e-2) + + +if __name__ == "__main__": + tvm.testing.main()