
Commit

fix build
nihui committed Jul 25, 2023
1 parent 0c1026e commit 3ed8367
Showing 1 changed file with 9 additions and 9 deletions.
18 changes: 9 additions & 9 deletions src/layer/arm/multiheadattention_arm.cpp
@@ -506,7 +506,7 @@ int MultiHeadAttention_arm::forward(const std::vector<Mat>& bottom_blobs, std::v
     Mat attn_mask_blob_unpacked;
     if (attn_mask_blob.elempack != 1)
     {
-        convert_packing(attn_mask_blob, attn_mask_blob_unpacked, 1, opt);
+        convert_packing(attn_mask_blob, attn_mask_blob_unpacked, 1, _opt);
     }
     else
     {
@@ -532,9 +532,9 @@ int MultiHeadAttention_arm::forward(const std::vector<Mat>& bottom_blobs, std::v
         Mat k_affine;
         k_gemm->forward(k_blob, k_affine, opt);
 
-        Mat qk_cross(dst_seqlen, src_seqlen * num_head, 2u, opt.blob_allocator);
+        Mat qk_cross(dst_seqlen, src_seqlen * num_heads, 2u, opt.blob_allocator);
         #pragma omp parallel for num_threads(opt.num_threads)
-        for (int i = 0; i < num_head; i++)
+        for (int i = 0; i < num_heads; i++)
         {
             std::vector<Mat> qk_bottom_blobs(2);
             qk_bottom_blobs[0] = q_affine.row_range(i * embed_dim_per_head, embed_dim_per_head);
@@ -559,9 +559,9 @@ int MultiHeadAttention_arm::forward(const std::vector<Mat>& bottom_blobs, std::v
         Mat v_affine;
         v_gemm->forward(v_blob, v_affine, opt);
 
-        Mat qkv_cross(src_seqlen, embed_dim_per_head * num_head, 2u, opt.blob_allocator);
+        Mat qkv_cross(src_seqlen, embed_dim_per_head * num_heads, 2u, opt.blob_allocator);
         #pragma omp parallel for num_threads(opt.num_threads)
-        for (int i = 0; i < num_head; i++)
+        for (int i = 0; i < num_heads; i++)
         {
             std::vector<Mat> qkv_bottom_blobs(2);
             qkv_bottom_blobs[0] = qk_cross.row_range(i * src_seqlen, src_seqlen);
@@ -587,9 +587,9 @@ int MultiHeadAttention_arm::forward(const std::vector<Mat>& bottom_blobs, std::v
         Mat k_affine;
         k_gemm->forward(k_blob, k_affine, opt);
 
-        Mat qk_cross(dst_seqlen, src_seqlen * num_head, 4u, opt.blob_allocator);
+        Mat qk_cross(dst_seqlen, src_seqlen * num_heads, 4u, opt.blob_allocator);
         #pragma omp parallel for num_threads(opt.num_threads)
-        for (int i = 0; i < num_head; i++)
+        for (int i = 0; i < num_heads; i++)
        {
             std::vector<Mat> qk_bottom_blobs(2);
             qk_bottom_blobs[0] = q_affine.row_range(i * embed_dim_per_head, embed_dim_per_head);
@@ -614,9 +614,9 @@ int MultiHeadAttention_arm::forward(const std::vector<Mat>& bottom_blobs, std::v
         Mat v_affine;
         v_gemm->forward(v_blob, v_affine, opt);
 
-        Mat qkv_cross(src_seqlen, embed_dim_per_head * num_head, 4u, opt.blob_allocator);
+        Mat qkv_cross(src_seqlen, embed_dim_per_head * num_heads, 4u, opt.blob_allocator);
         #pragma omp parallel for num_threads(opt.num_threads)
-        for (int i = 0; i < num_head; i++)
+        for (int i = 0; i < num_heads; i++)
         {
             std::vector<Mat> qkv_bottom_blobs(2);
             qkv_bottom_blobs[0] = qk_cross.row_range(i * src_seqlen, src_seqlen);
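The hunks above rename num_head to num_heads in the per-head loops and the fused Mat shapes, and pass _opt rather than opt to convert_packing, which reads as replacing identifiers that were not available at those points in the file, hence the "fix build" commit message.

What follows is a minimal stand-alone sketch (plain C++, not the ncnn Mat/Gemm API) of the per-head row blocking these hunks operate on: each head owns a contiguous block of rows, so head i of the projected blobs starts at row i * embed_dim_per_head and head i of the fused score buffer starts at row i * seqlen. The dimension names follow the diff; the concrete sizes, the single seqlen standing in for src_seqlen/dst_seqlen, and the naive nested-loop GEMM are illustration-only assumptions, not code from the commit.

#include <cstdio>
#include <vector>

// C (m x n) = A^T * B, with A stored as (k x m) and B as (k x n), all row-major.
static void gemm_at_b(const float* A, const float* B, float* C, int k, int m, int n)
{
    for (int i = 0; i < m; i++)
        for (int j = 0; j < n; j++)
        {
            float sum = 0.f;
            for (int p = 0; p < k; p++)
                sum += A[p * m + i] * B[p * n + j];
            C[i * n + j] = sum;
        }
}

int main()
{
    const int num_heads = 2;          // illustration values only
    const int embed_dim_per_head = 4;
    const int seqlen = 3;

    // q_affine / k_affine: num_heads blocks of embed_dim_per_head rows, seqlen columns each.
    std::vector<float> q_affine(num_heads * embed_dim_per_head * seqlen, 1.f);
    std::vector<float> k_affine(num_heads * embed_dim_per_head * seqlen, 0.5f);

    // qk_cross: num_heads blocks of seqlen x seqlen scores stacked along the row axis,
    // mirroring Mat qk_cross(dst_seqlen, src_seqlen * num_heads, ...) in the diff.
    std::vector<float> qk_cross(num_heads * seqlen * seqlen, 0.f);

    for (int i = 0; i < num_heads; i++)
    {
        // analogous to q_affine.row_range(i * embed_dim_per_head, embed_dim_per_head)
        const float* qh = &q_affine[i * embed_dim_per_head * seqlen];
        const float* kh = &k_affine[i * embed_dim_per_head * seqlen];
        // analogous to writing into qk_cross.row_range(i * seqlen, seqlen)
        float* out = &qk_cross[i * seqlen * seqlen];

        gemm_at_b(qh, kh, out, embed_dim_per_head, seqlen, seqlen);
    }

    // with these fill values every score is embed_dim_per_head * 1.0 * 0.5 = 2.0
    printf("qk_cross[head 1][0][0] = %f\n", qk_cross[1 * seqlen * seqlen]);
    return 0;
}

In the real layer the same slicing lets every head drive a shared Gemm sub-layer on a row_range view of the fused blob without copying, and the #pragma omp parallel for spreads the per-head GEMMs across opt.num_threads threads.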
