
Commit 877deb8

Fixes GQA support in prefix prefill kernels

Signed-off-by: Tao He <sighingnow@gmail.com>
1 parent: c530e2c

File tree: 3 files changed (+87 -47 lines)

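For orientation before the diffs: grouped-query attention (GQA) shares each key/value head across a fixed-size group of query heads, and this commit threads that group size (num_queries_per_kv) through the prefix-prefill path. A minimal illustration of the arithmetic, with example head counts that are not taken from any particular model:

    # Example numbers only: 64 query heads sharing 8 kv heads.
    num_heads = 64
    num_queries_per_kv = 8
    num_kv_heads = num_heads // num_queries_per_kv  # 8

    # Every query head reads the kv head of its group, mirroring the
    # `cur_kv_head = cur_head // num_queries_per_kv` line added to the
    # Triton kernels below.
    for cur_head in range(num_heads):
        cur_kv_head = cur_head // num_queries_per_kv
        assert 0 <= cur_kv_head < num_kv_heads
    # query heads 0-7 -> kv head 0, 8-15 -> kv head 1, ..., 56-63 -> kv head 7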

tests/kernels/test_prefix_prefill.py (+42 -19)

@@ -8,7 +8,8 @@
 from xformers import ops as xops
 from xformers.ops.fmha.attn_bias import BlockDiagonalCausalFromBottomRightMask
 
-NUM_HEADS = [12]
+NUM_HEADS = [64]
+NUM_QUERIES_PER_KV = [1, 8, 64]
 HEAD_SIZES = [128]
 DTYPES = [torch.float16]
 CUDA_DEVICES = [
@@ -17,12 +18,14 @@
 
 
 @pytest.mark.parametrize("num_heads", NUM_HEADS)
+@pytest.mark.parametrize("num_queries_per_kv", NUM_QUERIES_PER_KV)
 @pytest.mark.parametrize("head_size", HEAD_SIZES)
 @pytest.mark.parametrize("dtype", DTYPES)
 @pytest.mark.parametrize("device", CUDA_DEVICES)
 @torch.inference_mode()
 def test_contexted_kv_attention(
     num_heads: int,
+    num_queries_per_kv: int,
     head_size: int,
     dtype: torch.dtype,
     device: str,
@@ -41,28 +44,29 @@ def test_contexted_kv_attention(
     subquery_lens = [random.randint(16, MAX_SEQ_LEN) for _ in range(BS)]
     ctx_lens = [random.randint(16, MAX_CTX_LEN) for _ in range(BS)]
     seq_lens = [a + b for a, b in zip(subquery_lens, ctx_lens)]
+    num_kv_heads = num_heads // num_queries_per_kv
 
     num_tokens = sum(subquery_lens)
     query = torch.empty(num_tokens, num_heads, head_size, dtype=dtype)
     query.uniform_(-1e-3, 1e-3)
     output = torch.empty(num_tokens, num_heads, head_size, dtype=dtype)
 
-    kv = torch.empty(sum(seq_lens), 2, num_heads, head_size, dtype=dtype)
+    kv = torch.empty(sum(seq_lens), 2, num_kv_heads, head_size, dtype=dtype)
     kv.uniform_(-1e-3, 1e-3)
     key, value = kv.unbind(dim=1)
 
     k_cache = torch.zeros(cache_size,
                           block_size,
-                          num_heads,
+                          num_kv_heads,
                           head_size,
                           dtype=dtype)
     v_cache = torch.zeros(cache_size,
                           block_size,
-                          num_heads,
+                          num_kv_heads,
                           head_size,
                           dtype=dtype)
-    k = torch.zeros(sum(subquery_lens), num_heads, head_size, dtype=dtype)
-    v = torch.zeros(sum(subquery_lens), num_heads, head_size, dtype=dtype)
+    k = torch.zeros(sum(subquery_lens), num_kv_heads, head_size, dtype=dtype)
+    v = torch.zeros(sum(subquery_lens), num_kv_heads, head_size, dtype=dtype)
     values = torch.arange(0, cache_size, dtype=torch.long)
     values = values[torch.randperm(cache_size)]
     block_table = values[:BS * max_block_per_request].view(
@@ -93,19 +97,21 @@ def test_contexted_kv_attention(
             end_loc = start_loc + block_size
             start_slot = block_table[i, block_id] * block_size
             end_slot = start_slot + end_loc - start_loc
-            k_cache.view(-1, num_heads, head_size)[start_slot:end_slot].copy_(
-                key[start_loc:end_loc])
-            v_cache.view(-1, num_heads, head_size)[start_slot:end_slot].copy_(
-                value[start_loc:end_loc])
+            k_cache.view(-1, num_kv_heads,
+                         head_size)[start_slot:end_slot].copy_(
+                             key[start_loc:end_loc])
+            v_cache.view(-1, num_kv_heads,
+                         head_size)[start_slot:end_slot].copy_(
+                             value[start_loc:end_loc])
             cur_ctx += block_size
             block_id += 1
     # transpose K_cache[num_blocks, block_size, num_kv_heads, head_size]
     # to K_cache[num_blocks, num_kv_heads, head_size/8, block_size, 8]
-    k_cache = k_cache.view(-1, block_size, num_heads, head_size // 8,
+    k_cache = k_cache.view(-1, block_size, num_kv_heads, head_size // 8,
                            8).permute(0, 2, 3, 1, 4).contiguous()
     # transpose V_cache[num_blocks, block_size, num_kv_heads, head_size]
     # to V_cache[num_blocks, num_kv_heads, head_size, block_size]
-    v_cache = v_cache.view(-1, block_size, num_heads,
+    v_cache = v_cache.view(-1, block_size, num_kv_heads,
                            head_size).permute(0, 2, 3, 1).contiguous()
 
     # Warm up the Triton kernel by calling it once before actually measuring generation time
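The two transposes above pack the caches into the layouts the Triton kernel expects: K as [num_blocks, num_kv_heads, head_size/8, block_size, 8] and V as [num_blocks, num_kv_heads, head_size, block_size]. A small torch-only sanity check (illustrative sizes, separate from the test) that the packed K layout holds the same values as the flat one:

    import torch

    num_blocks, block_size, num_kv_heads, head_size = 2, 16, 4, 128
    flat = torch.randn(num_blocks, block_size, num_kv_heads, head_size,
                       dtype=torch.float16)
    packed = flat.view(num_blocks, block_size, num_kv_heads, head_size // 8,
                       8).permute(0, 2, 3, 1, 4).contiguous()

    b, slot, h, d = 1, 5, 3, 77  # arbitrary block, slot, kv head, head dim
    assert packed[b, h, d // 8, slot, d % 8] == flat[b, slot, h, d]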
@@ -123,12 +129,29 @@ def test_contexted_kv_attention(
 
     attn_op = xops.fmha.cutlass.FwOp()
 
+    if num_kv_heads != num_heads:
+        # As of Nov 2023, xformers only supports MHA. For MQA/GQA,
+        # project the key and value tensors to the desired number of
+        # heads.
+        #
+        # see also: vllm/model_executor/layers/attention.py
+        query = query.view(query.shape[0], num_kv_heads, num_queries_per_kv,
+                           query.shape[-1])
+        key = key[:, :, None, :].expand(key.shape[0], num_kv_heads,
+                                        num_queries_per_kv, key.shape[-1])
+        value = value[:, :,
+                      None, :].expand(value.shape[0], num_kv_heads,
+                                      num_queries_per_kv, value.shape[-1])
+    query = query.unsqueeze(0)
+    key = key.unsqueeze(0)
+    value = value.unsqueeze(0)
+
     attn_bias = BlockDiagonalCausalFromBottomRightMask.from_seqlens(
         subquery_lens, seq_lens)
     output_ref = xops.memory_efficient_attention_forward(
-        query.unsqueeze(0),
-        key.unsqueeze(0),
-        value.unsqueeze(0),
+        query,
+        key,
+        value,
         attn_bias=attn_bias,
         p=0.0,
         scale=scale,
@@ -137,9 +160,9 @@ def test_contexted_kv_attention(
     torch.cuda.synchronize()
     start_time = time.time()
     output_ref = xops.memory_efficient_attention_forward(
-        query.unsqueeze(0),
-        key.unsqueeze(0),
-        value.unsqueeze(0),
+        query,
+        key,
+        value,
         attn_bias=attn_bias,
         p=0.0,
         scale=scale,
@@ -148,5 +171,5 @@ def test_contexted_kv_attention(
     torch.cuda.synchronize()
     end_time = time.time()
     print(f"xformers Time: {(end_time - start_time)*1000:.2f} ms")
-    output_ref = output_ref.squeeze(0)
+    output_ref = output_ref.squeeze(0, 2)
     assert torch.allclose(output_ref, output, atol=1e-6, rtol=0)
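For readers unfamiliar with the reshape-and-expand step the test now performs, here is a standalone, CPU-only sketch (made-up sizes, not the test's random ones): query heads are folded into per-kv-head groups and K/V are broadcast across each group, producing the grouped 5-D layout that recent xformers versions accept once the batch dimension is added:

    import torch

    num_tokens, num_heads, num_queries_per_kv, head_size = 32, 8, 4, 64
    num_kv_heads = num_heads // num_queries_per_kv  # 2

    query = torch.randn(num_tokens, num_heads, head_size)
    key = torch.randn(num_tokens, num_kv_heads, head_size)
    value = torch.randn_like(key)

    # Group query heads by kv head; broadcast K/V across each group.
    query = query.view(num_tokens, num_kv_heads, num_queries_per_kv, head_size)
    key = key[:, :, None, :].expand(num_tokens, num_kv_heads,
                                    num_queries_per_kv, head_size)
    value = value[:, :, None, :].expand(num_tokens, num_kv_heads,
                                        num_queries_per_kv, head_size)

    # After unsqueeze(0) the tensors become
    # (1, num_tokens, num_kv_heads, num_queries_per_kv, head_size),
    # matching the query/key/value passed to xformers in the test above.
    assert query.unsqueeze(0).shape == (1, num_tokens, num_kv_heads,
                                        num_queries_per_kv, head_size)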

vllm/model_executor/layers/attention.py (+18 -16)

@@ -137,25 +137,27 @@ def forward(
         )
 
         if input_metadata.is_prompt:
-            # Prompt run.
-            if self.num_kv_heads != self.num_heads:
-                # As of Nov 2023, xformers only supports MHA. For MQA/GQA,
-                # project the key and value tensors to the desired number of
-                # heads.
-                # TODO(woosuk): Use MQA/GQA kernels for higher performance.
-                query = query.view(query.shape[0], self.num_kv_heads,
-                                   self.num_queries_per_kv, query.shape[-1])
-                key = key[:, :,
-                          None, :].expand(key.shape[0], self.num_kv_heads,
-                                          self.num_queries_per_kv,
-                                          key.shape[-1])
-                value = value[:, :, None, :].expand(value.shape[0],
-                                                    self.num_kv_heads,
-                                                    self.num_queries_per_kv,
-                                                    value.shape[-1])
             # normal attention
             if (key_cache is None or value_cache is None
                     or input_metadata.block_tables.numel() == 0):
+                if self.num_kv_heads != self.num_heads:
+                    # As of Nov 2023, xformers only supports MHA. For MQA/GQA,
+                    # project the key and value tensors to the desired number of
+                    # heads.
+                    # TODO(woosuk): Use MQA/GQA kernels for higher performance.
+                    query = query.view(query.shape[0], self.num_kv_heads,
+                                       self.num_queries_per_kv,
+                                       query.shape[-1])
+                    key = key[:, :,
+                              None, :].expand(key.shape[0], self.num_kv_heads,
+                                              self.num_queries_per_kv,
+                                              key.shape[-1])
+                    value = value[:, :,
+                                  None, :].expand(value.shape[0],
+                                                  self.num_kv_heads,
+                                                  self.num_queries_per_kv,
+                                                  value.shape[-1])
+
                # Set attention bias if not provided. This typically happens at
                # the very attention layer of every iteration.
                # FIXME(woosuk): This is a hack.
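The net effect of this hunk is that the MQA/GQA head expansion now runs only in the "normal attention" branch (no cached prefix); the prefix-prefill Triton kernel below handles GQA natively. One detail worth noting about the expansion itself (a general PyTorch property, not something specific to this file): expand() produces a broadcast view, so replicating each kv head across its query group costs no extra memory. A tiny illustration with assumed shapes:

    import torch

    num_tokens, num_kv_heads, num_queries_per_kv, head_size = 8, 2, 4, 16
    key = torch.randn(num_tokens, num_kv_heads, head_size)
    key_exp = key[:, :, None, :].expand(num_tokens, num_kv_heads,
                                        num_queries_per_kv, head_size)

    assert key_exp.stride(2) == 0                  # broadcast dim, no copy
    assert key_exp.data_ptr() == key.data_ptr()    # same underlying storage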

vllm/model_executor/layers/triton_kernel/prefix_prefill.py (+27 -12)

@@ -45,6 +45,7 @@ def _fwd_kernel(
     stride_v_cache_h,
     stride_v_cache_d,
     stride_v_cache_bl,
+    num_queries_per_kv: int,
     BLOCK_M: tl.constexpr,
     BLOCK_DMODEL: tl.constexpr,
     BLOCK_N: tl.constexpr,
@@ -53,6 +54,8 @@
     cur_head = tl.program_id(1)
     start_m = tl.program_id(2)
 
+    cur_kv_head = cur_head // num_queries_per_kv
+
     cur_batch_ctx_len = tl.load(B_Ctxlen + cur_batch)
     cur_batch_seq_len = tl.load(B_Seqlen + cur_batch)
     cur_batch_in_all_start_index = tl.load(B_Start_Loc + cur_batch)
@@ -85,13 +88,14 @@
             mask=(start_n + offs_n) < cur_batch_ctx_len,
             other=0)
         off_k = (bn[None, :] * stride_k_cache_bs +
-                 cur_head * stride_k_cache_h +
+                 cur_kv_head * stride_k_cache_h +
                  (offs_d[:, None] // x) * stride_k_cache_d +
                  ((start_n + offs_n[None, :]) % block_size) *
                  stride_k_cache_bl +
                  (offs_d[:, None] % x) * stride_k_cache_x)
         off_v = (
-            bn[:, None] * stride_v_cache_bs + cur_head * stride_v_cache_h +
+            bn[:, None] * stride_v_cache_bs +
+            cur_kv_head * stride_v_cache_h +
             offs_d[None, :] * stride_v_cache_d +
             (start_n + offs_n[:, None]) % block_size * stride_v_cache_bl)
         k = tl.load(K_cache + off_k,
@@ -131,9 +135,9 @@
         l_i = l_i_new
         m_i = m_i_new
 
-    off_k = (offs_n[None, :] * stride_kbs + cur_head * stride_kh +
+    off_k = (offs_n[None, :] * stride_kbs + cur_kv_head * stride_kh +
              offs_d[:, None] * stride_kd)
-    off_v = (offs_n[:, None] * stride_vbs + cur_head * stride_vh +
+    off_v = (offs_n[:, None] * stride_vbs + cur_kv_head * stride_vh +
              offs_d[None, :] * stride_vd)
     k_ptrs = K + off_k
     v_ptrs = V + off_v
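The off_k expression above is plain stride arithmetic over the packed cache: for a contiguous K_cache of shape [num_blocks, num_kv_heads, head_size // x, block_size, x], the flat element offset is the sum of index-times-stride terms, and cur_kv_head selects the single kv head shared by the whole query group. A torch-only sanity check of that equivalence (illustrative sizes; bn stands in for the physical block number the kernel reads from the block table):

    import torch

    num_blocks, num_kv_heads, head_size, block_size, x = 4, 2, 128, 16, 8
    k_cache = torch.arange(num_blocks * num_kv_heads * (head_size // x) *
                           block_size * x, dtype=torch.float32)
    k_cache = k_cache.view(num_blocks, num_kv_heads, head_size // x,
                           block_size, x)
    s_bs, s_h, s_d, s_bl, s_x = k_cache.stride()

    bn, cur_kv_head, d, slot = 3, 1, 77, 9  # block, kv head, head dim, slot
    off_k = (bn * s_bs + cur_kv_head * s_h + (d // x) * s_d + slot * s_bl +
             (d % x) * s_x)
    assert k_cache.reshape(-1)[off_k] == k_cache[bn, cur_kv_head, d // x,
                                                 slot, d % x]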
@@ -232,6 +236,7 @@ def _fwd_kernel_flash_attn_v2(
     stride_v_cache_h,
     stride_v_cache_d,
     stride_v_cache_bl,
+    num_queries_per_kv: int,
     BLOCK_M: tl.constexpr,
     BLOCK_DMODEL: tl.constexpr,
     BLOCK_N: tl.constexpr,
@@ -240,6 +245,8 @@
     cur_head = tl.program_id(1)
     start_m = tl.program_id(2)
 
+    cur_kv_head = cur_head // num_queries_per_kv
+
     cur_batch_ctx_len = tl.load(B_Ctxlen + cur_batch)
     cur_batch_seq_len = tl.load(B_Seqlen + cur_batch)
     cur_batch_in_all_start_index = tl.load(B_Start_Loc + cur_batch)
@@ -272,13 +279,14 @@
             mask=(start_n + offs_n) < cur_batch_ctx_len,
             other=0)
         off_k = (bn[None, :] * stride_k_cache_bs +
-                 cur_head * stride_k_cache_h +
+                 cur_kv_head * stride_k_cache_h +
                  (offs_d[:, None] // x) * stride_k_cache_d +
                  ((start_n + offs_n[None, :]) % block_size) *
                  stride_k_cache_bl +
                  (offs_d[:, None] % x) * stride_k_cache_x)
         off_v = (
-            bn[:, None] * stride_v_cache_bs + cur_head * stride_v_cache_h +
+            bn[:, None] * stride_v_cache_bs +
+            cur_kv_head * stride_v_cache_h +
             offs_d[None, :] * stride_v_cache_d +
             (start_n + offs_n[:, None]) % block_size * stride_v_cache_bl)
         k = tl.load(K_cache + off_k,
@@ -317,9 +325,9 @@
         l_i = l_i_new
         m_i = m_i_new
 
-    off_k = (offs_n[None, :] * stride_kbs + cur_head * stride_kh +
+    off_k = (offs_n[None, :] * stride_kbs + cur_kv_head * stride_kh +
              offs_d[:, None] * stride_kd)
-    off_v = (offs_n[:, None] * stride_vbs + cur_head * stride_vh +
+    off_v = (offs_n[:, None] * stride_vbs + cur_kv_head * stride_vh +
              offs_d[None, :] * stride_vd)
     k_ptrs = K + off_k
     v_ptrs = V + off_v
@@ -420,6 +428,7 @@ def _fwd_kernel_alibi(
     stride_v_cache_h,
     stride_v_cache_d,
     stride_v_cache_bl,
+    num_queries_per_kv: int,
     BLOCK_M: tl.constexpr,
     BLOCK_DMODEL: tl.constexpr,
     BLOCK_N: tl.constexpr,
@@ -429,6 +438,8 @@
     cur_head = tl.program_id(1)
     start_m = tl.program_id(2)
 
+    cur_kv_head = cur_head // num_queries_per_kv
+
     # cur_batch_seq_len: the length of prompts
     # cur_batch_ctx_len: the length of prefix
     # cur_batch_in_all_start_index: the start id of the dim=0
@@ -468,13 +479,14 @@
             mask=(start_n + offs_n) < cur_batch_ctx_len,
             other=0)
         off_k = (bn[None, :] * stride_k_cache_bs +
-                 cur_head * stride_k_cache_h +
+                 cur_kv_head * stride_k_cache_h +
                  (offs_d[:, None] // x) * stride_k_cache_d +
                  ((start_n + offs_n[None, :]) % block_size) *
                  stride_k_cache_bl +
                  (offs_d[:, None] % x) * stride_k_cache_x)
         off_v = (
-            bn[:, None] * stride_v_cache_bs + cur_head * stride_v_cache_h +
+            bn[:, None] * stride_v_cache_bs +
+            cur_kv_head * stride_v_cache_h +
             offs_d[None, :] * stride_v_cache_d +
             (start_n + offs_n[:, None]) % block_size * stride_v_cache_bl)
         k = tl.load(K_cache + off_k,
@@ -522,9 +534,9 @@
         l_i = l_i_new
         m_i = m_i_new
 
-    off_k = (offs_n[None, :] * stride_kbs + cur_head * stride_kh +
+    off_k = (offs_n[None, :] * stride_kbs + cur_kv_head * stride_kh +
             offs_d[:, None] * stride_kd)
-    off_v = (offs_n[:, None] * stride_vbs + cur_head * stride_vh +
+    off_v = (offs_n[:, None] * stride_vbs + cur_kv_head * stride_vh +
             offs_d[None, :] * stride_vd)
     k_ptrs = K + off_k
     v_ptrs = V + off_v
@@ -628,6 +640,7 @@ def context_attention_fwd(q,
 
     sm_scale = 1.0 / (Lq**0.5)
     batch, head = b_seq_len.shape[0], q.shape[1]
+    num_queries_per_kv = q.shape[1] // k.shape[1]
 
     grid = (batch, head, triton.cdiv(max_input_len, BLOCK))  # batch, head,
 
@@ -674,6 +687,7 @@ def context_attention_fwd(q,
         v_cache.stride(2),
         v_cache.stride(
             3),  #[num_blocks, num_kv_heads, head_size, block_size]
+        num_queries_per_kv=num_queries_per_kv,
         BLOCK_M=BLOCK,
         BLOCK_DMODEL=Lk,
         BLOCK_N=BLOCK,
@@ -721,6 +735,7 @@
         v_cache.stride(2),
         v_cache.stride(
             3),  #[num_blocks, num_kv_heads, head_size, block_size]
+        num_queries_per_kv=num_queries_per_kv,
         BLOCK_M=BLOCK,
         BLOCK_DMODEL=Lk,
         BLOCK_N=BLOCK,
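On the host side, the only new plumbing is that num_queries_per_kv is inferred from the query and key head counts and forwarded to every kernel launch; the grid is unchanged and still enumerates (sequence, query head, query block). A rough illustration with assumed shapes (cdiv written out so the snippet has no Triton dependency):

    import torch

    # Assumed shapes: 1024 prompt tokens, 64 query heads, 8 kv heads, head size 128.
    q = torch.empty(1024, 64, 128, dtype=torch.float16)
    k = torch.empty(1024, 8, 128, dtype=torch.float16)

    num_queries_per_kv = q.shape[1] // k.shape[1]  # 8, as in context_attention_fwd
    batch, head = 4, q.shape[1]
    max_input_len, BLOCK = 512, 128

    # One program per (sequence, query head, query block); each program picks its
    # kv head internally via cur_head // num_queries_per_kv.
    grid = (batch, head, (max_input_len + BLOCK - 1) // BLOCK)
    print(grid, num_queries_per_kv)  # (4, 64, 4) 8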
