
Commit 25b1903

[main][bugfix] Fix bugs and refactor cached mask generation logic
Signed-off-by: rjg-lyh <[email protected]>
1 parent 3f867ee commit 25b1903

4 files changed: +41 additions, -78 deletions


tests/ut/attention/test_attention_mask.py

Lines changed: 7 additions & 19 deletions
@@ -77,34 +77,22 @@ def test_get_splitfuse_attn_mask(self):
         attention_mask_builder = AttentionMaskBuilder(max_seq_len=1024,
                                                       dtype=torch.float16)
         attn_mask = attention_mask_builder.get_splitfuse_attn_mask(
-            seq_lens=[512],
-            query_lens=[512],
-            position=torch.tensor([0]),
+            seq_lens=torch.tensor([10, 20, 100]),
+            position=torch.tensor([7, 8, 9, 18, 19, 99]),
             dtype=torch.float16,
             device=torch.device("cpu"),
         )
-        self.assertEqual(attn_mask.shape, (1, 512))
+        self.assertEqual(attn_mask.shape, (6, 100))
         self.assertEqual(attention_mask_builder._seq_len_cached, 1024)

         attn_mask = attention_mask_builder.get_splitfuse_attn_mask(
-            seq_lens=[2048],
-            query_lens=[1024],
-            position=torch.tensor([0]),
+            seq_lens=torch.tensor([10, 3000, 2000]),
+            position=torch.tensor([7, 8, 9, 2999, 1999]),
             dtype=torch.float16,
             device=torch.device("cpu"),
         )
-        self.assertEqual(attn_mask.shape, (1024, 2048))
-
-        attention_mask_builder = AttentionMaskBuilder(max_seq_len=1024,
-                                                      dtype=torch.int8)
-        attn_mask = attention_mask_builder.get_splitfuse_attn_mask(
-            seq_lens=[512],
-            query_lens=[512],
-            position=torch.tensor([0]),
-            dtype=torch.int8,
-            device=torch.device("cpu"),
-        )
-        self.assertEqual(attn_mask.shape, (1, 512))
+        self.assertEqual(attn_mask.shape, (5, 3000))
+        self.assertEqual(attention_mask_builder._seq_len_cached, 3000)

     def test_use_multiple_masks(self):
         max_seq_lens = [128, 512, 1024]
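
The expected shapes in the updated test fall out of the new gather-based mask construction: the builder keeps a square cached causal mask and returns one row per query position, truncated to the longest sequence in the batch. A minimal standalone sketch of that shape logic follows; the upper-triangular cache here is only an assumed stand-in for _generate_attn_mask, not the project code.

import torch

def sketch_splitfuse_mask(cached_mask: torch.Tensor,
                          seq_lens: torch.Tensor,
                          position: torch.Tensor) -> torch.Tensor:
    max_seq_len = int(seq_lens.max()) if seq_lens.numel() else 0
    # One mask row per query position, truncated to the longest sequence.
    return torch.index_select(cached_mask, dim=0, index=position)[:, :max_seq_len]

# Assumed stand-in for the cached mask: ones above the diagonal mark masked slots.
cached = torch.triu(torch.ones(1024, 1024, dtype=torch.float16), diagonal=1)
mask = sketch_splitfuse_mask(cached,
                             seq_lens=torch.tensor([10, 20, 100]),
                             position=torch.tensor([7, 8, 9, 18, 19, 99]))
print(mask.shape)  # torch.Size([6, 100]), matching the first assertion

When max(seq_lens) exceeds the cached size, as in the second call of the test, the builder first regrows the cache via _update_attn_cache, which is why _seq_len_cached ends at 3000.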

vllm_ascend/attention/attention_mask.py

Lines changed: 22 additions & 42 deletions
@@ -44,61 +44,41 @@ def __init__(

         self._seq_len_cached = attn_mask.shape[0]
         self.attn_mask_cache = attn_mask
-        self.splitfuse_mask_value = -10000
+
+    @staticmethod
+    def get_mask_scale_factor(dtype: torch.dtype = torch.float16):
+        mask_scale_factor = 1
+        if dtype == torch.bfloat16:
+            mask_scale_factor = -10000
+        return mask_scale_factor

     def get_attn_mask(self, max_seq_len: int, dtype: torch.dtype,
                       device: torch.device):
-        self._update_attn_cache(max_seq_len, dtype, device)
-        return self.attn_mask_cache[:max_seq_len, :max_seq_len].contiguous()
+        self._update_attn_cache(max_seq_len, dtype)
+        return self.attn_mask_cache[:max_seq_len, :max_seq_len].contiguous(
+        ).to(device)

     def get_splitfuse_attn_mask(
         self,
         seq_lens,
-        query_lens,
         position,
         dtype,
         device,
     ) -> torch.Tensor:
         max_seq_len = max(seq_lens, default=0)
-        if max_seq_len <= self._seq_len_cached:
-            self._update_attn_cache(max_seq_len, dtype, device)
-            # FIXME: Currently the mask value of chunked-prefill situation and Prefill-Only situation
-            # is not the same. Fix this in the future when kernel is ready.
-            if self.attn_mask_cache.numel(
-            ) > 1 and self.attn_mask_cache[0][1] > 0:
-                attn_mask = self.get_attn_mask(  # type: ignore
-                    max_seq_len, dtype, device)
-                # Do not use in-place multiplication to avoid modifying `self.attn_mask_cache`!
-                attn_mask = attn_mask * -10000
-            else:
-                attn_mask = self.attn_mask_cache
-            return torch.index_select(attn_mask, dim=0,
-                                      index=position)[:, :max_seq_len]
-        total_q_len = sum(query_lens)
-        attn_mask = torch.zeros((total_q_len, max_seq_len),
-                                dtype=dtype,
-                                device="cpu")
-        current_row = 0
-        for i in range(len(query_lens)):
-            seq_len = seq_lens[i]
-            q_len = query_lens[i]
-            context_len = seq_len - q_len
-
-            assert context_len >= 0
-            attn_mask[current_row:current_row + q_len,
-                      context_len:] = self.splitfuse_mask_value
-            right_tensor = attn_mask[current_row:current_row + q_len,
-                                     context_len:seq_len]
-            right_tensor.masked_fill_(
-                right_tensor.tril() == self.splitfuse_mask_value, 0)
-            current_row += q_len
-
-        return attn_mask.to(device, non_blocking=True)
+        self._update_attn_cache(max_seq_len, dtype)
+        # FIXME: Currently the mask value of chunked-prefill situation and Prefill-Only situation
+        # is not the same. Fix this in the future when kernel is ready.
+        mask_scale_factor = AttentionMaskBuilder.get_mask_scale_factor(dtype)
+        attn_mask = torch.index_select(self.attn_mask_cache,
+                                       dim=0,
+                                       index=position)[:, :max_seq_len]
+        attn_mask *= mask_scale_factor
+        return attn_mask.contiguous().to(device, non_blocking=True)

-    def _update_attn_cache(self, seqlen: int, dtype: torch.dtype,
-                           device: torch.device):
+    def _update_attn_cache(self, seqlen: int, dtype: torch.dtype):
         if seqlen > self._seq_len_cached:
             self._seq_len_cached = seqlen
             self.attn_mask_cache = _generate_attn_mask(seqlen, dtype)
-        if self.attn_mask_cache.device != device:
-            self.attn_mask_cache = self.attn_mask_cache.to(device)
+        if self.attn_mask_cache.dtype != dtype:
+            self.attn_mask_cache = self.attn_mask_cache.to(dtype)
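
Taken together, the refactor turns AttentionMaskBuilder into a CPU-side, dtype-tracked cache: _update_attn_cache only regrows or re-casts the cached mask, the device transfer happens at the end of each getter, and the bfloat16-only -10000 scaling replaces the old splitfuse_mask_value path. A hedged usage sketch mirroring the updated unit test; the import path is inferred from the file location and assumes the class is exported there.

import torch
from vllm_ascend.attention.attention_mask import AttentionMaskBuilder

builder = AttentionMaskBuilder(max_seq_len=1024, dtype=torch.float16)

# The cache regrows lazily when a batch needs more rows than it currently holds.
mask = builder.get_splitfuse_attn_mask(
    seq_lens=torch.tensor([10, 3000, 2000]),
    position=torch.tensor([7, 8, 9, 2999, 1999]),
    dtype=torch.float16,
    device=torch.device("cpu"),
)
assert mask.shape == (5, 3000)
assert builder._seq_len_cached == 3000

# Only bfloat16 masks get the -10000 scaling; other dtypes keep the cached values.
assert AttentionMaskBuilder.get_mask_scale_factor(torch.bfloat16) == -10000
assert AttentionMaskBuilder.get_mask_scale_factor(torch.float16) == 1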

vllm_ascend/worker/eagle_proposer_v1.py

Lines changed: 1 addition & 3 deletions
@@ -79,11 +79,10 @@ def __init__(self,
     def _make_attention_mask(
         self,
         seq_lens,
-        query_lens,
         position,
     ) -> torch.Tensor:
         return self.attn_mask_builder.get_splitfuse_attn_mask(
-            seq_lens, query_lens, position, self.dtype, self.device)
+            seq_lens, position, self.dtype, self.device)

     def propose(
         self,
@@ -247,7 +246,6 @@ def propose(
         positions = positions_cpu.to(device)
         attn_mask = self._make_attention_mask(
             seq_lens=attn_metadata.seq_lens,
-            query_lens=attn_metadata.max_query_len,
             position=positions,
         )
         attn_metadata.attn_mask = attn_mask
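
With query_lens gone, the proposer's helper simply forwards per-request sequence lengths and the drafted positions to the shared builder. The sketch below is only an illustration of that call path with hypothetical lengths, positions, and a CPU device, not the real proposer.

import torch
from vllm_ascend.attention.attention_mask import AttentionMaskBuilder

class TinyProposer:
    """Illustration only: just the mask-related slice of the proposer."""

    def __init__(self, dtype=torch.float16, device=torch.device("cpu")):
        self.dtype = dtype
        self.device = device
        self.attn_mask_builder = AttentionMaskBuilder(max_seq_len=2048,
                                                      dtype=dtype)

    def _make_attention_mask(self, seq_lens, position) -> torch.Tensor:
        # query_lens is no longer threaded through the call.
        return self.attn_mask_builder.get_splitfuse_attn_mask(
            seq_lens, position, self.dtype, self.device)

proposer = TinyProposer()
mask = proposer._make_attention_mask(seq_lens=torch.tensor([33, 257]),
                                     position=torch.tensor([32, 255, 256]))
print(mask.shape)  # torch.Size([3, 257]): rows follow positions, columns follow max(seq_lens)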

vllm_ascend/worker/model_runner_v1.py

Lines changed: 11 additions & 14 deletions
@@ -20,7 +20,6 @@
 import copy
 import gc
 import math
-import os
 import time
 import types
 from contextlib import contextmanager, nullcontext
@@ -228,8 +227,7 @@ def __init__(self, vllm_config: VllmConfig, device: torch.device):
         self.attn_metadata_builder = self.attn_backend.get_builder_cls()(
             vllm_config, device)
         self.attn_mask_builder = AttentionMaskBuilder(
-            min(self.model_config.max_model_len,
-                int(os.getenv("PAGED_ATTENTION_MASK_LEN", 10000))), self.dtype)
+            self.model_config.max_model_len, self.dtype)

         # Set up speculative decoding.
         self.use_aux_hidden_state_outputs = False
@@ -847,12 +845,12 @@ def get_supported_tasks(self) -> "tuple[SupportedTask, ...]":

         return tuple(tasks)

-    def _make_attention_mask(self, seq_lens, query_lens, position,
+    def _make_attention_mask(self, seq_lens, position,
                              attn_state) -> torch.Tensor:
         # Chunk Prefill situation.
         if attn_state == AscendAttentionState.ChunkedPrefill and not self.vllm_config.model_config.use_mla:
             return self.attn_mask_builder.get_splitfuse_attn_mask(
-                seq_lens, query_lens, position, self.dtype, self.device)
+                seq_lens, position, self.dtype, self.device)
         # Prefill without cache situation.
         elif attn_state == AscendAttentionState.PrefillNoCache:
             max_seq_len = max(seq_lens, default=0)
@@ -1124,16 +1122,17 @@ def _process_reqs(
             self.mrope_positions_cpu[:, :total_num_scheduled_tokens],
             non_blocking=True)

-        self.positions[total_num_scheduled_tokens:num_input_tokens].zero_()
-        self.positions[:total_num_scheduled_tokens].copy_(
-            self.positions_cpu[:total_num_scheduled_tokens], non_blocking=True)
+        self.positions_cpu[total_num_scheduled_tokens:num_input_tokens].zero_()
+        self.positions[:num_input_tokens].copy_(
+            self.positions_cpu[:num_input_tokens], non_blocking=True)
+        positions_cpu = self.positions_cpu[:num_input_tokens]
         positions = self.positions[:num_input_tokens]
         self.query_lens = torch.from_numpy(num_scheduled_tokens)

         self.seq_lens_np[:num_reqs] = (
             self.input_batch.num_computed_tokens_cpu[:num_reqs] +
             num_scheduled_tokens)
-        seq_lens = self.seq_lens_cpu[:num_reqs]
+        seq_lens_cpu = self.seq_lens_cpu[:num_reqs]

         block_table_indices = (req_indices * self.max_num_blocks_per_req +
                                positions_np // self.block_size)
@@ -1169,11 +1168,9 @@ def _process_reqs(
         else:
             attn_state = AscendAttentionState.PrefillCacheHit

-        self.attn_mask = self._make_attention_mask(
-            seq_lens=seq_lens,
-            query_lens=num_scheduled_tokens,
-            position=positions,
-            attn_state=attn_state)
+        self.attn_mask = self._make_attention_mask(seq_lens=seq_lens_cpu,
+                                                   position=positions_cpu,
+                                                   attn_state=attn_state)
         self.attn_state = attn_state  # type: ignore

         self.query_start_loc_np[0] = 0
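
The runner-side half of the fix is the positions handling: the padding tail is now zeroed on the CPU staging buffer, a single padded slice is copied to the device, and the CPU views (positions_cpu, seq_lens_cpu) are what _make_attention_mask consumes. A minimal sketch of that staging order with invented buffer names and sizes, not the runner's real state:

import torch

def stage_positions(positions_cpu: torch.Tensor, positions_dev: torch.Tensor,
                    total_num_scheduled_tokens: int, num_input_tokens: int):
    # Zero the padding tail on the CPU buffer first ...
    positions_cpu[total_num_scheduled_tokens:num_input_tokens].zero_()
    # ... then copy the whole padded prefix to the device buffer in one shot.
    positions_dev[:num_input_tokens].copy_(positions_cpu[:num_input_tokens],
                                           non_blocking=True)
    # The CPU view feeds attention-mask construction; the device view feeds the model.
    return positions_cpu[:num_input_tokens], positions_dev[:num_input_tokens]

cpu_buf = torch.arange(16, dtype=torch.int64)  # pretend scheduled positions
dev_buf = torch.empty_like(cpu_buf)            # stand-in for the NPU-side buffer
positions_cpu, positions = stage_positions(cpu_buf, dev_buf,
                                           total_num_scheduled_tokens=10,
                                           num_input_tokens=12)
print(positions_cpu.tolist())  # first ten entries intact, the two padding slots zeroed

Handing the CPU views to the mask builder is consistent with the refactor above, where the mask is assembled on CPU and moved to the device once in a single non-blocking transfer.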
