
Commit 2ca097e

Cyrilvallez authored and zaristei committed
[modular] Follow global indexing and attribute setting, and their dependencies (huggingface#39180)
* export global indexing statements
* add example
* style
* examples
1 parent 393765c commit 2ca097e
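The generated file added below comes from a modular source, examples/modular-transformers/modular_global_indexing.py, which is not reproduced in this excerpt. As a rough sketch of the pattern being exercised (the Llama base classes and the GlobalIndexingConfig subclassing shown here are assumptions, not taken from this diff), the modular file defines a module-level AttentionInterface instance and mutates it with an indexing assignment, which the converter must now export along with its dependencies:

# Hypothetical sketch of a modular file using a global indexing statement.
# The Llama bases are assumptions; only the registry pattern is the point.
from transformers.modeling_utils import AttentionInterface
from transformers.models.llama.configuration_llama import LlamaConfig
from transformers.models.llama.modeling_llama import LlamaAttention


class GlobalIndexingConfig(LlamaConfig):
    pass


def custom_flex(x, **kwargs):
    """Dummy function."""
    return x


ALL_ATTENTION_FUNCTIONS = AttentionInterface()
# This indexing statement and associated function should be exported correctly!
ALL_ATTENTION_FUNCTIONS["flex_attention"] = custom_flex


class GlobalIndexingAttention(LlamaAttention):
    pass

The converter unrolls the inherited attention class and, with this change, also carries over the module-level registry, the indexing assignment, and the function it points to.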

8 files changed: +254 additions, -85 deletions

examples/modular-transformers/modeling_global_indexing.py

Lines changed: 169 additions & 0 deletions
@@ -0,0 +1,169 @@
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# This file was automatically generated from examples/modular-transformers/modular_global_indexing.py.
# Do NOT edit this file manually as any edits will be overwritten by the generation of
# the file from the modular. If any change should be done, please apply the change to the
# modular_global_indexing.py file directly. One of our CI enforces this.
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
from typing import Callable, Optional

import torch
from torch import nn

from transformers.modeling_utils import AttentionInterface

from ...cache_utils import Cache
from ...processing_utils import Unpack
from ...utils import TransformersKwargs
from .configuration_global_indexing import GlobalIndexingConfig


def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)


def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
    """Applies Rotary Position Embedding to the query and key tensors.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        position_ids (`torch.Tensor`, *optional*):
            Deprecated and unused.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
            sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
            that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
            k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
            cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
            the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
    Returns:
        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
    """
    cos = cos.unsqueeze(unsqueeze_dim)
    sin = sin.unsqueeze(unsqueeze_dim)
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    return q_embed, k_embed


def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """
    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
    num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
    """
    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)


def eager_attention_forward(
    module: nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: Optional[torch.Tensor],
    scaling: float,
    dropout: float = 0.0,
    **kwargs: Unpack[TransformersKwargs],
):
    key_states = repeat_kv(key, module.num_key_value_groups)
    value_states = repeat_kv(value, module.num_key_value_groups)

    attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
    if attention_mask is not None:
        causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
        attn_weights = attn_weights + causal_mask

    attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
    attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
    attn_output = torch.matmul(attn_weights, value_states)
    attn_output = attn_output.transpose(1, 2).contiguous()

    return attn_output, attn_weights


def custom_flex(x, **kwargs):
    """Dummy function."""
    return x


ALL_ATTENTION_FUNCTIONS = AttentionInterface()
# This indexing statement and associated function should be exported correctly!
ALL_ATTENTION_FUNCTIONS["flex_attention"] = custom_flex


class GlobalIndexingAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: GlobalIndexingConfig, layer_idx: int):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
        self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
        self.scaling = self.head_dim**-0.5
        self.attention_dropout = config.attention_dropout
        self.is_causal = True

        self.q_proj = nn.Linear(
            config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias
        )
        self.k_proj = nn.Linear(
            config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
        )
        self.v_proj = nn.Linear(
            config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
        )
        self.o_proj = nn.Linear(
            config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.attention_bias
        )

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: tuple[torch.Tensor, torch.Tensor],
        attention_mask: Optional[torch.Tensor],
        past_key_value: Optional[Cache] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple[torch.Tensor, torch.Tensor]:
        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, self.head_dim)

        query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)

        cos, sin = position_embeddings
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)

        if past_key_value is not None:
            # sin and cos are specific to RoPE models; cache_position needed for the static cache
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]

        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            **kwargs,
        )

        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        attn_output = self.o_proj(attn_output)
        return attn_output, attn_weights
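Because the module-level statements are exported verbatim, the registration of custom_flex survives generation and drives the same dispatch that GlobalIndexingAttention.forward performs. The standalone sketch below mirrors that lookup with a dummy config object (SimpleNamespace stands in for a real GlobalIndexingConfig, which is not shown in this diff):

from types import SimpleNamespace

from transformers.modeling_utils import AttentionInterface


def custom_flex(x, **kwargs):
    """Dummy function."""
    return x


ALL_ATTENTION_FUNCTIONS = AttentionInterface()
ALL_ATTENTION_FUNCTIONS["flex_attention"] = custom_flex

# Same dispatch logic as GlobalIndexingAttention.forward: any implementation other
# than "eager" is resolved through the registry populated by the indexing statement.
config = SimpleNamespace(_attn_implementation="flex_attention")
attention_interface = ALL_ATTENTION_FUNCTIONS[config._attn_implementation]
assert attention_interface is custom_flex

If the indexing statement were dropped during generation, this lookup would no longer resolve to the dummy function defined alongside the model.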

examples/modular-transformers/modeling_multimodal2.py

Lines changed: 0 additions & 2 deletions
@@ -289,7 +289,6 @@ def __init__(self, config):
         self.layers = nn.ModuleList([Multimodal2VisionEncoderLayer(config) for _ in range(config.num_hidden_layers)])
         self.gradient_checkpointing = False
 
-    @can_return_tuple
     def forward(
         self,
         inputs_embeds,
@@ -455,7 +454,6 @@ def __init__(self, config):
         self.encoder = Multimodal2VisionEncoder(config)
         self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
 
-    @can_return_tuple
     @auto_docstring
     def forward(
         self,

examples/modular-transformers/modeling_my_new_model2.py

Lines changed: 19 additions & 59 deletions
@@ -12,13 +12,13 @@
 from ...activations import ACT2FN
 from ...cache_utils import Cache, DynamicCache
 from ...masking_utils import create_causal_mask
-from ...modeling_flash_attention_utils import FlashAttentionKwargs
 from ...modeling_layers import GradientCheckpointingLayer
 from ...modeling_outputs import BaseModelOutputWithPast, SequenceClassifierOutputWithPast
 from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
 from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
 from ...processing_utils import Unpack
-from ...utils import auto_docstring, can_return_tuple, logging
+from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, logging
+from ...utils.generic import check_model_inputs
 from .configuration_my_new_model2 import MyNewModel2Config
 
 
@@ -149,7 +149,7 @@ def eager_attention_forward(
     attention_mask: Optional[torch.Tensor],
     scaling: float,
     dropout: float = 0.0,
-    **kwargs,
+    **kwargs: Unpack[TransformersKwargs],
 ):
     key_states = repeat_kv(key, module.num_key_value_groups)
     value_states = repeat_kv(value, module.num_key_value_groups)
@@ -200,8 +200,8 @@ def forward(
         attention_mask: Optional[torch.Tensor],
         past_key_value: Optional[Cache] = None,
         cache_position: Optional[torch.LongTensor] = None,
-        **kwargs: Unpack[FlashAttentionKwargs],
-    ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
+        **kwargs: Unpack[TransformersKwargs],
+    ) -> tuple[torch.Tensor, torch.Tensor]:
         input_shape = hidden_states.shape[:-1]
         hidden_shape = (*input_shape, -1, self.head_dim)
 
@@ -254,22 +254,19 @@ def forward(
         attention_mask: Optional[torch.Tensor] = None,
         position_ids: Optional[torch.LongTensor] = None,
         past_key_value: Optional[Cache] = None,
-        output_attentions: Optional[bool] = False,
         use_cache: Optional[bool] = False,
         cache_position: Optional[torch.LongTensor] = None,
         position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,  # necessary, but kept here for BC
-        **kwargs: Unpack[FlashAttentionKwargs],
-    ) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
+        **kwargs: Unpack[TransformersKwargs],
+    ) -> tuple[torch.Tensor]:
         residual = hidden_states
         hidden_states = self.input_layernorm(hidden_states)
-
         # Self Attention
-        hidden_states, self_attn_weights = self.self_attn(
+        hidden_states, _ = self.self_attn(
             hidden_states=hidden_states,
             attention_mask=attention_mask,
             position_ids=position_ids,
             past_key_value=past_key_value,
-            output_attentions=output_attentions,
             use_cache=use_cache,
             cache_position=cache_position,
             position_embeddings=position_embeddings,
@@ -282,12 +279,7 @@ def forward(
         hidden_states = self.post_attention_layernorm(hidden_states)
         hidden_states = self.mlp(hidden_states)
         hidden_states = residual + hidden_states
-
-        outputs = (hidden_states,)
-        if output_attentions:
-            outputs += (self_attn_weights,)
-
-        return outputs
+        return hidden_states
 
 
 @auto_docstring
@@ -304,6 +296,10 @@ class MyNewModel2PreTrainedModel(PreTrainedModel):
     _supports_quantized_cache = True
     _supports_static_cache = True
     _supports_attention_backend = True
+    _can_record_outputs = {
+        "hidden_states": MyNewModel2DecoderLayer,
+        "attentions": MyNewModel2Attention,
+    }
 
     def _init_weights(self, module):
         std = self.config.initializer_range
@@ -343,7 +339,7 @@ def get_input_embeddings(self):
     def set_input_embeddings(self, value):
        self.embed_tokens = value
 
-    @can_return_tuple
+    @check_model_inputs
     @auto_docstring
     def forward(
         self,
@@ -353,26 +349,12 @@ def forward(
         past_key_values: Optional[Cache] = None,
         inputs_embeds: Optional[torch.FloatTensor] = None,
         use_cache: Optional[bool] = None,
-        output_attentions: Optional[bool] = None,
-        output_hidden_states: Optional[bool] = None,
         cache_position: Optional[torch.LongTensor] = None,
-        **kwargs: Unpack[FlashAttentionKwargs],
+        **kwargs: Unpack[TransformersKwargs],
     ) -> BaseModelOutputWithPast:
-        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
-        output_hidden_states = (
-            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
-        )
-        use_cache = use_cache if use_cache is not None else self.config.use_cache
-
         if (input_ids is None) ^ (inputs_embeds is not None):
             raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
 
-        if self.gradient_checkpointing and self.training and use_cache:
-            logger.warning_once(
-                "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
-            )
-            use_cache = False
-
         if inputs_embeds is None:
             inputs_embeds = self.embed_tokens(input_ids)
 
@@ -394,6 +376,7 @@ def forward(
             attention_mask=attention_mask,
             cache_position=cache_position,
             past_key_values=past_key_values,
+            position_ids=position_ids,
         )
 
         # embed positions
@@ -408,42 +391,21 @@ def forward(
         normalizer = torch.tensor(self.config.hidden_size**0.5, dtype=hidden_states.dtype)
         hidden_states = hidden_states * normalizer
 
-        # decoder layers
-        all_hidden_states = () if output_hidden_states else None
-        all_self_attns = () if output_attentions else None
-
         for decoder_layer in self.layers[: self.config.num_hidden_layers]:
-            if output_hidden_states:
-                all_hidden_states += (hidden_states,)
-
-            layer_outputs = decoder_layer(
+            hidden_states = decoder_layer(
                 hidden_states,
                 attention_mask=causal_mask,
                 position_ids=position_ids,
                 past_key_value=past_key_values,
-                output_attentions=output_attentions,
                 use_cache=use_cache,
                 cache_position=cache_position,
                 position_embeddings=position_embeddings,
                 **kwargs,
             )
-
-            hidden_states = layer_outputs[0]
-
-            if output_attentions:
-                all_self_attns += (layer_outputs[1],)
-
         hidden_states = self.norm(hidden_states)
-
-        # add hidden states from the last decoder layer
-        if output_hidden_states:
-            all_hidden_states += (hidden_states,)
-
         return BaseModelOutputWithPast(
             last_hidden_state=hidden_states,
             past_key_values=past_key_values if use_cache else None,
-            hidden_states=all_hidden_states,
-            attentions=all_self_attns,
         )
 
 
@@ -488,8 +450,7 @@ def forward(
         inputs_embeds: Optional[torch.FloatTensor] = None,
         labels: Optional[torch.LongTensor] = None,
         use_cache: Optional[bool] = None,
-        output_attentions: Optional[bool] = None,
-        output_hidden_states: Optional[bool] = None,
+        **kwargs: Unpack[TransformersKwargs],
    ) -> SequenceClassifierOutputWithPast:
         r"""
         labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
@@ -505,8 +466,7 @@ def forward(
             past_key_values=past_key_values,
             inputs_embeds=inputs_embeds,
             use_cache=use_cache,
-            output_attentions=output_attentions,
-            output_hidden_states=output_hidden_states,
+            **kwargs,
         )
         hidden_states = transformer_outputs.last_hidden_state
         logits = self.score(hidden_states)
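The modeling_my_new_model2.py changes replace the hand-rolled output_attentions / output_hidden_states bookkeeping with a _can_record_outputs mapping consumed by the check_model_inputs decorator. The standalone toy below sketches the general idea behind that pattern: register forward hooks on the layer classes named in the mapping and collect their outputs, instead of threading flags through every forward. It is a conceptual illustration, not the transformers implementation; all names in it are made up:

# Toy sketch of class-keyed output recording via forward hooks (illustrative only).
import torch
from torch import nn


class TinyLayer(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear = nn.Linear(8, 8)

    def forward(self, x):
        return self.linear(x)


class TinyModel(nn.Module):
    # Which intermediate outputs can be recorded, keyed by the layer class producing them.
    _can_record_outputs = {"hidden_states": TinyLayer}

    def __init__(self):
        super().__init__()
        self.layers = nn.ModuleList([TinyLayer() for _ in range(3)])

    def forward(self, x):
        for layer in self.layers:
            x = layer(x)
        return x


def run_and_record(model, x):
    records = {name: [] for name in model._can_record_outputs}
    handles = []
    for name, layer_cls in model._can_record_outputs.items():
        for module in model.modules():
            if isinstance(module, layer_cls):
                # Default argument binds the current name inside the hook closure.
                handles.append(module.register_forward_hook(
                    lambda mod, inputs, output, name=name: records[name].append(output)
                ))
    try:
        output = model(x)
    finally:
        for handle in handles:
            handle.remove()
    return output, records


out, recorded = run_and_record(TinyModel(), torch.randn(2, 8))
print(len(recorded["hidden_states"]))  # 3: one recorded tensor per TinyLayer

This keeps the per-layer forward signatures minimal, which is exactly what the diff does by dropping the output_attentions plumbing from MyNewModel2DecoderLayer and the model forward.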
