Commit 1bb7e3f

Update gemma3.py
change code format
1 parent 8c73781

File tree

1 file changed: +9, -11 lines


vllm_ascend/models/gemma3.py

Lines changed: 9 additions & 11 deletions
@@ -4,27 +4,25 @@
 import torch
 from torch import nn
 from transformers import Gemma3TextConfig
-
 from vllm.compilation.decorators import support_torch_compile
 from vllm.config import CacheConfig, VllmConfig
+from vllm.model_executor.layers.logits_processor import LogitsProcessor
 from vllm.model_executor.layers.quantization import QuantizationConfig
 from vllm.model_executor.layers.vocab_parallel_embedding import \
     VocabParallelEmbedding
 from vllm.model_executor.model_loader.weight_utils import (
     default_weight_loader, maybe_remap_kv_scale_name)
-from vllm.model_executor.layers.logits_processor import LogitsProcessor
 from vllm.model_executor.models.gemma3 import (Gemma3DecoderLayer,
                                                Gemma3Model,
                                                Gemma3Attention,
                                                Gemma3MLP,
                                                Gemma3ForCausalLM)
-from vllm_ascend.quantization.w8a8 import AscendW8A8LinearMethod
-from vllm_ascend.ops.layernorm import AddRMSNormW8A8Quant, AscendRMSNorm
-
 from vllm.model_executor.models.interfaces import SupportsLoRA, SupportsPP
 from vllm.model_executor.models.utils import (is_pp_missing_parameter,
     make_empty_intermediate_tensors_factory, make_layers,
     maybe_prefix)
+from vllm_ascend.ops.layernorm import AddRMSNormW8A8Quant, AscendRMSNorm
+from vllm_ascend.quantization.w8a8 import AscendW8A8LinearMethod
 
 class AscendGemma3DecoderLayer(Gemma3DecoderLayer):
     def __init__(
@@ -57,12 +55,12 @@ def __init__(
             prefix=f"{prefix}.mlp",
         )
 
-        self.input_layernorm = AscendRMSNorm(
-            config.hidden_size, eps=config.rms_norm_eps)
-        self.post_attention_layernorm = AscendRMSNorm(
-            config.hidden_size, eps=config.rms_norm_eps)
-        self.pre_feedforward_layernorm = AscendRMSNorm(
-            config.hidden_size, eps=config.rms_norm_eps)
+        self.input_layernorm = AscendRMSNorm(config.hidden_size,
+                                             eps=config.rms_norm_eps)
+        self.post_attention_layernorm = AscendRMSNorm(config.hidden_size,
+                                                      eps=config.rms_norm_eps)
+        self.pre_feedforward_layernorm = AscendRMSNorm(config.hidden_size,
+                                                       eps=config.rms_norm_eps)
         self.post_feedforward_layernorm = AscendRMSNorm(
             config.hidden_size, eps=config.rms_norm_eps)
 
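For readers skimming the diff, the change is cosmetic: the import block is reordered so the vllm imports come before the vllm_ascend ones, and the three AscendRMSNorm constructor calls are rewrapped so the keyword argument aligns after the first positional argument instead of sitting on a fresh continuation line. Below is a minimal, self-contained sketch of that wrapping convention; nn.LayerNorm stands in for AscendRMSNorm so the snippet runs without vllm_ascend installed, and the hidden_size/eps values are illustrative, not taken from the model config.

# Sketch only: nn.LayerNorm is a stand-in for vllm_ascend's AscendRMSNorm,
# and the sizes below are illustrative, not read from Gemma3TextConfig.
from torch import nn

hidden_size = 1152
rms_norm_eps = 1e-6

# Old wrapping (arguments pushed onto the continuation line):
input_layernorm = nn.LayerNorm(
    hidden_size, eps=rms_norm_eps)

# New wrapping (first argument stays on the call line, the rest align under it):
post_attention_layernorm = nn.LayerNorm(hidden_size,
                                        eps=rms_norm_eps)

print(input_layernorm)
print(post_attention_layernorm)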