Skip to content

Commit fafb95e

Browse files
authored
Update gemma3.py
Adjust code formatting of import statements
1 parent 1bb7e3f commit fafb95e

File tree

1 file changed

+7
-8
lines changed

1 file changed

+7
-8
lines changed

vllm_ascend/models/gemma3.py

Lines changed: 7 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -12,15 +12,14 @@
1212
VocabParallelEmbedding
1313
from vllm.model_executor.model_loader.weight_utils import (
1414
default_weight_loader, maybe_remap_kv_scale_name)
15-
from vllm.model_executor.models.gemma3 import (Gemma3DecoderLayer,
16-
Gemma3Model,
17-
Gemma3Attention,
18-
Gemma3MLP,
19-
Gemma3ForCausalLM)
15+
from vllm.model_executor.models.gemma3 import (Gemma3Attention,
16+
Gemma3DecoderLayer,
17+
Gemma3ForCausalLM, Gemma3MLP,
18+
Gemma3Model)
2019
from vllm.model_executor.models.interfaces import SupportsLoRA, SupportsPP
21-
from vllm.model_executor.models.utils import (is_pp_missing_parameter,
22-
make_empty_intermediate_tensors_factory, make_layers,
23-
maybe_prefix)
20+
from vllm.model_executor.models.utils import (
21+
is_pp_missing_parameter, make_empty_intermediate_tensors_factory,
22+
make_layers, maybe_prefix)
2423
from vllm_ascend.ops.layernorm import AddRMSNormW8A8Quant, AscendRMSNorm
2524
from vllm_ascend.quantization.w8a8 import AscendW8A8LinearMethod
2625

0 commit comments

Comments (0)