
Add Ukrainian translation of README.md #39212

Open · wants to merge 6 commits into base: main
1 change: 1 addition & 0 deletions README.md
@@ -44,6 +44,7 @@ limitations under the License.
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_ja.md">日本語</a> |
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_hd.md">हिन्दी</a> |
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_ru.md">Русский</a> |
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_uk.md">Українська</a> |
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_pt-br.md">Рortuguês</a> |
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_te.md">తెలుగు</a> |
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_fr.md">Français</a> |
330 changes: 330 additions & 0 deletions i18n/README_uk.md

Large diffs are not rendered by default.

2 changes: 2 additions & 0 deletions src/transformers/models/auto/configuration_auto.py
@@ -399,6 +399,7 @@
("zamba", "ZambaConfig"),
("zamba2", "Zamba2Config"),
("zoedepth", "ZoeDepthConfig"),
("mobilenet_v5", "MobileNetV5Config"),
]
)

@@ -627,6 +628,7 @@
("mobilebert", "MobileBERT"),
("mobilenet_v1", "MobileNetV1"),
("mobilenet_v2", "MobileNetV2"),
("mobilenet_v5", "MobileNetV5"),
("mobilevit", "MobileViT"),
("mobilevitv2", "MobileViTV2"),
("modernbert", "ModernBERT"),
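
With the two `configuration_auto.py` entries above in place, `AutoConfig` should be able to resolve the `mobilenet_v5` model type by name. A minimal sketch of that lookup, assuming the new `mobilenet_v5` package is also wired into `transformers.models` so the lazy config mapping can import it:

```python
# Sketch only: assumes transformers.models.mobilenet_v5 is importable and the
# CONFIG_MAPPING_NAMES entry added in this PR is present.
from transformers import AutoConfig

config = AutoConfig.for_model("mobilenet_v5", image_size=256)
print(type(config).__name__)  # expected: MobileNetV5Config
print(config.model_type)      # expected: mobilenet_v5
```
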
6 changes: 5 additions & 1 deletion src/transformers/models/auto/image_processing_auto.py
@@ -176,6 +176,7 @@
("xclip", ("CLIPImageProcessor", "CLIPImageProcessorFast")),
("yolos", ("YolosImageProcessor", "YolosImageProcessorFast")),
("zoedepth", ("ZoeDepthImageProcessor", "ZoeDepthImageProcessorFast")),
("mobilenet_v5", "MobileNetV5ImageProcessor"),
]
)

@@ -664,4 +665,7 @@ def register(
        )


-__all__ = ["IMAGE_PROCESSOR_MAPPING", "AutoImageProcessor"]
+__all__ = [
+    "IMAGE_PROCESSOR_MAPPING",
+    "AutoImageProcessor",
+]
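
Entries in `IMAGE_PROCESSOR_MAPPING_NAMES` pair a slow image processor class with an optional fast one, which is why the new entry follows the `(slow, fast)` tuple convention with `None` for the missing fast variant. A small sanity check that needs no checkpoint, assuming the mapping is importable as usual:

```python
# Sketch: inspect the registration added above directly.
from transformers.models.auto.image_processing_auto import IMAGE_PROCESSOR_MAPPING_NAMES

slow_cls, fast_cls = IMAGE_PROCESSOR_MAPPING_NAMES["mobilenet_v5"]
print(slow_cls)  # "MobileNetV5ImageProcessor"
print(fast_cls)  # None -- no torchvision-backed fast processor for the stub yet
```
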
1 change: 1 addition & 0 deletions src/transformers/models/auto/modeling_auto.py
@@ -368,6 +368,7 @@
("yoso", "YosoModel"),
("zamba", "ZambaModel"),
("zamba2", "Zamba2Model"),
("mobilenet_v5", "MobileNetV5Model"),
]
)

8 changes: 4 additions & 4 deletions src/transformers/models/fsmt/modeling_fsmt.py
@@ -215,7 +215,7 @@ def _prepare_fsmt_decoder_inputs(


@auto_docstring
-class PretrainedFSMTModel(PreTrainedModel):
+class PreTrainedFSMTModel(PreTrainedModel):
    config_class = FSMTConfig
    base_model_prefix = "model"

@@ -908,7 +908,7 @@ def _get_shape(t):


@auto_docstring
-class FSMTModel(PretrainedFSMTModel):
+class FSMTModel(PreTrainedFSMTModel):
    _tied_weights_keys = ["decoder.embed_tokens.weight", "decoder.output_projection.weight"]

    def __init__(self, config: FSMTConfig):
@@ -1067,7 +1067,7 @@ def set_output_embeddings(self, value):
    The FSMT Model with a language modeling head. Can be used for summarization.
    """
)
-class FSMTForConditionalGeneration(PretrainedFSMTModel, GenerationMixin):
+class FSMTForConditionalGeneration(PreTrainedFSMTModel, GenerationMixin):
    base_model_prefix = "model"
    _tied_weights_keys = ["decoder.embed_tokens.weight", "decoder.output_projection.weight"]

@@ -1285,4 +1285,4 @@ def forward(
        return super().forward(positions)


__all__ = ["FSMTForConditionalGeneration", "FSMTModel", "PretrainedFSMTModel"]
__all__ = ["FSMTForConditionalGeneration", "FSMTModel", "PreTrainedFSMTModel"]
4 changes: 4 additions & 0 deletions src/transformers/models/mobilenet_v5/__init__.py
@@ -0,0 +1,4 @@
# Init file for MobileNetV5 model package
from .configuration_mobilenet_v5 import *
from .modeling_mobilenet_v5 import *
from .image_processing_mobilenet_v5 import *
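
The eager star imports above work, but they load `torch` as soon as `transformers.models.mobilenet_v5` is imported, because `modeling_mobilenet_v5` imports `torch.nn` at module level. Most model packages in this repo use a lazy `__init__.py` instead; a sketch under the assumption that the current `_LazyModule` and `define_import_structure` helpers are available (helper names can differ between versions):

```python
# Hypothetical lazy __init__.py, mirroring other model packages in the repo.
from typing import TYPE_CHECKING

from ...utils import _LazyModule
from ...utils.import_utils import define_import_structure

if TYPE_CHECKING:
    from .configuration_mobilenet_v5 import *
    from .image_processing_mobilenet_v5 import *
    from .modeling_mobilenet_v5 import *
else:
    import sys

    _file = globals()["__file__"]
    sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
```
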
43 changes: 43 additions & 0 deletions src/transformers/models/mobilenet_v5/configuration_mobilenet_v5.py
@@ -0,0 +1,43 @@
# Copyright 2024 The HuggingFace Team. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MobileNetV5 model configuration"""

from ...configuration_utils import PretrainedConfig
from ...utils import logging

logger = logging.get_logger(__name__)

class MobileNetV5Config(PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`MobileNetV5Model`].
It is used to instantiate a MobileNetV5 model according to the specified arguments, defining the model architecture.
Instantiating a configuration with the defaults will yield a minimal configuration.

Example:
>>> from transformers import MobileNetV5Config, MobileNetV5Model
>>> config = MobileNetV5Config()
>>> model = MobileNetV5Model(config)
"""
model_type = "mobilenet_v5"

def __init__(
self,
num_channels=3,
image_size=224,
num_classes=1000,
**kwargs,
):
super().__init__(**kwargs)
self.num_channels = num_channels
self.image_size = image_size
self.num_classes = num_classes
# TODO: Add more architecture-specific parameters when implementing full support
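
Even as a stub, the class inherits `save_pretrained`/`from_pretrained` from `PretrainedConfig`, so a config round-trip already works. A quick sketch (the local path is illustrative, and a top-level `MobileNetV5Config` export is assumed):

```python
from transformers import MobileNetV5Config  # assumes the class is re-exported at the top level

cfg = MobileNetV5Config(image_size=256, num_classes=10)
cfg.save_pretrained("./mobilenet_v5_stub")  # writes config.json
reloaded = MobileNetV5Config.from_pretrained("./mobilenet_v5_stub")
assert reloaded.image_size == 256
assert reloaded.model_type == "mobilenet_v5"
```
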
31 changes: 31 additions & 0 deletions src/transformers/models/mobilenet_v5/image_processing_mobilenet_v5.py
@@ -0,0 +1,31 @@
# Copyright 2024 The HuggingFace Team. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MobileNetV5 image processor stub implementation"""

from ...image_processing_utils import BaseImageProcessor
from ...utils import logging

logger = logging.get_logger(__name__)

class MobileNetV5ImageProcessor(BaseImageProcessor):
"""
Minimal stub for MobileNetV5 image processor. Does not implement real preprocessing logic.
"""
model_input_names = ["pixel_values"]

def __init__(self, **kwargs):
super().__init__(**kwargs)
logger.warning("MobileNetV5ImageProcessor is a stub and does not perform real preprocessing.")

def preprocess(self, images, **kwargs):
# Stub: just return input as is
return images
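
As written, `preprocess` is a pass-through, so callers get their images back untouched. A tiny sketch of that behaviour, importing the class from its module directly (the auto mapping needs a checkpoint to dispatch):

```python
from transformers.models.mobilenet_v5.image_processing_mobilenet_v5 import (
    MobileNetV5ImageProcessor,
)

processor = MobileNetV5ImageProcessor()  # emits the stub warning
images = [[0.0, 1.0], [2.0, 3.0]]  # stand-in for real image data
assert processor.preprocess(images) is images  # no resizing or normalization yet
```
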
35 changes: 35 additions & 0 deletions src/transformers/models/mobilenet_v5/modeling_mobilenet_v5.py
@@ -0,0 +1,35 @@
# Copyright 2024 The HuggingFace Team. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MobileNetV5 model stub implementation"""

from ...modeling_utils import PreTrainedModel
from .configuration_mobilenet_v5 import MobileNetV5Config
import torch.nn as nn

class MobileNetV5Model(PreTrainedModel):
"""
This class provides a minimal stub for the MobileNetV5 model architecture.
Currently, it does not implement any real logic and serves only to avoid 'Unknown Model' errors.
Contributions for a full implementation are welcome!
"""
config_class = MobileNetV5Config

def __init__(self, config: MobileNetV5Config):
super().__init__(config)
# Minimal stub: a single dummy layer
self.dummy = nn.Identity()

def forward(self, pixel_values=None, **kwargs):
# This is a stub. Real implementation required for actual use.
if pixel_values is None:
raise ValueError("pixel_values must be provided for MobileNetV5Model (stub)")
return self.dummy(pixel_values)
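
Because the body is just `nn.Identity`, a forward pass currently echoes `pixel_values`. A short end-to-end sketch of the stub, assuming `torch` is installed and the package is importable:

```python
import torch

from transformers.models.mobilenet_v5.configuration_mobilenet_v5 import MobileNetV5Config
from transformers.models.mobilenet_v5.modeling_mobilenet_v5 import MobileNetV5Model

model = MobileNetV5Model(MobileNetV5Config())
pixel_values = torch.randn(1, 3, 224, 224)

with torch.no_grad():
    outputs = model(pixel_values=pixel_values)

assert torch.equal(outputs, pixel_values)  # the stub returns the input unchanged
```
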
2 changes: 1 addition & 1 deletion utils/check_repo.py
@@ -1000,7 +1000,7 @@ def find_all_documented_objects() -> list[str]:
"LineByLineWithSOPTextDataset",
"NerPipeline",
"PretrainedBartModel",
"PretrainedFSMTModel",
"PreTrainedFSMTModel",
"SingleSentenceClassificationProcessor",
"SquadDataTrainingArguments",
"SquadDataset",