2 changes: 1 addition & 1 deletion pyproject.toml
@@ -37,7 +37,7 @@ dependencies = [
"rich_argparse", # CLI help-formatter
# Torch and Lightning dependencies
"lightning>=2.2",
"torchmetrics>=1.3.2",
"torchmetrics>=1.8.2",
"lightning-utilities",
# Deep learning models and utilities
"timm",
5 changes: 5 additions & 0 deletions src/anomalib/data/utils/generators/perlin.py
@@ -269,6 +269,11 @@ def generate_perturbation(
"""
# Generate perlin noise
perlin_noise = generate_perlin_noise(height, width, device=device)
+# In some cases the Perlin noise is entirely below 0.5, so rescale it to the [0, 1] range
+if not (perlin_noise > 0.5).any():
+    perlin_noise = (perlin_noise - perlin_noise.min()) / (perlin_noise.max() - perlin_noise.min())
+    # then rescale to the [-1, 1] range
+    perlin_noise = (perlin_noise * 2) - 1

# Create rotated noise pattern
perlin_noise = perlin_noise.unsqueeze(0) # [1, H, W]
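The same rescaling guard is repeated in the DSR anomaly generator and the batched generate_perlin hunks below. As a minimal standalone sketch of the technique, assuming a floating-point tensor input (the helper name and defaults here are illustrative, not part of the PR):

import torch

def rescale_if_below_threshold(noise: torch.Tensor, threshold: float = 0.5) -> torch.Tensor:
    # When no value exceeds the threshold, min-max normalize to [0, 1],
    # then map linearly to [-1, 1] so the later thresholding step can
    # still produce a non-empty anomaly mask.
    # (A constant tensor would divide by zero here, matching the patch's behavior.)
    if not (noise > threshold).any():
        noise = (noise - noise.min()) / (noise.max() - noise.min())
        noise = (noise * 2) - 1
    return noise

noise = torch.rand(256, 256) * 0.4            # every value below 0.5
noise = rescale_if_below_threshold(noise)     # now spans roughly [-1, 1]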
5 changes: 5 additions & 0 deletions src/anomalib/models/image/dsr/anomaly_generator.py
@@ -75,6 +75,11 @@ def generate_anomaly(self, height: int, width: int) -> Tensor:
# Generate perlin noise using the new function
perlin_noise = generate_perlin_noise(height, width, scale=(perlin_scalex, perlin_scaley))

+# Rescale with threshold at center if all values are below the threshold
+if not (perlin_noise > threshold).any():
+    perlin_noise = (perlin_noise - perlin_noise.min()) / (perlin_noise.max() - perlin_noise.min())
+    perlin_noise = (perlin_noise * 2) - 1

# Apply random rotation
perlin_noise = perlin_noise.unsqueeze(0) # Add channel dimension for transform
perlin_noise = self.rot(perlin_noise).squeeze(0) # Remove channel dimension
@@ -63,6 +63,13 @@ def generate_perlin(self, batches: int, height: int, width: int) -> torch.Tensor
# keep power of 2 here for reproduction purpose, although this function supports power2 internally
perlin_noise = generate_perlin_noise(height=perlin_height, width=perlin_width)

+# Rescale with threshold at center if all values are below the threshold
+if not (perlin_noise > self.threshold).any():
+    # First normalize to the [0, 1] range by min-max normalization
+    perlin_noise = (perlin_noise - perlin_noise.min()) / (perlin_noise.max() - perlin_noise.min())
+    # Then rescale to the [-1, 1] range
+    perlin_noise = (perlin_noise * 2) - 1

# original is power of 2 scale, so fit to our size
perlin_noise = F.interpolate(
perlin_noise.reshape(1, 1, perlin_height, perlin_width),
11 changes: 5 additions & 6 deletions src/anomalib/models/image/vlm_ad/backends/chat_gpt.py
@@ -36,12 +36,6 @@

from lightning_utilities.core.imports import module_available

-# Optional import for dotenv
-try:
-    from dotenv import load_dotenv
-except ImportError:
-    load_dotenv = None

from anomalib.models.image.vlm_ad.utils import Prompt

from .base import Backend
@@ -51,6 +45,11 @@
else:
OpenAI = None

+if module_available("dotenv"):
+    from dotenv import load_dotenv
+else:
+    load_dotenv = None

if TYPE_CHECKING:
from openai.types.chat import ChatCompletion

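module_available from lightning_utilities checks whether a package can be imported, so the dotenv fallback now uses the same guard style as the OpenAI import above instead of a try/except. A minimal sketch of the pattern in isolation (the helper below is hypothetical, not taken from the backend):

from lightning_utilities.core.imports import module_available

# Guarded optional import: fall back to None when python-dotenv is not installed.
if module_available("dotenv"):
    from dotenv import load_dotenv
else:
    load_dotenv = None

def load_env_if_possible() -> None:
    # Hypothetical helper: only read a .env file when python-dotenv is available.
    if load_dotenv is not None:
        load_dotenv()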
5 changes: 3 additions & 2 deletions src/anomalib/models/image/winclip/torch_model.py
@@ -39,6 +39,9 @@
from anomalib.data import InferenceBatch
from anomalib.models.components import BufferListMixin, DynamicBufferMixin

+from .prompting import create_prompt_ensemble
+from .utils import class_scores, harmonic_aggregation, make_masks, visual_association_score

# Optional import for open_clip
try:
import open_clip
@@ -47,8 +50,6 @@
open_clip = None
tokenize = None

-from .prompting import create_prompt_ensemble
-from .utils import class_scores, harmonic_aggregation, make_masks, visual_association_score

BACKBONE = "ViT-B-16-plus-240"
PRETRAINED = "laion400m_e31"
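The package-local prompting and utils imports now sit ahead of the guarded open_clip import. A minimal sketch of that optional-import pattern with a runtime check (the error message and helper are illustrative, and the exact try block is collapsed in this diff):

try:
    import open_clip
    from open_clip import tokenize
except ImportError:
    open_clip = None
    tokenize = None

def require_open_clip() -> None:
    # Hypothetical guard: fail early with an actionable message when open_clip is absent.
    if open_clip is None:
        msg = "open_clip is required for WinCLIP; install it via `pip install open-clip-torch`"
        raise ImportError(msg)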