
Commit a16a753 (parent: c207e02)

Add lamb/lars to optim init imports, remove stray comment

2 files changed (+5 −4 lines)

timm/optim/__init__.py

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,13 +1,15 @@
1-
from .adamp import AdamP
2-
from .adamw import AdamW
1+
from .adabelief import AdaBelief
32
from .adafactor import Adafactor
43
from .adahessian import Adahessian
4+
from .adamp import AdamP
5+
from .adamw import AdamW
6+
from .lamb import Lamb
7+
from .lars import Lars
58
from .lookahead import Lookahead
69
from .madgrad import MADGRAD
710
from .nadam import Nadam
811
from .nvnovograd import NvNovoGrad
912
from .radam import RAdam
1013
from .rmsprop_tf import RMSpropTF
1114
from .sgdp import SGDP
12-
from .adabelief import AdaBelief
1315
from .optim_factory import create_optimizer, create_optimizer_v2, optimizer_kwargs
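With this change, Lamb and Lars are re-exported from the package root, so they can be imported directly from timm.optim instead of from their submodules (timm.optim.lamb / timm.optim.lars). A minimal usage sketch; the toy model and hyperparameter values below are illustrative assumptions, not part of this commit:

import torch
from timm.optim import Lamb, Lars

model = torch.nn.Linear(10, 2)  # placeholder model, for illustration only

# Both optimizers follow the usual torch.optim constructor shape of
# (params, lr, ...); these hyperparameter values are arbitrary examples.
lamb_opt = Lamb(model.parameters(), lr=1e-3, weight_decay=0.01)
lars_opt = Lars(model.parameters(), lr=0.1, momentum=0.9)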

timm/optim/lars.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -87,7 +87,6 @@ def step(self, closure=None):
8787
device = self.param_groups[0]['params'][0].device
8888
one_tensor = torch.tensor(1.0, device=device) # because torch.where doesn't handle scalars correctly
8989

90-
# exclude scaling for params with 0 weight decay
9190
for group in self.param_groups:
9291
weight_decay = group['weight_decay']
9392
momentum = group['momentum']
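The one_tensor kept at line 88 exists because, on the PyTorch versions this code targets, torch.where expects tensor arguments for both branches, so building the constant once on the correct device avoids converting a Python scalar inside the parameter loop. A hedged sketch of how such a constant is typically used in a LARS-style trust-ratio computation; the norm values here are made-up placeholders:

import torch

device = torch.device('cpu')
one_tensor = torch.tensor(1.0, device=device)  # same workaround as in lars.py

w_norm = torch.tensor(0.5, device=device)  # placeholder parameter-norm value
g_norm = torch.tensor(2.0, device=device)  # placeholder gradient-norm value
trust_coeff = 0.001

# Fall back to a ratio of 1.0 whenever either norm is zero; passing the
# pre-built one_tensor keeps both branches of torch.where as tensors.
trust_ratio = torch.where(
    w_norm > 0,
    torch.where(g_norm > 0, trust_coeff * w_norm / g_norm, one_tensor),
    one_tensor,
)
print(trust_ratio)  # tensor(0.00025)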
