Commit
Remove adahessian since sophia is so much better
jloveric committed Jun 19, 2024
1 parent ed2efe9 commit 93c4bfa
Showing 4 changed files with 4 additions and 35 deletions.
examples/function_example.py (1 addition, 11 deletions)
@@ -8,7 +8,6 @@
 import numpy as np
 import torch
 import torch.optim
-import torch_optimizer as alt_optim
 from pytorch_lightning import LightningModule, Trainer
 from torch.nn import functional as F
 from torch.utils.data import DataLoader, Dataset
@@ -131,16 +130,7 @@ def train_dataloader(self):
         return DataLoader(FunctionDataset(), batch_size=4)
 
     def configure_optimizers(self):
-        if self.optimizer == "adahessian":
-            return alt_optim.Adahessian(
-                self.layer.parameters(),
-                lr=1.0,
-                betas=(0.9, 0.999),
-                eps=1e-4,
-                weight_decay=0.0,
-                hessian_power=1.0,
-            )
-        elif self.optimizer == "adam":
+        if self.optimizer == "adam":
             return torch.optim.Adam(self.parameters(), lr=0.001)
         elif self.optimizer == "lion":
             return Lion(self.parameters(), lr=0.001)
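For context, a minimal sketch of the optimizer selection that remains after this change, with a hypothetical "sophia" branch added to reflect the commit message. Nothing below appears in the diff: the build_optimizer helper, the lion_pytorch and sophia import paths, and the SophiaG hyperparameters (copied from the reference Sophia README example) are all assumptions.

# Sketch only, not part of this commit.
import torch
from lion_pytorch import Lion  # assumed package providing the Lion optimizer used in the examples
from sophia import SophiaG     # hypothetical import path for a SophiaG implementation


def build_optimizer(model: torch.nn.Module, name: str, lr: float = 0.001) -> torch.optim.Optimizer:
    # Mirrors the branches left in configure_optimizers, plus a possible Sophia option.
    if name == "adam":
        return torch.optim.Adam(model.parameters(), lr=lr)
    elif name == "lion":
        return Lion(model.parameters(), lr=lr)
    elif name == "sophia":
        # Hyperparameters follow the Sophia README example; treat them as a starting point.
        return SophiaG(model.parameters(), lr=2e-4, betas=(0.965, 0.99), rho=0.01, weight_decay=1e-1)
    raise ValueError(f"Unsupported optimizer: {name}")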
examples/invariant_mnist.py (1 addition, 11 deletions)
@@ -5,7 +5,6 @@
 import torch.nn as nn
 import torch.nn.functional as F
 import torch.optim as optim
-import torch_optimizer as alt_optim
 import torchvision
 import torchvision.transforms as transforms
 from omegaconf import DictConfig, OmegaConf
@@ -139,16 +138,7 @@ def test_step(self, batch, batch_idx):
         return self.eval_step(batch, batch_idx, "test")
 
     def configure_optimizers(self):
-        if self.cfg.optimizer.name == "adahessian":
-            return alt_optim.Adahessian(
-                self.parameters(),
-                lr=1.0,
-                betas=(0.9, 0.999),
-                eps=1e-4,
-                weight_decay=0.0,
-                hessian_power=1.0,
-            )
-        elif self.cfg.optimizer.name == "adam":
+        if self.cfg.optimizer.name == "adam":
             optimizer = optim.Adam(self.parameters(), lr=self.cfg.optimizer.lr)
             lr_scheduler = optim.lr_scheduler.ReduceLROnPlateau(
                 optimizer,
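The remainder of this method is truncated in the diff. As a reminder of how an Adam plus ReduceLROnPlateau pair like the one above is typically returned from a LightningModule, here is a minimal sketch; the Example class, the patience/factor values, and the monitored metric name ("train_loss") are assumptions, not taken from this repository.

# Sketch only, assuming a logged metric named "train_loss".
import torch
import torch.optim as optim
from pytorch_lightning import LightningModule


class Example(LightningModule):  # placeholder module for illustration
    def __init__(self, lr: float = 1e-3):
        super().__init__()
        self.lr = lr
        self.layer = torch.nn.Linear(10, 1)

    def configure_optimizers(self):
        optimizer = optim.Adam(self.parameters(), lr=self.lr)
        scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=5, factor=0.5)
        # Lightning expects the monitor key when using ReduceLROnPlateau.
        return {
            "optimizer": optimizer,
            "lr_scheduler": {"scheduler": scheduler, "monitor": "train_loss"},
        }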
examples/variational_autoencoder.py (1 addition, 11 deletions)
@@ -3,7 +3,6 @@
 import hydra
 import torch
 import torch.optim as optim
-import torch_optimizer as alt_optim
 import torchvision
 import torchvision.transforms as transforms
 from matplotlib.pyplot import figure
@@ -158,16 +157,7 @@ def test_step(self, batch, batch_idx):
         return self.eval_step(batch, batch_idx, "test")
 
     def configure_optimizers(self):
-        if self._cfg.optimizer.name == "adahessian":
-            return alt_optim.Adahessian(
-                self.parameters(),
-                lr=1.0,
-                betas=(0.9, 0.999),
-                eps=1e-4,
-                weight_decay=0.0,
-                hessian_power=1.0,
-            )
-        elif self._cfg.optimizer.name == "adam":
+        if self._cfg.optimizer.name == "adam":
 
             optimizer = optim.Adam(
                 params=self.parameters(),
pyproject.toml (1 addition, 2 deletions)
@@ -1,14 +1,13 @@
 [tool.poetry]
 name = "high-order-layers-torch"
-version = "2.5.3"
+version = "2.6.0"
 description = "High order layers in pytorch"
 authors = ["jloverich <john.loverich@gmail.com>"]
 license = "MIT"
 readme = "README.md"
 
 [tool.poetry.dependencies]
 python = ">=3.9,<4"
-torch-optimizer = "^0.3.0"
 hydra-core = "^1.2.0"
 torch = ">=2.1.1"
 torchvision = ">=0.18.0"
