Commit
Bump PyTorch Lightning version to v1.9.* (#870)
* Modified padim configs

* Removed registry

* Replace LightningLoggerBase with Logger

* Remove resume_from_checkpoint arg from Trainer

* Updated cfa configs for v1.9.0

* Update cflow configs to run on v1.9.0

* Update csflow configs to run on v1.9.0

* Update dfkde configs to run on v1.9.0

* Update dfm configs to run on v1.9.0

* Update draem configs to run on v1.9.0

* Update fastflow configs to run on v1.9.0

* Update ganomaly configs to run on v1.9.0

* Update patchcore configs to run on v1.9.0

* Update reverse_distillation configs to run on v1.9.0

* Update rkde configs to run on v1.9.0

* Update stfpm configs to run on v1.9.0

* Update CHANGELOG.md

* Remove `inference_mode` from the configs to support PyTorch Lightning v1.6.*

* Update CHANGELOG.md

* Address pre-commit issues

* PL v1.6.* is not supported anymore

* Ignore no-member issue when getting trainer.callbacks

* Added requirements to pre-commit

* Resolve merge conflicts properly

* Revert the changes in tox.ini

* Address pre-commit errors

* Address pylint comments from pre-commit

* Address pylint comments from pre-commit (final pass)

* Fix tests. Tensorboard logger has not been addressed yet.

* 🩹 Patch requirements (#892)

* check extras dependencies in loggers

* Add recreate to workflow

* Revert -r flag

* Fix mocks

---------

Co-authored-by: Ashwin Vaidya <ashwinitinvaidya@gmail.com>

* replace in-place torch operations in Denormalize

---------

Co-authored-by: Ashwin Vaidya <ashwin.vaidya@intel.com>
Co-authored-by: Ashwin Vaidya <ashwinitinvaidya@gmail.com>
3 people authored Feb 7, 2023
1 parent 476655a commit d390fb5
Showing 57 changed files with 507 additions and 593 deletions.
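
The commit message above boils down to a handful of PyTorch Lightning v1.6 → v1.9 API moves. Below is a minimal sketch of the affected patterns, assuming the v1.9 import paths; the anomalib-specific details (model, datamodule, checkpoint path) are illustrative, not lifted from the diff.

```python
# Hedged sketch of the API moves named in the commit message; illustrative, not verbatim
# repository code. Import paths follow PyTorch Lightning v1.9.
from __future__ import annotations

from pytorch_lightning import Trainer
from pytorch_lightning.loggers import Logger  # replaces LightningLoggerBase


def get_experiment_loggers() -> list[Logger]:
    """Stand-in for a logger factory: custom loggers now subclass `Logger`."""
    return []


# The @MODEL_REGISTRY / @DATAMODULE_REGISTRY decorators from pytorch_lightning.utilities.cli
# are simply dropped; model and datamodule classes are imported and instantiated directly.

# `resume_from_checkpoint` is no longer passed to the Trainer; resuming goes through fit():
trainer = Trainer(max_epochs=30, logger=get_experiment_loggers() or True)
# trainer.fit(model, datamodule=datamodule, ckpt_path="path/to/last.ckpt")

# The Denormalize fix replaces in-place tensor ops with out-of-place ones, roughly:
#   image = tensor.mul_(std).add_(mean)   # before: mutates the normalized input
#   image = tensor * std + mean           # after:  leaves the input untouched
```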
1 change: 1 addition & 0 deletions CHANGELOG.md
@@ -8,6 +8,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).

### Added

- Bump up PyTorch Lightning version to v.1.9.\* (<https://github.com/openvinotoolkit/anomalib/pull/870>)
- Add ShanghaiTech Campus video anomaly detection dataset (<https://github.com/openvinotoolkit/anomalib/pull/869>)
- Add `pyupgrade` to `pre-commit` configs, and refactor based on `pyupgrade` and `refurb` (<https://github.com/openvinotoolkit/anomalib/pull/845>)
- Add [CFA](https://arxiv.org/abs/2206.04325) model implementation (<https://github.com/openvinotoolkit/anomalib/pull/783>)
2 changes: 0 additions & 2 deletions anomalib/data/btech.py
@@ -19,7 +19,6 @@
import cv2
import pandas as pd
from pandas.core.frame import DataFrame
from pytorch_lightning.utilities.cli import DATAMODULE_REGISTRY
from tqdm import tqdm

from anomalib.data.base import AnomalibDataModule, AnomalibDataset
@@ -176,7 +175,6 @@ def _setup(self) -> None:
self.samples = make_btech_dataset(path=self.root_category, split=self.split)


@DATAMODULE_REGISTRY
class BTech(AnomalibDataModule):
"""BTech Lightning Data Module.
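
With the registry decorator gone, a datamodule like BTech is just imported and constructed directly. A rough usage sketch follows; the constructor arguments shown are assumptions for illustration and may not match the exact signature.

```python
# Hypothetical usage after the registry removal; check anomalib.data.btech.BTech for the
# authoritative signature, the arguments below are assumed for illustration.
from anomalib.data.btech import BTech

datamodule = BTech(
    root="./datasets/BTech",  # dataset root folder (assumed layout)
    category="01",            # one of the BTech categories
    image_size=256,
)
datamodule.prepare_data()  # standard LightningDataModule hook: download/extract if needed
datamodule.setup()         # builds the train/val/test splits
```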
64 changes: 29 additions & 35 deletions anomalib/models/cfa/config.yaml
@@ -66,49 +66,43 @@ optimization:

The trainer section, previously listed alphabetically, is reordered to follow the PyTorch Lightning v1.9 Trainer signature: `gradient_clip_algorithm: norm` is added, and arguments deprecated or removed in newer Lightning releases (`amp_backend`, `auto_select_gpus`, `gpus`, `ipus`, `log_gpu_memory`, `num_processes`, `tpu_cores`) are dropped. The resulting section:

# PL Trainer Args. Don't add extra parameter here.
trainer:
  enable_checkpointing: true
  default_root_dir: null
  gradient_clip_val: 0
  gradient_clip_algorithm: norm
  num_nodes: 1
  devices: 1
  enable_progress_bar: true
  overfit_batches: 0.0
  track_grad_norm: -1
  check_val_every_n_epoch: 1 # Don't validate before extracting features.
  fast_dev_run: false
  accumulate_grad_batches: 1
  max_epochs: 30
  min_epochs: null
  max_steps: -1
  min_steps: null
  max_time: null
  limit_train_batches: 1.0
  limit_val_batches: 1.0
  limit_test_batches: 1.0
  limit_predict_batches: 1.0
  val_check_interval: 1.0 # Don't validate before extracting features.
  log_every_n_steps: 50
  accelerator: auto # <"cpu", "gpu", "tpu", "ipu", "hpu", "auto">
  strategy: null
  sync_batchnorm: false
  precision: 32
  enable_model_summary: true
  num_sanity_val_steps: 0
  profiler: null
  benchmark: false
  deterministic: false
  reload_dataloaders_every_n_epochs: 0
  auto_lr_find: false
  replace_sampler_ddp: true
  detect_anomaly: false
  auto_scale_batch_size: false
  plugins: null
  move_metrics_to_cpu: false
  multiple_trainloader_mode: max_size_cycle
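
These trainer keys are ultimately unpacked into the Lightning `Trainer`, which is why the section has to line up with the v1.9 signature. A hedged sketch of that pattern follows; the loading code is illustrative, and anomalib's real entrypoint also wires in loggers and callbacks on top of it.

```python
# Why the config keys must match the installed Lightning version (illustrative sketch).
from omegaconf import OmegaConf
from pytorch_lightning import Trainer

config = OmegaConf.load("anomalib/models/cfa/config.yaml")

# Unpacking the section raises a TypeError if it still carries arguments the installed
# Lightning release no longer accepts (e.g. the removed `log_gpu_memory`).
trainer = Trainer(**config.trainer)
```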
9 changes: 6 additions & 3 deletions anomalib/models/cfa/lightning_model.py
@@ -16,7 +16,6 @@
from omegaconf import DictConfig, ListConfig
from pytorch_lightning import Callback
from pytorch_lightning.callbacks import EarlyStopping
from pytorch_lightning.utilities.cli import MODEL_REGISTRY
from pytorch_lightning.utilities.types import STEP_OUTPUT
from torch import Tensor
from torch.optim.optimizer import Optimizer
@@ -30,7 +29,6 @@
__all__ = ["Cfa", "CfaLightning"]


@MODEL_REGISTRY
class Cfa(AnomalyModule):
"""CFA: Coupled-hypersphere-based Feature Adaptation for Target-Oriented Anomaly Localization.
@@ -83,6 +81,8 @@ def training_step(self, batch: dict[str, str | Tensor], *args, **kwargs) -> STEP
Returns:
STEP_OUTPUT: Loss value.
"""
del args, kwargs # These variables are not used.

distance = self.model(batch["image"])
loss = self.loss(distance)
return {"loss": loss}
@@ -96,6 +96,8 @@ def validation_step(self, batch: dict[str, str | Tensor], *args, **kwargs) -> ST
Returns:
dict: Anomaly map computed by the model.
"""
del args, kwargs # These variables are not used.

batch["anomaly_maps"] = self.model(batch["image"])
return batch

@@ -107,7 +109,8 @@ def backward(self, loss: Tensor, optimizer: Optimizer | None, optimizer_idx: int
optimizer (Optimizer | None): Optimizer.
optimizer_idx (int | None): Optimizer index.
"""
del optimizer, optimizer_idx # These variables are not used.
del optimizer, optimizer_idx, args, kwargs # These variables are not used.

# TODO: Investigate why retain_graph is needed.
loss.backward(retain_graph=True)

64 changes: 29 additions & 35 deletions anomalib/models/cflow/config.yaml
@@ -72,49 +72,43 @@ optimization:

# PL Trainer Args. Don't add extra parameter here.
The trainer section gets the same reordering and cleanup as the CFA config above; the resulting block is identical except for `max_epochs: 50`.
7 changes: 4 additions & 3 deletions anomalib/models/cflow/lightning_model.py
@@ -13,7 +13,6 @@
import torch.nn.functional as F
from omegaconf import DictConfig, ListConfig
from pytorch_lightning.callbacks import EarlyStopping
from pytorch_lightning.utilities.cli import MODEL_REGISTRY
from pytorch_lightning.utilities.types import STEP_OUTPUT
from torch import Tensor, optim
from torch.optim import Optimizer
@@ -25,7 +24,6 @@
__all__ = ["Cflow", "CflowLightning"]


@MODEL_REGISTRY
class Cflow(AnomalyModule):
"""PL Lightning Module for the CFLOW algorithm."""

@@ -98,6 +96,8 @@ def training_step(self, batch: dict[str, str | Tensor], *args, **kwargs) -> STEP
Loss value for the batch
"""
del args, kwargs # These variables are not used.

opt = self.optimizers()
self.model.encoder.eval()

@@ -169,8 +169,9 @@ def validation_step(self, batch: dict[str, str | Tensor], *args, **kwargs) -> ST
These are required in `validation_epoch_end` for feature concatenation.
"""
batch["anomaly_maps"] = self.model(batch["image"])
del args, kwargs # These variables are not used.

batch["anomaly_maps"] = self.model(batch["image"])
return batch


10 changes: 9 additions & 1 deletion anomalib/models/components/base/anomaly_module.py
@@ -12,7 +12,7 @@

import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks.base import Callback
from pytorch_lightning.callbacks import Callback
from pytorch_lightning.utilities.types import EPOCH_OUTPUT, STEP_OUTPUT
from torch import Tensor, nn
from torchmetrics import Metric
@@ -62,6 +62,8 @@ def forward(self, batch: dict[str, str | Tensor], *args, **kwargs) -> Any:
Returns:
Tensor: Output tensor from the model.
"""
del args, kwargs # These variables are not used.

return self.model(batch)

def validation_step(self, batch: dict[str, str | Tensor], *args, **kwargs) -> STEP_OUTPUT:
@@ -113,16 +115,22 @@ def test_step(self, batch: dict[str, str | Tensor], batch_idx: int, *args, **kwa
Dictionary containing images, features, true labels and masks.
These are required in `validation_epoch_end` for feature concatenation.
"""
del args, kwargs # These variables are not used.

return self.predict_step(batch, batch_idx)

def validation_step_end(self, val_step_outputs: STEP_OUTPUT, *args, **kwargs) -> STEP_OUTPUT:
"""Called at the end of each validation step."""
del args, kwargs # These variables are not used.

self._outputs_to_cpu(val_step_outputs)
self._post_process(val_step_outputs)
return val_step_outputs

def test_step_end(self, test_step_outputs: STEP_OUTPUT, *args, **kwargs) -> STEP_OUTPUT:
"""Called at the end of each test step."""
del args, kwargs # These variables are not used.

self._outputs_to_cpu(test_step_outputs)
self._post_process(test_step_outputs)
return test_step_outputs
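
Two changes recur throughout this file and the model implementations: the `Callback` import moves off the deprecated `pytorch_lightning.callbacks.base` path, and hooks that accept `*args, **kwargs` delete them immediately so pylint's unused-argument check passes. A stripped-down sketch of the convention follows; the class below is illustrative, not the real `AnomalyModule`.

```python
# Illustrative only; the actual class lives in anomalib/models/components/base/anomaly_module.py.
from __future__ import annotations

import pytorch_lightning as pl
from pytorch_lightning.callbacks import Callback  # was: from pytorch_lightning.callbacks.base import Callback
from torch import Tensor, nn


class ExampleModule(pl.LightningModule):
    """Minimal module following the hook convention used in this commit."""

    def __init__(self) -> None:
        super().__init__()
        self.model = nn.Linear(8, 1)

    def forward(self, batch: Tensor, *args, **kwargs) -> Tensor:
        del args, kwargs  # Accepted only to match the Lightning hook signature.
        return self.model(batch)

    def test_step(self, batch: Tensor, batch_idx: int, *args, **kwargs) -> Tensor:
        del args, kwargs  # These variables are not used.
        return self.predict_step(batch, batch_idx)

    def configure_callbacks(self) -> list[Callback]:
        return []  # `Callback` now comes from pytorch_lightning.callbacks.
```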
64 changes: 29 additions & 35 deletions anomalib/models/csflow/config.yaml
@@ -73,49 +73,43 @@ optimization:

# PL Trainer Args. Don't add extra parameter here.
The trainer section gets the same reordering and cleanup as the CFA config above; the resulting block is identical except for `gradient_clip_val: 1 # Grad clip value set based on the official implementation` and `max_epochs: 240`.
(Diffs for the remaining changed files are not shown.)