From d390fb580e9b614840e58fc126e27a9de95b8746 Mon Sep 17 00:00:00 2001
From: Samet Akcay
Date: Tue, 7 Feb 2023 10:11:11 +0000
Subject: [PATCH] Bump PyTorch Lightning version to v1.9.* (#870)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* Modified padim configs
* Removed registry
* Replace LightningLoggerBase with Logger
* Remove resume_from_checkpoint arg from Trainer
* Updated cfa configs for v1.9.0
* Update cflow configs to run on v1.9.0
* Update csflow configs to run on v1.9.0
* Update dfkde configs to run on v1.9.0
* Update dfm configs to run on v1.9.0
* Update draem configs to run on v1.9.0
* Update fastflow configs to run on v1.9.0
* Update ganomaly configs to run on v1.9.0
* Update patchcore configs to run on v1.9.0
* Update reverse_distillation configs to run on v1.9.0
* Update rkde configs to run on v1.9.0
* Update stfpm configs to run on v1.9.0
* Update CHANGELOG.md
* Remove `inference_mode` from the configs to support PyTorch Lightning v1.6.*
* Update CHANGELOG.md
* Address pre-commit issues
* PL v1.6.* is not supported anymore
* Ignore no-member issue when getting trainer.callbacks
* Added requirements to pre-commit
* Resolve merge conflicts properly
* Revert the changes in tox.ini
* Address pre-commit errors
* Address pylint comments from pre-commit
* Address pylint comments from pre-commit. - This is the final one!!
* Fix tests. Tensorboard logger has not been addressed yet.
* 🩹 Patch requirements (#892)
* check extras dependencies in loggers
* Add recreate to workflow
* Revert -r flag
* Fix mocks

---------

Co-authored-by: Ashwin Vaidya

* replace in-place torch operations in Denormalize

---------

Co-authored-by: Ashwin Vaidya
Co-authored-by: Ashwin Vaidya
---
 CHANGELOG.md | 1 +
 anomalib/data/btech.py | 2 -
 anomalib/models/cfa/config.yaml | 64 ++++++++----------
 anomalib/models/cfa/lightning_model.py | 9 ++-
 anomalib/models/cflow/config.yaml | 64 ++++++++----------
 anomalib/models/cflow/lightning_model.py | 7 +-
 .../models/components/base/anomaly_module.py | 10 ++-
 anomalib/models/csflow/config.yaml | 64 ++++++++----------
 anomalib/models/csflow/lightning_model.py | 6 +-
 anomalib/models/dfkde/config.yaml | 64 ++++++++----------
 anomalib/models/dfkde/lightning_model.py | 6 +-
 anomalib/models/dfm/config.yaml | 64 ++++++++----------
 anomalib/models/dfm/lightning_model.py | 6 +-
 anomalib/models/draem/config.yaml | 64 ++++++++----------
 anomalib/models/draem/lightning_model.py | 6 +-
 anomalib/models/fastflow/config.yaml | 64 ++++++++----------
 anomalib/models/fastflow/lightning_model.py | 6 +-
 anomalib/models/ganomaly/config.yaml | 62 ++++++++---------
 anomalib/models/ganomaly/lightning_model.py | 2 -
 anomalib/models/padim/config.yaml | 62 ++++++++---------
 anomalib/models/padim/lightning_model.py | 5 +-
 anomalib/models/patchcore/config.yaml | 64 ++++++++----------
 anomalib/models/patchcore/lightning_model.py | 5 +-
 .../models/reverse_distillation/config.yaml | 63 ++++++++----------
 .../reverse_distillation/lightning_model.py | 6 +-
 anomalib/models/rkde/config.yaml | 64 ++++++++----------
 anomalib/models/rkde/lightning_model.py | 5 +-
 anomalib/models/stfpm/config.yaml | 66 +++++++++----------
 anomalib/models/stfpm/lightning_model.py | 7 +-
 anomalib/pre_processing/transforms/custom.py | 9 +--
 anomalib/utils/callbacks/cdf_normalization.py | 4 +-
 anomalib/utils/callbacks/export.py | 2 -
 anomalib/utils/callbacks/graph.py | 2 -
 .../utils/callbacks/metrics_configuration.py | 2 -
 .../utils/callbacks/min_max_normalization.py | 2 -
anomalib/utils/callbacks/model_loader.py | 2 - anomalib/utils/callbacks/nncf/callback.py | 2 - .../post_processing_configuration.py | 2 - .../utils/callbacks/tiler_configuration.py | 2 - anomalib/utils/callbacks/timer.py | 2 - .../callbacks/visualizer/visualizer_image.py | 2 - .../callbacks/visualizer/visualizer_metric.py | 2 - anomalib/utils/cli/cli.py | 48 +------------- anomalib/utils/hpo/runners.py | 4 +- anomalib/utils/loggers/__init__.py | 8 +-- anomalib/utils/loggers/comet.py | 6 +- anomalib/utils/loggers/tensorboard.py | 6 +- anomalib/utils/loggers/wandb.py | 5 +- requirements/base.txt | 5 +- requirements/extras.txt | 4 ++ setup.py | 3 +- .../callbacks/export_callback/test_export.py | 2 +- .../test_metrics_configuration_callback.py | 2 +- .../visualizer_callback/test_visualizer.py | 2 +- .../utils/loggers/test_get_logger.py | 44 +++++++++---- tools/train.py | 2 +- tox.ini | 6 +- 57 files changed, 507 insertions(+), 593 deletions(-) create mode 100644 requirements/extras.txt diff --git a/CHANGELOG.md b/CHANGELOG.md index db184410c2..ffbbde0233 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,6 +8,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/). ### Added +- Bump up PyTorch Lightning version to v.1.9.\* () - Add ShanghaiTech Campus video anomaly detection dataset () - Add `pyupgrade` to `pre-commit` configs, and refactor based on `pyupgrade` and `refurb` () - Add [CFA](https://arxiv.org/abs/2206.04325) model implementation () diff --git a/anomalib/data/btech.py b/anomalib/data/btech.py index fa6c6cb759..99b2a4484a 100644 --- a/anomalib/data/btech.py +++ b/anomalib/data/btech.py @@ -19,7 +19,6 @@ import cv2 import pandas as pd from pandas.core.frame import DataFrame -from pytorch_lightning.utilities.cli import DATAMODULE_REGISTRY from tqdm import tqdm from anomalib.data.base import AnomalibDataModule, AnomalibDataset @@ -176,7 +175,6 @@ def _setup(self) -> None: self.samples = make_btech_dataset(path=self.root_category, split=self.split) -@DATAMODULE_REGISTRY class BTech(AnomalibDataModule): """BTech Lightning Data Module. diff --git a/anomalib/models/cfa/config.yaml b/anomalib/models/cfa/config.yaml index 3726965378..7a4fef0c93 100644 --- a/anomalib/models/cfa/config.yaml +++ b/anomalib/models/cfa/config.yaml @@ -66,49 +66,43 @@ optimization: # PL Trainer Args. Don't add extra parameter here. trainer: - accelerator: auto # <"cpu", "gpu", "tpu", "ipu", "hpu", "auto"> - accumulate_grad_batches: 1 - amp_backend: native - auto_lr_find: false - auto_scale_batch_size: false - auto_select_gpus: false - benchmark: false - check_val_every_n_epoch: 1 + enable_checkpointing: true default_root_dir: null - detect_anomaly: false - deterministic: false + gradient_clip_val: 0 + gradient_clip_algorithm: norm + num_nodes: 1 devices: 1 - enable_checkpointing: true - enable_model_summary: true enable_progress_bar: true + overfit_batches: 0.0 + track_grad_norm: -1 + check_val_every_n_epoch: 1 # Don't validate before extracting features. 
fast_dev_run: false - gpus: null # Set automatically - gradient_clip_val: 0 - ipus: null - limit_predict_batches: 1.0 - limit_test_batches: 1.0 - limit_train_batches: 1.0 - limit_val_batches: 1.0 - log_every_n_steps: 50 - log_gpu_memory: null + accumulate_grad_batches: 1 max_epochs: 30 - max_steps: -1 - max_time: null min_epochs: null + max_steps: -1 min_steps: null - move_metrics_to_cpu: false - multiple_trainloader_mode: max_size_cycle - num_nodes: 1 - num_processes: null - num_sanity_val_steps: 0 - overfit_batches: 0.0 - plugins: null + max_time: null + limit_train_batches: 1.0 + limit_val_batches: 1.0 + limit_test_batches: 1.0 + limit_predict_batches: 1.0 + val_check_interval: 1.0 # Don't validate before extracting features. + log_every_n_steps: 50 + accelerator: auto # <"cpu", "gpu", "tpu", "ipu", "hpu", "auto"> + strategy: null + sync_batchnorm: false precision: 32 + enable_model_summary: true + num_sanity_val_steps: 0 profiler: null + benchmark: false + deterministic: false reload_dataloaders_every_n_epochs: 0 + auto_lr_find: false replace_sampler_ddp: true - strategy: null - sync_batchnorm: false - tpu_cores: null - track_grad_norm: -1 - val_check_interval: 1.0 + detect_anomaly: false + auto_scale_batch_size: false + plugins: null + move_metrics_to_cpu: false + multiple_trainloader_mode: max_size_cycle diff --git a/anomalib/models/cfa/lightning_model.py b/anomalib/models/cfa/lightning_model.py index 51d65a1326..0e9e983184 100644 --- a/anomalib/models/cfa/lightning_model.py +++ b/anomalib/models/cfa/lightning_model.py @@ -16,7 +16,6 @@ from omegaconf import DictConfig, ListConfig from pytorch_lightning import Callback from pytorch_lightning.callbacks import EarlyStopping -from pytorch_lightning.utilities.cli import MODEL_REGISTRY from pytorch_lightning.utilities.types import STEP_OUTPUT from torch import Tensor from torch.optim.optimizer import Optimizer @@ -30,7 +29,6 @@ __all__ = ["Cfa", "CfaLightning"] -@MODEL_REGISTRY class Cfa(AnomalyModule): """CFA: Coupled-hypersphere-based Feature Adaptation for Target-Oriented Anomaly Localization. @@ -83,6 +81,8 @@ def training_step(self, batch: dict[str, str | Tensor], *args, **kwargs) -> STEP Returns: STEP_OUTPUT: Loss value. """ + del args, kwargs # These variables are not used. + distance = self.model(batch["image"]) loss = self.loss(distance) return {"loss": loss} @@ -96,6 +96,8 @@ def validation_step(self, batch: dict[str, str | Tensor], *args, **kwargs) -> ST Returns: dict: Anomaly map computed by the model. """ + del args, kwargs # These variables are not used. + batch["anomaly_maps"] = self.model(batch["image"]) return batch @@ -107,7 +109,8 @@ def backward(self, loss: Tensor, optimizer: Optimizer | None, optimizer_idx: int optimizer (Optimizer | None): Optimizer. optimizer_idx (int | None): Optimizer index. """ - del optimizer, optimizer_idx # These variables are not used. + del optimizer, optimizer_idx, args, kwargs # These variables are not used. + # TODO: Investigate why retain_graph is needed. loss.backward(retain_graph=True) diff --git a/anomalib/models/cflow/config.yaml b/anomalib/models/cflow/config.yaml index 5cbc86db0a..4e7818a1a4 100644 --- a/anomalib/models/cflow/config.yaml +++ b/anomalib/models/cflow/config.yaml @@ -72,49 +72,43 @@ optimization: # PL Trainer Args. Don't add extra parameter here. 
trainer: - accelerator: auto # <"cpu", "gpu", "tpu", "ipu", "hpu", "auto"> - accumulate_grad_batches: 1 - amp_backend: native - auto_lr_find: false - auto_scale_batch_size: false - auto_select_gpus: false - benchmark: false - check_val_every_n_epoch: 1 + enable_checkpointing: true default_root_dir: null - detect_anomaly: false - deterministic: false + gradient_clip_val: 0 + gradient_clip_algorithm: norm + num_nodes: 1 devices: 1 - enable_checkpointing: true - enable_model_summary: true enable_progress_bar: true + overfit_batches: 0.0 + track_grad_norm: -1 + check_val_every_n_epoch: 1 # Don't validate before extracting features. fast_dev_run: false - gpus: null # Set automatically - gradient_clip_val: 0 - ipus: null - limit_predict_batches: 1.0 - limit_test_batches: 1.0 - limit_train_batches: 1.0 - limit_val_batches: 1.0 - log_every_n_steps: 50 - log_gpu_memory: null + accumulate_grad_batches: 1 max_epochs: 50 - max_steps: -1 - max_time: null min_epochs: null + max_steps: -1 min_steps: null - move_metrics_to_cpu: false - multiple_trainloader_mode: max_size_cycle - num_nodes: 1 - num_processes: null - num_sanity_val_steps: 0 - overfit_batches: 0.0 - plugins: null + max_time: null + limit_train_batches: 1.0 + limit_val_batches: 1.0 + limit_test_batches: 1.0 + limit_predict_batches: 1.0 + val_check_interval: 1.0 # Don't validate before extracting features. + log_every_n_steps: 50 + accelerator: auto # <"cpu", "gpu", "tpu", "ipu", "hpu", "auto"> + strategy: null + sync_batchnorm: false precision: 32 + enable_model_summary: true + num_sanity_val_steps: 0 profiler: null + benchmark: false + deterministic: false reload_dataloaders_every_n_epochs: 0 + auto_lr_find: false replace_sampler_ddp: true - strategy: null - sync_batchnorm: false - tpu_cores: null - track_grad_norm: -1 - val_check_interval: 1.0 + detect_anomaly: false + auto_scale_batch_size: false + plugins: null + move_metrics_to_cpu: false + multiple_trainloader_mode: max_size_cycle diff --git a/anomalib/models/cflow/lightning_model.py b/anomalib/models/cflow/lightning_model.py index aa2f45637e..f3b604b04f 100644 --- a/anomalib/models/cflow/lightning_model.py +++ b/anomalib/models/cflow/lightning_model.py @@ -13,7 +13,6 @@ import torch.nn.functional as F from omegaconf import DictConfig, ListConfig from pytorch_lightning.callbacks import EarlyStopping -from pytorch_lightning.utilities.cli import MODEL_REGISTRY from pytorch_lightning.utilities.types import STEP_OUTPUT from torch import Tensor, optim from torch.optim import Optimizer @@ -25,7 +24,6 @@ __all__ = ["Cflow", "CflowLightning"] -@MODEL_REGISTRY class Cflow(AnomalyModule): """PL Lightning Module for the CFLOW algorithm.""" @@ -98,6 +96,8 @@ def training_step(self, batch: dict[str, str | Tensor], *args, **kwargs) -> STEP Loss value for the batch """ + del args, kwargs # These variables are not used. + opt = self.optimizers() self.model.encoder.eval() @@ -169,8 +169,9 @@ def validation_step(self, batch: dict[str, str | Tensor], *args, **kwargs) -> ST These are required in `validation_epoch_end` for feature concatenation. """ - batch["anomaly_maps"] = self.model(batch["image"]) + del args, kwargs # These variables are not used. 
+ batch["anomaly_maps"] = self.model(batch["image"]) return batch diff --git a/anomalib/models/components/base/anomaly_module.py b/anomalib/models/components/base/anomaly_module.py index df7785853c..3c6c2a5d49 100644 --- a/anomalib/models/components/base/anomaly_module.py +++ b/anomalib/models/components/base/anomaly_module.py @@ -12,7 +12,7 @@ import pytorch_lightning as pl import torch -from pytorch_lightning.callbacks.base import Callback +from pytorch_lightning.callbacks import Callback from pytorch_lightning.utilities.types import EPOCH_OUTPUT, STEP_OUTPUT from torch import Tensor, nn from torchmetrics import Metric @@ -62,6 +62,8 @@ def forward(self, batch: dict[str, str | Tensor], *args, **kwargs) -> Any: Returns: Tensor: Output tensor from the model. """ + del args, kwargs # These variables are not used. + return self.model(batch) def validation_step(self, batch: dict[str, str | Tensor], *args, **kwargs) -> STEP_OUTPUT: @@ -113,16 +115,22 @@ def test_step(self, batch: dict[str, str | Tensor], batch_idx: int, *args, **kwa Dictionary containing images, features, true labels and masks. These are required in `validation_epoch_end` for feature concatenation. """ + del args, kwargs # These variables are not used. + return self.predict_step(batch, batch_idx) def validation_step_end(self, val_step_outputs: STEP_OUTPUT, *args, **kwargs) -> STEP_OUTPUT: """Called at the end of each validation step.""" + del args, kwargs # These variables are not used. + self._outputs_to_cpu(val_step_outputs) self._post_process(val_step_outputs) return val_step_outputs def test_step_end(self, test_step_outputs: STEP_OUTPUT, *args, **kwargs) -> STEP_OUTPUT: """Called at the end of each test step.""" + del args, kwargs # These variables are not used. + self._outputs_to_cpu(test_step_outputs) self._post_process(test_step_outputs) return test_step_outputs diff --git a/anomalib/models/csflow/config.yaml b/anomalib/models/csflow/config.yaml index 928797463b..60684ac1bd 100644 --- a/anomalib/models/csflow/config.yaml +++ b/anomalib/models/csflow/config.yaml @@ -73,49 +73,43 @@ optimization: export_mode: null #options: onnx, openvino # PL Trainer Args. Don't add extra parameter here. trainer: - accelerator: auto # <"cpu", "gpu", "tpu", "ipu", "hpu", "auto"> - accumulate_grad_batches: 1 - amp_backend: native - auto_lr_find: false - auto_scale_batch_size: false - auto_select_gpus: false - benchmark: false - check_val_every_n_epoch: 1 + enable_checkpointing: true default_root_dir: null - detect_anomaly: false - deterministic: false + gradient_clip_val: 1 # Grad clip value set based on the official implementation + gradient_clip_algorithm: norm + num_nodes: 1 devices: 1 - enable_checkpointing: true - enable_model_summary: true enable_progress_bar: true + overfit_batches: 0.0 + track_grad_norm: -1 + check_val_every_n_epoch: 1 # Don't validate before extracting features. 
fast_dev_run: false - gpus: null # Set automatically - gradient_clip_val: 1 # Grad clip value set based on the official implementation - ipus: null - limit_predict_batches: 1.0 - limit_test_batches: 1.0 - limit_train_batches: 1.0 - limit_val_batches: 1.0 - log_every_n_steps: 50 - log_gpu_memory: null + accumulate_grad_batches: 1 max_epochs: 240 - max_steps: -1 - max_time: null min_epochs: null + max_steps: -1 min_steps: null - move_metrics_to_cpu: false - multiple_trainloader_mode: max_size_cycle - num_nodes: 1 - num_processes: null - num_sanity_val_steps: 0 - overfit_batches: 0.0 - plugins: null + max_time: null + limit_train_batches: 1.0 + limit_val_batches: 1.0 + limit_test_batches: 1.0 + limit_predict_batches: 1.0 + val_check_interval: 1.0 # Don't validate before extracting features. + log_every_n_steps: 50 + accelerator: auto # <"cpu", "gpu", "tpu", "ipu", "hpu", "auto"> + strategy: null + sync_batchnorm: false precision: 32 + enable_model_summary: true + num_sanity_val_steps: 0 profiler: null + benchmark: false + deterministic: false reload_dataloaders_every_n_epochs: 0 + auto_lr_find: false replace_sampler_ddp: true - strategy: null - sync_batchnorm: false - tpu_cores: null - track_grad_norm: -1 - val_check_interval: 1.0 + detect_anomaly: false + auto_scale_batch_size: false + plugins: null + move_metrics_to_cpu: false + multiple_trainloader_mode: max_size_cycle diff --git a/anomalib/models/csflow/lightning_model.py b/anomalib/models/csflow/lightning_model.py index ec99fe198b..6b829e712c 100644 --- a/anomalib/models/csflow/lightning_model.py +++ b/anomalib/models/csflow/lightning_model.py @@ -13,7 +13,6 @@ import torch from omegaconf import DictConfig, ListConfig from pytorch_lightning.callbacks import Callback, EarlyStopping -from pytorch_lightning.utilities.cli import MODEL_REGISTRY from pytorch_lightning.utilities.types import STEP_OUTPUT from torch import Tensor @@ -27,7 +26,6 @@ __all__ = ["Csflow", "CsflowLightning"] -@MODEL_REGISTRY class Csflow(AnomalyModule): """Fully Convolutional Cross-Scale-Flows for Image-based Defect Detection. @@ -67,6 +65,8 @@ def training_step(self, batch: dict[str, str | Tensor], *args, **kwargs) -> STEP Returns: Loss value """ + del args, kwargs # These variables are not used. + self.model.feature_extractor.eval() z_dist, jacobians = self.model(batch["image"]) loss = self.loss(z_dist, jacobians) @@ -82,6 +82,8 @@ def validation_step(self, batch: dict[str, str | Tensor], *args, **kwargs) -> ST Returns: dict[str, Tensor]: Dictionary containing the anomaly map, scores, etc. """ + del args, kwargs # These variables are not used. + anomaly_maps, anomaly_scores = self.model(batch["image"]) batch["anomaly_maps"] = anomaly_maps batch["pred_scores"] = anomaly_scores diff --git a/anomalib/models/dfkde/config.yaml b/anomalib/models/dfkde/config.yaml index 7a8bdec769..5cff3a35f2 100644 --- a/anomalib/models/dfkde/config.yaml +++ b/anomalib/models/dfkde/config.yaml @@ -59,49 +59,43 @@ optimization: export_mode: null #options: onnx, openvino # PL Trainer Args. Don't add extra parameter here. trainer: - accelerator: auto # <"cpu", "gpu", "tpu", "ipu", "hpu", "auto"> - accumulate_grad_batches: 1 - amp_backend: native - auto_lr_find: false - auto_scale_batch_size: false - auto_select_gpus: false - benchmark: false - check_val_every_n_epoch: 1 # Don't validate before extracting features. 
+ enable_checkpointing: true default_root_dir: null - detect_anomaly: false - deterministic: false + gradient_clip_val: 0 + gradient_clip_algorithm: norm + num_nodes: 1 devices: 1 - enable_checkpointing: true - enable_model_summary: true enable_progress_bar: true + overfit_batches: 0.0 + track_grad_norm: -1 + check_val_every_n_epoch: 1 # Don't validate before extracting features. fast_dev_run: false - gpus: null # Set automatically - gradient_clip_val: 0 - ipus: null - limit_predict_batches: 1.0 - limit_test_batches: 1.0 - limit_train_batches: 1.0 - limit_val_batches: 1.0 - log_every_n_steps: 50 - log_gpu_memory: null + accumulate_grad_batches: 1 max_epochs: 1 - max_steps: -1 - max_time: null min_epochs: null + max_steps: -1 min_steps: null - move_metrics_to_cpu: false - multiple_trainloader_mode: max_size_cycle - num_nodes: 1 - num_processes: null - num_sanity_val_steps: 0 - overfit_batches: 0.0 - plugins: null + max_time: null + limit_train_batches: 1.0 + limit_val_batches: 1.0 + limit_test_batches: 1.0 + limit_predict_batches: 1.0 + val_check_interval: 1.0 # Don't validate before extracting features. + log_every_n_steps: 50 + accelerator: auto # <"cpu", "gpu", "tpu", "ipu", "hpu", "auto"> + strategy: null + sync_batchnorm: false precision: 32 + enable_model_summary: true + num_sanity_val_steps: 0 profiler: null + benchmark: false + deterministic: false reload_dataloaders_every_n_epochs: 0 + auto_lr_find: false replace_sampler_ddp: true - strategy: null - sync_batchnorm: false - tpu_cores: null - track_grad_norm: -1 - val_check_interval: 1.0 # Don't validate before extracting features. + detect_anomaly: false + auto_scale_batch_size: false + plugins: null + move_metrics_to_cpu: false + multiple_trainloader_mode: max_size_cycle diff --git a/anomalib/models/dfkde/lightning_model.py b/anomalib/models/dfkde/lightning_model.py index dc39586179..0a63648fc6 100644 --- a/anomalib/models/dfkde/lightning_model.py +++ b/anomalib/models/dfkde/lightning_model.py @@ -9,7 +9,6 @@ import torch from omegaconf import DictConfig, ListConfig -from pytorch_lightning.utilities.cli import MODEL_REGISTRY from pytorch_lightning.utilities.types import STEP_OUTPUT from torch import Tensor @@ -21,7 +20,6 @@ logger = logging.getLogger(__name__) -@MODEL_REGISTRY class Dfkde(AnomalyModule): """DFKDE: Deep Feature Kernel Density Estimation. @@ -74,6 +72,7 @@ def training_step(self, batch: dict[str, str | Tensor], *args, **kwargs) -> None Returns: Deep CNN features. """ + del args, kwargs # These variables are not used. embedding = self.model(batch["image"]) @@ -104,8 +103,9 @@ def validation_step(self, batch: dict[str, str | Tensor], *args, **kwargs) -> ST Returns: Dictionary containing probability, prediction and ground truth values. """ - batch["pred_scores"] = self.model(batch["image"]) + del args, kwargs # These variables are not used. + batch["pred_scores"] = self.model(batch["image"]) return batch diff --git a/anomalib/models/dfm/config.yaml b/anomalib/models/dfm/config.yaml index 351115467e..b6110681eb 100755 --- a/anomalib/models/dfm/config.yaml +++ b/anomalib/models/dfm/config.yaml @@ -60,49 +60,43 @@ optimization: export_mode: null #options: onnx, openvino # PL Trainer Args. Don't add extra parameter here. trainer: - accelerator: auto # <"cpu", "gpu", "tpu", "ipu", "hpu", "auto"> - accumulate_grad_batches: 1 - amp_backend: native - auto_lr_find: false - auto_scale_batch_size: false - auto_select_gpus: false - benchmark: false - check_val_every_n_epoch: 1 # Don't validate before extracting features. 
+ enable_checkpointing: true default_root_dir: null - detect_anomaly: false - deterministic: false + gradient_clip_val: 0 + gradient_clip_algorithm: norm + num_nodes: 1 devices: 1 - enable_checkpointing: true - enable_model_summary: true enable_progress_bar: true + overfit_batches: 0.0 + track_grad_norm: -1 + check_val_every_n_epoch: 1 # Don't validate before extracting features. fast_dev_run: false - gpus: null # Set automatically - gradient_clip_val: 0 - ipus: null - limit_predict_batches: 1.0 - limit_test_batches: 1.0 - limit_train_batches: 1.0 - limit_val_batches: 1.0 - log_every_n_steps: 50 - log_gpu_memory: null + accumulate_grad_batches: 1 max_epochs: 1 - max_steps: -1 - max_time: null min_epochs: null + max_steps: -1 min_steps: null - move_metrics_to_cpu: false - multiple_trainloader_mode: max_size_cycle - num_nodes: 1 - num_processes: null - num_sanity_val_steps: 0 - overfit_batches: 0.0 - plugins: null + max_time: null + limit_train_batches: 1.0 + limit_val_batches: 1.0 + limit_test_batches: 1.0 + limit_predict_batches: 1.0 + val_check_interval: 1.0 # Don't validate before extracting features. + log_every_n_steps: 50 + accelerator: auto # <"cpu", "gpu", "tpu", "ipu", "hpu", "auto"> + strategy: null + sync_batchnorm: false precision: 32 + enable_model_summary: true + num_sanity_val_steps: 0 profiler: null + benchmark: false + deterministic: false reload_dataloaders_every_n_epochs: 0 + auto_lr_find: false replace_sampler_ddp: true - strategy: null - sync_batchnorm: false - tpu_cores: null - track_grad_norm: -1 - val_check_interval: 1.0 # Don't validate before extracting features. + detect_anomaly: false + auto_scale_batch_size: false + plugins: null + move_metrics_to_cpu: false + multiple_trainloader_mode: max_size_cycle diff --git a/anomalib/models/dfm/lightning_model.py b/anomalib/models/dfm/lightning_model.py index c83ad7a1c5..4910f4b041 100644 --- a/anomalib/models/dfm/lightning_model.py +++ b/anomalib/models/dfm/lightning_model.py @@ -9,7 +9,6 @@ import torch from omegaconf import DictConfig, ListConfig -from pytorch_lightning.utilities.cli import MODEL_REGISTRY from pytorch_lightning.utilities.types import STEP_OUTPUT from torch import Tensor @@ -20,7 +19,6 @@ logger = logging.getLogger(__name__) -@MODEL_REGISTRY class Dfm(AnomalyModule): """DFM: Deep Featured Kernel Density Estimation. @@ -79,6 +77,8 @@ def training_step(self, batch: dict[str, str | Tensor], *args, **kwargs) -> None Returns: Deep CNN features. """ + del args, kwargs # These variables are not used. + embedding = self.model.get_features(batch["image"]).squeeze() # NOTE: `self.embedding` appends each batch embedding to @@ -109,6 +109,8 @@ def validation_step(self, batch: dict[str, str | Tensor], *args, **kwargs) -> ST Returns: Dictionary containing FRE anomaly scores and anomaly maps. """ + del args, kwargs # These variables are not used. + if self.score_type == "fre": batch["anomaly_maps"], batch["pred_scores"] = self.model(batch["image"]) elif self.score_type == "nll": diff --git a/anomalib/models/draem/config.yaml b/anomalib/models/draem/config.yaml index 941b05f46a..abdd9c8a1c 100644 --- a/anomalib/models/draem/config.yaml +++ b/anomalib/models/draem/config.yaml @@ -68,49 +68,43 @@ optimization: export_mode: null #options: onnx, openvino # PL Trainer Args. Don't add extra parameter here. 
trainer: - accelerator: auto # <"cpu", "gpu", "tpu", "ipu", "hpu", "auto"> - accumulate_grad_batches: 1 - amp_backend: native - auto_lr_find: false - auto_scale_batch_size: false - auto_select_gpus: false - benchmark: false - check_val_every_n_epoch: 1 + enable_checkpointing: true default_root_dir: null - detect_anomaly: false - deterministic: false + gradient_clip_val: 0 + gradient_clip_algorithm: norm + num_nodes: 1 devices: 1 - enable_checkpointing: true - enable_model_summary: true enable_progress_bar: true + overfit_batches: 0.0 + track_grad_norm: -1 + check_val_every_n_epoch: 1 # Don't validate before extracting features. fast_dev_run: false - gpus: null # Set automatically - gradient_clip_val: 0 - ipus: null - limit_predict_batches: 1.0 - limit_test_batches: 1.0 - limit_train_batches: 1.0 - limit_val_batches: 1.0 - log_every_n_steps: 50 - log_gpu_memory: null + accumulate_grad_batches: 1 max_epochs: 700 - max_steps: -1 - max_time: null min_epochs: null + max_steps: -1 min_steps: null - move_metrics_to_cpu: false - multiple_trainloader_mode: max_size_cycle - num_nodes: 1 - num_processes: null - num_sanity_val_steps: 0 - overfit_batches: 0.0 - plugins: null + max_time: null + limit_train_batches: 1.0 + limit_val_batches: 1.0 + limit_test_batches: 1.0 + limit_predict_batches: 1.0 + val_check_interval: 1.0 # Don't validate before extracting features. + log_every_n_steps: 50 + accelerator: auto # <"cpu", "gpu", "tpu", "ipu", "hpu", "auto"> + strategy: null + sync_batchnorm: false precision: 32 + enable_model_summary: true + num_sanity_val_steps: 0 profiler: null + benchmark: false + deterministic: false reload_dataloaders_every_n_epochs: 0 + auto_lr_find: false replace_sampler_ddp: true - strategy: null - sync_batchnorm: false - tpu_cores: null - track_grad_norm: -1 - val_check_interval: 1.0 + detect_anomaly: false + auto_scale_batch_size: false + plugins: null + move_metrics_to_cpu: false + multiple_trainloader_mode: max_size_cycle diff --git a/anomalib/models/draem/lightning_model.py b/anomalib/models/draem/lightning_model.py index ac7f55bd2c..64b458fe78 100644 --- a/anomalib/models/draem/lightning_model.py +++ b/anomalib/models/draem/lightning_model.py @@ -13,7 +13,6 @@ import torch from omegaconf import DictConfig, ListConfig from pytorch_lightning.callbacks import EarlyStopping -from pytorch_lightning.utilities.cli import MODEL_REGISTRY from pytorch_lightning.utilities.types import STEP_OUTPUT from torch import Tensor, nn @@ -25,7 +24,6 @@ __all__ = ["Draem", "DraemLightning"] -@MODEL_REGISTRY class Draem(AnomalyModule): """DRÆM: A discriminatively trained reconstruction embedding for surface anomaly detection. @@ -81,6 +79,8 @@ def training_step(self, batch: dict[str, str | Tensor], *args, **kwargs) -> STEP Returns: Loss dictionary """ + del args, kwargs # These variables are not used. + input_image = batch["image"] # Apply corruption to input image augmented_image, anomaly_mask = self.augmenter.augment_batch(input_image) @@ -106,6 +106,8 @@ def validation_step(self, batch: dict[str, str | Tensor], *args, **kwargs) -> ST Returns: Dictionary to which predicted anomaly maps have been added. """ + del args, kwargs # These variables are not used. 
+ prediction = self.model(batch["image"]) batch["anomaly_maps"] = prediction return batch diff --git a/anomalib/models/fastflow/config.yaml b/anomalib/models/fastflow/config.yaml index 501d8f3679..b2d1e115c8 100644 --- a/anomalib/models/fastflow/config.yaml +++ b/anomalib/models/fastflow/config.yaml @@ -72,49 +72,43 @@ optimization: # PL Trainer Args. Don't add extra parameter here. trainer: - accelerator: auto # <"cpu", "gpu", "tpu", "ipu", "hpu", "auto"> - accumulate_grad_batches: 1 - amp_backend: native - auto_lr_find: false - auto_scale_batch_size: false - auto_select_gpus: false - benchmark: false - check_val_every_n_epoch: 1 # Don't validate before extracting features. + enable_checkpointing: true default_root_dir: null - detect_anomaly: false - deterministic: false + gradient_clip_val: 0 + gradient_clip_algorithm: norm + num_nodes: 1 devices: 1 - enable_checkpointing: true - enable_model_summary: true enable_progress_bar: true + overfit_batches: 0.0 + track_grad_norm: -1 + check_val_every_n_epoch: 1 # Don't validate before extracting features. fast_dev_run: false - gpus: null # Set automatically - gradient_clip_val: 0 - ipus: null - limit_predict_batches: 1.0 - limit_test_batches: 1.0 - limit_train_batches: 1.0 - limit_val_batches: 1.0 - log_every_n_steps: 50 - log_gpu_memory: null + accumulate_grad_batches: 1 max_epochs: 500 - max_steps: -1 - max_time: null min_epochs: null + max_steps: -1 min_steps: null - move_metrics_to_cpu: false - multiple_trainloader_mode: max_size_cycle - num_nodes: 1 - num_processes: null - num_sanity_val_steps: 0 - overfit_batches: 0.0 - plugins: null + max_time: null + limit_train_batches: 1.0 + limit_val_batches: 1.0 + limit_test_batches: 1.0 + limit_predict_batches: 1.0 + val_check_interval: 1.0 # Don't validate before extracting features. + log_every_n_steps: 50 + accelerator: auto # <"cpu", "gpu", "tpu", "ipu", "hpu", "auto"> + strategy: null + sync_batchnorm: false precision: 32 + enable_model_summary: true + num_sanity_val_steps: 0 profiler: null + benchmark: false + deterministic: false reload_dataloaders_every_n_epochs: 0 + auto_lr_find: false replace_sampler_ddp: true - strategy: null - sync_batchnorm: false - tpu_cores: null - track_grad_norm: -1 - val_check_interval: 1.0 # Don't validate before extracting features. + detect_anomaly: false + auto_scale_batch_size: false + plugins: null + move_metrics_to_cpu: false + multiple_trainloader_mode: max_size_cycle diff --git a/anomalib/models/fastflow/lightning_model.py b/anomalib/models/fastflow/lightning_model.py index 7b80a1dde7..c2d203b926 100644 --- a/anomalib/models/fastflow/lightning_model.py +++ b/anomalib/models/fastflow/lightning_model.py @@ -8,7 +8,6 @@ import torch from omegaconf import DictConfig, ListConfig from pytorch_lightning.callbacks import EarlyStopping -from pytorch_lightning.utilities.cli import MODEL_REGISTRY from pytorch_lightning.utilities.types import STEP_OUTPUT from torch import Tensor, optim @@ -17,7 +16,6 @@ from anomalib.models.fastflow.torch_model import FastflowModel -@MODEL_REGISTRY class Fastflow(AnomalyModule): """PL Lightning Module for the FastFlow algorithm. @@ -61,6 +59,8 @@ def training_step(self, batch: dict[str, str | Tensor], *args, **kwargs) -> STEP Returns: STEP_OUTPUT: Dictionary containing the loss value. """ + del args, kwargs # These variables are not used. 
+ hidden_variables, jacobians = self.model(batch["image"]) loss = self.loss(hidden_variables, jacobians) self.log("train_loss", loss.item(), on_epoch=True, prog_bar=True, logger=True) @@ -75,6 +75,8 @@ def validation_step(self, batch: dict[str, str | Tensor], *args, **kwargs) -> ST Returns: STEP_OUTPUT | None: batch dictionary containing anomaly-maps. """ + del args, kwargs # These variables are not used. + anomaly_maps = self.model(batch["image"]) batch["anomaly_maps"] = anomaly_maps return batch diff --git a/anomalib/models/ganomaly/config.yaml b/anomalib/models/ganomaly/config.yaml index fd6618302f..0ffc0d11b3 100644 --- a/anomalib/models/ganomaly/config.yaml +++ b/anomalib/models/ganomaly/config.yaml @@ -72,48 +72,42 @@ optimization: # PL Trainer Args. Don't add extra parameter here. trainer: - accelerator: auto # <"cpu", "gpu", "tpu", "ipu", "hpu", "auto"> - accumulate_grad_batches: 1 - amp_backend: native - auto_lr_find: false - auto_scale_batch_size: false - auto_select_gpus: false - benchmark: false - check_val_every_n_epoch: 2 + enable_checkpointing: true default_root_dir: null - detect_anomaly: false - deterministic: false + gradient_clip_val: 0 + gradient_clip_algorithm: norm + num_nodes: 1 devices: 1 - enable_checkpointing: true - enable_model_summary: true enable_progress_bar: true + overfit_batches: 0.0 + track_grad_norm: -1 + check_val_every_n_epoch: 2 # Don't validate before extracting features. fast_dev_run: false - gpus: null # Set automatically - gradient_clip_val: 0 - ipus: null - limit_predict_batches: 1.0 - limit_test_batches: 1.0 + accumulate_grad_batches: 1 + max_epochs: 100 + max_steps: -1 + min_steps: null + max_time: null limit_train_batches: 1.0 limit_val_batches: 1.0 + limit_test_batches: 1.0 + limit_predict_batches: 1.0 + val_check_interval: 1.0 # Don't validate before extracting features. log_every_n_steps: 50 - log_gpu_memory: null - max_epochs: 100 - max_steps: null - min_epochs: null - min_steps: null - move_metrics_to_cpu: false - multiple_trainloader_mode: max_size_cycle - num_nodes: 1 - num_processes: null - num_sanity_val_steps: 0 - overfit_batches: 0.0 - plugins: null + accelerator: auto # <"cpu", "gpu", "tpu", "ipu", "hpu", "auto"> + strategy: null + sync_batchnorm: false precision: 32 + enable_model_summary: true + num_sanity_val_steps: 0 profiler: null + benchmark: false + deterministic: false reload_dataloaders_every_n_epochs: 0 + auto_lr_find: false replace_sampler_ddp: true - strategy: null - sync_batchnorm: false - tpu_cores: null - track_grad_norm: -1 - val_check_interval: 1.0 + detect_anomaly: false + auto_scale_batch_size: false + plugins: null + move_metrics_to_cpu: false + multiple_trainloader_mode: max_size_cycle diff --git a/anomalib/models/ganomaly/lightning_model.py b/anomalib/models/ganomaly/lightning_model.py index 0034e8ee5e..9018a56764 100644 --- a/anomalib/models/ganomaly/lightning_model.py +++ b/anomalib/models/ganomaly/lightning_model.py @@ -13,7 +13,6 @@ import torch from omegaconf import DictConfig, ListConfig from pytorch_lightning.callbacks import Callback, EarlyStopping -from pytorch_lightning.utilities.cli import MODEL_REGISTRY from pytorch_lightning.utilities.types import EPOCH_OUTPUT, STEP_OUTPUT from torch import Tensor, optim @@ -25,7 +24,6 @@ logger = logging.getLogger(__name__) -@MODEL_REGISTRY class Ganomaly(AnomalyModule): """PL Lightning Module for the GANomaly Algorithm. 
diff --git a/anomalib/models/padim/config.yaml b/anomalib/models/padim/config.yaml index b6826cc482..23aa97a216 100644 --- a/anomalib/models/padim/config.yaml +++ b/anomalib/models/padim/config.yaml @@ -67,47 +67,43 @@ optimization: # PL Trainer Args. Don't add extra parameter here. trainer: - accelerator: auto # <"cpu", "gpu", "tpu", "ipu", "hpu", "auto"> - accumulate_grad_batches: 1 - amp_backend: native - auto_lr_find: false - auto_scale_batch_size: false - auto_select_gpus: false - benchmark: false - check_val_every_n_epoch: 1 # Don't validate before extracting features. + enable_checkpointing: true default_root_dir: null - detect_anomaly: false - deterministic: false + gradient_clip_val: 0 + gradient_clip_algorithm: norm + num_nodes: 1 devices: 1 - enable_checkpointing: true - enable_model_summary: true enable_progress_bar: true + overfit_batches: 0.0 + track_grad_norm: -1 + check_val_every_n_epoch: 1 # Don't validate before extracting features. fast_dev_run: false - gpus: null # Set automatically - gradient_clip_val: 0 - ipus: null - limit_predict_batches: 1.0 - limit_test_batches: 1.0 - limit_train_batches: 1.0 - limit_val_batches: 1.0 - log_every_n_steps: 50 + accumulate_grad_batches: 1 max_epochs: 1 - max_steps: -1 - max_time: null min_epochs: null + max_steps: -1 min_steps: null - move_metrics_to_cpu: false - multiple_trainloader_mode: max_size_cycle - num_nodes: 1 - num_processes: null - num_sanity_val_steps: 0 - overfit_batches: 0.0 - plugins: null + max_time: null + limit_train_batches: 1.0 + limit_val_batches: 1.0 + limit_test_batches: 1.0 + limit_predict_batches: 1.0 + val_check_interval: 1.0 # Don't validate before extracting features. + log_every_n_steps: 50 + accelerator: auto # <"cpu", "gpu", "tpu", "ipu", "hpu", "auto"> + strategy: null + sync_batchnorm: false precision: 32 + enable_model_summary: true + num_sanity_val_steps: 0 profiler: null + benchmark: false + deterministic: false reload_dataloaders_every_n_epochs: 0 + auto_lr_find: false replace_sampler_ddp: true - sync_batchnorm: false - tpu_cores: null - track_grad_norm: -1 - val_check_interval: 1.0 # Don't validate before extracting features. + detect_anomaly: false + auto_scale_batch_size: false + plugins: null + move_metrics_to_cpu: false + multiple_trainloader_mode: max_size_cycle diff --git a/anomalib/models/padim/lightning_model.py b/anomalib/models/padim/lightning_model.py index ec1b4f073d..3e71163422 100644 --- a/anomalib/models/padim/lightning_model.py +++ b/anomalib/models/padim/lightning_model.py @@ -12,7 +12,6 @@ import torch from omegaconf import DictConfig, ListConfig -from pytorch_lightning.utilities.cli import MODEL_REGISTRY from pytorch_lightning.utilities.types import STEP_OUTPUT from torch import Tensor @@ -24,7 +23,6 @@ __all__ = ["Padim", "PadimLightning"] -@MODEL_REGISTRY class Padim(AnomalyModule): """PaDiM: a Patch Distribution Modeling Framework for Anomaly Detection and Localization. @@ -74,6 +72,8 @@ def training_step(self, batch: dict[str, str | Tensor], *args, **kwargs) -> None Returns: Hierarchical feature map """ + del args, kwargs # These variables are not used. + self.model.feature_extractor.eval() embedding = self.model(batch["image"]) @@ -106,6 +106,7 @@ def validation_step(self, batch: dict[str, str | Tensor], *args, **kwargs) -> ST Dictionary containing images, features, true labels and masks. These are required in `validation_epoch_end` for feature concatenation. """ + del args, kwargs # These variables are not used. 
batch["anomaly_maps"] = self.model(batch["image"]) return batch diff --git a/anomalib/models/patchcore/config.yaml b/anomalib/models/patchcore/config.yaml index 5ff069bfd5..728eae80f4 100644 --- a/anomalib/models/patchcore/config.yaml +++ b/anomalib/models/patchcore/config.yaml @@ -68,49 +68,43 @@ optimization: # PL Trainer Args. Don't add extra parameter here. trainer: - accelerator: auto # <"cpu", "gpu", "tpu", "ipu", "hpu", "auto"> - accumulate_grad_batches: 1 - amp_backend: native - auto_lr_find: false - auto_scale_batch_size: false - auto_select_gpus: false - benchmark: false - check_val_every_n_epoch: 1 # Don't validate before extracting features. + enable_checkpointing: true default_root_dir: null - detect_anomaly: false - deterministic: false + gradient_clip_val: 0 + gradient_clip_algorithm: norm + num_nodes: 1 devices: 1 - enable_checkpointing: true - enable_model_summary: true enable_progress_bar: true + overfit_batches: 0.0 + track_grad_norm: -1 + check_val_every_n_epoch: 1 # Don't validate before extracting features. fast_dev_run: false - gpus: null # Set automatically - gradient_clip_val: 0 - ipus: null - limit_predict_batches: 1.0 - limit_test_batches: 1.0 - limit_train_batches: 1.0 - limit_val_batches: 1.0 - log_every_n_steps: 50 - log_gpu_memory: null + accumulate_grad_batches: 1 max_epochs: 1 - max_steps: -1 - max_time: null min_epochs: null + max_steps: -1 min_steps: null - move_metrics_to_cpu: false - multiple_trainloader_mode: max_size_cycle - num_nodes: 1 - num_processes: null - num_sanity_val_steps: 0 - overfit_batches: 0.0 - plugins: null + max_time: null + limit_train_batches: 1.0 + limit_val_batches: 1.0 + limit_test_batches: 1.0 + limit_predict_batches: 1.0 + val_check_interval: 1.0 # Don't validate before extracting features. + log_every_n_steps: 50 + accelerator: auto # <"cpu", "gpu", "tpu", "ipu", "hpu", "auto"> + strategy: null + sync_batchnorm: false precision: 32 + enable_model_summary: true + num_sanity_val_steps: 0 profiler: null + benchmark: false + deterministic: false reload_dataloaders_every_n_epochs: 0 + auto_lr_find: false replace_sampler_ddp: true - strategy: null - sync_batchnorm: false - tpu_cores: null - track_grad_norm: -1 - val_check_interval: 1.0 # Don't validate before extracting features. + detect_anomaly: false + auto_scale_batch_size: false + plugins: null + move_metrics_to_cpu: false + multiple_trainloader_mode: max_size_cycle diff --git a/anomalib/models/patchcore/lightning_model.py b/anomalib/models/patchcore/lightning_model.py index 5e34c301e4..fbdb8949a2 100644 --- a/anomalib/models/patchcore/lightning_model.py +++ b/anomalib/models/patchcore/lightning_model.py @@ -12,7 +12,6 @@ import torch from omegaconf import DictConfig, ListConfig -from pytorch_lightning.utilities.cli import MODEL_REGISTRY from pytorch_lightning.utilities.types import STEP_OUTPUT from torch import Tensor @@ -22,7 +21,6 @@ logger = logging.getLogger(__name__) -@MODEL_REGISTRY class Patchcore(AnomalyModule): """PatchcoreLightning Module to train PatchCore algorithm. @@ -74,6 +72,8 @@ def training_step(self, batch: dict[str, str | Tensor], *args, **kwargs) -> None Returns: dict[str, np.ndarray]: Embedding Vector """ + del args, kwargs # These variables are not used. 
+ self.model.feature_extractor.eval() embedding = self.model(batch["image"]) @@ -104,6 +104,7 @@ def validation_step(self, batch: dict[str, str | Tensor], *args, **kwargs) -> ST Returns: dict[str, Any]: Image filenames, test images, GT and predicted label/masks """ + del args, kwargs # These variables are not used. anomaly_maps, anomaly_score = self.model(batch["image"]) batch["anomaly_maps"] = anomaly_maps diff --git a/anomalib/models/reverse_distillation/config.yaml b/anomalib/models/reverse_distillation/config.yaml index de0c58053e..1cf9c9097c 100644 --- a/anomalib/models/reverse_distillation/config.yaml +++ b/anomalib/models/reverse_distillation/config.yaml @@ -76,48 +76,43 @@ optimization: # PL Trainer Args. Don't add extra parameter here. trainer: - accelerator: auto # <"cpu", "gpu", "tpu", "ipu", "hpu", "auto"> - accumulate_grad_batches: 1 - amp_backend: native - auto_lr_find: false - auto_scale_batch_size: false - auto_select_gpus: false - benchmark: false - check_val_every_n_epoch: 2 + enable_checkpointing: true default_root_dir: null - detect_anomaly: false - deterministic: false + gradient_clip_val: 0 + gradient_clip_algorithm: norm + num_nodes: 1 devices: 1 - enable_checkpointing: true - enable_model_summary: true enable_progress_bar: true + overfit_batches: 0.0 + track_grad_norm: -1 + check_val_every_n_epoch: 2 # Don't validate before extracting features. fast_dev_run: false - gpus: null # Set automatically - gradient_clip_val: 0 - ipus: null - limit_predict_batches: 1.0 - limit_test_batches: 1.0 - limit_train_batches: 1.0 - limit_val_batches: 1.0 - log_every_n_steps: 50 - log_gpu_memory: null + accumulate_grad_batches: 1 max_epochs: 200 - max_steps: null min_epochs: null + max_steps: -1 min_steps: null - move_metrics_to_cpu: false - multiple_trainloader_mode: max_size_cycle - num_nodes: 1 - num_processes: null - num_sanity_val_steps: 0 - overfit_batches: 0.0 - plugins: null + max_time: null + limit_train_batches: 1.0 + limit_val_batches: 1.0 + limit_test_batches: 1.0 + limit_predict_batches: 1.0 + val_check_interval: 1.0 # Don't validate before extracting features. + log_every_n_steps: 50 + accelerator: auto # <"cpu", "gpu", "tpu", "ipu", "hpu", "auto"> + strategy: null + sync_batchnorm: false precision: 32 + enable_model_summary: true + num_sanity_val_steps: 0 profiler: null + benchmark: false + deterministic: false reload_dataloaders_every_n_epochs: 0 + auto_lr_find: false replace_sampler_ddp: true - strategy: null - sync_batchnorm: false - tpu_cores: null - track_grad_norm: -1 - val_check_interval: 1.0 + detect_anomaly: false + auto_scale_batch_size: false + plugins: null + move_metrics_to_cpu: false + multiple_trainloader_mode: max_size_cycle diff --git a/anomalib/models/reverse_distillation/lightning_model.py b/anomalib/models/reverse_distillation/lightning_model.py index 732c27185b..29a2f3dc31 100644 --- a/anomalib/models/reverse_distillation/lightning_model.py +++ b/anomalib/models/reverse_distillation/lightning_model.py @@ -10,7 +10,6 @@ from omegaconf import DictConfig, ListConfig from pytorch_lightning.callbacks import EarlyStopping -from pytorch_lightning.utilities.cli import MODEL_REGISTRY from pytorch_lightning.utilities.types import STEP_OUTPUT from torch import Tensor, optim @@ -20,7 +19,6 @@ from .torch_model import ReverseDistillationModel -@MODEL_REGISTRY class ReverseDistillation(AnomalyModule): """PL Lightning Module for Reverse Distillation Algorithm. 
@@ -88,6 +86,8 @@ def training_step(self, batch: dict[str, str | Tensor], *args, **kwargs) -> STEP Returns: Feature Map """ + del args, kwargs # These variables are not used. + loss = self.loss(*self.model(batch["image"])) self.log("train_loss", loss.item(), on_epoch=True, prog_bar=True, logger=True) return {"loss": loss} @@ -105,6 +105,8 @@ def validation_step(self, batch: dict[str, str | Tensor], *args, **kwargs) -> ST Dictionary containing images, anomaly maps, true labels and masks. These are required in `validation_epoch_end` for feature concatenation. """ + del args, kwargs # These variables are not used. + batch["anomaly_maps"] = self.model(batch["image"]) return batch diff --git a/anomalib/models/rkde/config.yaml b/anomalib/models/rkde/config.yaml index d18a730444..f5bda6c533 100644 --- a/anomalib/models/rkde/config.yaml +++ b/anomalib/models/rkde/config.yaml @@ -69,49 +69,43 @@ optimization: export_mode: null #options: onnx, openvino # PL Trainer Args. Don't add extra parameter here. trainer: - accelerator: auto # <"cpu", "gpu", "tpu", "ipu", "hpu", "auto"> - accumulate_grad_batches: 1 - amp_backend: native - auto_lr_find: false - auto_scale_batch_size: false - auto_select_gpus: false - benchmark: false - check_val_every_n_epoch: 1 # Don't validate before extracting features. + enable_checkpointing: true default_root_dir: null - detect_anomaly: false - deterministic: false + gradient_clip_val: 0 + gradient_clip_algorithm: norm + num_nodes: 1 devices: 1 - enable_checkpointing: true - enable_model_summary: true enable_progress_bar: true + overfit_batches: 0.0 + track_grad_norm: -1 + check_val_every_n_epoch: 1 # Don't validate before extracting features. fast_dev_run: false - gpus: null # Set automatically - gradient_clip_val: 0 - ipus: null - limit_predict_batches: 1.0 - limit_test_batches: 1.0 - limit_train_batches: 1.0 - limit_val_batches: 1.0 - log_every_n_steps: 50 - log_gpu_memory: null + accumulate_grad_batches: 1 max_epochs: 1 - max_steps: -1 - max_time: null min_epochs: null + max_steps: -1 min_steps: null - move_metrics_to_cpu: false - multiple_trainloader_mode: max_size_cycle - num_nodes: 1 - num_processes: null - num_sanity_val_steps: 0 - overfit_batches: 0.0 - plugins: null + max_time: null + limit_train_batches: 1.0 + limit_val_batches: 1.0 + limit_test_batches: 1.0 + limit_predict_batches: 1.0 + val_check_interval: 1.0 # Don't validate before extracting features. + log_every_n_steps: 50 + accelerator: auto # <"cpu", "gpu", "tpu", "ipu", "hpu", "auto"> + strategy: null + sync_batchnorm: false precision: 32 + enable_model_summary: true + num_sanity_val_steps: 0 profiler: null + benchmark: false + deterministic: false reload_dataloaders_every_n_epochs: 0 + auto_lr_find: false replace_sampler_ddp: true - strategy: null - sync_batchnorm: false - tpu_cores: null - track_grad_norm: -1 - val_check_interval: 1.0 # Don't validate before extracting features. 
+ detect_anomaly: false + auto_scale_batch_size: false + plugins: null + move_metrics_to_cpu: false + multiple_trainloader_mode: max_size_cycle diff --git a/anomalib/models/rkde/lightning_model.py b/anomalib/models/rkde/lightning_model.py index f0a721f4de..bc3624ee73 100644 --- a/anomalib/models/rkde/lightning_model.py +++ b/anomalib/models/rkde/lightning_model.py @@ -9,7 +9,6 @@ import torch from omegaconf import DictConfig, ListConfig -from pytorch_lightning.utilities.cli import MODEL_REGISTRY from pytorch_lightning.utilities.types import STEP_OUTPUT from torch import Tensor @@ -22,7 +21,6 @@ logger = logging.getLogger(__name__) -@MODEL_REGISTRY class Rkde(AnomalyModule): """Region Based Anomaly Detection With Real-Time Training and Analysis. @@ -78,6 +76,8 @@ def training_step(self, batch: dict[str, str | Tensor], *args, **kwargs) -> None Returns: Deep CNN features. """ + del args, kwargs # These variables are not used. + features = self.model(batch["image"]) self.embeddings.append(features) @@ -99,6 +99,7 @@ def validation_step(self, batch: dict[str, str | Tensor], *args, **kwargs) -> ST Returns: Dictionary containing probability, prediction and ground truth values. """ + del args, kwargs # These variables are not used. # get batched model predictions boxes, scores = self.model(batch["image"]) diff --git a/anomalib/models/stfpm/config.yaml b/anomalib/models/stfpm/config.yaml index f85516cf67..efdd9ee14b 100644 --- a/anomalib/models/stfpm/config.yaml +++ b/anomalib/models/stfpm/config.yaml @@ -37,7 +37,7 @@ model: momentum: 0.9 weight_decay: 0.0001 early_stopping: - patience: 3 + patience: 5 metric: pixel_AUROC mode: max normalization_method: min_max # options: [null, min_max, cdf] @@ -73,49 +73,43 @@ optimization: export_mode: null #options: onnx, openvino # PL Trainer Args. Don't add extra parameter here. trainer: - accelerator: auto # <"cpu", "gpu", "tpu", "ipu", "hpu", "auto"> - accumulate_grad_batches: 1 - amp_backend: native - auto_lr_find: false - auto_scale_batch_size: false - auto_select_gpus: false - benchmark: false - check_val_every_n_epoch: 1 + enable_checkpointing: true default_root_dir: null - detect_anomaly: false - deterministic: false + gradient_clip_val: 0 + gradient_clip_algorithm: norm + num_nodes: 1 devices: 1 - enable_checkpointing: true - enable_model_summary: true enable_progress_bar: true + overfit_batches: 0.0 + track_grad_norm: -1 + check_val_every_n_epoch: 1 # Don't validate before extracting features. fast_dev_run: false - gpus: null # Set automatically - gradient_clip_val: 0 - ipus: null - limit_predict_batches: 1.0 - limit_test_batches: 1.0 - limit_train_batches: 1.0 - limit_val_batches: 1.0 - log_every_n_steps: 50 - log_gpu_memory: null + accumulate_grad_batches: 1 max_epochs: 100 - max_steps: -1 - max_time: null min_epochs: null + max_steps: -1 min_steps: null - move_metrics_to_cpu: false - multiple_trainloader_mode: max_size_cycle - num_nodes: 1 - num_processes: null - num_sanity_val_steps: 0 - overfit_batches: 0.0 - plugins: null + max_time: null + limit_train_batches: 1.0 + limit_val_batches: 1.0 + limit_test_batches: 1.0 + limit_predict_batches: 1.0 + val_check_interval: 1.0 # Don't validate before extracting features. 
+ log_every_n_steps: 50 + accelerator: auto # <"cpu", "gpu", "tpu", "ipu", "hpu", "auto"> + strategy: null + sync_batchnorm: false precision: 32 + enable_model_summary: true + num_sanity_val_steps: 0 profiler: null + benchmark: false + deterministic: false reload_dataloaders_every_n_epochs: 0 + auto_lr_find: false replace_sampler_ddp: true - strategy: null - sync_batchnorm: false - tpu_cores: null - track_grad_norm: -1 - val_check_interval: 1.0 + detect_anomaly: false + auto_scale_batch_size: false + plugins: null + move_metrics_to_cpu: false + multiple_trainloader_mode: max_size_cycle diff --git a/anomalib/models/stfpm/lightning_model.py b/anomalib/models/stfpm/lightning_model.py index ad847987da..e05f0aa783 100644 --- a/anomalib/models/stfpm/lightning_model.py +++ b/anomalib/models/stfpm/lightning_model.py @@ -11,7 +11,6 @@ import torch from omegaconf import DictConfig, ListConfig from pytorch_lightning.callbacks import EarlyStopping -from pytorch_lightning.utilities.cli import MODEL_REGISTRY from pytorch_lightning.utilities.types import STEP_OUTPUT from torch import Tensor, optim @@ -22,7 +21,6 @@ __all__ = ["StfpmLightning"] -@MODEL_REGISTRY class Stfpm(AnomalyModule): """PL Lightning Module for the STFPM algorithm. @@ -58,6 +56,8 @@ def training_step(self, batch: dict[str, str | Tensor], *args, **kwargs) -> STEP Returns: Loss value """ + del args, kwargs # These variables are not used. + self.model.teacher_model.eval() teacher_features, student_features = self.model.forward(batch["image"]) loss = self.loss(teacher_features, student_features) @@ -77,8 +77,9 @@ def validation_step(self, batch: dict[str, str | Tensor], *args, **kwargs) -> ST Dictionary containing images, anomaly maps, true labels and masks. These are required in `validation_epoch_end` for feature concatenation. """ - batch["anomaly_maps"] = self.model(batch["image"]) + del args, kwargs # These variables are not used. + batch["anomaly_maps"] = self.model(batch["image"]) return batch diff --git a/anomalib/pre_processing/transforms/custom.py b/anomalib/pre_processing/transforms/custom.py index 083efc5572..fb4a721df5 100644 --- a/anomalib/pre_processing/transforms/custom.py +++ b/anomalib/pre_processing/transforms/custom.py @@ -8,6 +8,7 @@ import warnings import numpy as np +import torch from torch import Tensor @@ -47,11 +48,11 @@ def __call__(self, tensor: Tensor) -> np.ndarray: else: raise ValueError(f"Tensor has batch size of {tensor.size(0)}. 
Only single batch is supported.") - for tnsr, mean, std in zip(tensor, self.mean, self.std): - tnsr.mul_(std).add_(mean) + denormalized_per_channel = [(tnsr * std) + mean for tnsr, mean, std in zip(tensor, self.mean, self.std)] + denormalized_tensor = torch.stack(denormalized_per_channel) - array = (tensor * 255).permute(1, 2, 0).cpu().numpy().astype(np.uint8) - return array + denormalized_array = (denormalized_tensor * 255).permute(1, 2, 0).cpu().numpy().astype(np.uint8) + return denormalized_array def __repr__(self) -> str: """Representational string.""" diff --git a/anomalib/utils/callbacks/cdf_normalization.py b/anomalib/utils/callbacks/cdf_normalization.py index 52ec6af226..464a58479c 100644 --- a/anomalib/utils/callbacks/cdf_normalization.py +++ b/anomalib/utils/callbacks/cdf_normalization.py @@ -10,7 +10,6 @@ import pytorch_lightning as pl from pytorch_lightning import Callback, Trainer -from pytorch_lightning.utilities.cli import CALLBACK_REGISTRY from pytorch_lightning.utilities.types import STEP_OUTPUT from torch.distributions import LogNormal @@ -22,7 +21,6 @@ logger = logging.getLogger(__name__) -@CALLBACK_REGISTRY class CdfNormalizationCallback(Callback): """Callback that standardizes the image-level and pixel-level anomaly scores.""" @@ -113,7 +111,7 @@ def _collect_stats(self, trainer: pl.Trainer, pl_module: AnomalyModule) -> None: estimate the distribution of anomaly scores for normal data at the image and pixel level by computing the mean and standard deviations. A dictionary containing the computed statistics is stored in self.stats. """ - predictions = Trainer(gpus=trainer.gpus).predict( + predictions = Trainer(accelerator=trainer.accelerator, devices=trainer.num_devices).predict( model=self._create_inference_model(pl_module), dataloaders=trainer.datamodule.train_dataloader() ) pl_module.normalization_metrics.reset() diff --git a/anomalib/utils/callbacks/export.py b/anomalib/utils/callbacks/export.py index 65ae1ff53a..a641823e9d 100644 --- a/anomalib/utils/callbacks/export.py +++ b/anomalib/utils/callbacks/export.py @@ -10,7 +10,6 @@ import pytorch_lightning as pl from pytorch_lightning import Callback -from pytorch_lightning.utilities.cli import CALLBACK_REGISTRY from anomalib.deploy import ExportMode, export from anomalib.models.components import AnomalyModule @@ -18,7 +17,6 @@ logger = logging.getLogger(__name__) -@CALLBACK_REGISTRY class ExportCallback(Callback): """Callback to compresses a trained model. 
diff --git a/anomalib/utils/callbacks/graph.py b/anomalib/utils/callbacks/graph.py index 20c60126ba..8d8ed29cdf 100644 --- a/anomalib/utils/callbacks/graph.py +++ b/anomalib/utils/callbacks/graph.py @@ -5,7 +5,6 @@ import torch from pytorch_lightning import Callback, LightningModule, Trainer -from pytorch_lightning.utilities.cli import CALLBACK_REGISTRY from anomalib.utils.loggers import ( AnomalibCometLogger, @@ -14,7 +13,6 @@ ) -@CALLBACK_REGISTRY class GraphLogger(Callback): """Log model graph to respective logger.""" diff --git a/anomalib/utils/callbacks/metrics_configuration.py b/anomalib/utils/callbacks/metrics_configuration.py index 812d48e757..31113961ec 100644 --- a/anomalib/utils/callbacks/metrics_configuration.py +++ b/anomalib/utils/callbacks/metrics_configuration.py @@ -10,7 +10,6 @@ import pytorch_lightning as pl from pytorch_lightning.callbacks import Callback -from pytorch_lightning.utilities.cli import CALLBACK_REGISTRY from anomalib.data import TaskType from anomalib.models.components.base.anomaly_module import AnomalyModule @@ -21,7 +20,6 @@ __all__ = ["MetricsConfigurationCallback"] -@CALLBACK_REGISTRY class MetricsConfigurationCallback(Callback): """Metrics Configuration Callback.""" diff --git a/anomalib/utils/callbacks/min_max_normalization.py b/anomalib/utils/callbacks/min_max_normalization.py index 632ed8b872..b3fadb5c26 100644 --- a/anomalib/utils/callbacks/min_max_normalization.py +++ b/anomalib/utils/callbacks/min_max_normalization.py @@ -10,7 +10,6 @@ import pytorch_lightning as pl import torch from pytorch_lightning import Callback -from pytorch_lightning.utilities.cli import CALLBACK_REGISTRY from pytorch_lightning.utilities.types import STEP_OUTPUT from anomalib.models.components import AnomalyModule @@ -18,7 +17,6 @@ from anomalib.utils.metrics import MinMax -@CALLBACK_REGISTRY class MinMaxNormalizationCallback(Callback): """Callback that normalizes the image-level and pixel-level anomaly scores using min-max normalization.""" diff --git a/anomalib/utils/callbacks/model_loader.py b/anomalib/utils/callbacks/model_loader.py index a32384a3ee..5bae4841f5 100644 --- a/anomalib/utils/callbacks/model_loader.py +++ b/anomalib/utils/callbacks/model_loader.py @@ -9,14 +9,12 @@ import torch from pytorch_lightning import Callback, Trainer -from pytorch_lightning.utilities.cli import CALLBACK_REGISTRY from anomalib.models.components import AnomalyModule logger = logging.getLogger(__name__) -@CALLBACK_REGISTRY class LoadModelCallback(Callback): """Callback that loads the model weights from the state dict.""" diff --git a/anomalib/utils/callbacks/nncf/callback.py b/anomalib/utils/callbacks/nncf/callback.py index 51ca1de386..3dfbb3e532 100644 --- a/anomalib/utils/callbacks/nncf/callback.py +++ b/anomalib/utils/callbacks/nncf/callback.py @@ -14,12 +14,10 @@ from nncf.api.compression import CompressionAlgorithmController from nncf.torch import register_default_init_args from pytorch_lightning import Callback -from pytorch_lightning.utilities.cli import CALLBACK_REGISTRY from anomalib.utils.callbacks.nncf.utils import InitLoader, wrap_nncf_model -@CALLBACK_REGISTRY class NNCFCallback(Callback): """Callback for NNCF compression. 
diff --git a/anomalib/utils/callbacks/post_processing_configuration.py b/anomalib/utils/callbacks/post_processing_configuration.py index d097277687..ecca484246 100644 --- a/anomalib/utils/callbacks/post_processing_configuration.py +++ b/anomalib/utils/callbacks/post_processing_configuration.py @@ -10,7 +10,6 @@ import torch from pytorch_lightning import Callback, LightningModule, Trainer -from pytorch_lightning.utilities.cli import CALLBACK_REGISTRY from anomalib.models.components.base.anomaly_module import AnomalyModule from anomalib.post_processing import NormalizationMethod, ThresholdMethod @@ -20,7 +19,6 @@ __all__ = ["PostProcessingConfigurationCallback"] -@CALLBACK_REGISTRY class PostProcessingConfigurationCallback(Callback): """Post-Processing Configuration Callback. diff --git a/anomalib/utils/callbacks/tiler_configuration.py b/anomalib/utils/callbacks/tiler_configuration.py index db5c5b710a..952cacd3dd 100644 --- a/anomalib/utils/callbacks/tiler_configuration.py +++ b/anomalib/utils/callbacks/tiler_configuration.py @@ -10,7 +10,6 @@ import pytorch_lightning as pl from pytorch_lightning.callbacks import Callback -from pytorch_lightning.utilities.cli import CALLBACK_REGISTRY from anomalib.models.components import AnomalyModule from anomalib.pre_processing.tiler import Tiler @@ -18,7 +17,6 @@ __all__ = ["TilerConfigurationCallback"] -@CALLBACK_REGISTRY class TilerConfigurationCallback(Callback): """Tiler Configuration Callback.""" diff --git a/anomalib/utils/callbacks/timer.py b/anomalib/utils/callbacks/timer.py index 17f132a43f..a4b288d0a6 100644 --- a/anomalib/utils/callbacks/timer.py +++ b/anomalib/utils/callbacks/timer.py @@ -7,12 +7,10 @@ import time from pytorch_lightning import Callback, LightningModule, Trainer -from pytorch_lightning.utilities.cli import CALLBACK_REGISTRY logger = logging.getLogger(__name__) -@CALLBACK_REGISTRY class TimerCallback(Callback): """Callback that measures the training and testing time of a PyTorch Lightning module.""" diff --git a/anomalib/utils/callbacks/visualizer/visualizer_image.py b/anomalib/utils/callbacks/visualizer/visualizer_image.py index 8ae244a066..f8816d3207 100644 --- a/anomalib/utils/callbacks/visualizer/visualizer_image.py +++ b/anomalib/utils/callbacks/visualizer/visualizer_image.py @@ -10,7 +10,6 @@ from typing import Any import pytorch_lightning as pl -from pytorch_lightning.utilities.cli import CALLBACK_REGISTRY from pytorch_lightning.utilities.types import STEP_OUTPUT from anomalib.models.components import AnomalyModule @@ -18,7 +17,6 @@ from .visualizer_base import BaseVisualizerCallback -@CALLBACK_REGISTRY class ImageVisualizerCallback(BaseVisualizerCallback): """Callback that visualizes the inference results of a model. diff --git a/anomalib/utils/callbacks/visualizer/visualizer_metric.py b/anomalib/utils/callbacks/visualizer/visualizer_metric.py index 428fc07d32..9dd7692635 100644 --- a/anomalib/utils/callbacks/visualizer/visualizer_metric.py +++ b/anomalib/utils/callbacks/visualizer/visualizer_metric.py @@ -10,14 +10,12 @@ import numpy as np import pytorch_lightning as pl from matplotlib import pyplot as plt -from pytorch_lightning.utilities.cli import CALLBACK_REGISTRY from anomalib.models.components import AnomalyModule from .visualizer_base import BaseVisualizerCallback -@CALLBACK_REGISTRY class MetricVisualizerCallback(BaseVisualizerCallback): """Callback that visualizes the metric results of a model by plotting the corresponding curves. 
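The registry decorators removed from the callback modules above came from pytorch_lightning.utilities.cli, which no longer exists in PyTorch Lightning v1.9; callbacks are now simply instantiated and handed to the Trainer (or referenced by class path in a CLI config). A minimal sketch of that usage, with a toy callback standing in for the real ones:

    import pytorch_lightning as pl
    from pytorch_lightning import Callback, LightningModule, Trainer

    class PrintingTimerCallback(Callback):
        """Toy callback for illustration; no registry decorator is required."""

        def on_fit_start(self, trainer: Trainer, pl_module: LightningModule) -> None:
            print("fit started")

    # Callbacks are passed explicitly instead of being resolved through CALLBACK_REGISTRY.
    trainer = pl.Trainer(callbacks=[PrintingTimerCallback()], accelerator="auto", devices=1, max_epochs=1)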
diff --git a/anomalib/utils/cli/cli.py b/anomalib/utils/cli/cli.py index 86bc21f4cd..a83ad31ac9 100644 --- a/anomalib/utils/cli/cli.py +++ b/anomalib/utils/cli/cli.py @@ -11,15 +11,9 @@ from datetime import datetime from importlib import import_module from pathlib import Path -from typing import Any, Callable from omegaconf.omegaconf import OmegaConf -from pytorch_lightning import LightningDataModule, LightningModule, Trainer -from pytorch_lightning.utilities.cli import ( - LightningArgumentParser, - LightningCLI, - SaveConfigCallback, -) +from pytorch_lightning.cli import LightningArgumentParser, LightningCLI from anomalib.utils.callbacks import ( CdfNormalizationCallback, @@ -47,46 +41,6 @@ class AnomalibCLI(LightningCLI): For more details, the reader could refer to PyTorch Lightning CLI documentation. """ - def __init__( # pylint: disable=too-many-function-args - self, - model_class: type[LightningModule] | Callable[..., LightningModule] | None = None, - datamodule_class: type[LightningDataModule] | Callable[..., LightningDataModule] | None = None, - save_config_callback: type[SaveConfigCallback] | None = SaveConfigCallback, - save_config_filename: str = "config.yaml", - save_config_overwrite: bool = False, - save_config_multifile: bool = False, - trainer_class: type[Trainer] | Callable[..., Trainer] = Trainer, - trainer_defaults: dict[str, Any] | None = None, - seed_everything_default: int | None = None, - description: str = "Anomalib trainer command line tool", - env_prefix: str = "Anomalib", - env_parse: bool = False, - parser_kwargs: dict[str, Any] | dict[str, dict[str, Any]] | None = None, - subclass_mode_model: bool = False, - subclass_mode_data: bool = False, - run: bool = True, - auto_registry: bool = True, - ) -> None: - super().__init__( - model_class, - datamodule_class, - save_config_callback, - save_config_filename, - save_config_overwrite, - save_config_multifile, - trainer_class, - trainer_defaults, - seed_everything_default, - description, - env_prefix, - env_parse, - parser_kwargs, - subclass_mode_model, - subclass_mode_data, - run, - auto_registry, - ) - def add_arguments_to_parser(self, parser: LightningArgumentParser) -> None: """Add default arguments. 
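In v1.9 the CLI utilities live under pytorch_lightning.cli rather than pytorch_lightning.utilities.cli, and the old constructor signature changed, which is why the custom __init__ above is dropped and only add_arguments_to_parser is kept. A minimal sketch of a subclassed CLI on the new import path; the --project.seed option is a hypothetical example, not an anomalib argument:

    from pytorch_lightning.cli import LightningArgumentParser, LightningCLI

    class ExampleCLI(LightningCLI):
        """Only argument wiring is overridden; the parent __init__ is reused as-is."""

        def add_arguments_to_parser(self, parser: LightningArgumentParser) -> None:
            # Hypothetical project-level option, added for illustration only.
            parser.add_argument("--project.seed", type=int, default=42)

    if __name__ == "__main__":
        ExampleCLI()  # parses trainer/model/datamodule arguments and runs the requested subcommand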
diff --git a/anomalib/utils/hpo/runners.py b/anomalib/utils/hpo/runners.py index 3760b2180f..3dc2d8cbef 100644 --- a/anomalib/utils/hpo/runners.py +++ b/anomalib/utils/hpo/runners.py @@ -72,7 +72,7 @@ def sweep(self) -> None: callbacks = get_sweep_callbacks(config) # Disable saving checkpoints as all checkpoints from the sweep will get uploaded - config.trainer.checkpoint_callback = False + config.trainer.enable_checkpointing = False trainer = pl.Trainer(**config.trainer, logger=wandb_logger, callbacks=callbacks) trainer.fit(model, datamodule=datamodule) @@ -126,7 +126,7 @@ def run(self) -> None: callbacks = get_sweep_callbacks(config) # Disable saving checkpoints as all checkpoints from the sweep will get uploaded - config.trainer.checkpoint_callback = False + config.trainer.enable_checkpointing = False trainer = pl.Trainer(**config.trainer, logger=comet_logger, callbacks=callbacks) trainer.fit(model, datamodule=datamodule) diff --git a/anomalib/utils/loggers/__init__.py b/anomalib/utils/loggers/__init__.py index 98a4caa0af..4e47ea0168 100644 --- a/anomalib/utils/loggers/__init__.py +++ b/anomalib/utils/loggers/__init__.py @@ -13,7 +13,7 @@ from omegaconf.dictconfig import DictConfig from omegaconf.listconfig import ListConfig -from pytorch_lightning.loggers import CSVLogger, LightningLoggerBase +from pytorch_lightning.loggers import CSVLogger, Logger from .comet import AnomalibCometLogger from .tensorboard import AnomalibTensorBoardLogger @@ -62,7 +62,7 @@ def configure_logger(level: int | str = logging.INFO) -> None: def get_experiment_logger( config: DictConfig | ListConfig, -) -> LightningLoggerBase | Iterable[LightningLoggerBase] | bool: +) -> Logger | Iterable[Logger] | bool: """Return a logger based on the choice of logger in the config file. 
Args: @@ -72,7 +72,7 @@ def get_experiment_logger( ValueError: for any logger types apart from false and tensorboard Returns: - LightningLoggerBase | Iterable[LightningLoggerBase] | bool]: Logger + Logger | Iterable[Logger] | bool]: Logger """ logger.info("Loading the experiment logger(s)") @@ -91,7 +91,7 @@ def get_experiment_logger( if config.logging.logger in (None, False): return False - logger_list: list[LightningLoggerBase] = [] + logger_list: list[Logger] = [] if isinstance(config.logging.logger, str): config.logging.logger = [config.logging.logger] diff --git a/anomalib/utils/loggers/comet.py b/anomalib/utils/loggers/comet.py index 8daf40cd6b..1a5e56dfce 100644 --- a/anomalib/utils/loggers/comet.py +++ b/anomalib/utils/loggers/comet.py @@ -9,7 +9,11 @@ import numpy as np from matplotlib.figure import Figure -from pytorch_lightning.loggers.comet import CometLogger + +try: + from pytorch_lightning.loggers.comet import CometLogger +except ModuleNotFoundError: + print("To use comet logger install it using `pip install comet-ml`") from pytorch_lightning.utilities import rank_zero_only from .base import ImageLoggerBase diff --git a/anomalib/utils/loggers/tensorboard.py b/anomalib/utils/loggers/tensorboard.py index 74137e19bb..ca3e7b4702 100644 --- a/anomalib/utils/loggers/tensorboard.py +++ b/anomalib/utils/loggers/tensorboard.py @@ -9,7 +9,11 @@ import numpy as np from matplotlib.figure import Figure -from pytorch_lightning.loggers.tensorboard import TensorBoardLogger + +try: + from pytorch_lightning.loggers.tensorboard import TensorBoardLogger +except ModuleNotFoundError: + print("To use tensorboard logger install it using `pip install tensorboard`") from pytorch_lightning.utilities import rank_zero_only from .base import ImageLoggerBase diff --git a/anomalib/utils/loggers/wandb.py b/anomalib/utils/loggers/wandb.py index 6260337adb..bba5703afe 100644 --- a/anomalib/utils/loggers/wandb.py +++ b/anomalib/utils/loggers/wandb.py @@ -12,7 +12,10 @@ from pytorch_lightning.loggers.wandb import WandbLogger from pytorch_lightning.utilities import rank_zero_only -import wandb +try: + import wandb +except ModuleNotFoundError: + print("To use wandb logger install it using `pip install wandb`") from .base import ImageLoggerBase diff --git a/requirements/base.txt b/requirements/base.txt index e84c0cc8f6..3a180e1a07 100644 --- a/requirements/base.txt +++ b/requirements/base.txt @@ -1,9 +1,7 @@ albumentations>=1.1.0 av>=10.0.0 -comet-ml>=3.31.7 einops>=0.3.2 freia>=0.2 -gradio>=2.9.4 imgaug==0.4.0 jsonargparse[signatures]>=4.3 kornia>=0.6.6 @@ -11,9 +9,8 @@ matplotlib>=3.4.3 omegaconf>=2.1.1 opencv-python>=4.5.3.56 pandas>=1.1.0 -pytorch-lightning>=1.6.0,<1.7.0 +pytorch-lightning>=1.7.0,<1.10.0 timm>=0.5.4,<=0.6.12 torchmetrics==0.10.3 torchvision>=0.9.1,<=0.13.0 torchtext>=0.9.1,<=0.13.0 -wandb==0.12.17 diff --git a/requirements/extras.txt b/requirements/extras.txt new file mode 100644 index 0000000000..f85b6e0503 --- /dev/null +++ b/requirements/extras.txt @@ -0,0 +1,4 @@ +comet-ml>=3.31.7 +gradio>=2.9.4 +tensorboard +wandb==0.12.17 diff --git a/setup.py b/setup.py index 1d93d3058a..95e1610b39 100644 --- a/setup.py +++ b/setup.py @@ -83,8 +83,9 @@ def get_required_packages(requirement_files: list[str]) -> list[str]: LONG_DESCRIPTION = (Path(__file__).parent / "README.md").read_text(encoding="utf8") INSTALL_REQUIRES = get_required_packages(requirement_files=["base"]) EXTRAS_REQUIRE = { + "extra": get_required_packages(requirement_files=["extras"]), + "full": 
get_required_packages(requirement_files=["docs", "openvino", "extras"]), "openvino": get_required_packages(requirement_files=["openvino"]), - "full": get_required_packages(requirement_files=["docs", "openvino"]), } diff --git a/tests/pre_merge/utils/callbacks/export_callback/test_export.py b/tests/pre_merge/utils/callbacks/export_callback/test_export.py index 23865b2c25..f3869f4fce 100644 --- a/tests/pre_merge/utils/callbacks/export_callback/test_export.py +++ b/tests/pre_merge/utils/callbacks/export_callback/test_export.py @@ -42,7 +42,7 @@ def test_export_model_callback(export_mode): gpus=1, callbacks=model.callbacks, logger=False, - checkpoint_callback=False, + enable_checkpointing=False, max_epochs=1, val_check_interval=3, ) diff --git a/tests/pre_merge/utils/callbacks/metrics_configuration_callback/test_metrics_configuration_callback.py b/tests/pre_merge/utils/callbacks/metrics_configuration_callback/test_metrics_configuration_callback.py index 997eb53ad3..5e377a2b90 100644 --- a/tests/pre_merge/utils/callbacks/metrics_configuration_callback/test_metrics_configuration_callback.py +++ b/tests/pre_merge/utils/callbacks/metrics_configuration_callback/test_metrics_configuration_callback.py @@ -51,7 +51,7 @@ def test_metric_collection_configuration_callback(config_from_yaml): dummy_logger = DummyLogger() dummy_anomaly_module = _DummyAnomalyModule() trainer = pl.Trainer( - callbacks=[callback], logger=dummy_logger, checkpoint_callback=False, default_root_dir=dummy_logger.tempdir + callbacks=[callback], logger=dummy_logger, enable_checkpointing=False, default_root_dir=dummy_logger.tempdir ) callback.setup(trainer, dummy_anomaly_module, DummyDataModule()) diff --git a/tests/pre_merge/utils/callbacks/visualizer_callback/test_visualizer.py b/tests/pre_merge/utils/callbacks/visualizer_callback/test_visualizer.py index 4cfc409886..475eff45de 100644 --- a/tests/pre_merge/utils/callbacks/visualizer_callback/test_visualizer.py +++ b/tests/pre_merge/utils/callbacks/visualizer_callback/test_visualizer.py @@ -39,7 +39,7 @@ def test_add_images(task): logger = get_dummy_logger(config, dir_loc) model = get_dummy_module(config) trainer = pl.Trainer( - callbacks=model.callbacks, logger=logger, checkpoint_callback=False, default_root_dir=config.project.path + callbacks=model.callbacks, logger=logger, enable_checkpointing=False, default_root_dir=config.project.path ) trainer.test(model=model, datamodule=DummyDataModule()) # test if images are logged diff --git a/tests/pre_merge/utils/loggers/test_get_logger.py b/tests/pre_merge/utils/loggers/test_get_logger.py index 7c5fe21168..3230c0dbdc 100644 --- a/tests/pre_merge/utils/loggers/test_get_logger.py +++ b/tests/pre_merge/utils/loggers/test_get_logger.py @@ -5,20 +5,37 @@ from unittest.mock import patch -patch("pytorch_lightning.utilities.imports._package_available", False) -patch("pytorch_lightning.loggers.wandb.WandbLogger") - import pytest from omegaconf import OmegaConf -from pytorch_lightning.loggers import CSVLogger -from anomalib.utils.loggers import ( - AnomalibCometLogger, - AnomalibTensorBoardLogger, - AnomalibWandbLogger, - UnknownLogger, - get_experiment_logger, -) +try: + from wandb import init + + wandb_installed = True +except ImportError: + wandb_installed = False + +if wandb_installed: + with patch("wandb.init"): + from pytorch_lightning.loggers import CSVLogger + + from anomalib.utils.loggers import ( + AnomalibCometLogger, + AnomalibTensorBoardLogger, + AnomalibWandbLogger, + UnknownLogger, + get_experiment_logger, + ) +else: + from 
pytorch_lightning.loggers import CSVLogger + + from anomalib.utils.loggers import ( + AnomalibCometLogger, + AnomalibTensorBoardLogger, + AnomalibWandbLogger, + UnknownLogger, + get_experiment_logger, + ) def test_get_experiment_logger(): @@ -32,8 +49,9 @@ def test_get_experiment_logger(): } ) - with patch("pytorch_lightning.loggers.wandb.wandb"): - + with patch("anomalib.utils.loggers.wandb.AnomalibWandbLogger.experiment"), patch( + "pytorch_lightning.loggers.wandb.wandb" + ), patch("pytorch_lightning.loggers.comet.comet_ml"): # get no logger logger = get_experiment_logger(config=config) assert isinstance(logger, bool) diff --git a/tools/train.py b/tools/train.py index 3df5c25296..f5ffb8ef9b 100644 --- a/tools/train.py +++ b/tools/train.py @@ -62,7 +62,7 @@ def train(): logger.info("Loading the best model weights.") load_model_callback = LoadModelCallback(weights_path=trainer.checkpoint_callback.best_model_path) - trainer.callbacks.insert(0, load_model_callback) + trainer.callbacks.insert(0, load_model_callback) # pylint: disable=no-member if config.dataset.test_split_mode == TestSplitMode.NONE: logger.info("No test set provided. Skipping test stage.") diff --git a/tox.ini b/tox.ini index 359568a20b..1415f0dd6f 100644 --- a/tox.ini +++ b/tox.ini @@ -8,7 +8,8 @@ envlist = [testenv:pre-commit] basepython = python3 -deps = pre-commit +deps = + pre-commit commands = pre-commit run --all-files [testenv:pre_merge] @@ -25,6 +26,8 @@ deps = nbmake -r{toxinidir}/requirements/base.txt -r{toxinidir}/requirements/openvino.txt + -r{toxinidir}/requirements/extras.txt + commands = ; 1. Run Coverage First. coverage erase @@ -50,6 +53,7 @@ deps = flaky -r{toxinidir}/requirements/base.txt -r{toxinidir}/requirements/openvino.txt + -r{toxinidir}/requirements/extras.txt commands = coverage erase coverage run --include=anomalib/* -m pytest tests/nightly/ -ra --showlocals
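The logger modules above now wrap their third-party imports in try/except because comet-ml, tensorboard and wandb moved from requirements/base.txt to the new requirements/extras.txt. A minimal sketch of that optional-dependency guard, written around wandb and falling back to standard-library logging when it is not installed:

    import logging

    logger = logging.getLogger(__name__)

    try:
        import wandb  # optional dependency, now provided via requirements/extras.txt
    except ModuleNotFoundError:
        wandb = None
        logger.warning("To use the wandb logger install it using `pip install wandb`")

    def log_metric(name: str, value: float) -> None:
        """Log to wandb when it is available, otherwise fall back to stdlib logging."""
        if wandb is not None and wandb.run is not None:
            wandb.log({name: value})
        else:
            logger.info("%s=%s", name, value)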