docs: Add custom build methods example
lRomul committed Jul 24, 2020
1 parent 93285d1 commit 9092666
Showing 4 changed files with 81 additions and 8 deletions.
2 changes: 1 addition & 1 deletion README.md
@@ -38,7 +38,7 @@ from torch import nn
import torch.nn.functional as F
from mnist_utils import get_data_loaders

-from argus import Model, load_model
+from argus import Model
from argus.callbacks import MonitorCheckpoint, EarlyStopping, ReduceLROnPlateau


10 changes: 6 additions & 4 deletions docs/source/examples.rst
@@ -6,19 +6,19 @@ You can find examples `here <https://github.com/lRomul/argus/blob/master/example
Basic examples
--------------

-* `MNIST example <https://github.com/lRomul/argus/blob/master/examples/mnist.py>`_
+* `MNIST example. <https://github.com/lRomul/argus/blob/master/examples/mnist.py>`_

.. code:: bash
python mnist.py --device cuda
-* `MNIST VAE example <https://github.com/lRomul/argus/blob/master/examples/mnist_vae.py>`_
+* `MNIST VAE example. <https://github.com/lRomul/argus/blob/master/examples/mnist_vae.py>`_

.. code:: bash
python mnist_vae.py --device cuda
-* `CIFAR example <https://github.com/lRomul/argus/blob/master/examples/cifar_simple.py>`_
+* `CIFAR example. <https://github.com/lRomul/argus/blob/master/examples/cifar_simple.py>`_

.. code:: bash
@@ -27,7 +27,7 @@ Basic examples
Advanced examples
-----------------

-* `CIFAR with DPP, mixed precision and gradient accumulation <https://github.com/lRomul/argus/blob/master/examples/cifar_advanced.py>`_
+* `CIFAR with DPP, mixed precision and gradient accumulation. <https://github.com/lRomul/argus/blob/master/examples/cifar_advanced.py>`_

Single GPU training:

@@ -47,6 +47,8 @@ Advanced examples
./cifar_advanced.sh 2 --batch_size 128 --lr 0.0005 --amp --iter_size 2
+* `Custom build methods for creation of model parts. <https://github.com/lRomul/argus/blob/master/examples/custom_build_methods.py>`_

Kaggle solutions
----------------

8 changes: 5 additions & 3 deletions examples/README.md
@@ -2,17 +2,17 @@

## Basic examples

-* MNIST example
+* MNIST example.
```bash
python mnist.py --device cuda
```

-* MNIST VAE example
+* MNIST VAE example.
```bash
python mnist_vae.py --device cuda
```

-* CIFAR example
+* CIFAR example.
```bash
python cifar_simple.py --device cuda
```
@@ -40,6 +40,8 @@ Advanced examples
./cifar_advanced.sh 2 --batch_size 128 --lr 0.0005 --amp --iter_size 2
```

+* Custom build methods for creation of model parts [custom_build_methods.py](custom_build_methods.py).

Kaggle solutions
----------------

69 changes: 69 additions & 0 deletions examples/custom_build_methods.py
@@ -0,0 +1,69 @@
import torch

from torchvision.models import densenet121

from argus import Model
from argus.model.build import (
    choose_attribute_from_dict,
    cast_optimizer,
    cast_nn_module
)


class MyModel(Model):
    nn_module = densenet121
    optimizer = torch.optim.Adam
    loss = torch.nn.CrossEntropyLoss

    def build_nn_module(self, nn_module_meta, nn_module_params):
        if nn_module_meta is None:
            raise ValueError("nn_module is required attribute for argus.Model")

        nn_module, nn_module_params = choose_attribute_from_dict(nn_module_meta,
                                                                 nn_module_params)
        nn_module = cast_nn_module(nn_module)
        nn_module = nn_module(**nn_module_params)

        # Replace last fully connected layer
        num_classes = self.params['num_classes']
        in_features = nn_module.classifier.in_features
        nn_module.classifier = torch.nn.Linear(in_features=in_features,
                                               out_features=num_classes)
        return nn_module

    def build_optimizer(self, optimizer_meta, optim_params):
        optimizer, optim_params = choose_attribute_from_dict(optimizer_meta,
                                                             optim_params)
        optimizer = cast_optimizer(optimizer)

        # Set small LR for pretrained layers
        pretrain_modules = [
            self.nn_module.features
        ]
        pretrain_params = []
        for pretrain_module in pretrain_modules:
            pretrain_params += pretrain_module.parameters()
        pretrain_ids = list(map(id, pretrain_params))
        other_params = filter(lambda p: id(p) not in pretrain_ids,
                              self.nn_module.parameters())
        grad_params = [
            {"params": pretrain_params, "lr": optim_params['lr'] * 0.01},
            {"params": other_params, "lr": optim_params['lr']}
        ]
        del optim_params['lr']

        optimizer = optimizer(params=grad_params, **optim_params)
        return optimizer


if __name__ == "__main__":
    params = {
        'nn_module': {'pretrained': True, 'progress': False},
        'optimizer': {'lr': 0.001},
        'device': 'cuda',
        'num_classes': 10
    }

    model = MyModel(params)
    print("Learning rate for each params group:", model.get_lr())
    print("Last FC layer:", model.nn_module.classifier)

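The new example stops at building the model and printing its parameter groups and classifier head. As a rough illustration of how such a model could then be trained, here is a minimal sketch using argus' fit API; the CIFAR-10 data pipeline, epoch count, and callback settings are illustrative assumptions and are not part of this commit.

# A minimal training sketch for MyModel from custom_build_methods.py.
# NOTE: the data pipeline, hyperparameters, and callbacks below are
# illustrative assumptions; they are not part of the commit above.
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.datasets import CIFAR10

from argus.callbacks import MonitorCheckpoint, EarlyStopping

from custom_build_methods import MyModel  # class defined in the new example file

transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])
train_loader = DataLoader(CIFAR10('./data', train=True, download=True, transform=transform),
                          batch_size=64, shuffle=True)
val_loader = DataLoader(CIFAR10('./data', train=False, download=True, transform=transform),
                        batch_size=64, shuffle=False)

params = {
    'nn_module': {'pretrained': True, 'progress': False},
    'optimizer': {'lr': 0.001},
    'device': 'cuda',
    'num_classes': 10
}
model = MyModel(params)  # custom build_nn_module and build_optimizer run here

model.fit(train_loader,
          val_loader=val_loader,
          num_epochs=10,
          metrics=['accuracy'],
          callbacks=[
              MonitorCheckpoint(dir_path='checkpoints', monitor='val_accuracy', max_saves=3),
              EarlyStopping(monitor='val_accuracy', patience=5)
          ])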