From 625944fb8deac6974182bcaaed299cc009c84de4 Mon Sep 17 00:00:00 2001
From: Lupin1998 <1070535169@qq.com>
Date: Thu, 30 Dec 2021 09:52:53 +0000
Subject: [PATCH] init
---
.gitignore | 136 +++
LICENSE | 201 ++++
README.md | 109 ++
benchmarks/detection/README.md | 12 +
.../configs/Base-Keypoint-RCNN-FPN.yaml | 15 +
.../detection/configs/Base-RCNN-C4-BN.yaml | 17 +
.../detection/configs/Base-RCNN-FPN.yaml | 42 +
.../detection/configs/Base-RetinaNet.yaml | 25 +
.../Cityscapes/mask_rcnn_R_50_FPN.yaml | 30 +
.../Cityscapes/mask_rcnn_R_50_FPN_moco.yaml | 9 +
.../detection/configs/coco_R_50_C4_1x.yaml | 4 +
.../configs/coco_R_50_C4_1x_moco.yaml | 4 +
.../detection/configs/coco_R_50_C4_2x.yaml | 13 +
.../configs/coco_R_50_C4_2x_moco.yaml | 10 +
.../detection/configs/coco_R_50_FPN_1x.yaml | 17 +
.../configs/coco_R_50_FPN_1x_moco.yaml | 9 +
.../detection/configs/coco_R_50_FPN_2x.yaml | 4 +
.../configs/coco_R_50_FPN_2x_moco.yaml | 4 +
.../configs/coco_R_50_RetinaNet_1x.yaml | 13 +
.../configs/coco_R_50_RetinaNet_1x_moco.yaml | 9 +
.../configs/coco_R_50_RetinaNet_2x.yaml | 4 +
.../configs/coco_R_50_RetinaNet_2x_moco.yaml | 4 +
.../configs/keypoint_rcnn_R_50_FPN_2x.yaml | 16 +
.../keypoint_rcnn_R_50_FPN_2x_moco.yaml | 9 +
.../configs/pascal_voc_R_50_C4_24k.yaml | 16 +
.../configs/pascal_voc_R_50_C4_24k_moco.yaml | 9 +
.../convert-pretrain-to-detectron2.py | 36 +
benchmarks/detection/run.sh | 6 +
benchmarks/detection/train_net.py | 77 ++
benchmarks/dist_test_svm_epoch.sh | 28 +
benchmarks/dist_test_svm_pretrain.sh | 28 +
benchmarks/dist_train_linear.sh | 24 +
benchmarks/dist_train_linear_1gpu.sh | 24 +
benchmarks/dist_train_linear_1gpu_sd.sh | 27 +
benchmarks/dist_train_linear_2gpu.sh | 24 +
benchmarks/dist_train_linear_4gpu.sh | 24 +
benchmarks/dist_train_semi.sh | 24 +
benchmarks/extract_info/voc07.py | 20 +
benchmarks/srun_test_svm_epoch.sh | 24 +
benchmarks/srun_test_svm_pretrain.sh | 24 +
benchmarks/srun_train_linear.sh | 31 +
benchmarks/srun_train_semi.sh | 31 +
.../svm_tools/aggregate_low_shot_svm_stats.py | 127 ++
benchmarks/svm_tools/eval_svm_full.sh | 40 +
benchmarks/svm_tools/eval_svm_lowshot.sh | 64 +
benchmarks/svm_tools/svm_helper.py | 171 +++
benchmarks/svm_tools/test_svm.py | 174 +++
benchmarks/svm_tools/test_svm_low_shot.py | 212 ++++
benchmarks/svm_tools/train_svm_kfold.py | 162 +++
.../svm_tools/train_svm_kfold_parallel.py | 151 +++
benchmarks/svm_tools/train_svm_low_shot.py | 144 +++
.../svm_tools/train_svm_low_shot_parallel.py | 145 +++
configs/base.py | 19 +
.../cifar10/r18_last_1gpu_cifar10.py | 75 ++
.../cifar10/r18_rep_cifar10.py | 49 +
.../r50_last_1gpu_cifar10_from_stl10_lr01.py | 78 ++
.../cifar100/r18_last_1gpu_cifar100.py | 79 ++
.../cifar100/r18_rep_cifar100.py | 48 +
.../cub200/r50_last_2gpu_cub200.py | 84 ++
.../dogs120/r50_last_2gpu_dogs120.py | 87 ++
.../fmnist/lenet_last_1gpu_fmnist.py | 49 +
.../fmnist/lenet_rep_fmnist.py | 50 +
.../imagenet/official/r50_last.py | 76 ++
.../imagenet/official/r50_last_sobel.py | 76 ++
.../imagenet/official/r50_multihead.py | 89 ++
.../imagenet/official/r50_multihead_sobel.py | 89 ++
.../imagenet/r18_last_1gpu.py | 79 ++
.../imagenet/r18_last_2gpu.py | 79 ++
.../imagenet/r18_last_4gpu.py | 78 ++
.../imagenet/r18_rep_imagenet.py | 71 ++
.../imagenet/r50_last_2gpu.py | 79 ++
.../imagenet/r50_last_4gpu.py | 83 ++
.../imagenet/r50_rep_imagenet.py | 71 ++
.../mnist/lenet_rep_mnist.py | 55 +
.../pets/r50_last_2gpu_pets.py | 90 ++
.../pets/r50_rep_pets.py | 70 ++
.../places205/r50_multihead.py | 89 ++
.../places205/r50_multihead_sobel.py | 89 ++
.../stl10/mobilenet_last_1gpu_stl10.py | 79 ++
.../stl10/mobilenet_rep_stl10.py | 64 +
.../stl10/r18/r18_lr1_0_bs256_head1.py | 78 ++
.../stl10/r18/run_stl10_dist_train_linear.sh | 35 +
.../stl10/r18_last_1gpu_stl10.py | 80 ++
.../stl10/r18_rep_stl10.py | 64 +
.../stl10/r50_last_1gpu_stl10.py | 82 ++
.../stl10/r50_rep_stl10.py | 64 +
.../tiny_imagenet/r18_last_1gpu_tiny.py | 80 ++
.../tiny_imagenet/r18_rep_tiny_imagenet.py | 66 +
.../imagenet_10percent/base.py | 66 +
.../imagenet_10percent/r50_lr0_001_head1.py | 4 +
.../imagenet_10percent/r50_lr0_001_head10.py | 4 +
.../imagenet_10percent/r50_lr0_001_head100.py | 4 +
.../imagenet_10percent/r50_lr0_01_head1.py | 4 +
.../imagenet_10percent/r50_lr0_01_head10.py | 4 +
.../imagenet_10percent/r50_lr0_01_head100.py | 4 +
.../r50_lr0_01_head1_sobel.py | 71 ++
.../imagenet_10percent/r50_lr0_1_head1.py | 4 +
.../imagenet_10percent/r50_lr0_1_head10.py | 4 +
.../imagenet_10percent/r50_lr0_1_head100.py | 4 +
.../imagenet_1percent/base.py | 72 ++
.../imagenet_1percent/r50_lr0_001_head1.py | 4 +
.../imagenet_1percent/r50_lr0_001_head10.py | 4 +
.../imagenet_1percent/r50_lr0_001_head100.py | 4 +
.../imagenet_1percent/r50_lr0_01_head1.py | 4 +
.../imagenet_1percent/r50_lr0_01_head10.py | 4 +
.../imagenet_1percent/r50_lr0_01_head100.py | 4 +
.../r50_lr0_01_head1_sobel.py | 77 ++
.../imagenet_1percent/r50_lr0_1_head1.py | 4 +
.../imagenet_1percent/r50_lr0_1_head10.py | 4 +
.../imagenet_1percent/r50_lr0_1_head100.py | 4 +
.../semi_classification/stl10/base.py | 69 ++
.../stl10/r50_lr0_001_head1.py | 4 +
.../stl10/r50_lr0_001_head10.py | 4 +
.../stl10/r50_lr0_001_head100.py | 4 +
.../stl10/r50_lr0_01_head1.py | 4 +
.../stl10/r50_lr0_01_head10.py | 4 +
.../stl10/r50_lr0_01_head100.py | 4 +
.../stl10/r50_lr0_1_head1.py | 6 +
.../stl10/r50_lr0_1_head10.py | 4 +
.../stl10/r50_lr0_1_head100.py | 4 +
docs/CHANGELOG.md | 37 +
docs/GETTING_STARTED.md | 287 +++++
docs/INSTALL.md | 160 +++
docs/MODEL_ZOO.md | 184 +++
docs/relation.jpg | Bin 0 -> 330127 bytes
openmixup/__init__.py | 3 +
openmixup/apis/__init__.py | 3 +
openmixup/apis/train.py | 235 ++++
openmixup/datasets/__init__.py | 17 +
openmixup/datasets/base.py | 38 +
openmixup/datasets/builder.py | 43 +
openmixup/datasets/byol.py | 41 +
openmixup/datasets/classification.py | 44 +
openmixup/datasets/contrastive.py | 33 +
openmixup/datasets/data_sources/__init__.py | 10 +
openmixup/datasets/data_sources/cifar.py | 227 ++++
openmixup/datasets/data_sources/image_list.py | 38 +
openmixup/datasets/data_sources/imagenet.py | 10 +
openmixup/datasets/data_sources/mnist.py | 112 ++
openmixup/datasets/dataset_wrappers.py | 55 +
openmixup/datasets/deepcluster.py | 33 +
openmixup/datasets/deepcluster_contrastive.py | 69 ++
openmixup/datasets/extraction.py | 19 +
openmixup/datasets/loader/__init__.py | 7 +
openmixup/datasets/loader/build_loader.py | 133 +++
openmixup/datasets/loader/sampler.py | 302 +++++
openmixup/datasets/multi_view.py | 65 +
openmixup/datasets/npid.py | 25 +
openmixup/datasets/pipelines/__init__.py | 3 +
openmixup/datasets/pipelines/auto_augment.py | 1053 ++++++++++++++++
openmixup/datasets/pipelines/compose.py | 42 +
openmixup/datasets/pipelines/transforms.py | 146 +++
openmixup/datasets/registry.py | 5 +
openmixup/datasets/relative_loc.py | 65 +
openmixup/datasets/rotation_pred.py | 45 +
openmixup/datasets/semi_supervised.py | 157 +++
openmixup/datasets/utils.py | 9 +
openmixup/hooks/__init__.py | 12 +
openmixup/hooks/addtional_scheduler.py | 645 ++++++++++
openmixup/hooks/builder.py | 113 ++
openmixup/hooks/byol_hook.py | 43 +
openmixup/hooks/deepcluster_automix_hook.py | 162 +++
openmixup/hooks/deepcluster_hook.py | 135 +++
openmixup/hooks/extractor.py | 61 +
openmixup/hooks/momentum_hook.py | 261 ++++
openmixup/hooks/odc_hook.py | 90 ++
openmixup/hooks/optimizer_hook.py | 73 ++
openmixup/hooks/registry.py | 3 +
openmixup/hooks/save_hook.py | 39 +
openmixup/hooks/validate_hook.py | 93 ++
openmixup/models/__init__.py | 10 +
openmixup/models/backbones/__init__.py | 18 +
openmixup/models/backbones/alexnet.py | 81 ++
openmixup/models/backbones/base_backbone.py | 57 +
openmixup/models/backbones/lenet.py | 75 ++
openmixup/models/backbones/mobilenet_v2.py | 274 +++++
openmixup/models/backbones/mobilenet_v3.py | 204 ++++
openmixup/models/backbones/resnet_mmcls.py | 965 +++++++++++++++
openmixup/models/backbones/resnext.py | 494 ++++++++
openmixup/models/backbones/seresnet.py | 223 ++++
openmixup/models/backbones/shufflenet_v2.py | 299 +++++
openmixup/models/backbones/wide_resnet.py | 302 +++++
openmixup/models/builder.py | 56 +
openmixup/models/classifiers/__init__.py | 11 +
.../models/classifiers/classification.py | 107 ++
.../classifiers/mixup_classification.py | 155 +++
.../classifiers/mixup_momentum_V1plus.py | 336 ++++++
.../models/classifiers/mixup_momentum_V2.py | 525 ++++++++
.../models/classifiers/representation.py | 71 ++
openmixup/models/heads/__init__.py | 12 +
openmixup/models/heads/cls_head.py | 146 +++
openmixup/models/heads/cls_mixup_head.py | 266 +++++
openmixup/models/heads/contrastive_head.py | 38 +
openmixup/models/heads/latent_pred_head.py | 37 +
openmixup/models/heads/multi_cls_head.py | 83 ++
openmixup/models/heads/pmix_block_V2.py | 486 ++++++++
openmixup/models/losses/__init__.py | 13 +
openmixup/models/losses/asymmetric_loss.py | 207 ++++
openmixup/models/losses/cross_entropy_loss.py | 328 +++++
openmixup/models/losses/focal_loss.py | 118 ++
openmixup/models/losses/label_smooth_loss.py | 169 +++
openmixup/models/losses/utils.py | 112 ++
openmixup/models/memories/__init__.py | 7 +
openmixup/models/memories/odc_memory.py | 233 ++++
openmixup/models/memories/simple_memory.py | 65 +
openmixup/models/necks/__init__.py | 10 +
openmixup/models/necks/conv_necks.py | 120 ++
openmixup/models/necks/fpn_automix.py | 41 +
openmixup/models/necks/mlp_necks.py | 445 +++++++
openmixup/models/registry.py | 8 +
openmixup/models/selfsup/__init__.py | 17 +
openmixup/models/selfsup/byol.py | 113 ++
openmixup/models/selfsup/deepcluster.py | 131 ++
openmixup/models/selfsup/moco.py | 218 ++++
openmixup/models/selfsup/moco_automix_v2.py | 1057 +++++++++++++++++
openmixup/models/selfsup/moco_mix.py | 360 ++++++
openmixup/models/selfsup/npid.py | 130 ++
openmixup/models/selfsup/odc.py | 148 +++
openmixup/models/selfsup/relative_loc.py | 107 ++
openmixup/models/selfsup/rotation_pred.py | 94 ++
openmixup/models/selfsup/simclr.py | 109 ++
openmixup/models/selfsup/simclr_mix.py | 265 +++++
openmixup/models/semisup/__init__.py | 9 +
openmixup/models/semisup/dmixmatch.py | 403 +++++++
openmixup/models/semisup/fine_tuning.py | 95 ++
openmixup/models/semisup/fixmatch.py | 184 +++
openmixup/models/semisup/mix_tuning.py | 473 ++++++++
openmixup/models/semisup/self_tuning.py | 255 ++++
openmixup/models/utils/__init__.py | 29 +
openmixup/models/utils/accuracy.py | 63 +
openmixup/models/utils/channel_shuffle.py | 29 +
openmixup/models/utils/conv_module.py | 163 +++
openmixup/models/utils/conv_ws.py | 46 +
openmixup/models/utils/fmix.py | 236 ++++
openmixup/models/utils/gather_layer.py | 223 ++++
openmixup/models/utils/grad_weight.py | 59 +
openmixup/models/utils/inverted_residual.py | 115 ++
openmixup/models/utils/make_divisible.py | 27 +
openmixup/models/utils/mixup_input.py | 356 ++++++
openmixup/models/utils/multi_pooling.py | 38 +
openmixup/models/utils/norm.py | 55 +
openmixup/models/utils/scale.py | 13 +
openmixup/models/utils/se_layer.py | 75 ++
openmixup/models/utils/smoothing.py | 48 +
openmixup/models/utils/sobel.py | 24 +
openmixup/models/utils/weight_init.py | 73 ++
openmixup/third_party/clustering.py | 309 +++++
openmixup/utils/__init__.py | 8 +
openmixup/utils/alias_multinomial.py | 75 ++
openmixup/utils/collect.py | 83 ++
openmixup/utils/collect_env.py | 64 +
openmixup/utils/config_tools.py | 13 +
openmixup/utils/contextmanagers.py | 122 ++
openmixup/utils/flops_counter.py | 444 +++++++
openmixup/utils/gather.py | 69 ++
openmixup/utils/logger.py | 66 +
openmixup/utils/misc.py | 37 +
openmixup/utils/optimizers.py | 116 ++
openmixup/utils/profiling.py | 40 +
openmixup/utils/registry.py | 79 ++
requirements.txt | 2 +
requirements/runtime.txt | 12 +
requirements/tests.txt | 11 +
setup.py | 193 +++
tools/auto_train.py | 140 +++
tools/count_parameters.py | 44 +
tools/dist_extract.sh | 13 +
tools/dist_test.sh | 17 +
tools/dist_train.sh | 12 +
tools/extract.py | 182 +++
tools/kill.sh | 2 +
.../extract_backbone_weights.py | 31 +
tools/model_converters/extract_dir_weights.py | 64 +
tools/model_converters/publish_model.py | 33 +
tools/model_converters/upgrade_models.py | 27 +
tools/prepare_data/convert_subset.py | 35 +
tools/prepare_data/create_voc_data_files.py | 193 +++
.../create_voc_low_shot_challenge_samples.py | 131 ++
tools/prepare_data/prepare_voc07_cls.sh | 34 +
tools/single_train.sh | 9 +
tools/srun_extract.sh | 24 +
tools/srun_test.sh | 30 +
tools/srun_train.sh | 26 +
tools/summary/find_automix_val_median.py | 105 ++
.../find_classification_val_3times_average.py | 86 ++
tools/summary/find_classification_val_max.py | 91 ++
.../summary/find_classification_val_median.py | 75 ++
tools/summary/results_summary.py | 104 ++
tools/test.py | 122 ++
tools/train.py | 142 +++
tools/visualization/gradcam.py | 421 +++++++
tools/visualization/my_dist_analysis.py | 329 +++++
tools/visualization/visualize_embedding.py | 390 ++++++
293 files changed, 28808 insertions(+)
create mode 100644 .gitignore
create mode 100644 LICENSE
create mode 100644 README.md
create mode 100644 benchmarks/detection/README.md
create mode 100644 benchmarks/detection/configs/Base-Keypoint-RCNN-FPN.yaml
create mode 100644 benchmarks/detection/configs/Base-RCNN-C4-BN.yaml
create mode 100644 benchmarks/detection/configs/Base-RCNN-FPN.yaml
create mode 100644 benchmarks/detection/configs/Base-RetinaNet.yaml
create mode 100644 benchmarks/detection/configs/Cityscapes/mask_rcnn_R_50_FPN.yaml
create mode 100644 benchmarks/detection/configs/Cityscapes/mask_rcnn_R_50_FPN_moco.yaml
create mode 100644 benchmarks/detection/configs/coco_R_50_C4_1x.yaml
create mode 100644 benchmarks/detection/configs/coco_R_50_C4_1x_moco.yaml
create mode 100644 benchmarks/detection/configs/coco_R_50_C4_2x.yaml
create mode 100644 benchmarks/detection/configs/coco_R_50_C4_2x_moco.yaml
create mode 100644 benchmarks/detection/configs/coco_R_50_FPN_1x.yaml
create mode 100644 benchmarks/detection/configs/coco_R_50_FPN_1x_moco.yaml
create mode 100644 benchmarks/detection/configs/coco_R_50_FPN_2x.yaml
create mode 100644 benchmarks/detection/configs/coco_R_50_FPN_2x_moco.yaml
create mode 100644 benchmarks/detection/configs/coco_R_50_RetinaNet_1x.yaml
create mode 100644 benchmarks/detection/configs/coco_R_50_RetinaNet_1x_moco.yaml
create mode 100644 benchmarks/detection/configs/coco_R_50_RetinaNet_2x.yaml
create mode 100644 benchmarks/detection/configs/coco_R_50_RetinaNet_2x_moco.yaml
create mode 100644 benchmarks/detection/configs/keypoint_rcnn_R_50_FPN_2x.yaml
create mode 100644 benchmarks/detection/configs/keypoint_rcnn_R_50_FPN_2x_moco.yaml
create mode 100644 benchmarks/detection/configs/pascal_voc_R_50_C4_24k.yaml
create mode 100644 benchmarks/detection/configs/pascal_voc_R_50_C4_24k_moco.yaml
create mode 100644 benchmarks/detection/convert-pretrain-to-detectron2.py
create mode 100644 benchmarks/detection/run.sh
create mode 100644 benchmarks/detection/train_net.py
create mode 100644 benchmarks/dist_test_svm_epoch.sh
create mode 100644 benchmarks/dist_test_svm_pretrain.sh
create mode 100644 benchmarks/dist_train_linear.sh
create mode 100644 benchmarks/dist_train_linear_1gpu.sh
create mode 100644 benchmarks/dist_train_linear_1gpu_sd.sh
create mode 100644 benchmarks/dist_train_linear_2gpu.sh
create mode 100644 benchmarks/dist_train_linear_4gpu.sh
create mode 100644 benchmarks/dist_train_semi.sh
create mode 100644 benchmarks/extract_info/voc07.py
create mode 100644 benchmarks/srun_test_svm_epoch.sh
create mode 100644 benchmarks/srun_test_svm_pretrain.sh
create mode 100644 benchmarks/srun_train_linear.sh
create mode 100644 benchmarks/srun_train_semi.sh
create mode 100644 benchmarks/svm_tools/aggregate_low_shot_svm_stats.py
create mode 100644 benchmarks/svm_tools/eval_svm_full.sh
create mode 100644 benchmarks/svm_tools/eval_svm_lowshot.sh
create mode 100644 benchmarks/svm_tools/svm_helper.py
create mode 100644 benchmarks/svm_tools/test_svm.py
create mode 100644 benchmarks/svm_tools/test_svm_low_shot.py
create mode 100644 benchmarks/svm_tools/train_svm_kfold.py
create mode 100644 benchmarks/svm_tools/train_svm_kfold_parallel.py
create mode 100644 benchmarks/svm_tools/train_svm_low_shot.py
create mode 100644 benchmarks/svm_tools/train_svm_low_shot_parallel.py
create mode 100644 configs/base.py
create mode 100644 configs/benchmarks/linear_classification/cifar10/r18_last_1gpu_cifar10.py
create mode 100644 configs/benchmarks/linear_classification/cifar10/r18_rep_cifar10.py
create mode 100644 configs/benchmarks/linear_classification/cifar10/r50_last_1gpu_cifar10_from_stl10_lr01.py
create mode 100644 configs/benchmarks/linear_classification/cifar100/r18_last_1gpu_cifar100.py
create mode 100644 configs/benchmarks/linear_classification/cifar100/r18_rep_cifar100.py
create mode 100644 configs/benchmarks/linear_classification/cub200/r50_last_2gpu_cub200.py
create mode 100644 configs/benchmarks/linear_classification/dogs120/r50_last_2gpu_dogs120.py
create mode 100644 configs/benchmarks/linear_classification/fmnist/lenet_last_1gpu_fmnist.py
create mode 100644 configs/benchmarks/linear_classification/fmnist/lenet_rep_fmnist.py
create mode 100644 configs/benchmarks/linear_classification/imagenet/official/r50_last.py
create mode 100644 configs/benchmarks/linear_classification/imagenet/official/r50_last_sobel.py
create mode 100644 configs/benchmarks/linear_classification/imagenet/official/r50_multihead.py
create mode 100644 configs/benchmarks/linear_classification/imagenet/official/r50_multihead_sobel.py
create mode 100644 configs/benchmarks/linear_classification/imagenet/r18_last_1gpu.py
create mode 100644 configs/benchmarks/linear_classification/imagenet/r18_last_2gpu.py
create mode 100644 configs/benchmarks/linear_classification/imagenet/r18_last_4gpu.py
create mode 100644 configs/benchmarks/linear_classification/imagenet/r18_rep_imagenet.py
create mode 100644 configs/benchmarks/linear_classification/imagenet/r50_last_2gpu.py
create mode 100644 configs/benchmarks/linear_classification/imagenet/r50_last_4gpu.py
create mode 100644 configs/benchmarks/linear_classification/imagenet/r50_rep_imagenet.py
create mode 100644 configs/benchmarks/linear_classification/mnist/lenet_rep_mnist.py
create mode 100644 configs/benchmarks/linear_classification/pets/r50_last_2gpu_pets.py
create mode 100644 configs/benchmarks/linear_classification/pets/r50_rep_pets.py
create mode 100644 configs/benchmarks/linear_classification/places205/r50_multihead.py
create mode 100644 configs/benchmarks/linear_classification/places205/r50_multihead_sobel.py
create mode 100644 configs/benchmarks/linear_classification/stl10/mobilenet_last_1gpu_stl10.py
create mode 100644 configs/benchmarks/linear_classification/stl10/mobilenet_rep_stl10.py
create mode 100644 configs/benchmarks/linear_classification/stl10/r18/r18_lr1_0_bs256_head1.py
create mode 100644 configs/benchmarks/linear_classification/stl10/r18/run_stl10_dist_train_linear.sh
create mode 100644 configs/benchmarks/linear_classification/stl10/r18_last_1gpu_stl10.py
create mode 100644 configs/benchmarks/linear_classification/stl10/r18_rep_stl10.py
create mode 100644 configs/benchmarks/linear_classification/stl10/r50_last_1gpu_stl10.py
create mode 100644 configs/benchmarks/linear_classification/stl10/r50_rep_stl10.py
create mode 100644 configs/benchmarks/linear_classification/tiny_imagenet/r18_last_1gpu_tiny.py
create mode 100644 configs/benchmarks/linear_classification/tiny_imagenet/r18_rep_tiny_imagenet.py
create mode 100644 configs/benchmarks/semi_classification/imagenet_10percent/base.py
create mode 100644 configs/benchmarks/semi_classification/imagenet_10percent/r50_lr0_001_head1.py
create mode 100644 configs/benchmarks/semi_classification/imagenet_10percent/r50_lr0_001_head10.py
create mode 100644 configs/benchmarks/semi_classification/imagenet_10percent/r50_lr0_001_head100.py
create mode 100644 configs/benchmarks/semi_classification/imagenet_10percent/r50_lr0_01_head1.py
create mode 100644 configs/benchmarks/semi_classification/imagenet_10percent/r50_lr0_01_head10.py
create mode 100644 configs/benchmarks/semi_classification/imagenet_10percent/r50_lr0_01_head100.py
create mode 100644 configs/benchmarks/semi_classification/imagenet_10percent/r50_lr0_01_head1_sobel.py
create mode 100644 configs/benchmarks/semi_classification/imagenet_10percent/r50_lr0_1_head1.py
create mode 100644 configs/benchmarks/semi_classification/imagenet_10percent/r50_lr0_1_head10.py
create mode 100644 configs/benchmarks/semi_classification/imagenet_10percent/r50_lr0_1_head100.py
create mode 100644 configs/benchmarks/semi_classification/imagenet_1percent/base.py
create mode 100644 configs/benchmarks/semi_classification/imagenet_1percent/r50_lr0_001_head1.py
create mode 100644 configs/benchmarks/semi_classification/imagenet_1percent/r50_lr0_001_head10.py
create mode 100644 configs/benchmarks/semi_classification/imagenet_1percent/r50_lr0_001_head100.py
create mode 100644 configs/benchmarks/semi_classification/imagenet_1percent/r50_lr0_01_head1.py
create mode 100644 configs/benchmarks/semi_classification/imagenet_1percent/r50_lr0_01_head10.py
create mode 100644 configs/benchmarks/semi_classification/imagenet_1percent/r50_lr0_01_head100.py
create mode 100644 configs/benchmarks/semi_classification/imagenet_1percent/r50_lr0_01_head1_sobel.py
create mode 100644 configs/benchmarks/semi_classification/imagenet_1percent/r50_lr0_1_head1.py
create mode 100644 configs/benchmarks/semi_classification/imagenet_1percent/r50_lr0_1_head10.py
create mode 100644 configs/benchmarks/semi_classification/imagenet_1percent/r50_lr0_1_head100.py
create mode 100644 configs/benchmarks/semi_classification/stl10/base.py
create mode 100644 configs/benchmarks/semi_classification/stl10/r50_lr0_001_head1.py
create mode 100644 configs/benchmarks/semi_classification/stl10/r50_lr0_001_head10.py
create mode 100644 configs/benchmarks/semi_classification/stl10/r50_lr0_001_head100.py
create mode 100644 configs/benchmarks/semi_classification/stl10/r50_lr0_01_head1.py
create mode 100644 configs/benchmarks/semi_classification/stl10/r50_lr0_01_head10.py
create mode 100644 configs/benchmarks/semi_classification/stl10/r50_lr0_01_head100.py
create mode 100644 configs/benchmarks/semi_classification/stl10/r50_lr0_1_head1.py
create mode 100644 configs/benchmarks/semi_classification/stl10/r50_lr0_1_head10.py
create mode 100644 configs/benchmarks/semi_classification/stl10/r50_lr0_1_head100.py
create mode 100644 docs/CHANGELOG.md
create mode 100644 docs/GETTING_STARTED.md
create mode 100644 docs/INSTALL.md
create mode 100644 docs/MODEL_ZOO.md
create mode 100644 docs/relation.jpg
create mode 100644 openmixup/__init__.py
create mode 100644 openmixup/apis/__init__.py
create mode 100644 openmixup/apis/train.py
create mode 100644 openmixup/datasets/__init__.py
create mode 100644 openmixup/datasets/base.py
create mode 100644 openmixup/datasets/builder.py
create mode 100644 openmixup/datasets/byol.py
create mode 100644 openmixup/datasets/classification.py
create mode 100644 openmixup/datasets/contrastive.py
create mode 100644 openmixup/datasets/data_sources/__init__.py
create mode 100644 openmixup/datasets/data_sources/cifar.py
create mode 100644 openmixup/datasets/data_sources/image_list.py
create mode 100644 openmixup/datasets/data_sources/imagenet.py
create mode 100644 openmixup/datasets/data_sources/mnist.py
create mode 100644 openmixup/datasets/dataset_wrappers.py
create mode 100644 openmixup/datasets/deepcluster.py
create mode 100644 openmixup/datasets/deepcluster_contrastive.py
create mode 100644 openmixup/datasets/extraction.py
create mode 100644 openmixup/datasets/loader/__init__.py
create mode 100644 openmixup/datasets/loader/build_loader.py
create mode 100644 openmixup/datasets/loader/sampler.py
create mode 100644 openmixup/datasets/multi_view.py
create mode 100644 openmixup/datasets/npid.py
create mode 100644 openmixup/datasets/pipelines/__init__.py
create mode 100644 openmixup/datasets/pipelines/auto_augment.py
create mode 100644 openmixup/datasets/pipelines/compose.py
create mode 100644 openmixup/datasets/pipelines/transforms.py
create mode 100644 openmixup/datasets/registry.py
create mode 100644 openmixup/datasets/relative_loc.py
create mode 100644 openmixup/datasets/rotation_pred.py
create mode 100644 openmixup/datasets/semi_supervised.py
create mode 100644 openmixup/datasets/utils.py
create mode 100644 openmixup/hooks/__init__.py
create mode 100644 openmixup/hooks/addtional_scheduler.py
create mode 100644 openmixup/hooks/builder.py
create mode 100644 openmixup/hooks/byol_hook.py
create mode 100644 openmixup/hooks/deepcluster_automix_hook.py
create mode 100644 openmixup/hooks/deepcluster_hook.py
create mode 100644 openmixup/hooks/extractor.py
create mode 100644 openmixup/hooks/momentum_hook.py
create mode 100644 openmixup/hooks/odc_hook.py
create mode 100644 openmixup/hooks/optimizer_hook.py
create mode 100644 openmixup/hooks/registry.py
create mode 100644 openmixup/hooks/save_hook.py
create mode 100644 openmixup/hooks/validate_hook.py
create mode 100644 openmixup/models/__init__.py
create mode 100644 openmixup/models/backbones/__init__.py
create mode 100644 openmixup/models/backbones/alexnet.py
create mode 100644 openmixup/models/backbones/base_backbone.py
create mode 100644 openmixup/models/backbones/lenet.py
create mode 100644 openmixup/models/backbones/mobilenet_v2.py
create mode 100644 openmixup/models/backbones/mobilenet_v3.py
create mode 100644 openmixup/models/backbones/resnet_mmcls.py
create mode 100644 openmixup/models/backbones/resnext.py
create mode 100644 openmixup/models/backbones/seresnet.py
create mode 100644 openmixup/models/backbones/shufflenet_v2.py
create mode 100644 openmixup/models/backbones/wide_resnet.py
create mode 100644 openmixup/models/builder.py
create mode 100644 openmixup/models/classifiers/__init__.py
create mode 100644 openmixup/models/classifiers/classification.py
create mode 100644 openmixup/models/classifiers/mixup_classification.py
create mode 100644 openmixup/models/classifiers/mixup_momentum_V1plus.py
create mode 100644 openmixup/models/classifiers/mixup_momentum_V2.py
create mode 100644 openmixup/models/classifiers/representation.py
create mode 100644 openmixup/models/heads/__init__.py
create mode 100644 openmixup/models/heads/cls_head.py
create mode 100644 openmixup/models/heads/cls_mixup_head.py
create mode 100644 openmixup/models/heads/contrastive_head.py
create mode 100644 openmixup/models/heads/latent_pred_head.py
create mode 100644 openmixup/models/heads/multi_cls_head.py
create mode 100644 openmixup/models/heads/pmix_block_V2.py
create mode 100644 openmixup/models/losses/__init__.py
create mode 100644 openmixup/models/losses/asymmetric_loss.py
create mode 100644 openmixup/models/losses/cross_entropy_loss.py
create mode 100644 openmixup/models/losses/focal_loss.py
create mode 100644 openmixup/models/losses/label_smooth_loss.py
create mode 100644 openmixup/models/losses/utils.py
create mode 100644 openmixup/models/memories/__init__.py
create mode 100644 openmixup/models/memories/odc_memory.py
create mode 100644 openmixup/models/memories/simple_memory.py
create mode 100644 openmixup/models/necks/__init__.py
create mode 100644 openmixup/models/necks/conv_necks.py
create mode 100644 openmixup/models/necks/fpn_automix.py
create mode 100644 openmixup/models/necks/mlp_necks.py
create mode 100644 openmixup/models/registry.py
create mode 100644 openmixup/models/selfsup/__init__.py
create mode 100644 openmixup/models/selfsup/byol.py
create mode 100644 openmixup/models/selfsup/deepcluster.py
create mode 100644 openmixup/models/selfsup/moco.py
create mode 100644 openmixup/models/selfsup/moco_automix_v2.py
create mode 100644 openmixup/models/selfsup/moco_mix.py
create mode 100644 openmixup/models/selfsup/npid.py
create mode 100644 openmixup/models/selfsup/odc.py
create mode 100644 openmixup/models/selfsup/relative_loc.py
create mode 100644 openmixup/models/selfsup/rotation_pred.py
create mode 100644 openmixup/models/selfsup/simclr.py
create mode 100644 openmixup/models/selfsup/simclr_mix.py
create mode 100644 openmixup/models/semisup/__init__.py
create mode 100644 openmixup/models/semisup/dmixmatch.py
create mode 100644 openmixup/models/semisup/fine_tuning.py
create mode 100644 openmixup/models/semisup/fixmatch.py
create mode 100644 openmixup/models/semisup/mix_tuning.py
create mode 100644 openmixup/models/semisup/self_tuning.py
create mode 100644 openmixup/models/utils/__init__.py
create mode 100644 openmixup/models/utils/accuracy.py
create mode 100644 openmixup/models/utils/channel_shuffle.py
create mode 100644 openmixup/models/utils/conv_module.py
create mode 100644 openmixup/models/utils/conv_ws.py
create mode 100644 openmixup/models/utils/fmix.py
create mode 100644 openmixup/models/utils/gather_layer.py
create mode 100644 openmixup/models/utils/grad_weight.py
create mode 100644 openmixup/models/utils/inverted_residual.py
create mode 100644 openmixup/models/utils/make_divisible.py
create mode 100644 openmixup/models/utils/mixup_input.py
create mode 100644 openmixup/models/utils/multi_pooling.py
create mode 100644 openmixup/models/utils/norm.py
create mode 100644 openmixup/models/utils/scale.py
create mode 100644 openmixup/models/utils/se_layer.py
create mode 100644 openmixup/models/utils/smoothing.py
create mode 100644 openmixup/models/utils/sobel.py
create mode 100644 openmixup/models/utils/weight_init.py
create mode 100644 openmixup/third_party/clustering.py
create mode 100644 openmixup/utils/__init__.py
create mode 100644 openmixup/utils/alias_multinomial.py
create mode 100644 openmixup/utils/collect.py
create mode 100644 openmixup/utils/collect_env.py
create mode 100644 openmixup/utils/config_tools.py
create mode 100644 openmixup/utils/contextmanagers.py
create mode 100644 openmixup/utils/flops_counter.py
create mode 100644 openmixup/utils/gather.py
create mode 100644 openmixup/utils/logger.py
create mode 100644 openmixup/utils/misc.py
create mode 100644 openmixup/utils/optimizers.py
create mode 100644 openmixup/utils/profiling.py
create mode 100644 openmixup/utils/registry.py
create mode 100644 requirements.txt
create mode 100644 requirements/runtime.txt
create mode 100644 requirements/tests.txt
create mode 100644 setup.py
create mode 100644 tools/auto_train.py
create mode 100644 tools/count_parameters.py
create mode 100644 tools/dist_extract.sh
create mode 100644 tools/dist_test.sh
create mode 100644 tools/dist_train.sh
create mode 100644 tools/extract.py
create mode 100644 tools/kill.sh
create mode 100644 tools/model_converters/extract_backbone_weights.py
create mode 100644 tools/model_converters/extract_dir_weights.py
create mode 100644 tools/model_converters/publish_model.py
create mode 100644 tools/model_converters/upgrade_models.py
create mode 100644 tools/prepare_data/convert_subset.py
create mode 100644 tools/prepare_data/create_voc_data_files.py
create mode 100644 tools/prepare_data/create_voc_low_shot_challenge_samples.py
create mode 100644 tools/prepare_data/prepare_voc07_cls.sh
create mode 100644 tools/single_train.sh
create mode 100644 tools/srun_extract.sh
create mode 100644 tools/srun_test.sh
create mode 100644 tools/srun_train.sh
create mode 100644 tools/summary/find_automix_val_median.py
create mode 100644 tools/summary/find_classification_val_3times_average.py
create mode 100644 tools/summary/find_classification_val_max.py
create mode 100644 tools/summary/find_classification_val_median.py
create mode 100644 tools/summary/results_summary.py
create mode 100644 tools/test.py
create mode 100644 tools/train.py
create mode 100644 tools/visualization/gradcam.py
create mode 100644 tools/visualization/my_dist_analysis.py
create mode 100644 tools/visualization/visualize_embedding.py
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 00000000..4bd8619d
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,136 @@
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+apex/
+wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+.hypothesis/
+.pytest_cache/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# pyenv
+.python-version
+
+# celery beat schedule file
+celerybeat-schedule
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+
+openmixup/version.py
+version.py
+data
+.vscode
+.idea
+
+# custom
+*.pkl
+*.pkl.json
+*.log.json
+work_dirs/
+tools/exp_bash/
+pretrains
+
+# Pytorch
+*.pth
+
+*.swp
+source.sh
+tensorboard.sh
+.DS_Store
+replace.sh
+benchmarks/detection/datasets
+benchmarks/detection/output
+
+# temporarily ignored paths:
+configs/classification
+configs/selfsup
+configs/semisup
+*.json
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 00000000..ca9f7cb1
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2020-2021 Open-MMLab.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/README.md b/README.md
new file mode 100644
index 00000000..a4276bf2
--- /dev/null
+++ b/README.md
@@ -0,0 +1,109 @@
+
+# OpenSelfSup
+
+**News**
+* Downstream tasks now support more methods (Mask R-CNN FPN, RetinaNet, Keypoint R-CNN) and more datasets (Cityscapes).
+* The 'GaussianBlur' implementation is switched from OpenCV to PIL, and MoCo v2 training speed doubles!
+(time/iter 0.35s --> 0.16s; SimCLR and BYOL benefit as well. See the sketch after this list.)
+* OpenSelfSup now supports [Mixed Precision Training (apex AMP)](https://github.com/NVIDIA/apex)!
+* A bug in MoCo v2 has been fixed, and the results are now reproducible.
+* OpenSelfSup now supports [BYOL](https://arxiv.org/pdf/2006.07733.pdf)!
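+
+A minimal sketch of the PIL-based blur, following the MoCo v2 reference implementation (the class name and sigma range here mirror that reference and may differ slightly from this repo's pipeline):
+
+```python
+import random
+
+from PIL import ImageFilter
+
+
+class GaussianBlur(object):
+    """Gaussian blur augmentation as used in SimCLR / MoCo v2."""
+
+    def __init__(self, sigma=(0.1, 2.0)):
+        self.sigma = sigma
+
+    def __call__(self, img):
+        # Sample a blur radius per call; PIL filters the image directly,
+        # avoiding the per-image OpenCV round-trip that dominated the old cost.
+        radius = random.uniform(self.sigma[0], self.sigma[1])
+        return img.filter(ImageFilter.GaussianBlur(radius=radius))
+```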
+
+## Introduction
+
+The master branch works with **PyTorch 1.1** or higher.
+
+`OpenSelfSup` is an open source unsupervised representation learning toolbox based on PyTorch.
+
+### What does this repo do?
+
+Below are the relations among Unsupervised Learning, Self-Supervised Learning and Representation Learning. This repo focuses on the shaded area, i.e., Unsupervised Representation Learning, of which Self-Supervised Representation Learning is the major branch. Since in many cases we do not strictly distinguish between Self-Supervised Representation Learning and Unsupervised Representation Learning, we still name this repo `OpenSelfSup`.
+
+<img src="docs/relation.jpg"/>
+
+### Major features
+
+- **All methods in one repository**
+
+  For a comprehensive comparison across all benchmarks, refer to [MODEL_ZOO.md](docs/MODEL_ZOO.md). Most of the self-supervised pretraining methods use the `batch_size=256, epochs=200` setting.
+
+
+- **Flexibility & Extensibility**
+
+  `OpenSelfSup` follows a code architecture similar to MMDetection's while being even more flexible, since it integrates various self-supervised tasks, including classification, joint clustering and feature learning, contrastive learning, tasks with a memory bank, etc.
+
+  For existing methods in this repo, you only need to modify config files to adjust hyper-parameters (see the illustrative config snippet below the benchmarks table). It is also simple to design your own methods; please refer to [GETTING_STARTED.md](docs/GETTING_STARTED.md).
+
+- **Efficiency**
+
+ All methods support multi-machine multi-gpu distributed training.
+
+- **Standardized Benchmarks**
+
+  We standardize the benchmarks, including logistic regression, SVM / Low-shot SVM on linearly probed features, semi-supervised classification, and object detection. The settings of these benchmarks are listed below.
+
+ | Benchmarks | Setting | Remarks |
+ |----------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------|
+ | ImageNet Linear Classification (Multi) | [goyal2019scaling](http://openaccess.thecvf.com/content_ICCV_2019/papers/Goyal_Scaling_and_Benchmarking_Self-Supervised_Visual_Representation_Learning_ICCV_2019_paper.pdf) | Evaluate different layers. |
+ | ImageNet Linear Classification (Last) | [MoCo](http://openaccess.thecvf.com/content_CVPR_2020/papers/He_Momentum_Contrast_for_Unsupervised_Visual_Representation_Learning_CVPR_2020_paper.pdf) | Evaluate the last layer after global pooling. |
+ | Places205 Linear Classification | [goyal2019scaling](http://openaccess.thecvf.com/content_ICCV_2019/papers/Goyal_Scaling_and_Benchmarking_Self-Supervised_Visual_Representation_Learning_ICCV_2019_paper.pdf) | Evaluate different layers. |
+  | ImageNet Semi-Sup Classification | | |
+  | PASCAL VOC07 SVM | [goyal2019scaling](http://openaccess.thecvf.com/content_ICCV_2019/papers/Goyal_Scaling_and_Benchmarking_Self-Supervised_Visual_Representation_Learning_ICCV_2019_paper.pdf) | Costs="1.0,10.0,100.0" to save evaluation time without changing the results. |
+  | PASCAL VOC07 Low-shot SVM | [goyal2019scaling](http://openaccess.thecvf.com/content_ICCV_2019/papers/Goyal_Scaling_and_Benchmarking_Self-Supervised_Visual_Representation_Learning_ICCV_2019_paper.pdf) | Costs="1.0,10.0,100.0" to save evaluation time without changing the results. |
+ | PASCAL VOC07+12 Object Detection | [MoCo](http://openaccess.thecvf.com/content_CVPR_2020/papers/He_Momentum_Contrast_for_Unsupervised_Visual_Representation_Learning_CVPR_2020_paper.pdf) | |
+ | COCO17 Object Detection | [MoCo](http://openaccess.thecvf.com/content_CVPR_2020/papers/He_Momentum_Contrast_for_Unsupervised_Visual_Representation_Learning_CVPR_2020_paper.pdf) | |
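+
+As referenced under *Flexibility & Extensibility*, an illustrative mmcv-style config snippet for adjusting hyper-parameters (the field names follow common OpenSelfSup configs but are illustrative here; the files under `configs/` are authoritative):
+
+```python
+# Illustrative MoCo-style config; tweak hyper-parameters by editing fields.
+model = dict(
+    type='MOCO',
+    queue_len=65536,   # size of the negative queue
+    feat_dim=128,      # projection dimension
+    momentum=0.999,    # momentum for the key encoder
+    backbone=dict(type='ResNet', depth=50),
+    neck=dict(
+        type='NonLinearNeckV1',
+        in_channels=2048, hid_channels=2048, out_channels=128),
+    head=dict(type='ContrastiveHead', temperature=0.2))
+optimizer = dict(type='SGD', lr=0.03, weight_decay=0.0001, momentum=0.9)
+total_epochs = 200
+```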
+
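+The VOC07 SVM benchmarks above fix Costs="1.0,10.0,100.0"; a hedged sketch of the underlying idea, i.e. one binary linear SVM per class and per cost on frozen features (the actual tooling in `benchmarks/svm_tools/` additionally handles k-fold cost selection and AP computation):
+
+```python
+from sklearn.svm import LinearSVC
+
+
+def train_per_class_svms(feats, labels, costs=(1.0, 10.0, 100.0)):
+    """Train one binary SVM per class and per cost value.
+
+    feats: (N, D) array of L2-normalized frozen features.
+    labels: (N, C) array with entries in {1, -1}.
+    """
+    classifiers = {}
+    for cls_idx in range(labels.shape[1]):
+        for cost in costs:
+            clf = LinearSVC(
+                C=cost, class_weight={1: 2, -1: 1},
+                penalty='l2', loss='squared_hinge', max_iter=2000)
+            clf.fit(feats, labels[:, cls_idx])
+            classifiers[(cls_idx, cost)] = clf
+    return classifiers
+```
+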
+## Change Log
+
+Please refer to [CHANGELOG.md](docs/CHANGELOG.md) for details and release history.
+
+[2020-10-14] `OpenSelfSup` v0.3.0 is released with several bug fixes and support for new features.
+
+[2020-06-26] `OpenSelfSup` v0.2.0 is released with benchmark results and support for new features.
+
+[2020-06-16] `OpenSelfSup` v0.1.0 is released.
+
+## Installation
+
+Please refer to [INSTALL.md](docs/INSTALL.md) for installation and dataset preparation.
+
+## Get Started
+
+Please see [GETTING_STARTED.md](docs/GETTING_STARTED.md) for the basic usage of OpenSelfSup.
+
+## Benchmark and Model Zoo
+
+Please refer to [MODEL_ZOO.md](docs/MODEL_ZOO.md) for a comprehensive set of pre-trained models and benchmarks.
+
+## License
+
+This project is released under the [Apache 2.0 license](LICENSE).
+
+
+## Acknowledgement
+
+- This repo borrows the architecture design and part of the code from [MMDetection](https://github.com/open-mmlab/mmdetection).
+- The implementation of MoCo and the detection benchmark borrow the code from [moco](https://github.com/facebookresearch/moco).
+- The SVM benchmark borrows the code from [fair_self_supervision_benchmark](https://github.com/facebookresearch/fair_self_supervision_benchmark).
+- `openselfsup/third_party/clustering.py` is borrowed from [deepcluster](https://github.com/facebookresearch/deepcluster/blob/master/clustering.py).
+
+## Contributors
+
+We encourage researchers interested in Self-Supervised Learning to contribute to OpenSelfSup. Your contributions, including implementing or transferring new methods to OpenSelfSup, performing experiments, reproducing results, parameter studies, etc., will be recorded in [MODEL_ZOO.md](docs/MODEL_ZOO.md). For now, the contributors include: Xiaohang Zhan ([@XiaohangZhan](http://github.com/XiaohangZhan)), Jiahao Xie ([@Jiahao000](https://github.com/Jiahao000)), Enze Xie ([@xieenze](https://github.com/xieenze)), Xiangxiang Chu ([@cxxgtxy](https://github.com/cxxgtxy)), Zijian He ([@scnuhealthy](https://github.com/scnuhealthy)).
+
+## Contact
+
+This repo is currently maintained by Xiaohang Zhan ([@XiaohangZhan](http://github.com/XiaohangZhan)), Jiahao Xie ([@Jiahao000](https://github.com/Jiahao000)) and Enze Xie ([@xieenze](https://github.com/xieenze)).
diff --git a/benchmarks/detection/README.md b/benchmarks/detection/README.md
new file mode 100644
index 00000000..caeb7ae3
--- /dev/null
+++ b/benchmarks/detection/README.md
@@ -0,0 +1,12 @@
+
+## Transferring to Detection
+
+We follow the evaluation setting in MoCo when transferring to object detection.
+
+### Instructions
+
+1. Install [detectron2](https://github.com/facebookresearch/detectron2/blob/master/INSTALL.md).
+
+1. Put the dataset under the "benchmarks/detection/datasets" directory,
+   following the [directory structure](https://github.com/facebookresearch/detectron2/tree/master/datasets)
+   required by detectron2.
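+
+To feed a self-supervised checkpoint to detectron2, the backbone weights must first be converted. A hedged sketch of what this step does, following the MoCo reference script (`convert-pretrain-to-detectron2.py` in this directory is the authoritative version):
+
+```python
+import pickle as pkl
+import sys
+
+import torch
+
+if __name__ == "__main__":
+    obj = torch.load(sys.argv[1], map_location="cpu")
+    obj = obj.get("state_dict", obj)
+    newmodel = {}
+    for k, v in obj.items():
+        if not k.startswith("backbone."):
+            continue  # keep only the backbone weights
+        k = k.replace("backbone.", "")
+        if "layer" not in k:
+            k = "stem." + k  # conv1/bn1 live in detectron2's stem
+        for t in [1, 2, 3, 4]:
+            k = k.replace("layer{}".format(t), "res{}".format(t + 1))
+        for t in [1, 2, 3]:
+            k = k.replace("bn{}".format(t), "conv{}.norm".format(t))
+        k = k.replace("downsample.0", "shortcut")
+        k = k.replace("downsample.1", "shortcut.norm")
+        newmodel[k] = v.numpy()
+    res = {"model": newmodel, "__author__": "OpenSelfSup",
+           "matching_heuristics": True}
+    with open(sys.argv[2], "wb") as f:
+        pkl.dump(res, f)
+```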
diff --git a/benchmarks/detection/configs/Base-Keypoint-RCNN-FPN.yaml b/benchmarks/detection/configs/Base-Keypoint-RCNN-FPN.yaml
new file mode 100644
index 00000000..7cbf5eec
--- /dev/null
+++ b/benchmarks/detection/configs/Base-Keypoint-RCNN-FPN.yaml
@@ -0,0 +1,15 @@
+_BASE_: "Base-RCNN-FPN.yaml"
+MODEL:
+ KEYPOINT_ON: True
+ ROI_HEADS:
+ NUM_CLASSES: 1
+ ROI_BOX_HEAD:
+ SMOOTH_L1_BETA: 0.5 # Keypoint AP degrades (though box AP improves) when using plain L1 loss
+ RPN:
+ # Detectron1 uses 2000 proposals per-batch, but this option is per-image in detectron2.
+ # 1000 proposals per-image is found to hurt box AP.
+ # Therefore we increase it to 1500 per-image.
+ POST_NMS_TOPK_TRAIN: 1500
+DATASETS:
+ TRAIN: ("keypoints_coco_2017_train",)
+ TEST: ("keypoints_coco_2017_val",)
diff --git a/benchmarks/detection/configs/Base-RCNN-C4-BN.yaml b/benchmarks/detection/configs/Base-RCNN-C4-BN.yaml
new file mode 100644
index 00000000..5104c6a6
--- /dev/null
+++ b/benchmarks/detection/configs/Base-RCNN-C4-BN.yaml
@@ -0,0 +1,17 @@
+MODEL:
+ META_ARCHITECTURE: "GeneralizedRCNN"
+ RPN:
+ PRE_NMS_TOPK_TEST: 6000
+ POST_NMS_TOPK_TEST: 1000
+ ROI_HEADS:
+ NAME: "Res5ROIHeadsExtraNorm"
+ BACKBONE:
+ FREEZE_AT: 0
+ RESNETS:
+ NORM: "SyncBN"
+TEST:
+ PRECISE_BN:
+ ENABLED: True
+SOLVER:
+ IMS_PER_BATCH: 16
+ BASE_LR: 0.02
diff --git a/benchmarks/detection/configs/Base-RCNN-FPN.yaml b/benchmarks/detection/configs/Base-RCNN-FPN.yaml
new file mode 100644
index 00000000..d40fe5ef
--- /dev/null
+++ b/benchmarks/detection/configs/Base-RCNN-FPN.yaml
@@ -0,0 +1,42 @@
+MODEL:
+ META_ARCHITECTURE: "GeneralizedRCNN"
+ BACKBONE:
+ NAME: "build_resnet_fpn_backbone"
+ RESNETS:
+ OUT_FEATURES: ["res2", "res3", "res4", "res5"]
+ FPN:
+ IN_FEATURES: ["res2", "res3", "res4", "res5"]
+ ANCHOR_GENERATOR:
+ SIZES: [[32], [64], [128], [256], [512]] # One size for each in feature map
+ ASPECT_RATIOS: [[0.5, 1.0, 2.0]] # Three aspect ratios (same for all in feature maps)
+ RPN:
+ IN_FEATURES: ["p2", "p3", "p4", "p5", "p6"]
+ PRE_NMS_TOPK_TRAIN: 2000 # Per FPN level
+ PRE_NMS_TOPK_TEST: 1000 # Per FPN level
+ # Detectron1 uses 2000 proposals per-batch,
+ # (See "modeling/rpn/rpn_outputs.py" for details of this legacy issue)
+ # which is approximately 1000 proposals per-image since the default batch size for FPN is 2.
+ POST_NMS_TOPK_TRAIN: 1000
+ POST_NMS_TOPK_TEST: 1000
+ ROI_HEADS:
+ NAME: "StandardROIHeads"
+ IN_FEATURES: ["p2", "p3", "p4", "p5"]
+ ROI_BOX_HEAD:
+ NAME: "FastRCNNConvFCHead"
+ NUM_FC: 2
+ POOLER_RESOLUTION: 7
+ ROI_MASK_HEAD:
+ NAME: "MaskRCNNConvUpsampleHead"
+ NUM_CONV: 4
+ POOLER_RESOLUTION: 14
+DATASETS:
+ TRAIN: ("coco_2017_train",)
+ TEST: ("coco_2017_val",)
+SOLVER:
+ IMS_PER_BATCH: 16
+ BASE_LR: 0.02
+ STEPS: (60000, 80000)
+ MAX_ITER: 90000
+INPUT:
+ MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800)
+VERSION: 2
\ No newline at end of file
diff --git a/benchmarks/detection/configs/Base-RetinaNet.yaml b/benchmarks/detection/configs/Base-RetinaNet.yaml
new file mode 100644
index 00000000..95ee124f
--- /dev/null
+++ b/benchmarks/detection/configs/Base-RetinaNet.yaml
@@ -0,0 +1,25 @@
+MODEL:
+ META_ARCHITECTURE: "RetinaNet"
+ BACKBONE:
+ NAME: "build_retinanet_resnet_fpn_backbone"
+ RESNETS:
+ OUT_FEATURES: ["res3", "res4", "res5"]
+ ANCHOR_GENERATOR:
+ SIZES: !!python/object/apply:eval ["[[x, x * 2**(1.0/3), x * 2**(2.0/3) ] for x in [32, 64, 128, 256, 512 ]]"]
+ FPN:
+ IN_FEATURES: ["res3", "res4", "res5"]
+ RETINANET:
+ IOU_THRESHOLDS: [0.4, 0.5]
+ IOU_LABELS: [0, -1, 1]
+ SMOOTH_L1_LOSS_BETA: 0.0
+DATASETS:
+ TRAIN: ("coco_2017_train",)
+ TEST: ("coco_2017_val",)
+SOLVER:
+ IMS_PER_BATCH: 16
+ BASE_LR: 0.01 # Note that RetinaNet uses a different default learning rate
+ STEPS: (60000, 80000)
+ MAX_ITER: 90000
+INPUT:
+ MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800)
+VERSION: 2
\ No newline at end of file
diff --git a/benchmarks/detection/configs/Cityscapes/mask_rcnn_R_50_FPN.yaml b/benchmarks/detection/configs/Cityscapes/mask_rcnn_R_50_FPN.yaml
new file mode 100644
index 00000000..8e74dfbc
--- /dev/null
+++ b/benchmarks/detection/configs/Cityscapes/mask_rcnn_R_50_FPN.yaml
@@ -0,0 +1,30 @@
+_BASE_: "../Base-RCNN-FPN.yaml"
+MODEL:
+ WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
+ MASK_ON: True
+ ROI_HEADS:
+ NUM_CLASSES: 8
+ BACKBONE:
+ FREEZE_AT: 0
+ RESNETS:
+ DEPTH: 50
+ NORM: "SyncBN"
+ FPN:
+ NORM: "SyncBN"
+INPUT:
+ MIN_SIZE_TRAIN: (800, 832, 864, 896, 928, 960, 992, 1024)
+ MIN_SIZE_TRAIN_SAMPLING: "choice"
+ MIN_SIZE_TEST: 1024
+ MAX_SIZE_TRAIN: 2048
+ MAX_SIZE_TEST: 2048
+DATASETS:
+ TRAIN: ("cityscapes_fine_instance_seg_train",)
+ TEST: ("cityscapes_fine_instance_seg_val",)
+SOLVER:
+ BASE_LR: 0.01
+ STEPS: (18000,)
+ MAX_ITER: 24000
+ IMS_PER_BATCH: 8
+TEST:
+ PRECISE_BN:
+ ENABLED: True
\ No newline at end of file
diff --git a/benchmarks/detection/configs/Cityscapes/mask_rcnn_R_50_FPN_moco.yaml b/benchmarks/detection/configs/Cityscapes/mask_rcnn_R_50_FPN_moco.yaml
new file mode 100644
index 00000000..52ad1fb1
--- /dev/null
+++ b/benchmarks/detection/configs/Cityscapes/mask_rcnn_R_50_FPN_moco.yaml
@@ -0,0 +1,9 @@
+_BASE_: "mask_rcnn_R_50_FPN.yaml"
+MODEL:
+ PIXEL_MEAN: [123.675, 116.280, 103.530]
+ PIXEL_STD: [58.395, 57.120, 57.375]
+ WEIGHTS: "See Instructions"
+ RESNETS:
+ STRIDE_IN_1X1: False
+INPUT:
+ FORMAT: "RGB"
\ No newline at end of file
diff --git a/benchmarks/detection/configs/coco_R_50_C4_1x.yaml b/benchmarks/detection/configs/coco_R_50_C4_1x.yaml
new file mode 100644
index 00000000..e0826b40
--- /dev/null
+++ b/benchmarks/detection/configs/coco_R_50_C4_1x.yaml
@@ -0,0 +1,4 @@
+_BASE_: "coco_R_50_C4_2x.yaml"
+SOLVER:
+ STEPS: (60000, 80000)
+ MAX_ITER: 90000
diff --git a/benchmarks/detection/configs/coco_R_50_C4_1x_moco.yaml b/benchmarks/detection/configs/coco_R_50_C4_1x_moco.yaml
new file mode 100644
index 00000000..98524d0b
--- /dev/null
+++ b/benchmarks/detection/configs/coco_R_50_C4_1x_moco.yaml
@@ -0,0 +1,4 @@
+_BASE_: "coco_R_50_C4_2x_moco.yaml"
+SOLVER:
+ STEPS: (60000, 80000)
+ MAX_ITER: 90000
diff --git a/benchmarks/detection/configs/coco_R_50_C4_2x.yaml b/benchmarks/detection/configs/coco_R_50_C4_2x.yaml
new file mode 100644
index 00000000..5b7e4240
--- /dev/null
+++ b/benchmarks/detection/configs/coco_R_50_C4_2x.yaml
@@ -0,0 +1,13 @@
+_BASE_: "Base-RCNN-C4-BN.yaml"
+MODEL:
+ MASK_ON: True
+ WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
+INPUT:
+ MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800)
+ MIN_SIZE_TEST: 800
+DATASETS:
+ TRAIN: ("coco_2017_train",)
+ TEST: ("coco_2017_val",)
+SOLVER:
+ STEPS: (120000, 160000)
+ MAX_ITER: 180000
diff --git a/benchmarks/detection/configs/coco_R_50_C4_2x_moco.yaml b/benchmarks/detection/configs/coco_R_50_C4_2x_moco.yaml
new file mode 100644
index 00000000..8e310683
--- /dev/null
+++ b/benchmarks/detection/configs/coco_R_50_C4_2x_moco.yaml
@@ -0,0 +1,10 @@
+_BASE_: "coco_R_50_C4_2x.yaml"
+MODEL:
+ PIXEL_MEAN: [123.675, 116.280, 103.530]
+ PIXEL_STD: [58.395, 57.120, 57.375]
+ WEIGHTS: "See Instructions"
+ RESNETS:
+ STRIDE_IN_1X1: False
+INPUT:
+ MAX_SIZE_TRAIN: 1200
+ FORMAT: "RGB"
diff --git a/benchmarks/detection/configs/coco_R_50_FPN_1x.yaml b/benchmarks/detection/configs/coco_R_50_FPN_1x.yaml
new file mode 100644
index 00000000..142319a7
--- /dev/null
+++ b/benchmarks/detection/configs/coco_R_50_FPN_1x.yaml
@@ -0,0 +1,17 @@
+_BASE_: "Base-RCNN-FPN.yaml"
+MODEL:
+ MASK_ON: True
+ WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
+ BACKBONE:
+ FREEZE_AT: 0
+ RESNETS:
+ DEPTH: 50
+ NORM: "SyncBN"
+ FPN:
+ NORM: "SyncBN"
+TEST:
+ PRECISE_BN:
+ ENABLED: True
+SOLVER:
+ STEPS: (60000, 80000)
+ MAX_ITER: 90000
\ No newline at end of file
diff --git a/benchmarks/detection/configs/coco_R_50_FPN_1x_moco.yaml b/benchmarks/detection/configs/coco_R_50_FPN_1x_moco.yaml
new file mode 100644
index 00000000..c341eab4
--- /dev/null
+++ b/benchmarks/detection/configs/coco_R_50_FPN_1x_moco.yaml
@@ -0,0 +1,9 @@
+_BASE_: "coco_R_50_FPN_1x.yaml"
+MODEL:
+ PIXEL_MEAN: [123.675, 116.280, 103.530]
+ PIXEL_STD: [58.395, 57.120, 57.375]
+ WEIGHTS: "See Instructions"
+ RESNETS:
+ STRIDE_IN_1X1: False
+INPUT:
+ FORMAT: "RGB"
\ No newline at end of file
diff --git a/benchmarks/detection/configs/coco_R_50_FPN_2x.yaml b/benchmarks/detection/configs/coco_R_50_FPN_2x.yaml
new file mode 100644
index 00000000..483789f3
--- /dev/null
+++ b/benchmarks/detection/configs/coco_R_50_FPN_2x.yaml
@@ -0,0 +1,4 @@
+_BASE_: "coco_R_50_FPN_1x.yaml"
+SOLVER:
+ STEPS: (120000, 160000)
+ MAX_ITER: 180000
\ No newline at end of file
diff --git a/benchmarks/detection/configs/coco_R_50_FPN_2x_moco.yaml b/benchmarks/detection/configs/coco_R_50_FPN_2x_moco.yaml
new file mode 100644
index 00000000..875c2a73
--- /dev/null
+++ b/benchmarks/detection/configs/coco_R_50_FPN_2x_moco.yaml
@@ -0,0 +1,4 @@
+_BASE_: "coco_R_50_FPN_1x_moco.yaml"
+SOLVER:
+ STEPS: (120000, 160000)
+ MAX_ITER: 180000
\ No newline at end of file
diff --git a/benchmarks/detection/configs/coco_R_50_RetinaNet_1x.yaml b/benchmarks/detection/configs/coco_R_50_RetinaNet_1x.yaml
new file mode 100644
index 00000000..52c63ba1
--- /dev/null
+++ b/benchmarks/detection/configs/coco_R_50_RetinaNet_1x.yaml
@@ -0,0 +1,13 @@
+_BASE_: "Base-RetinaNet.yaml"
+MODEL:
+ WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
+ BACKBONE:
+ FREEZE_AT: 0
+ RESNETS:
+ DEPTH: 50
+ NORM: "SyncBN"
+ FPN:
+ NORM: "SyncBN"
+TEST:
+ PRECISE_BN:
+ ENABLED: True
\ No newline at end of file
diff --git a/benchmarks/detection/configs/coco_R_50_RetinaNet_1x_moco.yaml b/benchmarks/detection/configs/coco_R_50_RetinaNet_1x_moco.yaml
new file mode 100644
index 00000000..2fdff1a5
--- /dev/null
+++ b/benchmarks/detection/configs/coco_R_50_RetinaNet_1x_moco.yaml
@@ -0,0 +1,9 @@
+_BASE_: "coco_R_50_RetinaNet_1x.yaml"
+MODEL:
+ PIXEL_MEAN: [123.675, 116.280, 103.530]
+ PIXEL_STD: [58.395, 57.120, 57.375]
+ WEIGHTS: "See Instructions"
+ RESNETS:
+ STRIDE_IN_1X1: False
+INPUT:
+ FORMAT: "RGB"
\ No newline at end of file
diff --git a/benchmarks/detection/configs/coco_R_50_RetinaNet_2x.yaml b/benchmarks/detection/configs/coco_R_50_RetinaNet_2x.yaml
new file mode 100644
index 00000000..150a607e
--- /dev/null
+++ b/benchmarks/detection/configs/coco_R_50_RetinaNet_2x.yaml
@@ -0,0 +1,4 @@
+_BASE_: "coco_R_50_RetinaNet_1x.yaml"
+SOLVER:
+ STEPS: (120000, 160000)
+ MAX_ITER: 180000
\ No newline at end of file
diff --git a/benchmarks/detection/configs/coco_R_50_RetinaNet_2x_moco.yaml b/benchmarks/detection/configs/coco_R_50_RetinaNet_2x_moco.yaml
new file mode 100644
index 00000000..b1faa8c4
--- /dev/null
+++ b/benchmarks/detection/configs/coco_R_50_RetinaNet_2x_moco.yaml
@@ -0,0 +1,4 @@
+_BASE_: "coco_R_50_RetinaNet_1x_moco.yaml"
+SOLVER:
+ STEPS: (120000, 160000)
+ MAX_ITER: 180000
\ No newline at end of file
diff --git a/benchmarks/detection/configs/keypoint_rcnn_R_50_FPN_2x.yaml b/benchmarks/detection/configs/keypoint_rcnn_R_50_FPN_2x.yaml
new file mode 100644
index 00000000..1d60b437
--- /dev/null
+++ b/benchmarks/detection/configs/keypoint_rcnn_R_50_FPN_2x.yaml
@@ -0,0 +1,16 @@
+_BASE_: "Base-Keypoint-RCNN-FPN.yaml"
+MODEL:
+ WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
+ BACKBONE:
+ FREEZE_AT: 0
+ RESNETS:
+ DEPTH: 50
+ NORM: "SyncBN"
+ FPN:
+ NORM: "SyncBN"
+TEST:
+ PRECISE_BN:
+ ENABLED: True
+SOLVER:
+ STEPS: (120000, 160000)
+ MAX_ITER: 180000
\ No newline at end of file
diff --git a/benchmarks/detection/configs/keypoint_rcnn_R_50_FPN_2x_moco.yaml b/benchmarks/detection/configs/keypoint_rcnn_R_50_FPN_2x_moco.yaml
new file mode 100644
index 00000000..7dbcfbb9
--- /dev/null
+++ b/benchmarks/detection/configs/keypoint_rcnn_R_50_FPN_2x_moco.yaml
@@ -0,0 +1,9 @@
+_BASE_: "keypoint_rcnn_R_50_FPN_2x.yaml"
+MODEL:
+ PIXEL_MEAN: [123.675, 116.280, 103.530]
+ PIXEL_STD: [58.395, 57.120, 57.375]
+ WEIGHTS: "See Instructions"
+ RESNETS:
+ STRIDE_IN_1X1: False
+INPUT:
+ FORMAT: "RGB"
\ No newline at end of file
diff --git a/benchmarks/detection/configs/pascal_voc_R_50_C4_24k.yaml b/benchmarks/detection/configs/pascal_voc_R_50_C4_24k.yaml
new file mode 100644
index 00000000..a05eb5e2
--- /dev/null
+++ b/benchmarks/detection/configs/pascal_voc_R_50_C4_24k.yaml
@@ -0,0 +1,16 @@
+_BASE_: "Base-RCNN-C4-BN.yaml"
+MODEL:
+ MASK_ON: False
+ WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
+ ROI_HEADS:
+ NUM_CLASSES: 20
+INPUT:
+ MIN_SIZE_TRAIN: (480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800)
+ MIN_SIZE_TEST: 800
+DATASETS:
+ TRAIN: ('voc_2007_trainval', 'voc_2012_trainval')
+ TEST: ('voc_2007_test',)
+SOLVER:
+ STEPS: (18000, 22000)
+ MAX_ITER: 24000
+ WARMUP_ITERS: 100
diff --git a/benchmarks/detection/configs/pascal_voc_R_50_C4_24k_moco.yaml b/benchmarks/detection/configs/pascal_voc_R_50_C4_24k_moco.yaml
new file mode 100644
index 00000000..eebe6905
--- /dev/null
+++ b/benchmarks/detection/configs/pascal_voc_R_50_C4_24k_moco.yaml
@@ -0,0 +1,9 @@
+_BASE_: "pascal_voc_R_50_C4_24k.yaml"
+MODEL:
+ PIXEL_MEAN: [123.675, 116.280, 103.530]
+ PIXEL_STD: [58.395, 57.120, 57.375]
+ WEIGHTS: "See Instructions"
+ RESNETS:
+ STRIDE_IN_1X1: False
+INPUT:
+ FORMAT: "RGB"
diff --git a/benchmarks/detection/convert-pretrain-to-detectron2.py b/benchmarks/detection/convert-pretrain-to-detectron2.py
new file mode 100644
index 00000000..e8bf5434
--- /dev/null
+++ b/benchmarks/detection/convert-pretrain-to-detectron2.py
@@ -0,0 +1,36 @@
+#!/usr/bin/env python
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
+
+import pickle as pkl
+import sys
+import torch
+
+if __name__ == "__main__":
+ input_path = sys.argv[1]
+
+ obj = torch.load(input_path, map_location="cpu")
+ obj = obj["state_dict"]
+
+ newmodel = {}
+ for k, v in obj.items():
+ old_k = k
+ if "layer" not in k:
+ k = "stem." + k
+ for t in [1, 2, 3, 4]:
+ k = k.replace("layer{}".format(t), "res{}".format(t + 1))
+ for t in [1, 2, 3]:
+ k = k.replace("bn{}".format(t), "conv{}.norm".format(t))
+ k = k.replace("downsample.0", "shortcut")
+ k = k.replace("downsample.1", "shortcut.norm")
+ print(old_k, "->", k)
+ newmodel[k] = v.numpy()
+
+ res = {
+ "model": newmodel,
+ "__author__": "OpenSelfSup",
+ "matching_heuristics": True
+ }
+
+ assert sys.argv[2].endswith('.pkl')
+ with open(sys.argv[2], "wb") as f:
+ pkl.dump(res, f)
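A minimal usage sketch for the converter above, with illustrative paths: it loads the checkpoint's `state_dict`, renames the torchvision-style keys to detectron2's `stem`/`res{2..5}` layout, and asserts that the output name ends in `.pkl`.

```bash
# Hypothetical paths; any OpenSelfSup checkpoint with a "state_dict" entry
# should work. The output filename must end in .pkl.
python benchmarks/detection/convert-pretrain-to-detectron2.py \
    work_dirs/selfsup/moco/r50_v2/epoch_200.pth moco_r50_v2.pkl
```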
diff --git a/benchmarks/detection/run.sh b/benchmarks/detection/run.sh
new file mode 100644
index 00000000..2b35e59d
--- /dev/null
+++ b/benchmarks/detection/run.sh
@@ -0,0 +1,6 @@
+#!/bin/bash
+DET_CFG=$1
+WEIGHTS=$2
+
+python $(dirname "$0")/train_net.py --config-file $DET_CFG \
+ --num-gpus 8 MODEL.WEIGHTS $WEIGHTS
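An illustrative invocation of `run.sh`: the trailing `MODEL.WEIGHTS` override it passes to `train_net.py` is what fills in the `"See Instructions"` placeholder used by the `*_moco.yaml` configs.

```bash
# Run from the repository root; the weights file is the pickle produced by
# convert-pretrain-to-detectron2.py (path illustrative).
bash benchmarks/detection/run.sh \
    benchmarks/detection/configs/pascal_voc_R_50_C4_24k_moco.yaml \
    moco_r50_v2.pkl
```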
diff --git a/benchmarks/detection/train_net.py b/benchmarks/detection/train_net.py
new file mode 100644
index 00000000..8ae31c9e
--- /dev/null
+++ b/benchmarks/detection/train_net.py
@@ -0,0 +1,77 @@
+#!/usr/bin/env python
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
+
+import os
+
+from detectron2.checkpoint import DetectionCheckpointer
+from detectron2.config import get_cfg
+from detectron2.engine import DefaultTrainer, default_argument_parser, default_setup, launch
+from detectron2.evaluation import COCOEvaluator, PascalVOCDetectionEvaluator
+from detectron2.layers import get_norm
+from detectron2.modeling.roi_heads import ROI_HEADS_REGISTRY, Res5ROIHeads
+
+
+@ROI_HEADS_REGISTRY.register()
+class Res5ROIHeadsExtraNorm(Res5ROIHeads):
+ """
+ As described in the MoCo paper, there is an extra BN layer
+ following the res5 stage.
+ """
+
+ def _build_res5_block(self, cfg):
+ seq, out_channels = super()._build_res5_block(cfg)
+ norm = cfg.MODEL.RESNETS.NORM
+ norm = get_norm(norm, out_channels)
+ seq.add_module("norm", norm)
+ return seq, out_channels
+
+
+class Trainer(DefaultTrainer):
+
+ @classmethod
+ def build_evaluator(cls, cfg, dataset_name, output_folder=None):
+ if output_folder is None:
+ output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
+ if "coco" in dataset_name:
+ return COCOEvaluator(dataset_name, cfg, True, output_folder)
+ else:
+ assert "voc" in dataset_name
+ return PascalVOCDetectionEvaluator(dataset_name)
+
+
+def setup(args):
+ cfg = get_cfg()
+ cfg.merge_from_file(args.config_file)
+ cfg.merge_from_list(args.opts)
+ cfg.freeze()
+ default_setup(cfg, args)
+ return cfg
+
+
+def main(args):
+ cfg = setup(args)
+
+ if args.eval_only:
+ model = Trainer.build_model(cfg)
+ DetectionCheckpointer(
+ model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
+ cfg.MODEL.WEIGHTS, resume=args.resume)
+ res = Trainer.test(cfg, model)
+ return res
+
+ trainer = Trainer(cfg)
+ trainer.resume_or_load(resume=args.resume)
+ return trainer.train()
+
+
+if __name__ == "__main__":
+ args = default_argument_parser().parse_args()
+ print("Command Line Args:", args)
+ launch(
+ main,
+ args.num_gpus,
+ num_machines=args.num_machines,
+ machine_rank=args.machine_rank,
+ dist_url=args.dist_url,
+ args=(args, ),
+ )
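`train_net.py` reuses detectron2's `default_argument_parser`, so an evaluation-only run can be sketched as follows; paths are illustrative, and the trailing key-value pair is merged into the config.

```bash
# Evaluate a trained detector without further training. "output/" is
# detectron2's default OUTPUT_DIR; adjust to your actual run directory.
python benchmarks/detection/train_net.py \
    --config-file benchmarks/detection/configs/coco_R_50_C4_1x_moco.yaml \
    --eval-only --num-gpus 8 \
    MODEL.WEIGHTS output/model_final.pth
```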
diff --git a/benchmarks/dist_test_svm_epoch.sh b/benchmarks/dist_test_svm_epoch.sh
new file mode 100644
index 00000000..216229f0
--- /dev/null
+++ b/benchmarks/dist_test_svm_epoch.sh
@@ -0,0 +1,28 @@
+#!/bin/bash
+set -e
+set -x
+
+CFG=$1
+EPOCH=$2
+FEAT_LIST=$3 # e.g. "feat5" or "feat4 feat5". If left empty, defaults to "feat5"
+GPUS=${4:-8}
+WORK_DIR=$(echo ${CFG%.*} | sed -e "s/configs/work_dirs/g")/
+
+if [ "$CFG" == "" ] || [ "$EPOCH" == "" ]; then
+ echo "ERROR: Missing arguments."
+ exit 1
+fi
+
+if [ ! -f $WORK_DIR/epoch_${EPOCH}.pth ]; then
+ echo "ERROR: File does not exist: $WORK_DIR/epoch_${EPOCH}.pth"
+ exit 1
+fi
+
+mkdir -p $WORK_DIR/logs
+echo "Testing checkpoint: $WORK_DIR/epoch_${EPOCH}.pth" 2>&1 | tee -a $WORK_DIR/logs/eval_svm.log
+
+bash tools/dist_extract.sh $CFG $GPUS $WORK_DIR --checkpoint $WORK_DIR/epoch_${EPOCH}.pth
+
+bash benchmarks/svm_tools/eval_svm_full.sh $WORK_DIR "$FEAT_LIST"
+
+bash benchmarks/svm_tools/eval_svm_lowshot.sh $WORK_DIR "$FEAT_LIST"
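A hypothetical invocation of the script above: the work directory is derived from the config path by substituting `configs` with `work_dirs`, so the named epoch checkpoint must already exist there.

```bash
# Config path is hypothetical; epoch_200.pth is expected under
# work_dirs/selfsup/moco/r50_v2/ before the script runs.
bash benchmarks/dist_test_svm_epoch.sh configs/selfsup/moco/r50_v2.py 200 "feat5" 8
```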
diff --git a/benchmarks/dist_test_svm_pretrain.sh b/benchmarks/dist_test_svm_pretrain.sh
new file mode 100644
index 00000000..5297899b
--- /dev/null
+++ b/benchmarks/dist_test_svm_pretrain.sh
@@ -0,0 +1,28 @@
+#!/bin/bash
+set -e
+set -x
+
+CFG=$1
+PRETRAIN=$2 # pretrained model or "random" (random init)
+FEAT_LIST=$3 # e.g. "feat5" or "feat4 feat5". If left empty, defaults to "feat5"
+GPUS=${4:-8}
+WORK_DIR="$(echo ${CFG%.*} | sed -e "s/configs/work_dirs/g")/$(echo $PRETRAIN | rev | cut -d/ -f 1 | rev)"
+
+if [ "$CFG" == "" ] || [ "$PRETRAIN" == "" ]; then
+ echo "ERROR: Missing arguments."
+ exit 1
+fi
+
+if [ ! -f $PRETRAIN ] && [ "$PRETRAIN" != "random" ]; then
+ echo "ERROR: PRETRAIN should be a file or a string \"random\", got: $PRETRAIN"
+ exit 1
+fi
+
+mkdir -p $WORK_DIR/logs
+echo "Testing pretrain: $PRETRAIN" 2>&1 | tee -a $WORK_DIR/logs/eval_svm.log
+
+bash tools/dist_extract.sh $CFG $GPUS $WORK_DIR --pretrained $PRETRAIN
+
+bash benchmarks/svm_tools/eval_svm_full.sh $WORK_DIR "$FEAT_LIST"
+
+bash benchmarks/svm_tools/eval_svm_lowshot.sh $WORK_DIR "$FEAT_LIST"
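As a sketch (config and checkpoint paths are hypothetical), the second argument is either a checkpoint file or the literal string `random` for a randomly initialized baseline:

```bash
# Evaluate a pretrained checkpoint ...
bash benchmarks/dist_test_svm_pretrain.sh \
    configs/selfsup/moco/r50_v2.py pretrains/moco_r50_v2.pth "feat5" 8
# ... or benchmark a random-init backbone for comparison:
bash benchmarks/dist_test_svm_pretrain.sh \
    configs/selfsup/moco/r50_v2.py random "feat5" 8
```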
diff --git a/benchmarks/dist_train_linear.sh b/benchmarks/dist_train_linear.sh
new file mode 100644
index 00000000..ce1c4224
--- /dev/null
+++ b/benchmarks/dist_train_linear.sh
@@ -0,0 +1,24 @@
+#!/usr/bin/env bash
+
+set -e
+set -x
+
+CFG=$1 # use cfgs under "configs/benchmarks/linear_classification/"
+PRETRAIN=$2
+PY_ARGS=${@:3} # --resume_from --deterministic
+GPUS=8 # When changing GPUS, please also change imgs_per_gpu in the config file accordingly to ensure the total batch size is 256.
+PORT=${PORT:-29500}
+
+if [ "$CFG" == "" ] || [ "$PRETRAIN" == "" ]; then
+ echo "ERROR: Missing arguments."
+ exit 1
+fi
+
+WORK_DIR="$(echo ${CFG%.*} | sed -e "s/configs/work_dirs/g")/$(echo $PRETRAIN | rev | cut -d/ -f 1 | rev)"
+
+# train
+python -m torch.distributed.launch --nproc_per_node=$GPUS --master_port=$PORT \
+ tools/train.py \
+ $CFG \
+ --pretrained $PRETRAIN \
+ --work_dir $WORK_DIR --seed 0 --launcher="pytorch" ${PY_ARGS}
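An illustrative linear-evaluation launch; `PORT` may be overridden when several jobs share one machine, and both paths below are hypothetical.

```bash
# 8-GPU linear classification on a frozen backbone; keep the total batch
# size at 256 as noted in the script.
PORT=29501 bash benchmarks/dist_train_linear.sh \
    configs/benchmarks/linear_classification/imagenet/official/r50_last.py \
    work_dirs/selfsup/moco/r50_v2/epoch_200.pth
```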
diff --git a/benchmarks/dist_train_linear_1gpu.sh b/benchmarks/dist_train_linear_1gpu.sh
new file mode 100644
index 00000000..51244e11
--- /dev/null
+++ b/benchmarks/dist_train_linear_1gpu.sh
@@ -0,0 +1,24 @@
+#!/usr/bin/env bash
+
+set -e
+set -x
+
+CFG=$1 # use cfgs under "configs/benchmarks/linear_classification/"
+PRETRAIN=$2
+PY_ARGS=${@:3} # --resume_from --deterministic
+GPUS=1 # When changing GPUS, please also change imgs_per_gpu in the config file accordingly to ensure the total batch size is 256.
+PORT=${PORT:-29500}
+
+if [ "$CFG" == "" ] || [ "$PRETRAIN" == "" ]; then
+ echo "ERROR: Missing arguments."
+ exit 1
+fi
+
+WORK_DIR="$(echo ${CFG%.*} | sed -e "s/configs/work_dirs/g")/$(echo $PRETRAIN | rev | cut -d/ -f 1 | rev)"
+
+# train
+python -m torch.distributed.launch --nproc_per_node=$GPUS --master_port=$PORT \
+ tools/train.py \
+ $CFG \
+ --pretrained $PRETRAIN \
+ --work_dir $WORK_DIR --seed 0 --launcher="pytorch" ${PY_ARGS}
diff --git a/benchmarks/dist_train_linear_1gpu_sd.sh b/benchmarks/dist_train_linear_1gpu_sd.sh
new file mode 100644
index 00000000..eee380bf
--- /dev/null
+++ b/benchmarks/dist_train_linear_1gpu_sd.sh
@@ -0,0 +1,27 @@
+#!/usr/bin/env bash
+
+set -e
+set -x
+
+CFG=$1 # use cfgs under "configs/benchmarks/linear_classification/"
+PRETRAIN=$2
+SD=$3 # random seed
+PY_ARGS=${@:4} # --resume_from --deterministic
+GPUS=1 # When changing GPUS, please also change imgs_per_gpu in the config file accordingly to ensure the total batch size is 256.
+PORT=${PORT:-29500}
+
+if [ "$CFG" == "" ] || [ "$PRETRAIN" == "" ]; then
+ echo "ERROR: Missing arguments."
+ exit 1
+fi
+
+
+WORK_DIR="$(echo ${CFG%.*} | sed -e "s/configs/work_dirs/g")/$(echo $PRETRAIN | rev | cut -d/ -f 1 | rev)"
+
+
+# train
+python -m torch.distributed.launch --nproc_per_node=$GPUS --master_port=$PORT \
+ tools/train.py \
+ $CFG \
+ --pretrained $PRETRAIN \
+ --work_dir $WORK_DIR --seed $SD --launcher="pytorch" ${PY_ARGS}
diff --git a/benchmarks/dist_train_linear_2gpu.sh b/benchmarks/dist_train_linear_2gpu.sh
new file mode 100644
index 00000000..36131104
--- /dev/null
+++ b/benchmarks/dist_train_linear_2gpu.sh
@@ -0,0 +1,24 @@
+#!/usr/bin/env bash
+
+set -e
+set -x
+
+CFG=$1 # use cfgs under "configs/benchmarks/linear_classification/"
+PRETRAIN=$2
+PY_ARGS=${@:3} # --resume_from --deterministic
+GPUS=2 # When changing GPUS, please also change imgs_per_gpu in the config file accordingly to ensure the total batch size is 256.
+PORT=${PORT:-29500}
+
+if [ "$CFG" == "" ] || [ "$PRETRAIN" == "" ]; then
+ echo "ERROR: Missing arguments."
+ exit 1
+fi
+
+WORK_DIR="$(echo ${CFG%.*} | sed -e "s/configs/work_dirs/g")/$(echo $PRETRAIN | rev | cut -d/ -f 1 | rev)"
+
+# train
+python -m torch.distributed.launch --nproc_per_node=$GPUS --master_port=$PORT \
+ tools/train.py \
+ $CFG \
+ --pretrained $PRETRAIN \
+ --work_dir $WORK_DIR --seed 0 --launcher="pytorch" ${PY_ARGS}
diff --git a/benchmarks/dist_train_linear_4gpu.sh b/benchmarks/dist_train_linear_4gpu.sh
new file mode 100644
index 00000000..9f176d2c
--- /dev/null
+++ b/benchmarks/dist_train_linear_4gpu.sh
@@ -0,0 +1,24 @@
+#!/usr/bin/env bash
+
+set -e
+set -x
+
+CFG=$1 # use cfgs under "configs/benchmarks/linear_classification/"
+PRETRAIN=$2
+PY_ARGS=${@:3} # --resume_from --deterministic
+GPUS=4 # When changing GPUS, please also change imgs_per_gpu in the config file accordingly to ensure the total batch size is 256.
+PORT=${PORT:-29500}
+
+if [ "$CFG" == "" ] || [ "$PRETRAIN" == "" ]; then
+ echo "ERROR: Missing arguments."
+ exit 1
+fi
+
+WORK_DIR="$(echo ${CFG%.*} | sed -e "s/configs/work_dirs/g")/$(echo $PRETRAIN | rev | cut -d/ -f 1 | rev)"
+
+# train
+python -m torch.distributed.launch --nproc_per_node=$GPUS --master_port=$PORT \
+ tools/train.py \
+ $CFG \
+ --pretrained $PRETRAIN \
+ --work_dir $WORK_DIR --seed 0 --launcher="pytorch" ${PY_ARGS}
diff --git a/benchmarks/dist_train_semi.sh b/benchmarks/dist_train_semi.sh
new file mode 100644
index 00000000..b6d7e37b
--- /dev/null
+++ b/benchmarks/dist_train_semi.sh
@@ -0,0 +1,24 @@
+#!/usr/bin/env bash
+
+set -e
+set -x
+
+CFG=$1 # use cfgs under "configs/benchmarks/semi_classification/imagenet_*percent/"
+PRETRAIN=$2
+PY_ARGS=${@:3}
+GPUS=4 # in the standard setting, GPUS=4
+PORT=${PORT:-29500}
+
+if [ "$CFG" == "" ] || [ "$PRETRAIN" == "" ]; then
+ echo "ERROR: Missing arguments."
+ exit 1
+fi
+
+WORK_DIR="$(echo ${CFG%.*} | sed -e "s/configs/work_dirs/g")/$(echo $PRETRAIN | rev | cut -d/ -f 1 | rev)"
+
+# train
+python -m torch.distributed.launch --nproc_per_node=$GPUS --master_port=$PORT \
+ tools/train.py \
+ $CFG \
+ --pretrained $PRETRAIN \
+ --work_dir $WORK_DIR --seed 0 --launcher="pytorch" ${PY_ARGS}
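An illustrative semi-supervised fine-tuning launch on a 10%-labels ImageNet config; both paths are hypothetical and follow the directory pattern noted in the script.

```bash
# 4-GPU fine-tuning with 10% of ImageNet labels.
bash benchmarks/dist_train_semi.sh \
    configs/benchmarks/semi_classification/imagenet_10percent/r50_last.py \
    work_dirs/selfsup/moco/r50_v2/epoch_200.pth
```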
diff --git a/benchmarks/extract_info/voc07.py b/benchmarks/extract_info/voc07.py
new file mode 100644
index 00000000..2680b198
--- /dev/null
+++ b/benchmarks/extract_info/voc07.py
@@ -0,0 +1,20 @@
+data_source_cfg = dict(type='ImageList', memcached=False, mclient_path=None)
+data_root = "data/VOCdevkit/VOC2007/JPEGImages"
+data_all_list = "data/VOCdevkit/VOC2007/Lists/trainvaltest.txt"
+split_at = [5011]
+split_name = ['voc07_trainval', 'voc07_test']
+img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+
+data = dict(
+ imgs_per_gpu=32,
+ workers_per_gpu=2,
+ extract=dict(
+ type="ExtractDataset",
+ data_source=dict(
+ list_file=data_all_list, root=data_root, **data_source_cfg),
+ pipeline=[
+ dict(type='Resize', size=256),
+ dict(type='Resize', size=(224, 224)),
+ dict(type='ToTensor'),
+ dict(type='Normalize', **img_norm_cfg),
+ ]))
diff --git a/benchmarks/srun_test_svm_epoch.sh b/benchmarks/srun_test_svm_epoch.sh
new file mode 100644
index 00000000..33f41262
--- /dev/null
+++ b/benchmarks/srun_test_svm_epoch.sh
@@ -0,0 +1,24 @@
+#!/usr/bin/env bash
+set -e
+set -x
+
+PARTITION=$1
+CFG=$2
+EPOCH=$3
+FEAT_LIST=$4 # e.g. "feat5" or "feat4 feat5". If left empty, defaults to "feat5"
+GPUS=${5:-8}
+WORK_DIR=$(echo ${CFG%.*} | sed -e "s/configs/work_dirs/g")/
+
+if [ ! -f $WORK_DIR/epoch_${EPOCH}.pth ]; then
+ echo "ERROR: File does not exist: $WORK_DIR/epoch_${EPOCH}.pth"
+ exit 1
+fi
+
+mkdir -p $WORK_DIR/logs
+echo "Testing checkpoint: $WORK_DIR/epoch_${EPOCH}.pth" 2>&1 | tee -a $WORK_DIR/logs/eval_svm.log
+
+bash tools/srun_extract.sh $PARTITION $CFG $GPUS $WORK_DIR --checkpoint $WORK_DIR/epoch_${EPOCH}.pth
+
+srun -p $PARTITION bash benchmarks/svm_tools/eval_svm_full.sh $WORK_DIR "$FEAT_LIST"
+
+srun -p $PARTITION bash benchmarks/svm_tools/eval_svm_lowshot.sh $WORK_DIR "$FEAT_LIST"
diff --git a/benchmarks/srun_test_svm_pretrain.sh b/benchmarks/srun_test_svm_pretrain.sh
new file mode 100644
index 00000000..a475138b
--- /dev/null
+++ b/benchmarks/srun_test_svm_pretrain.sh
@@ -0,0 +1,24 @@
+#!/usr/bin/env bash
+set -e
+set -x
+
+PARTITION=$1
+CFG=$2
+PRETRAIN=$3 # pretrained model or "random" (random init)
+FEAT_LIST=$4 # e.g. "feat5" or "feat4 feat5". If left empty, defaults to "feat5"
+GPUS=${5:-8}
+WORK_DIR="$(echo ${CFG%.*} | sed -e "s/configs/work_dirs/g")/$(echo $PRETRAIN | rev | cut -d/ -f 1 | rev)"
+
+if [ ! -f $PRETRAIN ] && [ "$PRETRAIN" != "random" ]; then
+ echo "ERROR: PRETRAIN should be a file or a string \"random\", got: $PRETRAIN"
+ exit 1
+fi
+
+mkdir -p $WORK_DIR/logs
+echo "Testing pretrain: $PRETRAIN" 2>&1 | tee -a $WORK_DIR/logs/eval_svm.log
+
+bash tools/srun_extract.sh $PARTITION $CFG $GPUS $WORK_DIR --pretrained $PRETRAIN
+
+srun -p $PARTITION bash benchmarks/svm_tools/eval_svm_full.sh $WORK_DIR "$FEAT_LIST"
+
+srun -p $PARTITION bash benchmarks/svm_tools/eval_svm_lowshot.sh $WORK_DIR "$FEAT_LIST"
diff --git a/benchmarks/srun_train_linear.sh b/benchmarks/srun_train_linear.sh
new file mode 100644
index 00000000..4f857b58
--- /dev/null
+++ b/benchmarks/srun_train_linear.sh
@@ -0,0 +1,31 @@
+#!/usr/bin/env bash
+
+set -e
+set -x
+
+PARTITION=$1
+CFG=$2
+PRETRAIN=$3
+PY_ARGS=${@:4}
+JOB_NAME="openselfsup"
+GPUS=8 # When changing GPUS, please also change imgs_per_gpu in the config file accordingly to ensure the total batch size is 256.
+GPUS_PER_NODE=${GPUS_PER_NODE:-8}
+CPUS_PER_TASK=${CPUS_PER_TASK:-5}
+SRUN_ARGS=${SRUN_ARGS:-""}
+
+WORK_DIR="$(echo ${CFG%.*} | sed -e "s/configs/work_dirs/g")/$(echo $PRETRAIN | rev | cut -d/ -f 1 | rev)"
+
+# train
+GLOG_vmodule=MemcachedClient=-1 \
+srun -p ${PARTITION} \
+ --job-name=${JOB_NAME} \
+ --gres=gpu:${GPUS_PER_NODE} \
+ --ntasks=${GPUS} \
+ --ntasks-per-node=${GPUS_PER_NODE} \
+ --cpus-per-task=${CPUS_PER_TASK} \
+ --kill-on-bad-exit=1 \
+ ${SRUN_ARGS} \
+ python -u tools/train.py \
+ $CFG \
+ --pretrained $PRETRAIN \
+ --work_dir $WORK_DIR --seed 0 --launcher="slurm" ${PY_ARGS}
diff --git a/benchmarks/srun_train_semi.sh b/benchmarks/srun_train_semi.sh
new file mode 100644
index 00000000..0aa8f022
--- /dev/null
+++ b/benchmarks/srun_train_semi.sh
@@ -0,0 +1,31 @@
+#!/usr/bin/env bash
+
+set -e
+set -x
+
+PARTITION=$1
+CFG=$2
+PRETRAIN=$3
+PY_ARGS=${@:4}
+JOB_NAME="openselfsup"
+GPUS=4 # in the standard setting, GPUS=4
+GPUS_PER_NODE=${GPUS_PER_NODE:-4}
+CPUS_PER_TASK=${CPUS_PER_TASK:-5}
+SRUN_ARGS=${SRUN_ARGS:-""}
+
+WORK_DIR="$(echo ${CFG%.*} | sed -e "s/configs/work_dirs/g")/$(echo $PRETRAIN | rev | cut -d/ -f 1 | rev)"
+
+# train
+GLOG_vmodule=MemcachedClient=-1 \
+srun -p ${PARTITION} \
+ --job-name=${JOB_NAME} \
+ --gres=gpu:${GPUS_PER_NODE} \
+ --ntasks=${GPUS} \
+ --ntasks-per-node=${GPUS_PER_NODE} \
+ --cpus-per-task=${CPUS_PER_TASK} \
+ --kill-on-bad-exit=1 \
+ ${SRUN_ARGS} \
+ python -u tools/train.py \
+ $CFG \
+ --pretrained $PRETRAIN \
+ --work_dir $WORK_DIR --seed 0 --launcher="slurm" ${PY_ARGS}
diff --git a/benchmarks/svm_tools/aggregate_low_shot_svm_stats.py b/benchmarks/svm_tools/aggregate_low_shot_svm_stats.py
new file mode 100644
index 00000000..34f59377
--- /dev/null
+++ b/benchmarks/svm_tools/aggregate_low_shot_svm_stats.py
@@ -0,0 +1,127 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+#
+################################################################################
+"""
+Aggregate the stats over various independent samples for low-shot svm training.
+Stats computed: mean, max, min, std
+
+Relevant transfer tasks: Low-shot Image Classification VOC07 and Places205 low
+shot samples.
+"""
+
+from __future__ import division
+from __future__ import absolute_import
+from __future__ import unicode_literals
+from __future__ import print_function
+
+import argparse
+import logging
+import numpy as np
+import os
+import sys
+
+# create the logger
+FORMAT = '[%(levelname)s: %(filename)s: %(lineno)4d]: %(message)s'
+logging.basicConfig(level=logging.INFO, format=FORMAT, stream=sys.stdout)
+logger = logging.getLogger(__name__)
+
+
+def save_stats(output_dir, stat, output):
+ out_file = os.path.join(output_dir, 'test_ap_{}.npy'.format(stat))
+ #logger.info('Saving {} to: {} {}'.format(stat, out_file, output.shape))
+ np.save(out_file, output)
+
+
+def aggregate_stats(opts):
+ k_values = [int(val) for val in opts.k_values.split(",")]
+ sample_inds = [int(val) for val in opts.sample_inds.split(",")]
+ #logger.info(
+ # 'Aggregating stats for k-values: {} and sample_inds: {}'.format(
+ # k_values, sample_inds))
+
+ output_mean, output_max, output_min, output_std = [], [], [], []
+ for k_idx in range(len(k_values)):
+ k_low = k_values[k_idx]
+ k_val_output = []
+ for inds in range(len(sample_inds)):
+ sample_idx = sample_inds[inds]
+ file_name = 'test_ap_sample{}_k{}.npy'.format(
+ sample_idx + 1, k_low)
+ filepath = os.path.join(opts.output_path, file_name)
+ if os.path.exists(filepath):
+ k_val_output.append(np.load(filepath, encoding='latin1'))
+ else:
+ logger.info('file does not exist: {}'.format(filepath))
+ k_val_output = np.concatenate(k_val_output, axis=0)
+ k_low_max = np.max(
+ k_val_output, axis=0).reshape(-1, k_val_output.shape[1])
+ k_low_min = np.min(
+ k_val_output, axis=0).reshape(-1, k_val_output.shape[1])
+ k_low_mean = np.mean(
+ k_val_output, axis=0).reshape(-1, k_val_output.shape[1])
+ k_low_std = np.std(
+ k_val_output, axis=0).reshape(-1, k_val_output.shape[1])
+ output_mean.append(k_low_mean)
+ output_min.append(k_low_min)
+ output_max.append(k_low_max)
+ output_std.append(k_low_std)
+
+ output_mean = np.concatenate(output_mean, axis=0)
+ output_min = np.concatenate(output_min, axis=0)
+ output_max = np.concatenate(output_max, axis=0)
+ output_std = np.concatenate(output_std, axis=0)
+
+ save_stats(opts.output_path, 'mean', output_mean)
+ save_stats(opts.output_path, 'min', output_min)
+ save_stats(opts.output_path, 'max', output_max)
+ save_stats(opts.output_path, 'std', output_std)
+
+ argmax_cls = np.argmax(output_mean, axis=1)
+ argmax_mean, argmax_min, argmax_max, argmax_std = [], [], [], []
+ for idx in range(len(argmax_cls)):
+ argmax_mean.append(100.0 * output_mean[idx, argmax_cls[idx]])
+ argmax_min.append(100.0 * output_min[idx, argmax_cls[idx]])
+ argmax_max.append(100.0 * output_max[idx, argmax_cls[idx]])
+ argmax_std.append(100.0 * output_std[idx, argmax_cls[idx]])
+ for idx in range(len(argmax_max)):
+ logger.info('mean/min/max/std: {} / {} / {} / {}'.format(
+ round(argmax_mean[idx], 2),
+ round(argmax_min[idx], 2),
+ round(argmax_max[idx], 2),
+ round(argmax_std[idx], 2),
+ ))
+ #logger.info('All done!!')
+
+
+def main():
+ parser = argparse.ArgumentParser(description='Aggregate low-shot SVM stats')
+ parser.add_argument(
+ '--output_path',
+ type=str,
+ default=None,
+ help="Numpy file containing test AP result files")
+ parser.add_argument(
+ '--k_values',
+ type=str,
+ default=None,
+ help="Low-shot k-values for svm testing. Comma separated")
+ parser.add_argument(
+ '--sample_inds',
+ type=str,
+ default=None,
+ help="sample_inds for which to test svm. Comma separated")
+ if len(sys.argv) == 1:
+ parser.print_help()
+ sys.exit(1)
+
+ opts = parser.parse_args()
+ #logger.info(opts)
+ aggregate_stats(opts)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/benchmarks/svm_tools/eval_svm_full.sh b/benchmarks/svm_tools/eval_svm_full.sh
new file mode 100644
index 00000000..4fbbd26e
--- /dev/null
+++ b/benchmarks/svm_tools/eval_svm_full.sh
@@ -0,0 +1,40 @@
+#!/bin/bash
+set -x
+set -e
+
+WORK_DIR=$1
+FEAT_LIST=${2:-"feat5"} # "feat1 feat2 feat3 feat4 feat5"
+TRAIN_SVM_FLAG=true
+TEST_SVM_FLAG=true
+DATA="data/VOCdevkit/VOC2007/SVMLabels"
+
+# config svm
+costs="1.0,10.0,100.0"
+
+for feat in $FEAT_LIST; do
+ echo "For feature: $feat" 2>&1 | tee -a $WORK_DIR/logs/eval_svm.log
+ # train svm
+ if $TRAIN_SVM_FLAG; then
+ rm -rf $WORK_DIR/svm
+ mkdir -p $WORK_DIR/svm/voc07_${feat}
+ echo "training svm ..."
+ python benchmarks/svm_tools/train_svm_kfold_parallel.py \
+ --data_file $WORK_DIR/features/voc07_trainval_${feat}.npy \
+ --targets_data_file $DATA/train_labels.npy \
+ --costs_list $costs \
+ --output_path $WORK_DIR/svm/voc07_${feat}
+ fi
+
+ # test svm
+ if $TEST_SVM_FLAG; then
+ echo "testing svm ..."
+ python benchmarks/svm_tools/test_svm.py \
+ --data_file $WORK_DIR/features/voc07_test_${feat}.npy \
+ --json_targets $DATA/test_targets.json \
+ --targets_data_file $DATA/test_labels.npy \
+ --costs_list $costs \
+ --generate_json 1 \
+ --output_path $WORK_DIR/svm/voc07_${feat} 2>&1 | tee -a $WORK_DIR/logs/eval_svm.log
+ fi
+
+done
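The script can also be run standalone, assuming features were already extracted into `$WORK_DIR/features` (e.g. `voc07_trainval_feat5.npy` and `voc07_test_feat5.npy`) by one of the extraction scripts above; the work directory below is illustrative.

```bash
# Train and test VOC07 SVMs on previously extracted features.
bash benchmarks/svm_tools/eval_svm_full.sh work_dirs/selfsup/moco/r50_v2 "feat5"
```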
diff --git a/benchmarks/svm_tools/eval_svm_lowshot.sh b/benchmarks/svm_tools/eval_svm_lowshot.sh
new file mode 100644
index 00000000..ae85b126
--- /dev/null
+++ b/benchmarks/svm_tools/eval_svm_lowshot.sh
@@ -0,0 +1,64 @@
+#!/bin/bash
+set -x
+set -e
+
+WORK_DIR=$1
+MODE="full"
+FEAT_LIST=${2:-"feat5"} # "feat1 feat2 feat3 feat4 feat5"
+TRAIN_SVM_LOWSHOT_FLAG=true
+TEST_SVM_LOWSHOT_FLAG=true
+AGGREGATE_FLAG=true
+DATA="data/VOCdevkit/VOC2007/SVMLabels"
+
+# config svm
+costs="1.0,10.0,100.0"
+if [ "$MODE" == "fast" ]; then
+ shots="96"
+else
+ shots="1 2 4 8 16 32 64 96"
+fi
+
+for feat in $FEAT_LIST; do
+ echo "For feature: $feat" 2>&1 | tee -a $WORK_DIR/logs/eval_svm.log
+ # train lowshot svm
+ if $TRAIN_SVM_LOWSHOT_FLAG; then
+ rm -rf $WORK_DIR/svm_lowshot
+ mkdir -p $WORK_DIR/svm_lowshot/voc07_${feat}
+ echo "training svm low-shot ..."
+ for s in {1..5}; do
+ for k in $shots; do
+ echo -e "\ts${s} k${k}"
+ python benchmarks/svm_tools/train_svm_low_shot.py \
+ --data_file $WORK_DIR/features/voc07_trainval_${feat}.npy \
+ --targets_data_file $DATA/low_shot/labels/train_targets_sample${s}_k${k}.npy \
+ --costs_list $costs \
+ --output_path $WORK_DIR/svm_lowshot/voc07_${feat}
+ done
+ done
+ fi
+
+ # test lowshot svm
+ if $TEST_SVM_LOWSHOT_FLAG; then
+ echo "testing svm low-shot ..."
+ python benchmarks/svm_tools/test_svm_low_shot.py \
+ --data_file $WORK_DIR/features/voc07_test_${feat}.npy \
+ --targets_data_file $DATA/test_labels.npy \
+ --json_targets $DATA/test_targets.json \
+ --generate_json 1 \
+ --costs_list $costs \
+ --output_path $WORK_DIR/svm_lowshot/voc07_${feat} \
+ --k_values "${shots// /,}" \
+ --sample_inds "0,1,2,3,4" \
+ --dataset "voc"
+ fi
+
+ # aggregate testing results
+ if $AGGREGATE_FLAG; then
+ echo "aggregating svm low-shot ..."
+ python benchmarks/svm_tools/aggregate_low_shot_svm_stats.py \
+ --output_path $WORK_DIR/svm_lowshot/voc07_${feat} \
+ --k_values "${shots// /,}" \
+ --sample_inds "0,1,2,3,4" 2>&1 | tee -a $WORK_DIR/logs/eval_svm.log
+ fi
+
+done
diff --git a/benchmarks/svm_tools/svm_helper.py b/benchmarks/svm_tools/svm_helper.py
new file mode 100644
index 00000000..9fe14470
--- /dev/null
+++ b/benchmarks/svm_tools/svm_helper.py
@@ -0,0 +1,171 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+#
+################################################################################
+"""
+Helper module for svm training and testing.
+"""
+
+from __future__ import division
+from __future__ import absolute_import
+from __future__ import unicode_literals
+from __future__ import print_function
+
+import logging
+import numpy as np
+import os
+import sys
+
+# create the logger
+FORMAT = '[%(levelname)s: %(filename)s: %(lineno)4d]: %(message)s'
+logging.basicConfig(level=logging.INFO, format=FORMAT, stream=sys.stdout)
+logger = logging.getLogger(__name__)
+
+
+# Python 2 and python 3 have different floating point precision. The following
+# trick helps keep the backwards compatibility.
+def py2_py3_compatible_cost(cost):
+ return str(float("{:.17f}".format(cost)))
+
+
+def get_svm_train_output_files(cls, cost, output_path):
+ cls_cost = str(cls) + '_cost' + py2_py3_compatible_cost(cost)
+ out_file = os.path.join(output_path, 'cls' + cls_cost + '.pickle')
+ ap_matrix_out_file = os.path.join(output_path,
+ 'AP_cls' + cls_cost + '.npy')
+ return out_file, ap_matrix_out_file
+
+
+def parse_cost_list(costs):
+ costs_list = [float(cost) for cost in costs.split(",")]
+ start_num, end_num = 4, 20
+ for num in range(start_num, end_num):
+ costs_list.append(0.5**num)
+ return costs_list
+
+
+def normalize_features(features):
+ feats_norm = np.linalg.norm(features, axis=1)
+ features = features / (feats_norm + 1e-5)[:, np.newaxis]
+ return features
+
+
+def load_input_data(data_file, targets_file):
+ # load the features and the targets
+ #logger.info('loading features and targets...')
+ targets = np.load(targets_file, encoding='latin1')
+ features = np.array(np.load(data_file,
+ encoding='latin1')).astype(np.float64)
+ assert features.shape[0] == targets.shape[0], "Mismatched #images"
+ #logger.info('Loaded features: {} and targets: {}'.format(
+ # features.shape, targets.shape))
+ return features, targets
+
+
+def calculate_ap(rec, prec):
+ """
+ Computes the AP under the precision recall curve.
+ """
+ rec, prec = rec.reshape(rec.size, 1), prec.reshape(prec.size, 1)
+ z, o = np.zeros((1, 1)), np.ones((1, 1))
+ mrec, mpre = np.vstack((z, rec, o)), np.vstack((z, prec, z))
+ for i in range(len(mpre) - 2, -1, -1):
+ mpre[i] = max(mpre[i], mpre[i + 1])
+
+ indices = np.where(mrec[1:] != mrec[0:-1])[0] + 1
+ ap = 0
+ for i in indices:
+ ap = ap + (mrec[i] - mrec[i - 1]) * mpre[i]
+ return ap
+
+
+def get_precision_recall(targets, preds):
+ """
+ [P, R, score, ap] = get_precision_recall(targets, preds)
+ Input :
+ targets : number of occurrences of this class in the ith image
+ preds : score for this image
+ Output :
+ P, R : precision and recall
+ score : score which corresponds to the particular precision and recall
+ ap : average precision
+ """
+ # binarize targets
+ targets = np.array(targets > 0, dtype=np.float32)
+ tog = np.hstack((targets[:, np.newaxis].astype(np.float64),
+ preds[:, np.newaxis].astype(np.float64)))
+ ind = np.argsort(preds)
+ ind = ind[::-1]
+ score = np.array([tog[i, 1] for i in ind])
+ sortcounts = np.array([tog[i, 0] for i in ind])
+
+ tp = sortcounts
+ fp = sortcounts.copy()
+ for i in range(sortcounts.shape[0]):
+ if sortcounts[i] >= 1:
+ fp[i] = 0.
+ elif sortcounts[i] < 1:
+ fp[i] = 1.
+ P = np.cumsum(tp) / (np.cumsum(tp) + np.cumsum(fp))
+ numinst = np.sum(targets)
+ R = np.cumsum(tp) / numinst
+ ap = calculate_ap(R, P)
+ return P, R, score, ap
+
+
+def get_low_shot_output_file(opts, cls, cost, suffix):
+ # in case of low-shot training, we train for 5 independent samples
+ # (sample{}) and vary low-shot amount (k{}). The input data should have
+ # sample{}_k{} information that we extract in suffix below.
+ # logger.info('Suffix: {}'.format(suffix))
+ cls_cost = str(cls) + '_cost' + py2_py3_compatible_cost(cost)
+ out_file = os.path.join(opts.output_path,
+ 'cls' + cls_cost + '_' + suffix + '.pickle')
+ return out_file
+
+
+def get_low_shot_svm_classes(targets, dataset):
+ # classes for which SVM testing should be done
+ num_classes, cls_list = None, None
+ if dataset == 'voc':
+ num_classes = targets.shape[1]
+ cls_list = range(num_classes)
+ elif dataset == 'places':
+ # each image in places has a target cls [0, .... ,204]
+ num_classes = len(set(targets[:, 0].tolist()))
+ cls_list = list(set(targets[:, 0].tolist()))
+ else:
+ logger.info('Dataset not recognized. Abort!')
+ #logger.info('Testing SVM for classes: {}'.format(cls_list))
+ #logger.info('Num classes: {}'.format(num_classes))
+ return num_classes, cls_list
+
+
+def get_cls_feats_labels(cls, features, targets, dataset):
+ out_feats, out_cls_labels = None, None
+ if dataset == 'voc':
+ cls_labels = targets[:, cls].astype(dtype=np.int32, copy=True)
+ # find the indices for positive/negative imgs. Remove the ignore label.
+ out_data_inds = (targets[:, cls] != -1)
+ out_feats = features[out_data_inds]
+ out_cls_labels = cls_labels[out_data_inds]
+ # label 0 = not present, set it to -1 as svm train target.
+ # Make the svm train target labels as -1, 1.
+ out_cls_labels[np.where(out_cls_labels == 0)] = -1
+ elif dataset == 'places':
+ out_feats = features
+ out_cls_labels = targets.astype(dtype=np.int32, copy=True)
+ # for the given class, get the relevant positive/negative images and
+ # make the label 1, -1
+ cls_inds = np.where(targets[:, 0] == cls)
+ non_cls_inds = (targets[:, 0] != cls)
+ out_cls_labels[non_cls_inds] = -1
+ out_cls_labels[cls_inds] = 1
+ # finally reshape into the format taken by sklearn svm package.
+ out_cls_labels = out_cls_labels.reshape(-1)
+ else:
+ raise Exception('args.dataset not recognized')
+ return out_feats, out_cls_labels
diff --git a/benchmarks/svm_tools/test_svm.py b/benchmarks/svm_tools/test_svm.py
new file mode 100644
index 00000000..49d34e20
--- /dev/null
+++ b/benchmarks/svm_tools/test_svm.py
@@ -0,0 +1,174 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+#
+################################################################################
+"""
+SVM test for image classification.
+
+Relevant transfer tasks: Image Classification VOC07 and COCO2014.
+"""
+from __future__ import division
+from __future__ import absolute_import
+from __future__ import unicode_literals
+from __future__ import print_function
+
+import argparse
+import json
+import logging
+import numpy as np
+import os
+import pickle
+import six
+import sys
+
+import svm_helper
+
+# create the logger
+FORMAT = '[%(levelname)s: %(filename)s: %(lineno)4d]: %(message)s'
+logging.basicConfig(level=logging.INFO, format=FORMAT, stream=sys.stdout)
+logger = logging.getLogger(__name__)
+
+
+def get_chosen_costs(opts, num_classes):
+ costs_list = svm_helper.parse_cost_list(opts.costs_list)
+ train_ap_matrix = np.zeros((num_classes, len(costs_list)))
+ for cls in range(num_classes):
+ for cost_idx in range(len(costs_list)):
+ cost = costs_list[cost_idx]
+ _, ap_out_file = svm_helper.get_svm_train_output_files(
+ cls, cost, opts.output_path)
+ train_ap_matrix[cls][cost_idx] = float(
+ np.load(ap_out_file, encoding='latin1')[0])
+ argmax_cls = np.argmax(train_ap_matrix, axis=1)
+ chosen_cost = [costs_list[idx] for idx in argmax_cls]
+ #logger.info('chosen_cost: {}'.format(chosen_cost))
+ np.save(
+ os.path.join(opts.output_path, 'crossval_ap.npy'),
+ np.array(train_ap_matrix))
+ np.save(
+ os.path.join(opts.output_path, 'chosen_cost.npy'),
+ np.array(chosen_cost))
+ #logger.info('saved crossval_ap AP to file: {}'.format(
+ # os.path.join(opts.output_path, 'crossval_ap.npy')))
+ #logger.info('saved chosen costs to file: {}'.format(
+ # os.path.join(opts.output_path, 'chosen_cost.npy')))
+ return np.array(chosen_cost)
+
+
+def load_json(file_path):
+ assert os.path.exists(file_path), "{} does not exist".format(file_path)
+ with open(file_path, 'r') as fp:
+ data = json.load(fp)
+ img_ids = list(data.keys())
+ cls_names = list(data[img_ids[0]].keys())
+ return img_ids, cls_names
+
+
+def test_svm(opts):
+ assert os.path.exists(opts.data_file), "Data file not found. Abort!"
+ json_predictions, img_ids, cls_names = {}, [], []
+ if opts.generate_json:
+ img_ids, cls_names = load_json(opts.json_targets)
+
+ features, targets = svm_helper.load_input_data(opts.data_file,
+ opts.targets_data_file)
+ # normalize the features: N x 9216 (example shape)
+ features = svm_helper.normalize_features(features)
+ num_classes = targets.shape[1]
+ #logger.info('Num classes: {}'.format(num_classes))
+
+ # get the chosen cost that maximizes the cross-validation AP per class
+ costs_list = get_chosen_costs(opts, num_classes)
+
+ ap_matrix = np.zeros((num_classes, 1))
+ for cls in range(num_classes):
+ cost = costs_list[cls]
+ #logger.info('Testing model for cls: {} cost: {}'.format(cls, cost))
+ model_file = os.path.join(
+ opts.output_path,
+ 'cls' + str(cls) + '_cost' + str(cost) + '.pickle')
+ with open(model_file, 'rb') as fopen:
+ if six.PY2:
+ model = pickle.load(fopen)
+ else:
+ model = pickle.load(fopen, encoding='latin1')
+ prediction = model.decision_function(features)
+ if opts.generate_json:
+ cls_name = cls_names[cls]
+ for idx in range(len(prediction)):
+ img_id = img_ids[idx]
+ if img_id in json_predictions:
+ json_predictions[img_id][cls_name] = prediction[idx]
+ else:
+ out_lbl = {}
+ out_lbl[cls_name] = prediction[idx]
+ json_predictions[img_id] = out_lbl
+
+ cls_labels = targets[:, cls]
+ # meaning of labels in VOC/COCO original loaded target files:
+ # label 0 = not present, set it to -1 as svm train target
+ # label 1 = present. Make the svm train target labels as -1, 1.
+ evaluate_data_inds = (targets[:, cls] != -1)
+ eval_preds = prediction[evaluate_data_inds]
+ eval_cls_labels = cls_labels[evaluate_data_inds]
+ eval_cls_labels[np.where(eval_cls_labels == 0)] = -1
+ P, R, score, ap = svm_helper.get_precision_recall(
+ eval_cls_labels, eval_preds)
+ ap_matrix[cls][0] = ap
+ if opts.generate_json:
+ output_file = os.path.join(opts.output_path, 'json_preds.json')
+ with open(output_file, 'w') as fp:
+ json.dump(json_predictions, fp)
+ #logger.info('Saved json predictions to: {}'.format(output_file))
+ logger.info('Mean AP: {}'.format(np.mean(ap_matrix, axis=0)))
+ np.save(os.path.join(opts.output_path, 'test_ap.npy'), np.array(ap_matrix))
+ #logger.info('saved test AP to file: {}'.format(
+ # os.path.join(opts.output_path, 'test_ap.npy')))
+
+
+def main():
+ parser = argparse.ArgumentParser(description='SVM model test')
+ parser.add_argument(
+ '--data_file',
+ type=str,
+ default=None,
+ help="Numpy file containing image features and labels")
+ parser.add_argument(
+ '--json_targets',
+ type=str,
+ default=None,
+ help="Json file containing json targets")
+ parser.add_argument(
+ '--targets_data_file',
+ type=str,
+ default=None,
+ help="Numpy file containing image labels")
+ parser.add_argument(
+ '--costs_list',
+ type=str,
+ default="0.01,0.1",
+ help="comma separated string containing list of costs")
+ parser.add_argument(
+ '--output_path',
+ type=str,
+ default=None,
+ help="path where trained SVM models are saved")
+ parser.add_argument(
+ '--generate_json',
+ type=int,
+ default=0,
+ help="Whether to generate json files for output")
+ if len(sys.argv) == 1:
+ parser.print_help()
+ sys.exit(1)
+
+ opts = parser.parse_args()
+ #logger.info(opts)
+ test_svm(opts)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/benchmarks/svm_tools/test_svm_low_shot.py b/benchmarks/svm_tools/test_svm_low_shot.py
new file mode 100644
index 00000000..75ba81c9
--- /dev/null
+++ b/benchmarks/svm_tools/test_svm_low_shot.py
@@ -0,0 +1,212 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+#
+################################################################################
+"""
+SVM test for low shot image classification.
+
+Relevant transfer tasks: Low-shot Image Classification VOC07 and Places205 low
+shot samples.
+"""
+from __future__ import division
+from __future__ import absolute_import
+from __future__ import unicode_literals
+from __future__ import print_function
+
+import argparse
+import json
+import logging
+import numpy as np
+import os
+import pickle
+import six
+import sys
+
+import svm_helper
+
+# create the logger
+FORMAT = '[%(levelname)s: %(filename)s: %(lineno)4d]: %(message)s'
+logging.basicConfig(level=logging.INFO, format=FORMAT, stream=sys.stdout)
+logger = logging.getLogger(__name__)
+
+
+def load_json(file_path):
+ assert os.path.exists(file_path), "{} does not exist".format(file_path)
+ with open(file_path, 'r') as fp:
+ data = json.load(fp)
+ img_ids = list(data.keys())
+ cls_names = list(data[img_ids[0]].keys())
+ return img_ids, cls_names
+
+
+def save_json_predictions(opts, cost, sample_idx, k_low, features, cls_list,
+ cls_names, img_ids):
+ num_classes = len(cls_list)
+ json_predictions = {}
+ for cls in range(num_classes):
+ suffix = 'sample{}_k{}'.format(sample_idx + 1, k_low)
+ model_file = svm_helper.get_low_shot_output_file(
+ opts, cls, cost, suffix)
+ with open(model_file, 'rb') as fopen:
+ if six.PY2:
+ model = pickle.load(fopen)
+ else:
+ model = pickle.load(fopen, encoding='latin1')
+ prediction = model.decision_function(features)
+ cls_name = cls_names[cls]
+ for idx in range(len(prediction)):
+ img_id = img_ids[idx]
+ if img_id in json_predictions:
+ json_predictions[img_id][cls_name] = prediction[idx]
+ else:
+ out_lbl = {}
+ out_lbl[cls_name] = prediction[idx]
+ json_predictions[img_id] = out_lbl
+
+ output_file = os.path.join(opts.output_path,
+ 'test_{}_json_preds.json'.format(suffix))
+ with open(output_file, 'w') as fp:
+ json.dump(json_predictions, fp)
+ #logger.info('Saved json predictions to: {}'.format(output_file))
+
+
+def test_svm_low_shot(opts):
+ k_values = [int(val) for val in opts.k_values.split(",")]
+ sample_inds = [int(val) for val in opts.sample_inds.split(",")]
+ #logger.info('Testing svm for k-values: {} and sample_inds: {}'.format(
+ # k_values, sample_inds))
+
+ img_ids, cls_names = [], []
+ if opts.generate_json:
+ img_ids, cls_names = load_json(opts.json_targets)
+
+ assert os.path.exists(opts.data_file), "Data file not found. Abort!"
+ # we test the svms on the full test set. Given the test features and the
+ # targets, we test it for various k-values (low-shot), cost values and
+ # 5 independent samples.
+ features, targets = svm_helper.load_input_data(opts.data_file,
+ opts.targets_data_file)
+ # normalize the features: N x 9216 (example shape)
+ features = svm_helper.normalize_features(features)
+
+ # parse the cost values for training the SVM on
+ costs_list = svm_helper.parse_cost_list(opts.costs_list)
+ #logger.info('Testing SVM for costs: {}'.format(costs_list))
+
+ # classes for which SVM testing should be done
+ num_classes, cls_list = svm_helper.get_low_shot_svm_classes(
+ targets, opts.dataset)
+
+ # create the output for per sample, per k-value and per cost.
+ sample_ap_matrices = []
+ for _ in range(len(sample_inds)):
+ ap_matrix = np.zeros((len(k_values), len(costs_list)))
+ sample_ap_matrices.append(ap_matrix)
+
+ # the test goes like this: For a given sample, for a given k-value and a
+ # given cost value, we evaluate the trained svm model for all classes.
+ # After computing over all classes, we get the mean AP value over all
+ # classes. We hence end up with: output = [sample][k_value][cost]
+ for inds in range(len(sample_inds)):
+ sample_idx = sample_inds[inds]
+ for k_idx in range(len(k_values)):
+ k_low = k_values[k_idx]
+ suffix = 'sample{}_k{}'.format(sample_idx + 1, k_low)
+ for cost_idx in range(len(costs_list)):
+ cost = costs_list[cost_idx]
+ local_cost_ap = np.zeros((num_classes, 1))
+ for cls in cls_list:
+ #logger.info(
+ # 'Test sample/k_value/cost/cls: {}/{}/{}/{}'.format(
+ # sample_idx + 1, k_low, cost, cls))
+ model_file = svm_helper.get_low_shot_output_file(
+ opts, cls, cost, suffix)
+ with open(model_file, 'rb') as fopen:
+ if six.PY2:
+ model = pickle.load(fopen)
+ else:
+ model = pickle.load(fopen, encoding='latin1')
+ prediction = model.decision_function(features)
+ eval_preds, eval_cls_labels = svm_helper.get_cls_feats_labels(
+ cls, prediction, targets, opts.dataset)
+ P, R, score, ap = svm_helper.get_precision_recall(
+ eval_cls_labels, eval_preds)
+ local_cost_ap[cls][0] = ap
+ mean_cost_ap = np.mean(local_cost_ap, axis=0)
+ sample_ap_matrices[inds][k_idx][cost_idx] = mean_cost_ap
+ out_k_sample_file = os.path.join(
+ opts.output_path,
+ 'test_ap_sample{}_k{}.npy'.format(sample_idx + 1, k_low))
+ save_data = sample_ap_matrices[inds][k_idx]
+ save_data = save_data.reshape((1, -1))
+ np.save(out_k_sample_file, save_data)
+ #logger.info('Saved sample test k_idx AP to file: {} {}'.format(
+ # out_k_sample_file, save_data.shape))
+ if opts.generate_json:
+ argmax_cls = np.argmax(save_data, axis=1)
+ chosen_cost = costs_list[argmax_cls[0]]
+ #logger.info('chosen cost: {}'.format(chosen_cost))
+ save_json_predictions(opts, chosen_cost, sample_idx, k_low,
+ features, cls_list, cls_names, img_ids)
+ #logger.info('All done!!')
+
+
+def main():
+ parser = argparse.ArgumentParser(description='Low shot SVM model test')
+ parser.add_argument(
+ '--data_file',
+ type=str,
+ default=None,
+ help="Numpy file containing image features and labels")
+ parser.add_argument(
+ '--targets_data_file',
+ type=str,
+ default=None,
+ help="Numpy file containing image labels")
+ parser.add_argument(
+ '--json_targets',
+ type=str,
+ default=None,
+ help="Numpy file containing json targets")
+ parser.add_argument(
+ '--generate_json',
+ type=int,
+ default=0,
+ help="Whether to generate json files for output")
+ parser.add_argument(
+ '--costs_list',
+ type=str,
+ default=
+ "0.0000001,0.000001,0.00001,0.0001,0.001,0.01,0.1,1.0,10.0,100.0",
+ help="comma separated string containing list of costs")
+ parser.add_argument(
+ '--output_path',
+ type=str,
+ default=None,
+ help="path where trained SVM models are saved")
+ parser.add_argument(
+ '--k_values',
+ type=str,
+ default="1,2,4,8,16,32,64,96",
+ help="Low-shot k-values for svm testing. Comma separated")
+ parser.add_argument(
+ '--sample_inds',
+ type=str,
+ default="0,1,2,3,4",
+ help="sample_inds for which to test svm. Comma separated")
+ parser.add_argument(
+ '--dataset', type=str, default="voc", help='voc | places')
+ if len(sys.argv) == 1:
+ parser.print_help()
+ sys.exit(1)
+
+ opts = parser.parse_args()
+ #logger.info(opts)
+ test_svm_low_shot(opts)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/benchmarks/svm_tools/train_svm_kfold.py b/benchmarks/svm_tools/train_svm_kfold.py
new file mode 100644
index 00000000..b3a7f1d2
--- /dev/null
+++ b/benchmarks/svm_tools/train_svm_kfold.py
@@ -0,0 +1,162 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+#
+################################################################################
+"""
+SVM training using 3-fold cross-validation.
+
+Relevant transfer tasks: Image Classification VOC07 and COCO2014.
+"""
+
+from __future__ import division
+from __future__ import absolute_import
+from __future__ import unicode_literals
+from __future__ import print_function
+
+import argparse
+import logging
+import numpy as np
+import os
+import pickle
+import sys
+from tqdm import tqdm
+from sklearn.svm import LinearSVC
+from sklearn.model_selection import cross_val_score
+
+import svm_helper
+
+import time
+
+# create the logger
+FORMAT = '[%(levelname)s: %(filename)s: %(lineno)4d]: %(message)s'
+logging.basicConfig(level=logging.INFO, format=FORMAT, stream=sys.stdout)
+logger = logging.getLogger(__name__)
+
+
+def train_svm(opts):
+ assert os.path.exists(opts.data_file), "Data file not found. Abort!"
+ if not os.path.exists(opts.output_path):
+ os.makedirs(opts.output_path)
+
+ features, targets = svm_helper.load_input_data(opts.data_file,
+ opts.targets_data_file)
+ # normalize the features: N x 9216 (example shape)
+ features = svm_helper.normalize_features(features)
+
+ # parse the cost values for training the SVM on
+ costs_list = svm_helper.parse_cost_list(opts.costs_list)
+ #logger.info('Training SVM for costs: {}'.format(costs_list))
+
+ # classes for which SVM training should be done
+ if opts.cls_list:
+ cls_list = [int(cls) for cls in opts.cls_list.split(",")]
+ else:
+ num_classes = targets.shape[1]
+ cls_list = range(num_classes)
+ #logger.info('Training SVM for classes: {}'.format(cls_list))
+
+ for cls_idx in tqdm(range(len(cls_list))):
+ cls = cls_list[cls_idx]
+ for cost_idx in range(len(costs_list)):
+ start = time.time()
+ cost = costs_list[cost_idx]
+ out_file, ap_out_file = svm_helper.get_svm_train_output_files(
+ cls, cost, opts.output_path)
+ if os.path.exists(out_file) and os.path.exists(ap_out_file):
+ logger.info('SVM model exists: {}'.format(out_file))
+ logger.info('AP file exists: {}'.format(ap_out_file))
+ else:
+ #logger.info('Training model with the cost: {}'.format(cost))
+ clf = LinearSVC(
+ C=cost,
+ class_weight={
+ 1: 2,
+ -1: 1
+ },
+ intercept_scaling=1.0,
+ verbose=0,
+ penalty='l2',
+ loss='squared_hinge',
+ tol=0.0001,
+ dual=True,
+ max_iter=2000,
+ )
+ cls_labels = targets[:, cls].astype(dtype=np.int32, copy=True)
+ # meaning of labels in VOC/COCO original loaded target files:
+ # label 0 = not present, set it to -1 as svm train target
+ # label 1 = present. Make the svm train target labels as -1, 1.
+ cls_labels[np.where(cls_labels == 0)] = -1
+ #num_positives = len(np.where(cls_labels == 1)[0])
+ #num_negatives = len(cls_labels) - num_positives
+
+ #logger.info('cls: {} has +ve: {} -ve: {} ratio: {}'.format(
+ # cls, num_positives, num_negatives,
+ # float(num_positives) / num_negatives)
+ #)
+ #logger.info('features: {} cls_labels: {}'.format(
+ # features.shape, cls_labels.shape))
+ ap_scores = cross_val_score(
+ clf,
+ features,
+ cls_labels,
+ cv=3,
+ scoring='average_precision')
+ clf.fit(features, cls_labels)
+
+ #logger.info('cls: {} cost: {} AP: {} mean:{}'.format(
+ # cls, cost, ap_scores, ap_scores.mean()))
+ #logger.info('Saving cls cost AP to: {}'.format(ap_out_file))
+ np.save(ap_out_file, np.array([ap_scores.mean()]))
+ #logger.info('Saving SVM model to: {}'.format(out_file))
+ with open(out_file, 'wb') as fwrite:
+ pickle.dump(clf, fwrite)
+ print("time: {:.4g} s".format(time.time() - start))
+
+
+def main():
+ parser = argparse.ArgumentParser(description='SVM model training')
+ parser.add_argument(
+ '--data_file',
+ type=str,
+ default=None,
+ help="Numpy file containing image features")
+ parser.add_argument(
+ '--targets_data_file',
+ type=str,
+ default=None,
+ help="Numpy file containing image labels")
+ parser.add_argument(
+ '--output_path',
+ type=str,
+ default=None,
+ help="path where to save the trained SVM models")
+ parser.add_argument(
+ '--costs_list',
+ type=str,
+ default="0.01,0.1",
+ help="comma separated string containing list of costs")
+ parser.add_argument(
+ '--random_seed',
+ type=int,
+ default=100,
+ help="random seed for SVM classifier training")
+
+ parser.add_argument(
+ '--cls_list',
+ type=str,
+ default=None,
+ help="comma separated string list of classes to train")
+ if len(sys.argv) == 1:
+ parser.print_help()
+ sys.exit(1)
+
+ opts = parser.parse_args()
+ #logger.info(opts)
+ train_svm(opts)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/benchmarks/svm_tools/train_svm_kfold_parallel.py b/benchmarks/svm_tools/train_svm_kfold_parallel.py
new file mode 100644
index 00000000..1ffbcb8b
--- /dev/null
+++ b/benchmarks/svm_tools/train_svm_kfold_parallel.py
@@ -0,0 +1,151 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+#
+################################################################################
+"""
+SVM training using 3-fold cross-validation.
+
+Relevant transfer tasks: Image Classification VOC07 and COCO2014.
+"""
+
+from __future__ import division
+from __future__ import absolute_import
+from __future__ import unicode_literals
+from __future__ import print_function
+
+import multiprocessing as mp
+import tqdm
+import argparse
+import logging
+import numpy as np
+import os
+import pickle
+import sys
+from sklearn.svm import LinearSVC
+from sklearn.model_selection import cross_val_score
+
+import svm_helper
+
+
+def task(cls, cost, opts, features, targets):
+ out_file, ap_out_file = svm_helper.get_svm_train_output_files(
+ cls, cost, opts.output_path)
+ if not (os.path.exists(out_file) and os.path.exists(ap_out_file)):
+ clf = LinearSVC(
+ C=cost,
+ class_weight={
+ 1: 2,
+ -1: 1
+ },
+ intercept_scaling=1.0,
+ verbose=0,
+ penalty='l2',
+ loss='squared_hinge',
+ tol=0.0001,
+ dual=True,
+ max_iter=2000,
+ )
+ cls_labels = targets[:, cls].astype(dtype=np.int32, copy=True)
+ cls_labels[np.where(cls_labels == 0)] = -1
+ ap_scores = cross_val_score(
+ clf, features, cls_labels, cv=3, scoring='average_precision')
+ clf.fit(features, cls_labels)
+ np.save(ap_out_file, np.array([ap_scores.mean()]))
+ with open(out_file, 'wb') as fwrite:
+ pickle.dump(clf, fwrite)
+ return 0
+
+
+def mp_helper(args):
+ return task(*args)
+
+
+def train_svm(opts):
+ assert os.path.exists(opts.data_file), "Data file not found. Abort!"
+ if not os.path.exists(opts.output_path):
+ os.makedirs(opts.output_path)
+
+ features, targets = svm_helper.load_input_data(opts.data_file,
+ opts.targets_data_file)
+ # normalize the features: N x 9216 (example shape)
+ features = svm_helper.normalize_features(features)
+
+ # parse the cost values for training the SVM on
+ costs_list = svm_helper.parse_cost_list(opts.costs_list)
+
+ # classes for which SVM training should be done
+ if opts.cls_list:
+ cls_list = [int(cls) for cls in opts.cls_list.split(",")]
+ else:
+ num_classes = targets.shape[1]
+ cls_list = range(num_classes)
+
+ num_task = len(cls_list) * len(costs_list)
+ args_cls = []
+ args_cost = []
+ for cls in cls_list:
+ for cost in costs_list:
+ args_cls.append(cls)
+ args_cost.append(cost)
+ args_opts = [opts] * num_task
+ args_features = [features] * num_task
+ args_targets = [targets] * num_task
+
+ pool = mp.Pool(mp.cpu_count())
+ for _ in tqdm.tqdm(
+ pool.imap_unordered(
+ mp_helper,
+ zip(args_cls, args_cost, args_opts, args_features,
+ args_targets)),
+ total=num_task):
+ pass
+
+
+def main():
+ parser = argparse.ArgumentParser(description='SVM model training')
+ parser.add_argument(
+ '--data_file',
+ type=str,
+ default=None,
+ help="Numpy file containing image features")
+ parser.add_argument(
+ '--targets_data_file',
+ type=str,
+ default=None,
+ help="Numpy file containing image labels")
+ parser.add_argument(
+ '--output_path',
+ type=str,
+ default=None,
+ help="path where to save the trained SVM models")
+ parser.add_argument(
+ '--costs_list',
+ type=str,
+ default="0.01,0.1",
+ help="comma separated string containing list of costs")
+ parser.add_argument(
+ '--random_seed',
+ type=int,
+ default=100,
+ help="random seed for SVM classifier training")
+
+ parser.add_argument(
+ '--cls_list',
+ type=str,
+ default=None,
+ help="comma separated string list of classes to train")
+ if len(sys.argv) == 1:
+ parser.print_help()
+ sys.exit(1)
+
+ opts = parser.parse_args()
+ train_svm(opts)
+
+
+if __name__ == '__main__':
+ main()
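The fan-out above builds num_task-long parallel lists of `opts`, `features`, and `targets` only so that `zip` can hand `imap_unordered` one argument tuple per task. A leaner equivalent, sketched here under the same picklability constraints (top-level functions only), binds the shared arguments with `functools.partial` and enumerates the (class, cost) grid with `itertools.product`:

    import multiprocessing as mp
    from functools import partial
    from itertools import product

    def _star_task(task_fn, opts, features, targets, cls_cost):
        cls, cost = cls_cost
        return task_fn(cls, cost, opts, features, targets)

    def run_tasks(task_fn, cls_list, costs_list, opts, features, targets):
        # avoids building num_task-long Python lists of the shared arguments
        bound = partial(_star_task, task_fn, opts, features, targets)
        with mp.Pool(mp.cpu_count()) as pool:
            for _ in pool.imap_unordered(bound, product(cls_list, costs_list)):
                pass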
diff --git a/benchmarks/svm_tools/train_svm_low_shot.py b/benchmarks/svm_tools/train_svm_low_shot.py
new file mode 100644
index 00000000..b5a0fbb2
--- /dev/null
+++ b/benchmarks/svm_tools/train_svm_low_shot.py
@@ -0,0 +1,144 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+#
+################################################################################
+"""
+Low Shot SVM training.
+
+Relevant transfer tasks: Low-shot Image Classification VOC07 and Places205 low
+shot samples.
+"""
+
+from __future__ import division
+from __future__ import absolute_import
+from __future__ import unicode_literals
+from __future__ import print_function
+
+import argparse
+import logging
+import numpy as np
+import os
+import pickle
+import sys
+from sklearn.svm import LinearSVC
+from tqdm import tqdm
+
+import svm_helper
+
+import time
+
+# create the logger
+FORMAT = '[%(levelname)s: %(filename)s: %(lineno)4d]: %(message)s'
+logging.basicConfig(level=logging.INFO, format=FORMAT, stream=sys.stdout)
+logger = logging.getLogger(__name__)
+
+
+def train_svm_low_shot(opts):
+ assert os.path.exists(opts.data_file), "Data file not found. Abort!"
+ if not os.path.exists(opts.output_path):
+ os.makedirs(opts.output_path)
+
+ features, targets = svm_helper.load_input_data(opts.data_file,
+ opts.targets_data_file)
+ # normalize the features: N x 9216 (example shape)
+ features = svm_helper.normalize_features(features)
+
+ # parse the cost values for training the SVM on
+ costs_list = svm_helper.parse_cost_list(opts.costs_list)
+
+ # classes for which SVM testing should be done
+ num_classes, cls_list = svm_helper.get_low_shot_svm_classes(
+ targets, opts.dataset)
+
+ for cls in tqdm(cls_list):
+ for cost_idx in range(len(costs_list)):
+ start = time.time()
+ cost = costs_list[cost_idx]
+ suffix = '_'.join(
+ opts.targets_data_file.split('/')[-1].split('.')[0].split('_')
+ [-2:])
+ out_file = svm_helper.get_low_shot_output_file(
+ opts, cls, cost, suffix)
+ if os.path.exists(out_file):
+ logger.info('SVM model exists: {}'.format(out_file))
+ else:
+ clf = LinearSVC(
+ C=cost,
+ class_weight={
+ 1: 2,
+ -1: 1
+ },
+ intercept_scaling=1.0,
+ verbose=0,
+ penalty='l2',
+ loss='squared_hinge',
+ tol=0.0001,
+ dual=True,
+ max_iter=2000,
+ )
+ train_feats, train_cls_labels = svm_helper.get_cls_feats_labels(
+ cls, features, targets, opts.dataset)
+ clf.fit(train_feats, train_cls_labels)
+ with open(out_file, 'wb') as fwrite:
+ pickle.dump(clf, fwrite)
+                print("time: {:.4g} s".format(time.time() - start))
+    logger.info('All done!')
+
+
+def main():
+ parser = argparse.ArgumentParser(description='Low-shot SVM model training')
+ parser.add_argument(
+ '--data_file',
+ type=str,
+ default=None,
+ help="Numpy file containing image features")
+ parser.add_argument(
+ '--targets_data_file',
+ type=str,
+ default=None,
+ help="Numpy file containing image labels")
+ parser.add_argument(
+ '--costs_list',
+ type=str,
+ default="0.01,0.1",
+ help="comma separated string containing list of costs")
+ parser.add_argument(
+ '--output_path',
+ type=str,
+ default=None,
+ help="path where to save the trained SVM models")
+ parser.add_argument(
+ '--random_seed',
+ type=int,
+ default=100,
+ help="random seed for SVM classifier training")
+ parser.add_argument(
+ '--dataset', type=str, default="voc", help='voc | places')
+ if len(sys.argv) == 1:
+ parser.print_help()
+ sys.exit(1)
+
+ opts = parser.parse_args()
+
+ train_svm_low_shot(opts)
+
+
+if __name__ == '__main__':
+ main()
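The `suffix` above encodes the low-shot split in the output filename by keeping the last two underscore-separated tokens of the targets filename. A quick worked example (the path is hypothetical; the `*_sample{i}_k{j}.npy` naming is an assumed convention for the low-shot splits):

    targets_data_file = '/path/to/voc07_targets_sample1_k2.npy'  # hypothetical
    suffix = '_'.join(
        targets_data_file.split('/')[-1].split('.')[0].split('_')[-2:])
    print(suffix)  # -> 'sample1_k2'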
diff --git a/benchmarks/svm_tools/train_svm_low_shot_parallel.py b/benchmarks/svm_tools/train_svm_low_shot_parallel.py
new file mode 100644
index 00000000..f3a0843d
--- /dev/null
+++ b/benchmarks/svm_tools/train_svm_low_shot_parallel.py
@@ -0,0 +1,145 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+#
+################################################################################
+"""
+Parallel low-shot SVM training.
+
+Relevant transfer tasks: Low-shot Image Classification VOC07 and Places205 low
+shot samples.
+"""
+
+from __future__ import division
+from __future__ import absolute_import
+from __future__ import unicode_literals
+from __future__ import print_function
+
+import multiprocessing as mp
+import tqdm
+import argparse
+import logging
+import numpy as np
+import os
+import pickle
+import sys
+from sklearn.svm import LinearSVC
+
+import svm_helper
+
+
+def task(cls, cost, opts, features, targets):
+ suffix = '_'.join(
+ opts.targets_data_file.split('/')[-1].split('.')[0].split('_')[-2:])
+ out_file = svm_helper.get_low_shot_output_file(opts, cls, cost, suffix)
+ if not os.path.exists(out_file):
+ clf = LinearSVC(
+ C=cost,
+ class_weight={
+ 1: 2,
+ -1: 1
+ },
+ intercept_scaling=1.0,
+ verbose=0,
+ penalty='l2',
+ loss='squared_hinge',
+ tol=0.0001,
+ dual=True,
+ max_iter=2000,
+ )
+ train_feats, train_cls_labels = svm_helper.get_cls_feats_labels(
+ cls, features, targets, opts.dataset)
+ clf.fit(train_feats, train_cls_labels)
+ with open(out_file, 'wb') as fwrite:
+ pickle.dump(clf, fwrite)
+ return 0
+
+
+def mp_helper(args):
+ return task(*args)
+
+
+def train_svm_low_shot(opts):
+ assert os.path.exists(opts.data_file), "Data file not found. Abort!"
+ if not os.path.exists(opts.output_path):
+ os.makedirs(opts.output_path)
+
+ features, targets = svm_helper.load_input_data(opts.data_file,
+ opts.targets_data_file)
+ # normalize the features: N x 9216 (example shape)
+ features = svm_helper.normalize_features(features)
+
+ # parse the cost values for training the SVM on
+ costs_list = svm_helper.parse_cost_list(opts.costs_list)
+
+ # classes for which SVM testing should be done
+ num_classes, cls_list = svm_helper.get_low_shot_svm_classes(
+ targets, opts.dataset)
+
+ num_task = len(cls_list) * len(costs_list)
+ args_cls = []
+ args_cost = []
+ for cls in cls_list:
+ for cost in costs_list:
+ args_cls.append(cls)
+ args_cost.append(cost)
+ args_opts = [opts] * num_task
+ args_features = [features] * num_task
+ args_targets = [targets] * num_task
+
+ pool = mp.Pool(mp.cpu_count())
+ for _ in tqdm.tqdm(
+ pool.imap_unordered(
+ mp_helper,
+ zip(args_cls, args_cost, args_opts, args_features,
+ args_targets)),
+ total=num_task):
+ pass
+
+
+def main():
+ parser = argparse.ArgumentParser(description='Low-shot SVM model training')
+ parser.add_argument(
+ '--data_file',
+ type=str,
+ default=None,
+ help="Numpy file containing image features")
+ parser.add_argument(
+ '--targets_data_file',
+ type=str,
+ default=None,
+ help="Numpy file containing image labels")
+ parser.add_argument(
+ '--costs_list',
+ type=str,
+ default="0.01,0.1",
+ help="comma separated string containing list of costs")
+ parser.add_argument(
+ '--output_path',
+ type=str,
+ default=None,
+ help="path where to save the trained SVM models")
+ parser.add_argument(
+ '--random_seed',
+ type=int,
+ default=100,
+ help="random seed for SVM classifier training")
+ parser.add_argument(
+ '--dataset', type=str, default="voc", help='voc | places')
+ if len(sys.argv) == 1:
+ parser.print_help()
+ sys.exit(1)
+
+ opts = parser.parse_args()
+ train_svm_low_shot(opts)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/configs/base.py b/configs/base.py
new file mode 100644
index 00000000..ab9923a4
--- /dev/null
+++ b/configs/base.py
@@ -0,0 +1,19 @@
+train_cfg = {}
+test_cfg = {}
+optimizer_config = dict() # grad_clip, coalesce, bucket_size_mb
+# yapf:disable
+log_config = dict(
+ interval=50,
+ hooks=[
+ dict(type='TextLoggerHook'),
+ dict(type='TensorboardLoggerHook')
+ ])
+# yapf:enable
+# runtime settings
+dist_params = dict(backend='nccl')
+cudnn_benchmark = True
+log_level = 'INFO'
+load_from = None
+resume_from = None
+workflow = [('train', 1)]
+prefetch = False
\ No newline at end of file
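Every benchmark config that follows starts from `_base_ = '../../../base.py'` and overrides fields on top of these defaults. A minimal sketch of how such a config is consumed, assuming the mmcv-style loader this layout implies (`Config.fromfile` resolves the `_base_` chain and merges child keys over the base):

    from mmcv import Config

    cfg = Config.fromfile(
        'configs/benchmarks/linear_classification/cifar10/'
        'r18_last_1gpu_cifar10.py')
    print(cfg.optimizer)    # overridden in the child config
    print(cfg.dist_params)  # inherited from base.py: {'backend': 'nccl'}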
diff --git a/configs/benchmarks/linear_classification/cifar10/r18_last_1gpu_cifar10.py b/configs/benchmarks/linear_classification/cifar10/r18_last_1gpu_cifar10.py
new file mode 100644
index 00000000..9e298c1a
--- /dev/null
+++ b/configs/benchmarks/linear_classification/cifar10/r18_last_1gpu_cifar10.py
@@ -0,0 +1,75 @@
+_base_ = '../../../base.py'
+# model settings
+model = dict(
+ type='Classification',
+ pretrained=None,
+ with_sobel=False,
+ backbone=dict( # mmclassification
+ type='ResNet_CIFAR',
+ depth=18,
+ num_stages=4,
+ out_indices=(3,),
+ style='pytorch',
+ frozen_stages=4,
+ ),
+ head=dict(
+ type='ClsHead', with_avg_pool=True, in_channels=512,
+ num_classes=10)) # cifar-10
+# dataset settings
+data_source_cfg = dict(type='Cifar10', root='./data/cifar10/')
+dataset_type = 'ClassificationDataset'
+img_norm_cfg = dict(mean=[0.4914, 0.4822, 0.4465], std=[0.2023, 0.1994, 0.201]) # cifar-10
+
+train_pipeline = [
+ dict(type='RandomCrop', size=32, padding=4),
+ dict(type='RandomHorizontalFlip'),
+ dict(type='ToTensor'),
+ dict(type='Normalize', **img_norm_cfg),
+]
+test_pipeline = [
+ dict(type='ToTensor'),
+ dict(type='Normalize', **img_norm_cfg),
+]
+data = dict(
+ imgs_per_gpu=128, # 1 GPU
+ workers_per_gpu=6,
+ train=dict(
+ type=dataset_type,
+ data_source=dict(split='train', **data_source_cfg),
+ pipeline=train_pipeline),
+ val=dict(
+ type=dataset_type,
+ data_source=dict(split='test', **data_source_cfg),
+ pipeline=test_pipeline),
+ test=dict(
+ type=dataset_type,
+ data_source=dict(split='test', **data_source_cfg),
+ pipeline=test_pipeline))
+
+# additional hooks
+custom_hooks = [
+ dict(
+ type='ValidateHook',
+ dataset=data['val'],
+ initial=True,
+ # initial=False,
+ interval=10,
+ imgs_per_gpu=128,
+ workers_per_gpu=4,
+ eval_param=dict(topk=(1, 5)))
+]
+# optimizer
+# optimizer = dict(type='SGD', lr=30., momentum=0.9, weight_decay=0.) # imagenet MoCo version
+optimizer = dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.) # imagenet
+optimizer_config = dict(grad_clip=None)
+# learning policy
+lr_config = dict(
+ policy='step',
+ step=[60, 80]
+)
+checkpoint_config = dict(interval=100)
+# runtime settings
+total_epochs = 100
+
+# * 1218: CIFAR-10 linear evaluation, size=32, bs128
+# Test: CUDA_VISIBLE_DEVICES=5 PORT=29471 bash benchmarks/dist_train_linear_1gpu.sh configs/benchmarks/linear_classification/cifar10/r18_last_1gpu_cifar10.py ./work_dirs/my_pretrains/
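These linear-classification configs all rely on `frozen_stages=4` to keep the backbone fixed so that only the `ClsHead` is trained. In plain PyTorch terms the effect amounts to roughly the following (a sketch, not the repo's actual freezing hook):

    import torch.nn as nn

    def freeze_backbone(backbone: nn.Module) -> None:
        # linear evaluation: no backbone gradients, BN statistics frozen
        backbone.eval()
        for p in backbone.parameters():
            p.requires_grad = False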
diff --git a/configs/benchmarks/linear_classification/cifar10/r18_rep_cifar10.py b/configs/benchmarks/linear_classification/cifar10/r18_rep_cifar10.py
new file mode 100644
index 00000000..f2ce9ad6
--- /dev/null
+++ b/configs/benchmarks/linear_classification/cifar10/r18_rep_cifar10.py
@@ -0,0 +1,49 @@
+_base_ = '../../../base.py'
+# model settings
+model = dict(
+ type='Representation',
+ pretrained=None,
+ backbone=dict( # mmclassification
+ type='ResNet_CIFAR',
+ depth=18,
+ num_stages=4,
+ out_indices=(3,),
+ style='pytorch'),
+ neck=dict(type='AvgPoolNeck'),
+)
+# dataset settings
+data_source_cfg = dict(type='Cifar10', root='./data/cifar10/')
+dataset_type = 'ClassificationDataset'
+img_norm_cfg = dict(mean=[0.4914, 0.4822, 0.4465], std=[0.2023, 0.1994, 0.201]) # cifar-10
+
+test_pipeline = [
+ dict(type='ToTensor'),
+ dict(type='Normalize', **img_norm_cfg),
+]
+data = dict(
+ imgs_per_gpu=256,
+ workers_per_gpu=8,
+ val=dict(
+ type=dataset_type,
+ data_source=dict(split='test', **data_source_cfg),
+ pipeline=test_pipeline),
+)
+# additional hooks
+custom_hooks = [
+ dict(
+ type='ValidateHook',
+ dataset=data['val'],
+ initial=True,
+ interval=10,
+ imgs_per_gpu=128,
+ workers_per_gpu=8,
+ eval_param=dict(topk=(1, 5)))
+]
+# optimizer
+optimizer = dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0005)
+# learning policy
+lr_config = dict(policy='step', step=[150, 250])
+checkpoint_config = dict(interval=50)
+# runtime settings
+total_epochs = 350
+
diff --git a/configs/benchmarks/linear_classification/cifar10/r50_last_1gpu_cifar10_from_stl10_lr01.py b/configs/benchmarks/linear_classification/cifar10/r50_last_1gpu_cifar10_from_stl10_lr01.py
new file mode 100644
index 00000000..6bc45ea5
--- /dev/null
+++ b/configs/benchmarks/linear_classification/cifar10/r50_last_1gpu_cifar10_from_stl10_lr01.py
@@ -0,0 +1,78 @@
+_base_ = '../../../base.py'
+# model settings
+model = dict(
+ type='Classification',
+ pretrained=None,
+ with_sobel=False,
+ backbone=dict(
+ type='ResNet',
+ depth=50,
+ in_channels=3,
+ out_indices=[4], # 0: conv-1, x: stage-x
+ norm_cfg=dict(type='BN'),
+ frozen_stages=4),
+ head=dict(
+ type='ClsHead', with_avg_pool=True, in_channels=2048,
+ num_classes=10)) # to cifar-10
+# dataset settings
+data_source_cfg = dict(type='Cifar10', root='./data/cifar10/')
+dataset_type = 'ClassificationDataset'
+img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) # imagenet for transfer
+# img_norm_cfg = dict(mean=[0.4914, 0.4822, 0.4465], std=[0.2023, 0.1994, 0.201]) # cifar-10
+resizeto = 64
+train_pipeline = [
+ dict(type='RandomResizedCrop', size=resizeto, scale=[0.2, 1.0]),
+ # dict(type='RandomCrop', size=32, padding=4),
+ dict(type='RandomHorizontalFlip'),
+ dict(type='ToTensor'),
+ dict(type='Normalize', **img_norm_cfg),
+]
+test_pipeline = [
+ dict(type='Resize', size=resizeto),
+ dict(type='ToTensor'),
+ dict(type='Normalize', **img_norm_cfg),
+]
+data = dict(
+ # imgs_per_gpu=64,
+ # workers_per_gpu=8,
+ imgs_per_gpu=128,
+ workers_per_gpu=10,
+ train=dict(
+ type=dataset_type,
+ data_source=dict(split='train', **data_source_cfg),
+ pipeline=train_pipeline),
+ val=dict(
+ type=dataset_type,
+ data_source=dict(split='test', **data_source_cfg),
+ pipeline=test_pipeline),
+ test=dict(
+ type=dataset_type,
+ data_source=dict(split='test', **data_source_cfg),
+ pipeline=test_pipeline))
+# additional hooks
+custom_hooks = [
+ dict(
+ type='ValidateHook',
+ dataset=data['val'],
+ initial=True,
+ # initial=False,
+ interval=10,
+ imgs_per_gpu=128,
+ workers_per_gpu=4,
+ eval_param=dict(topk=(1, 5)))
+]
+# optimizer
+# optimizer = dict(type='SGD', lr=30., momentum=0.9, weight_decay=0.) # imagenet MoCo version
+optimizer = dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.) # imagenet
+optimizer_config = dict(grad_clip=None)
+# learning policy
+lr_config = dict(
+ policy='step',
+ step=[60, 80]
+)
+checkpoint_config = dict(interval=50)
+# runtime settings
+total_epochs = 100
+
+# * 1230: Transfer test from STL10 to CIFAR-10 (linear evaluation), size=64, bs128
+# Test: CUDA_VISIBLE_DEVICES=3 PORT=25717 bash benchmarks/dist_train_linear_1gpu.sh configs/benchmarks/linear_classification/cifar10/r50_last_1gpu_cifar10_from_stl10_lr01.py ./work_dirs/my_pretrains/
diff --git a/configs/benchmarks/linear_classification/cifar100/r18_last_1gpu_cifar100.py b/configs/benchmarks/linear_classification/cifar100/r18_last_1gpu_cifar100.py
new file mode 100644
index 00000000..c354da7e
--- /dev/null
+++ b/configs/benchmarks/linear_classification/cifar100/r18_last_1gpu_cifar100.py
@@ -0,0 +1,79 @@
+_base_ = '../../../base.py'
+# model settings
+model = dict(
+ type='Classification',
+ pretrained=None,
+ with_sobel=False,
+ backbone=dict( # mmclassification
+ type='ResNet_CIFAR',
+ depth=18,
+ num_stages=4,
+ out_indices=(3,),
+ style='pytorch',
+ frozen_stages=4,
+ ),
+ head=dict(
+ type='ClsHead', with_avg_pool=True, in_channels=512,
+ num_classes=100)) # cifar-100
+# dataset settings
+data_source_cfg = dict(type='Cifar100', root='./data/cifar100/')
+dataset_type = 'ClassificationDataset'
+img_norm_cfg = dict(mean=[0.4914, 0.4822, 0.4465], std=[0.2023, 0.1994, 0.201]) # CIFAR-10 stats (reused for CIFAR-100)
+
+train_pipeline = [
+ dict(type='RandomCrop', size=32, padding=4),
+ dict(type='RandomHorizontalFlip'),
+ dict(type='ToTensor'),
+ dict(type='Normalize', **img_norm_cfg),
+]
+test_pipeline = [
+ dict(type='ToTensor'),
+ dict(type='Normalize', **img_norm_cfg),
+]
+data = dict(
+ # imgs_per_gpu=64,
+ # workers_per_gpu=8,
+ imgs_per_gpu=128,
+ workers_per_gpu=8,
+ train=dict(
+ type=dataset_type,
+ data_source=dict(split='train', **data_source_cfg),
+ pipeline=train_pipeline),
+ val=dict(
+ type=dataset_type,
+ data_source=dict(split='test', **data_source_cfg),
+ pipeline=test_pipeline),
+ test=dict(
+ type=dataset_type,
+ data_source=dict(split='test', **data_source_cfg),
+ pipeline=test_pipeline))
+
+# additional hooks
+custom_hooks = [
+ dict(
+ type='ValidateHook',
+ dataset=data['val'],
+ # initial=True,
+ initial=False,
+ interval=10,
+ imgs_per_gpu=128,
+ workers_per_gpu=4,
+ eval_param=dict(topk=(1, 5)))
+]
+# optimizer
+# optimizer = dict(type='SGD', lr=30., momentum=0.9, weight_decay=0.) # imagenet MoCo version
+optimizer = dict(type='SGD', lr=1.0, momentum=0.9, weight_decay=0.) # imagenet-style, [chosen]
+optimizer_config = dict(grad_clip=None)
+# learning policy
+lr_config = dict(
+ policy='step',
+ # step=[60, 80], # 2 step
+ step=[30, 60, 80], # 3 step
+ gamma=0.2,
+)
+checkpoint_config = dict(interval=50)
+# runtime settings
+total_epochs = 100
+
+# * 1224: CIFAR-100 linear evaluation, size=32, bs128. try lr=1.0 + 3 steps
+# Test: CUDA_VISIBLE_DEVICES=0 PORT=25917 bash benchmarks/dist_train_linear_1gpu.sh configs/benchmarks/linear_classification/cifar100/r18_last_1gpu_cifar100.py ./work_dirs/my_pretrains/
diff --git a/configs/benchmarks/linear_classification/cifar100/r18_rep_cifar100.py b/configs/benchmarks/linear_classification/cifar100/r18_rep_cifar100.py
new file mode 100644
index 00000000..dcedae3e
--- /dev/null
+++ b/configs/benchmarks/linear_classification/cifar100/r18_rep_cifar100.py
@@ -0,0 +1,48 @@
+_base_ = '../../../base.py'
+# model settings
+model = dict(
+ type='Representation',
+ pretrained=None,
+ backbone=dict( # mmclassification
+ type='ResNet_CIFAR',
+ depth=18,
+ num_stages=4,
+ out_indices=(3,),
+ style='pytorch'),
+ neck=dict(type='AvgPoolNeck'),
+)
+# dataset settings
+data_source_cfg = dict(type='Cifar100', root='./data/cifar100/')
+dataset_type = 'ClassificationDataset'
+img_norm_cfg = dict(mean=[0.4914, 0.4822, 0.4465], std=[0.2023, 0.1994, 0.201]) # CIFAR-10 stats (reused for CIFAR-100)
+
+test_pipeline = [
+ dict(type='ToTensor'),
+ dict(type='Normalize', **img_norm_cfg),
+]
+data = dict(
+ imgs_per_gpu=256,
+ workers_per_gpu=8,
+ val=dict(
+ type=dataset_type,
+ data_source=dict(split='test', **data_source_cfg),
+ pipeline=test_pipeline),
+)
+# additional hooks
+custom_hooks = [
+ dict(
+ type='ValidateHook',
+ dataset=data['val'],
+ initial=True,
+ interval=10,
+ imgs_per_gpu=128,
+ workers_per_gpu=8,
+ eval_param=dict(topk=(1, 5)))
+]
+# optimizer
+optimizer = dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0005)
+# learning policy
+lr_config = dict(policy='step', step=[150, 250])
+checkpoint_config = dict(interval=50)
+# runtime settings
+total_epochs = 350
diff --git a/configs/benchmarks/linear_classification/cub200/r50_last_2gpu_cub200.py b/configs/benchmarks/linear_classification/cub200/r50_last_2gpu_cub200.py
new file mode 100644
index 00000000..5e90afd3
--- /dev/null
+++ b/configs/benchmarks/linear_classification/cub200/r50_last_2gpu_cub200.py
@@ -0,0 +1,84 @@
+_base_ = '../../../base.py'
+# model settings
+model = dict(
+ type='Classification',
+ pretrained=None,
+ with_sobel=False,
+ backbone=dict(
+ type='ResNet',
+ depth=50,
+ in_channels=3,
+ out_indices=[4], # 0: conv-1, x: stage-x
+ norm_cfg=dict(type='BN'),
+ frozen_stages=4),
+ head=dict(
+ type='ClsHead', with_avg_pool=True, in_channels=2048,
+ num_classes=200)) # CUB-200
+# dataset settings
+data_source_cfg = dict(
+ type='ImageNet',
+ memcached=False,
+ mclient_path='/mnt/lustre/share/memcached_client')
+# test: CUB-200 dataset
+base = "/usr/commondata/public/CUB200/CUB_200/"
+data_train_list = base + 'classification_meta_0/train_labeled.txt' # CUB200 labeled train, 30 per class, 5994
+data_train_root = base + "images"
+data_test_list = base + 'classification_meta_0/test_labeled.txt' # CUB200 labeled test, 30 per class
+data_test_root = base + "images"
+# resize setting
+resizeto = 224
+dataset_type = 'ClassificationDataset'
+img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) # imagenet
+train_pipeline = [
+ dict(type='RandomResizedCrop', size=resizeto),
+ dict(type='RandomHorizontalFlip'),
+ dict(type='ToTensor'),
+ dict(type='Normalize', **img_norm_cfg),
+]
+test_pipeline = [
+ dict(type='Resize', size=256),
+ dict(type='CenterCrop', size=resizeto),
+ dict(type='ToTensor'),
+ dict(type='Normalize', **img_norm_cfg),
+]
+data = dict(
+ # imgs_per_gpu=32, # total 32*8=256, 8GPU linear cls
+ # workers_per_gpu=12,
+ imgs_per_gpu=128, # total 128*2=256, 2GPU linear cls
+ workers_per_gpu=10,
+ train=dict(
+ type=dataset_type,
+ data_source=dict(
+ list_file=data_train_list, root=data_train_root,
+ **data_source_cfg),
+ pipeline=train_pipeline),
+ val=dict(
+ type=dataset_type,
+ data_source=dict(
+ list_file=data_test_list, root=data_test_root, **data_source_cfg),
+ pipeline=test_pipeline))
+# additional hooks
+custom_hooks = [
+ dict(
+ type='ValidateHook',
+ dataset=data['val'],
+ initial=True,
+ interval=10, # 1,
+ imgs_per_gpu=128,
+ workers_per_gpu=8, # 4,
+ eval_param=dict(topk=(1, 5)))
+]
+# optimizer
+optimizer = dict(type='SGD', lr=30., momentum=0.9, weight_decay=0.) # ImageNet MoCo, basic lr
+# optimizer = dict(type='SGD', lr=1.0, momentum=0.9, weight_decay=0.) # STL-10 lr
+# learning policy
+lr_config = dict(
+ policy='step',
+ step=[60, 80]
+)
+checkpoint_config = dict(interval=50)
+# runtime settings
+total_epochs = 100
+
+# * 1203: CUB-200_2011, baseline, size=224, try ImageNet basic lr=30.0
+# Test: CUDA_VISIBLE_DEVICES=0,1 PORT=25003 bash benchmarks/dist_train_linear.sh configs/benchmarks/linear_classification/cub200/r50_last_2gpu_cub200.py ./work_dirs/my_pretrains/
diff --git a/configs/benchmarks/linear_classification/dogs120/r50_last_2gpu_dogs120.py b/configs/benchmarks/linear_classification/dogs120/r50_last_2gpu_dogs120.py
new file mode 100644
index 00000000..13164942
--- /dev/null
+++ b/configs/benchmarks/linear_classification/dogs120/r50_last_2gpu_dogs120.py
@@ -0,0 +1,87 @@
+_base_ = '../../../base.py'
+# model settings
+model = dict(
+ type='Classification',
+ pretrained=None,
+ with_sobel=False,
+ backbone=dict(
+ type='ResNet',
+ depth=50,
+ in_channels=3,
+ out_indices=[4], # 0: conv-1, x: stage-x
+ norm_cfg=dict(type='BN'),
+ frozen_stages=4),
+ head=dict(
+ type='ClsHead', with_avg_pool=True, in_channels=2048,
+ num_classes=120)) # Dogs-120
+# dataset settings
+data_source_cfg = dict(
+ type='ImageNet',
+ memcached=False,
+ mclient_path='/mnt/lustre/share/memcached_client')
+# test: Dogs-120 dataset
+base = "/usr/commondata/public/Dogs120/"
+data_train_list = base + 'classification_meta_0/train_labeled.txt' # Dogs-120 labeled train, 100 per class, 12000
+data_train_root = base + "Images"
+data_test_list = base + 'classification_meta_0/test_labeled.txt' # Dogs-120 labeled test, 100 per class
+data_test_root = base + "Images"
+# resize setting
+resizeto = 224
+dataset_type = 'ClassificationDataset'
+img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) # imagenet
+train_pipeline = [
+ dict(type='RandomResizedCrop', size=resizeto),
+ dict(type='RandomHorizontalFlip'),
+ dict(type='ToTensor'),
+ dict(type='Normalize', **img_norm_cfg),
+]
+test_pipeline = [
+ dict(type='Resize', size=256),
+ dict(type='CenterCrop', size=resizeto),
+ dict(type='ToTensor'),
+ dict(type='Normalize', **img_norm_cfg),
+]
+data = dict(
+ # imgs_per_gpu=32, # total 32*8=256, 8GPU linear cls
+ # workers_per_gpu=12,
+ imgs_per_gpu=128, # total 128*2=256, 2GPU linear cls
+ workers_per_gpu=10,
+ train=dict(
+ type=dataset_type,
+ data_source=dict(
+ list_file=data_train_list, root=data_train_root,
+ **data_source_cfg),
+ pipeline=train_pipeline),
+ val=dict(
+ type=dataset_type,
+ data_source=dict(
+ list_file=data_test_list, root=data_test_root, **data_source_cfg),
+ pipeline=test_pipeline))
+# additional hooks
+custom_hooks = [
+ dict(
+ type='ValidateHook',
+ dataset=data['val'],
+ initial=True,
+ interval=10, # 1,
+ imgs_per_gpu=128,
+ workers_per_gpu=8, # 4,
+ eval_param=dict(topk=(1, 5)))
+]
+# optimizer
+optimizer = dict(type='SGD', lr=30., momentum=0.9, weight_decay=0.) # ImageNet basic lr
+# optimizer = dict(type='SGD', lr=1.0, momentum=0.9, weight_decay=0.) # STL-10 lr
+# learning policy
+lr_config = dict(
+ policy='step',
+ step=[60, 80]
+ # step=[30, 40]
+ # step=[18, 24]
+)
+checkpoint_config = dict(interval=50)
+# runtime settings
+total_epochs = 100
+# total_epochs = 50
+
+# * 1205: Dogs-120, baseline, size=224, try ImageNet basic lr=30.0
+# Test: CUDA_VISIBLE_DEVICES=4,5 PORT=25105 bash benchmarks/dist_train_linear.sh configs/benchmarks/linear_classification/dogs120/r50_last_2gpu_dogs120.py ./work_dirs/my_pretrains/
diff --git a/configs/benchmarks/linear_classification/fmnist/lenet_last_1gpu_fmnist.py b/configs/benchmarks/linear_classification/fmnist/lenet_last_1gpu_fmnist.py
new file mode 100644
index 00000000..a86d8df4
--- /dev/null
+++ b/configs/benchmarks/linear_classification/fmnist/lenet_last_1gpu_fmnist.py
@@ -0,0 +1,49 @@
+_base_ = '../../../base.py'
+# model settings
+model = dict(
+ type='Classification',
+ backbone=dict( # mmclassification
+ type='LeNet5',
+ activation="LeakyReLU",
+ mlp_neck=None,
+ cls_neck=True,
+ ),
+ head=dict(
+ type='ClsHead', with_avg_pool=False, in_channels=84,
+ num_classes=10))
+# dataset settings
+data_source_cfg = dict(type='Fmnist', root='./data/')
+dataset_type = 'ClassificationDataset'
+img_norm_cfg = dict(mean=[0.], std=[1.]) # Fashion-MNIST grayscale
+resizeto = 32
+test_pipeline = [
+ dict(type='Resize', size=resizeto),
+ dict(type='ToTensor'),
+ dict(type='Normalize', **img_norm_cfg),
+]
+data = dict(
+ imgs_per_gpu=100,
+ workers_per_gpu=2,
+ val=dict(
+ type=dataset_type,
+ data_source=dict(split='test', **data_source_cfg),
+ pipeline=test_pipeline),
+)
+# additional hooks
+custom_hooks = [
+ dict(
+ type='ValidateHook',
+ dataset=data['val'],
+ initial=True,
+ interval=10,
+ imgs_per_gpu=128,
+ workers_per_gpu=2,
+ eval_param=dict(topk=(1, 5)))
+]
+# optimizer
+optimizer = dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.)
+# learning policy
+lr_config = dict(policy='step', step=[60, 80])
+checkpoint_config = dict(interval=50)
+# runtime settings
+total_epochs = 100
diff --git a/configs/benchmarks/linear_classification/fmnist/lenet_rep_fmnist.py b/configs/benchmarks/linear_classification/fmnist/lenet_rep_fmnist.py
new file mode 100644
index 00000000..71fdc1f6
--- /dev/null
+++ b/configs/benchmarks/linear_classification/fmnist/lenet_rep_fmnist.py
@@ -0,0 +1,50 @@
+_base_ = '../../../base.py'
+# model settings
+model = dict(
+ type='Representation',
+ backbone=dict( # mmclassification
+ type='LeNet5',
+ activation="LeakyReLU",
+ mlp_neck=None,
+ cls_neck=True,
+ ),
+ neck=None,
+)
+# dataset settings
+data_source_cfg = dict(type='Fmnist', root='./data/')
+
+dataset_type = 'ClassificationDataset'
+img_norm_cfg = dict(mean=[0.], std=[1.]) # Fashion-MNIST grayscale
+
+resizeto = 32
+test_pipeline = [
+ dict(type='Resize', size=resizeto),
+ dict(type='ToTensor'),
+ dict(type='Normalize', **img_norm_cfg),
+]
+data = dict(
+ imgs_per_gpu=5000,
+ workers_per_gpu=2,
+ val=dict(
+ type=dataset_type,
+ data_source=dict(split='test', **data_source_cfg),
+ pipeline=test_pipeline),
+)
+# additional hooks
+custom_hooks = [
+ dict(
+ type='ValidateHook',
+ dataset=data['val'],
+ initial=True,
+ interval=10,
+ imgs_per_gpu=128,
+ workers_per_gpu=4,
+ eval_param=dict(topk=(1, 5)))
+]
+# optimizer
+optimizer = dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0005)
+# learning policy
+lr_config = dict(policy='step', step=[150, 250])
+checkpoint_config = dict(interval=50)
+# runtime settings
+total_epochs = 350
diff --git a/configs/benchmarks/linear_classification/imagenet/official/r50_last.py b/configs/benchmarks/linear_classification/imagenet/official/r50_last.py
new file mode 100644
index 00000000..1d476619
--- /dev/null
+++ b/configs/benchmarks/linear_classification/imagenet/official/r50_last.py
@@ -0,0 +1,76 @@
+_base_ = '../../../base.py'
+# model settings
+model = dict(
+ type='Classification',
+ pretrained=None,
+ with_sobel=False,
+ backbone=dict(
+ type='ResNet',
+ depth=50,
+ in_channels=3,
+ out_indices=[4], # 0: conv-1, x: stage-x
+ norm_cfg=dict(type='BN'),
+ frozen_stages=4),
+ head=dict(
+ type='ClsHead', with_avg_pool=True, in_channels=2048,
+ num_classes=1000))
+# dataset settings
+data_source_cfg = dict(
+ type='ImageNet',
+ memcached=True,
+ mclient_path='/mnt/lustre/share/memcached_client')
+data_train_list = 'data/imagenet/meta/train_labeled.txt'
+data_train_root = 'data/imagenet/train'
+data_test_list = 'data/imagenet/meta/val_labeled.txt'
+data_test_root = 'data/imagenet/val'
+dataset_type = 'ClassificationDataset'
+img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+train_pipeline = [
+ dict(type='RandomResizedCrop', size=224),
+ dict(type='RandomHorizontalFlip'),
+]
+test_pipeline = [
+ dict(type='Resize', size=256),
+ dict(type='CenterCrop', size=224),
+]
+# prefetch
+prefetch = False
+if not prefetch:
+ train_pipeline.extend([dict(type='ToTensor'), dict(type='Normalize', **img_norm_cfg)])
+ test_pipeline.extend([dict(type='ToTensor'), dict(type='Normalize', **img_norm_cfg)])
+data = dict(
+ imgs_per_gpu=32, # total 32*8=256, 8GPU linear cls
+ workers_per_gpu=5,
+ train=dict(
+ type=dataset_type,
+ data_source=dict(
+ list_file=data_train_list, root=data_train_root,
+ **data_source_cfg),
+ pipeline=train_pipeline,
+ prefetch=prefetch),
+ val=dict(
+ type=dataset_type,
+ data_source=dict(
+ list_file=data_test_list, root=data_test_root, **data_source_cfg),
+ pipeline=test_pipeline,
+ prefetch=prefetch))
+# additional hooks
+custom_hooks = [
+ dict(
+ type='ValidateHook',
+ dataset=data['val'],
+ initial=True,
+ interval=1,
+ imgs_per_gpu=128,
+ workers_per_gpu=4,
+ prefetch=prefetch,
+ img_norm_cfg=img_norm_cfg,
+ eval_param=dict(topk=(1, 5)))
+]
+# optimizer
+optimizer = dict(type='SGD', lr=30., momentum=0.9, weight_decay=0.)
+# learning policy
+lr_config = dict(policy='step', step=[60, 80])
+checkpoint_config = dict(interval=10)
+# runtime settings
+total_epochs = 100
diff --git a/configs/benchmarks/linear_classification/imagenet/official/r50_last_sobel.py b/configs/benchmarks/linear_classification/imagenet/official/r50_last_sobel.py
new file mode 100644
index 00000000..ec4f7723
--- /dev/null
+++ b/configs/benchmarks/linear_classification/imagenet/official/r50_last_sobel.py
@@ -0,0 +1,76 @@
+_base_ = '../../../base.py'
+# model settings
+model = dict(
+ type='Classification',
+ pretrained=None,
+ with_sobel=True,
+ backbone=dict(
+ type='ResNet',
+ depth=50,
+ in_channels=2,
+ out_indices=[4], # 0: conv-1, x: stage-x
+ norm_cfg=dict(type='BN'),
+ frozen_stages=4),
+ head=dict(
+ type='ClsHead', with_avg_pool=True, in_channels=2048,
+ num_classes=1000))
+# dataset settings
+data_source_cfg = dict(
+ type='ImageNet',
+ memcached=True,
+ mclient_path='/mnt/lustre/share/memcached_client')
+data_train_list = 'data/imagenet/meta/train_labeled.txt'
+data_train_root = 'data/imagenet/train'
+data_test_list = 'data/imagenet/meta/val_labeled.txt'
+data_test_root = 'data/imagenet/val'
+dataset_type = 'ClassificationDataset'
+img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+train_pipeline = [
+ dict(type='RandomResizedCrop', size=224),
+ dict(type='RandomHorizontalFlip'),
+]
+test_pipeline = [
+ dict(type='Resize', size=256),
+ dict(type='CenterCrop', size=224),
+]
+# prefetch
+prefetch = False
+if not prefetch:
+ train_pipeline.extend([dict(type='ToTensor'), dict(type='Normalize', **img_norm_cfg)])
+ test_pipeline.extend([dict(type='ToTensor'), dict(type='Normalize', **img_norm_cfg)])
+data = dict(
+ imgs_per_gpu=32, # total 32*8=256, 8GPU linear cls
+ workers_per_gpu=5,
+ train=dict(
+ type=dataset_type,
+ data_source=dict(
+ list_file=data_train_list, root=data_train_root,
+ **data_source_cfg),
+ pipeline=train_pipeline,
+ prefetch=prefetch),
+ val=dict(
+ type=dataset_type,
+ data_source=dict(
+ list_file=data_test_list, root=data_test_root, **data_source_cfg),
+ pipeline=test_pipeline,
+ prefetch=prefetch))
+# additional hooks
+custom_hooks = [
+ dict(
+ type='ValidateHook',
+ dataset=data['val'],
+ initial=True,
+ interval=1,
+ imgs_per_gpu=128,
+ workers_per_gpu=4,
+ prefetch=prefetch,
+ img_norm_cfg=img_norm_cfg,
+ eval_param=dict(topk=(1, 5)))
+]
+# optimizer
+optimizer = dict(type='SGD', lr=30., momentum=0.9, weight_decay=0.)
+# learning policy
+lr_config = dict(policy='step', step=[60, 80])
+checkpoint_config = dict(interval=10)
+# runtime settings
+total_epochs = 100
diff --git a/configs/benchmarks/linear_classification/imagenet/official/r50_multihead.py b/configs/benchmarks/linear_classification/imagenet/official/r50_multihead.py
new file mode 100644
index 00000000..54c995d1
--- /dev/null
+++ b/configs/benchmarks/linear_classification/imagenet/official/r50_multihead.py
@@ -0,0 +1,89 @@
+_base_ = '../../../base.py'
+# model settings
+model = dict(
+ type='Classification',
+ pretrained=None,
+ with_sobel=False,
+ backbone=dict(
+ type='ResNet',
+ depth=50,
+ in_channels=3,
+ out_indices=[0, 1, 2, 3, 4], # 0: conv-1, x: stage-x
+ norm_cfg=dict(type='BN'),
+ frozen_stages=4),
+ head=dict(
+ type='MultiClsHead',
+ pool_type='specified',
+ in_indices=[0, 1, 2, 3, 4],
+ with_last_layer_unpool=False,
+ backbone='resnet50',
+ norm_cfg=dict(type='SyncBN', momentum=0.1, affine=False),
+ num_classes=1000))
+# dataset settings
+data_source_cfg = dict(
+ type='ImageNet',
+ memcached=True,
+ mclient_path='/mnt/lustre/share/memcached_client')
+data_train_list = 'data/imagenet/meta/train_labeled.txt'
+data_train_root = 'data/imagenet/train'
+data_test_list = 'data/imagenet/meta/val_labeled.txt'
+data_test_root = 'data/imagenet/val'
+dataset_type = 'ClassificationDataset'
+img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+train_pipeline = [
+ dict(type='RandomResizedCrop', size=224),
+ dict(type='RandomHorizontalFlip'),
+ dict(
+ type='ColorJitter',
+ brightness=0.4,
+ contrast=0.4,
+ saturation=0.4,
+ hue=0.),
+ dict(type='ToTensor'),
+ dict(type='Lighting'),
+ dict(type='Normalize', **img_norm_cfg),
+]
+test_pipeline = [
+ dict(type='Resize', size=256),
+ dict(type='CenterCrop', size=224),
+ dict(type='ToTensor'),
+ dict(type='Normalize', **img_norm_cfg),
+]
+data = dict(
+ imgs_per_gpu=32, # total 32x8=256
+ workers_per_gpu=5,
+ train=dict(
+ type=dataset_type,
+ data_source=dict(
+ list_file=data_train_list, root=data_train_root,
+ **data_source_cfg),
+ pipeline=train_pipeline),
+ val=dict(
+ type=dataset_type,
+ data_source=dict(
+ list_file=data_test_list, root=data_test_root, **data_source_cfg),
+ pipeline=test_pipeline))
+# additional hooks
+custom_hooks = [
+ dict(
+ type='ValidateHook',
+ dataset=data['val'],
+ initial=True,
+ interval=10,
+ imgs_per_gpu=128,
+ workers_per_gpu=4,
+ eval_param=dict(topk=(1, )))
+]
+# optimizer
+optimizer = dict(
+ type='SGD',
+ lr=0.01,
+ momentum=0.9,
+ weight_decay=0.0001,
+ paramwise_options=dict(norm_decay_mult=0.),
+ nesterov=True)
+# learning policy
+lr_config = dict(policy='step', step=[30, 60, 90])
+checkpoint_config = dict(interval=10)
+# runtime settings
+total_epochs = 90
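Unlike the `r50_last` configs, this multihead setup taps conv-1 plus all four stages (`out_indices=[0, 1, 2, 3, 4]`) and attaches one linear classifier per level; `pool_type='specified'` pools each feature map to a fixed size before its classifier. A rough sketch of that idea (channel counts are the standard ResNet-50 ones; the pooled spatial sizes here are illustrative assumptions, not the values MultiClsHead uses):

    import torch.nn as nn

    class MultiLevelLinearProbe(nn.Module):
        # one linear classifier per frozen feature level (sketch)
        def __init__(self, num_classes=1000):
            super().__init__()
            channels = [64, 256, 512, 1024, 2048]  # conv-1, stage-1..4
            pooled = [8, 6, 4, 3, 2]               # illustrative pool sizes
            self.pools = nn.ModuleList(
                nn.AdaptiveAvgPool2d(s) for s in pooled)
            self.fcs = nn.ModuleList(
                nn.Linear(c * s * s, num_classes)
                for c, s in zip(channels, pooled))

        def forward(self, feats):
            # feats: list of 5 feature maps, one per tapped level
            return [fc(pool(f).flatten(1))
                    for f, pool, fc in zip(feats, self.pools, self.fcs)]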
diff --git a/configs/benchmarks/linear_classification/imagenet/official/r50_multihead_sobel.py b/configs/benchmarks/linear_classification/imagenet/official/r50_multihead_sobel.py
new file mode 100644
index 00000000..bc3638cd
--- /dev/null
+++ b/configs/benchmarks/linear_classification/imagenet/official/r50_multihead_sobel.py
@@ -0,0 +1,89 @@
+_base_ = '../../../base.py'
+# model settings
+model = dict(
+ type='Classification',
+ pretrained=None,
+ with_sobel=True,
+ backbone=dict(
+ type='ResNet',
+ depth=50,
+ in_channels=2,
+ out_indices=[0, 1, 2, 3, 4], # 0: conv-1, x: stage-x
+ norm_cfg=dict(type='BN'),
+ frozen_stages=4),
+ head=dict(
+ type='MultiClsHead',
+ pool_type='specified',
+ in_indices=[0, 1, 2, 3, 4],
+ with_last_layer_unpool=False,
+ backbone='resnet50',
+ norm_cfg=dict(type='SyncBN', momentum=0.1, affine=False),
+ num_classes=1000))
+# dataset settings
+data_source_cfg = dict(
+ type='ImageNet',
+ memcached=True,
+ mclient_path='/mnt/lustre/share/memcached_client')
+data_train_list = 'data/imagenet/meta/train_labeled.txt'
+data_train_root = 'data/imagenet/train'
+data_test_list = 'data/imagenet/meta/val_labeled.txt'
+data_test_root = 'data/imagenet/val'
+dataset_type = 'ClassificationDataset'
+img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+train_pipeline = [
+ dict(type='RandomResizedCrop', size=224),
+ dict(type='RandomHorizontalFlip'),
+ dict(
+ type='ColorJitter',
+ brightness=0.4,
+ contrast=0.4,
+ saturation=0.4,
+ hue=0.),
+ dict(type='ToTensor'),
+ dict(type='Lighting'),
+ dict(type='Normalize', **img_norm_cfg),
+]
+test_pipeline = [
+ dict(type='Resize', size=256),
+ dict(type='CenterCrop', size=224),
+ dict(type='ToTensor'),
+ dict(type='Normalize', **img_norm_cfg),
+]
+data = dict(
+ imgs_per_gpu=32, # total 32x8=256
+ workers_per_gpu=5,
+ train=dict(
+ type=dataset_type,
+ data_source=dict(
+ list_file=data_train_list, root=data_train_root,
+ **data_source_cfg),
+ pipeline=train_pipeline),
+ val=dict(
+ type=dataset_type,
+ data_source=dict(
+ list_file=data_test_list, root=data_test_root, **data_source_cfg),
+ pipeline=test_pipeline))
+# additional hooks
+custom_hooks = [
+ dict(
+ type='ValidateHook',
+ dataset=data['val'],
+ initial=True,
+ interval=10,
+ imgs_per_gpu=128,
+ workers_per_gpu=4,
+ eval_param=dict(topk=(1, )))
+]
+# optimizer
+optimizer = dict(
+ type='SGD',
+ lr=0.01,
+ momentum=0.9,
+ weight_decay=0.0001,
+ paramwise_options=dict(norm_decay_mult=0.),
+ nesterov=True)
+# learning policy
+lr_config = dict(policy='step', step=[30, 60, 90])
+checkpoint_config = dict(interval=10)
+# runtime settings
+total_epochs = 90
diff --git a/configs/benchmarks/linear_classification/imagenet/r18_last_1gpu.py b/configs/benchmarks/linear_classification/imagenet/r18_last_1gpu.py
new file mode 100644
index 00000000..125cfa19
--- /dev/null
+++ b/configs/benchmarks/linear_classification/imagenet/r18_last_1gpu.py
@@ -0,0 +1,79 @@
+_base_ = '../../../base.py'
+# model settings
+model = dict(
+ type='Classification',
+ pretrained=None,
+ with_sobel=False,
+ backbone=dict(
+ type='ResNet',
+ depth=18,
+ in_channels=3,
+ out_indices=[4], # 0: conv-1, x: stage-x
+ norm_cfg=dict(type='BN'),
+ frozen_stages=4),
+ head=dict(
+ type='ClsHead', with_avg_pool=True, in_channels=512,
+ num_classes=1000))
+# dataset settings
+data_source_cfg = dict(
+ type='ImageNet',
+ memcached=False,
+ mclient_path='/mnt/lustre/share/memcached_client')
+
+imagenet_base = "/usr/lsy/src/ImageNet/"
+data_train_list = imagenet_base + 'meta/train_labeled_full.txt'
+data_train_root = imagenet_base + 'train'
+data_test_list = imagenet_base + 'meta/val_labeled.txt'
+data_test_root = imagenet_base + 'val/'
+
+dataset_type = 'ClassificationDataset'
+img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+train_pipeline = [
+ dict(type='RandomResizedCrop', size=224),
+ dict(type='RandomHorizontalFlip'),
+ dict(type='ToTensor'),
+ dict(type='Normalize', **img_norm_cfg),
+]
+test_pipeline = [
+ dict(type='Resize', size=256),
+ dict(type='CenterCrop', size=224),
+ dict(type='ToTensor'),
+ dict(type='Normalize', **img_norm_cfg),
+]
+data = dict(
+ imgs_per_gpu=256, # total 256*1=256, 1GPU linear cls
+ workers_per_gpu=8,
+ train=dict(
+ type=dataset_type,
+ data_source=dict(
+ list_file=data_train_list, root=data_train_root,
+ **data_source_cfg),
+ pipeline=train_pipeline),
+ val=dict(
+ type=dataset_type,
+ data_source=dict(
+ list_file=data_test_list, root=data_test_root, **data_source_cfg),
+ pipeline=test_pipeline))
+# additional hooks
+custom_hooks = [
+ dict(
+ type='ValidateHook',
+ dataset=data['val'],
+ initial=False,
+ interval=5,
+ imgs_per_gpu=128,
+ workers_per_gpu=6,
+ eval_param=dict(topk=(1, 5)))
+]
+# optimizer
+optimizer = dict(type='SGD', lr=30., momentum=0.9, weight_decay=0.)
+# learning policy
+lr_config = dict(
+ policy='step',
+ step=[60, 80]
+)
+checkpoint_config = dict(interval=100)
+# runtime settings
+total_epochs = 100
+
+# Test: CUDA_VISIBLE_DEVICES=3 PORT=25010 bash benchmarks/dist_train_linear_1gpu.sh configs/benchmarks/linear_classification/imagenet/r18_last_1gpu.py ./work_dirs/
diff --git a/configs/benchmarks/linear_classification/imagenet/r18_last_2gpu.py b/configs/benchmarks/linear_classification/imagenet/r18_last_2gpu.py
new file mode 100644
index 00000000..c8d14689
--- /dev/null
+++ b/configs/benchmarks/linear_classification/imagenet/r18_last_2gpu.py
@@ -0,0 +1,79 @@
+_base_ = '../../../base.py'
+# model settings
+model = dict(
+ type='Classification',
+ pretrained=None,
+ with_sobel=False,
+ backbone=dict(
+ type='ResNet',
+ depth=18,
+ in_channels=3,
+ out_indices=[4], # 0: conv-1, x: stage-x
+ norm_cfg=dict(type='BN'),
+ frozen_stages=4),
+ head=dict(
+ type='ClsHead', with_avg_pool=True, in_channels=512, num_classes=1000))
+# dataset settings
+data_source_cfg = dict(
+ type='ImageNet',
+ memcached=False,
+ mclient_path='/mnt/lustre/share/memcached_client')
+# ImageNet dataset
+data_root="/data/public_datasets/ILSVRC2012/" # ori
+# data_root = "/data/ImageNet/" # node16
+data_train_list = 'data/meta/imagenet/train_labeled_full.txt'
+data_train_root = data_root + 'train'
+data_test_list = 'data/meta/imagenet/val_labeled.txt'
+data_test_root = "/data/liuzicheng/ImageNet/val/"
+
+dataset_type = 'ClassificationDataset'
+img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+train_pipeline = [
+ dict(type='RandomResizedCrop', size=224),
+ dict(type='RandomHorizontalFlip'),
+ dict(type='ToTensor'),
+ dict(type='Normalize', **img_norm_cfg),
+]
+test_pipeline = [
+ dict(type='Resize', size=256),
+ dict(type='CenterCrop', size=224),
+ dict(type='ToTensor'),
+ dict(type='Normalize', **img_norm_cfg),
+]
+data = dict(
+ imgs_per_gpu=128, # total 128*2=256, 2GPU linear cls
+ workers_per_gpu=4,
+ train=dict(
+ type=dataset_type,
+ data_source=dict(
+ list_file=data_train_list, root=data_train_root,
+ **data_source_cfg),
+ pipeline=train_pipeline),
+ val=dict(
+ type=dataset_type,
+ data_source=dict(
+ list_file=data_test_list, root=data_test_root, **data_source_cfg),
+ pipeline=test_pipeline))
+# additional hooks
+custom_hooks = [
+ dict(
+ type='ValidateHook',
+ dataset=data['val'],
+ initial=False,
+ interval=2,
+ imgs_per_gpu=100,
+ workers_per_gpu=4,
+ eval_param=dict(topk=(1, 5)))
+]
+# optimizer
+optimizer = dict(type='SGD', lr=30., momentum=0.9, weight_decay=0.)
+# learning policy
+lr_config = dict(
+ policy='step',
+ step=[60, 80]
+)
+checkpoint_config = dict(interval=100)
+# runtime settings
+total_epochs = 100
+
+# Test: CUDA_VISIBLE_DEVICES=6,7 PORT=25019 bash benchmarks/dist_train_linear_2gpu.sh configs/benchmarks/linear_classification/imagenet/r18_last_2gpu.py ./work_dirs/
diff --git a/configs/benchmarks/linear_classification/imagenet/r18_last_4gpu.py b/configs/benchmarks/linear_classification/imagenet/r18_last_4gpu.py
new file mode 100644
index 00000000..f7f1645f
--- /dev/null
+++ b/configs/benchmarks/linear_classification/imagenet/r18_last_4gpu.py
@@ -0,0 +1,78 @@
+_base_ = '../../../base.py'
+# model settings
+model = dict(
+ type='Classification',
+ pretrained=None,
+ with_sobel=False,
+ backbone=dict(
+ type='ResNet',
+ depth=18,
+ in_channels=3,
+ out_indices=[4], # 0: conv-1, x: stage-x
+ norm_cfg=dict(type='BN'),
+ frozen_stages=4),
+ head=dict(
+ type='ClsHead', with_avg_pool=True, in_channels=512, num_classes=1000))
+# dataset settings
+data_source_cfg = dict(
+ type='ImageNet',
+ memcached=False,
+ mclient_path='/mnt/lustre/share/memcached_client')
+# ImageNet dataset
+data_root="data/ImageNet/"
+data_train_list = 'data/meta/imagenet/train_labeled_full.txt'
+data_train_root = data_root + 'train'
+data_test_list = 'data/meta/imagenet/val_labeled.txt'
+data_test_root = data_root + "val"
+
+dataset_type = 'ClassificationDataset'
+img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+train_pipeline = [
+ dict(type='RandomResizedCrop', size=224),
+ dict(type='RandomHorizontalFlip'),
+ dict(type='ToTensor'),
+ dict(type='Normalize', **img_norm_cfg),
+]
+test_pipeline = [
+ dict(type='Resize', size=256),
+ dict(type='CenterCrop', size=224),
+ dict(type='ToTensor'),
+ dict(type='Normalize', **img_norm_cfg),
+]
+data = dict(
+ imgs_per_gpu=64, # total 64*4=256, 4GPU linear cls
+ workers_per_gpu=6,
+ train=dict(
+ type=dataset_type,
+ data_source=dict(
+ list_file=data_train_list, root=data_train_root,
+ **data_source_cfg),
+ pipeline=train_pipeline),
+ val=dict(
+ type=dataset_type,
+ data_source=dict(
+ list_file=data_test_list, root=data_test_root, **data_source_cfg),
+ pipeline=test_pipeline))
+# additional hooks
+custom_hooks = [
+ dict(
+ type='ValidateHook',
+ dataset=data['val'],
+ initial=False,
+ interval=1,
+ imgs_per_gpu=100,
+ workers_per_gpu=4,
+ eval_param=dict(topk=(1, 5)))
+]
+# optimizer
+optimizer = dict(type='SGD', lr=30., momentum=0.9, weight_decay=0.)
+# learning policy
+lr_config = dict(
+ policy='step',
+ step=[60, 80]
+)
+checkpoint_config = dict(interval=100)
+# runtime settings
+total_epochs = 100
+
+# Test: CUDA_VISIBLE_DEVICES=0,1,2,3 PORT=25011 bash benchmarks/dist_train_linear_4gpu.sh configs/benchmarks/linear_classification/imagenet/r18_last_4gpu.py ./work_dirs/
diff --git a/configs/benchmarks/linear_classification/imagenet/r18_rep_imagenet.py b/configs/benchmarks/linear_classification/imagenet/r18_rep_imagenet.py
new file mode 100644
index 00000000..43dea2c6
--- /dev/null
+++ b/configs/benchmarks/linear_classification/imagenet/r18_rep_imagenet.py
@@ -0,0 +1,71 @@
+_base_ = '../../../base.py'
+# model settings
+model = dict(
+ type='Representation', # 0802
+ pretrained=None,
+ backbone=dict(
+ type='ResNet',
+ # depth=50,
+ depth=18,
+ in_channels=3,
+ out_indices=[4], # 0: conv-1, x: stage-x
+ norm_cfg=dict(type='BN'),
+ frozen_stages=4),
+    neck=dict(type='AvgPoolNeck'), # 7x7x512 -> 512 for ResNet-18
+)
+# dataset settings
+data_source_cfg = dict(
+ type='ImageNet',
+ memcached=False,
+ mclient_path='/mnt/lustre/share/memcached_client')
+
+# test: 10 class (1300 for each class)
+imagenet_base = "/usr/commondata/public/ImageNet/ILSVRC2012/"
+data_test_list = imagenet_base + 'meta/train_labeled_10class_0123_8081_154155_404_407.txt' # 10 class
+# data_train_list = imagenet_base + 'meta/train_full.txt' # full
+data_test_root = imagenet_base + 'train'
+
+dataset_type = 'ClassificationDataset'
+img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+# img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]) # coco2017
+
+# resizeto = 96
+resizeto = 224
+test_pipeline = [
+ dict(type='Resize', size=resizeto),
+ dict(type='CenterCrop', size=resizeto),
+ dict(type='ToTensor'),
+ dict(type='Normalize', **img_norm_cfg),
+]
+data = dict(
+ # imgs_per_gpu=32, # total 32*8=256, 8GPU linear cls
+ imgs_per_gpu=128,
+ workers_per_gpu=12, # 5,
+ val=dict(
+ type=dataset_type,
+ data_source=dict(
+ list_file=data_test_list, root=data_test_root, **data_source_cfg),
+ pipeline=test_pipeline))
+# additional hooks
+custom_hooks = [
+ dict(
+ type='ValidateHook',
+ dataset=data['val'],
+ initial=True,
+ interval=1,
+ imgs_per_gpu=128,
+ workers_per_gpu=12,
+ eval_param=dict(topk=(1, 5)))
+]
+# optimizer
+optimizer = dict(type='SGD', lr=30., momentum=0.9, weight_decay=0.)
+# learning policy
+lr_config = dict(policy='step', step=[60, 80])
+checkpoint_config = dict(interval=10)
+# runtime settings
+total_epochs = 100
+
+
+# test baseline
+# Test: bash benchmarks/dist_train_linear.sh configs/benchmarks/linear_classification/imagenet/r50_last.py ./pretrains/moco_r50_v2_simclr_neck.pth
+
diff --git a/configs/benchmarks/linear_classification/imagenet/r50_last_2gpu.py b/configs/benchmarks/linear_classification/imagenet/r50_last_2gpu.py
new file mode 100644
index 00000000..b20734dc
--- /dev/null
+++ b/configs/benchmarks/linear_classification/imagenet/r50_last_2gpu.py
@@ -0,0 +1,79 @@
+_base_ = '../../../base.py'
+# model settings
+model = dict(
+ type='Classification',
+ pretrained=None,
+ with_sobel=False,
+ backbone=dict(
+ type='ResNet',
+ depth=50,
+ in_channels=3,
+ out_indices=[4], # 0: conv-1, x: stage-x
+ norm_cfg=dict(type='BN'),
+ frozen_stages=4),
+ head=dict(
+ type='ClsHead', with_avg_pool=True, in_channels=2048,
+ num_classes=1000))
+# dataset settings
+data_source_cfg = dict(
+ type='ImageNet',
+ memcached=False,
+ mclient_path='/mnt/lustre/share/memcached_client')
+data_base = "/usr/lsy/src/ImageNet/"
+data_train_list = data_base + 'meta/train_labeled_full.txt'
+data_train_root = data_base + 'train'
+data_test_list = data_base + 'meta/val_labeled.txt'
+data_test_root = data_base + 'val/'
+dataset_type = 'ClassificationDataset'
+img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+train_pipeline = [
+ dict(type='RandomResizedCrop', size=224),
+ dict(type='RandomHorizontalFlip'),
+ dict(type='ToTensor'),
+ dict(type='Normalize', **img_norm_cfg),
+]
+test_pipeline = [
+ dict(type='Resize', size=256),
+ dict(type='CenterCrop', size=224),
+ dict(type='ToTensor'),
+ dict(type='Normalize', **img_norm_cfg),
+]
+data = dict(
+ # imgs_per_gpu=32, # total 32*8=256, 8GPU linear cls
+ # workers_per_gpu=12,
+ imgs_per_gpu=128, # total 128*2=256, 2GPU linear cls
+ workers_per_gpu=10,
+ train=dict(
+ type=dataset_type,
+ data_source=dict(
+ list_file=data_train_list, root=data_train_root,
+ **data_source_cfg),
+ pipeline=train_pipeline),
+ val=dict(
+ type=dataset_type,
+ data_source=dict(
+ list_file=data_test_list, root=data_test_root, **data_source_cfg),
+ pipeline=test_pipeline))
+# additional hooks
+custom_hooks = [
+ dict(
+ type='ValidateHook',
+ dataset=data['val'],
+ initial=True,
+ interval=10, # 1,
+ imgs_per_gpu=128,
+ workers_per_gpu=12, # 4,
+ eval_param=dict(topk=(1, 5)))
+]
+# optimizer
+optimizer = dict(type='SGD', lr=30., momentum=0.9, weight_decay=0.)
+# learning policy
+lr_config = dict(
+ policy='step',
+ step=[60, 80]
+)
+checkpoint_config = dict(interval=50)
+# runtime settings
+total_epochs = 100
+
+# Test: CUDA_VISIBLE_DEVICES=3,5 PORT=25010 bash benchmarks/dist_train_linear_2gpu.sh configs/benchmarks/linear_classification/imagenet/r50_last_2gpu.py ./work_dirs/
diff --git a/configs/benchmarks/linear_classification/imagenet/r50_last_4gpu.py b/configs/benchmarks/linear_classification/imagenet/r50_last_4gpu.py
new file mode 100644
index 00000000..6b46cfd4
--- /dev/null
+++ b/configs/benchmarks/linear_classification/imagenet/r50_last_4gpu.py
@@ -0,0 +1,83 @@
+_base_ = '../../../base.py'
+# model settings
+model = dict(
+ type='Classification',
+ pretrained=None,
+ with_sobel=False,
+ backbone=dict(
+ type='ResNet',
+ depth=50,
+ in_channels=3,
+ out_indices=[4], # 0: conv-1, x: stage-x
+ norm_cfg=dict(type='BN'),
+ frozen_stages=4),
+ head=dict(
+ type='ClsHead', with_avg_pool=True, in_channels=2048, num_classes=1000))
+# dataset settings
+data_source_cfg = dict(
+ type='ImageNet',
+ memcached=False,
+ mclient_path='/mnt/lustre/share/memcached_client')
+# ImageNet dataset
+data_root="data/ImageNet/"
+data_train_list = 'data/meta/imagenet/train_labeled_full.txt'
+data_train_root = data_root + 'train'
+data_test_list = 'data/meta/imagenet/val_labeled.txt'
+data_test_root = data_root + "val"
+
+dataset_type = 'ClassificationDataset'
+img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+train_pipeline = [
+ dict(type='RandomResizedCrop', size=224),
+ dict(type='RandomHorizontalFlip'),
+ dict(type='ToTensor'),
+ dict(type='Normalize', **img_norm_cfg),
+]
+test_pipeline = [
+ dict(type='Resize', size=256),
+ dict(type='CenterCrop', size=224),
+ dict(type='ToTensor'),
+ dict(type='Normalize', **img_norm_cfg),
+]
+data = dict(
+ # imgs_per_gpu=32, # total 32*8=256, 8GPU linear cls
+ # workers_per_gpu=12,
+ imgs_per_gpu=64, # total 64*4=256
+ workers_per_gpu=8,
+ train=dict(
+ type=dataset_type,
+ data_source=dict(
+ list_file=data_train_list, root=data_train_root,
+ **data_source_cfg),
+ pipeline=train_pipeline),
+ val=dict(
+ type=dataset_type,
+ data_source=dict(
+ list_file=data_test_list, root=data_test_root, **data_source_cfg),
+ pipeline=test_pipeline))
+# additional hooks
+custom_hooks = [
+ dict(
+ type='ValidateHook',
+ dataset=data['val'],
+ initial=False,
+ interval=1,
+ imgs_per_gpu=128,
+ workers_per_gpu=4,
+ eval_param=dict(topk=(1, 5)))
+]
+# optimizer
+optimizer = dict(type='SGD', lr=30., momentum=0.9, weight_decay=0.)
+# learning policy
+lr_config = dict(
+ policy='step',
+ step=[60, 80]
+ # step=[30, 40]
+)
+checkpoint_config = dict(interval=100)
+# runtime settings
+total_epochs = 100
+# total_epochs = 50
+
+# * 1105: ELIS-1030, baseline, size=224
+# Test: CUDA_VISIBLE_DEVICES=4,5,6,7 PORT=25012 bash benchmarks/dist_train_linear_4gpu.sh configs/benchmarks/linear_classification/imagenet/r50_last_4gpu.py ./work_dirs/my_pretrains/
diff --git a/configs/benchmarks/linear_classification/imagenet/r50_rep_imagenet.py b/configs/benchmarks/linear_classification/imagenet/r50_rep_imagenet.py
new file mode 100644
index 00000000..a87e2526
--- /dev/null
+++ b/configs/benchmarks/linear_classification/imagenet/r50_rep_imagenet.py
@@ -0,0 +1,71 @@
+_base_ = '../../../base.py'
+# model settings
+model = dict(
+    type='Representation',
+ pretrained=None,
+ backbone=dict(
+ type='ResNet',
+ depth=50,
+ # depth=18,
+ in_channels=3,
+ out_indices=[4], # 0: conv-1, x: stage-x
+ norm_cfg=dict(type='BN'),
+ frozen_stages=4),
+ neck=dict(type='AvgPoolNeck'), # 7x7x2048 -> 2048
+)
+# dataset settings
+data_source_cfg = dict(
+ type='ImageNet',
+ memcached=False,
+ mclient_path='/mnt/lustre/share/memcached_client')
+
+# test: 10 classes (1300 images per class)
+imagenet_base = "/usr/commondata/public/ImageNet/ILSVRC2012/"
+data_test_list = imagenet_base + 'meta/train_labeled_10class_0123_8081_154155_404_407.txt' # 10 classes
+# data_train_list = imagenet_base + 'meta/train_full.txt' # full set
+data_test_root = imagenet_base + 'train'
+
+dataset_type = 'ClassificationDataset'
+img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+# img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]) # coco2017
+
+# resizeto = 96
+resizeto = 224
+test_pipeline = [
+ dict(type='Resize', size=resizeto),
+ dict(type='CenterCrop', size=resizeto),
+ dict(type='ToTensor'),
+ dict(type='Normalize', **img_norm_cfg),
+]
+data = dict(
+ # imgs_per_gpu=32, # total 32*8=256, 8GPU linear cls
+ imgs_per_gpu=128,
+ workers_per_gpu=12, # 5,
+ val=dict(
+ type=dataset_type,
+ data_source=dict(
+ list_file=data_test_list, root=data_test_root, **data_source_cfg),
+ pipeline=test_pipeline))
+# additional hooks
+custom_hooks = [
+ dict(
+ type='ValidateHook',
+ dataset=data['val'],
+ initial=True,
+ interval=1,
+ imgs_per_gpu=128,
+ workers_per_gpu=12,
+ eval_param=dict(topk=(1, 5)))
+]
+# optimizer
+optimizer = dict(type='SGD', lr=30., momentum=0.9, weight_decay=0.)
+# learning policy
+lr_config = dict(policy='step', step=[60, 80])
+checkpoint_config = dict(interval=10)
+# runtime settings
+total_epochs = 100
+
+
+# test baseline
+# Test: bash benchmarks/dist_train_linear.sh configs/benchmarks/linear_classification/imagenet/r50_rep_imagenet.py ./pretrains/moco_r50_v2_simclr_neck.pth
+
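+# Note (added): 'Representation' configs like this one are evaluation-only:
+# all backbone stages are frozen (frozen_stages=4) and AvgPoolNeck global-
+# average-pools the 7x7x2048 stage-4 feature map into a 2048-d vector for the
+# ValidateHook / feature visualization. The optimizer and lr_config above are
+# presumably inherited boilerplate — no parameters are actually updated.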
diff --git a/configs/benchmarks/linear_classification/mnist/lenet_rep_mnist.py b/configs/benchmarks/linear_classification/mnist/lenet_rep_mnist.py
new file mode 100644
index 00000000..caa64756
--- /dev/null
+++ b/configs/benchmarks/linear_classification/mnist/lenet_rep_mnist.py
@@ -0,0 +1,55 @@
+_base_ = '../../../base.py'
+# model settings
+model = dict(
+ type='Representation',
+ backbone=dict( # mmclassification
+ type='LeNet5',
+ activation="LeakyReLU",
+ mlp_neck=dict(
+ type='NonLinearNeckV1',
+ in_channels=120,
+ hid_channels=120,
+ out_channels=10,
+ activation="LeakyReLU",
+ with_avg_pool=False),
+ cls_neck=None,
+ ),
+ neck=None,
+)
+# dataset settings
+data_source_cfg = dict(type='Mnist', root='./data/')
+dataset_type = 'ClassificationDataset'
+img_norm_cfg = dict(mean=[0.], std=[1.]) # MNIST grayscale
+
+resizeto = 32
+test_pipeline = [
+ dict(type='Resize', size=resizeto),
+ dict(type='ToTensor'),
+ dict(type='Normalize', **img_norm_cfg),
+]
+data = dict(
+ imgs_per_gpu=5000,
+ workers_per_gpu=2,
+ val=dict(
+ type=dataset_type,
+ data_source=dict(split='test', **data_source_cfg),
+ pipeline=test_pipeline),
+)
+# additional hooks
+custom_hooks = [
+ dict(
+ type='ValidateHook',
+ dataset=data['val'],
+ initial=True,
+ interval=10,
+ imgs_per_gpu=128,
+ workers_per_gpu=4,
+ eval_param=dict(topk=(1, 5)))
+]
+# optimizer
+optimizer = dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0005)
+# learning policy
+lr_config = dict(policy='step', step=[150, 250])
+checkpoint_config = dict(interval=50)
+# runtime settings
+total_epochs = 350
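+
+# Note (added): with imgs_per_gpu=5000 the 10k-image MNIST test split is swept
+# in two batches per evaluation. The NonLinearNeckV1 mlp_neck maps LeNet-5's
+# 120-d penultimate features to a 10-d embedding (120 -> 120 -> 10), and that
+# embedding is what the ValidateHook evaluates here.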
diff --git a/configs/benchmarks/linear_classification/pets/r50_last_2gpu_pets.py b/configs/benchmarks/linear_classification/pets/r50_last_2gpu_pets.py
new file mode 100644
index 00000000..0b517a38
--- /dev/null
+++ b/configs/benchmarks/linear_classification/pets/r50_last_2gpu_pets.py
@@ -0,0 +1,90 @@
+_base_ = '../../../base.py'
+# model settings
+model = dict(
+ type='Classification',
+ pretrained=None,
+ with_sobel=False,
+ backbone=dict(
+ type='ResNet',
+ depth=50,
+ in_channels=3,
+ out_indices=[4], # 0: conv-1, x: stage-x
+ norm_cfg=dict(type='BN'),
+ frozen_stages=4),
+ head=dict(
+ type='ClsHead', with_avg_pool=True, in_channels=2048,
+ num_classes=37)) # Pets-37
+# dataset settings
+data_source_cfg = dict(
+ type='ImageNet',
+ memcached=False,
+ mclient_path='/mnt/lustre/share/memcached_client')
+
+# test: Pets-37 dataset
+base = "/usr/commondata/public/Pets37/"
+data_train_list = base + 'classification_meta_0/train_labeled.txt' # Pets-37 labeled train, 3680
+data_train_root = base + "images"
+data_test_list = base + 'classification_meta_0/test_labeled.txt' # Pets-37 labeled test
+data_test_root = base + "images"
+
+# resize setting
+resizeto = 224
+dataset_type = 'ClassificationDataset'
+img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) # imagenet
+train_pipeline = [
+ dict(type='RandomResizedCrop', size=resizeto),
+ dict(type='RandomHorizontalFlip'),
+ dict(type='ToTensor'),
+ dict(type='Normalize', **img_norm_cfg),
+]
+test_pipeline = [
+ # dict(type='Resize', size=resizeto),
+ dict(type='Resize', size=256),
+ dict(type='CenterCrop', size=resizeto),
+ dict(type='ToTensor'),
+ dict(type='Normalize', **img_norm_cfg),
+]
+data = dict(
+ # imgs_per_gpu=32, # total 32*8=256, 8GPU linear cls
+ # workers_per_gpu=12,
+ imgs_per_gpu=128, # total 128*2=256, 2GPU linear cls
+ workers_per_gpu=12,
+ train=dict(
+ type=dataset_type,
+ data_source=dict(
+ list_file=data_train_list, root=data_train_root,
+ **data_source_cfg),
+ pipeline=train_pipeline),
+ val=dict(
+ type=dataset_type,
+ data_source=dict(
+ list_file=data_test_list, root=data_test_root, **data_source_cfg),
+ pipeline=test_pipeline))
+# additional hooks
+custom_hooks = [
+ dict(
+ type='ValidateHook',
+ dataset=data['val'],
+ initial=True,
+ interval=10, # 1,
+ imgs_per_gpu=128,
+ workers_per_gpu=8, # 4,
+ eval_param=dict(topk=(1, 5)))
+]
+# optimizer
+optimizer = dict(type='SGD', lr=30., momentum=0.9, weight_decay=0.) # ImageNet basic lr
+# learning policy
+lr_config = dict(
+ policy='step',
+ step=[60, 80]
+ # step=[30, 40]
+ # step=[18, 24]
+)
+checkpoint_config = dict(interval=50)
+# runtime settings
+total_epochs = 100
+# total_epochs = 50
+
+# try SSL official pretrains
+# * 1205: Pets-37, baseline, size=224, try ImageNet basic lr=30.0
+# Test: CUDA_VISIBLE_DEVICES=6,7 PORT=25005 bash benchmarks/dist_train_linear_2gpu.sh configs/benchmarks/linear_classification/pets/r50_last_2gpu_pets.py ./work_dirs/my_pretrains/
diff --git a/configs/benchmarks/linear_classification/pets/r50_rep_pets.py b/configs/benchmarks/linear_classification/pets/r50_rep_pets.py
new file mode 100644
index 00000000..8beed19f
--- /dev/null
+++ b/configs/benchmarks/linear_classification/pets/r50_rep_pets.py
@@ -0,0 +1,70 @@
+_base_ = '../../../base.py'
+# model settings
+model = dict(
+    type='Representation',
+ pretrained=None,
+ backbone=dict(
+ type='ResNet',
+ depth=50,
+ # depth=18,
+ in_channels=3,
+ out_indices=[4], # 0: conv-1, x: stage-x
+ norm_cfg=dict(type='BN'),
+ frozen_stages=4),
+ neck=dict(type='AvgPoolNeck'), # 7x7x2048 -> 2048
+)
+# dataset settings
+data_source_cfg = dict(
+ type='ImageNet',
+ memcached=False,
+ mclient_path='/mnt/lustre/share/memcached_client')
+
+# test: Pets-37 dataset
+base = "/usr/commondata/public/Pets37/"
+data_train_list = base + 'classification_meta_0/train_labeled.txt' # Pets-37 labeled train, 3680
+data_train_root = base + "images"
+data_test_list = base + 'classification_meta_0/test_labeled.txt' # Pets-37 labeled test
+data_test_root = base + "images"
+
+dataset_type = 'ClassificationDataset'
+img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) # imagenet
+# img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]) # coco2017
+
+resizeto = 224
+test_pipeline = [
+ dict(type='Resize', size=256),
+ dict(type='CenterCrop', size=resizeto),
+ dict(type='ToTensor'),
+ dict(type='Normalize', **img_norm_cfg),
+]
+data = dict(
+ # imgs_per_gpu=32, # total 32*8=256, 8GPU linear cls
+ imgs_per_gpu=128,
+ workers_per_gpu=10,
+ val=dict(
+ type=dataset_type,
+ data_source=dict(
+            list_file=data_test_list, root=data_test_root, **data_source_cfg),
+ pipeline=test_pipeline))
+# additional hooks
+custom_hooks = [
+ dict(
+ type='ValidateHook',
+ dataset=data['val'],
+ initial=True,
+ interval=1,
+ imgs_per_gpu=128,
+ workers_per_gpu=12,
+ eval_param=dict(topk=(1, 5)))
+]
+# optimizer
+optimizer = dict(type='SGD', lr=30., momentum=0.9, weight_decay=0.)
+# learning policy
+lr_config = dict(policy='step', step=[60, 80])
+checkpoint_config = dict(interval=10)
+# runtime settings
+total_epochs = 100
+
+# used for visualizing Pets-37 representations
diff --git a/configs/benchmarks/linear_classification/places205/r50_multihead.py b/configs/benchmarks/linear_classification/places205/r50_multihead.py
new file mode 100644
index 00000000..8826bb41
--- /dev/null
+++ b/configs/benchmarks/linear_classification/places205/r50_multihead.py
@@ -0,0 +1,89 @@
+_base_ = '../../../base.py'
+# model settings
+model = dict(
+ type='Classification',
+ pretrained=None,
+ with_sobel=False,
+ backbone=dict(
+ type='ResNet',
+ depth=50,
+ in_channels=3,
+ out_indices=[0, 1, 2, 3, 4], # 0: conv-1, x: stage-x
+ norm_cfg=dict(type='BN'),
+ frozen_stages=4),
+ head=dict(
+ type='MultiClsHead',
+ pool_type='specified',
+ in_indices=[0, 1, 2, 3, 4],
+ with_last_layer_unpool=False,
+ backbone='resnet50',
+ norm_cfg=dict(type='SyncBN', momentum=0.1, affine=False),
+ num_classes=205))
+# dataset settings
+data_source_cfg = dict(
+ type='Places205',
+ memcached=True,
+ mclient_path='/mnt/lustre/share/memcached_client')
+data_train_list = 'data/places205/meta/train_labeled.txt'
+data_train_root = 'data/places205/train'
+data_test_list = 'data/places205/meta/val_labeled.txt'
+data_test_root = 'data/places205/val'
+dataset_type = 'ClassificationDataset'
+img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+train_pipeline = [
+ dict(type='Resize', size=256),
+ dict(type='CenterCrop', size=256),
+ dict(type='RandomCrop', size=224),
+ dict(type='RandomHorizontalFlip'),
+]
+test_pipeline = [
+ dict(type='Resize', size=256),
+ dict(type='CenterCrop', size=224),
+]
+# prefetch
+prefetch = False
+if not prefetch:
+ train_pipeline.extend([dict(type='ToTensor'), dict(type='Normalize', **img_norm_cfg)])
+ test_pipeline.extend([dict(type='ToTensor'), dict(type='Normalize', **img_norm_cfg)])
+data = dict(
+ imgs_per_gpu=32, # total 32x8=256
+ workers_per_gpu=4,
+ train=dict(
+ type=dataset_type,
+ data_source=dict(
+ list_file=data_train_list, root=data_train_root,
+ **data_source_cfg),
+ pipeline=train_pipeline,
+ prefetch=prefetch),
+ val=dict(
+ type=dataset_type,
+ data_source=dict(
+ list_file=data_test_list, root=data_test_root, **data_source_cfg),
+ pipeline=test_pipeline,
+ prefetch=prefetch))
+# additional hooks
+custom_hooks = [
+ dict(
+ type='ValidateHook',
+ dataset=data['val'],
+ initial=True,
+ interval=10,
+ imgs_per_gpu=32,
+ workers_per_gpu=4,
+ prefetch=prefetch,
+ img_norm_cfg=img_norm_cfg,
+ eval_param=dict(topk=(1, )))
+]
+# optimizer
+optimizer = dict(
+ type='SGD',
+ lr=0.01,
+ momentum=0.9,
+ weight_decay=0.0001,
+ paramwise_options=dict(norm_decay_mult=0.),
+ nesterov=True)
+# learning policy
+lr_config = dict(policy='step', step=[7, 14, 21])
+checkpoint_config = dict(interval=10)
+# runtime settings
+total_epochs = 28
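+
+# Note (added): unlike the single-head configs, MultiClsHead attaches one
+# linear classifier per recorded stage (in_indices matches the backbone's
+# out_indices=[0, 1, 2, 3, 4]), so a single run reports Places205 top-1 for
+# conv1 and all four residual stages — the usual multi-layer linear evaluation.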
diff --git a/configs/benchmarks/linear_classification/places205/r50_multihead_sobel.py b/configs/benchmarks/linear_classification/places205/r50_multihead_sobel.py
new file mode 100644
index 00000000..7e5cd869
--- /dev/null
+++ b/configs/benchmarks/linear_classification/places205/r50_multihead_sobel.py
@@ -0,0 +1,89 @@
+_base_ = '../../../base.py'
+# model settings
+model = dict(
+ type='Classification',
+ pretrained=None,
+ with_sobel=True,
+ backbone=dict(
+ type='ResNet',
+ depth=50,
+ in_channels=2,
+ out_indices=[0, 1, 2, 3, 4], # 0: conv-1, x: stage-x
+ norm_cfg=dict(type='BN'),
+ frozen_stages=4),
+ head=dict(
+ type='MultiClsHead',
+ pool_type='specified',
+ in_indices=[0, 1, 2, 3, 4],
+ with_last_layer_unpool=False,
+ backbone='resnet50',
+ norm_cfg=dict(type='SyncBN', momentum=0.1, affine=False),
+ num_classes=205))
+# dataset settings
+data_source_cfg = dict(
+ type='Places205',
+ memcached=True,
+ mclient_path='/mnt/lustre/share/memcached_client')
+data_train_list = 'data/places205/meta/train_labeled.txt'
+data_train_root = 'data/places205/train'
+data_test_list = 'data/places205/meta/val_labeled.txt'
+data_test_root = 'data/places205/val'
+dataset_type = 'ClassificationDataset'
+img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+train_pipeline = [
+ dict(type='Resize', size=256),
+ dict(type='CenterCrop', size=256),
+ dict(type='RandomCrop', size=224),
+ dict(type='RandomHorizontalFlip'),
+]
+test_pipeline = [
+ dict(type='Resize', size=256),
+ dict(type='CenterCrop', size=224),
+]
+# prefetch
+prefetch = False
+if not prefetch:
+ train_pipeline.extend([dict(type='ToTensor'), dict(type='Normalize', **img_norm_cfg)])
+ test_pipeline.extend([dict(type='ToTensor'), dict(type='Normalize', **img_norm_cfg)])
+data = dict(
+ imgs_per_gpu=32, # total 32x8=256
+ workers_per_gpu=4,
+ train=dict(
+ type=dataset_type,
+ data_source=dict(
+ list_file=data_train_list, root=data_train_root,
+ **data_source_cfg),
+ pipeline=train_pipeline,
+ prefetch=prefetch),
+ val=dict(
+ type=dataset_type,
+ data_source=dict(
+ list_file=data_test_list, root=data_test_root, **data_source_cfg),
+ pipeline=test_pipeline,
+ prefetch=prefetch))
+# additional hooks
+custom_hooks = [
+ dict(
+ type='ValidateHook',
+ dataset=data['val'],
+ initial=True,
+ interval=10,
+ imgs_per_gpu=32,
+ workers_per_gpu=4,
+ prefetch=prefetch,
+ img_norm_cfg=img_norm_cfg,
+ eval_param=dict(topk=(1, )))
+]
+# optimizer
+optimizer = dict(
+ type='SGD',
+ lr=0.01,
+ momentum=0.9,
+ weight_decay=0.0001,
+ paramwise_options=dict(norm_decay_mult=0.),
+ nesterov=True)
+# learning policy
+lr_config = dict(policy='step', step=[7, 14, 21])
+checkpoint_config = dict(interval=10)
+# runtime settings
+total_epochs = 28
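+
+# Note (added): with_sobel=True prepends a fixed Sobel filter that turns the
+# RGB input into 2-channel image gradients, hence in_channels=2 on the
+# backbone; this variant matches pretrainings performed on Sobel input
+# (e.g. DeepCluster-style models).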
diff --git a/configs/benchmarks/linear_classification/stl10/mobilenet_last_1gpu_stl10.py b/configs/benchmarks/linear_classification/stl10/mobilenet_last_1gpu_stl10.py
new file mode 100644
index 00000000..9fe8283e
--- /dev/null
+++ b/configs/benchmarks/linear_classification/stl10/mobilenet_last_1gpu_stl10.py
@@ -0,0 +1,79 @@
+_base_ = '../../../base.py'
+# model settings
+model = dict(
+ type='Classification',
+ pretrained=None,
+ with_sobel=False,
+ backbone=dict(
+ type='MobileNetV2',
+ widen_factor=1.0,
+ frozen_stages=7), # 0-7 stages
+ head=dict(
+ type='ClsHead', with_avg_pool=True, in_channels=1280,
+        num_classes=10))  # STL-10
+# dataset settings
+data_source_cfg = dict(
+ type='ImageNet',
+ memcached=False,
+ mclient_path='/mnt/lustre/share/memcached_client')
+# test: STL-10 dataset
+data_base = "/usr/lsy/src/OpenSelfSup_v1214/"
+data_train_list = data_base + 'data/stl10/meta/train_5k_labeled.txt' # stl10 labeled 5k train
+data_train_root = data_base + 'data/stl10/train/' # using labeled train set
+data_test_list = data_base + 'data/stl10/meta/test_8k_labeled.txt' # stl10 labeled 8k test
+data_test_root = data_base + 'data/stl10/test/' # using labeled test set
+# resize setting
+resizeto = 96
+dataset_type = 'ClassificationDataset'
+img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) # imagenet
+train_pipeline = [
+ dict(type='RandomResizedCrop', size=resizeto),
+ dict(type='RandomHorizontalFlip'),
+ dict(type='ToTensor'),
+ dict(type='Normalize', **img_norm_cfg),
+]
+test_pipeline = [
+ dict(type='Resize', size=resizeto),
+ dict(type='ToTensor'),
+ dict(type='Normalize', **img_norm_cfg),
+]
+data = dict(
+ imgs_per_gpu=256, # total 256*1=256, 1GPU linear cls
+ workers_per_gpu=6,
+ train=dict(
+ type=dataset_type,
+ data_source=dict(
+ list_file=data_train_list, root=data_train_root,
+ **data_source_cfg),
+ pipeline=train_pipeline),
+ val=dict(
+ type=dataset_type,
+ data_source=dict(
+ list_file=data_test_list, root=data_test_root, **data_source_cfg),
+ pipeline=test_pipeline))
+# additional hooks
+custom_hooks = [
+ dict(
+ type='ValidateHook',
+ dataset=data['val'],
+ # initial=True,
+ initial=False,
+ interval=5,
+ imgs_per_gpu=100,
+ workers_per_gpu=4,
+ eval_param=dict(topk=(1, 5)))
+]
+# optimizer
+# optimizer = dict(type='SGD', lr=30., momentum=0.9, weight_decay=0.) # MoCo ImageNet
+optimizer = dict(type='SGD', lr=1.0, momentum=0.9, weight_decay=0.) # [OK]
+# learning policy
+lr_config = dict(
+ policy='step',
+ step=[60, 80]
+)
+checkpoint_config = dict(interval=100)
+# runtime settings
+total_epochs = 100
+
+# * STL-10, baseline, size=96, lr=1.0
+# Test: CUDA_VISIBLE_DEVICES=0 PORT=25530 bash benchmarks/dist_train_linear_1gpu.sh configs/benchmarks/linear_classification/stl10/mobilenet_last_1gpu_stl10.py ./work_dirs/
diff --git a/configs/benchmarks/linear_classification/stl10/mobilenet_rep_stl10.py b/configs/benchmarks/linear_classification/stl10/mobilenet_rep_stl10.py
new file mode 100644
index 00000000..661fe172
--- /dev/null
+++ b/configs/benchmarks/linear_classification/stl10/mobilenet_rep_stl10.py
@@ -0,0 +1,64 @@
+_base_ = '../../../base.py'
+# model settings
+model = dict(
+ type='Representation',
+ pretrained=None,
+ backbone=dict(
+ type='MobileNetV2',
+ widen_factor=1.0,
+ frozen_stages=7, # 0-7 stages
+ ),
+ neck=dict(type='AvgPoolNeck'),
+)
+# dataset settings
+data_source_cfg = dict(
+ type='ImageNet',
+ memcached=False,
+ mclient_path='/mnt/lustre/share/memcached_client')
+# test: STL-10 dataset
+data_base = "/usr/lsy/src/OpenSelfSup_v1214/"
+data_train_list = data_base + 'data/stl10/meta/train_5k_labeled.txt' # stl10 labeled 5k train
+data_train_root = data_base + 'data/stl10/train/' # using labeled train set
+data_test_list = data_base + 'data/stl10/meta/test_8k_labeled.txt' # stl10 labeled 8k test
+data_test_root = data_base + 'data/stl10/test/' # using labeled test set
+
+dataset_type = 'ClassificationDataset'
+img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) # imagenet
+# img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]) # coco2017
+resizeto = 96
+test_pipeline = [
+ dict(type='Resize', size=resizeto),
+ dict(type='CenterCrop', size=resizeto),
+ dict(type='ToTensor'),
+ dict(type='Normalize', **img_norm_cfg),
+]
+data = dict(
+ # imgs_per_gpu=32, # total 32*8=256, 8GPU linear cls
+ imgs_per_gpu=128,
+ workers_per_gpu=10,
+ val=dict(
+ type=dataset_type,
+ data_source=dict(
+ list_file=data_test_list, root=data_test_root, **data_source_cfg),
+ pipeline=test_pipeline))
+# additional hooks
+custom_hooks = [
+ dict(
+ type='ValidateHook',
+ dataset=data['val'],
+ initial=True,
+ interval=1,
+ imgs_per_gpu=128,
+ workers_per_gpu=12,
+ eval_param=dict(topk=(1, 5)))
+]
+# optimizer
+# optimizer = dict(type='SGD', lr=30., momentum=0.9, weight_decay=0.)
+optimizer = dict(type='SGD', lr=1.0, momentum=0.9, weight_decay=0.) # [OK]
+# learning policy
+lr_config = dict(policy='step', step=[60, 80])
+checkpoint_config = dict(interval=10)
+# runtime settings
+total_epochs = 100
+
+# used for visualizing STL-10 representations
diff --git a/configs/benchmarks/linear_classification/stl10/r18/r18_lr1_0_bs256_head1.py b/configs/benchmarks/linear_classification/stl10/r18/r18_lr1_0_bs256_head1.py
new file mode 100644
index 00000000..4681867f
--- /dev/null
+++ b/configs/benchmarks/linear_classification/stl10/r18/r18_lr1_0_bs256_head1.py
@@ -0,0 +1,78 @@
+_base_ = '../../../../base.py'
+# model settings
+model = dict(
+ type='Classification',
+ pretrained=None,
+ with_sobel=False,
+ backbone=dict(
+ type='ResNet',
+ depth=18,
+ in_channels=3,
+ out_indices=[4], # 0: conv-1, x: stage-x
+ norm_cfg=dict(type='BN'),
+ frozen_stages=4),
+ head=dict(
+        type='ClsHead', with_avg_pool=True, in_channels=512, num_classes=10))  # STL-10
+# dataset settings
+data_source_cfg = dict(
+ type='ImageNet',
+ memcached=False,
+ mclient_path='/mnt/lustre/share/memcached_client')
+# test: STL-10 dataset
+data_train_list = 'data/stl10/meta/train_5k_labeled.txt' # stl10 labeled 5k train
+data_train_root = 'data/stl10/train/' # using labeled train set
+data_test_list = 'data/stl10/meta/test_8k_labeled.txt' # stl10 labeled 8k test
+data_test_root = 'data/stl10/test/' # using labeled test set
+# resize setting
+dataset_type = 'ClassificationDataset'
+img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) # imagenet
+train_pipeline = [
+ dict(type='RandomResizedCrop', size=96),
+ dict(type='RandomHorizontalFlip'),
+ dict(type='ToTensor'),
+ dict(type='Normalize', **img_norm_cfg),
+]
+test_pipeline = [
+ dict(type='Resize', size=96),
+ dict(type='ToTensor'),
+ dict(type='Normalize', **img_norm_cfg),
+]
+data = dict(
+ imgs_per_gpu=256, # total 256, 1GPU linear cls
+ workers_per_gpu=4,
+ train=dict(
+ type=dataset_type,
+ data_source=dict(
+ list_file=data_train_list, root=data_train_root,
+ **data_source_cfg),
+ pipeline=train_pipeline),
+ val=dict(
+ type=dataset_type,
+ data_source=dict(
+ list_file=data_test_list, root=data_test_root, **data_source_cfg),
+ pipeline=test_pipeline))
+# additional hooks
+custom_hooks = [
+ dict(
+ type='ValidateHook',
+ dataset=data['val'],
+ initial=False,
+ interval=2,
+ imgs_per_gpu=128,
+ workers_per_gpu=4,
+ eval_param=dict(topk=(1, 5)))
+]
+# optimizer
+# optimizer = dict(type='SGD', lr=30., momentum=0.9, weight_decay=0.) # MoCo ImageNet
+optimizer = dict(type='SGD', lr=1.0, momentum=0.9, weight_decay=0.) # [OK]
+# learning policy
+lr_config = dict(
+ policy='step',
+ step=[60, 80]
+)
+checkpoint_config = dict(interval=200)
+# runtime settings
+total_epochs = 100
+
+# * STL-10, baseline, size=96, lr=1.0
+# Test: CUDA_VISIBLE_DEVICES=6 PORT=25519 bash benchmarks/dist_train_linear_1gpu.sh configs/benchmarks/linear_classification/stl10/r18/r18_lr1_0_bs256_head1.py []
diff --git a/configs/benchmarks/linear_classification/stl10/r18/run_stl10_dist_train_linear.sh b/configs/benchmarks/linear_classification/stl10/r18/run_stl10_dist_train_linear.sh
new file mode 100644
index 00000000..2fa2adc3
--- /dev/null
+++ b/configs/benchmarks/linear_classification/stl10/r18/run_stl10_dist_train_linear.sh
@@ -0,0 +1,35 @@
+#!/usr/bin/env bash
+
+# Usage:
+# bash configs/benchmarks/linear_classification/stl10/r18/run_stl10_dist_train_linear.sh $GPU_id $PORT $WEIGHT.pth
+
+base_path="configs/benchmarks/linear_classification/stl10/r18/"
+exp_cfg0=$base_path"r18_lr0_1_bs256_head1.py"
+exp_cfg1=$base_path"r18_lr1_0_bs256_head1.py"
+exp_cfg2=$base_path"r18_lr10_bs256_head1.py"
+
+set -e
+set -x
+
+GPU_ID=$1
+PORT_id=$2
+WEIGHT=$3
+
+
+if [ "$GPU_ID" == "" ] || [ "$PORT_id" == "" ]; then
+ echo "ERROR: Missing arguments."
+    exit 1
+fi
+
+if [ "$WEIGHT" == "" ]; then
+ echo "train with random init ! ! !"
+ # random init train
+fi
+
+if [ "$WEIGHT" != "" ]; then
+ echo "normal linear-supervised training start..."
+ # normal train with 3 random seeds {0,1,3}
+ CUDA_VISIBLE_DEVICES=$GPU_ID PORT=$PORT_id bash benchmarks/dist_train_linear_1gpu_sd.sh $exp_cfg1 $WEIGHT 0
+ CUDA_VISIBLE_DEVICES=$GPU_ID PORT=$PORT_id bash benchmarks/dist_train_linear_1gpu_sd.sh $exp_cfg1 $WEIGHT 1
+ CUDA_VISIBLE_DEVICES=$GPU_ID PORT=$PORT_id bash benchmarks/dist_train_linear_1gpu_sd.sh $exp_cfg1 $WEIGHT 3
+fi
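+
+# Example invocation (hypothetical weight path, for illustration):
+#   bash configs/benchmarks/linear_classification/stl10/r18/run_stl10_dist_train_linear.sh \
+#       0 29500 work_dirs/selfsup/r18_stl10/final.pth
+# This launches $exp_cfg1 (lr=1.0) three times with random seeds 0, 1 and 3.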
diff --git a/configs/benchmarks/linear_classification/stl10/r18_last_1gpu_stl10.py b/configs/benchmarks/linear_classification/stl10/r18_last_1gpu_stl10.py
new file mode 100644
index 00000000..db186647
--- /dev/null
+++ b/configs/benchmarks/linear_classification/stl10/r18_last_1gpu_stl10.py
@@ -0,0 +1,80 @@
+_base_ = '../../../base.py'
+# model settings
+model = dict(
+ type='Classification',
+ pretrained=None,
+ with_sobel=False,
+ backbone=dict(
+ type='ResNet',
+ depth=18,
+ in_channels=3,
+ out_indices=[4], # 0: conv-1, x: stage-x
+ norm_cfg=dict(type='BN'),
+ frozen_stages=4),
+ head=dict(
+ type='ClsHead', with_avg_pool=True, in_channels=512,
+        num_classes=10))  # STL-10
+# dataset settings
+data_source_cfg = dict(
+ type='ImageNet',
+ memcached=False,
+ mclient_path='/mnt/lustre/share/memcached_client')
+# test: STL-10 dataset
+data_train_list = 'data/stl10/meta/train_5k_labeled.txt' # stl10 labeled 5k train
+data_train_root = 'data/stl10/train/' # using labeled train set
+data_test_list = 'data/stl10/meta/test_8k_labeled.txt' # stl10 labeled 8k test
+data_test_root = 'data/stl10/test/' # using labeled test set
+# resize setting
+dataset_type = 'ClassificationDataset'
+img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) # imagenet
+train_pipeline = [
+ dict(type='RandomResizedCrop', size=96),
+ dict(type='RandomHorizontalFlip'),
+ dict(type='ToTensor'),
+ dict(type='Normalize', **img_norm_cfg),
+]
+test_pipeline = [
+ dict(type='Resize', size=96),
+ dict(type='ToTensor'),
+ dict(type='Normalize', **img_norm_cfg),
+]
+data = dict(
+ imgs_per_gpu=256, # total 256, 1GPU linear cls
+ workers_per_gpu=4,
+ train=dict(
+ type=dataset_type,
+ data_source=dict(
+ list_file=data_train_list, root=data_train_root,
+ **data_source_cfg),
+ pipeline=train_pipeline),
+ val=dict(
+ type=dataset_type,
+ data_source=dict(
+ list_file=data_test_list, root=data_test_root, **data_source_cfg),
+ pipeline=test_pipeline))
+# additional hooks
+custom_hooks = [
+ dict(
+ type='ValidateHook',
+ dataset=data['val'],
+ # initial=True,
+ initial=False,
+ interval=2,
+ imgs_per_gpu=128,
+ workers_per_gpu=4,
+ eval_param=dict(topk=(1, 5)))
+]
+# optimizer
+# optimizer = dict(type='SGD', lr=30., momentum=0.9, weight_decay=0.) # MoCo ImageNet
+optimizer = dict(type='SGD', lr=1.0, momentum=0.9, weight_decay=0.) # [OK]
+# learning policy
+lr_config = dict(
+ policy='step',
+ step=[60, 80]
+)
+checkpoint_config = dict(interval=100)
+# runtime settings
+total_epochs = 100
+
+# * STL-10, baseline, size=96, lr=1.0
+# Test: CUDA_VISIBLE_DEVICES=6 PORT=25519 bash benchmarks/dist_train_linear_1gpu.sh configs/benchmarks/linear_classification/stl10/r18_last_1gpu_stl10.py []
diff --git a/configs/benchmarks/linear_classification/stl10/r18_rep_stl10.py b/configs/benchmarks/linear_classification/stl10/r18_rep_stl10.py
new file mode 100644
index 00000000..8f49541d
--- /dev/null
+++ b/configs/benchmarks/linear_classification/stl10/r18_rep_stl10.py
@@ -0,0 +1,64 @@
+_base_ = '../../../base.py'
+# model settings
+model = dict(
+ type='Representation',
+ pretrained=None,
+ backbone=dict(
+ type='ResNet',
+ depth=18,
+ in_channels=3,
+ out_indices=[4], # 0: conv-1, x: stage-x
+ norm_cfg=dict(type='BN'),
+ frozen_stages=4),
+ neck=dict(type='AvgPoolNeck'), # 7x7x2048 -> 2048
+)
+# dataset settings
+data_source_cfg = dict(
+ type='ImageNet',
+ memcached=False,
+ mclient_path='/mnt/lustre/share/memcached_client')
+# test: STL-10 dataset
+data_train_list = 'data/STL/stl10/meta/train_5k_labeled.txt' # stl10 labeled 5k train
+data_train_root = 'data/STL/stl10/train/' # using labeled train set
+data_test_list = 'data/STL/stl10/meta/test_8k_labeled.txt' # stl10 labeled 8k test
+data_test_root = 'data/STL/stl10/test/' # using labeled test set
+
+dataset_type = 'ClassificationDataset'
+img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) # imagenet
+# img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]) # coco2017
+resizeto = 96
+test_pipeline = [
+ dict(type='Resize', size=resizeto),
+ dict(type='CenterCrop', size=resizeto),
+ dict(type='ToTensor'),
+ dict(type='Normalize', **img_norm_cfg),
+]
+data = dict(
+ # imgs_per_gpu=32, # total 32*8=256, 8GPU linear cls
+ imgs_per_gpu=128,
+ workers_per_gpu=4,
+ val=dict(
+ type=dataset_type,
+ data_source=dict(
+ list_file=data_test_list, root=data_test_root, **data_source_cfg),
+ pipeline=test_pipeline))
+# additional hooks
+custom_hooks = [
+ dict(
+ type='ValidateHook',
+ dataset=data['val'],
+ initial=True,
+ interval=1,
+ imgs_per_gpu=128,
+ workers_per_gpu=12,
+ eval_param=dict(topk=(1, 5)))
+]
+# optimizer
+optimizer = dict(type='SGD', lr=30., momentum=0.9, weight_decay=0.)
+# learning policy
+lr_config = dict(policy='step', step=[60, 80])
+checkpoint_config = dict(interval=10)
+# runtime settings
+total_epochs = 100
+
+# used for visualizing STL-10 representations
diff --git a/configs/benchmarks/linear_classification/stl10/r50_last_1gpu_stl10.py b/configs/benchmarks/linear_classification/stl10/r50_last_1gpu_stl10.py
new file mode 100644
index 00000000..5ece46b3
--- /dev/null
+++ b/configs/benchmarks/linear_classification/stl10/r50_last_1gpu_stl10.py
@@ -0,0 +1,82 @@
+_base_ = '../../../base.py'
+# model settings
+model = dict(
+ type='Classification',
+ pretrained=None,
+ with_sobel=False,
+ backbone=dict(
+ type='ResNet',
+ depth=50,
+ in_channels=3,
+ out_indices=[4], # 0: conv-1, x: stage-x
+ norm_cfg=dict(type='BN'),
+ frozen_stages=4),
+ head=dict(
+ type='ClsHead', with_avg_pool=True, in_channels=2048,
+        num_classes=10))  # STL-10
+# dataset settings
+data_source_cfg = dict(
+ type='ImageNet',
+ memcached=False,
+ mclient_path='/mnt/lustre/share/memcached_client')
+# test: STL-10 dataset
+data_train_list = 'data/stl10/meta/train_5k_labeled.txt' # stl10 labeled 5k train
+data_train_root = 'data/stl10/train/' # using labeled train set
+data_test_list = 'data/stl10/meta/test_8k_labeled.txt' # stl10 labeled 8k test
+data_test_root = 'data/stl10/test/' # using labeled test set
+# resize setting
+resizeto = 96
+dataset_type = 'ClassificationDataset'
+img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) # imagenet
+train_pipeline = [
+ dict(type='RandomResizedCrop', size=resizeto),
+ dict(type='RandomHorizontalFlip'),
+ dict(type='ToTensor'),
+ dict(type='Normalize', **img_norm_cfg),
+]
+test_pipeline = [
+ dict(type='Resize', size=resizeto),
+ dict(type='CenterCrop', size=resizeto),
+ dict(type='ToTensor'),
+ dict(type='Normalize', **img_norm_cfg),
+]
+data = dict(
+ imgs_per_gpu=256, # total 256*1=256, 1GPU linear cls
+ workers_per_gpu=8,
+ train=dict(
+ type=dataset_type,
+ data_source=dict(
+ list_file=data_train_list, root=data_train_root,
+ **data_source_cfg),
+ pipeline=train_pipeline),
+ val=dict(
+ type=dataset_type,
+ data_source=dict(
+ list_file=data_test_list, root=data_test_root, **data_source_cfg),
+ pipeline=test_pipeline))
+# additional hooks
+custom_hooks = [
+ dict(
+ type='ValidateHook',
+ dataset=data['val'],
+ # initial=True,
+ initial=False,
+ interval=10,
+ imgs_per_gpu=128,
+ workers_per_gpu=4,
+ eval_param=dict(topk=(1, 5)))
+]
+# optimizer
+# optimizer = dict(type='SGD', lr=30., momentum=0.9, weight_decay=0.) # MoCo ImageNet
+optimizer = dict(type='SGD', lr=1.0, momentum=0.9, weight_decay=0.) # [OK]
+# learning policy
+lr_config = dict(
+ policy='step',
+ step=[60, 80]
+)
+checkpoint_config = dict(interval=50)
+# runtime settings
+total_epochs = 100
+
+# * STL-10, baseline, size=96, lr=1.0
+# Test: CUDA_VISIBLE_DEVICES=3 PORT=25530 bash benchmarks/dist_train_linear_1gpu.sh configs/benchmarks/linear_classification/stl10/r50_last_1gpu_stl10.py ./work_dirs/my_pretrains/stl10_baseline/
diff --git a/configs/benchmarks/linear_classification/stl10/r50_rep_stl10.py b/configs/benchmarks/linear_classification/stl10/r50_rep_stl10.py
new file mode 100644
index 00000000..1f4eeab9
--- /dev/null
+++ b/configs/benchmarks/linear_classification/stl10/r50_rep_stl10.py
@@ -0,0 +1,64 @@
+_base_ = '../../../base.py'
+# model settings
+model = dict(
+ type='Representation',
+ pretrained=None,
+ backbone=dict(
+ type='ResNet',
+ depth=50,
+ in_channels=3,
+ out_indices=[4], # 0: conv-1, x: stage-x
+ norm_cfg=dict(type='BN'),
+ frozen_stages=4),
+ neck=dict(type='AvgPoolNeck'), # 7x7x2048 -> 2048
+)
+# dataset settings
+data_source_cfg = dict(
+ type='ImageNet',
+ memcached=False,
+ mclient_path='/mnt/lustre/share/memcached_client')
+# test: STL-10 dataset
+data_train_list = 'data/stl10/meta/train_5k_labeled.txt' # stl10 labeled 5k train
+data_train_root = 'data/stl10/train/' # using labeled train set
+data_test_list = 'data/stl10/meta/test_8k_labeled.txt' # stl10 labeled 8k test
+data_test_root = 'data/stl10/test/' # using labeled test set
+
+dataset_type = 'ClassificationDataset'
+img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) # imagenet
+# img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]) # coco2017
+resizeto = 96
+test_pipeline = [
+ dict(type='Resize', size=resizeto),
+ dict(type='CenterCrop', size=resizeto),
+ dict(type='ToTensor'),
+ dict(type='Normalize', **img_norm_cfg),
+]
+data = dict(
+ # imgs_per_gpu=32, # total 32*8=256, 8GPU linear cls
+ imgs_per_gpu=128,
+ workers_per_gpu=10, # 5,
+ val=dict(
+ type=dataset_type,
+ data_source=dict(
+ list_file=data_test_list, root=data_test_root, **data_source_cfg),
+ pipeline=test_pipeline))
+# additional hooks
+custom_hooks = [
+ dict(
+ type='ValidateHook',
+ dataset=data['val'],
+ initial=True,
+ interval=1,
+ imgs_per_gpu=128,
+ workers_per_gpu=12,
+ eval_param=dict(topk=(1, 5)))
+]
+# optimizer
+optimizer = dict(type='SGD', lr=30., momentum=0.9, weight_decay=0.)
+# learning policy
+lr_config = dict(policy='step', step=[60, 80])
+checkpoint_config = dict(interval=10)
+# runtime settings
+total_epochs = 100
+
+# used for visualizing STL-10 representations
diff --git a/configs/benchmarks/linear_classification/tiny_imagenet/r18_last_1gpu_tiny.py b/configs/benchmarks/linear_classification/tiny_imagenet/r18_last_1gpu_tiny.py
new file mode 100644
index 00000000..d5a1cd9b
--- /dev/null
+++ b/configs/benchmarks/linear_classification/tiny_imagenet/r18_last_1gpu_tiny.py
@@ -0,0 +1,80 @@
+_base_ = '../../../base.py'
+# model settings
+model = dict(
+ type='Classification',
+ pretrained=None,
+ with_sobel=False,
+ backbone=dict(
+ type='ResNet',
+ depth=18,
+ in_channels=3,
+ out_indices=[4], # 0: conv-1, x: stage-x
+ norm_cfg=dict(type='BN'),
+ frozen_stages=4),
+ head=dict(
+ type='ClsHead', with_avg_pool=True, in_channels=512,
+ num_classes=200)) # Tiny ImageNet
+# dataset settings
+data_source_cfg = dict(
+ type='ImageNet',
+ memcached=False,
+ mclient_path='/mnt/lustre/share/memcached_client')
+# Tiny ImageNet
+data_train_list = './data/TinyImagenet200/meta/train_labeled.txt' # labeled train, 100k
+data_train_root = './data/TinyImagenet200/train/'
+data_test_list = './data/TinyImagenet200/meta/val_labeled.txt' # labeled val, 10k
+data_test_root = './data/TinyImagenet200/val/'
+# resize setting
+resizeto = 64
+dataset_type = 'ClassificationDataset'
+img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) # imagenet
+train_pipeline = [
+ dict(type='RandomResizedCrop', size=resizeto),
+ dict(type='RandomHorizontalFlip'),
+ dict(type='ToTensor'),
+ dict(type='Normalize', **img_norm_cfg),
+]
+test_pipeline = [
+ dict(type='Resize', size=resizeto),
+ dict(type='ToTensor'),
+ dict(type='Normalize', **img_norm_cfg),
+]
+data = dict(
+ imgs_per_gpu=256, # total 256*1=256, 1GPU linear cls
+ workers_per_gpu=12,
+ train=dict(
+ type=dataset_type,
+ data_source=dict(
+ list_file=data_train_list, root=data_train_root,
+ **data_source_cfg),
+ pipeline=train_pipeline),
+ val=dict(
+ type=dataset_type,
+ data_source=dict(
+ list_file=data_test_list, root=data_test_root, **data_source_cfg),
+ pipeline=test_pipeline))
+# additional hooks
+custom_hooks = [
+ dict(
+ type='ValidateHook',
+ dataset=data['val'],
+ initial=True,
+ interval=10, # 1,
+ imgs_per_gpu=128,
+ workers_per_gpu=8, # 4,
+ eval_param=dict(topk=(1, 5)))
+]
+# optimizer
+# optimizer = dict(type='SGD', lr=30., momentum=0.9, weight_decay=0.) # ImageNet baseline
+optimizer = dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.) # [OK]
+# learning policy
+lr_config = dict(
+ policy='step',
+ step=[60, 80]
+)
+checkpoint_config = dict(interval=50)
+# runtime settings
+total_epochs = 100
+
+# * Tiny ImageNet, baseline, size=64
+# Test: CUDA_VISIBLE_DEVICES=0 PORT=25027 bash benchmarks/dist_train_linear_1gpu.sh configs/benchmarks/linear_classification/tiny_imagenet/r18_last_1gpu_tiny.py ./work_dirs/my_pretrains/
diff --git a/configs/benchmarks/linear_classification/tiny_imagenet/r18_rep_tiny_imagenet.py b/configs/benchmarks/linear_classification/tiny_imagenet/r18_rep_tiny_imagenet.py
new file mode 100644
index 00000000..9eac0bb0
--- /dev/null
+++ b/configs/benchmarks/linear_classification/tiny_imagenet/r18_rep_tiny_imagenet.py
@@ -0,0 +1,66 @@
+_base_ = '../../../base.py'
+# model settings
+model = dict(
+ type='Representation',
+ pretrained=None,
+ backbone=dict(
+ type='ResNet',
+ depth=18,
+ in_channels=3,
+ out_indices=[4], # 0: conv-1, x: stage-x
+ norm_cfg=dict(type='BN'),
+ frozen_stages=4),
+ neck=dict(type='AvgPoolNeck'),
+)
+# dataset settings
+data_source_cfg = dict(
+ type='ImageNet',
+ memcached=False,
+ mclient_path='/mnt/lustre/share/memcached_client')
+# Tiny ImageNet
+data_train_list = './data/TinyImagenet200/meta/train_unlabeled.txt' # unlabeled train, 100k
+data_train_root = './data/TinyImagenet200/train/'
+# data_test_list = './data/TinyImagenet200/meta/val_labeled.txt' # labeled val, 10k
+# data_test_root = './data/TinyImagenet200/val/'
+data_test_list = './data/TinyImagenet200/meta/train_20class_labeled.txt' # labeled 20-class train subset
+data_test_root = './data/TinyImagenet200/train/'
+
+dataset_type = 'ClassificationDataset'
+img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) # imagenet
+
+resizeto = 64
+test_pipeline = [
+ dict(type='Resize', size=resizeto),
+ dict(type='CenterCrop', size=resizeto),
+ dict(type='ToTensor'),
+ dict(type='Normalize', **img_norm_cfg),
+]
+data = dict(
+ imgs_per_gpu=128,
+ workers_per_gpu=12, # 5,
+ val=dict(
+ type=dataset_type,
+ data_source=dict(
+ list_file=data_test_list, root=data_test_root,
+ **data_source_cfg),
+ pipeline=test_pipeline))
+# additional hooks
+custom_hooks = [
+ dict(
+ type='ValidateHook',
+ dataset=data['val'],
+ initial=True,
+ interval=1,
+ imgs_per_gpu=128,
+ workers_per_gpu=12,
+ eval_param=dict(topk=(1, 5)))
+]
+# optimizer
+optimizer = dict(type='SGD', lr=30., momentum=0.9, weight_decay=0.)
+# learning policy
+lr_config = dict(policy='step', step=[60, 80])
+checkpoint_config = dict(interval=10)
+# runtime settings
+total_epochs = 100
+
+# used for visualizing Tiny ImageNet representations
diff --git a/configs/benchmarks/semi_classification/imagenet_10percent/base.py b/configs/benchmarks/semi_classification/imagenet_10percent/base.py
new file mode 100644
index 00000000..48e29a60
--- /dev/null
+++ b/configs/benchmarks/semi_classification/imagenet_10percent/base.py
@@ -0,0 +1,66 @@
+_base_ = '../../../base.py'
+# model settings
+model = dict(
+ type='Classification',
+ pretrained=None,
+ backbone=dict(
+ type='ResNet',
+ depth=50,
+ out_indices=[4], # 0: conv-1, x: stage-x
+ norm_cfg=dict(type='SyncBN')),
+ head=dict(
+ type='ClsHead', with_avg_pool=True, in_channels=2048,
+ num_classes=1000))
+# dataset settings
+data_source_cfg = dict(
+ type='ImageNet',
+ memcached=True,
+ mclient_path='/mnt/lustre/share/memcached_client')
+data_train_list = 'data/imagenet/meta/train_labeled_10percent.txt'
+data_train_root = 'data/imagenet/train'
+data_test_list = 'data/imagenet/meta/val_labeled.txt'
+data_test_root = 'data/imagenet/val'
+dataset_type = 'ClassificationDataset'
+img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+train_pipeline = [
+ dict(type='RandomResizedCrop', size=224),
+ dict(type='RandomHorizontalFlip'),
+ dict(type='ToTensor'),
+ dict(type='Normalize', **img_norm_cfg),
+]
+test_pipeline = [
+ dict(type='Resize', size=256),
+ dict(type='CenterCrop', size=224),
+ dict(type='ToTensor'),
+ dict(type='Normalize', **img_norm_cfg),
+]
+data = dict(
+ imgs_per_gpu=64, # total 256
+ workers_per_gpu=2,
+ train=dict(
+ type=dataset_type,
+ data_source=dict(
+ list_file=data_train_list, root=data_train_root,
+ **data_source_cfg),
+ pipeline=train_pipeline),
+ val=dict(
+ type=dataset_type,
+ data_source=dict(
+ list_file=data_test_list, root=data_test_root, **data_source_cfg),
+ pipeline=test_pipeline))
+# additional hooks
+custom_hooks = [
+ dict(
+ type='ValidateHook',
+ dataset=data['val'],
+ initial=False,
+ interval=20,
+ imgs_per_gpu=32,
+ workers_per_gpu=2,
+ eval_param=dict(topk=(1, 5)))
+]
+# learning policy
+lr_config = dict(policy='step', step=[12, 16], gamma=0.2)
+checkpoint_config = dict(interval=20)
+# runtime settings
+total_epochs = 20
diff --git a/configs/benchmarks/semi_classification/imagenet_10percent/r50_lr0_001_head1.py b/configs/benchmarks/semi_classification/imagenet_10percent/r50_lr0_001_head1.py
new file mode 100644
index 00000000..5bd55efc
--- /dev/null
+++ b/configs/benchmarks/semi_classification/imagenet_10percent/r50_lr0_001_head1.py
@@ -0,0 +1,4 @@
+_base_ = 'base.py'
+# optimizer
+optimizer = dict(type='SGD', lr=0.001, momentum=0.9, weight_decay=0.0001,
+ paramwise_options={'\Ahead.': dict(lr_mult=1)})
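+# Note (added): each key of paramwise_options is matched as a regex against
+# parameter names (OpenSelfSup convention, which this repo appears to follow),
+# so '\Ahead.' selects parameters whose name starts with "head." — the
+# classifier head trains at lr * lr_mult while the backbone keeps the base lr.
+# Illustration:
+#   import re
+#   re.search('\Ahead.', 'head.fc_cls.weight')     # match -> lr_mult applied
+#   re.search('\Ahead.', 'backbone.conv1.weight')  # None  -> base lr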
diff --git a/configs/benchmarks/semi_classification/imagenet_10percent/r50_lr0_001_head10.py b/configs/benchmarks/semi_classification/imagenet_10percent/r50_lr0_001_head10.py
new file mode 100644
index 00000000..9dc9a79e
--- /dev/null
+++ b/configs/benchmarks/semi_classification/imagenet_10percent/r50_lr0_001_head10.py
@@ -0,0 +1,4 @@
+_base_ = 'base.py'
+# optimizer
+optimizer = dict(type='SGD', lr=0.001, momentum=0.9, weight_decay=0.0001,
+ paramwise_options={'\Ahead.': dict(lr_mult=10)})
diff --git a/configs/benchmarks/semi_classification/imagenet_10percent/r50_lr0_001_head100.py b/configs/benchmarks/semi_classification/imagenet_10percent/r50_lr0_001_head100.py
new file mode 100644
index 00000000..a1d324d2
--- /dev/null
+++ b/configs/benchmarks/semi_classification/imagenet_10percent/r50_lr0_001_head100.py
@@ -0,0 +1,4 @@
+_base_ = 'base.py'
+# optimizer
+optimizer = dict(type='SGD', lr=0.001, momentum=0.9, weight_decay=0.0001,
+ paramwise_options={'\Ahead.': dict(lr_mult=100)})
diff --git a/configs/benchmarks/semi_classification/imagenet_10percent/r50_lr0_01_head1.py b/configs/benchmarks/semi_classification/imagenet_10percent/r50_lr0_01_head1.py
new file mode 100644
index 00000000..c3553048
--- /dev/null
+++ b/configs/benchmarks/semi_classification/imagenet_10percent/r50_lr0_01_head1.py
@@ -0,0 +1,4 @@
+_base_ = 'base.py'
+# optimizer
+optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001,
+ paramwise_options={'\Ahead.': dict(lr_mult=1)})
diff --git a/configs/benchmarks/semi_classification/imagenet_10percent/r50_lr0_01_head10.py b/configs/benchmarks/semi_classification/imagenet_10percent/r50_lr0_01_head10.py
new file mode 100644
index 00000000..7d5bedaa
--- /dev/null
+++ b/configs/benchmarks/semi_classification/imagenet_10percent/r50_lr0_01_head10.py
@@ -0,0 +1,4 @@
+_base_ = 'base.py'
+# optimizer
+optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001,
+ paramwise_options={'\Ahead.': dict(lr_mult=10)})
diff --git a/configs/benchmarks/semi_classification/imagenet_10percent/r50_lr0_01_head100.py b/configs/benchmarks/semi_classification/imagenet_10percent/r50_lr0_01_head100.py
new file mode 100644
index 00000000..6b696915
--- /dev/null
+++ b/configs/benchmarks/semi_classification/imagenet_10percent/r50_lr0_01_head100.py
@@ -0,0 +1,4 @@
+_base_ = 'base.py'
+# optimizer
+optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001,
+ paramwise_options={'\Ahead.': dict(lr_mult=100)})
diff --git a/configs/benchmarks/semi_classification/imagenet_10percent/r50_lr0_01_head1_sobel.py b/configs/benchmarks/semi_classification/imagenet_10percent/r50_lr0_01_head1_sobel.py
new file mode 100644
index 00000000..53cefa4b
--- /dev/null
+++ b/configs/benchmarks/semi_classification/imagenet_10percent/r50_lr0_01_head1_sobel.py
@@ -0,0 +1,71 @@
+_base_ = '../../../base.py'
+# model settings
+model = dict(
+ type='Classification',
+ pretrained=None,
+ with_sobel=True,
+ backbone=dict(
+ type='ResNet',
+ depth=50,
+ in_channels=2,
+ out_indices=[4], # 0: conv-1, x: stage-x
+ norm_cfg=dict(type='SyncBN')),
+ head=dict(
+ type='ClsHead', with_avg_pool=True, in_channels=2048,
+ num_classes=1000))
+# dataset settings
+data_source_cfg = dict(
+ type='ImageNet',
+ memcached=True,
+ mclient_path='/mnt/lustre/share/memcached_client')
+data_train_list = 'data/imagenet/meta/train_labeled_10percent.txt'
+data_train_root = 'data/imagenet/train'
+data_test_list = 'data/imagenet/meta/val_labeled.txt'
+data_test_root = 'data/imagenet/val'
+dataset_type = 'ClassificationDataset'
+img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+train_pipeline = [
+ dict(type='RandomResizedCrop', size=224),
+ dict(type='RandomHorizontalFlip'),
+ dict(type='ToTensor'),
+ dict(type='Normalize', **img_norm_cfg),
+]
+test_pipeline = [
+ dict(type='Resize', size=256),
+ dict(type='CenterCrop', size=224),
+ dict(type='ToTensor'),
+ dict(type='Normalize', **img_norm_cfg),
+]
+data = dict(
+ imgs_per_gpu=64, # total 256
+ workers_per_gpu=2,
+ train=dict(
+ type=dataset_type,
+ data_source=dict(
+ list_file=data_train_list, root=data_train_root,
+ **data_source_cfg),
+ pipeline=train_pipeline),
+ val=dict(
+ type=dataset_type,
+ data_source=dict(
+ list_file=data_test_list, root=data_test_root, **data_source_cfg),
+ pipeline=test_pipeline))
+# additional hooks
+custom_hooks = [
+ dict(
+ type='ValidateHook',
+ dataset=data['val'],
+ initial=False,
+ interval=20,
+ imgs_per_gpu=32,
+ workers_per_gpu=2,
+ eval_param=dict(topk=(1, 5)))
+]
+# optimizer
+optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001,
+ paramwise_options={'\Ahead.': dict(lr_mult=1)})
+# learning policy
+lr_config = dict(policy='step', step=[12, 16], gamma=0.2)
+checkpoint_config = dict(interval=20)
+# runtime settings
+total_epochs = 20
diff --git a/configs/benchmarks/semi_classification/imagenet_10percent/r50_lr0_1_head1.py b/configs/benchmarks/semi_classification/imagenet_10percent/r50_lr0_1_head1.py
new file mode 100644
index 00000000..414d07e8
--- /dev/null
+++ b/configs/benchmarks/semi_classification/imagenet_10percent/r50_lr0_1_head1.py
@@ -0,0 +1,4 @@
+_base_ = 'base.py'
+# optimizer
+optimizer = dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0001,
+ paramwise_options={'\Ahead.': dict(lr_mult=1)})
diff --git a/configs/benchmarks/semi_classification/imagenet_10percent/r50_lr0_1_head10.py b/configs/benchmarks/semi_classification/imagenet_10percent/r50_lr0_1_head10.py
new file mode 100644
index 00000000..9b8c6f1c
--- /dev/null
+++ b/configs/benchmarks/semi_classification/imagenet_10percent/r50_lr0_1_head10.py
@@ -0,0 +1,4 @@
+_base_ = 'base.py'
+# optimizer
+optimizer = dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0001,
+ paramwise_options={'\Ahead.': dict(lr_mult=10)})
diff --git a/configs/benchmarks/semi_classification/imagenet_10percent/r50_lr0_1_head100.py b/configs/benchmarks/semi_classification/imagenet_10percent/r50_lr0_1_head100.py
new file mode 100644
index 00000000..f7cc627a
--- /dev/null
+++ b/configs/benchmarks/semi_classification/imagenet_10percent/r50_lr0_1_head100.py
@@ -0,0 +1,4 @@
+_base_ = 'base.py'
+# optimizer
+optimizer = dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0001,
+ paramwise_options={'\Ahead.': dict(lr_mult=100)})
diff --git a/configs/benchmarks/semi_classification/imagenet_1percent/base.py b/configs/benchmarks/semi_classification/imagenet_1percent/base.py
new file mode 100644
index 00000000..83a38a04
--- /dev/null
+++ b/configs/benchmarks/semi_classification/imagenet_1percent/base.py
@@ -0,0 +1,72 @@
+_base_ = '../../../base.py'
+# model settings
+model = dict(
+ type='Classification',
+ pretrained=None,
+ backbone=dict(
+ type='ResNet',
+ depth=50,
+ out_indices=[4], # 0: conv-1, x: stage-x
+ norm_cfg=dict(type='SyncBN')),
+ head=dict(
+ type='ClsHead', with_avg_pool=True, in_channels=2048,
+ num_classes=1000))
+# dataset settings
+data_source_cfg = dict(
+ type='ImageNet',
+ memcached=True,
+ mclient_path='/mnt/lustre/share/memcached_client')
+data_train_list = 'data/imagenet/meta/train_labeled_1percent.txt'
+data_train_root = 'data/imagenet/train'
+data_test_list = 'data/imagenet/meta/val_labeled.txt'
+data_test_root = 'data/imagenet/val'
+dataset_type = 'ClassificationDataset'
+img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+train_pipeline = [
+ dict(type='RandomResizedCrop', size=224),
+ dict(type='RandomHorizontalFlip'),
+ dict(type='ToTensor'),
+ dict(type='Normalize', **img_norm_cfg),
+]
+test_pipeline = [
+ dict(type='Resize', size=256),
+ dict(type='CenterCrop', size=224),
+ dict(type='ToTensor'),
+ dict(type='Normalize', **img_norm_cfg),
+]
+data = dict(
+ imgs_per_gpu=64, # total 256
+ workers_per_gpu=2,
+ train=dict(
+ type=dataset_type,
+ data_source=dict(
+ list_file=data_train_list, root=data_train_root,
+ **data_source_cfg),
+ pipeline=train_pipeline),
+ val=dict(
+ type=dataset_type,
+ data_source=dict(
+ list_file=data_test_list, root=data_test_root, **data_source_cfg),
+ pipeline=test_pipeline))
+# additional hooks
+custom_hooks = [
+ dict(
+ type='ValidateHook',
+ dataset=data['val'],
+ initial=False,
+ interval=20,
+ imgs_per_gpu=32,
+ workers_per_gpu=2,
+ eval_param=dict(topk=(1, 5)))
+]
+# learning policy
+lr_config = dict(policy='step', step=[12, 16], gamma=0.2)
+checkpoint_config = dict(interval=20)
+log_config = dict(
+ interval=10,
+ hooks=[
+ dict(type='TextLoggerHook'),
+ dict(type='TensorboardLoggerHook')
+ ])
+# runtime settings
+total_epochs = 20
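+
+# Note (added): unlike the linear-probe configs, nothing is frozen here — the
+# whole ResNet-50 is fine-tuned on the 1% labeled split for 20 epochs. The
+# sibling r50_lr*_head*.py variants only override the optimizer, sweeping the
+# base lr over {0.001, 0.01, 0.1} and the head lr_mult over {1, 10, 100}.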
diff --git a/configs/benchmarks/semi_classification/imagenet_1percent/r50_lr0_001_head1.py b/configs/benchmarks/semi_classification/imagenet_1percent/r50_lr0_001_head1.py
new file mode 100644
index 00000000..16bc7988
--- /dev/null
+++ b/configs/benchmarks/semi_classification/imagenet_1percent/r50_lr0_001_head1.py
@@ -0,0 +1,4 @@
+_base_ = 'base.py'
+# optimizer
+optimizer = dict(type='SGD', lr=0.001, momentum=0.9, weight_decay=0.0005,
+ paramwise_options={'\Ahead.': dict(lr_mult=1)})
diff --git a/configs/benchmarks/semi_classification/imagenet_1percent/r50_lr0_001_head10.py b/configs/benchmarks/semi_classification/imagenet_1percent/r50_lr0_001_head10.py
new file mode 100644
index 00000000..f16c5269
--- /dev/null
+++ b/configs/benchmarks/semi_classification/imagenet_1percent/r50_lr0_001_head10.py
@@ -0,0 +1,4 @@
+_base_ = 'base.py'
+# optimizer
+optimizer = dict(type='SGD', lr=0.001, momentum=0.9, weight_decay=0.0005,
+ paramwise_options={'\Ahead.': dict(lr_mult=10)})
diff --git a/configs/benchmarks/semi_classification/imagenet_1percent/r50_lr0_001_head100.py b/configs/benchmarks/semi_classification/imagenet_1percent/r50_lr0_001_head100.py
new file mode 100644
index 00000000..e7e4355d
--- /dev/null
+++ b/configs/benchmarks/semi_classification/imagenet_1percent/r50_lr0_001_head100.py
@@ -0,0 +1,4 @@
+_base_ = 'base.py'
+# optimizer
+optimizer = dict(type='SGD', lr=0.001, momentum=0.9, weight_decay=0.0005,
+ paramwise_options={'\Ahead.': dict(lr_mult=100)})
diff --git a/configs/benchmarks/semi_classification/imagenet_1percent/r50_lr0_01_head1.py b/configs/benchmarks/semi_classification/imagenet_1percent/r50_lr0_01_head1.py
new file mode 100644
index 00000000..dfb6c97f
--- /dev/null
+++ b/configs/benchmarks/semi_classification/imagenet_1percent/r50_lr0_01_head1.py
@@ -0,0 +1,4 @@
+_base_ = 'base.py'
+# optimizer
+optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005,
+ paramwise_options={'\Ahead.': dict(lr_mult=1)})
diff --git a/configs/benchmarks/semi_classification/imagenet_1percent/r50_lr0_01_head10.py b/configs/benchmarks/semi_classification/imagenet_1percent/r50_lr0_01_head10.py
new file mode 100644
index 00000000..a8fe6d76
--- /dev/null
+++ b/configs/benchmarks/semi_classification/imagenet_1percent/r50_lr0_01_head10.py
@@ -0,0 +1,4 @@
+_base_ = 'base.py'
+# optimizer
+optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005,
+ paramwise_options={'\Ahead.': dict(lr_mult=10)})
diff --git a/configs/benchmarks/semi_classification/imagenet_1percent/r50_lr0_01_head100.py b/configs/benchmarks/semi_classification/imagenet_1percent/r50_lr0_01_head100.py
new file mode 100644
index 00000000..12a80442
--- /dev/null
+++ b/configs/benchmarks/semi_classification/imagenet_1percent/r50_lr0_01_head100.py
@@ -0,0 +1,4 @@
+_base_ = 'base.py'
+# optimizer
+optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005,
+ paramwise_options={'\Ahead.': dict(lr_mult=100)})
diff --git a/configs/benchmarks/semi_classification/imagenet_1percent/r50_lr0_01_head1_sobel.py b/configs/benchmarks/semi_classification/imagenet_1percent/r50_lr0_01_head1_sobel.py
new file mode 100644
index 00000000..ed16a61f
--- /dev/null
+++ b/configs/benchmarks/semi_classification/imagenet_1percent/r50_lr0_01_head1_sobel.py
@@ -0,0 +1,77 @@
+_base_ = '../../../base.py'
+# model settings
+model = dict(
+ type='Classification',
+ pretrained=None,
+ with_sobel=True,
+ backbone=dict(
+ type='ResNet',
+ depth=50,
+ in_channels=2,
+ out_indices=[4], # 0: conv-1, x: stage-x
+ norm_cfg=dict(type='SyncBN')),
+ head=dict(
+ type='ClsHead', with_avg_pool=True, in_channels=2048,
+ num_classes=1000))
+# dataset settings
+data_source_cfg = dict(
+ type='ImageNet',
+ memcached=True,
+ mclient_path='/mnt/lustre/share/memcached_client')
+data_train_list = 'data/imagenet/meta/train_labeled_1percent.txt'
+data_train_root = 'data/imagenet/train'
+data_test_list = 'data/imagenet/meta/val_labeled.txt'
+data_test_root = 'data/imagenet/val'
+dataset_type = 'ClassificationDataset'
+img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+train_pipeline = [
+ dict(type='RandomResizedCrop', size=224),
+ dict(type='RandomHorizontalFlip'),
+ dict(type='ToTensor'),
+ dict(type='Normalize', **img_norm_cfg),
+]
+test_pipeline = [
+ dict(type='Resize', size=256),
+ dict(type='CenterCrop', size=224),
+ dict(type='ToTensor'),
+ dict(type='Normalize', **img_norm_cfg),
+]
+data = dict(
+ imgs_per_gpu=64, # total 256
+ workers_per_gpu=2,
+ train=dict(
+ type=dataset_type,
+ data_source=dict(
+ list_file=data_train_list, root=data_train_root,
+ **data_source_cfg),
+ pipeline=train_pipeline),
+ val=dict(
+ type=dataset_type,
+ data_source=dict(
+ list_file=data_test_list, root=data_test_root, **data_source_cfg),
+ pipeline=test_pipeline))
+# additional hooks
+custom_hooks = [
+ dict(
+ type='ValidateHook',
+ dataset=data['val'],
+ initial=False,
+ interval=20,
+ imgs_per_gpu=32,
+ workers_per_gpu=2,
+ eval_param=dict(topk=(1, 5)))
+]
+# optimizer
+optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005,
+ paramwise_options={'\Ahead.': dict(lr_mult=1)})
+# learning policy
+lr_config = dict(policy='step', step=[12, 16], gamma=0.2)
+checkpoint_config = dict(interval=20)
+log_config = dict(
+ interval=10,
+ hooks=[
+ dict(type='TextLoggerHook'),
+ dict(type='TensorboardLoggerHook')
+ ])
+# runtime settings
+total_epochs = 20
diff --git a/configs/benchmarks/semi_classification/imagenet_1percent/r50_lr0_1_head1.py b/configs/benchmarks/semi_classification/imagenet_1percent/r50_lr0_1_head1.py
new file mode 100644
index 00000000..9c469652
--- /dev/null
+++ b/configs/benchmarks/semi_classification/imagenet_1percent/r50_lr0_1_head1.py
@@ -0,0 +1,4 @@
+_base_ = 'base.py'
+# optimizer
+optimizer = dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0005,
+ paramwise_options={'\Ahead.': dict(lr_mult=1)})
diff --git a/configs/benchmarks/semi_classification/imagenet_1percent/r50_lr0_1_head10.py b/configs/benchmarks/semi_classification/imagenet_1percent/r50_lr0_1_head10.py
new file mode 100644
index 00000000..97d2be11
--- /dev/null
+++ b/configs/benchmarks/semi_classification/imagenet_1percent/r50_lr0_1_head10.py
@@ -0,0 +1,4 @@
+_base_ = 'base.py'
+# optimizer
+optimizer = dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0005,
+ paramwise_options={'\Ahead.': dict(lr_mult=10)})
diff --git a/configs/benchmarks/semi_classification/imagenet_1percent/r50_lr0_1_head100.py b/configs/benchmarks/semi_classification/imagenet_1percent/r50_lr0_1_head100.py
new file mode 100644
index 00000000..94e75883
--- /dev/null
+++ b/configs/benchmarks/semi_classification/imagenet_1percent/r50_lr0_1_head100.py
@@ -0,0 +1,4 @@
+_base_ = 'base.py'
+# optimizer
+optimizer = dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0005,
+ paramwise_options={'\Ahead.': dict(lr_mult=100)})
diff --git a/configs/benchmarks/semi_classification/stl10/base.py b/configs/benchmarks/semi_classification/stl10/base.py
new file mode 100644
index 00000000..934f20ed
--- /dev/null
+++ b/configs/benchmarks/semi_classification/stl10/base.py
@@ -0,0 +1,69 @@
+_base_ = '../../../base.py'
+# model settings
+model = dict(
+ type='Classification',
+ pretrained=None,
+ backbone=dict(
+ type='ResNet',
+ depth=50,
+ out_indices=[4], # 0: conv-1, x: stage-x
+ norm_cfg=dict(type='SyncBN')),
+ head=dict(
+ type='ClsHead', with_avg_pool=True, in_channels=2048,
+ num_classes=10))
+# dataset settings
+data_source_cfg = dict(
+ type='ImageNet',
+ memcached=False,
+ mclient_path='/mnt/lustre/share/memcached_client')
+# test: STL-10 dataset
+data_train_list = 'data/stl10/meta/train_5k_labeled.txt' # stl10 labeled 5k train
+data_train_root = 'data/stl10/train/' # using labeled train set
+data_test_list = 'data/stl10/meta/test_8k_labeled.txt' # stl10 labeled 8k test
+data_test_root = 'data/stl10/test/' # using labeled test set
+# resize setting
+resizeto = 96
+dataset_type = 'ClassificationDataset'
+img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) # imagenet
+train_pipeline = [
+ dict(type='RandomResizedCrop', size=resizeto),
+ dict(type='RandomHorizontalFlip'),
+ dict(type='ToTensor'),
+ dict(type='Normalize', **img_norm_cfg),
+]
+test_pipeline = [
+ dict(type='Resize', size=resizeto+32),
+ dict(type='CenterCrop', size=resizeto),
+ dict(type='ToTensor'),
+ dict(type='Normalize', **img_norm_cfg),
+]
+data = dict(
+ imgs_per_gpu=256, # total 256, 1GPU linear cls
+ workers_per_gpu=8,
+ train=dict(
+ type=dataset_type,
+ data_source=dict(
+ list_file=data_train_list, root=data_train_root,
+ **data_source_cfg),
+ pipeline=train_pipeline),
+ val=dict(
+ type=dataset_type,
+ data_source=dict(
+ list_file=data_test_list, root=data_test_root, **data_source_cfg),
+ pipeline=test_pipeline))
+# additional hooks
+custom_hooks = [
+ dict(
+ type='ValidateHook',
+ dataset=data['val'],
+ initial=False,
+ interval=20,
+ imgs_per_gpu=128,
+ workers_per_gpu=4,
+ eval_param=dict(topk=(1, 5)))
+]
+# learning policy
+lr_config = dict(policy='step', step=[12, 16], gamma=0.2)
+checkpoint_config = dict(interval=20)
+# runtime settings
+total_epochs = 20
diff --git a/configs/benchmarks/semi_classification/stl10/r50_lr0_001_head1.py b/configs/benchmarks/semi_classification/stl10/r50_lr0_001_head1.py
new file mode 100644
index 00000000..5bd55efc
--- /dev/null
+++ b/configs/benchmarks/semi_classification/stl10/r50_lr0_001_head1.py
@@ -0,0 +1,4 @@
+_base_ = 'base.py'
+# optimizer
+optimizer = dict(type='SGD', lr=0.001, momentum=0.9, weight_decay=0.0001,
+ paramwise_options={'\Ahead.': dict(lr_mult=1)})
diff --git a/configs/benchmarks/semi_classification/stl10/r50_lr0_001_head10.py b/configs/benchmarks/semi_classification/stl10/r50_lr0_001_head10.py
new file mode 100644
index 00000000..9dc9a79e
--- /dev/null
+++ b/configs/benchmarks/semi_classification/stl10/r50_lr0_001_head10.py
@@ -0,0 +1,4 @@
+_base_ = 'base.py'
+# optimizer
+optimizer = dict(type='SGD', lr=0.001, momentum=0.9, weight_decay=0.0001,
+ paramwise_options={'\Ahead.': dict(lr_mult=10)})
diff --git a/configs/benchmarks/semi_classification/stl10/r50_lr0_001_head100.py b/configs/benchmarks/semi_classification/stl10/r50_lr0_001_head100.py
new file mode 100644
index 00000000..a1d324d2
--- /dev/null
+++ b/configs/benchmarks/semi_classification/stl10/r50_lr0_001_head100.py
@@ -0,0 +1,4 @@
+_base_ = 'base.py'
+# optimizer
+optimizer = dict(type='SGD', lr=0.001, momentum=0.9, weight_decay=0.0001,
+ paramwise_options={'\Ahead.': dict(lr_mult=100)})
diff --git a/configs/benchmarks/semi_classification/stl10/r50_lr0_01_head1.py b/configs/benchmarks/semi_classification/stl10/r50_lr0_01_head1.py
new file mode 100644
index 00000000..c3553048
--- /dev/null
+++ b/configs/benchmarks/semi_classification/stl10/r50_lr0_01_head1.py
@@ -0,0 +1,4 @@
+_base_ = 'base.py'
+# optimizer
+optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001,
+ paramwise_options={'\Ahead.': dict(lr_mult=1)})
diff --git a/configs/benchmarks/semi_classification/stl10/r50_lr0_01_head10.py b/configs/benchmarks/semi_classification/stl10/r50_lr0_01_head10.py
new file mode 100644
index 00000000..7d5bedaa
--- /dev/null
+++ b/configs/benchmarks/semi_classification/stl10/r50_lr0_01_head10.py
@@ -0,0 +1,4 @@
+_base_ = 'base.py'
+# optimizer
+optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001,
+ paramwise_options={'\Ahead.': dict(lr_mult=10)})
diff --git a/configs/benchmarks/semi_classification/stl10/r50_lr0_01_head100.py b/configs/benchmarks/semi_classification/stl10/r50_lr0_01_head100.py
new file mode 100644
index 00000000..6b696915
--- /dev/null
+++ b/configs/benchmarks/semi_classification/stl10/r50_lr0_01_head100.py
@@ -0,0 +1,4 @@
+_base_ = 'base.py'
+# optimizer
+optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001,
+ paramwise_options={'\Ahead.': dict(lr_mult=100)})
diff --git a/configs/benchmarks/semi_classification/stl10/r50_lr0_1_head1.py b/configs/benchmarks/semi_classification/stl10/r50_lr0_1_head1.py
new file mode 100644
index 00000000..9ea9112c
--- /dev/null
+++ b/configs/benchmarks/semi_classification/stl10/r50_lr0_1_head1.py
@@ -0,0 +1,6 @@
+_base_ = 'base.py'
+# optimizer
+optimizer = dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0001,
+ paramwise_options={'\Ahead.': dict(lr_mult=1)})
+
+# CUDA_VISIBLE_DEVICES=1 bash benchmarks/dist_train_semi_1gpu.sh configs/benchmarks/semi_classification/stl10/r50_lr0_1_head1.py ${WEIGHT_FILE}
diff --git a/configs/benchmarks/semi_classification/stl10/r50_lr0_1_head10.py b/configs/benchmarks/semi_classification/stl10/r50_lr0_1_head10.py
new file mode 100644
index 00000000..9b8c6f1c
--- /dev/null
+++ b/configs/benchmarks/semi_classification/stl10/r50_lr0_1_head10.py
@@ -0,0 +1,4 @@
+_base_ = 'base.py'
+# optimizer
+optimizer = dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0001,
+ paramwise_options={'\Ahead.': dict(lr_mult=10)})
diff --git a/configs/benchmarks/semi_classification/stl10/r50_lr0_1_head100.py b/configs/benchmarks/semi_classification/stl10/r50_lr0_1_head100.py
new file mode 100644
index 00000000..f7cc627a
--- /dev/null
+++ b/configs/benchmarks/semi_classification/stl10/r50_lr0_1_head100.py
@@ -0,0 +1,4 @@
+_base_ = 'base.py'
+# optimizer
+optimizer = dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0001,
+ paramwise_options={'\Ahead.': dict(lr_mult=100)})
diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md
new file mode 100644
index 00000000..b3ff4668
--- /dev/null
+++ b/docs/CHANGELOG.md
@@ -0,0 +1,37 @@
+## Changelog
+
+### v0.3.0 (14/10/2020)
+
+#### Highlights
+* Support Mixed Precision Training
+* Improved GaussianBlur, which doubles the training speed
+* More benchmarking results
+
+#### Bug Fixes
+* Fix bugs in MoCo v2; the results are now reproducible.
+* Fix bugs in BYOL.
+
+#### New Features
+* Mixed Precision Training
+* Improved GaussianBlur doubles the training speed of MoCo v2, SimCLR, and BYOL
+* More benchmarking results, including Places, VOC, COCO
+
+### v0.2.0 (26/6/2020)
+
+#### Highlights
+* Support BYOL
+* Support semi-supervised benchmarks
+
+#### Bug Fixes
+* Fix hash id in publish_model.py
+
+#### New Features
+
+* Support BYOL.
+* Separate train and test scripts in linear/semi evaluation.
+* Support semi-supervised benchmarks: benchmarks/dist_train_semi.sh.
+* Move benchmarks related configs into configs/benchmarks/.
+* Provide benchmarking results and model download links.
+* Support updating the network every several iterations.
+* Support the LARS optimizer with Nesterov momentum.
+* Support excluding specific parameters from LARS adaptation and weight decay, as required by SimCLR and BYOL.
diff --git a/docs/GETTING_STARTED.md b/docs/GETTING_STARTED.md
new file mode 100644
index 00000000..bb3a56fe
--- /dev/null
+++ b/docs/GETTING_STARTED.md
@@ -0,0 +1,287 @@
+# Getting Started
+
+This page provides basic tutorials about the usage of OpenSelfSup.
+For installation instructions, please see [INSTALL.md](INSTALL.md).
+
+## Train existing methods
+
+**Note**: The default learning rate in config files is for 8 GPUs. If you use a different number of GPUs, the total batch size changes in proportion, so you have to scale the learning rate following `new_lr = old_lr * new_ngpus / old_ngpus` (e.g., going from 8 GPUs to 4 halves the learning rate). We recommend using `tools/dist_train.sh` even with 1 GPU, since some methods do not support non-distributed training.
+
+### Train with single/multiple GPUs
+
+```shell
+bash tools/dist_train.sh ${CONFIG_FILE} ${GPUS} [optional arguments]
+```
+Optional arguments are:
+- `--resume_from ${CHECKPOINT_FILE}`: Resume from a previous checkpoint file.
+- `--pretrained ${PRETRAIN_WEIGHTS}`: Load pretrained weights for the backbone.
+- `--deterministic`: Switch on "deterministic" mode, which slows down training but makes the results reproducible.
+
+An example:
+```shell
+# checkpoints and logs saved in WORK_DIR=work_dirs/selfsup/odc/r50_v1/
+bash tools/dist_train.sh configs/selfsup/odc/r50_v1.py 8
+```
+**Note**: During training, checkpoints and logs are saved in the same folder structure as the config file under `work_dirs/`. A custom work directory is not recommended, since evaluation scripts infer work directories from the config file name. If you want to save your weights somewhere else, please use a symlink, for example:
+
+```shell
+ln -s /DATA/xhzhan/openselfsup_workdirs ${OPENSELFSUP}/work_dirs
+```
+
+Alternatively, if you run OpenSelfSup on a cluster managed with [slurm](https://slurm.schedmd.com/):
+```shell
+SRUN_ARGS="${SRUN_ARGS}" bash tools/srun_train.sh ${PARTITION} ${CONFIG_FILE} ${GPUS} [optional arguments]
+```
+
+An example:
+```shell
+SRUN_ARGS="-w xx.xx.xx.xx" bash tools/srun_train.sh Dummy configs/selfsup/odc/r50_v1.py 8 --resume_from work_dirs/selfsup/odc/r50_v1/epoch_100.pth
+```
+
+### Train with multiple machines
+
+If you launch with multiple machines simply connected with ethernet, you have to modify `tools/dist_train.sh` or create a new script; please refer to the PyTorch [Launch utility](https://pytorch.org/docs/stable/distributed.html#launch-utility). Training is usually slow without high-speed networking such as InfiniBand.
+
+If you launch with slurm, the command is the same as that on single machine described above. You only need to change ${GPUS}, e.g., to 16 for two 8-GPU machines.
+
+### Launch multiple jobs on a single machine
+
+If you launch multiple jobs on a single machine, e.g., 2 jobs of 4-GPU training on a machine with 8 GPUs,
+you need to specify different ports (29500 by default) for each job to avoid communication conflicts.
+
+If you use `dist_train.sh` to launch training jobs:
+```shell
+CUDA_VISIBLE_DEVICES=0,1,2,3 PORT=29500 bash tools/dist_train.sh ${CONFIG_FILE} 4
+CUDA_VISIBLE_DEVICES=4,5,6,7 PORT=29501 bash tools/dist_train.sh ${CONFIG_FILE} 4
+```
+
+If you use slurm to launch training jobs:
+```shell
+GPUS_PER_NODE=4 bash tools/srun_train.sh ${PARTITION} ${CONFIG_FILE} 4 --port 29500
+GPUS_PER_NODE=4 bash tools/srun_train.sh ${PARTITION} ${CONFIG_FILE} 4 --port 29501
+```
+
+### What if I do not have so many GPUs?
+
+Assume you have only 1 GPU, which can hold 64 images per batch, while you expect a total batch size of 256. You may add the following line to your config file; it updates the network every 4 iterations, so the equivalent batch size is 256. Of course, this is about 4x slower than using 4 GPUs. Note that the workaround is not applicable to methods like SimCLR that require intra-batch communication.
+
+```python
+optimizer_config = dict(update_interval=4)
+```
+
+### Mixed Precision Training (Optional)
+We use [Apex](https://github.com/NVIDIA/apex) to implement Mixed Precision Training.
+If you want to use Mixed Precision Training, add the following to the config file.
+```python
+use_fp16 = True
+optimizer_config = dict(use_fp16=use_fp16)
+```
+An example:
+```shell
+bash tools/dist_train.sh configs/selfsup/moco/r50_v1_fp16.py 8
+```
+
+### Speeding Up IO (Optional)
+1. Prefetching data helps speed up IO and makes better use of CUDA stream parallelization.
+If you want to use it, activate it in the config file (disabled by default).
+```python
+prefetch = True
+```
+2. The costly `ToTensor` operation is reimplemented along with prefetching.
+
+3. Replace Pillow with Pillow-SIMD (https://github.com/uploadcare/pillow-simd.git) to make use of SIMD instruction sets on modern CPUs.
+```shell
+pip uninstall pillow
+pip install Pillow-SIMD
+# or, if AVX2 is available:
+# CC="cc -mavx2" pip install -U --force-reinstall pillow-simd
+```
+We tested this with MoCo v2 using a total batch size of 256 on a Tesla V100; the training time per step decreased from 0.23s to 0.17s.
+
+## Benchmarks
+
+We provide several standard benchmarks to evaluate representation learning. If you use this repo in your publications, we recommend NOT changing the config files or evaluation scripts mentioned below, so that all methods are compared fairly.
+
+### VOC07 Linear SVM & Low-shot Linear SVM
+
+```shell
+# test by epoch (only applicable to experiments trained with OpenSelfSup)
+bash benchmarks/dist_test_svm_epoch.sh ${CONFIG_FILE} ${EPOCH} ${FEAT_LIST} ${GPUS}
+# test a pretrained model (applicable to any pre-trained models)
+bash benchmarks/dist_test_svm_pretrain.sh ${CONFIG_FILE} ${PRETRAIN} ${FEAT_LIST} ${GPUS}
+```
+Arguments:
+- `${CONFIG_FILE}`: the config file of the self-supervised experiment.
+- `${FEAT_LIST}`: a string specifying which features from layer1 to layer5 to evaluate; e.g., to evaluate layer5 only, set `FEAT_LIST` to `"feat5"`; to evaluate all features, set it to `"feat1 feat2 feat3 feat4 feat5"` (separated by spaces). If left empty, `FEAT_LIST` defaults to `"feat5"`.
+- `${GPUS}`: the number of GPUs used to extract features.
+
+Working directories:
+The features, logs and intermediate files generated are saved in `$SVM_WORK_DIR/` as follows:
+- `dist_test_svm_epoch.sh`: `SVM_WORK_DIR=$WORK_DIR/` (the same as mentioned in `Train with single/multiple GPUs` above). Hence, the files will be overwritten to save space when evaluating with a new `$EPOCH`.
+- `dist_test_svm_pretrain.sh`: `SVM_WORK_DIR=$WORK_DIR/$PRETRAIN_NAME/`, e.g., if `PRETRAIN=pretrains/odc_r50_v1-5af5dd0c.pth`, then `PRETRAIN_NAME=odc_r50_v1-5af5dd0c.pth`; if `PRETRAIN=random`, then `PRETRAIN_NAME=random`.
+
+Notes:
+- The evaluation records are saved in `$SVM_WORK_DIR/logs/eval_svm.log`.
+- When using `benchmarks/dist_test_svm_epoch.sh`, DO NOT launch multiple tests of the same experiment with different epochs, since they share the same working directory.
+- Linear SVM takes about 5 minutes, and low-shot linear SVM takes about 1 hour with 32 CPU cores. If you want to save time, you may delete or comment out the low-shot SVM testing command (the last line in the scripts).
+
+### ImageNet / Places205 Linear Classification
+
+**First**, extract backbone weights:
+```shell
+python tools/extract_backbone_weights.py ${CHECKPOINT} ${WEIGHT_FILE}
+```
+Arguments:
+- `CHECKPOINT`: the checkpoint file of a self-supervised method, named `epoch_*.pth`.
+- `WEIGHT_FILE`: the output backbone weights file, e.g., `pretrains/moco_r50_v1-4ad89b5c.pth`.
+
+**Next**, train and test linear classification:
+```shell
+# train
+bash benchmarks/dist_train_linear.sh ${CONFIG_FILE} ${WEIGHT_FILE} [optional arguments]
+# test (unnecessary if validation is performed during training)
+bash tools/dist_test.sh ${CONFIG_FILE} ${GPUS} ${CHECKPOINT}
+```
+Arguments:
+- `CONFIG_FILE`: Use config files under "configs/benchmarks/linear_classification/". Note that to test DeepCluster, which has a Sobel layer before the backbone, you have to use the config file named `*_sobel.py`, e.g., `configs/benchmarks/linear_classification/imagenet/r50_multihead_sobel.py`.
+- Optional arguments include:
+ - `--resume_from ${CHECKPOINT_FILE}`: Resume from a previous checkpoint file.
+ - `--deterministic`: Switch on "deterministic" mode, which slows down training but makes the results reproducible.
+
+Working directories:
+Where are the checkpoints and logs? E.g., if you use `configs/benchmarks/linear_classification/imagenet/r50_multihead.py` to evaluate `pretrains/moco_r50_v1-4ad89b5c.pth`, then the working directory for this evaluation is `work_dirs/benchmarks/linear_classification/imagenet/r50_multihead/moco_r50_v1-4ad89b5c.pth/`.
+
+### ImageNet Semi-Supervised Classification
+
+```shell
+# train
+bash benchmarks/dist_train_semi.sh ${CONFIG_FILE} ${WEIGHT_FILE} [optional arguments]
+# test (unnecessary if validation is performed during training)
+bash tools/dist_test.sh ${CONFIG_FILE} ${GPUS} ${CHECKPOINT}
+```
+Arguments:
+- `CONFIG_FILE`: Use config files under "configs/benchmarks/semi_classification/". Note that to test DeepCluster, which has a Sobel layer before the backbone, you have to use the config file named `*_sobel.py`, e.g., `configs/benchmarks/semi_classification/imagenet_1percent/r50_sobel.py`.
+- Optional arguments include:
+ - `--resume_from ${CHECKPOINT_FILE}`: Resume from a previous checkpoint file.
+ - `--deterministic`: Switch on "deterministic" mode, which slows down training but makes the results reproducible.
+
+### VOC07+12 / COCO17 Object Detection
+
+For more details on setting up the environment for detection, please refer to [here](https://github.com/open-mmlab/OpenSelfSup/blob/master/benchmarks/detection/README.md).
+
+```shell
+conda activate detectron2 # use detectron2 environment here, otherwise use open-mmlab environment
+cd benchmarks/detection
+python convert-pretrain-to-detectron2.py ${WEIGHT_FILE} ${OUTPUT_FILE} # must use .pkl as the output extension.
+bash run.sh ${DET_CFG} ${OUTPUT_FILE}
+```
+Arguments:
+- `WEIGHT_FILE`: The backbone weights file extracted as described above.
+- `OUTPUT_FILE`: Converted backbone weights file, e.g., `odc_v1.pkl`.
+- `DET_CFG`: The detectron2 config file, usually we use `configs/pascal_voc_R_50_C4_24k_moco.yaml`.
+
+**Note**:
+- This benchmark must use 8 GPUs, following the default setting from MoCo.
+- Please report the mean of 5 trials in your official paper, following MoCo.
+- DeepCluster, which uses a Sobel layer, is not supported by detectron2.
+
+## Tools and Tips
+
+### Count number of parameters
+
+```shell
+python tools/count_parameters.py ${CONFIG_FILE}
+```
+
+### Publish a model
+
+Compute the hash of the weight file and append the hash id to the filename, i.e., the output file is the input file name with a hash suffix.
+
+```shell
+python tools/publish_model.py ${WEIGHT_FILE}
+```
+Arguments:
+- `WEIGHT_FILE`: The backbone weights file extracted as described above.
+
+### Reproducibility
+
+If you want to make your performance exactly reproducible, please switch on `--deterministic` to train the final model to be published. Note that this flag will switch off `torch.backends.cudnn.benchmark` and slow down the training speed.
+
+## How-to
+
+### Use a new dataset
+
+1. Write a data source file under `openselfsup/datasets/data_sources/`. You may refer to the existing ones; a minimal sketch follows this list.
+
+2. Create new config files for your experiments.
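+
+As a rough reference, a data source might look like the sketch below. This is a minimal sketch, not the exact API: the `DATASOURCES` registry import and the `get_length`/`get_sample` interface are assumptions inferred from the decoupled design described later on this page, so check the existing data sources for the real signatures.
+
+```python
+import os.path as osp
+
+from PIL import Image
+
+from ..registry import DATASOURCES  # assumed registry, as in existing sources
+
+
+@DATASOURCES.register_module
+class ToyImageList(object):
+    """Hypothetical data source reading 'filename[space]label' lines."""
+
+    def __init__(self, list_file, root, *args, **kwargs):
+        with open(list_file, 'r') as f:
+            lines = [l.strip().split() for l in f if l.strip()]
+        self.fns = [osp.join(root, l[0]) for l in lines]
+        self.labels = [int(l[1]) for l in lines]
+        self.has_labels = True
+
+    def get_length(self):
+        return len(self.fns)
+
+    def get_sample(self, idx):
+        # datasets apply the configured pipeline on the returned PIL image
+        return Image.open(self.fns[idx]).convert('RGB')
+```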
+
+### Design your own methods
+
+#### What you need to do
+
+ 1. Create a dataset file under `openselfsup/datasets/` (better using existing ones);
+ 2. Create a model file under `openselfsup/models/`. The model typically contains:
+ i) backbone (required): maps images to deep features at different depths. Your model must contain a `self.backbone` module; otherwise the backbone weights cannot be extracted.
+ ii) neck (optional): deep features to compact feature vectors.
+ iii) head (optional): define loss functions.
+ iv) memory_bank (optional): define memory banks.
+ 3. Create a config file under `configs/` and setup the configs;
+ 4. [Optional] Create a hook file under `openselfsup/hooks/` if your method requires additional operations before the run, every several iterations, every several epochs, or after the run.
+
+You may refer to existing modules under respective folders.
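+
+For step 2, a bare-bones model file could be structured as the sketch below. It is a sketch under assumptions, not the definitive interface: the `MODELS` registry, the `builder` helpers, and the `forward_train` signature are modeled on common OpenMMLab-style code, so consult the existing files under `openselfsup/models/` for the exact conventions.
+
+```python
+import torch.nn as nn
+
+from . import builder          # assumed builder helpers
+from .registry import MODELS   # assumed registry
+
+
+@MODELS.register_module
+class ToyMethod(nn.Module):
+    """Hypothetical method with the backbone/neck/head structure."""
+
+    def __init__(self, backbone, neck=None, head=None, pretrained=None):
+        super(ToyMethod, self).__init__()
+        # must be named self.backbone so the weights can be extracted
+        self.backbone = builder.build_backbone(backbone)
+        self.neck = builder.build_neck(neck) if neck is not None else None
+        self.head = builder.build_head(head) if head is not None else None
+
+    def forward_train(self, img, **kwargs):
+        x = self.backbone(img)   # deep features from the backbone
+        if self.neck is not None:
+            x = self.neck(x)     # compact feature vectors
+        return self.head(x)      # the head returns a dict of losses
+```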
+
+#### Features that may facilitate your implementation
+
+* Decoupled data source and dataset.
+
+Since a dataset is tied to a specific task while a data source is general, we decouple the data source from the dataset in OpenSelfSup.
+
+```python
+data = dict(
+ train=dict(type='ContrastiveDataset',
+ data_source=dict(type='ImageNet', list_file='xx', root='xx'),
+ pipeline=train_pipeline),
+ val=dict(...),
+ ...
+)
+```
+
+* Configure data augmentations in the config file.
+
+The augmentations are the same as those in `torchvision.transforms`, except that `torchvision.transforms.RandomApply` corresponds to `RandomAppliedTrans`. `Lighting` and `GaussianBlur` are additionally implemented.
+
+```python
+img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+train_pipeline = [
+ dict(type='RandomResizedCrop', size=224),
+ dict(type='RandomAppliedTrans',
+ transforms=[
+ dict(type='GaussianBlur', sigma_min=0.1, sigma_max=2.0, kernel_size=23)],
+ p=0.5),
+ dict(type='ToTensor'),
+ dict(type='Normalize', **img_norm_cfg)
+]
+```
+
+* Parameter-wise optimization parameters.
+
+You may specify optimization parameters, including lr, momentum and weight_decay, for a certain group of parameters in the config file with `paramwise_options`. `paramwise_options` is a dict whose keys are regular expressions and whose values are option dicts. Options include the following fields: lr, lr_mult, momentum, momentum_mult, weight_decay, weight_decay_mult, and lars_exclude (the last only works with the LARS optimizer).
+
+```python
+# this config sets all normalization layers with weight_decay_mult=0.1,
+# and the head with `lr_mult=10, momentum=0`.
+paramwise_options = {
+ '(bn|gn)(\d+)?.(weight|bias)': dict(weight_decay_mult=0.1),
+ '\Ahead.': dict(lr_mult=10, momentum=0)}
+optimizer_cfg = dict(type='SGD', lr=0.01, momentum=0.9,
+ weight_decay=0.0001,
+ paramwise_options=paramwise_options)
+```
+
+* Configure custom hooks in the config file.
+
+The hooks will be called in order. For hook design, please refer to [odc_hook.py](https://github.com/open-mmlab/OpenSelfSup/blob/master/openselfsup/hooks/odc_hook.py) as an example.
+
+```python
+custom_hooks = [
+ dict(type='DeepClusterHook', ...),
+ dict(type='ODCHook', ...),
+]
+```
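+
+As an illustration of the hook interface, a skeleton hook might look like the following. This is a minimal sketch assuming mmcv's `Hook` base class and a `HOOKS` registry analogous to the other registries in this repo; see the linked `odc_hook.py` for a real example.
+
+```python
+from mmcv.runner import Hook
+
+from .registry import HOOKS  # assumed registry, as for models and datasets
+
+
+@HOOKS.register_module
+class ToyHook(Hook):
+    """Hypothetical hook performing an extra operation every few epochs."""
+
+    def __init__(self, interval=1):
+        self.interval = interval
+
+    def before_run(self, runner):
+        pass  # e.g., initialize a memory bank before training starts
+
+    def after_train_epoch(self, runner):
+        if self.every_n_epochs(runner, self.interval):
+            pass  # e.g., re-cluster features and update pseudo labels
+```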
diff --git a/docs/INSTALL.md b/docs/INSTALL.md
new file mode 100644
index 00000000..7420e0c9
--- /dev/null
+++ b/docs/INSTALL.md
@@ -0,0 +1,160 @@
+## Installation
+
+### Requirements
+
+- Linux (Windows is not officially supported)
+- Python 3.5+
+- PyTorch 1.1 or higher
+- CUDA 9.0 or higher
+- NCCL 2
+- GCC 4.9 or higher
+- [mmcv](https://github.com/open-mmlab/mmcv)
+
+We have tested the following versions of OS and software:
+
+- OS: Ubuntu 16.04/18.04 and CentOS 7.2
+- CUDA: 9.0/9.2/10.0/10.1/11.0
+- NCCL: 2.1.15/2.2.13/2.3.7/2.4.2 (PyTorch-1.1 w/ NCCL-2.4.2 has a deadlock bug, see [here](https://github.com/open-mmlab/OpenSelfSup/issues/6))
+- GCC(G++): 4.9/5.3/5.4/7.3
+
+### Install openselfsup
+
+a. Create a conda virtual environment and activate it.
+
+```shell
+conda create -n open-mmlab python=3.7 -y
+conda activate open-mmlab
+```
+
+b. Install PyTorch and torchvision following the [official instructions](https://pytorch.org/), e.g.,
+
+```shell
+conda install pytorch torchvision -c pytorch
+# or assuming CUDA=10.1, "pip install torch==1.6.0+cu101 torchvision==0.7.0+cu101 -f https://download.pytorch.org/whl/torch_stable.html"
+```
+
+c. Install other third-party libraries (optional).
+
+```shell
+conda install faiss-gpu cudatoolkit=10.1 -c pytorch # optional for DeepCluster and ODC, assuming CUDA=10.1
+pip install umap-learn # optional for umap visualization.
+pip install opencv-contrib-python # optional for SaliencyMix (cv2.saliency.StaticSaliencyFineGrained_create())
+```
+
+d. Clone the openselfsup repository.
+
+```shell
+git clone https://github.com/open-mmlab/openselfsup.git
+cd openselfsup
+```
+
+e. Install.
+
+```shell
+pip install -v -e . # or "python setup.py develop"
+```
+
+f. Install Apex (optional), following the [official instructions](https://github.com/NVIDIA/apex), e.g.
+```shell
+git clone https://github.com/NVIDIA/apex
+cd apex
+pip install -v --disable-pip-version-check --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" ./
+```
+
+Note:
+
+1. The git commit id will be written into the version number in step e, e.g. 0.6.0+2e7045c. The version will also be saved in trained models.
+
+2. Following the above instructions, openselfsup is installed in `dev` mode: any local modifications made to the code take effect without reinstalling (unless you submit commits and want to update the version number).
+
+3. If you would like to use `opencv-python-headless` instead of `opencv-python`,
+you can install it before installing MMCV.
+
+
+### Prepare datasets
+
+It is recommended to symlink your dataset root (assuming $YOUR_DATA_ROOT) to `$OPENSELFSUP/data`.
+If your folder structure is different, you may need to change the corresponding paths in config files.
+
+#### Prepare PASCAL VOC
+
+Assuming that you usually store datasets in `$YOUR_DATA_ROOT` (e.g., for me, `/home/xhzhan/data/`).
+This script will automatically download PASCAL VOC 2007 into `$YOUR_DATA_ROOT`, prepare the required files, create a folder `data` under `$OPENSELFSUP` and make a symlink `VOCdevkit`.
+
+```shell
+cd $OPENSELFSUP
+bash tools/prepare_data/prepare_voc07_cls.sh $YOUR_DATA_ROOT
+```
+
+#### Prepare ImageNet and Places205
+
+Taking ImageNet as an example, you need to 1) download ImageNet; 2) create the following list files under `$IMAGENET/meta/`, or download them [here](https://drive.google.com/drive/folders/1wYkJU_1qRHEt1LPVjBiG6ddUFV-t9hVJ?usp=sharing): `train.txt` and `val.txt` contain one image file name per line; `train_labeled.txt` and `val_labeled.txt` contain `filename[space]label\n` per line; `train_labeled_*percent.txt` are the down-sampled lists for semi-supervised evaluation; 3) create a symlink under `$OPENSELFSUP/data/`.
+
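+If your ImageNet copy follows the usual class-subfolder layout, a small script along the following lines can generate the labeled list files. It is only a sketch: the layout and the alphabetical class-to-label mapping are assumptions, so adapt it to your own data.
+
+```python
+import os
+
+root = 'data/imagenet/train'        # assumed class-subfolder layout
+out = 'data/imagenet/meta/train_labeled.txt'
+classes = sorted(os.listdir(root))  # assumed label order: alphabetical
+with open(out, 'w') as f:
+    for label, cls in enumerate(classes):
+        for fn in sorted(os.listdir(os.path.join(root, cls))):
+            # each line: "filename[space]label", relative to the data root
+            f.write('{}/{} {}\n'.format(cls, fn, label))
+```
+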
+Finally, the folder structure looks like this:
+
+```
+OpenSelfSup
+├── openselfsup
+├── benchmarks
+├── configs
+├── data
+│ ├── VOCdevkit
+│ │ ├── VOC2007
+│ │ ├── VOC2012
+│ ├── imagenet
+│ │ ├── meta
+│ │ | ├── train.txt (for self-sup training, "filename\n" in each line)
+│ │ | ├── train_labeled.txt (for linear evaluation, "filename[space]label\n" in each line)
+│ │ | ├── train_labeled_1percent.txt (for semi-supervised evaluation)
+│ │ | ├── train_labeled_10percent.txt (for semi-supervised evaluation)
+│ │ | ├── val.txt
+│ │ | ├── val_labeled.txt (for evaluation)
+│ │ ├── train
+│ │ ├── val
+│ ├── places205
+│ │ ├── meta
+│ │ | ├── train.txt
+│ │ | ├── train_labeled.txt
+│ │ | ├── val.txt
+│ │ | ├── val_labeled.txt
+│ │ ├── train
+│ │ ├── val
+```
+
+### A from-scratch setup script
+
+Here is a full script for setting up openselfsup with conda and linking the dataset paths. The script does not download the ImageNet and Places datasets; you have to prepare them on your own.
+
+```shell
+conda create -n open-mmlab python=3.7 -y
+conda activate open-mmlab
+
+conda install -c pytorch pytorch torchvision -y
+git clone https://github.com/open-mmlab/OpenSelfSup.git
+cd OpenSelfSup
+pip install -v -e .
+
+bash tools/prepare_data/prepare_voc07_cls.sh $YOUR_DATA_ROOT
+ln -s $IMAGENET_ROOT data/imagenet
+ln -s $PLACES_ROOT data/places205
+```
+
+### Using multiple OpenSelfSup versions
+
+If there is more than one copy of openselfsup on your machine and you want to use them alternately, the recommended way is to create multiple conda environments and use a different environment for each version.
+
+Another way is to insert the following code into the main scripts (`train.py`, `test.py`, or any other script you run):
+```python
+import os.path as osp
+import sys
+sys.path.insert(0, osp.join(osp.dirname(osp.abspath(__file__)), '../'))
+```
+
+Or run the following command from the corresponding folder to temporarily use the current copy:
+```shell
+export PYTHONPATH=`pwd`:$PYTHONPATH
+```
+
+## Common Issues
+
+1. The training hangs / deadlocks in some intermediate iteration. See this [issue](https://github.com/open-mmlab/OpenSelfSup/issues/6).
diff --git a/docs/MODEL_ZOO.md b/docs/MODEL_ZOO.md
new file mode 100644
index 00000000..2d75be82
--- /dev/null
+++ b/docs/MODEL_ZOO.md
@@ -0,0 +1,184 @@
+# Model Zoo
+
+**OpenSelfSup needs your contribution!
+Since we don't have sufficient GPUs to run these large-scale experiments, your contributions, including parameter studies, reproduction of results, implementations of new methods, etc., are essential to make OpenSelfSup better. Your contribution will be recorded in the table below; top contributors will be included in the author list of OpenSelfSup!**
+
+## Pre-trained model download links and speed test.
+**Note**
+* If not specifically indicated, the testing GPUs are NVIDIA Tesla V100.
+* The table records the implementors, who implemented the methods (either from scratch or by refactoring other repos), and the experimenters, who performed the experiments and reproduced the results. The experimenters are responsible for the evaluation results on all the benchmarks, and the implementors are responsible for the implementation as well as the results; if no experimenter is indicated, the implementor is the experimenter by default.
+
+
+
+
+## Benchmarks
+
+### VOC07 SVM & SVM Low-shot
+
+Method | Config | Remarks | Best layer | VOC07 SVM | Low-shot 1 | 2 | 4 | 8 | 16 | 32 | 64 | 96 |
+--- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
+ImageNet | - | torchvision | feat5 | 87.17 | 52.99 | 63.55 | 73.7 | 78.79 | 81.76 | 83.75 | 85.18 | 85.97 |
+Random | - | kaiming | feat2 | 30.54 | 9.15 | 9.39 | 11.09 | 12.3 | 14.3 | 17.41 | 21.32 | 23.77 |
+Relative-Loc | selfsup/relative_loc/r50.py | default | feat4 | 64.78 | 18.17 | 22.08 | 29.37 | 35.58 | 41.8 | 48.73 | 55.55 | 58.33 |
+Rotation-Pred | selfsup/rotation_pred/r50.py | default | feat4 | 67.38 | 18.91 | 23.33 | 30.57 | 38.22 | 45.83 | 52.23 | 58.08 | 61.11 |
+DeepCluster | selfsup/deepcluster/r50.py | default | feat5 | 74.26 | 29.73 | 37.66 | 45.85 | 55.57 | 62.48 | 66.15 | 70.0 | 71.37 |
+NPID | selfsup/npid/r50.py | default | feat5 | 74.50 | 24.19 | 31.24 | 39.69 | 50.99 | 59.03 | 64.4 | 68.69 | 70.84 |
+ | selfsup/npid/r50_ensure_neg.py | ensure_neg=True | feat5 | 75.70 | | | | | | | | |
+ODC | selfsup/odc/r50_v1.py | default | feat5 | 78.42 | 32.42 | 40.27 | 49.95 | 59.96 | 65.71 | 69.99 | 73.64 | 75.13 |
+MoCo | selfsup/moco/r50_v1.py | default | feat5 | 79.18 | 30.03 | 37.73 | 47.64 | 58.78 | 66.0 | 70.6 | 74.6 | 76.07 |
+MoCo v2 | selfsup/moco/r50_v2.py | default | feat5 | 84.26 | 43.0 | 52.48 | 63.43 | 71.74 | 76.35 | 78.9 | 81.31 | 82.45 |
+SimCLR | selfsup/simclr/r50_bs256_ep200.py | default | feat5 | 78.95 | 32.45 | 40.76 | 50.4 | 59.01 | 65.45 | 70.13 | 73.58 | 75.35 |
+ | selfsup/simclr/r50_bs256_ep200_mocov2_neck.py | -> MoCo v2 neck | feat5 | 77.65 | | | | | | | | |
+BYOL | selfsup/byol/r50_bs4096_ep200.py | default | feat5 | 85.10 | 44.48 | 52.09 | 62.88 | 70.87 | 76.18 | 79.45 | 81.88 | 83.08 |
+ | selfsup/byol/r50_bs256_accumulate16_ep300.py | default | feat5 | 86.58 | | | | | | | | |
+ | selfsup/byol/r50_bs2048_accumulate2_ep200.py | default | feat5 | 85.86 | | | | | | | | |
+
+
+
+
+### ImageNet Linear Classification
+
+**Note**
+* Config: `configs/benchmarks/linear_classification/imagenet/r50_multihead.py` for ImageNet (Multi) and `configs/benchmarks/linear_classification/imagenet/r50_last.py` for ImageNet (Last).
+* For DeepCluster, use the corresponding one with `_sobel`.
+* ImageNet (Multi) evaluates features in around 9k dimensions from different layers. Top-1 result of the last epoch is reported.
+* ImageNet (Last) evaluates the last feature after global average pooling, e.g., 2048 dimensions for resnet50. The best top-1 result among all epochs is reported.
+* Usually, we report the best result from ImageNet (Multi) and ImageNet (Last) to ensure fairness, since different methods achieve their best performance on different layers.
+
+Method | Config | Remarks | Multi: feat1 | feat2 | feat3 | feat4 | feat5 | Last: avgpool |
+--- | --- | --- | --- | --- | --- | --- | --- | --- |
+ImageNet | - | torchvision | 15.18 | 33.96 | 47.86 | 67.56 | 76.17 | 74.12 |
+Random | - | kaiming | 11.37 | 16.21 | 13.47 | 9.07 | 6.54 | 4.35 |
+Relative-Loc | selfsup/relative_loc/r50.py | default | 14.76 | 31.29 | 45.77 | 49.31 | 40.20 | 38.83 |
+Rotation-Pred | selfsup/rotation_pred/r50.py | default | 12.89 | 34.30 | 44.91 | 54.99 | 49.09 | 47.01 |
+DeepCluster | selfsup/deepcluster/r50.py | default | 12.78 | 30.81 | 43.88 | 57.71 | 51.68 | 46.92 |
+NPID | selfsup/npid/r50.py | default | 14.28 | 31.20 | 40.68 | 54.46 | 56.61 | 56.60 |
+ODC | selfsup/odc/r50_v1.py | default | 14.76 | 31.82 | 42.44 | 55.76 | 57.70 | 53.42 |
+MoCo | selfsup/moco/r50_v1.py | default | 15.32 | 33.08 | 44.68 | 57.27 | 60.60 | 61.02 |
+MoCo v2 | selfsup/moco/r50_v2.py | default | 14.74 | 32.81 | 44.95 | 61.61 | 66.73 | 67.69 |
+SimCLR | selfsup/simclr/r50_bs256_ep200.py | default | 17.09 | 31.37 | 41.38 | 54.35 | 61.57 | 60.06 |
+ | selfsup/simclr/r50_bs256_ep200_mocov2_neck.py | -> MoCo v2 neck | 16.97 | 31.88 | 41.73 | 54.33 | 59.94 | 58.00 |
+BYOL | selfsup/byol/r50_bs4096_ep200.py | default | 16.70 | 34.22 | 46.61 | 60.78 | 69.14 | 67.10 |
+ | selfsup/byol/r50_bs256_accumulate16_ep300.py | default | 14.07 | 34.44 | 47.22 | 63.08 | 72.35 | |
+ | selfsup/byol/r50_bs2048_accumulate2_ep200_fp16.py | default | 15.52 | 34.50 | 47.22 | 62.78 | 71.61 | |
+
+
+### Places205 Linear Classification
+
+**Note**
+* Config: `configs/benchmarks/linear_classification/places205/r50_multihead.py`.
+* For DeepCluster, use the corresponding one with `_sobel`.
+* Places205 evaluates features in around 9k dimensions from different layers. Top-1 result of the last epoch is reported.
+
+Method | Config | Remarks | feat1 | feat2 | feat3 | feat4 | feat5 |
+--- | --- | --- | --- | --- | --- | --- | --- |
+ImageNet | - | torchvision | 21.27 | 36.10 | 43.03 | 51.38 | 53.05 |
+Random | - | kaiming | 17.19 | 21.70 | 19.23 | 14.59 | 11.73 |
+Relative-Loc | selfsup/relative_loc/r50.py | default | 21.07 | 34.86 | 42.84 | 45.71 | 41.45 |
+Rotation-Pred | selfsup/rotation_pred/r50.py | default | 18.65 | 35.71 | 42.28 | 45.98 | 43.72 |
+DeepCluster | selfsup/deepcluster/r50.py | default | 18.80 | 33.93 | 41.44 | 47.22 | 42.61 |
+NPID | selfsup/npid/r50.py | default | 20.53 | 34.03 | 40.48 | 47.13 | 47.73 |
+ODC | selfsup/odc/r50_v1.py | default | 20.94 | 34.78 | 41.19 | 47.45 | 49.18 |
+MoCo | selfsup/moco/r50_v1.py | default | 21.13 | 35.19 | 42.40 | 48.78 | 50.70 |
+MoCo v2 | selfsup/moco/r50_v2.py | default | 21.88 | 35.75 | 43.65 | 49.99 | 52.57 |
+SimCLR | selfsup/simclr/r50_bs256_ep200.py | default | 22.55 | 34.14 | 40.35 | 47.15 | 51.64 |
+ | selfsup/simclr/r50_bs256_ep200_mocov2_neck.py | -> MoCo v2 neck | | | | | |
+BYOL | selfsup/byol/r50_bs4096_ep200.py | default | 22.28 | 35.95 | 43.03 | 49.79 | 52.75 |
+
+
+### ImageNet Semi-Supervised Classification
+
+**Note**
+* In this benchmark, the necks or heads are removed and only the backbone CNN is evaluated by appending a linear classification head. All parameters are fine-tuned.
+* Config: under `configs/benchmarks/semi_classification/imagenet_1percent/` for 1% data, and `configs/benchmarks/semi_classification/imagenet_10percent/` for 10% data.
+* When training with 1% of ImageNet, we find that hyper-parameters, especially the learning rate, greatly influence performance. Hence, we prepare a list of settings with the base learning rate from \{0.001, 0.01, 0.1\} and the learning rate multiplier for the head from \{1, 10, 100\}. We choose the best-performing setting for each method.
+* Please use `--deterministic` in this benchmark.
+
+Method | Config | Remarks | Optimal setting for ImageNet 1% | ImageNet 1% top-1 | top-5 |
+--- | --- | --- | --- | --- | --- |
+ImageNet | - | torchvision | r50_lr0_001_head100.py | 68.68 | 88.87 |
+Random | - | kaiming | r50_lr0_01_head1.py | 1.56 | 4.99 |
+Relative-Loc | selfsup/relative_loc/r50.py | default | r50_lr0_01_head100.py | 16.48 | 40.37 |
+Rotation-Pred | selfsup/rotation_pred/r50.py | default | r50_lr0_01_head100.py | 18.98 | 44.05 |
+DeepCluster | selfsup/deepcluster/r50.py | default | r50_lr0_01_head1_sobel.py | 33.44 | 58.62 |
+NPID | selfsup/npid/r50.py | default | r50_lr0_01_head100.py | 27.95 | 54.37 |
+ODC | selfsup/odc/r50_v1.py | default | r50_lr0_1_head100.py | 32.39 | 61.02 |
+MoCo | selfsup/moco/r50_v1.py | default | r50_lr0_01_head100.py | 33.15 | 61.30 |
+MoCo v2 | selfsup/moco/r50_v2.py | default | r50_lr0_01_head100.py | 39.07 | 68.31 |
+SimCLR | selfsup/simclr/r50_bs256_ep200.py | default | r50_lr0_01_head100.py | 36.09 | 64.50 |
+ | selfsup/simclr/r50_bs256_ep200_mocov2_neck.py | -> MoCo v2 neck | r50_lr0_01_head100.py | 36.31 | 64.68 |
+BYOL | selfsup/byol/r50_bs4096_ep200.py | default | r50_lr0_01_head10.py | 49.37 | 76.75 |
+
+
+Method | Config | Remarks | Optimal setting for ImageNet 10% | ImageNet 10% top-1 | top-5 |
+--- | --- | --- | --- | --- | --- |
+ImageNet | - | torchvision | r50_lr0_001_head10.py | 74.53 | 92.19 |
+Random | - | kaiming | r50_lr0_01_head1.py | 21.78 | 44.24 |
+Relative-Loc | selfsup/relative_loc/r50.py | default | r50_lr0_01_head100.py | 53.86 | 79.62 |
+Rotation-Pred | selfsup/rotation_pred/r50.py | default | r50_lr0_01_head100.py | 54.75 | 80.21 |
+DeepCluster | selfsup/deepcluster/r50.py | default | r50_lr0_01_head1_sobel.py | 52.94 | 77.96 |
+NPID | selfsup/npid/r50.py | default | r50_lr0_01_head100.py | 57.22 | 81.39 |
+ODC | selfsup/odc/r50_v1.py | default | r50_lr0_1_head10.py | 58.15 | 82.55 |
+MoCo | selfsup/moco/r50_v1.py | default | r50_lr0_01_head100.py | 60.08 | 84.02 |
+MoCo v2 | selfsup/moco/r50_v2.py | default | r50_lr0_01_head100.py | 61.80 | 85.11 |
+SimCLR | selfsup/simclr/r50_bs256_ep200.py | default | r50_lr0_01_head100.py | 58.46 | 82.60 |
+ | selfsup/simclr/r50_bs256_ep200_mocov2_neck.py | -> MoCo v2 neck | r50_lr0_01_head100.py | 58.38 | 82.53 |
+BYOL | selfsup/byol/r50_bs4096_ep200.py | default | r50_lr0_01_head100.py | 65.94 | 87.81 |
+
+
+### PASCAL VOC07+12 Object Detection
+
+**Note**
+* This benchmark follows the evaluation protocols set up by MoCo.
+* Config: `benchmarks/detection/configs/pascal_voc_R_50_C4_24k_moco.yaml`.
+* Please follow [here](GETTING_STARTED.md#voc0712--coco17-object-detection) to run the evaluation.
+
+Method | Config | Remarks | VOC07+12 AP50 | AP | AP75 |
+--- | --- | --- | --- | --- | --- |
+ImageNet | - | torchvision | 81.58 | 54.19 | 59.80 |
+Random | - | kaiming | 59.02 | 32.83 | 31.60 |
+Relative-Loc | selfsup/relative_loc/r50.py | default | 80.36 | 55.13 | 61.18 |
+Rotation-Pred | selfsup/rotation_pred/r50.py | default | 80.91 | 55.52 | 61.39 |
+NPID | selfsup/npid/r50.py | default | 80.03 | 54.11 | 59.50 |
+MoCo | selfsup/moco/r50_v1.py | default | 81.38 | 55.95 | 62.23 |
+MoCo v2 | selfsup/moco/r50_v2.py | default | 82.24 | 56.97 | 63.43 |
+SimCLR | selfsup/simclr/r50_bs256_ep200.py | default | 79.41 | 51.54 | 55.63 |
+BYOL | selfsup/byol/r50_bs4096_ep200.py | default | 80.95 | 51.87 | 56.53 |
+
+
+### COCO2017 Object Detection
+
+**Note**
+* This benchmark follows the evaluation protocols set up by MoCo.
+* Config: `benchmarks/detection/configs/coco_R_50_C4_2x_moco.yaml`.
+* Please follow [here](GETTING_STARTED.md#voc0712--coco17-object-detection) to run the evaluation.
+
+Method | Config | Remarks | AP50(Box) | AP(Box) | AP75(Box) | AP50(Mask) | AP(Mask) | AP75(Mask) |
+--- | --- | --- | --- | --- | --- | --- | --- | --- |
+ImageNet | - | torchvision | 59.9 | 40.0 | 43.1 | 56.5 | 34.7 | 36.9 |
+Random | - | kaiming | 54.6 | 35.6 | 38.2 | 51.5 | 31.4 | 33.5 |
+Relative-Loc | selfsup/relative_loc/r50.py | default | 59.6 | 40.0 | 43.5 | 56.5 | 35.0 | 37.3 |
+Rotation-Pred | selfsup/rotation_pred/r50.py | default | 59.3 | 40.0 | 43.6 | 56.0 | 34.9 | 37.4 |
+NPID | selfsup/npid/r50.py | default | 59.0 | 39.4 | 42.8 | 55.9 | 34.5 | 36.6 |
+MoCo | selfsup/moco/r50_v1.py | default | 60.5 | 40.9 | 44.2 | 57.1 | 35.5 | 37.7 |
+MoCo v2 | selfsup/moco/r50_v2.py | default | 60.6 | 41.0 | 44.5 | 57.2 | 35.6 | 38.0 |
+SimCLR | selfsup/simclr/r50_bs256_ep200.py | default | 59.1 | 39.6 | 42.9 | 55.9 | 34.6 | 37.1 |
+BYOL | selfsup/byol/r50_bs4096_ep200.py | default | 60.5 | 40.3 | 43.9 | 56.8 | 35.1 | 37.3 |
+
+
diff --git a/docs/relation.jpg b/docs/relation.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..00e039b2c10d6b5489cb38d257ef756b17a9560a
GIT binary patch
literal 330127
zK0IBvIa#flS_hYS{n|@;h4;
zL$})F%>7cEr};%U^`v4+=rp%~Rha0MaAy1{kn&o3jR1lw+~F(W)a|==>R|2sM21F8
zuR+yaQ8!2OB@>N%NZ-|yAV_5nJy5A11%s|1t@Qnxb#k?3CMV1H*L-mP;z~F|qR$OpOA?+v#Ug75oDqewBw5
z=>(4Eo4I?s1+ZrDSCIX(jhwnoSHudqZ8g7OlQ+_Xn}aPqmK$u3+u|ijEoZ`_)>u2v
z_6))7N&LA^HV4IRMYqj_U;psk4(T8H2>xYH%>V^j}0GWYQn#c0z0Ml>SBnY+z@C;T(R`GN`puZjg}2
zz7X%2%{nHyYISEu`=m?HsQ#!8F9{p|>^F}b!T3eg$0n23o$tT<%Ssl%doO<4m&|mZD)4Dl
z`r1tsTM+p(BgS&MfyAH{QN4pg%Y2wd=qFaUBa2bsDmG#URq$VmLmq%u3wM^|CmuNwCZ}ujQmc6IW`AOuDG0J4EmVeY4Q7T3)n|J0laKc66n&e
z+C7NoG%vr1`pD8x@?L5z>sbZVNU`TYmA(t+1(fySKp?
zKoAU^Nkv`}JyK@!x2SEZnJjm)eP_q0plhulcl0J5)b^|1e3L6Bvj0vy1Te#ohWtAq
zvhNaIhp?)6$Yv(>T?t5(NmX@lul+I{;-dC2JKs2NCy-j|?7d<&&*+#L=VUIrt;sVaPT&vu-(;+1@
znja}T8?p##C&IYz`;^?lzZC6MX81#?(F2Re=S~@ke(tG6sdhsK*ZIP?r
z{;#GQKwATZd&%s9BgkuHdM9J$JAer#azFOz@e*Q49B>_#dunM>u^5@JcXBuL69bQH
z`UP?5L9sc93L|SdEwa6>7W{0jLah6B_4`0eBPsXtp}|Qbqa)9prEiDQfScN-iv^tc
z8toqRDnMgV-AZ@gP!*4s5v26yn2wpajlU&vWe-veAUp6*zsfiWr3{Gf-7c5;RDrg-
zHr)qa6vd#mK+zGO*OTlkH+3)9Dp~?K`z6d=jF^7_3Q8N02Wd%sY||Wt$1cPq2$eiC
z5r}2^q}TsagF|s+ZsPjoPkGi}2FYHNf~`HAvOcN5MehOQ|etwmfgTb8EPM&Q8GR{lNPH
zXGIF1Pr`?{fAZg|t)SpLIAS8NfNIo2erS}M;J(#0VO^p|M`zc^zF;9MeVhVB`ZT<8
z5mBF%-hQ2E-AEqC%!TDPP1lFA{d%})`T8S0zpB=!N0-ho%^QDt2S@83Eo3}zF>g8*
zF4wiDdez+iwU>J1a#gKgc#RG3*;sg|U8A?(b^XsjYPDB({NbzFsc*8-skWx;n2)6f
zrJ}a%ZjB8yg=cdW$l~Mp{S+^Z7dDKPE2A5mPC6qMg`6&a+m`)t_{PSCN7fvn_U$@Z
zGsW}5^|<&w8wGxj$b3S6z8wbMSIog{xk7%J@rL~Hw%2NyP*iwBnO20Tm@hy0Ve&RM
zfaN9yScrH4^!(Yd0d903DLe@P=Us#z($%e1my9Pj<)1-EYMr}VwYjIFB|M7tU+wh#
z>=Mq`_k??GAOG=(6Yc~~Cz8ilKj0WpJ)y(Aj2J57i&Mg;TEcfGV$5#H{pUP+j~7n(
z!Y(ufK)LOANzGupSX$LIqk4?VOyMh0mV$!X1I03`>vFwOpYbh%=N0bQ0Gt+j8-?7@
z1vau8F7o^0o)KoD-&!iT`uB@tn;vOfn|LMcuQ6Xy;I6@Ip0RZuYTw7BBswmbAS0Ur
zQUwqYT7FEl+wSA9e@TwXX4>vnyL--ZTDeodFVbk&AIhJ(b9y+;#BAgjA2*Jr)CG8G
zFIuLJ8JvW4$_vB*Q!|94E}IjlPs2E0HccHxK{~CrJo2)_>urw2*9B@6Ll4`>zptOF
zldlU>kTP68tzSR>LZm=b
z88QM+uXqb%-wj=3h~R``D-w_UB*RD3=>}Lev(WNjj-3|Xx`pty1puHb-1JQm_W~v-
z8{SRvk<6|fy|lMDb=coO)VyidZs>G4*;2hX;dl;{f^{mxH|9^p@q}scY3|-$Jd3xx
zHYPkw21es_-KyKHHhb{Hqx-@U!_biVwK#ffqB7276|f#)59w^bG6fWq(th@d&sR@u
zbDT7LmVo>=YLYbMifC~>mm=~NgoR#DU*IRa_ZMK?X6&&`!QlJ~9ybXeRvgy1!({_$
z0XAs(I(S}TT@9exN65cFZ@!UNC3Y1op2h6qAH8RZ6ebn~S;TCV-W=-NDQ`F!B8Xe;
zlo!Lg-c?-%NlTe7mv!K;9=ipWCLCGqVQAMN-D6P*%|Gy)r(Di1vr;bt-4ghpok?nj
z+QejUvfH&oba4Vj`=bqTM|nd6Ks!;SC$2!YsA0swb}xohJ9dT1E={ST3C(keD_+^U
zNAYpiBh*Qh#{-l|a+b1{>7o|G9K5@a3-5!oE8itAfI+oTYJaxo8ENp1h1(GW%*W;u
z4J-5`2OY8XKYLl26TM$5+b=@CgX-mi6#XjwuS*aGTj6yxtul&i(LAxgtCp6`y19Dz
zpCp7lzK+svx)=4@Hli{B=Yy_X+wRKZBivd$3~~N}sF%ACdQqJ)J|vu)_*8-It<$)0
z3r)hI
ztiuIh8Vp)$r$6>3KPef#*J}PX-}8&eaL|VF)5S}-i(fuwmpK!N2HuWbXz1ot#D|7Q
z!Wt@OXjSa3p$|2LtwUG-R2BNAzKK_vY#xh%R8#GrW5o3W?0@qzM=k0lC*AA$9B()I
z(9Lv~>F2(#k*&Z=qcbh`66xEidE?Rj@d^*M$cC$}ZQt?)>NlSTg;(_}omeDGEVQu;
z-Av|BjnX-9__l9-qI{!wE>*rxempsAlls_gd3S%M9y&F^w+02iy-!4K)L1McrzPjC`(Q&p%OTtw^x5DfVh^v@u#}LpCjoT4zc`MpCMcyCn
zWAml@sV+a((+iM$K!4r=t}tT=KNY8Ski|2E)e-`PwQITF)YnD636szPrk{Wdo1&Xf
z>A7cW8n${K`7}*-s#xQ0f=XT<9gFSbtBli}CH|t52P0BprKIW
z+?PlwZ39BxD1m!!-xht|mwgTQgk|pc;?QEw&ApS11-|xdjm`Q`C1nyh5?SFCE#faB
zR0%;Bn*jj9M-)O}0AQi~Qb?oW+89BO$d-gM!F7>-c+znh8(@z#N_v+Na;RSkYZ(
z?;ZBDYj&rLGRS)x%a+fd-KKxx;*+V=H~nW_5;ttavp1EgA7!Ki6trd!ggxvV=e8S&
z%r#_ff(|fs3kIT+#-Y>c{nm37q^t6h|7$c65X3|m{H1VmaJ9HM*}Xjijm$GrbskrA
z?(k$EeY`W-X0b>kgmX-O)0$GJtQB$+Wg2K7-aD(^%cQ*sOF=*)SAi}do|v8!1hg;$
zKno+BPCO#D-b?F5)@6sR)BfQ(U|Q5{x6C^#FWcvNl}#IXG#3*yx4->~NVTRL7RX!$
z^fw}~nva%pBLpRQ2LGbs4yXOamc4tLr(#v_hesV{un0JHJ15bsDnu6-y+&|$5!UZz
z{mPOmI={R*7;5v+JXi29sw+y@d>#?w3f^B+6iU>Oifa=~ftNE>u8|eHU*p0V(?>H4
zG)s#MGnZ<-ES)_HX2|v#fTage+WR|UXwa3P&bT7a4D0>s!<qfJva0(698ZUQtJFQ#E9|lLF~?tHQ4`YbFUR*EWggx7V=@YYqED
zXq7d#hN4Di`L4be#3rzT>wo9kpX8=N_?ELrntgY7t%F#4LHN40diZon66q_
zJ&ft{C1lp`GXa4|)^1B<9BQvh2PGlP?Ue;b)o|+;ev}K_Kvgov`?{gyh|>08vcW`S
zfz#leCf+AUF|Z5H+)9hm4F=9c@yp+hl#4j!dXSDCvJMYbo?MlDn@jDytHyqYeRLs5
zz8+t&TokEzt^B>UvuMFVqd(7&=NqwT-4|bo4Azj`97>oS=rdp=Eh9{_?~wJ9L^?`R
z-%H)v+){q|ns6EY-GySCW4+P>pA)AH@)kxJg{^R5wql*jcZR#3uync1uioTgf0NL`
zPpN-{hGVPQ$>wZt6aK1XGF`*L&%r6sEZd492F-p$z^9k#Vaojtgs1-bR9o}Am9#%~
z_QQWEuEO3!g5iu1K$6dJ>bLl_RXxC&o7O?J>=VtKkdM(@pR7Bm=M|bNlVGbrwU0SN
zPmx9-7l5+4ot~5kth?|P;VKL~gm_>2z)9QA&!^{LKZs+zw{IxXGiK8y?$y~@1sqk>
z0Rb+NbMXZ%vKfROnE2f_M-tjX4&B;60k5x^oVy&)9oe(5b4b5E9(ED9q$p$0>*XZl
zXDWR*6##l#9Vj>;wQ@W6+v_*R^F3PMv{HuBHMN#&>gvo}4h_+eFljg^c}y9E4yzB{
zm#L&PT^OZ;i23oW-NxSL4w;`@7wCqw0Z>1d4!2kQZbf{mpl#A+uD?0z;by_vcegh!
z^`ghAR+_=yL%lsZ&q>DBZwZi@$f
zA3qpMn!L9KnnTO>cl)!=GTSGXGUFFmB=&%mwhbW2b;zDxCW?`k1xwO*Nsh&;x3@`8
zgvm33Z;8IRo^^&rYgA$Qs&1ICcGr%q3ufN@d7!-yHPed4!C-I6Cl3+EC)*;bfl&j%
zmx>{!TOAz;7dkTbWdh(b0P+t2w43#rQ+PKtB!{k@(L|%jUmi`Wk4bv1b(hj-vrkvb
zf%f^TC*AKGGutS);UV@E?pJy29-D2*l>kc99>s@=qb9?JDP}4A7Xd{=G2#WJW#rC`
ziz!YQeXfHuK(GHhppmXWxicZE&zi)3V*A)9zYi*x!7)TjpS{zj$8<X=@cyMPc1rFMEnL3-62GFUKgT~LAHag2p|_)tiPdp(3}3rkSWtO~_x$_@4rgk8T|Pk27}x@w94G@JUIG&>LI3Mh
zPZ%1&PI5@pe<>~urr{U&<8f~o7Iev)ey%B>
zK$99rxnz^t-P#96+j^BZO6@mSkZ2u?_?YS?`u9e(^Ml@;v#mCB^7WHOp3Q*)b>rw#
z=wceys%gqxAJEfGyroutXH(N+8gxptu|YECTY)RJ`e8y);s;3>f*Lx!)34k>1$~f6
z9h0DYuZA{Yb!laZdeJfFjh==;=#{O2I_xp5QweP6(MHgltVD=ktU|Yr|58Y(02}q}
z^e;vCSSXA^nRy2^pTqEx%b9hi3O=B3n;aKrYEhfWINx&1#Bk_@+p-u{z}|&FUO76N
zKb2@z@Agqw7I;vkU*MS@U$?m#ieGT~OHrD)30*wuY%gBfce!
z)Ka5cWZiz>PV(dJ)KI(vkthEK*QIrd18^v99?l+f3!rg+4m{Us&Ngx357W5a6t_kk
zwYukAk^jYuLN+K^=N}R@Rj?>>HS20Z|Mlu$gVov8x6w~njv1n1Z^RdBdeUQ>bJ6?h
ziYHm4(tDAjs4;@XX5N$iD5^TAubU<*(I&%O#&zi_2hfDRZVStlF~MxD%wGRYA|_e%
zJscxC
z?Pa_fIxWoF*@#ykd#v7){cw*Xxw?1dPf5TYEef*lgyusvL>FSXf$uXyG2j6m3pu~_sZrGeNfbKu3
ztDJQ_1`fczqRi^=6s;YOa)~5Wr$0$4S4X%w6Kj+w1c-h^y!-o*woL1|vkJdY*}^pP
z%n0qpfCk;+_0=19@>1+hC2^mZ?u>YF$(5N+9*82{0Jo(7Sy2WU7TQBEp2H%*`wvmD
zZFfjrAPC@((7uz0hDoz44;4DFjxQj3nsSDMm#?}OZi|f6y`rn)KjTk7284JcK=vUD
zV3^iX@Nl967Q$kw?csWF8*zyI*pShRJQD7Xl>2&TacU+Hv
zCF}#h7yH_WgyEUz1EjZ1F@WT1WeHI#>T7`mM2R8)s|2D2s{zb7oN+)_eck_;;`DyS
z2`cNYgt+n80^@7Zao!GvLRb7hX9~Zo8#;1$&0OR!V%0^RPuhsR4Jg2>-jsgQ=L90e1%ph_dvRBl%=w
zz3ESb-)krWT}X*=7Z{BtjIBb^y2-Iip~AMLrmB?rdgyJF+a-)N_C{to!g|@00b44^CqYow^I**zag^
z4#o^Z+vzDYPsDi9YS_AsB$f#fq@y@hUmG?YcJlZdXT)}DtXsaPe^#=%61`A?cbdoh
zM=Rx53%;(|m);C@bIE#chDmME#4ER4{5fq1_6ICXfJ6KKPRuKL8_U$w_G_7pwXRlFMM4V<<@Jll&$Z_=p(&{rK=2d
zTr?-*1_V6M=E&@XK(-O7{*8qKk&>7^RsA&SU73*w12Qayk)j_h@g5{NF#k9Wx%eXZ
zBZ}g`>+yH`zektxKW07@|JBcuG_=x4Y)`E>Qp6=hNvb2r=ucr**6*2___3V%b1VcG
zkLK&;JZxDvJ-u+Q6TVg-VLE*L%T)+I%qiW`aaK9*C|GGAKk8(SVYp?rG`i?ewQ18N
z!%fNa=GXbAG19UtO0ZJ!EI3N=`{nzGyM=$Aeu<+iLeDc1;EJZ7cLHftkfFzq3SAj@
z*6%Ex5mf!=-c0_@{_I5Bt75wW>^J?Q2((xSQY!H=L3!Q+**1qAi>%{kTv|zE0GLf<;y>R5bwPC=nu{G<6qOtR;#prVL1=f@SyXCH&J$5Ny8U2A(~uL^`xSSK@^
z(oe4g65*Hlj{oW_t&E4iKMGQo!I6N#u8Qsp+N_B>(VbyER`C&jmTeMjf)k9>7>bje
zQk
zKrBc9HW@rRt`SO<=>EyFpR8v#{-d}{dcE3+5N8o6>z#fk$)fzAL11@z9yrybxc|RP
z4&@WxbM!fz0k^Ok4Zn<=q)eQ2yBB6Mx=>zm^JdaBUrLLF{@cO8+1$b>T)IHSejoqo
z{G5?^9g}Q~u-JzT#d$Q`SeDap@Z_b~ppfE-Q<&RVWm(_i;GuglqhYDx5nZD^7XIjn
zCv_r~HAiGe%b2D|Q|`#VVPH4#v3EGuozzNJ;FkyWAQ=}I9BA*h^eK%rcc*~;`lrUr
zV%m&7A0Nuk{juLaASmFpuwLlT>69USj+`k3k>;@6yH^vpRM}rL->Q}#nE&=@fIm?K
z{Lh5mr+&eg#RnCe%Ew-I0KYuq5wV-@mySe%+WAHi3wy(M8=2q%tc|vswg27w?f-5s
zGyI+T|7WQCulCD-yxus*=&yzgSmWsi7}!(NVOFJHK6LaK3RiO88;1c#Z4;ynqEZt-
z#t||M@3e&~^#;aOUBGZ>z9uhn*dRD&Tk&VeKR%7E+cYY;vpkR~e}I;1pqCRItuOw$
z-Y@ANG`6Z({#2@rbs{JCgdaNM)F(3roPZKhxYW*tmMJL8vs}?|&2g*hW7y2;ZI7m3
zJ9IU6WIb~TblDz3`Ku1>KrO+~kQ-%z&3ce9`931EF!s}Q11)Gji=Q{guEw4ZpnMI8
zA6JUZz?}eIn~JF@wvyZO8CPBHOLOa;joz@feemaR$#t1)pbS`|BDZ?p4Iwr*ogeVu3djLozQ2ke@75bpXaRECzM
zrYrZdDRuKNMVoL<&PZgXReVSa2qQ`33lNJ+6dhL{PC~l7<@SUFW5`)`3=v3%kBMrf
zr~tA4xOR_y#&t5@)xzWGfF8}KJBCoc{q9-Z52dFxx|>{8m0QhGr#s#X;Se4l@Uygw
zY+DFN6~k(Gk8k~7if6EAbC9~R{WZJiX?;f8@fTZ3T0m2xHZx{HUPRX1tN#ZR^ez0|
zzIgoJ#wGmR$(gE`N*8yNzv2JE+oKD?}fyO>I~LZ7M6*v0kEEfyc{gA!I1T`BedWX
z;Byl6j@z4^JCwNUNMOGr99zl<*696sbl~P6o;L<;y#gYO;L8@ETcT&aVM_Jy>4c
zu7Ok1)W3hA3E5L1&LQDKmdCgh&AGNMp6n+A-R>XTfO1X>;T99Z8&Rhna4vBy@*kdG
zDW|Am%5WhK=qCbGYVSq~=R2^$ud@9mA^pHb!}E54l0MnBsF=}2i8DLrJf-$~BjWXG
z7xNx8b@T*USp_C!%KHO1gS)sA2gGt$Bt}+cQGZA5xS_vr9$2i5ndj8*jE}ezi|k^a
zff&oGY48pgt=5LV@E>!hUg~+*I5mSn&)-RiHP@uPwZ%HsCCQ33VEMFe{T;VSfndZ<394(fbD
z@p&y#-usa)(flf3S7R~YNEOaK?Rc6iVF*ozXRibbYTI?rvu}IB0kF-CUK7rK-iMya
z1nf@N!=uoRdwP4(1gC4`N^C2mQ1nsS2|n&o`0QC4hM#e1uXAwhv7R^ch%n!xt)xs7
zf|NQ~T`vl7zb=3S
zMX6{&^f(!qO3>3gK(T2q9XBj#Cd)3}x+3{4`h>CVHG`7R_w@vM{>@+aCTy(7ZqU^q
z6kKgp?pYIh7ytx~)v33@%K!v`HPgL{^P?N{8_+WtN|0^
zEMzd$ursKZ`|vlC=YW;>0o8BSGn;L@Tyx4DYhP^8Hn6TAKy~w=<#95!b!*oF7nSno
zbZ_C(WF@9G(|gXGye5r0+|KFTh4dcW3Fwh2bSazNcH-o
z-5n>ctG{f*u+~G+a${|gYNvH*U3Z?+fbt%8F&_?o?}a-nOjY2lgiv9S|PxMJo2y
z7QXP9lc35azMInYcf3RzM
zPvH`7VJe)2eXoHny$i)KGbYk@UU!O6m4raqvYfM=Q#RabXvt*t1w|nfFAQ@r+ZF>X
z^8!u4BPtDOA0I+jb|v)DbUA6zp70nKwN&KnxuMGW%;{zK-UbgQSLpv^?lw5s?nJfSdiX7~%Xn@D)J?LoY3;)z}HNnv7felk`P!zoPv5+xFDL)gn
z$aVDEgFZ8Leh8-lAmg0r5~-I);w0c+N>Q6V-5(c)=ggw
zms1+g0gvqPjn~preuw8X03LTD*K*1dNZ`sZaFyZe*v5lSUF{{mO9VdmA)@En((0Es
zt26J7iAm55cJI>naJB&ji1*qim4DHrLPN@s_eF9muZ#2gn1?4$B
zyBpRQvTD!pGP2?qWuHaz3s8ge(<{_;^>k(;}(c$u_=Sw;1;j7wj{?&3S{PKUd
zV(Wn6jpqz-bF
zqASI498@S~HXX%s#NeNv@W0f&RDZ1reFy?(RDl`j#-xxgQ@X=dm3vs0>!m?}s*u0>
zgDduIQ
zhgAT%$8Pt4D08zJdln_fAOtWHTSbe~#CF4t+Yz|Ns|~y};3>8iix1&AM8p3WLxqNI
z9@)?yRuICqAA5RX>k@q059tK|>R((fLxS;V*kK3R7Z3d5
zsTCvCD-Mzq9yMERlB&x&%a+J)ZrFm7^w%UXEl!sc4LR^U&1%sQ3yTTRnI8<>wupR=
z7Q#L4gYdHzDGS4W+E=p1uJ6RiU)Y9jmSkZTEuL!YHMTKc&vECuPPSr=>}Tcq50rLl
zIU)Nm-d|40)S}g!COu#X&h`ey++%zvFEa$=kM}|Z@GFd*eQk@1t@M;uiv9A
z%7wLvD8jhrP)OM{0~qUtklJX32%G{E5%GMcm$TDP^t5LkL!2#dsID2>G({^fTm&&s
z3PKB!$$I`nemJGnU2I3^mX4K;Si&urV~f7b(gkDC2#ba;ZTm7is_@x2fwsz&&V$}w
zDTF%W-kMu%A%F+t4^Qe_pe5*w9NJKU9p5f^hL;8E(VtjmYKV5&e!kFmBAgn+uWc|o
zh`_sk;}k(-
zpwJ_Tm=7YMb(*|;DYE@g`jbfXd8^aQwA07+-1zyP-Dihbrx$xm(;cbVLM2ib>F}Cx
z$Q!e8Fmi@(&@3mX+6P>omCsqakI$3+R3rRBeQzBybq9DjrJ-Hsg+jO8UbMe7eeET{
zkZDMi61jc|dl}&74PWBAS|oywKm;d&1<)I;VrjH7=3NZUgQeZKHV`q_GU2Q>`dwFM
ze8aX&YWlJu2>$xsl$m-ZFM#kkV?o%l9a~U@KHD7S?nrP24BIz_h6erE!uO!J_hiGD
zLp>reP3FejDfGgt!S<^U7&$FK%0fOH6XQX63V%!VXK9OuB%l|gM*pe!@^)KXpg$Cw
za&@^KRN?#=rlwlIKJ&2_G*&nEz!g}5g;Qgt%vIo=w>{k{Q-7FU#x6iP1I}-zwOO6c
zlBxzfmTR|BFG|(9!M476s79#py7HKNR_64#r33xVixfYgPb3fR&FTfHR4pn+1GtxB
ze_Nt;(?n6AZd)H=c#)(~$hw3;^zYa5vnwKlTHHK66j0&P0PR1TnBWRJhQH{C_8ew|
z^DJd|{a-9RWS=P-V2E;qpX1EK-jBqyZ+oH=;R}8Y%(0gk#-nHTFaGIE)#VBz?>Cs6
z1@Rpabwlfk_5yCMB-JY<3DM4n50oPE{T9vu0_a@ac_P$d`mHF#-fLDX5au5Bd~0lm
zC3iQU83`*5qe=#Ih)qcHCv%_^84KRjSZkTwC8(=LVQi3?Vd@n`)CpdXk-Pt
zZIrN2%D>Ge(jE35Lw~MoD*}!0Why73nHm$Os_HOU*eLJ{JHn3C=
z3NXNhFCvX~o+f59U_v2923Pn;c5|$o|oK+ln#E=kVY(pEgU~bI2
z(84&j=`yGh4#FSvn@jhzZw+u5?B|TwdIx!74IM{ixLUS#hoz)Gx-yM%Mig|bVmOND
zGX5mhHY2MAWcaiSErqeL_}okSj<*KQtV|z+&tKfbZ9%dXsqyRKY1W;57)wVeo~4kgh*TZ6IN)6w}~+bf`Z&4*D`X
z^_Ye2!h&AtS!?W+ctv_5Ul6uJFj*gZ;VGCGT
zZLu8jrPlr~PkE-UHPx3eff>Pyp&Ra%S={+&Bd-(DnZB1On$`G|w>k)PHRcbGn>pTq
zid_fd))`fGJ^@37F!^cCh&vUAJvv#*-Jeh=!3($}2n9w2xnh5#q&R-)ezf+;6R*S2
z_1ib}*cPyXDFIF}>Y8ivMe6YiNr`uy`6Q3BJG><~B>2eZaDRRGPI_7qSD6s8312|Y
zOY+T%V;LXAl)1h=+(mQ)=rRM_ma0w#o;_zO=Dq5PS9j*b#&g}TBI{aTpoH3qs~#ah
z2a}55S3D{cfiObn*K9o)l`4&7okA?M^}WTVBUD)mhy#=#x9n$FTDc;O%}U!?q^H((
z%+Gxmx5A25L8=BGFdti)mPqUgkW)xRto5t5c$69&(*Ve$2X|fHI%ip#1ZpmeE0sl~
zXJn7y?0!QXr=zDXj=kzo@xtrQiYt&
z+0#Q+LXPwkX0V{6g>T&8kB2sGe4=LF^3Bcz4Rc%m@15z9L|WjFwz1dNDL10Gr+T0}
zX8PdO>)?{J=Av4PdRT-`m*lh-WVDyc=wodj9
zUb_5w_*{AMNlYftM7PyPo<`U5aU_Q)NzPGDQq*8@&`8?FGQyazRqVQGxr9B-ZOc149J3expj%Z9tZ9watm
zLMlgD9>Ha^o5;*U-`?h!(he~$2F5vf=Pxh*2wzaY
z&oDlQ9mY?jS1%^m&DX2aC9_T{bvv^)zOd(}7rnOa0VYMz>!p5ow_~kDVQz%ivpCd?
zSOa!`M>&BZ)7Bzp(wpX5W1b%TE#`aWM-FeoD)R@9*a439KO_uyoK_1QfeU)&1r9yf
z4d?JX%?j7D&T)SS9CijZb|d)#@35~FQ*1hziDj(&rPj}wiZLGkpsI)Mo{;e{^LCY!
z+V=j7kePII|6c{lz#%=G-?)QqP$Pi0Ac?%>yUNhEw6g)~px)11d)cA*-Hqf|i8bEM
z36-bCGh;Lj%vR1CULd6P@&mCv#-#Oi66o-9;wyqBs_TQ&AD)UFA%LH{!rW{D{lGLx
zcp#Ew#@W#G^LpiNDW@Q-FQuRA1RvebqYOP?JtD&ZJr1HqDCAXp3eJOSjM;!K$Tkk(
zgtx7qoe36QhG-C~L#}EU>Lzg1FtY$Ho&OYcQc5C!)%4&$Q6&g}yZHw4<`E0m(~5r`()5J%52Qh|YeQubCVlOctV@G(DO~
z#s)RD=P_mumE3Cy?NkYAEX!oaO4Y
z=JXallLjpXr218=$>Y68Cn1L>Z%&k1*Jo8K=STvIU_6Bog#eLV71tGcT@$S4ac5&Y
zNLnFh*Y3P{iNvU~zjYt4*s2rAFu1^f*5cI9=x@QlR0EBMy^)9Zbj)tzv3KjjKBbPC
zhd%nlSBkqP=$fI#;sg4*HuRQ!)t#jMBZ~bV4d$*mNak
zU1-KkP$XaiB~yag-#(PSJrWJnnK$F&HhP-I@EZ&OF+Qlwa4YmTNAs(Q^`5X%+iznH
zs{LhGxv=Zy|zWG_3#92@kvfQ#Pn&^u)@#jCF3g(XH
z=(gvyJdV@M$-bf`|knJG&phc$u=O(lT~eJZ4iKdIDIZv)3^E
z%5@h>*}V#VMCv#d>W+X@d4FuUlZenU#ANOEbVF6$JdBG?kKc8ttF-aO38CJIWI+D>69y>S*=;IRFA2-7
zC(Ai#zyY0b2>F3LJx(OIMX)>FPOgkMR(O0AZag;tY*H@Hl|LX${CIsTZ@9KGsgs<`
z$S|)RR_O6ko!iv@3gEBjFX5vp@HG!=b_>d8FDeouQI3$HZ#}&ecuSJbJK9JbMxEpY
zb3x8GHQ5gNQ?8{a?|44#pJ1KJnuFe3-NB`WI>pZCCj^C~Ct+wbv$VlqAutK@;#M4d
z+6R7-YsgOLDL$GZP2s`8DcGNik+!NMdwd}t$NB9AY-C35qz1L;g~C{w0ObA-CBRmr
zoPjg`uG4hLzWwz`!ntkkHOMj_X=v%uEE!=CXt#_2Sd4mHr~W6}a?y;u>C3nwV9dv~
zpM{TrT44>BfGVkACsX_Lx_w}^(vtC+PoOkAxS^jl?Gi&N?M0$RG`(3sQD)ujGr70q
zhLi@)(PeKhoO#Dd#8IfsTduB*UCuTw`ANBJdFzpp(I1{bc*8P792UwIV?!x^aE2aR
z%kKO2KRlg=@?=ZC;X5AC4@JY_;KjV5Ptz)s(_eOWavGE7gN*zS3f{L&0)E{1KYBT?
zB&lJFti|MAT2I`Z*o`J#jOSw?YygS|jrQ!3
z)%u=(SO@zzu?KR6^XB#9v|>=*BRS|~*u9Wvy~7sa_FPU9MHcQxIgX*~8`22$;)#uB$
zNvI?=b_wAFZCv3eo_N&OzCth^#2VM#Y_B^t#y=)pQ(iUq_eWqAD)Yg9-Kl3Y#h)Zpq+6+AOS=U
zLY0*KtUi&)67W5@*?>0Ys!c9(8k?pFJP12VEeZ5S`U;tkZ_4@oN$pXoOMfAS9VW$4
z$6bSPGN9RnM-iB^#>!*65j8CAQs2^ywrTf|S2Vw>?eWa`EP6ib3U(+H;$(1D
z*a}K|v!;TNYZCrH;Y35AF)kDOAYQf0u;#L73?C0>!eZQ@t6ER?r9==U#eU_Uv>U
zUkFqv4MtJ?An$$T*Mys~KkT;FMvzI#lYsTMG@LoES4qR_ljHJGM8sp!QmB5@(_HhQ
zT>abkk!Ce);8ZF&(fF?(d$~xoeb#7q&Wf+nn3Z*|zvGu?&-Cbmu4W$IT)+jQW%^&D
zQ}=z@A?%kCh=iUIIkKbWw(a#|=x)jx~lGeLi)aJFs1TPNXA
zMDo^XKz(9277gq9!y|-t`A3PzTQ}|@$Os2VJ&+5|9(lU5`I0OAx0XG0cj#jvkz~+<
z9S4&uTS`?^xD{iW32jx{njnN7+Ey)Q?Y7YXPRKil
zzCr2jBi$Xk<}%D(gIYrS5WRzZ;UJ2cqS&gW50;Xl3#2J5%2pQjN2IPWYx^d`C
zIsVI;DBYdyh2w@mCGFz6iamtt#vZ6UDuonjOo+2-3sO)NoHD6pHGH22I^Qbc`i^k=
zOXlOF@B;vP?U3a8-8r%b_jtUd57|LvM*VHrXCHMlFOugF&;B2RE0>G5=l}KR@4wGW
zJgL}%K8ZL;&WW_WO5sG~TK7-fz+Ax4o=F9855U^Fs+=Gc|0?$xm%px_(%t0blEEn&
z5m*(^?qX*N(Ju;h!uRXrs29Gh&wn+-G@xtEKfzp=={nH;RF5|34?-*a7&8$fE{wli
zW)nlR)J~4ds1A3_fc)BUovTMTL8K^I)B$bNtVw5efY2jXl;#KDUv+XHAoj=#ZzUo$
zf#yKa0uoO$Q5%0$iuy76cbq?;k~Zpzd$oW8`+ry#y6RT{jBZ+x7**)$-Oipyyuag?qezn-J0
zVasxV_`x^umrWpSWD3d&ft+S!8hg(gDQI5_V4l0tN+No0*nQdn{T*Xaj!>iu(5^BD
zW<1n3?-ZK~Ixojw9QfcLM4NQ~X@q@tQfIXzT0kNwQAe1V!6q^@HbwX;3xNj>M9x%>
zr5oXl&~ep!fs|72Q3;A^oBT;Ec^@>W{K{1*fWRXiEoj!CfBvM$NJ;L}A@TXkRvXxp
zg=td_ITjNNOWBn%4&i#cu$vDtX}CH3%J53-g0a?!?UbKIe%pml-aroU*Hg$SUo0O=
z9%00Il!JCgK&U6CBF;bQ`PlyXqEvSBF-J}4jEn2is|`p#pRdqKhso#Mb%NBaM%)tp
zT|vs7y!>FUWndo@`T@bjti*=1!x#oYZ=n){NCA`YzO8$i{h&XT@B6a~pSD^-&$X2N
zoq^eDzz`iOtU_2vht+V3N{_yIW{4HgOAWHH=GRAl(L-{rQ|&yB>DP!Ay&KR}jpR3_
zHd(+zxbFh(D#uJ!?D(Jz(g7OfDpvA;9oy66`M-4y`p@tGL~TP_RaIqaka2#s`C+>U
zJsS;=9=Eb5B?@B^1=3dWg#4jExQ$>X4tnRp5IYvtHM
z=%fUA{*{rYb{xF$S?Xz{G5-)_v=1~NozBF@IHg|IvptK{0>(S=ba*}!lratLpEK!P
zEu!`$bk--1FLeFx&B;l(nmVac*_gSu9k5I>E~w6$j6Lf|e{h`;rJ7&wvbMrlqP%}?7E-=9Ff$z{d60p0ra=!aCDc4PU7ji}>H3z#v{nGR@k^>>#O?*ig-7w^
zkdQ>*d{za!x2S`ym?35?a=)EsrAY8)fP@(!nk0}0Hw(B0xGNw`URPaw993Rnkm)G_
z_|6*Y?%)alsfg)uKmA|MQzHF^uan)C6Mkggh?%T-8@KVw^;ATN##%!)*6ch;XgqOFKvo?nZ0>qM8k
z|KYhT+4l(-%016*gNtYjElfQhZYM?QDF2cC*|ic~qG$b0tU
z(GR`*%;ISQID$6=YZ~iU|Ni@+(hf7xJA+k$e#=qogU18j%+Q&Pih(n)36WNyk9P%E
zu0N4W{SrIcGrMwIlXjaUPBCbP??)K*hnCMgb|1GaBYVDW@M~X^j+`ZXH~}NJt>dd9
z)X_j-itxREvxY@n&1DH2w`hnrpi3W`&;p><+$4!C!TYxRcJ}EB-utgFT`zAp2M_uA
zx!eP%uJ}vG1f|PHCOB&F>>C7}Ef|ZyL3oN0IAiAdjV&9ZZCCL9;iNnGd*f7XTQ@{U
zsxhYj?obn^Yst>8*VY^!v134JVk=_0T?55W$fy%bc0KgAp}MLmt_GBiFP??2^xw`k
z$h^%Y(t=+2W#UlEuX~X11Wkwvy1`KU$SfMF&$tEfWRKj(jZsaK&L4xp1h8gQ1qpkUZ1dJ52L|Kop!?zt8#n}X0^Afj|tnGc8PA~wR
znFRm?^3)N%nRcmsAKf
z{;r`k`Dj5`#YlBq?FtpHT5{-E!J1
zU3y@h)w2*2#dxX1*Y^>Hw5>1R`8qfoKLWm
zVEGJda&-M3)-;A1ZJ${7V+gLeRc}w%!J9j6qsj0vTMdSH4#03^jO$hR%SeZ8$I)Ne
z=SJ&(^4uuUvs;3kBO>JUbc_64?^pGgAG7~lB=7Lt;nv!cE+_?UI5q`1pe33l8c`4V}^DUM`w1BPB02B%cEdPy>yvK
zi^7XVIE!Afx0`%?_z5bswq#1FWforAtnjRM6ClBUX=_eBJ~vtpEtoHPm`U>-=HpI|
zqQ@(CoOTQKP?yoWSDAq=t=1X%i`=2{Hm_h+UFn#BN()_txRH#{8%
z6@q}+mcNNWTtIo1YQ$ab2C=6W7CV;dFSOm(I<5_R!m}JKX<}yx-tvJufDh*_CMH4~
z(yi=_E+3#@H*po&US3-3)dd8AqY&wT?-Tof=kx#c{U<1#^Cu_AMy!nkk;>NNKeOgy
z^$)+#Q?~p(!^8W)bp{H6XO%$73~caUcHp7+_=H#5<5_nBDZj~T9b&+~?`n2g{HlYd
zq@sk`=&W-!v>~8AW^RGGA+XPM>}lR*T+5#PD7_*(&$&ka-Cj|#uTWHcNn(QkoJD$+
z!GoS=9-VgFTL{1^Y0$+>`Wg53qt1+Bg7lS8_Tpt@lyr1k
zNnVf?TMzYoKm{^BQlm}2MesJQ-!}2EOYQ*!IsU=MLL{h+X3v_HhiO{MQ
z<{{=9+xg7;m4z-VLWBCNS@vbKkJH5TIxJdGu(adjjVS5ZQ+atu9A)0{h{w9F_l5vs
zWRYlXlX(~PsUm~WGFlTd#Z!J2_kCgwBUQ`aR0CV`Xu`goK8_duJG(JJ+d1HnC=EUR
zSRJ=1wh=sW?hy@_-9mMQzX|)%*rN41I${$L7+`hY%oOx0VJlmWS~(8BGF#J|#mo@O
zI8a3wl4FEudk1OW}HsXu->u|NI8A62{WoV3j*$&?AF3hMuZuLcU|-i
z|2dC`!5pjYU((n3HemywV2h*KB4kfkM50`vJXB(I&&RYuYM^#2%!@BSVjBs6h$y1CY#H?mACa9M5LItu@|L1_n25!heUo*H#B08lmNq=iG|}
z-v-yRSidLqOD-XAJb7vGR8t~Hm
zXE@aPhZfbRDr;9VfzmvYyn?b$_VWyHBDe-&l^?6cazL|7cIr0>Kjvlxfmb3MXqu;?;>e7eW(wd~BMq%;jSwy~W6w
z;E^P*cU>T*vW;*Yhi@XANV-8iFKE7>K0w&*dDNjUJ|iAI1CvQ#P3jF()lkLpO`Cq7
z9#>l`lFxjeb!(;Q-DxIK*lV@|9*wDJfatSaMYfi=D$|NgNVfKGus_=Lx)m4tjTecF
zXtTX<=>&1^33S7)Im<&^k*9sMeob<+TL_4;ykV=G!WoCK6Qlz5pI7iEkjX
zOJp00aQUHpccleciPD>$m_Nemm0N0l*QY@WGx`JY)g~75)`=8
zj!Vpaa5dsnN-U)GL};#6rt^b{PpKa+`b@`a)iA+i;(81vW5#8|ncoK8s7LZ^*ng+w
z|G<7+)D(lo~jk4dTr5KE+8UBV<*r?FQISZayvitU}gXCV4YV1
z0v04S0`FPLyLVYV`trO`aQ{}~*-0?prr}+_$)g%LtXu(3
z6JX}$PMA;)s>SwWg4&w@GULV}&df`}}3#lt_VS`Rdj6F?v
znX^z=D^pt>I&s&2Hn#r*kST-t)f(W4wyhA*(b>5cVTEUlk=+G=8N_L`O#P`>{NVE51c?}EO6{j0-@{|Xi#wTe#8
zUZI&eCzTh#DtwPVuyE9iRW}<{{&5cT6g;yTG5sU#owm{9D(IMdSeUACCoT~e;+HP-GIb~bU4_W5?S4$)wx!fE>p;7sa-fe5=0MkO`+
zFAiirMdTVz9{A}S{-^_5#Vb|C8{SGmM8|xWjrI7DQ6I0J;_YJG%MR^{Z`Hm+cDXF~?SV@#9LZF{JNk-ncuZc0%wL2tTSJg(p@C#@=M)LMI
zyddg}95JugyB%qhNi#OX3PohyOwPButz=!9t`m*dV9c^y03M|=ElNc1qhEfr=RB&@
z`P|ld}O_@elYOnRM|TP_$x?GL~7+2opw)yNU_FKBY1=vxAc`d~2~M1==rj=IP;;
zh;pTd6T^5-p18N|sw{&Oe)r35003w_e;1w3>TPBuw-heTM{uXn=^Xe(U^Z~fj%kr_
zt)1T`6^8CuS-ABrS5$}gXuneaa!Fqz>1LPKDGi)pNpbyP5$VT3;ap91N|1q|p{;@a
zZ`mB_xFzr^-#d6Qy_xU*O$m7OY@Jd%