From 625944fb8deac6974182bcaaed299cc009c84de4 Mon Sep 17 00:00:00 2001 From: Lupin1998 <1070535169@qq.com> Date: Thu, 30 Dec 2021 09:52:53 +0000 Subject: [PATCH] init --- .gitignore | 136 +++ LICENSE | 201 ++++ README.md | 109 ++ benchmarks/detection/README.md | 12 + .../configs/Base-Keypoint-RCNN-FPN.yaml | 15 + .../detection/configs/Base-RCNN-C4-BN.yaml | 17 + .../detection/configs/Base-RCNN-FPN.yaml | 42 + .../detection/configs/Base-RetinaNet.yaml | 25 + .../Cityscapes/mask_rcnn_R_50_FPN.yaml | 30 + .../Cityscapes/mask_rcnn_R_50_FPN_moco.yaml | 9 + .../detection/configs/coco_R_50_C4_1x.yaml | 4 + .../configs/coco_R_50_C4_1x_moco.yaml | 4 + .../detection/configs/coco_R_50_C4_2x.yaml | 13 + .../configs/coco_R_50_C4_2x_moco.yaml | 10 + .../detection/configs/coco_R_50_FPN_1x.yaml | 17 + .../configs/coco_R_50_FPN_1x_moco.yaml | 9 + .../detection/configs/coco_R_50_FPN_2x.yaml | 4 + .../configs/coco_R_50_FPN_2x_moco.yaml | 4 + .../configs/coco_R_50_RetinaNet_1x.yaml | 13 + .../configs/coco_R_50_RetinaNet_1x_moco.yaml | 9 + .../configs/coco_R_50_RetinaNet_2x.yaml | 4 + .../configs/coco_R_50_RetinaNet_2x_moco.yaml | 4 + .../configs/keypoint_rcnn_R_50_FPN_2x.yaml | 16 + .../keypoint_rcnn_R_50_FPN_2x_moco.yaml | 9 + .../configs/pascal_voc_R_50_C4_24k.yaml | 16 + .../configs/pascal_voc_R_50_C4_24k_moco.yaml | 9 + .../convert-pretrain-to-detectron2.py | 36 + benchmarks/detection/run.sh | 6 + benchmarks/detection/train_net.py | 77 ++ benchmarks/dist_test_svm_epoch.sh | 28 + benchmarks/dist_test_svm_pretrain.sh | 28 + benchmarks/dist_train_linear.sh | 24 + benchmarks/dist_train_linear_1gpu.sh | 24 + benchmarks/dist_train_linear_1gpu_sd.sh | 27 + benchmarks/dist_train_linear_2gpu.sh | 24 + benchmarks/dist_train_linear_4gpu.sh | 24 + benchmarks/dist_train_semi.sh | 24 + benchmarks/extract_info/voc07.py | 20 + benchmarks/srun_test_svm_epoch.sh | 24 + benchmarks/srun_test_svm_pretrain.sh | 24 + benchmarks/srun_train_linear.sh | 31 + benchmarks/srun_train_semi.sh | 31 + .../svm_tools/aggregate_low_shot_svm_stats.py | 127 ++ benchmarks/svm_tools/eval_svm_full.sh | 40 + benchmarks/svm_tools/eval_svm_lowshot.sh | 64 + benchmarks/svm_tools/svm_helper.py | 171 +++ benchmarks/svm_tools/test_svm.py | 174 +++ benchmarks/svm_tools/test_svm_low_shot.py | 212 ++++ benchmarks/svm_tools/train_svm_kfold.py | 162 +++ .../svm_tools/train_svm_kfold_parallel.py | 151 +++ benchmarks/svm_tools/train_svm_low_shot.py | 144 +++ .../svm_tools/train_svm_low_shot_parallel.py | 145 +++ configs/base.py | 19 + .../cifar10/r18_last_1gpu_cifar10.py | 75 ++ .../cifar10/r18_rep_cifar10.py | 49 + .../r50_last_1gpu_cifar10_from_stl10_lr01.py | 78 ++ .../cifar100/r18_last_1gpu_cifar100.py | 79 ++ .../cifar100/r18_rep_cifar100.py | 48 + .../cub200/r50_last_2gpu_cub200.py | 84 ++ .../dogs120/r50_last_2gpu_dogs120.py | 87 ++ .../fmnist/lenet_last_1gpu_fmnist.py | 49 + .../fmnist/lenet_rep_fmnist.py | 50 + .../imagenet/official/r50_last.py | 76 ++ .../imagenet/official/r50_last_sobel.py | 76 ++ .../imagenet/official/r50_multihead.py | 89 ++ .../imagenet/official/r50_multihead_sobel.py | 89 ++ .../imagenet/r18_last_1gpu.py | 79 ++ .../imagenet/r18_last_2gpu.py | 79 ++ .../imagenet/r18_last_4gpu.py | 78 ++ .../imagenet/r18_rep_imagenet.py | 71 ++ .../imagenet/r50_last_2gpu.py | 79 ++ .../imagenet/r50_last_4gpu.py | 83 ++ .../imagenet/r50_rep_imagenet.py | 71 ++ .../mnist/lenet_rep_mnist.py | 55 + .../pets/r50_last_2gpu_pets.py | 90 ++ .../pets/r50_rep_pets.py | 70 ++ .../places205/r50_multihead.py | 89 ++ .../places205/r50_multihead_sobel.py | 89 
++ .../stl10/mobilenet_last_1gpu_stl10.py | 79 ++ .../stl10/mobilenet_rep_stl10.py | 64 + .../stl10/r18/r18_lr1_0_bs256_head1.py | 78 ++ .../stl10/r18/run_stl10_dist_train_linear.sh | 35 + .../stl10/r18_last_1gpu_stl10.py | 80 ++ .../stl10/r18_rep_stl10.py | 64 + .../stl10/r50_last_1gpu_stl10.py | 82 ++ .../stl10/r50_rep_stl10.py | 64 + .../tiny_imagenet/r18_last_1gpu_tiny.py | 80 ++ .../tiny_imagenet/r18_rep_tiny_imagenet.py | 66 + .../imagenet_10percent/base.py | 66 + .../imagenet_10percent/r50_lr0_001_head1.py | 4 + .../imagenet_10percent/r50_lr0_001_head10.py | 4 + .../imagenet_10percent/r50_lr0_001_head100.py | 4 + .../imagenet_10percent/r50_lr0_01_head1.py | 4 + .../imagenet_10percent/r50_lr0_01_head10.py | 4 + .../imagenet_10percent/r50_lr0_01_head100.py | 4 + .../r50_lr0_01_head1_sobel.py | 71 ++ .../imagenet_10percent/r50_lr0_1_head1.py | 4 + .../imagenet_10percent/r50_lr0_1_head10.py | 4 + .../imagenet_10percent/r50_lr0_1_head100.py | 4 + .../imagenet_1percent/base.py | 72 ++ .../imagenet_1percent/r50_lr0_001_head1.py | 4 + .../imagenet_1percent/r50_lr0_001_head10.py | 4 + .../imagenet_1percent/r50_lr0_001_head100.py | 4 + .../imagenet_1percent/r50_lr0_01_head1.py | 4 + .../imagenet_1percent/r50_lr0_01_head10.py | 4 + .../imagenet_1percent/r50_lr0_01_head100.py | 4 + .../r50_lr0_01_head1_sobel.py | 77 ++ .../imagenet_1percent/r50_lr0_1_head1.py | 4 + .../imagenet_1percent/r50_lr0_1_head10.py | 4 + .../imagenet_1percent/r50_lr0_1_head100.py | 4 + .../semi_classification/stl10/base.py | 69 ++ .../stl10/r50_lr0_001_head1.py | 4 + .../stl10/r50_lr0_001_head10.py | 4 + .../stl10/r50_lr0_001_head100.py | 4 + .../stl10/r50_lr0_01_head1.py | 4 + .../stl10/r50_lr0_01_head10.py | 4 + .../stl10/r50_lr0_01_head100.py | 4 + .../stl10/r50_lr0_1_head1.py | 6 + .../stl10/r50_lr0_1_head10.py | 4 + .../stl10/r50_lr0_1_head100.py | 4 + docs/CHANGELOG.md | 37 + docs/GETTING_STARTED.md | 287 +++++ docs/INSTALL.md | 160 +++ docs/MODEL_ZOO.md | 184 +++ docs/relation.jpg | Bin 0 -> 330127 bytes openmixup/__init__.py | 3 + openmixup/apis/__init__.py | 3 + openmixup/apis/train.py | 235 ++++ openmixup/datasets/__init__.py | 17 + openmixup/datasets/base.py | 38 + openmixup/datasets/builder.py | 43 + openmixup/datasets/byol.py | 41 + openmixup/datasets/classification.py | 44 + openmixup/datasets/contrastive.py | 33 + openmixup/datasets/data_sources/__init__.py | 10 + openmixup/datasets/data_sources/cifar.py | 227 ++++ openmixup/datasets/data_sources/image_list.py | 38 + openmixup/datasets/data_sources/imagenet.py | 10 + openmixup/datasets/data_sources/mnist.py | 112 ++ openmixup/datasets/dataset_wrappers.py | 55 + openmixup/datasets/deepcluster.py | 33 + openmixup/datasets/deepcluster_contrastive.py | 69 ++ openmixup/datasets/extraction.py | 19 + openmixup/datasets/loader/__init__.py | 7 + openmixup/datasets/loader/build_loader.py | 133 +++ openmixup/datasets/loader/sampler.py | 302 +++++ openmixup/datasets/multi_view.py | 65 + openmixup/datasets/npid.py | 25 + openmixup/datasets/pipelines/__init__.py | 3 + openmixup/datasets/pipelines/auto_augment.py | 1053 ++++++++++++++++ openmixup/datasets/pipelines/compose.py | 42 + openmixup/datasets/pipelines/transforms.py | 146 +++ openmixup/datasets/registry.py | 5 + openmixup/datasets/relative_loc.py | 65 + openmixup/datasets/rotation_pred.py | 45 + openmixup/datasets/semi_supervised.py | 157 +++ openmixup/datasets/utils.py | 9 + openmixup/hooks/__init__.py | 12 + openmixup/hooks/addtional_scheduler.py | 645 ++++++++++ openmixup/hooks/builder.py | 113 ++ 
openmixup/hooks/byol_hook.py | 43 + openmixup/hooks/deepcluster_automix_hook.py | 162 +++ openmixup/hooks/deepcluster_hook.py | 135 +++ openmixup/hooks/extractor.py | 61 + openmixup/hooks/momentum_hook.py | 261 ++++ openmixup/hooks/odc_hook.py | 90 ++ openmixup/hooks/optimizer_hook.py | 73 ++ openmixup/hooks/registry.py | 3 + openmixup/hooks/save_hook.py | 39 + openmixup/hooks/validate_hook.py | 93 ++ openmixup/models/__init__.py | 10 + openmixup/models/backbones/__init__.py | 18 + openmixup/models/backbones/alexnet.py | 81 ++ openmixup/models/backbones/base_backbone.py | 57 + openmixup/models/backbones/lenet.py | 75 ++ openmixup/models/backbones/mobilenet_v2.py | 274 +++++ openmixup/models/backbones/mobilenet_v3.py | 204 ++++ openmixup/models/backbones/resnet_mmcls.py | 965 +++++++++++++++ openmixup/models/backbones/resnext.py | 494 ++++++++ openmixup/models/backbones/seresnet.py | 223 ++++ openmixup/models/backbones/shufflenet_v2.py | 299 +++++ openmixup/models/backbones/wide_resnet.py | 302 +++++ openmixup/models/builder.py | 56 + openmixup/models/classifiers/__init__.py | 11 + .../models/classifiers/classification.py | 107 ++ .../classifiers/mixup_classification.py | 155 +++ .../classifiers/mixup_momentum_V1plus.py | 336 ++++++ .../models/classifiers/mixup_momentum_V2.py | 525 ++++++++ .../models/classifiers/representation.py | 71 ++ openmixup/models/heads/__init__.py | 12 + openmixup/models/heads/cls_head.py | 146 +++ openmixup/models/heads/cls_mixup_head.py | 266 +++++ openmixup/models/heads/contrastive_head.py | 38 + openmixup/models/heads/latent_pred_head.py | 37 + openmixup/models/heads/multi_cls_head.py | 83 ++ openmixup/models/heads/pmix_block_V2.py | 486 ++++++++ openmixup/models/losses/__init__.py | 13 + openmixup/models/losses/asymmetric_loss.py | 207 ++++ openmixup/models/losses/cross_entropy_loss.py | 328 +++++ openmixup/models/losses/focal_loss.py | 118 ++ openmixup/models/losses/label_smooth_loss.py | 169 +++ openmixup/models/losses/utils.py | 112 ++ openmixup/models/memories/__init__.py | 7 + openmixup/models/memories/odc_memory.py | 233 ++++ openmixup/models/memories/simple_memory.py | 65 + openmixup/models/necks/__init__.py | 10 + openmixup/models/necks/conv_necks.py | 120 ++ openmixup/models/necks/fpn_automix.py | 41 + openmixup/models/necks/mlp_necks.py | 445 +++++++ openmixup/models/registry.py | 8 + openmixup/models/selfsup/__init__.py | 17 + openmixup/models/selfsup/byol.py | 113 ++ openmixup/models/selfsup/deepcluster.py | 131 ++ openmixup/models/selfsup/moco.py | 218 ++++ openmixup/models/selfsup/moco_automix_v2.py | 1057 +++++++++++++++++ openmixup/models/selfsup/moco_mix.py | 360 ++++++ openmixup/models/selfsup/npid.py | 130 ++ openmixup/models/selfsup/odc.py | 148 +++ openmixup/models/selfsup/relative_loc.py | 107 ++ openmixup/models/selfsup/rotation_pred.py | 94 ++ openmixup/models/selfsup/simclr.py | 109 ++ openmixup/models/selfsup/simclr_mix.py | 265 +++++ openmixup/models/semisup/__init__.py | 9 + openmixup/models/semisup/dmixmatch.py | 403 +++++++ openmixup/models/semisup/fine_tuning.py | 95 ++ openmixup/models/semisup/fixmatch.py | 184 +++ openmixup/models/semisup/mix_tuning.py | 473 ++++++++ openmixup/models/semisup/self_tuning.py | 255 ++++ openmixup/models/utils/__init__.py | 29 + openmixup/models/utils/accuracy.py | 63 + openmixup/models/utils/channel_shuffle.py | 29 + openmixup/models/utils/conv_module.py | 163 +++ openmixup/models/utils/conv_ws.py | 46 + openmixup/models/utils/fmix.py | 236 ++++ openmixup/models/utils/gather_layer.py | 223 ++++ 
openmixup/models/utils/grad_weight.py | 59 + openmixup/models/utils/inverted_residual.py | 115 ++ openmixup/models/utils/make_divisible.py | 27 + openmixup/models/utils/mixup_input.py | 356 ++++++ openmixup/models/utils/multi_pooling.py | 38 + openmixup/models/utils/norm.py | 55 + openmixup/models/utils/scale.py | 13 + openmixup/models/utils/se_layer.py | 75 ++ openmixup/models/utils/smoothing.py | 48 + openmixup/models/utils/sobel.py | 24 + openmixup/models/utils/weight_init.py | 73 ++ openmixup/third_party/clustering.py | 309 +++++ openmixup/utils/__init__.py | 8 + openmixup/utils/alias_multinomial.py | 75 ++ openmixup/utils/collect.py | 83 ++ openmixup/utils/collect_env.py | 64 + openmixup/utils/config_tools.py | 13 + openmixup/utils/contextmanagers.py | 122 ++ openmixup/utils/flops_counter.py | 444 +++++++ openmixup/utils/gather.py | 69 ++ openmixup/utils/logger.py | 66 + openmixup/utils/misc.py | 37 + openmixup/utils/optimizers.py | 116 ++ openmixup/utils/profiling.py | 40 + openmixup/utils/registry.py | 79 ++ requirements.txt | 2 + requirements/runtime.txt | 12 + requirements/tests.txt | 11 + setup.py | 193 +++ tools/auto_train.py | 140 +++ tools/count_parameters.py | 44 + tools/dist_extract.sh | 13 + tools/dist_test.sh | 17 + tools/dist_train.sh | 12 + tools/extract.py | 182 +++ tools/kill.sh | 2 + .../extract_backbone_weights.py | 31 + tools/model_converters/extract_dir_weights.py | 64 + tools/model_converters/publish_model.py | 33 + tools/model_converters/upgrade_models.py | 27 + tools/prepare_data/convert_subset.py | 35 + tools/prepare_data/create_voc_data_files.py | 193 +++ .../create_voc_low_shot_challenge_samples.py | 131 ++ tools/prepare_data/prepare_voc07_cls.sh | 34 + tools/single_train.sh | 9 + tools/srun_extract.sh | 24 + tools/srun_test.sh | 30 + tools/srun_train.sh | 26 + tools/summary/find_automix_val_median.py | 105 ++ .../find_classification_val_3times_average.py | 86 ++ tools/summary/find_classification_val_max.py | 91 ++ .../summary/find_classification_val_median.py | 75 ++ tools/summary/results_summary.py | 104 ++ tools/test.py | 122 ++ tools/train.py | 142 +++ tools/visualization/gradcam.py | 421 +++++++ tools/visualization/my_dist_analysis.py | 329 +++++ tools/visualization/visualize_embedding.py | 390 ++++++ 293 files changed, 28808 insertions(+) create mode 100644 .gitignore create mode 100644 LICENSE create mode 100644 README.md create mode 100644 benchmarks/detection/README.md create mode 100644 benchmarks/detection/configs/Base-Keypoint-RCNN-FPN.yaml create mode 100644 benchmarks/detection/configs/Base-RCNN-C4-BN.yaml create mode 100644 benchmarks/detection/configs/Base-RCNN-FPN.yaml create mode 100644 benchmarks/detection/configs/Base-RetinaNet.yaml create mode 100644 benchmarks/detection/configs/Cityscapes/mask_rcnn_R_50_FPN.yaml create mode 100644 benchmarks/detection/configs/Cityscapes/mask_rcnn_R_50_FPN_moco.yaml create mode 100644 benchmarks/detection/configs/coco_R_50_C4_1x.yaml create mode 100644 benchmarks/detection/configs/coco_R_50_C4_1x_moco.yaml create mode 100644 benchmarks/detection/configs/coco_R_50_C4_2x.yaml create mode 100644 benchmarks/detection/configs/coco_R_50_C4_2x_moco.yaml create mode 100644 benchmarks/detection/configs/coco_R_50_FPN_1x.yaml create mode 100644 benchmarks/detection/configs/coco_R_50_FPN_1x_moco.yaml create mode 100644 benchmarks/detection/configs/coco_R_50_FPN_2x.yaml create mode 100644 benchmarks/detection/configs/coco_R_50_FPN_2x_moco.yaml create mode 100644 benchmarks/detection/configs/coco_R_50_RetinaNet_1x.yaml 
create mode 100644 benchmarks/detection/configs/coco_R_50_RetinaNet_1x_moco.yaml create mode 100644 benchmarks/detection/configs/coco_R_50_RetinaNet_2x.yaml create mode 100644 benchmarks/detection/configs/coco_R_50_RetinaNet_2x_moco.yaml create mode 100644 benchmarks/detection/configs/keypoint_rcnn_R_50_FPN_2x.yaml create mode 100644 benchmarks/detection/configs/keypoint_rcnn_R_50_FPN_2x_moco.yaml create mode 100644 benchmarks/detection/configs/pascal_voc_R_50_C4_24k.yaml create mode 100644 benchmarks/detection/configs/pascal_voc_R_50_C4_24k_moco.yaml create mode 100644 benchmarks/detection/convert-pretrain-to-detectron2.py create mode 100644 benchmarks/detection/run.sh create mode 100644 benchmarks/detection/train_net.py create mode 100644 benchmarks/dist_test_svm_epoch.sh create mode 100644 benchmarks/dist_test_svm_pretrain.sh create mode 100644 benchmarks/dist_train_linear.sh create mode 100644 benchmarks/dist_train_linear_1gpu.sh create mode 100644 benchmarks/dist_train_linear_1gpu_sd.sh create mode 100644 benchmarks/dist_train_linear_2gpu.sh create mode 100644 benchmarks/dist_train_linear_4gpu.sh create mode 100644 benchmarks/dist_train_semi.sh create mode 100644 benchmarks/extract_info/voc07.py create mode 100644 benchmarks/srun_test_svm_epoch.sh create mode 100644 benchmarks/srun_test_svm_pretrain.sh create mode 100644 benchmarks/srun_train_linear.sh create mode 100644 benchmarks/srun_train_semi.sh create mode 100644 benchmarks/svm_tools/aggregate_low_shot_svm_stats.py create mode 100644 benchmarks/svm_tools/eval_svm_full.sh create mode 100644 benchmarks/svm_tools/eval_svm_lowshot.sh create mode 100644 benchmarks/svm_tools/svm_helper.py create mode 100644 benchmarks/svm_tools/test_svm.py create mode 100644 benchmarks/svm_tools/test_svm_low_shot.py create mode 100644 benchmarks/svm_tools/train_svm_kfold.py create mode 100644 benchmarks/svm_tools/train_svm_kfold_parallel.py create mode 100644 benchmarks/svm_tools/train_svm_low_shot.py create mode 100644 benchmarks/svm_tools/train_svm_low_shot_parallel.py create mode 100644 configs/base.py create mode 100644 configs/benchmarks/linear_classification/cifar10/r18_last_1gpu_cifar10.py create mode 100644 configs/benchmarks/linear_classification/cifar10/r18_rep_cifar10.py create mode 100644 configs/benchmarks/linear_classification/cifar10/r50_last_1gpu_cifar10_from_stl10_lr01.py create mode 100644 configs/benchmarks/linear_classification/cifar100/r18_last_1gpu_cifar100.py create mode 100644 configs/benchmarks/linear_classification/cifar100/r18_rep_cifar100.py create mode 100644 configs/benchmarks/linear_classification/cub200/r50_last_2gpu_cub200.py create mode 100644 configs/benchmarks/linear_classification/dogs120/r50_last_2gpu_dogs120.py create mode 100644 configs/benchmarks/linear_classification/fmnist/lenet_last_1gpu_fmnist.py create mode 100644 configs/benchmarks/linear_classification/fmnist/lenet_rep_fmnist.py create mode 100644 configs/benchmarks/linear_classification/imagenet/official/r50_last.py create mode 100644 configs/benchmarks/linear_classification/imagenet/official/r50_last_sobel.py create mode 100644 configs/benchmarks/linear_classification/imagenet/official/r50_multihead.py create mode 100644 configs/benchmarks/linear_classification/imagenet/official/r50_multihead_sobel.py create mode 100644 configs/benchmarks/linear_classification/imagenet/r18_last_1gpu.py create mode 100644 configs/benchmarks/linear_classification/imagenet/r18_last_2gpu.py create mode 100644 
configs/benchmarks/linear_classification/imagenet/r18_last_4gpu.py create mode 100644 configs/benchmarks/linear_classification/imagenet/r18_rep_imagenet.py create mode 100644 configs/benchmarks/linear_classification/imagenet/r50_last_2gpu.py create mode 100644 configs/benchmarks/linear_classification/imagenet/r50_last_4gpu.py create mode 100644 configs/benchmarks/linear_classification/imagenet/r50_rep_imagenet.py create mode 100644 configs/benchmarks/linear_classification/mnist/lenet_rep_mnist.py create mode 100644 configs/benchmarks/linear_classification/pets/r50_last_2gpu_pets.py create mode 100644 configs/benchmarks/linear_classification/pets/r50_rep_pets.py create mode 100644 configs/benchmarks/linear_classification/places205/r50_multihead.py create mode 100644 configs/benchmarks/linear_classification/places205/r50_multihead_sobel.py create mode 100644 configs/benchmarks/linear_classification/stl10/mobilenet_last_1gpu_stl10.py create mode 100644 configs/benchmarks/linear_classification/stl10/mobilenet_rep_stl10.py create mode 100644 configs/benchmarks/linear_classification/stl10/r18/r18_lr1_0_bs256_head1.py create mode 100644 configs/benchmarks/linear_classification/stl10/r18/run_stl10_dist_train_linear.sh create mode 100644 configs/benchmarks/linear_classification/stl10/r18_last_1gpu_stl10.py create mode 100644 configs/benchmarks/linear_classification/stl10/r18_rep_stl10.py create mode 100644 configs/benchmarks/linear_classification/stl10/r50_last_1gpu_stl10.py create mode 100644 configs/benchmarks/linear_classification/stl10/r50_rep_stl10.py create mode 100644 configs/benchmarks/linear_classification/tiny_imagenet/r18_last_1gpu_tiny.py create mode 100644 configs/benchmarks/linear_classification/tiny_imagenet/r18_rep_tiny_imagenet.py create mode 100644 configs/benchmarks/semi_classification/imagenet_10percent/base.py create mode 100644 configs/benchmarks/semi_classification/imagenet_10percent/r50_lr0_001_head1.py create mode 100644 configs/benchmarks/semi_classification/imagenet_10percent/r50_lr0_001_head10.py create mode 100644 configs/benchmarks/semi_classification/imagenet_10percent/r50_lr0_001_head100.py create mode 100644 configs/benchmarks/semi_classification/imagenet_10percent/r50_lr0_01_head1.py create mode 100644 configs/benchmarks/semi_classification/imagenet_10percent/r50_lr0_01_head10.py create mode 100644 configs/benchmarks/semi_classification/imagenet_10percent/r50_lr0_01_head100.py create mode 100644 configs/benchmarks/semi_classification/imagenet_10percent/r50_lr0_01_head1_sobel.py create mode 100644 configs/benchmarks/semi_classification/imagenet_10percent/r50_lr0_1_head1.py create mode 100644 configs/benchmarks/semi_classification/imagenet_10percent/r50_lr0_1_head10.py create mode 100644 configs/benchmarks/semi_classification/imagenet_10percent/r50_lr0_1_head100.py create mode 100644 configs/benchmarks/semi_classification/imagenet_1percent/base.py create mode 100644 configs/benchmarks/semi_classification/imagenet_1percent/r50_lr0_001_head1.py create mode 100644 configs/benchmarks/semi_classification/imagenet_1percent/r50_lr0_001_head10.py create mode 100644 configs/benchmarks/semi_classification/imagenet_1percent/r50_lr0_001_head100.py create mode 100644 configs/benchmarks/semi_classification/imagenet_1percent/r50_lr0_01_head1.py create mode 100644 configs/benchmarks/semi_classification/imagenet_1percent/r50_lr0_01_head10.py create mode 100644 configs/benchmarks/semi_classification/imagenet_1percent/r50_lr0_01_head100.py create mode 100644 
configs/benchmarks/semi_classification/imagenet_1percent/r50_lr0_01_head1_sobel.py create mode 100644 configs/benchmarks/semi_classification/imagenet_1percent/r50_lr0_1_head1.py create mode 100644 configs/benchmarks/semi_classification/imagenet_1percent/r50_lr0_1_head10.py create mode 100644 configs/benchmarks/semi_classification/imagenet_1percent/r50_lr0_1_head100.py create mode 100644 configs/benchmarks/semi_classification/stl10/base.py create mode 100644 configs/benchmarks/semi_classification/stl10/r50_lr0_001_head1.py create mode 100644 configs/benchmarks/semi_classification/stl10/r50_lr0_001_head10.py create mode 100644 configs/benchmarks/semi_classification/stl10/r50_lr0_001_head100.py create mode 100644 configs/benchmarks/semi_classification/stl10/r50_lr0_01_head1.py create mode 100644 configs/benchmarks/semi_classification/stl10/r50_lr0_01_head10.py create mode 100644 configs/benchmarks/semi_classification/stl10/r50_lr0_01_head100.py create mode 100644 configs/benchmarks/semi_classification/stl10/r50_lr0_1_head1.py create mode 100644 configs/benchmarks/semi_classification/stl10/r50_lr0_1_head10.py create mode 100644 configs/benchmarks/semi_classification/stl10/r50_lr0_1_head100.py create mode 100644 docs/CHANGELOG.md create mode 100644 docs/GETTING_STARTED.md create mode 100644 docs/INSTALL.md create mode 100644 docs/MODEL_ZOO.md create mode 100644 docs/relation.jpg create mode 100644 openmixup/__init__.py create mode 100644 openmixup/apis/__init__.py create mode 100644 openmixup/apis/train.py create mode 100644 openmixup/datasets/__init__.py create mode 100644 openmixup/datasets/base.py create mode 100644 openmixup/datasets/builder.py create mode 100644 openmixup/datasets/byol.py create mode 100644 openmixup/datasets/classification.py create mode 100644 openmixup/datasets/contrastive.py create mode 100644 openmixup/datasets/data_sources/__init__.py create mode 100644 openmixup/datasets/data_sources/cifar.py create mode 100644 openmixup/datasets/data_sources/image_list.py create mode 100644 openmixup/datasets/data_sources/imagenet.py create mode 100644 openmixup/datasets/data_sources/mnist.py create mode 100644 openmixup/datasets/dataset_wrappers.py create mode 100644 openmixup/datasets/deepcluster.py create mode 100644 openmixup/datasets/deepcluster_contrastive.py create mode 100644 openmixup/datasets/extraction.py create mode 100644 openmixup/datasets/loader/__init__.py create mode 100644 openmixup/datasets/loader/build_loader.py create mode 100644 openmixup/datasets/loader/sampler.py create mode 100644 openmixup/datasets/multi_view.py create mode 100644 openmixup/datasets/npid.py create mode 100644 openmixup/datasets/pipelines/__init__.py create mode 100644 openmixup/datasets/pipelines/auto_augment.py create mode 100644 openmixup/datasets/pipelines/compose.py create mode 100644 openmixup/datasets/pipelines/transforms.py create mode 100644 openmixup/datasets/registry.py create mode 100644 openmixup/datasets/relative_loc.py create mode 100644 openmixup/datasets/rotation_pred.py create mode 100644 openmixup/datasets/semi_supervised.py create mode 100644 openmixup/datasets/utils.py create mode 100644 openmixup/hooks/__init__.py create mode 100644 openmixup/hooks/addtional_scheduler.py create mode 100644 openmixup/hooks/builder.py create mode 100644 openmixup/hooks/byol_hook.py create mode 100644 openmixup/hooks/deepcluster_automix_hook.py create mode 100644 openmixup/hooks/deepcluster_hook.py create mode 100644 openmixup/hooks/extractor.py create mode 100644 
openmixup/hooks/momentum_hook.py create mode 100644 openmixup/hooks/odc_hook.py create mode 100644 openmixup/hooks/optimizer_hook.py create mode 100644 openmixup/hooks/registry.py create mode 100644 openmixup/hooks/save_hook.py create mode 100644 openmixup/hooks/validate_hook.py create mode 100644 openmixup/models/__init__.py create mode 100644 openmixup/models/backbones/__init__.py create mode 100644 openmixup/models/backbones/alexnet.py create mode 100644 openmixup/models/backbones/base_backbone.py create mode 100644 openmixup/models/backbones/lenet.py create mode 100644 openmixup/models/backbones/mobilenet_v2.py create mode 100644 openmixup/models/backbones/mobilenet_v3.py create mode 100644 openmixup/models/backbones/resnet_mmcls.py create mode 100644 openmixup/models/backbones/resnext.py create mode 100644 openmixup/models/backbones/seresnet.py create mode 100644 openmixup/models/backbones/shufflenet_v2.py create mode 100644 openmixup/models/backbones/wide_resnet.py create mode 100644 openmixup/models/builder.py create mode 100644 openmixup/models/classifiers/__init__.py create mode 100644 openmixup/models/classifiers/classification.py create mode 100644 openmixup/models/classifiers/mixup_classification.py create mode 100644 openmixup/models/classifiers/mixup_momentum_V1plus.py create mode 100644 openmixup/models/classifiers/mixup_momentum_V2.py create mode 100644 openmixup/models/classifiers/representation.py create mode 100644 openmixup/models/heads/__init__.py create mode 100644 openmixup/models/heads/cls_head.py create mode 100644 openmixup/models/heads/cls_mixup_head.py create mode 100644 openmixup/models/heads/contrastive_head.py create mode 100644 openmixup/models/heads/latent_pred_head.py create mode 100644 openmixup/models/heads/multi_cls_head.py create mode 100644 openmixup/models/heads/pmix_block_V2.py create mode 100644 openmixup/models/losses/__init__.py create mode 100644 openmixup/models/losses/asymmetric_loss.py create mode 100644 openmixup/models/losses/cross_entropy_loss.py create mode 100644 openmixup/models/losses/focal_loss.py create mode 100644 openmixup/models/losses/label_smooth_loss.py create mode 100644 openmixup/models/losses/utils.py create mode 100644 openmixup/models/memories/__init__.py create mode 100644 openmixup/models/memories/odc_memory.py create mode 100644 openmixup/models/memories/simple_memory.py create mode 100644 openmixup/models/necks/__init__.py create mode 100644 openmixup/models/necks/conv_necks.py create mode 100644 openmixup/models/necks/fpn_automix.py create mode 100644 openmixup/models/necks/mlp_necks.py create mode 100644 openmixup/models/registry.py create mode 100644 openmixup/models/selfsup/__init__.py create mode 100644 openmixup/models/selfsup/byol.py create mode 100644 openmixup/models/selfsup/deepcluster.py create mode 100644 openmixup/models/selfsup/moco.py create mode 100644 openmixup/models/selfsup/moco_automix_v2.py create mode 100644 openmixup/models/selfsup/moco_mix.py create mode 100644 openmixup/models/selfsup/npid.py create mode 100644 openmixup/models/selfsup/odc.py create mode 100644 openmixup/models/selfsup/relative_loc.py create mode 100644 openmixup/models/selfsup/rotation_pred.py create mode 100644 openmixup/models/selfsup/simclr.py create mode 100644 openmixup/models/selfsup/simclr_mix.py create mode 100644 openmixup/models/semisup/__init__.py create mode 100644 openmixup/models/semisup/dmixmatch.py create mode 100644 openmixup/models/semisup/fine_tuning.py create mode 100644 openmixup/models/semisup/fixmatch.py 
create mode 100644 openmixup/models/semisup/mix_tuning.py create mode 100644 openmixup/models/semisup/self_tuning.py create mode 100644 openmixup/models/utils/__init__.py create mode 100644 openmixup/models/utils/accuracy.py create mode 100644 openmixup/models/utils/channel_shuffle.py create mode 100644 openmixup/models/utils/conv_module.py create mode 100644 openmixup/models/utils/conv_ws.py create mode 100644 openmixup/models/utils/fmix.py create mode 100644 openmixup/models/utils/gather_layer.py create mode 100644 openmixup/models/utils/grad_weight.py create mode 100644 openmixup/models/utils/inverted_residual.py create mode 100644 openmixup/models/utils/make_divisible.py create mode 100644 openmixup/models/utils/mixup_input.py create mode 100644 openmixup/models/utils/multi_pooling.py create mode 100644 openmixup/models/utils/norm.py create mode 100644 openmixup/models/utils/scale.py create mode 100644 openmixup/models/utils/se_layer.py create mode 100644 openmixup/models/utils/smoothing.py create mode 100644 openmixup/models/utils/sobel.py create mode 100644 openmixup/models/utils/weight_init.py create mode 100644 openmixup/third_party/clustering.py create mode 100644 openmixup/utils/__init__.py create mode 100644 openmixup/utils/alias_multinomial.py create mode 100644 openmixup/utils/collect.py create mode 100644 openmixup/utils/collect_env.py create mode 100644 openmixup/utils/config_tools.py create mode 100644 openmixup/utils/contextmanagers.py create mode 100644 openmixup/utils/flops_counter.py create mode 100644 openmixup/utils/gather.py create mode 100644 openmixup/utils/logger.py create mode 100644 openmixup/utils/misc.py create mode 100644 openmixup/utils/optimizers.py create mode 100644 openmixup/utils/profiling.py create mode 100644 openmixup/utils/registry.py create mode 100644 requirements.txt create mode 100644 requirements/runtime.txt create mode 100644 requirements/tests.txt create mode 100644 setup.py create mode 100644 tools/auto_train.py create mode 100644 tools/count_parameters.py create mode 100644 tools/dist_extract.sh create mode 100644 tools/dist_test.sh create mode 100644 tools/dist_train.sh create mode 100644 tools/extract.py create mode 100644 tools/kill.sh create mode 100644 tools/model_converters/extract_backbone_weights.py create mode 100644 tools/model_converters/extract_dir_weights.py create mode 100644 tools/model_converters/publish_model.py create mode 100644 tools/model_converters/upgrade_models.py create mode 100644 tools/prepare_data/convert_subset.py create mode 100644 tools/prepare_data/create_voc_data_files.py create mode 100644 tools/prepare_data/create_voc_low_shot_challenge_samples.py create mode 100644 tools/prepare_data/prepare_voc07_cls.sh create mode 100644 tools/single_train.sh create mode 100644 tools/srun_extract.sh create mode 100644 tools/srun_test.sh create mode 100644 tools/srun_train.sh create mode 100644 tools/summary/find_automix_val_median.py create mode 100644 tools/summary/find_classification_val_3times_average.py create mode 100644 tools/summary/find_classification_val_max.py create mode 100644 tools/summary/find_classification_val_median.py create mode 100644 tools/summary/results_summary.py create mode 100644 tools/test.py create mode 100644 tools/train.py create mode 100644 tools/visualization/gradcam.py create mode 100644 tools/visualization/my_dist_analysis.py create mode 100644 tools/visualization/visualize_embedding.py diff --git a/.gitignore b/.gitignore new file mode 100644 index 00000000..4bd8619d --- /dev/null +++ 
b/.gitignore @@ -0,0 +1,136 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +apex/ +wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +.hypothesis/ +.pytest_cache/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# pyenv +.python-version + +# celery beat schedule file +celerybeat-schedule + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ + +openmixup/version.py +version.py +data +.vscode +.idea + +# custom +*.pkl +*.pkl.json +*.log.json +work_dirs/ +tools/exp_bash/ +pretrains + +# Pytorch +*.pth + +*.swp +source.sh +tensorboard.sh +.DS_Store +replace.sh +benchmarks/detection/datasets +benchmarks/detection/output + +# add temp ignore path: +configs/classification +configs/selfsup +configs/semisup +*.json diff --git a/LICENSE b/LICENSE new file mode 100644 index 00000000..ca9f7cb1 --- /dev/null +++ b/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2020-2021 Open-MMLab. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License.
diff --git a/README.md b/README.md
new file mode 100644
index 00000000..a4276bf2
--- /dev/null
+++ b/README.md
@@ -0,0 +1,109 @@
+
+# OpenSelfSup
+
+**News**
+* Downstream tasks now support more methods (Mask R-CNN FPN, RetinaNet, Keypoint R-CNN) and more datasets (Cityscapes).
+* The 'GaussianBlur' augmentation has been switched from OpenCV to PIL, and MoCo v2 training speed doubles!
+(time/iter 0.35s --> 0.16s; SimCLR and BYOL benefit as well. See the sketch below.)
+* OpenSelfSup now supports [Mixed Precision Training (apex AMP)](https://github.com/NVIDIA/apex)!
+* A bug in MoCo v2 has been fixed, and the results are now reproducible.
+* OpenSelfSup now supports [BYOL](https://arxiv.org/pdf/2006.07733.pdf)!
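+
+Below is a minimal sketch of a PIL-based random Gaussian blur in the MoCo v2 style; the class name, sigma range, and defaults are illustrative rather than the exact implementation in this repo:
+
+```python
+import random
+
+from PIL import ImageFilter
+
+
+class GaussianBlur(object):
+    """Blur a PIL image with a sigma sampled uniformly per call."""
+
+    def __init__(self, sigma_min=0.1, sigma_max=2.0):
+        self.sigma_min = sigma_min
+        self.sigma_max = sigma_max
+
+    def __call__(self, img):
+        # Sample a blur strength for each image, as in the MoCo v2 augmentation.
+        sigma = random.uniform(self.sigma_min, self.sigma_max)
+        # PIL's filter avoids the per-crop overhead of the previous OpenCV call.
+        return img.filter(ImageFilter.GaussianBlur(radius=sigma))
+```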
+
+## Introduction
+
+The master branch works with **PyTorch 1.1** or higher.
+
+`OpenSelfSup` is an open-source unsupervised representation learning toolbox based on PyTorch.
+
+### What does this repo do?
+
+Below are the relations among Unsupervised Learning, Self-Supervised Learning and Representation Learning. This repo focuses on the shadow area, i.e., Unsupervised Representation Learning, of which Self-Supervised Representation Learning is the major branch. Since in many cases we do not strictly distinguish between Self-Supervised Representation Learning and Unsupervised Representation Learning, we still name this repo `OpenSelfSup`.
+
+<img src="docs/relation.jpg" alt="relation"/>
+
+### Major features
+
+- **All methods in one repository**
+
+  For a comprehensive comparison in all benchmarks, refer to [MODEL_ZOO.md](docs/MODEL_ZOO.md). Most of the self-supervised pretraining methods use the `batch_size=256, epochs=200` setting.
+
+  | Method | VOC07 SVM (best layer) | ImageNet (best layer) |
+  |---|---|---|
+  | ImageNet | 87.17 | 76.17 |
+  | Random | 30.54 | 16.21 |
+  | Relative-Loc | 64.78 | 49.31 |
+  | Rotation-Pred | 67.38 | 54.99 |
+  | DeepCluster | 74.26 | 57.71 |
+  | NPID | 74.50 | 56.61 |
+  | ODC | 78.42 | 57.70 |
+  | MoCo | 79.18 | 60.60 |
+  | MoCo v2 | 84.26 | 67.69 |
+  | SimCLR | 78.95 | 61.57 |
+  | BYOL (epoch=300) | 86.58 | 72.35 |
+
+- **Flexibility & Extensibility**
+
+  `OpenSelfSup` follows a code architecture similar to MMDetection's while being even more flexible, since OpenSelfSup integrates various self-supervised tasks, including classification, joint clustering and feature learning, contrastive learning, tasks with a memory bank, etc.
+
+  For existing methods in this repo, you only need to modify config files to adjust hyper-parameters. It is also simple to design your own methods; please refer to [GETTING_STARTED.md](docs/GETTING_STARTED.md).
+
+- **Efficiency**
+
+  All methods support multi-machine multi-GPU distributed training.
+
+- **Standardized Benchmarks**
+
+  We standardize the benchmarks, including logistic regression, SVM / Low-shot SVM on linearly probed features, semi-supervised classification, and object detection. Below are the settings of these benchmarks (a sketch of the SVM evaluation follows the table).
+
+  | Benchmarks | Setting | Remarks |
+  |---|---|---|
+  | ImageNet Linear Classification (Multi) | [goyal2019scaling](http://openaccess.thecvf.com/content_ICCV_2019/papers/Goyal_Scaling_and_Benchmarking_Self-Supervised_Visual_Representation_Learning_ICCV_2019_paper.pdf) | Evaluate different layers. |
+  | ImageNet Linear Classification (Last) | [MoCo](http://openaccess.thecvf.com/content_CVPR_2020/papers/He_Momentum_Contrast_for_Unsupervised_Visual_Representation_Learning_CVPR_2020_paper.pdf) | Evaluate the last layer after global pooling. |
+  | Places205 Linear Classification | [goyal2019scaling](http://openaccess.thecvf.com/content_ICCV_2019/papers/Goyal_Scaling_and_Benchmarking_Self-Supervised_Visual_Representation_Learning_ICCV_2019_paper.pdf) | Evaluate different layers. |
+  | ImageNet Semi-Sup Classification | | |
+  | PASCAL VOC07 SVM | [goyal2019scaling](http://openaccess.thecvf.com/content_ICCV_2019/papers/Goyal_Scaling_and_Benchmarking_Self-Supervised_Visual_Representation_Learning_ICCV_2019_paper.pdf) | Costs="1.0,10.0,100.0" to save evaluation time w/o change of results. |
+  | PASCAL VOC07 Low-shot SVM | [goyal2019scaling](http://openaccess.thecvf.com/content_ICCV_2019/papers/Goyal_Scaling_and_Benchmarking_Self-Supervised_Visual_Representation_Learning_ICCV_2019_paper.pdf) | Costs="1.0,10.0,100.0" to save evaluation time w/o change of results. |
+  | PASCAL VOC07+12 Object Detection | [MoCo](http://openaccess.thecvf.com/content_CVPR_2020/papers/He_Momentum_Contrast_for_Unsupervised_Visual_Representation_Learning_CVPR_2020_paper.pdf) | |
+  | COCO17 Object Detection | [MoCo](http://openaccess.thecvf.com/content_CVPR_2020/papers/He_Momentum_Contrast_for_Unsupervised_Visual_Representation_Learning_CVPR_2020_paper.pdf) | |
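+
+  As a concrete illustration of the VOC07 SVM protocol, below is a minimal sketch using scikit-learn's `LinearSVC` over the listed costs; the feature/label file names and the 2:1 positive class weighting are assumptions for illustration, not this repo's exact pipeline (the actual scripts live under `benchmarks/svm_tools/`):
+
+  ```python
+  import numpy as np
+  from sklearn.model_selection import cross_val_score
+  from sklearn.svm import LinearSVC
+
+  # Hypothetical inputs: frozen-backbone features and binary labels for one
+  # VOC07 class (+1 present, -1 absent), saved by a prior extraction step.
+  feats = np.load("voc07_trainval_feats.npy")    # shape (N, D)
+  labels = np.load("voc07_trainval_cls0.npy")    # shape (N,), values in {+1, -1}
+
+  best_cost, best_ap = None, -1.0
+  for cost in [1.0, 10.0, 100.0]:  # Costs="1.0,10.0,100.0" from the table above
+      clf = LinearSVC(C=cost, class_weight={1: 2, -1: 1}, max_iter=2000)
+      # Pick the cost per class by 3-fold cross-validated average precision.
+      ap = cross_val_score(clf, feats, labels, cv=3,
+                           scoring="average_precision").mean()
+      if ap > best_ap:
+          best_cost, best_ap = cost, ap
+
+  print("best cost={}, cv AP={:.4f}".format(best_cost, best_ap))
+  ```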
+
+## Change Log
+
+Please refer to [CHANGELOG.md](docs/CHANGELOG.md) for details and release history.
+
+[2020-10-14] `OpenSelfSup` v0.3.0 is released with some bugs fixed and support of new features.
+
+[2020-06-26] `OpenSelfSup` v0.2.0 is released with benchmark results and support of new features.
+
+[2020-06-16] `OpenSelfSup` v0.1.0 is released.
+
+## Installation
+
+Please refer to [INSTALL.md](docs/INSTALL.md) for installation and dataset preparation.
+
+## Get Started
+
+Please see [GETTING_STARTED.md](docs/GETTING_STARTED.md) for the basic usage of OpenSelfSup.
+
+## Benchmark and Model Zoo
+
+Please refer to [MODEL_ZOO.md](docs/MODEL_ZOO.md) for a comprehensive set of pre-trained models and benchmarks.
+
+## License
+
+This project is released under the [Apache 2.0 license](LICENSE).
+
+## Acknowledgement
+
+- This repo borrows the architecture design and part of the code from [MMDetection](https://github.com/open-mmlab/mmdetection).
+- The implementation of MoCo and the detection benchmark borrow the code from [moco](https://github.com/facebookresearch/moco).
+- The SVM benchmark borrows the code from [fair_self_supervision_benchmark](https://github.com/facebookresearch/fair_self_supervision_benchmark).
+- `openselfsup/third_party/clustering.py` is borrowed from [deepcluster](https://github.com/facebookresearch/deepcluster/blob/master/clustering.py).
+
+## Contributors
+
+We encourage researchers interested in Self-Supervised Learning to contribute to OpenSelfSup. Your contributions, including implementing or transferring new methods to OpenSelfSup, performing experiments, reproducing results, parameter studies, etc., will be recorded in [MODEL_ZOO.md](docs/MODEL_ZOO.md). For now, the contributors include: Xiaohang Zhan ([@XiaohangZhan](http://github.com/XiaohangZhan)), Jiahao Xie ([@Jiahao000](https://github.com/Jiahao000)), Enze Xie ([@xieenze](https://github.com/xieenze)), Xiangxiang Chu ([@cxxgtxy](https://github.com/cxxgtxy)), Zijian He ([@scnuhealthy](https://github.com/scnuhealthy)).
+
+## Contact
+
+This repo is currently maintained by Xiaohang Zhan ([@XiaohangZhan](http://github.com/XiaohangZhan)), Jiahao Xie ([@Jiahao000](https://github.com/Jiahao000)) and Enze Xie ([@xieenze](https://github.com/xieenze)).
diff --git a/benchmarks/detection/README.md b/benchmarks/detection/README.md
new file mode 100644
index 00000000..caeb7ae3
--- /dev/null
+++ b/benchmarks/detection/README.md
@@ -0,0 +1,12 @@
+
+## Transferring to Detection
+
+We follow the evaluation setting in MoCo when transferring to object detection (a usage sketch follows the instructions below).
+
+### Instruction
+
+1. Install [detectron2](https://github.com/facebookresearch/detectron2/blob/master/INSTALL.md).
+
+1. Put the dataset under the "benchmarks/detection/datasets" directory,
+   following the [directory structure](https://github.com/facebookresearch/detectron2/tree/master/datasets)
+   required by detectron2.
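+
+A minimal usage sketch, assuming a pretrained checkpoint at a hypothetical path `work_dirs/pretrain/latest.pth` (the converter script and `run.sh` are both included in this patch):
+
+```shell
+cd benchmarks/detection
+# Convert the OpenSelfSup backbone into a detectron2-compatible .pkl file.
+python convert-pretrain-to-detectron2.py work_dirs/pretrain/latest.pth pretrain.pkl
+# Train and evaluate with detectron2 (run.sh launches 8 GPUs by default).
+bash run.sh configs/pascal_voc_R_50_C4_24k_moco.yaml pretrain.pkl
+```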
diff --git a/benchmarks/detection/configs/Base-Keypoint-RCNN-FPN.yaml b/benchmarks/detection/configs/Base-Keypoint-RCNN-FPN.yaml
new file mode 100644
index 00000000..7cbf5eec
--- /dev/null
+++ b/benchmarks/detection/configs/Base-Keypoint-RCNN-FPN.yaml
@@ -0,0 +1,15 @@
+_BASE_: "Base-RCNN-FPN.yaml"
+MODEL:
+  KEYPOINT_ON: True
+  ROI_HEADS:
+    NUM_CLASSES: 1
+  ROI_BOX_HEAD:
+    SMOOTH_L1_BETA: 0.5  # Keypoint AP degrades (though box AP improves) when using plain L1 loss
+  RPN:
+    # Detectron1 uses 2000 proposals per-batch, but this option is per-image in detectron2.
+    # 1000 proposals per-image is found to hurt box AP.
+    # Therefore we increase it to 1500 per-image.
+    POST_NMS_TOPK_TRAIN: 1500
+DATASETS:
+  TRAIN: ("keypoints_coco_2017_train",)
+  TEST: ("keypoints_coco_2017_val",)
diff --git a/benchmarks/detection/configs/Base-RCNN-C4-BN.yaml b/benchmarks/detection/configs/Base-RCNN-C4-BN.yaml
new file mode 100644
index 00000000..5104c6a6
--- /dev/null
+++ b/benchmarks/detection/configs/Base-RCNN-C4-BN.yaml
@@ -0,0 +1,17 @@
+MODEL:
+  META_ARCHITECTURE: "GeneralizedRCNN"
+  RPN:
+    PRE_NMS_TOPK_TEST: 6000
+    POST_NMS_TOPK_TEST: 1000
+  ROI_HEADS:
+    NAME: "Res5ROIHeadsExtraNorm"
+  BACKBONE:
+    FREEZE_AT: 0
+  RESNETS:
+    NORM: "SyncBN"
+TEST:
+  PRECISE_BN:
+    ENABLED: True
+SOLVER:
+  IMS_PER_BATCH: 16
+  BASE_LR: 0.02
diff --git a/benchmarks/detection/configs/Base-RCNN-FPN.yaml b/benchmarks/detection/configs/Base-RCNN-FPN.yaml
new file mode 100644
index 00000000..d40fe5ef
--- /dev/null
+++ b/benchmarks/detection/configs/Base-RCNN-FPN.yaml
@@ -0,0 +1,42 @@
+MODEL:
+  META_ARCHITECTURE: "GeneralizedRCNN"
+  BACKBONE:
+    NAME: "build_resnet_fpn_backbone"
+  RESNETS:
+    OUT_FEATURES: ["res2", "res3", "res4", "res5"]
+  FPN:
+    IN_FEATURES: ["res2", "res3", "res4", "res5"]
+  ANCHOR_GENERATOR:
+    SIZES: [[32], [64], [128], [256], [512]]  # One size for each in feature map
+    ASPECT_RATIOS: [[0.5, 1.0, 2.0]]  # Three aspect ratios (same for all in feature maps)
+  RPN:
+    IN_FEATURES: ["p2", "p3", "p4", "p5", "p6"]
+    PRE_NMS_TOPK_TRAIN: 2000  # Per FPN level
+    PRE_NMS_TOPK_TEST: 1000  # Per FPN level
+    # Detectron1 uses 2000 proposals per-batch,
+    # (See "modeling/rpn/rpn_outputs.py" for details of this legacy issue)
+    # which is approximately 1000 proposals per-image since the default batch size for FPN is 2.
+    POST_NMS_TOPK_TRAIN: 1000
+    POST_NMS_TOPK_TEST: 1000
+  ROI_HEADS:
+    NAME: "StandardROIHeads"
+    IN_FEATURES: ["p2", "p3", "p4", "p5"]
+  ROI_BOX_HEAD:
+    NAME: "FastRCNNConvFCHead"
+    NUM_FC: 2
+    POOLER_RESOLUTION: 7
+  ROI_MASK_HEAD:
+    NAME: "MaskRCNNConvUpsampleHead"
+    NUM_CONV: 4
+    POOLER_RESOLUTION: 14
+DATASETS:
+  TRAIN: ("coco_2017_train",)
+  TEST: ("coco_2017_val",)
+SOLVER:
+  IMS_PER_BATCH: 16
+  BASE_LR: 0.02
+  STEPS: (60000, 80000)
+  MAX_ITER: 90000
+INPUT:
+  MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800)
+VERSION: 2
\ No newline at end of file
diff --git a/benchmarks/detection/configs/Base-RetinaNet.yaml b/benchmarks/detection/configs/Base-RetinaNet.yaml
new file mode 100644
index 00000000..95ee124f
--- /dev/null
+++ b/benchmarks/detection/configs/Base-RetinaNet.yaml
@@ -0,0 +1,25 @@
+MODEL:
+  META_ARCHITECTURE: "RetinaNet"
+  BACKBONE:
+    NAME: "build_retinanet_resnet_fpn_backbone"
+  RESNETS:
+    OUT_FEATURES: ["res3", "res4", "res5"]
+  ANCHOR_GENERATOR:
+    SIZES: !!python/object/apply:eval ["[[x, x * 2**(1.0/3), x * 2**(2.0/3) ] for x in [32, 64, 128, 256, 512 ]]"]
+  FPN:
+    IN_FEATURES: ["res3", "res4", "res5"]
+  RETINANET:
+    IOU_THRESHOLDS: [0.4, 0.5]
+    IOU_LABELS: [0, -1, 1]
+    SMOOTH_L1_LOSS_BETA: 0.0
+DATASETS:
+  TRAIN: ("coco_2017_train",)
+  TEST: ("coco_2017_val",)
+SOLVER:
+  IMS_PER_BATCH: 16
+  BASE_LR: 0.01  # Note that RetinaNet uses a different default learning rate
+  STEPS: (60000, 80000)
+  MAX_ITER: 90000
+INPUT:
+  MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800)
+VERSION: 2
\ No newline at end of file
diff --git a/benchmarks/detection/configs/Cityscapes/mask_rcnn_R_50_FPN.yaml b/benchmarks/detection/configs/Cityscapes/mask_rcnn_R_50_FPN.yaml
new file mode 100644
index 00000000..8e74dfbc
--- /dev/null
+++ b/benchmarks/detection/configs/Cityscapes/mask_rcnn_R_50_FPN.yaml
@@ -0,0 +1,30 @@
+_BASE_: "../Base-RCNN-FPN.yaml"
+MODEL:
+  WEIGHTS:
"detectron2://ImageNetPretrained/MSRA/R-50.pkl" + MASK_ON: True + ROI_HEADS: + NUM_CLASSES: 8 + BACKBONE: + FREEZE_AT: 0 + RESNETS: + DEPTH: 50 + NORM: "SyncBN" + FPN: + NORM: "SyncBN" +INPUT: + MIN_SIZE_TRAIN: (800, 832, 864, 896, 928, 960, 992, 1024) + MIN_SIZE_TRAIN_SAMPLING: "choice" + MIN_SIZE_TEST: 1024 + MAX_SIZE_TRAIN: 2048 + MAX_SIZE_TEST: 2048 +DATASETS: + TRAIN: ("cityscapes_fine_instance_seg_train",) + TEST: ("cityscapes_fine_instance_seg_val",) +SOLVER: + BASE_LR: 0.01 + STEPS: (18000,) + MAX_ITER: 24000 + IMS_PER_BATCH: 8 +TEST: + PRECISE_BN: + ENABLED: True \ No newline at end of file diff --git a/benchmarks/detection/configs/Cityscapes/mask_rcnn_R_50_FPN_moco.yaml b/benchmarks/detection/configs/Cityscapes/mask_rcnn_R_50_FPN_moco.yaml new file mode 100644 index 00000000..52ad1fb1 --- /dev/null +++ b/benchmarks/detection/configs/Cityscapes/mask_rcnn_R_50_FPN_moco.yaml @@ -0,0 +1,9 @@ +_BASE_: "mask_rcnn_R_50_FPN.yaml" +MODEL: + PIXEL_MEAN: [123.675, 116.280, 103.530] + PIXEL_STD: [58.395, 57.120, 57.375] + WEIGHTS: "See Instructions" + RESNETS: + STRIDE_IN_1X1: False +INPUT: + FORMAT: "RGB" \ No newline at end of file diff --git a/benchmarks/detection/configs/coco_R_50_C4_1x.yaml b/benchmarks/detection/configs/coco_R_50_C4_1x.yaml new file mode 100644 index 00000000..e0826b40 --- /dev/null +++ b/benchmarks/detection/configs/coco_R_50_C4_1x.yaml @@ -0,0 +1,4 @@ +_BASE_: "coco_R_50_C4_2x.yaml" +SOLVER: + STEPS: (60000, 80000) + MAX_ITER: 90000 diff --git a/benchmarks/detection/configs/coco_R_50_C4_1x_moco.yaml b/benchmarks/detection/configs/coco_R_50_C4_1x_moco.yaml new file mode 100644 index 00000000..98524d0b --- /dev/null +++ b/benchmarks/detection/configs/coco_R_50_C4_1x_moco.yaml @@ -0,0 +1,4 @@ +_BASE_: "coco_R_50_C4_2x_moco.yaml" +SOLVER: + STEPS: (60000, 80000) + MAX_ITER: 90000 diff --git a/benchmarks/detection/configs/coco_R_50_C4_2x.yaml b/benchmarks/detection/configs/coco_R_50_C4_2x.yaml new file mode 100644 index 00000000..5b7e4240 --- /dev/null +++ b/benchmarks/detection/configs/coco_R_50_C4_2x.yaml @@ -0,0 +1,13 @@ +_BASE_: "Base-RCNN-C4-BN.yaml" +MODEL: + MASK_ON: True + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" +INPUT: + MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800) + MIN_SIZE_TEST: 800 +DATASETS: + TRAIN: ("coco_2017_train",) + TEST: ("coco_2017_val",) +SOLVER: + STEPS: (120000, 160000) + MAX_ITER: 180000 diff --git a/benchmarks/detection/configs/coco_R_50_C4_2x_moco.yaml b/benchmarks/detection/configs/coco_R_50_C4_2x_moco.yaml new file mode 100644 index 00000000..8e310683 --- /dev/null +++ b/benchmarks/detection/configs/coco_R_50_C4_2x_moco.yaml @@ -0,0 +1,10 @@ +_BASE_: "coco_R_50_C4_2x.yaml" +MODEL: + PIXEL_MEAN: [123.675, 116.280, 103.530] + PIXEL_STD: [58.395, 57.120, 57.375] + WEIGHTS: "See Instructions" + RESNETS: + STRIDE_IN_1X1: False +INPUT: + MAX_SIZE_TRAIN: 1200 + FORMAT: "RGB" diff --git a/benchmarks/detection/configs/coco_R_50_FPN_1x.yaml b/benchmarks/detection/configs/coco_R_50_FPN_1x.yaml new file mode 100644 index 00000000..142319a7 --- /dev/null +++ b/benchmarks/detection/configs/coco_R_50_FPN_1x.yaml @@ -0,0 +1,17 @@ +_BASE_: "Base-RCNN-FPN.yaml" +MODEL: + MASK_ON: True + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + BACKBONE: + FREEZE_AT: 0 + RESNETS: + DEPTH: 50 + NORM: "SyncBN" + FPN: + NORM: "SyncBN" +TEST: + PRECISE_BN: + ENABLED: True +SOLVER: + STEPS: (60000, 80000) + MAX_ITER: 90000 \ No newline at end of file diff --git a/benchmarks/detection/configs/coco_R_50_FPN_1x_moco.yaml 
b/benchmarks/detection/configs/coco_R_50_FPN_1x_moco.yaml new file mode 100644 index 00000000..c341eab4 --- /dev/null +++ b/benchmarks/detection/configs/coco_R_50_FPN_1x_moco.yaml @@ -0,0 +1,9 @@ +_BASE_: "coco_R_50_FPN_1x.yaml" +MODEL: + PIXEL_MEAN: [123.675, 116.280, 103.530] + PIXEL_STD: [58.395, 57.120, 57.375] + WEIGHTS: "See Instructions" + RESNETS: + STRIDE_IN_1X1: False +INPUT: + FORMAT: "RGB" \ No newline at end of file diff --git a/benchmarks/detection/configs/coco_R_50_FPN_2x.yaml b/benchmarks/detection/configs/coco_R_50_FPN_2x.yaml new file mode 100644 index 00000000..483789f3 --- /dev/null +++ b/benchmarks/detection/configs/coco_R_50_FPN_2x.yaml @@ -0,0 +1,4 @@ +_BASE_: "coco_R_50_FPN_1x.yaml" +SOLVER: + STEPS: (120000, 160000) + MAX_ITER: 180000 \ No newline at end of file diff --git a/benchmarks/detection/configs/coco_R_50_FPN_2x_moco.yaml b/benchmarks/detection/configs/coco_R_50_FPN_2x_moco.yaml new file mode 100644 index 00000000..875c2a73 --- /dev/null +++ b/benchmarks/detection/configs/coco_R_50_FPN_2x_moco.yaml @@ -0,0 +1,4 @@ +_BASE_: "coco_R_50_FPN_1x_moco.yaml" +SOLVER: + STEPS: (120000, 160000) + MAX_ITER: 180000 \ No newline at end of file diff --git a/benchmarks/detection/configs/coco_R_50_RetinaNet_1x.yaml b/benchmarks/detection/configs/coco_R_50_RetinaNet_1x.yaml new file mode 100644 index 00000000..52c63ba1 --- /dev/null +++ b/benchmarks/detection/configs/coco_R_50_RetinaNet_1x.yaml @@ -0,0 +1,13 @@ +_BASE_: "Base-RetinaNet.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + BACKBONE: + FREEZE_AT: 0 + RESNETS: + DEPTH: 50 + NORM: "SyncBN" + FPN: + NORM: "SyncBN" +TEST: + PRECISE_BN: + ENABLED: True \ No newline at end of file diff --git a/benchmarks/detection/configs/coco_R_50_RetinaNet_1x_moco.yaml b/benchmarks/detection/configs/coco_R_50_RetinaNet_1x_moco.yaml new file mode 100644 index 00000000..2fdff1a5 --- /dev/null +++ b/benchmarks/detection/configs/coco_R_50_RetinaNet_1x_moco.yaml @@ -0,0 +1,9 @@ +_BASE_: "coco_R_50_RetinaNet_1x.yaml" +MODEL: + PIXEL_MEAN: [123.675, 116.280, 103.530] + PIXEL_STD: [58.395, 57.120, 57.375] + WEIGHTS: "See Instructions" + RESNETS: + STRIDE_IN_1X1: False +INPUT: + FORMAT: "RGB" \ No newline at end of file diff --git a/benchmarks/detection/configs/coco_R_50_RetinaNet_2x.yaml b/benchmarks/detection/configs/coco_R_50_RetinaNet_2x.yaml new file mode 100644 index 00000000..150a607e --- /dev/null +++ b/benchmarks/detection/configs/coco_R_50_RetinaNet_2x.yaml @@ -0,0 +1,4 @@ +_BASE_: "coco_R_50_RetinaNet_1x.yaml" +SOLVER: + STEPS: (120000, 160000) + MAX_ITER: 180000 \ No newline at end of file diff --git a/benchmarks/detection/configs/coco_R_50_RetinaNet_2x_moco.yaml b/benchmarks/detection/configs/coco_R_50_RetinaNet_2x_moco.yaml new file mode 100644 index 00000000..b1faa8c4 --- /dev/null +++ b/benchmarks/detection/configs/coco_R_50_RetinaNet_2x_moco.yaml @@ -0,0 +1,4 @@ +_BASE_: "coco_R_50_RetinaNet_1x_moco.yaml" +SOLVER: + STEPS: (120000, 160000) + MAX_ITER: 180000 \ No newline at end of file diff --git a/benchmarks/detection/configs/keypoint_rcnn_R_50_FPN_2x.yaml b/benchmarks/detection/configs/keypoint_rcnn_R_50_FPN_2x.yaml new file mode 100644 index 00000000..1d60b437 --- /dev/null +++ b/benchmarks/detection/configs/keypoint_rcnn_R_50_FPN_2x.yaml @@ -0,0 +1,16 @@ +_BASE_: "Base-Keypoint-RCNN-FPN.yaml" +MODEL: + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + BACKBONE: + FREEZE_AT: 0 + RESNETS: + DEPTH: 50 + NORM: "SyncBN" + FPN: + NORM: "SyncBN" +TEST: + PRECISE_BN: + ENABLED: True +SOLVER: + 
STEPS: (120000, 160000) + MAX_ITER: 180000 \ No newline at end of file diff --git a/benchmarks/detection/configs/keypoint_rcnn_R_50_FPN_2x_moco.yaml b/benchmarks/detection/configs/keypoint_rcnn_R_50_FPN_2x_moco.yaml new file mode 100644 index 00000000..7dbcfbb9 --- /dev/null +++ b/benchmarks/detection/configs/keypoint_rcnn_R_50_FPN_2x_moco.yaml @@ -0,0 +1,9 @@ +_BASE_: "keypoint_rcnn_R_50_FPN_2x.yaml" +MODEL: + PIXEL_MEAN: [123.675, 116.280, 103.530] + PIXEL_STD: [58.395, 57.120, 57.375] + WEIGHTS: "See Instructions" + RESNETS: + STRIDE_IN_1X1: False +INPUT: + FORMAT: "RGB" \ No newline at end of file diff --git a/benchmarks/detection/configs/pascal_voc_R_50_C4_24k.yaml b/benchmarks/detection/configs/pascal_voc_R_50_C4_24k.yaml new file mode 100644 index 00000000..a05eb5e2 --- /dev/null +++ b/benchmarks/detection/configs/pascal_voc_R_50_C4_24k.yaml @@ -0,0 +1,16 @@ +_BASE_: "Base-RCNN-C4-BN.yaml" +MODEL: + MASK_ON: False + WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl" + ROI_HEADS: + NUM_CLASSES: 20 +INPUT: + MIN_SIZE_TRAIN: (480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800) + MIN_SIZE_TEST: 800 +DATASETS: + TRAIN: ('voc_2007_trainval', 'voc_2012_trainval') + TEST: ('voc_2007_test',) +SOLVER: + STEPS: (18000, 22000) + MAX_ITER: 24000 + WARMUP_ITERS: 100 diff --git a/benchmarks/detection/configs/pascal_voc_R_50_C4_24k_moco.yaml b/benchmarks/detection/configs/pascal_voc_R_50_C4_24k_moco.yaml new file mode 100644 index 00000000..eebe6905 --- /dev/null +++ b/benchmarks/detection/configs/pascal_voc_R_50_C4_24k_moco.yaml @@ -0,0 +1,9 @@ +_BASE_: "pascal_voc_R_50_C4_24k.yaml" +MODEL: + PIXEL_MEAN: [123.675, 116.280, 103.530] + PIXEL_STD: [58.395, 57.120, 57.375] + WEIGHTS: "See Instructions" + RESNETS: + STRIDE_IN_1X1: False +INPUT: + FORMAT: "RGB" diff --git a/benchmarks/detection/convert-pretrain-to-detectron2.py b/benchmarks/detection/convert-pretrain-to-detectron2.py new file mode 100644 index 00000000..e8bf5434 --- /dev/null +++ b/benchmarks/detection/convert-pretrain-to-detectron2.py @@ -0,0 +1,36 @@ +#!/usr/bin/env python +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved + +import pickle as pkl +import sys +import torch + +if __name__ == "__main__": + input = sys.argv[1] + + obj = torch.load(input, map_location="cpu") + obj = obj["state_dict"] + + newmodel = {} + for k, v in obj.items(): + old_k = k + if "layer" not in k: + k = "stem." + k + for t in [1, 2, 3, 4]: + k = k.replace("layer{}".format(t), "res{}".format(t + 1)) + for t in [1, 2, 3]: + k = k.replace("bn{}".format(t), "conv{}.norm".format(t)) + k = k.replace("downsample.0", "shortcut") + k = k.replace("downsample.1", "shortcut.norm") + print(old_k, "->", k) + newmodel[k] = v.numpy() + + res = { + "model": newmodel, + "__author__": "OpenSelfSup", + "matching_heuristics": True + } + + assert sys.argv[2].endswith('.pkl') + with open(sys.argv[2], "wb") as f: + pkl.dump(res, f) diff --git a/benchmarks/detection/run.sh b/benchmarks/detection/run.sh new file mode 100644 index 00000000..2b35e59d --- /dev/null +++ b/benchmarks/detection/run.sh @@ -0,0 +1,6 @@ +#!/bin/bash +DET_CFG=$1 +WEIGHTS=$2 + +python $(dirname "$0")/train_net.py --config-file $DET_CFG \ + --num-gpus 8 MODEL.WEIGHTS $WEIGHTS diff --git a/benchmarks/detection/train_net.py b/benchmarks/detection/train_net.py new file mode 100644 index 00000000..8ae31c9e --- /dev/null +++ b/benchmarks/detection/train_net.py @@ -0,0 +1,77 @@ +#!/usr/bin/env python +# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved + +import os + +from detectron2.checkpoint import DetectionCheckpointer +from detectron2.config import get_cfg +from detectron2.engine import DefaultTrainer, default_argument_parser, default_setup, launch +from detectron2.evaluation import COCOEvaluator, PascalVOCDetectionEvaluator +from detectron2.layers import get_norm +from detectron2.modeling.roi_heads import ROI_HEADS_REGISTRY, Res5ROIHeads + + +@ROI_HEADS_REGISTRY.register() +class Res5ROIHeadsExtraNorm(Res5ROIHeads): + """ + As described in the MOCO paper, there is an extra BN layer + following the res5 stage. + """ + + def _build_res5_block(self, cfg): + seq, out_channels = super()._build_res5_block(cfg) + norm = cfg.MODEL.RESNETS.NORM + norm = get_norm(norm, out_channels) + seq.add_module("norm", norm) + return seq, out_channels + + +class Trainer(DefaultTrainer): + + @classmethod + def build_evaluator(cls, cfg, dataset_name, output_folder=None): + if output_folder is None: + output_folder = os.path.join(cfg.OUTPUT_DIR, "inference") + if "coco" in dataset_name: + return COCOEvaluator(dataset_name, cfg, True, output_folder) + else: + assert "voc" in dataset_name + return PascalVOCDetectionEvaluator(dataset_name) + + +def setup(args): + cfg = get_cfg() + cfg.merge_from_file(args.config_file) + cfg.merge_from_list(args.opts) + cfg.freeze() + default_setup(cfg, args) + return cfg + + +def main(args): + cfg = setup(args) + + if args.eval_only: + model = Trainer.build_model(cfg) + DetectionCheckpointer( + model, save_dir=cfg.OUTPUT_DIR).resume_or_load( + cfg.MODEL.WEIGHTS, resume=args.resume) + res = Trainer.test(cfg, model) + return res + + trainer = Trainer(cfg) + trainer.resume_or_load(resume=args.resume) + return trainer.train() + + +if __name__ == "__main__": + args = default_argument_parser().parse_args() + print("Command Line Args:", args) + launch( + main, + args.num_gpus, + num_machines=args.num_machines, + machine_rank=args.machine_rank, + dist_url=args.dist_url, + args=(args, ), + ) diff --git a/benchmarks/dist_test_svm_epoch.sh b/benchmarks/dist_test_svm_epoch.sh new file mode 100644 index 00000000..216229f0 --- /dev/null +++ b/benchmarks/dist_test_svm_epoch.sh @@ -0,0 +1,28 @@ +#!/bin/bash +set -e +set -x + +CFG=$1 +EPOCH=$2 +FEAT_LIST=$3 # e.g.: "feat5", "feat4 feat5". If leave empty, the default is "feat5" +GPUS=${4:-8} +WORK_DIR=$(echo ${CFG%.*} | sed -e "s/configs/work_dirs/g")/ + +if [ "$CFG" == "" ] || [ "$EPOCH" == "" ]; then + echo "ERROR: Missing arguments." + exit +fi + +if [ ! -f $WORK_DIR/epoch_${EPOCH}.pth ]; then + echo "ERROR: File not exist: $WORK_DIR/epoch_${EPOCH}.pth" + exit +fi + +mkdir -p $WORK_DIR/logs +echo "Testing checkpoint: $WORK_DIR/epoch_${EPOCH}.pth" 2>&1 | tee -a $WORK_DIR/logs/eval_svm.log + +bash tools/dist_extract.sh $CFG $GPUS $WORK_DIR --checkpoint $WORK_DIR/epoch_${EPOCH}.pth + +bash benchmarks/svm_tools/eval_svm_full.sh $WORK_DIR "$FEAT_LIST" + +bash benchmarks/svm_tools/eval_svm_lowshot.sh $WORK_DIR "$FEAT_LIST" diff --git a/benchmarks/dist_test_svm_pretrain.sh b/benchmarks/dist_test_svm_pretrain.sh new file mode 100644 index 00000000..5297899b --- /dev/null +++ b/benchmarks/dist_test_svm_pretrain.sh @@ -0,0 +1,28 @@ +#!/bin/bash +set -e +set -x + +CFG=$1 +PRETRAIN=$2 # pretrained model or "random" (random init) +FEAT_LIST=$3 # e.g.: "feat5", "feat4 feat5". 
If leave empty, the default is "feat5" +GPUS=${4:-8} +WORK_DIR="$(echo ${CFG%.*} | sed -e "s/configs/work_dirs/g")/$(echo $PRETRAIN | rev | cut -d/ -f 1 | rev)" + +if [ "$CFG" == "" ] || [ "$PRETRAIN" == "" ]; then + echo "ERROR: Missing arguments." + exit +fi + +if [ ! -f $PRETRAIN ] && [ "$PRETRAIN" != "random" ]; then + echo "ERROR: PRETRAIN should be a file or a string \"random\", got: $PRETRAIN" + exit +fi + +mkdir -p $WORK_DIR/logs +echo "Testing pretrain: $PRETRAIN" 2>&1 | tee -a $WORK_DIR/logs/eval_svm.log + +bash tools/dist_extract.sh $CFG $GPUS $WORK_DIR --pretrained $PRETRAIN + +bash benchmarks/svm_tools/eval_svm_full.sh $WORK_DIR "$FEAT_LIST" + +bash benchmarks/svm_tools/eval_svm_lowshot.sh $WORK_DIR "$FEAT_LIST" diff --git a/benchmarks/dist_train_linear.sh b/benchmarks/dist_train_linear.sh new file mode 100644 index 00000000..ce1c4224 --- /dev/null +++ b/benchmarks/dist_train_linear.sh @@ -0,0 +1,24 @@ +#!/usr/bin/env bash + +set -e +set -x + +CFG=$1 # use cfgs under "configs/benchmarks/linear_classification/" +PRETRAIN=$2 +PY_ARGS=${@:3} # --resume_from --deterministic +GPUS=8 # When changing GPUS, please also change imgs_per_gpu in the config file accordingly to ensure the total batch size is 256. +PORT=${PORT:-29500} + +if [ "$CFG" == "" ] || [ "$PRETRAIN" == "" ]; then + echo "ERROR: Missing arguments." + exit +fi + +WORK_DIR="$(echo ${CFG%.*} | sed -e "s/configs/work_dirs/g")/$(echo $PRETRAIN | rev | cut -d/ -f 1 | rev)" + +# train +python -m torch.distributed.launch --nproc_per_node=$GPUS --master_port=$PORT \ + tools/train.py \ + $CFG \ + --pretrained $PRETRAIN \ + --work_dir $WORK_DIR --seed 0 --launcher="pytorch" ${PY_ARGS} diff --git a/benchmarks/dist_train_linear_1gpu.sh b/benchmarks/dist_train_linear_1gpu.sh new file mode 100644 index 00000000..51244e11 --- /dev/null +++ b/benchmarks/dist_train_linear_1gpu.sh @@ -0,0 +1,24 @@ +#!/usr/bin/env bash + +set -e +set -x + +CFG=$1 # use cfgs under "configs/benchmarks/linear_classification/" +PRETRAIN=$2 +PY_ARGS=${@:3} # --resume_from --deterministic +GPUS=1 # When changing GPUS, please also change imgs_per_gpu in the config file accordingly to ensure the total batch size is 256. +PORT=${PORT:-29500} + +if [ "$CFG" == "" ] || [ "$PRETRAIN" == "" ]; then + echo "ERROR: Missing arguments." + exit +fi + +WORK_DIR="$(echo ${CFG%.*} | sed -e "s/configs/work_dirs/g")/$(echo $PRETRAIN | rev | cut -d/ -f 1 | rev)" + +# train +python -m torch.distributed.launch --nproc_per_node=$GPUS --master_port=$PORT \ + tools/train.py \ + $CFG \ + --pretrained $PRETRAIN \ + --work_dir $WORK_DIR --seed 0 --launcher="pytorch" ${PY_ARGS} diff --git a/benchmarks/dist_train_linear_1gpu_sd.sh b/benchmarks/dist_train_linear_1gpu_sd.sh new file mode 100644 index 00000000..eee380bf --- /dev/null +++ b/benchmarks/dist_train_linear_1gpu_sd.sh @@ -0,0 +1,27 @@ +#!/usr/bin/env bash + +set -e +set -x + +CFG=$1 # use cfgs under "configs/benchmarks/linear_classification/" +PRETRAIN=$2 +SD=$3 # random seed +PY_ARGS=${@:4} # --resume_from --deterministic +GPUS=1 # When changing GPUS, please also change imgs_per_gpu in the config file accordingly to ensure the total batch size is 256. +PORT=${PORT:-29500} + +if [ "$CFG" == "" ] || [ "$PRETRAIN" == "" ]; then + echo "ERROR: Missing arguments." 
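+    # expected: bash benchmarks/dist_train_linear_1gpu_sd.sh <cfg> <pretrain> <seed> [py_args]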
+ exit +fi + + +WORK_DIR="$(echo ${CFG%.*} | sed -e "s/configs/work_dirs/g")/$(echo $PRETRAIN | rev | cut -d/ -f 1 | rev)" + + +# train +python -m torch.distributed.launch --nproc_per_node=$GPUS --master_port=$PORT \ + tools/train.py \ + $CFG \ + --pretrained $PRETRAIN \ + --work_dir $WORK_DIR --seed $SD --launcher="pytorch" ${PY_ARGS} diff --git a/benchmarks/dist_train_linear_2gpu.sh b/benchmarks/dist_train_linear_2gpu.sh new file mode 100644 index 00000000..36131104 --- /dev/null +++ b/benchmarks/dist_train_linear_2gpu.sh @@ -0,0 +1,24 @@ +#!/usr/bin/env bash + +set -e +set -x + +CFG=$1 # use cfgs under "configs/benchmarks/linear_classification/" +PRETRAIN=$2 +PY_ARGS=${@:3} # --resume_from --deterministic +GPUS=2 # When changing GPUS, please also change imgs_per_gpu in the config file accordingly to ensure the total batch size is 256. +PORT=${PORT:-29500} + +if [ "$CFG" == "" ] || [ "$PRETRAIN" == "" ]; then + echo "ERROR: Missing arguments." + exit +fi + +WORK_DIR="$(echo ${CFG%.*} | sed -e "s/configs/work_dirs/g")/$(echo $PRETRAIN | rev | cut -d/ -f 1 | rev)" + +# train +python -m torch.distributed.launch --nproc_per_node=$GPUS --master_port=$PORT \ + tools/train.py \ + $CFG \ + --pretrained $PRETRAIN \ + --work_dir $WORK_DIR --seed 0 --launcher="pytorch" ${PY_ARGS} diff --git a/benchmarks/dist_train_linear_4gpu.sh b/benchmarks/dist_train_linear_4gpu.sh new file mode 100644 index 00000000..9f176d2c --- /dev/null +++ b/benchmarks/dist_train_linear_4gpu.sh @@ -0,0 +1,24 @@ +#!/usr/bin/env bash + +set -e +set -x + +CFG=$1 # use cfgs under "configs/benchmarks/linear_classification/" +PRETRAIN=$2 +PY_ARGS=${@:3} # --resume_from --deterministic +GPUS=4 # When changing GPUS, please also change imgs_per_gpu in the config file accordingly to ensure the total batch size is 256. +PORT=${PORT:-29500} + +if [ "$CFG" == "" ] || [ "$PRETRAIN" == "" ]; then + echo "ERROR: Missing arguments." + exit +fi + +WORK_DIR="$(echo ${CFG%.*} | sed -e "s/configs/work_dirs/g")/$(echo $PRETRAIN | rev | cut -d/ -f 1 | rev)" + +# train +python -m torch.distributed.launch --nproc_per_node=$GPUS --master_port=$PORT \ + tools/train.py \ + $CFG \ + --pretrained $PRETRAIN \ + --work_dir $WORK_DIR --seed 0 --launcher="pytorch" ${PY_ARGS} diff --git a/benchmarks/dist_train_semi.sh b/benchmarks/dist_train_semi.sh new file mode 100644 index 00000000..b6d7e37b --- /dev/null +++ b/benchmarks/dist_train_semi.sh @@ -0,0 +1,24 @@ +#!/usr/bin/env bash + +set -e +set -x + +CFG=$1 # use cfgs under "configs/benchmarks/semi_classification/imagenet_*percent/" +PRETRAIN=$2 +PY_ARGS=${@:3} +GPUS=4 # in the standard setting, GPUS=4 +PORT=${PORT:-29500} + +if [ "$CFG" == "" ] || [ "$PRETRAIN" == "" ]; then + echo "ERROR: Missing arguments." 
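+    # expected: bash benchmarks/dist_train_semi.sh <cfg> <pretrain> [py_args]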
+ exit +fi + +WORK_DIR="$(echo ${CFG%.*} | sed -e "s/configs/work_dirs/g")/$(echo $PRETRAIN | rev | cut -d/ -f 1 | rev)" + +# train +python -m torch.distributed.launch --nproc_per_node=$GPUS --master_port=$PORT \ + tools/train.py \ + $CFG \ + --pretrained $PRETRAIN \ + --work_dir $WORK_DIR --seed 0 --launcher="pytorch" ${PY_ARGS} diff --git a/benchmarks/extract_info/voc07.py b/benchmarks/extract_info/voc07.py new file mode 100644 index 00000000..2680b198 --- /dev/null +++ b/benchmarks/extract_info/voc07.py @@ -0,0 +1,20 @@ +data_source_cfg = dict(type='ImageList', memcached=False, mclient_path=None) +data_root = "data/VOCdevkit/VOC2007/JPEGImages" +data_all_list = "data/VOCdevkit/VOC2007/Lists/trainvaltest.txt" +split_at = [5011] +split_name = ['voc07_trainval', 'voc07_test'] +img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) + +data = dict( + imgs_per_gpu=32, + workers_per_gpu=2, + extract=dict( + type="ExtractDataset", + data_source=dict( + list_file=data_all_list, root=data_root, **data_source_cfg), + pipeline=[ + dict(type='Resize', size=256), + dict(type='Resize', size=(224, 224)), + dict(type='ToTensor'), + dict(type='Normalize', **img_norm_cfg), + ])) diff --git a/benchmarks/srun_test_svm_epoch.sh b/benchmarks/srun_test_svm_epoch.sh new file mode 100644 index 00000000..33f41262 --- /dev/null +++ b/benchmarks/srun_test_svm_epoch.sh @@ -0,0 +1,24 @@ +#!/usr/bin/env bash +set -e +set -x + +PARTITION=$1 +CFG=$2 +EPOCH=$3 +FEAT_LIST=$4 # e.g.: "feat5", "feat4 feat5". If leave empty, the default is "feat5" +GPUS=${5:-8} +WORK_DIR=$(echo ${CFG%.*} | sed -e "s/configs/work_dirs/g")/ + +if [ ! -f $WORK_DIR/epoch_${EPOCH}.pth ]; then + echo "ERROR: File not exist: $WORK_DIR/epoch_${EPOCH}.pth" + exit +fi + +mkdir -p $WORK_DIR/logs +echo "Testing checkpoint: $WORK_DIR/epoch_${EPOCH}.pth" 2>&1 | tee -a $WORK_DIR/logs/eval_svm.log + +bash tools/srun_extract.sh $PARTITION $CFG $GPUS $WORK_DIR --checkpoint $WORK_DIR/epoch_${EPOCH}.pth + +srun -p $PARTITION bash benchmarks/svm_tools/eval_svm_full.sh $WORK_DIR "$FEAT_LIST" + +srun -p $PARTITION bash benchmarks/svm_tools/eval_svm_lowshot.sh $WORK_DIR "$FEAT_LIST" diff --git a/benchmarks/srun_test_svm_pretrain.sh b/benchmarks/srun_test_svm_pretrain.sh new file mode 100644 index 00000000..a475138b --- /dev/null +++ b/benchmarks/srun_test_svm_pretrain.sh @@ -0,0 +1,24 @@ +#!/usr/bin/env bash +set -e +set -x + +PARTITION=$1 +CFG=$2 +PRETRAIN=$3 # pretrained model or "random" (random init) +FEAT_LIST=$4 # e.g.: "feat5", "feat4 feat5". If leave empty, the default is "feat5" +GPUS=${5:-8} +WORK_DIR="$(echo ${CFG%.*} | sed -e "s/configs/work_dirs/g")/$(echo $PRETRAIN | rev | cut -d/ -f 1 | rev)" + +if [ ! 
-f $PRETRAIN ] && [ "$PRETRAIN" != "random" ]; then + echo "ERROR: PRETRAIN should be a file or a string \"random\", got: $PRETRAIN" + exit +fi + +mkdir -p $WORK_DIR/logs +echo "Testing pretrain: $PRETRAIN" 2>&1 | tee -a $WORK_DIR/logs/eval_svm.log + +bash tools/srun_extract.sh $PARTITION $CFG $GPUS $WORK_DIR --pretrained $PRETRAIN + +srun -p $PARTITION bash benchmarks/svm_tools/eval_svm_full.sh $WORK_DIR "$FEAT_LIST" + +srun -p $PARTITION bash benchmarks/svm_tools/eval_svm_lowshot.sh $WORK_DIR "$FEAT_LIST" diff --git a/benchmarks/srun_train_linear.sh b/benchmarks/srun_train_linear.sh new file mode 100644 index 00000000..4f857b58 --- /dev/null +++ b/benchmarks/srun_train_linear.sh @@ -0,0 +1,31 @@ +#!/usr/bin/env bash + +set -e +set -x + +PARTITION=$1 +CFG=$2 +PRETRAIN=$3 +PY_ARGS=${@:4} +JOB_NAME="openselfsup" +GPUS=8 # When changing GPUS, please also change imgs_per_gpu in the config file accordingly to ensure the total batch size is 256. +GPUS_PER_NODE=${GPUS_PER_NODE:-8} +CPUS_PER_TASK=${CPUS_PER_TASK:-5} +SRUN_ARGS=${SRUN_ARGS:-""} + +WORK_DIR="$(echo ${CFG%.*} | sed -e "s/configs/work_dirs/g")/$(echo $PRETRAIN | rev | cut -d/ -f 1 | rev)" + +# train +GLOG_vmodule=MemcachedClient=-1 \ +srun -p ${PARTITION} \ + --job-name=${JOB_NAME} \ + --gres=gpu:${GPUS_PER_NODE} \ + --ntasks=${GPUS} \ + --ntasks-per-node=${GPUS_PER_NODE} \ + --cpus-per-task=${CPUS_PER_TASK} \ + --kill-on-bad-exit=1 \ + ${SRUN_ARGS} \ + python -u tools/train.py \ + $CFG \ + --pretrained $PRETRAIN \ + --work_dir $WORK_DIR --seed 0 --launcher="slurm" ${PY_ARGS} diff --git a/benchmarks/srun_train_semi.sh b/benchmarks/srun_train_semi.sh new file mode 100644 index 00000000..0aa8f022 --- /dev/null +++ b/benchmarks/srun_train_semi.sh @@ -0,0 +1,31 @@ +#!/usr/bin/env bash + +set -e +set -x + +PARTITION=$1 +CFG=$2 +PRETRAIN=$3 +PY_ARGS=${@:4} +JOB_NAME="openselfsup" +GPUS=4 # in the standard setting, GPUS=4 +GPUS_PER_NODE=${GPUS_PER_NODE:-4} +CPUS_PER_TASK=${CPUS_PER_TASK:-5} +SRUN_ARGS=${SRUN_ARGS:-""} + +WORK_DIR="$(echo ${CFG%.*} | sed -e "s/configs/work_dirs/g")/$(echo $PRETRAIN | rev | cut -d/ -f 1 | rev)" + +# train +GLOG_vmodule=MemcachedClient=-1 \ +srun -p ${PARTITION} \ + --job-name=${JOB_NAME} \ + --gres=gpu:${GPUS_PER_NODE} \ + --ntasks=${GPUS} \ + --ntasks-per-node=${GPUS_PER_NODE} \ + --cpus-per-task=${CPUS_PER_TASK} \ + --kill-on-bad-exit=1 \ + ${SRUN_ARGS} \ + python -u tools/train.py \ + $CFG \ + --pretrained $PRETRAIN \ + --work_dir $WORK_DIR --seed 0 --launcher="slurm" ${PY_ARGS} diff --git a/benchmarks/svm_tools/aggregate_low_shot_svm_stats.py b/benchmarks/svm_tools/aggregate_low_shot_svm_stats.py new file mode 100644 index 00000000..34f59377 --- /dev/null +++ b/benchmarks/svm_tools/aggregate_low_shot_svm_stats.py @@ -0,0 +1,127 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. +# +################################################################################ +""" +Aggregate the stats over various independent samples for low-shot svm training. +Stats computed: mean, max, min, std + +Relevant transfer tasks: Low-shot Image Classification VOC07 and Places205 low +shot samples.
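+
+Expects per-sample AP result files named test_ap_sample{i}_k{k}.npy under
+--output_path, as written by test_svm_low_shot.py. Example invocation
+(mirroring eval_svm_lowshot.sh):
+    python benchmarks/svm_tools/aggregate_low_shot_svm_stats.py \
+        --output_path $WORK_DIR/svm_lowshot/voc07_feat5 \
+        --k_values "1,2,4,8,16,32,64,96" \
+        --sample_inds "0,1,2,3,4"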
+""" + +from __future__ import division +from __future__ import absolute_import +from __future__ import unicode_literals +from __future__ import print_function + +import argparse +import logging +import numpy as np +import os +import sys + +# create the logger +FORMAT = '[%(levelname)s: %(filename)s: %(lineno)4d]: %(message)s' +logging.basicConfig(level=logging.INFO, format=FORMAT, stream=sys.stdout) +logger = logging.getLogger(__name__) + + +def save_stats(output_dir, stat, output): + out_file = os.path.join(output_dir, 'test_ap_{}.npy'.format(stat)) + #logger.info('Saving {} to: {} {}'.format(stat, out_file, output.shape)) + np.save(out_file, output) + + +def aggregate_stats(opts): + k_values = [int(val) for val in opts.k_values.split(",")] + sample_inds = [int(val) for val in opts.sample_inds.split(",")] + #logger.info( + # 'Aggregating stats for k-values: {} and sample_inds: {}'.format( + # k_values, sample_inds)) + + output_mean, output_max, output_min, output_std = [], [], [], [] + for k_idx in range(len(k_values)): + k_low = k_values[k_idx] + k_val_output = [] + for inds in range(len(sample_inds)): + sample_idx = sample_inds[inds] + file_name = 'test_ap_sample{}_k{}.npy'.format( + sample_idx + 1, k_low) + filepath = os.path.join(opts.output_path, file_name) + if os.path.exists(filepath): + k_val_output.append(np.load(filepath, encoding='latin1')) + else: + logger.info('file does not exist: {}'.format(filepath)) + k_val_output = np.concatenate(k_val_output, axis=0) + k_low_max = np.max( + k_val_output, axis=0).reshape(-1, k_val_output.shape[1]) + k_low_min = np.min( + k_val_output, axis=0).reshape(-1, k_val_output.shape[1]) + k_low_mean = np.mean( + k_val_output, axis=0).reshape(-1, k_val_output.shape[1]) + k_low_std = np.std( + k_val_output, axis=0).reshape(-1, k_val_output.shape[1]) + output_mean.append(k_low_mean) + output_min.append(k_low_min) + output_max.append(k_low_max) + output_std.append(k_low_std) + + output_mean = np.concatenate(output_mean, axis=0) + output_min = np.concatenate(output_min, axis=0) + output_max = np.concatenate(output_max, axis=0) + output_std = np.concatenate(output_std, axis=0) + + save_stats(opts.output_path, 'mean', output_mean) + save_stats(opts.output_path, 'min', output_min) + save_stats(opts.output_path, 'max', output_max) + save_stats(opts.output_path, 'std', output_std) + + argmax_cls = np.argmax(output_mean, axis=1) + argmax_mean, argmax_min, argmax_max, argmax_std = [], [], [], [] + for idx in range(len(argmax_cls)): + argmax_mean.append(100.0 * output_mean[idx, argmax_cls[idx]]) + argmax_min.append(100.0 * output_min[idx, argmax_cls[idx]]) + argmax_max.append(100.0 * output_max[idx, argmax_cls[idx]]) + argmax_std.append(100.0 * output_std[idx, argmax_cls[idx]]) + for idx in range(len(argmax_max)): + logger.info('mean/min/max/std: {} / {} / {} / {}'.format( + round(argmax_mean[idx], 2), + round(argmax_min[idx], 2), + round(argmax_max[idx], 2), + round(argmax_std[idx], 2), + )) + #logger.info('All done!!') + + +def main(): + parser = argparse.ArgumentParser(description='Low shot SVM model test') + parser.add_argument( + '--output_path', + type=str, + default=None, + help="Numpy file containing test AP result files") + parser.add_argument( + '--k_values', + type=str, + default=None, + help="Low-shot k-values for svm testing. Comma separated") + parser.add_argument( + '--sample_inds', + type=str, + default=None, + help="sample_inds for which to test svm. 
Comma separated") + if len(sys.argv) == 1: + parser.print_help() + sys.exit(1) + + opts = parser.parse_args() + #logger.info(opts) + aggregate_stats(opts) + + +if __name__ == '__main__': + main() diff --git a/benchmarks/svm_tools/eval_svm_full.sh b/benchmarks/svm_tools/eval_svm_full.sh new file mode 100644 index 00000000..4fbbd26e --- /dev/null +++ b/benchmarks/svm_tools/eval_svm_full.sh @@ -0,0 +1,40 @@ +#!/bin/bash +set -x +set -e + +WORK_DIR=$1 +FEAT_LIST=${2:-"feat5"} # "feat1 feat2 feat3 feat4 feat5" +TRAIN_SVM_FLAG=true +TEST_SVM_FLAG=true +DATA="data/VOCdevkit/VOC2007/SVMLabels" + +# config svm +costs="1.0,10.0,100.0" + +for feat in $FEAT_LIST; do + echo "For feature: $feat" 2>&1 | tee -a $WORK_DIR/logs/eval_svm.log + # train svm + if $TRAIN_SVM_FLAG; then + rm -rf $WORK_DIR/svm + mkdir -p $WORK_DIR/svm/voc07_${feat} + echo "training svm ..." + python benchmarks/svm_tools/train_svm_kfold_parallel.py \ + --data_file $WORK_DIR/features/voc07_trainval_${feat}.npy \ + --targets_data_file $DATA/train_labels.npy \ + --costs_list $costs \ + --output_path $WORK_DIR/svm/voc07_${feat} + fi + + # test svm + if $TEST_SVM_FLAG; then + echo "testing svm ..." + python benchmarks/svm_tools/test_svm.py \ + --data_file $WORK_DIR/features/voc07_test_${feat}.npy \ + --json_targets $DATA/test_targets.json \ + --targets_data_file $DATA/test_labels.npy \ + --costs_list $costs \ + --generate_json 1 \ + --output_path $WORK_DIR/svm/voc07_${feat} 2>&1 | tee -a $WORK_DIR/logs/eval_svm.log + fi + +done diff --git a/benchmarks/svm_tools/eval_svm_lowshot.sh b/benchmarks/svm_tools/eval_svm_lowshot.sh new file mode 100644 index 00000000..ae85b126 --- /dev/null +++ b/benchmarks/svm_tools/eval_svm_lowshot.sh @@ -0,0 +1,64 @@ +#!/bin/bash +set -x +set -e + +WORK_DIR=$1 +MODE="full" +FEAT_LIST=${2:-"feat5"} # "feat1 feat2 feat3 feat4 feat5" +TRAIN_SVM_LOWSHOT_FLAG=true +TEST_SVM_LOWSHOT_FLAG=true +AGGREGATE_FLAG=true +DATA="data/VOCdevkit/VOC2007/SVMLabels" + +# config svm +costs="1.0,10.0,100.0" +if [ "$MODE" == "fast" ]; then + shots="96" +else + shots="1 2 4 8 16 32 64 96" +fi + +for feat in $FEAT_LIST; do + echo "For feature: $feat" 2>&1 | tee -a $WORK_DIR/logs/eval_svm.log + # train lowshot svm + if $TRAIN_SVM_LOWSHOT_FLAG; then + rm -rf $WORK_DIR/svm_lowshot + mkdir -p $WORK_DIR/svm_lowshot/voc07_${feat} + echo "training svm low-shot ..." + for s in {1..5}; do + for k in $shots; do + echo -e "\ts${s} k${k}" + python benchmarks/svm_tools/train_svm_low_shot.py \ + --data_file $WORK_DIR/features/voc07_trainval_${feat}.npy \ + --targets_data_file $DATA/low_shot/labels/train_targets_sample${s}_k${k}.npy \ + --costs_list $costs \ + --output_path $WORK_DIR/svm_lowshot/voc07_${feat} + done + done + fi + + # test lowshot svm + if $TEST_SVM_LOWSHOT_FLAG; then + echo "testing svm low-shot ..." + python benchmarks/svm_tools/test_svm_low_shot.py \ + --data_file $WORK_DIR/features/voc07_test_${feat}.npy \ + --targets_data_file $DATA/test_labels.npy \ + --json_targets $DATA/test_targets.json \ + --generate_json 1 \ + --costs_list $costs \ + --output_path $WORK_DIR/svm_lowshot/voc07_${feat} \ + --k_values "${shots// /,}" \ + --sample_inds "0,1,2,3,4" \ + --dataset "voc" + fi + + # aggregate testing results + if $AGGREGATE_FLAG; then + echo "aggregating svm low-shot ..." 
+ python benchmarks/svm_tools/aggregate_low_shot_svm_stats.py \ + --output_path $WORK_DIR/svm_lowshot/voc07_${feat} \ + --k_values "${shots// /,}" \ + --sample_inds "0,1,2,3,4" 2>&1 | tee -a $WORK_DIR/logs/eval_svm.log + fi + +done diff --git a/benchmarks/svm_tools/svm_helper.py b/benchmarks/svm_tools/svm_helper.py new file mode 100644 index 00000000..9fe14470 --- /dev/null +++ b/benchmarks/svm_tools/svm_helper.py @@ -0,0 +1,171 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. +# +################################################################################ +""" +Helper module for svm training and testing. +""" + +from __future__ import division +from __future__ import absolute_import +from __future__ import unicode_literals +from __future__ import print_function + +import logging +import numpy as np +import os +import sys + +# create the logger +FORMAT = '[%(levelname)s: %(filename)s: %(lineno)4d]: %(message)s' +logging.basicConfig(level=logging.INFO, format=FORMAT, stream=sys.stdout) +logger = logging.getLogger(__name__) + + +# Python 2 and python 3 have different floating point precision. The following +# trick helps keep the backwards compatibility. +def py2_py3_compatible_cost(cost): + return str(float("{:.17f}".format(cost))) + + +def get_svm_train_output_files(cls, cost, output_path): + cls_cost = str(cls) + '_cost' + py2_py3_compatible_cost(cost) + out_file = os.path.join(output_path, 'cls' + cls_cost + '.pickle') + ap_matrix_out_file = os.path.join(output_path, + 'AP_cls' + cls_cost + '.npy') + return out_file, ap_matrix_out_file + + +def parse_cost_list(costs): + costs_list = [float(cost) for cost in costs.split(",")] + start_num, end_num = 4, 20 + for num in range(start_num, end_num): + costs_list.append(0.5**num) + return costs_list + + +def normalize_features(features): + feats_norm = np.linalg.norm(features, axis=1) + features = features / (feats_norm + 1e-5)[:, np.newaxis] + return features + + +def load_input_data(data_file, targets_file): + # load the features and the targets + #logger.info('loading features and targets...') + targets = np.load(targets_file, encoding='latin1') + features = np.array(np.load(data_file, + encoding='latin1')).astype(np.float64) + assert features.shape[0] == targets.shape[0], "Mismatched #images" + #logger.info('Loaded features: {} and targets: {}'.format( + # features.shape, targets.shape)) + return features, targets + + +def calculate_ap(rec, prec): + """ + Computes the AP under the precision recall curve. 
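+
+    Follows the PASCAL VOC protocol: the precision curve is first made
+    monotonically non-increasing, then AP is accumulated as
+    sum((R[i] - R[i-1]) * P[i]) over the points where recall changes.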
+ """ + rec, prec = rec.reshape(rec.size, 1), prec.reshape(prec.size, 1) + z, o = np.zeros((1, 1)), np.ones((1, 1)) + mrec, mpre = np.vstack((z, rec, o)), np.vstack((z, prec, z)) + for i in range(len(mpre) - 2, -1, -1): + mpre[i] = max(mpre[i], mpre[i + 1]) + + indices = np.where(mrec[1:] != mrec[0:-1])[0] + 1 + ap = 0 + for i in indices: + ap = ap + (mrec[i] - mrec[i - 1]) * mpre[i] + return ap + + +def get_precision_recall(targets, preds): + """ + [P, R, score, ap] = get_precision_recall(targets, preds) + Input : + targets : number of occurrences of this class in the ith image + preds : score for this image + Output : + P, R : precision and recall + score : score which corresponds to the particular precision and recall + ap : average precision + """ + # binarize targets + targets = np.array(targets > 0, dtype=np.float32) + tog = np.hstack((targets[:, np.newaxis].astype(np.float64), + preds[:, np.newaxis].astype(np.float64))) + ind = np.argsort(preds) + ind = ind[::-1] + score = np.array([tog[i, 1] for i in ind]) + sortcounts = np.array([tog[i, 0] for i in ind]) + + tp = sortcounts + fp = sortcounts.copy() + for i in range(sortcounts.shape[0]): + if sortcounts[i] >= 1: + fp[i] = 0. + elif sortcounts[i] < 1: + fp[i] = 1. + P = np.cumsum(tp) / (np.cumsum(tp) + np.cumsum(fp)) + numinst = np.sum(targets) + R = np.cumsum(tp) / numinst + ap = calculate_ap(R, P) + return P, R, score, ap + + +def get_low_shot_output_file(opts, cls, cost, suffix): + # in case of low-shot training, we train for 5 independent samples + # (sample{}) and vary low-shot amount (k{}). The input data should have + # sample{}_k{} information that we extract in suffix below. + # logger.info('Suffix: {}'.format(suffix)) + cls_cost = str(cls) + '_cost' + py2_py3_compatible_cost(cost) + out_file = os.path.join(opts.output_path, + 'cls' + cls_cost + '_' + suffix + '.pickle') + return out_file + + +def get_low_shot_svm_classes(targets, dataset): + # classes for which SVM testing should be done + num_classes, cls_list = None, None + if dataset == 'voc': + num_classes = targets.shape[1] + cls_list = range(num_classes) + elif dataset == 'places': + # each image in places has a target cls [0, .... ,204] + num_classes = len(set(targets[:, 0].tolist())) + cls_list = list(set(targets[:, 0].tolist())) + else: + logger.info('Dataset not recognized. Abort!') + #logger.info('Testing SVM for classes: {}'.format(cls_list)) + #logger.info('Num classes: {}'.format(num_classes)) + return num_classes, cls_list + + +def get_cls_feats_labels(cls, features, targets, dataset): + out_feats, out_cls_labels = None, None + if dataset == 'voc': + cls_labels = targets[:, cls].astype(dtype=np.int32, copy=True) + # find the indices for positive/negative imgs. Remove the ignore label. + out_data_inds = (targets[:, cls] != -1) + out_feats = features[out_data_inds] + out_cls_labels = cls_labels[out_data_inds] + # label 0 = not present, set it to -1 as svm train target. + # Make the svm train target labels as -1, 1. + out_cls_labels[np.where(out_cls_labels == 0)] = -1 + elif dataset == 'places': + out_feats = features + out_cls_labels = targets.astype(dtype=np.int32, copy=True) + # for the given class, get the relevant positive/negative images and + # make the label 1, -1 + cls_inds = np.where(targets[:, 0] == cls) + non_cls_inds = (targets[:, 0] != cls) + out_cls_labels[non_cls_inds] = -1 + out_cls_labels[cls_inds] = 1 + # finally reshape into the format taken by sklearn svm package. 
+ out_cls_labels = out_cls_labels.reshape(-1) + else: + raise Exception('args.dataset not recognized') + return out_feats, out_cls_labels diff --git a/benchmarks/svm_tools/test_svm.py b/benchmarks/svm_tools/test_svm.py new file mode 100644 index 00000000..49d34e20 --- /dev/null +++ b/benchmarks/svm_tools/test_svm.py @@ -0,0 +1,174 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. +# +################################################################################ +""" +SVM test for image classification. + +Relevant transfer tasks: Image Classification VOC07 and COCO2014. +""" +from __future__ import division +from __future__ import absolute_import +from __future__ import unicode_literals +from __future__ import print_function + +import argparse +import json +import logging +import numpy as np +import os +import pickle +import six +import sys + +import svm_helper + +# create the logger +FORMAT = '[%(levelname)s: %(filename)s: %(lineno)4d]: %(message)s' +logging.basicConfig(level=logging.INFO, format=FORMAT, stream=sys.stdout) +logger = logging.getLogger(__name__) + + +def get_chosen_costs(opts, num_classes): + costs_list = svm_helper.parse_cost_list(opts.costs_list) + train_ap_matrix = np.zeros((num_classes, len(costs_list))) + for cls in range(num_classes): + for cost_idx in range(len(costs_list)): + cost = costs_list[cost_idx] + _, ap_out_file = svm_helper.get_svm_train_output_files( + cls, cost, opts.output_path) + train_ap_matrix[cls][cost_idx] = float( + np.load(ap_out_file, encoding='latin1')[0]) + argmax_cls = np.argmax(train_ap_matrix, axis=1) + chosen_cost = [costs_list[idx] for idx in argmax_cls] + #logger.info('chosen_cost: {}'.format(chosen_cost)) + np.save( + os.path.join(opts.output_path, 'crossval_ap.npy'), + np.array(train_ap_matrix)) + np.save( + os.path.join(opts.output_path, 'chosen_cost.npy'), + np.array(chosen_cost)) + #logger.info('saved crossval_ap AP to file: {}'.format( + # os.path.join(opts.output_path, 'crossval_ap.npy'))) + #logger.info('saved chosen costs to file: {}'.format( + # os.path.join(opts.output_path, 'chosen_cost.npy'))) + return np.array(chosen_cost) + + +def load_json(file_path): + assert os.path.exists(file_path), "{} does not exist".format(file_path) + with open(file_path, 'r') as fp: + data = json.load(fp) + img_ids = list(data.keys()) + cls_names = list(data[img_ids[0]].keys()) + return img_ids, cls_names + + +def test_svm(opts): + assert os.path.exists(opts.data_file), "Data file not found. Abort!" 
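+    # json_predictions maps img_id -> {cls_name: decision score}; it is only
+    # filled and saved when --generate_json is set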
+ json_predictions, img_ids, cls_names = {}, [], [] + if opts.generate_json: + img_ids, cls_names = load_json(opts.json_targets) + + features, targets = svm_helper.load_input_data(opts.data_file, + opts.targets_data_file) + # normalize the features: N x 9216 (example shape) + features = svm_helper.normalize_features(features) + num_classes = targets.shape[1] + #logger.info('Num classes: {}'.format(num_classes)) + + # get the chosen cost that maximizes the cross-validation AP per class + costs_list = get_chosen_costs(opts, num_classes) + + ap_matrix = np.zeros((num_classes, 1)) + for cls in range(num_classes): + cost = costs_list[cls] + #logger.info('Testing model for cls: {} cost: {}'.format(cls, cost)) + model_file = os.path.join( + opts.output_path, + 'cls' + str(cls) + '_cost' + str(cost) + '.pickle') + with open(model_file, 'rb') as fopen: + if six.PY2: + model = pickle.load(fopen) + else: + model = pickle.load(fopen, encoding='latin1') + prediction = model.decision_function(features) + if opts.generate_json: + cls_name = cls_names[cls] + for idx in range(len(prediction)): + img_id = img_ids[idx] + if img_id in json_predictions: + json_predictions[img_id][cls_name] = prediction[idx] + else: + out_lbl = {} + out_lbl[cls_name] = prediction[idx] + json_predictions[img_id] = out_lbl + + cls_labels = targets[:, cls] + # meaning of labels in VOC/COCO original loaded target files: + # label 0 = not present, set it to -1 as svm train target + # label 1 = present. Make the svm train target labels as -1, 1. + evaluate_data_inds = (targets[:, cls] != -1) + eval_preds = prediction[evaluate_data_inds] + eval_cls_labels = cls_labels[evaluate_data_inds] + eval_cls_labels[np.where(eval_cls_labels == 0)] = -1 + P, R, score, ap = svm_helper.get_precision_recall( + eval_cls_labels, eval_preds) + ap_matrix[cls][0] = ap + if opts.generate_json: + output_file = os.path.join(opts.output_path, 'json_preds.json') + with open(output_file, 'w') as fp: + json.dump(json_predictions, fp) + #logger.info('Saved json predictions to: {}'.format(output_file)) + logger.info('Mean AP: {}'.format(np.mean(ap_matrix, axis=0))) + np.save(os.path.join(opts.output_path, 'test_ap.npy'), np.array(ap_matrix)) + #logger.info('saved test AP to file: {}'.format( + # os.path.join(opts.output_path, 'test_ap.npy'))) + + +def main(): + parser = argparse.ArgumentParser(description='SVM model test') + parser.add_argument( + '--data_file', + type=str, + default=None, + help="Numpy file containing image features and labels") + parser.add_argument( + '--json_targets', + type=str, + default=None, + help="Json file containing json targets") + parser.add_argument( + '--targets_data_file', + type=str, + default=None, + help="Numpy file containing image labels") + parser.add_argument( + '--costs_list', + type=str, + default="0.01,0.1", + help="comma separated string containing list of costs") + parser.add_argument( + '--output_path', + type=str, + default=None, + help="path where trained SVM models are saved") + parser.add_argument( + '--generate_json', + type=int, + default=0, + help="Whether to generate json files for output") + if len(sys.argv) == 1: + parser.print_help() + sys.exit(1) + + opts = parser.parse_args() + #logger.info(opts) + test_svm(opts) + + +if __name__ == '__main__': + main() diff --git a/benchmarks/svm_tools/test_svm_low_shot.py b/benchmarks/svm_tools/test_svm_low_shot.py new file mode 100644 index 00000000..75ba81c9 --- /dev/null +++ b/benchmarks/svm_tools/test_svm_low_shot.py @@ -0,0 +1,212 @@ +# Copyright (c) Facebook, 
Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. +# +################################################################################ +""" +SVM test for low shot image classification. + +Relevant transfer tasks: Low-shot Image Classification VOC07 and Places205 low +shot samples. +""" +from __future__ import division +from __future__ import absolute_import +from __future__ import unicode_literals +from __future__ import print_function + +import argparse +import json +import logging +import numpy as np +import os +import pickle +import six +import sys + +import svm_helper + +# create the logger +FORMAT = '[%(levelname)s: %(filename)s: %(lineno)4d]: %(message)s' +logging.basicConfig(level=logging.INFO, format=FORMAT, stream=sys.stdout) +logger = logging.getLogger(__name__) + + +def load_json(file_path): + assert os.path.exists(file_path), "{} does not exist".format(file_path) + with open(file_path, 'r') as fp: + data = json.load(fp) + img_ids = list(data.keys()) + cls_names = list(data[img_ids[0]].keys()) + return img_ids, cls_names + + +def save_json_predictions(opts, cost, sample_idx, k_low, features, cls_list, + cls_names, img_ids): + num_classes = len(cls_list) + json_predictions = {} + for cls in range(num_classes): + suffix = 'sample{}_k{}'.format(sample_idx + 1, k_low) + model_file = svm_helper.get_low_shot_output_file( + opts, cls, cost, suffix) + with open(model_file, 'rb') as fopen: + if six.PY2: + model = pickle.load(fopen) + else: + model = pickle.load(fopen, encoding='latin1') + prediction = model.decision_function(features) + cls_name = cls_names[cls] + for idx in range(len(prediction)): + img_id = img_ids[idx] + if img_id in json_predictions: + json_predictions[img_id][cls_name] = prediction[idx] + else: + out_lbl = {} + out_lbl[cls_name] = prediction[idx] + json_predictions[img_id] = out_lbl + + output_file = os.path.join(opts.output_path, + 'test_{}_json_preds.json'.format(suffix)) + with open(output_file, 'w') as fp: + json.dump(json_predictions, fp) + #logger.info('Saved json predictions to: {}'.format(output_file)) + + +def test_svm_low_shot(opts): + k_values = [int(val) for val in opts.k_values.split(",")] + sample_inds = [int(val) for val in opts.sample_inds.split(",")] + #logger.info('Testing svm for k-values: {} and sample_inds: {}'.format( + # k_values, sample_inds)) + + img_ids, cls_names = [], [] + if opts.generate_json: + img_ids, cls_names = load_json(opts.json_targets) + + assert os.path.exists(opts.data_file), "Data file not found. Abort!" + # we test the svms on the full test set. Given the test features and the + # targets, we test it for various k-values (low-shot), cost values and + # 5 independent samples. + features, targets = svm_helper.load_input_data(opts.data_file, + opts.targets_data_file) + # normalize the features: N x 9216 (example shape) + features = svm_helper.normalize_features(features) + + # parse the cost values for training the SVM on + costs_list = svm_helper.parse_cost_list(opts.costs_list) + #logger.info('Testing SVM for costs: {}'.format(costs_list)) + + # classes for which SVM testing should be done + num_classes, cls_list = svm_helper.get_low_shot_svm_classes( + targets, opts.dataset) + + # create the output for per sample, per k-value and per cost. 
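+    # sample_ap_matrices[sample][k_idx][cost_idx] stores the AP averaged over all classes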
+ sample_ap_matrices = [] + for _ in range(len(sample_inds)): + ap_matrix = np.zeros((len(k_values), len(costs_list))) + sample_ap_matrices.append(ap_matrix) + + # the test goes like this: For a given sample, for a given k-value and a + # given cost value, we evaluate the trained svm model for all classes. + # After computing over all classes, we get the mean AP value over all + # classes. We hence end up with: output = [sample][k_value][cost] + for inds in range(len(sample_inds)): + sample_idx = sample_inds[inds] + for k_idx in range(len(k_values)): + k_low = k_values[k_idx] + suffix = 'sample{}_k{}'.format(sample_idx + 1, k_low) + for cost_idx in range(len(costs_list)): + cost = costs_list[cost_idx] + local_cost_ap = np.zeros((num_classes, 1)) + for cls in cls_list: + #logger.info( + # 'Test sample/k_value/cost/cls: {}/{}/{}/{}'.format( + # sample_idx + 1, k_low, cost, cls)) + model_file = svm_helper.get_low_shot_output_file( + opts, cls, cost, suffix) + with open(model_file, 'rb') as fopen: + if six.PY2: + model = pickle.load(fopen) + else: + model = pickle.load(fopen, encoding='latin1') + prediction = model.decision_function(features) + eval_preds, eval_cls_labels = svm_helper.get_cls_feats_labels( + cls, prediction, targets, opts.dataset) + P, R, score, ap = svm_helper.get_precision_recall( + eval_cls_labels, eval_preds) + local_cost_ap[cls][0] = ap + mean_cost_ap = np.mean(local_cost_ap, axis=0) + sample_ap_matrices[inds][k_idx][cost_idx] = mean_cost_ap + out_k_sample_file = os.path.join( + opts.output_path, + 'test_ap_sample{}_k{}.npy'.format(sample_idx + 1, k_low)) + save_data = sample_ap_matrices[inds][k_idx] + save_data = save_data.reshape((1, -1)) + np.save(out_k_sample_file, save_data) + #logger.info('Saved sample test k_idx AP to file: {} {}'.format( + # out_k_sample_file, save_data.shape)) + if opts.generate_json: + argmax_cls = np.argmax(save_data, axis=1) + chosen_cost = costs_list[argmax_cls[0]] + #logger.info('chosen cost: {}'.format(chosen_cost)) + save_json_predictions(opts, chosen_cost, sample_idx, k_low, + features, cls_list, cls_names, img_ids) + #logger.info('All done!!') + + +def main(): + parser = argparse.ArgumentParser(description='Low shot SVM model test') + parser.add_argument( + '--data_file', + type=str, + default=None, + help="Numpy file containing image features and labels") + parser.add_argument( + '--targets_data_file', + type=str, + default=None, + help="Numpy file containing image labels") + parser.add_argument( + '--json_targets', + type=str, + default=None, + help="Numpy file containing json targets") + parser.add_argument( + '--generate_json', + type=int, + default=0, + help="Whether to generate json files for output") + parser.add_argument( + '--costs_list', + type=str, + default= + "0.0000001,0.000001,0.00001,0.0001,0.001,0.01,0.1,1.0,10.0,100.0", + help="comma separated string containing list of costs") + parser.add_argument( + '--output_path', + type=str, + default=None, + help="path where trained SVM models are saved") + parser.add_argument( + '--k_values', + type=str, + default="1,2,4,8,16,32,64,96", + help="Low-shot k-values for svm testing. Comma separated") + parser.add_argument( + '--sample_inds', + type=str, + default="0,1,2,3,4", + help="sample_inds for which to test svm. 
Comma separated") + parser.add_argument( + '--dataset', type=str, default="voc", help='voc | places') + if len(sys.argv) == 1: + parser.print_help() + sys.exit(1) + + opts = parser.parse_args() + #logger.info(opts) + test_svm_low_shot(opts) + + +if __name__ == '__main__': + main() diff --git a/benchmarks/svm_tools/train_svm_kfold.py b/benchmarks/svm_tools/train_svm_kfold.py new file mode 100644 index 00000000..b3a7f1d2 --- /dev/null +++ b/benchmarks/svm_tools/train_svm_kfold.py @@ -0,0 +1,162 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. +# +################################################################################ +""" +SVM training using 3-fold cross-validation. + +Relevant transfer tasks: Image Classification VOC07 and COCO2014. +""" + +from __future__ import division +from __future__ import absolute_import +from __future__ import unicode_literals +from __future__ import print_function + +import argparse +import logging +import numpy as np +import os +import pickle +import sys +from tqdm import tqdm +from sklearn.svm import LinearSVC +from sklearn.model_selection import cross_val_score + +import svm_helper + +import time + +# create the logger +FORMAT = '[%(levelname)s: %(filename)s: %(lineno)4d]: %(message)s' +logging.basicConfig(level=logging.INFO, format=FORMAT, stream=sys.stdout) +logger = logging.getLogger(__name__) + + +def train_svm(opts): + assert os.path.exists(opts.data_file), "Data file not found. Abort!" + if not os.path.exists(opts.output_path): + os.makedirs(opts.output_path) + + features, targets = svm_helper.load_input_data(opts.data_file, + opts.targets_data_file) + # normalize the features: N x 9216 (example shape) + features = svm_helper.normalize_features(features) + + # parse the cost values for training the SVM on + costs_list = svm_helper.parse_cost_list(opts.costs_list) + #logger.info('Training SVM for costs: {}'.format(costs_list)) + + # classes for which SVM training should be done + if opts.cls_list: + cls_list = [int(cls) for cls in opts.cls_list.split(",")] + else: + num_classes = targets.shape[1] + cls_list = range(num_classes) + #logger.info('Training SVM for classes: {}'.format(cls_list)) + + for cls_idx in tqdm(range(len(cls_list))): + cls = cls_list[cls_idx] + for cost_idx in range(len(costs_list)): + start = time.time() + cost = costs_list[cost_idx] + out_file, ap_out_file = svm_helper.get_svm_train_output_files( + cls, cost, opts.output_path) + if os.path.exists(out_file) and os.path.exists(ap_out_file): + logger.info('SVM model exists: {}'.format(out_file)) + logger.info('AP file exists: {}'.format(ap_out_file)) + else: + #logger.info('Training model with the cost: {}'.format(cost)) + clf = LinearSVC( + C=cost, + class_weight={ + 1: 2, + -1: 1 + }, + intercept_scaling=1.0, + verbose=0, + penalty='l2', + loss='squared_hinge', + tol=0.0001, + dual=True, + max_iter=2000, + ) + cls_labels = targets[:, cls].astype(dtype=np.int32, copy=True) + # meaning of labels in VOC/COCO original loaded target files: + # label 0 = not present, set it to -1 as svm train target + # label 1 = present. Make the svm train target labels as -1, 1. 
+ cls_labels[np.where(cls_labels == 0)] = -1 + #num_positives = len(np.where(cls_labels == 1)[0]) + #num_negatives = len(cls_labels) - num_positives + + #logger.info('cls: {} has +ve: {} -ve: {} ratio: {}'.format( + # cls, num_positives, num_negatives, + # float(num_positives) / num_negatives) + #) + #logger.info('features: {} cls_labels: {}'.format( + # features.shape, cls_labels.shape)) + ap_scores = cross_val_score( + clf, + features, + cls_labels, + cv=3, + scoring='average_precision') + clf.fit(features, cls_labels) + + #logger.info('cls: {} cost: {} AP: {} mean:{}'.format( + # cls, cost, ap_scores, ap_scores.mean())) + #logger.info('Saving cls cost AP to: {}'.format(ap_out_file)) + np.save(ap_out_file, np.array([ap_scores.mean()])) + #logger.info('Saving SVM model to: {}'.format(out_file)) + with open(out_file, 'wb') as fwrite: + pickle.dump(clf, fwrite) + print("time: {:.4g} s".format(time.time() - start)) + + +def main(): + parser = argparse.ArgumentParser(description='SVM model training') + parser.add_argument( + '--data_file', + type=str, + default=None, + help="Numpy file containing image features") + parser.add_argument( + '--targets_data_file', + type=str, + default=None, + help="Numpy file containing image labels") + parser.add_argument( + '--output_path', + type=str, + default=None, + help="path where to save the trained SVM models") + parser.add_argument( + '--costs_list', + type=str, + default="0.01,0.1", + help="comma separated string containing list of costs") + parser.add_argument( + '--random_seed', + type=int, + default=100, + help="random seed for SVM classifier training") + + parser.add_argument( + '--cls_list', + type=str, + default=None, + help="comma separated string list of classes to train") + if len(sys.argv) == 1: + parser.print_help() + sys.exit(1) + + opts = parser.parse_args() + #logger.info(opts) + train_svm(opts) + + +if __name__ == '__main__': + main() diff --git a/benchmarks/svm_tools/train_svm_kfold_parallel.py b/benchmarks/svm_tools/train_svm_kfold_parallel.py new file mode 100644 index 00000000..1ffbcb8b --- /dev/null +++ b/benchmarks/svm_tools/train_svm_kfold_parallel.py @@ -0,0 +1,151 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. +# +################################################################################ +""" +SVM training using 3-fold cross-validation. + +Relevant transfer tasks: Image Classification VOC07 and COCO2014. 
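+
+Parallel variant of train_svm_kfold.py: each (class, cost) pair is trained in
+its own worker process via multiprocessing.Pool.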
+""" + +from __future__ import division +from __future__ import absolute_import +from __future__ import unicode_literals +from __future__ import print_function + +import multiprocessing as mp +import tqdm +import argparse +import logging +import numpy as np +import os +import pickle +import sys +from sklearn.svm import LinearSVC +from sklearn.model_selection import cross_val_score + +import svm_helper + +import pdb + + +def task(cls, cost, opts, features, targets): + out_file, ap_out_file = svm_helper.get_svm_train_output_files( + cls, cost, opts.output_path) + if not (os.path.exists(out_file) and os.path.exists(ap_out_file)): + clf = LinearSVC( + C=cost, + class_weight={ + 1: 2, + -1: 1 + }, + intercept_scaling=1.0, + verbose=0, + penalty='l2', + loss='squared_hinge', + tol=0.0001, + dual=True, + max_iter=2000, + ) + cls_labels = targets[:, cls].astype(dtype=np.int32, copy=True) + cls_labels[np.where(cls_labels == 0)] = -1 + ap_scores = cross_val_score( + clf, features, cls_labels, cv=3, scoring='average_precision') + clf.fit(features, cls_labels) + np.save(ap_out_file, np.array([ap_scores.mean()])) + with open(out_file, 'wb') as fwrite: + pickle.dump(clf, fwrite) + return 0 + + +def mp_helper(args): + return task(*args) + + +def train_svm(opts): + assert os.path.exists(opts.data_file), "Data file not found. Abort!" + if not os.path.exists(opts.output_path): + os.makedirs(opts.output_path) + + features, targets = svm_helper.load_input_data(opts.data_file, + opts.targets_data_file) + # normalize the features: N x 9216 (example shape) + features = svm_helper.normalize_features(features) + + # parse the cost values for training the SVM on + costs_list = svm_helper.parse_cost_list(opts.costs_list) + + # classes for which SVM training should be done + if opts.cls_list: + cls_list = [int(cls) for cls in opts.cls_list.split(",")] + else: + num_classes = targets.shape[1] + cls_list = range(num_classes) + + num_task = len(cls_list) * len(costs_list) + args_cls = [] + args_cost = [] + for cls in cls_list: + for cost in costs_list: + args_cls.append(cls) + args_cost.append(cost) + args_opts = [opts] * num_task + args_features = [features] * num_task + args_targets = [targets] * num_task + + pool = mp.Pool(mp.cpu_count()) + for _ in tqdm.tqdm( + pool.imap_unordered( + mp_helper, + zip(args_cls, args_cost, args_opts, args_features, + args_targets)), + total=num_task): + pass + + +def main(): + parser = argparse.ArgumentParser(description='SVM model training') + parser.add_argument( + '--data_file', + type=str, + default=None, + help="Numpy file containing image features") + parser.add_argument( + '--targets_data_file', + type=str, + default=None, + help="Numpy file containing image labels") + parser.add_argument( + '--output_path', + type=str, + default=None, + help="path where to save the trained SVM models") + parser.add_argument( + '--costs_list', + type=str, + default="0.01,0.1", + help="comma separated string containing list of costs") + parser.add_argument( + '--random_seed', + type=int, + default=100, + help="random seed for SVM classifier training") + + parser.add_argument( + '--cls_list', + type=str, + default=None, + help="comma separated string list of classes to train") + if len(sys.argv) == 1: + parser.print_help() + sys.exit(1) + + opts = parser.parse_args() + train_svm(opts) + + +if __name__ == '__main__': + main() diff --git a/benchmarks/svm_tools/train_svm_low_shot.py b/benchmarks/svm_tools/train_svm_low_shot.py new file mode 100644 index 00000000..b5a0fbb2 --- /dev/null +++ 
b/benchmarks/svm_tools/train_svm_low_shot.py @@ -0,0 +1,144 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. +# +################################################################################ +""" +Low Shot SVM training. + +Relevant transfer tasks: Low-shot Image Classification VOC07 and Places205 low +shot samples. +""" + +from __future__ import division +from __future__ import absolute_import +from __future__ import unicode_literals +from __future__ import print_function + +import argparse +import logging +import numpy as np +import os +import pickle +import sys +from sklearn.svm import LinearSVC +from tqdm import tqdm + +import svm_helper + +import time + +# create the logger +FORMAT = '[%(levelname)s: %(filename)s: %(lineno)4d]: %(message)s' +logging.basicConfig(level=logging.INFO, format=FORMAT, stream=sys.stdout) +logger = logging.getLogger(__name__) + + +def train_svm_low_shot(opts): + assert os.path.exists(opts.data_file), "Data file not found. Abort!" + if not os.path.exists(opts.output_path): + os.makedirs(opts.output_path) + + features, targets = svm_helper.load_input_data(opts.data_file, + opts.targets_data_file) + # normalize the features: N x 9216 (example shape) + features = svm_helper.normalize_features(features) + + # parse the cost values for training the SVM on + costs_list = svm_helper.parse_cost_list(opts.costs_list) + #logger.info('Training SVM for costs: {}'.format(costs_list)) + + # classes for which SVM testing should be done + num_classes, cls_list = svm_helper.get_low_shot_svm_classes( + targets, opts.dataset) + + for cls in tqdm(cls_list): + for cost_idx in range(len(costs_list)): + start = time.time() + cost = costs_list[cost_idx] + suffix = '_'.join( + opts.targets_data_file.split('/')[-1].split('.')[0].split('_') + [-2:]) + out_file = svm_helper.get_low_shot_output_file( + opts, cls, cost, suffix) + if os.path.exists(out_file): + logger.info('SVM model exists: {}'.format(out_file)) + else: + #logger.info('SVM model not found: {}'.format(out_file)) + #logger.info('Training model with the cost: {}'.format(cost)) + clf = LinearSVC( + C=cost, + class_weight={ + 1: 2, + -1: 1 + }, + intercept_scaling=1.0, + verbose=0, + penalty='l2', + loss='squared_hinge', + tol=0.0001, + dual=True, + max_iter=2000, + ) + train_feats, train_cls_labels = svm_helper.get_cls_feats_labels( + cls, features, targets, opts.dataset) + #num_positives = len(np.where(train_cls_labels == 1)[0]) + #num_negatives = len(np.where(train_cls_labels == -1)[0]) + + #logger.info('cls: {} has +ve: {} -ve: {} ratio: {}'.format( + # cls, num_positives, num_negatives, + # float(num_positives) / num_negatives) + #) + #logger.info('features: {} cls_labels: {}'.format( + # train_feats.shape, train_cls_labels.shape)) + clf.fit(train_feats, train_cls_labels) + #logger.info('Saving SVM model to: {}'.format(out_file)) + with open(out_file, 'wb') as fwrite: + pickle.dump(clf, fwrite) + #print("time: {:.4g} s".format(time.time() - start)) + #logger.info('All done!') + + +def main(): + parser = argparse.ArgumentParser(description='Low-shot SVM model training') + parser.add_argument( + '--data_file', + type=str, + default=None, + help="Numpy file containing image features") + parser.add_argument( + '--targets_data_file', + type=str, + default=None, + help="Numpy file containing image labels") + parser.add_argument( + '--costs_list', + type=str, + 
default="0.01,0.1", + help="comma separated string containing list of costs") + parser.add_argument( + '--output_path', + type=str, + default=None, + help="path where to save the trained SVM models") + parser.add_argument( + '--random_seed', + type=int, + default=100, + help="random seed for SVM classifier training") + parser.add_argument( + '--dataset', type=str, default="voc", help='voc | places') + if len(sys.argv) == 1: + parser.print_help() + sys.exit(1) + + opts = parser.parse_args() + + #logger.info(opts) + train_svm_low_shot(opts) + + +if __name__ == '__main__': + main() diff --git a/benchmarks/svm_tools/train_svm_low_shot_parallel.py b/benchmarks/svm_tools/train_svm_low_shot_parallel.py new file mode 100644 index 00000000..f3a0843d --- /dev/null +++ b/benchmarks/svm_tools/train_svm_low_shot_parallel.py @@ -0,0 +1,145 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. +# +################################################################################ +""" +Low Shot SVM training. + +Relevant transfer tasks: Low-shot Image Classification VOC07 and Places205 low +shot samples. +""" + +from __future__ import division +from __future__ import absolute_import +from __future__ import unicode_literals +from __future__ import print_function + +import multiprocessing as mp +import tqdm +import argparse +import logging +import numpy as np +import os +import pickle +import sys +from sklearn.svm import LinearSVC + +import svm_helper + +import pdb + + +def task(cls, cost, opts, features, targets): + suffix = '_'.join( + opts.targets_data_file.split('/')[-1].split('.')[0].split('_')[-2:]) + out_file = svm_helper.get_low_shot_output_file(opts, cls, cost, suffix) + if not os.path.exists(out_file): + clf = LinearSVC( + C=cost, + class_weight={ + 1: 2, + -1: 1 + }, + intercept_scaling=1.0, + verbose=0, + penalty='l2', + loss='squared_hinge', + tol=0.0001, + dual=True, + max_iter=2000, + ) + train_feats, train_cls_labels = svm_helper.get_cls_feats_labels( + cls, features, targets, opts.dataset) + clf.fit(train_feats, train_cls_labels) + #cls_labels = targets[:, cls].astype(dtype=np.int32, copy=True) + #cls_labels[np.where(cls_labels == 0)] = -1 + #clf.fit(features, cls_labels) + with open(out_file, 'wb') as fwrite: + pickle.dump(clf, fwrite) + return 0 + + +def mp_helper(args): + return task(*args) + + +def train_svm_low_shot(opts): + assert os.path.exists(opts.data_file), "Data file not found. Abort!" 
+ if not os.path.exists(opts.output_path): + os.makedirs(opts.output_path) + + features, targets = svm_helper.load_input_data(opts.data_file, + opts.targets_data_file) + # normalize the features: N x 9216 (example shape) + features = svm_helper.normalize_features(features) + + # parse the cost values for training the SVM on + costs_list = svm_helper.parse_cost_list(opts.costs_list) + + # classes for which SVM testing should be done + num_classes, cls_list = svm_helper.get_low_shot_svm_classes( + targets, opts.dataset) + + num_task = len(cls_list) * len(costs_list) + args_cls = [] + args_cost = [] + for cls in cls_list: + for cost in costs_list: + args_cls.append(cls) + args_cost.append(cost) + args_opts = [opts] * num_task + args_features = [features] * num_task + args_targets = [targets] * num_task + + pool = mp.Pool(mp.cpu_count()) + for _ in tqdm.tqdm( + pool.imap_unordered( + mp_helper, + zip(args_cls, args_cost, args_opts, args_features, + args_targets)), + total=num_task): + pass + + +def main(): + parser = argparse.ArgumentParser(description='Low-shot SVM model training') + parser.add_argument( + '--data_file', + type=str, + default=None, + help="Numpy file containing image features") + parser.add_argument( + '--targets_data_file', + type=str, + default=None, + help="Numpy file containing image labels") + parser.add_argument( + '--costs_list', + type=str, + default="0.01,0.1", + help="comma separated string containing list of costs") + parser.add_argument( + '--output_path', + type=str, + default=None, + help="path where to save the trained SVM models") + parser.add_argument( + '--random_seed', + type=int, + default=100, + help="random seed for SVM classifier training") + parser.add_argument( + '--dataset', type=str, default="voc", help='voc | places') + if len(sys.argv) == 1: + parser.print_help() + sys.exit(1) + + opts = parser.parse_args() + train_svm_low_shot(opts) + + +if __name__ == '__main__': + main() diff --git a/configs/base.py b/configs/base.py new file mode 100644 index 00000000..ab9923a4 --- /dev/null +++ b/configs/base.py @@ -0,0 +1,19 @@ +train_cfg = {} +test_cfg = {} +optimizer_config = dict() # grad_clip, coalesce, bucket_size_mb +# yapf:disable +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook'), + dict(type='TensorboardLoggerHook') + ]) +# yapf:enable +# runtime settings +dist_params = dict(backend='nccl') +cudnn_benchmark = True +log_level = 'INFO' +load_from = None +resume_from = None +workflow = [('train', 1)] +prefetch = False \ No newline at end of file diff --git a/configs/benchmarks/linear_classification/cifar10/r18_last_1gpu_cifar10.py b/configs/benchmarks/linear_classification/cifar10/r18_last_1gpu_cifar10.py new file mode 100644 index 00000000..9e298c1a --- /dev/null +++ b/configs/benchmarks/linear_classification/cifar10/r18_last_1gpu_cifar10.py @@ -0,0 +1,75 @@ +_base_ = '../../../base.py' +# model settings +model = dict( + type='Classification', + pretrained=None, + with_sobel=False, + backbone=dict( # mmclassification + type='ResNet_CIFAR', + depth=18, + num_stages=4, + out_indices=(3,), + style='pytorch', + frozen_stages=4, + ), + head=dict( + type='ClsHead', with_avg_pool=True, in_channels=512, + num_classes=10)) # cifar-10 +# dataset settings +data_source_cfg = dict(type='Cifar10', root='./data/cifar10/') +dataset_type = 'ClassificationDataset' +img_norm_cfg = dict(mean=[0.4914, 0.4822, 0.4465], std=[0.2023, 0.1994, 0.201]) # cifar-10 + +train_pipeline = [ + dict(type='RandomCrop', size=32, padding=4), + 
dict(type='RandomHorizontalFlip'), + dict(type='ToTensor'), + dict(type='Normalize', **img_norm_cfg), +] +test_pipeline = [ + dict(type='ToTensor'), + dict(type='Normalize', **img_norm_cfg), +] +data = dict( + imgs_per_gpu=128, # 1 GPU + workers_per_gpu=6, + train=dict( + type=dataset_type, + data_source=dict(split='train', **data_source_cfg), + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_source=dict(split='test', **data_source_cfg), + pipeline=test_pipeline), + test=dict( + type=dataset_type, + data_source=dict(split='test', **data_source_cfg), + pipeline=test_pipeline)) + +# additional hooks +custom_hooks = [ + dict( + type='ValidateHook', + dataset=data['val'], + initial=True, + # initial=False, + interval=10, + imgs_per_gpu=128, + workers_per_gpu=4, + eval_param=dict(topk=(1, 5))) +] +# optimizer +# optimizer = dict(type='SGD', lr=30., momentum=0.9, weight_decay=0.) # imagenet MoCo version +optimizer = dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.) # imagenet +optimizer_config = dict(grad_clip=None) +# learning policy +lr_config = dict( + policy='step', + step=[60, 80] +) +checkpoint_config = dict(interval=100) +# runtime settings +total_epochs = 100 + +# * 1218: CIFAR-10 linear evaluation, size=32, bs128 +# Test: CUDA_VISIBLE_DEVICES=5 PORT=29471 bash benchmarks/dist_train_linear_1gpu.sh configs/benchmarks/linear_classification/cifar10/r18_last_1gpu_cifar10.py ./work_dirs/my_pretrains/ diff --git a/configs/benchmarks/linear_classification/cifar10/r18_rep_cifar10.py b/configs/benchmarks/linear_classification/cifar10/r18_rep_cifar10.py new file mode 100644 index 00000000..f2ce9ad6 --- /dev/null +++ b/configs/benchmarks/linear_classification/cifar10/r18_rep_cifar10.py @@ -0,0 +1,49 @@ +_base_ = '../../../base.py' +# model settings +model = dict( + type='Representation', + pretrained=None, + backbone=dict( # mmclassification + type='ResNet_CIFAR', + depth=18, + num_stages=4, + out_indices=(3,), + style='pytorch'), + neck=dict(type='AvgPoolNeck'), +) +# dataset settings +data_source_cfg = dict(type='Cifar10', root='./data/cifar10/') +dataset_type = 'ClassificationDataset' +img_norm_cfg = dict(mean=[0.4914, 0.4822, 0.4465], std=[0.2023, 0.1994, 0.201]) # cifar-10 + +test_pipeline = [ + dict(type='ToTensor'), + dict(type='Normalize', **img_norm_cfg), +] +data = dict( + imgs_per_gpu=256, + workers_per_gpu=8, + val=dict( + type=dataset_type, + data_source=dict(split='test', **data_source_cfg), + pipeline=test_pipeline), +) +# additional hooks +custom_hooks = [ + dict( + type='ValidateHook', + dataset=data['val'], + initial=True, + interval=10, + imgs_per_gpu=128, + workers_per_gpu=8, + eval_param=dict(topk=(1, 5))) +] +# optimizer +optimizer = dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0005) +# learning policy +lr_config = dict(policy='step', step=[150, 250]) +checkpoint_config = dict(interval=50) +# runtime settings +total_epochs = 350 + diff --git a/configs/benchmarks/linear_classification/cifar10/r50_last_1gpu_cifar10_from_stl10_lr01.py b/configs/benchmarks/linear_classification/cifar10/r50_last_1gpu_cifar10_from_stl10_lr01.py new file mode 100644 index 00000000..6bc45ea5 --- /dev/null +++ b/configs/benchmarks/linear_classification/cifar10/r50_last_1gpu_cifar10_from_stl10_lr01.py @@ -0,0 +1,78 @@ +_base_ = '../../../base.py' +# model settings +model = dict( + type='Classification', + pretrained=None, + with_sobel=False, + backbone=dict( + type='ResNet', + depth=50, + in_channels=3, + out_indices=[4], # 0: conv-1, x: stage-x + 
norm_cfg=dict(type='BN'), + frozen_stages=4), + head=dict( + type='ClsHead', with_avg_pool=True, in_channels=2048, + num_classes=10)) # to cifar-10 +# dataset settings +data_source_cfg = dict(type='Cifar10', root='./data/cifar10/') +dataset_type = 'ClassificationDataset' +img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) # imagenet for transfer +# img_norm_cfg = dict(mean=[0.4914, 0.4822, 0.4465], std=[0.2023, 0.1994, 0.201]) # cifar-10 +resizeto = 64 +train_pipeline = [ + dict(type='RandomResizedCrop', size=resizeto, scale=[0.2, 1.0]), + # dict(type='RandomCrop', size=32, padding=4), + dict(type='RandomHorizontalFlip'), + dict(type='ToTensor'), + dict(type='Normalize', **img_norm_cfg), +] +test_pipeline = [ + dict(type='Resize', size=resizeto), + dict(type='ToTensor'), + dict(type='Normalize', **img_norm_cfg), +] +data = dict( + # imgs_per_gpu=64, + # workers_per_gpu=8, + imgs_per_gpu=128, + workers_per_gpu=10, + train=dict( + type=dataset_type, + data_source=dict(split='train', **data_source_cfg), + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_source=dict(split='test', **data_source_cfg), + pipeline=test_pipeline), + test=dict( + type=dataset_type, + data_source=dict(split='test', **data_source_cfg), + pipeline=test_pipeline)) +# additional hooks +custom_hooks = [ + dict( + type='ValidateHook', + dataset=data['val'], + initial=True, + # initial=False, + interval=10, + imgs_per_gpu=128, + workers_per_gpu=4, + eval_param=dict(topk=(1, 5))) +] +# optimizer +# optimizer = dict(type='SGD', lr=30., momentum=0.9, weight_decay=0.) # imagenet MoCo version +optimizer = dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.) # imagenet +optimizer_config = dict(grad_clip=None) +# learning policy +lr_config = dict( + policy='step', + step=[60, 80] +) +checkpoint_config = dict(interval=50) +# runtime settings +total_epochs = 100 + +# * 1230: Transfer test from STL10 to CIFAR-10 (linear evaluation), size=64, bs128 +# Test: CUDA_VISIBLE_DEVICES=3 PORT=25717 bash benchmarks/dist_train_linear_1gpu.sh configs/benchmarks/linear_classification/cifar10/r50_last_1gpu_cifar10_from_stl10_lr01.py ./work_dirs/my_pretrains/ diff --git a/configs/benchmarks/linear_classification/cifar100/r18_last_1gpu_cifar100.py b/configs/benchmarks/linear_classification/cifar100/r18_last_1gpu_cifar100.py new file mode 100644 index 00000000..c354da7e --- /dev/null +++ b/configs/benchmarks/linear_classification/cifar100/r18_last_1gpu_cifar100.py @@ -0,0 +1,79 @@ +_base_ = '../../../base.py' +# model settings +model = dict( + type='Classification', + pretrained=None, + with_sobel=False, + backbone=dict( # mmclassification + type='ResNet_CIFAR', + depth=18, + num_stages=4, + out_indices=(3,), + style='pytorch', + frozen_stages=4, + ), + head=dict( + type='ClsHead', with_avg_pool=True, in_channels=512, + num_classes=100)) # cifar-100 +# dataset settings +data_source_cfg = dict(type='Cifar100', root='./data/cifar100/') +dataset_type = 'ClassificationDataset' +img_norm_cfg = dict(mean=[0.4914, 0.4822, 0.4465], std=[0.2023, 0.1994, 0.201]) # cifar-10 stats reused for cifar-100 + +train_pipeline = [ + dict(type='RandomCrop', size=32, padding=4), + dict(type='RandomHorizontalFlip'), + dict(type='ToTensor'), + dict(type='Normalize', **img_norm_cfg), +] +test_pipeline = [ + dict(type='ToTensor'), + dict(type='Normalize', **img_norm_cfg), +] +data = dict( + # imgs_per_gpu=64, + # workers_per_gpu=8, + imgs_per_gpu=128, + workers_per_gpu=8, + train=dict( + type=dataset_type, + data_source=dict(split='train',
**data_source_cfg), + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_source=dict(split='test', **data_source_cfg), + pipeline=test_pipeline), + test=dict( + type=dataset_type, + data_source=dict(split='test', **data_source_cfg), + pipeline=test_pipeline)) + +# additional hooks +custom_hooks = [ + dict( + type='ValidateHook', + dataset=data['val'], + # initial=True, + initial=False, + interval=10, + imgs_per_gpu=128, + workers_per_gpu=4, + eval_param=dict(topk=(1, 5))) +] +# optimizer +# optimizer = dict(type='SGD', lr=30., momentum=0.9, weight_decay=0.) # imagenet MoCo version +optimizer = dict(type='SGD', lr=1.0, momentum=0.9, weight_decay=0.) # imagenet, [chosen] +optimizer_config = dict(grad_clip=None) +# learning policy +lr_config = dict( + policy='step', + # step=[60, 80], # 2 step + step=[30, 60, 80], # 3 step + gamma=0.2, +) +checkpoint_config = dict(interval=50) +# runtime settings +total_epochs = 100 + +# * 1224: CIFAR-100 linear evaluation, size=32, bs128. try lr=1.0 + 3 steps +# Test: CUDA_VISIBLE_DEVICES=0 PORT=25917 bash benchmarks/dist_train_linear_1gpu.sh configs/benchmarks/linear_classification/cifar100/r18_last_1gpu_cifar100.py ./work_dirs/my_pretrains/ diff --git a/configs/benchmarks/linear_classification/cifar100/r18_rep_cifar100.py b/configs/benchmarks/linear_classification/cifar100/r18_rep_cifar100.py new file mode 100644 index 00000000..dcedae3e --- /dev/null +++ b/configs/benchmarks/linear_classification/cifar100/r18_rep_cifar100.py @@ -0,0 +1,48 @@ +_base_ = '../../../base.py' +# model settings +model = dict( + type='Representation', + pretrained=None, + backbone=dict( # mmclassification + type='ResNet_CIFAR', + depth=18, + num_stages=4, + out_indices=(3,), + style='pytorch'), + neck=dict(type='AvgPoolNeck'), +) +# dataset settings +data_source_cfg = dict(type='Cifar100', root='./data/cifar100/') +dataset_type = 'ClassificationDataset' +img_norm_cfg = dict(mean=[0.4914, 0.4822, 0.4465], std=[0.2023, 0.1994, 0.201]) # cifar-10 stats reused for cifar-100 + +test_pipeline = [ + dict(type='ToTensor'), + dict(type='Normalize', **img_norm_cfg), +] +data = dict( + imgs_per_gpu=256, + workers_per_gpu=8, + val=dict( + type=dataset_type, + data_source=dict(split='test', **data_source_cfg), + pipeline=test_pipeline), +) +# additional hooks +custom_hooks = [ + dict( + type='ValidateHook', + dataset=data['val'], + initial=True, + interval=10, + imgs_per_gpu=128, + workers_per_gpu=8, + eval_param=dict(topk=(1, 5))) +] +# optimizer +optimizer = dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0005) +# learning policy +lr_config = dict(policy='step', step=[150, 250]) +checkpoint_config = dict(interval=50) +# runtime settings +total_epochs = 350 diff --git a/configs/benchmarks/linear_classification/cub200/r50_last_2gpu_cub200.py b/configs/benchmarks/linear_classification/cub200/r50_last_2gpu_cub200.py new file mode 100644 index 00000000..5e90afd3 --- /dev/null +++ b/configs/benchmarks/linear_classification/cub200/r50_last_2gpu_cub200.py @@ -0,0 +1,84 @@ +_base_ = '../../../base.py' +# model settings +model = dict( + type='Classification', + pretrained=None, + with_sobel=False, + backbone=dict( + type='ResNet', + depth=50, + in_channels=3, + out_indices=[4], # 0: conv-1, x: stage-x + norm_cfg=dict(type='BN'), + frozen_stages=4), + head=dict( + type='ClsHead', with_avg_pool=True, in_channels=2048, + num_classes=200)) # CUB-200 +# dataset settings +data_source_cfg = dict( + type='ImageNet', + memcached=False, + mclient_path='/mnt/lustre/share/memcached_client') +# test: CUB-200 dataset
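The CIFAR-100 config above decays lr=1.0 by gamma=0.2 at epochs 30, 60 and 80. A quick sanity check of the schedule the head actually sees (a sketch of the step policy, not mmcv's implementation):

def step_lr(epoch, base_lr=1.0, steps=(30, 60, 80), gamma=0.2):
    # Multiply by gamma once for every milestone already passed.
    return base_lr * gamma ** sum(epoch >= s for s in steps)

print([round(step_lr(e), 4) for e in (0, 29, 30, 60, 80)])
# [1.0, 1.0, 0.2, 0.04, 0.008]

The CUB-200 config resumes below, pointing the generic ImageNet-style list-file loader at the CUB-200 meta files.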
+base = "/usr/commondata/public/CUB200/CUB_200/" +data_train_list = base + 'classification_meta_0/train_labeled.txt' # CUB200 labeled train, 30 per class, 5994 +data_train_root = base + "images" +data_test_list = base + 'classification_meta_0/test_labeled.txt' # CUB200 labeled test, 30 per class +data_test_root = base + "images" +# resize setting +resizeto = 224 +dataset_type = 'ClassificationDataset' +img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) # imagenet +train_pipeline = [ + dict(type='RandomResizedCrop', size=resizeto), + dict(type='RandomHorizontalFlip'), + dict(type='ToTensor'), + dict(type='Normalize', **img_norm_cfg), +] +test_pipeline = [ + dict(type='Resize', size=256), + dict(type='CenterCrop', size=resizeto), + dict(type='ToTensor'), + dict(type='Normalize', **img_norm_cfg), +] +data = dict( + # imgs_per_gpu=32, # total 32*8=256, 8GPU linear cls + # workers_per_gpu=12, + imgs_per_gpu=128, # total 128*2=256, 2GPU linear cls + workers_per_gpu=10, + train=dict( + type=dataset_type, + data_source=dict( + list_file=data_train_list, root=data_train_root, + **data_source_cfg), + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_source=dict( + list_file=data_test_list, root=data_test_root, **data_source_cfg), + pipeline=test_pipeline)) +# additional hooks +custom_hooks = [ + dict( + type='ValidateHook', + dataset=data['val'], + initial=True, + interval=10, # 1, + imgs_per_gpu=128, + workers_per_gpu=8, # 4, + eval_param=dict(topk=(1, 5))) +] +# optimizer +optimizer = dict(type='SGD', lr=30., momentum=0.9, weight_decay=0.) # ImageNet MoCo, basic lr +# optimizer = dict(type='SGD', lr=1.0, momentum=0.9, weight_decay=0.) # STL-10 lr +# learning policy +lr_config = dict( + policy='step', + step=[60, 80] +) +checkpoint_config = dict(interval=50) +# runtime settings +total_epochs = 100 + +# * 1203: CUB-200_2011, baseline, size=224, try ImageNet basic lr=30.0 +# Test: CUDA_VISIBLE_DEVICES=0,1 PORT=25003 bash benchmarks/dist_train_linear.sh configs/benchmarks/linear_classification/cub200/r50_last_2gpu_cub200.py ./work_dirs/my_pretrains/ diff --git a/configs/benchmarks/linear_classification/dogs120/r50_last_2gpu_dogs120.py b/configs/benchmarks/linear_classification/dogs120/r50_last_2gpu_dogs120.py new file mode 100644 index 00000000..13164942 --- /dev/null +++ b/configs/benchmarks/linear_classification/dogs120/r50_last_2gpu_dogs120.py @@ -0,0 +1,87 @@ +_base_ = '../../../base.py' +# model settings +model = dict( + type='Classification', + pretrained=None, + with_sobel=False, + backbone=dict( + type='ResNet', + depth=50, + in_channels=3, + out_indices=[4], # 0: conv-1, x: stage-x + norm_cfg=dict(type='BN'), + frozen_stages=4), + head=dict( + type='ClsHead', with_avg_pool=True, in_channels=2048, + num_classes=120)) # Dogs-120 +# dataset settings +data_source_cfg = dict( + type='ImageNet', + memcached=False, + mclient_path='/mnt/lustre/share/memcached_client') +# test: Dogs-120 dataset +base = "/usr/commondata/public/Dogs120/" +data_train_list = base + 'classification_meta_0/train_labeled.txt' # Dogs-120 labeled train, 100 per class, 12000 +data_train_root = base + "Images" +data_test_list = base + 'classification_meta_0/test_labeled.txt' # Dogs-120 labeled test, 100 per class +data_test_root = base + "Images" +# resize setting +resizeto = 224 +dataset_type = 'ClassificationDataset' +img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) # imagenet +train_pipeline = [ + dict(type='RandomResizedCrop', size=resizeto), + 
dict(type='RandomHorizontalFlip'), + dict(type='ToTensor'), + dict(type='Normalize', **img_norm_cfg), +] +test_pipeline = [ + dict(type='Resize', size=256), + dict(type='CenterCrop', size=resizeto), + dict(type='ToTensor'), + dict(type='Normalize', **img_norm_cfg), +] +data = dict( + # imgs_per_gpu=32, # total 32*8=256, 8GPU linear cls + # workers_per_gpu=12, + imgs_per_gpu=128, # total 128*2=256, 2GPU linear cls + workers_per_gpu=10, + train=dict( + type=dataset_type, + data_source=dict( + list_file=data_train_list, root=data_train_root, + **data_source_cfg), + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_source=dict( + list_file=data_test_list, root=data_test_root, **data_source_cfg), + pipeline=test_pipeline)) +# additional hooks +custom_hooks = [ + dict( + type='ValidateHook', + dataset=data['val'], + initial=True, + interval=10, # 1, + imgs_per_gpu=128, + workers_per_gpu=8, # 4, + eval_param=dict(topk=(1, 5))) +] +# optimizer +optimizer = dict(type='SGD', lr=30., momentum=0.9, weight_decay=0.) # ImageNet basic lr +# optimizer = dict(type='SGD', lr=1.0, momentum=0.9, weight_decay=0.) # STL-10 lr +# learning policy +lr_config = dict( + policy='step', + step=[60, 80] + # step=[30, 40] + # step=[18, 24] +) +checkpoint_config = dict(interval=50) +# runtime settings +total_epochs = 100 +# total_epochs = 50 + +# * 1205: Dogs-120, baseline, size=224, try ImageNet basic lr=30.0 +# Test: CUDA_VISIBLE_DEVICES=4,5 PORT=25105 bash benchmarks/dist_train_linear.sh configs/benchmarks/linear_classification/dogs120/r50_last_2gpu_dogs120.py ./work_dirs/my_pretrains/ diff --git a/configs/benchmarks/linear_classification/fmnist/lenet_last_1gpu_fmnist.py b/configs/benchmarks/linear_classification/fmnist/lenet_last_1gpu_fmnist.py new file mode 100644 index 00000000..a86d8df4 --- /dev/null +++ b/configs/benchmarks/linear_classification/fmnist/lenet_last_1gpu_fmnist.py @@ -0,0 +1,49 @@ +_base_ = '../../../base.py' +# model settings +model = dict( + type='Classification', + backbone=dict( # mmclassification + type='LeNet5', + activation="LeakyReLU", + mlp_neck=None, + cls_neck=True, + ), + head=dict( + type='ClsHead', with_avg_pool=False, in_channels=84, + num_classes=10)) +# dataset settings +data_source_cfg = dict(type='Fmnist', root='./data/') +dataset_type = 'ClassificationDataset' +img_norm_cfg = dict(mean=[0.], std=[1.]) # MNIST grayscale +resizeto = 32 +test_pipeline = [ + dict(type='Resize', size=resizeto), + dict(type='ToTensor'), + dict(type='Normalize', **img_norm_cfg), +] +data = dict( + imgs_per_gpu=100, + workers_per_gpu=2, + val=dict( + type=dataset_type, + data_source=dict(split='test', **data_source_cfg), + pipeline=test_pipeline), +) +# additional hooks +custom_hooks = [ + dict( + type='ValidateHook', + dataset=data['val'], + initial=True, + interval=10, + imgs_per_gpu=128, + workers_per_gpu=2, + eval_param=dict(topk=(1, 5))) +] +# optimizer +optimizer = dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.) 
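All of the *_last_* configs in this patch implement the same linear-evaluation protocol: frozen_stages=4 freezes the entire backbone and only the ClsHead is trained. In plain PyTorch the setup looks roughly like this (generic stand-in modules, not the repo's Classification class; the 84/10 dimensions echo the LeNet-5 head in this FashionMNIST config):

import torch
import torch.nn as nn

backbone = nn.Sequential(            # stand-in for the frozen encoder
    nn.Conv2d(1, 84, 3, padding=1),
    nn.AdaptiveAvgPool2d(1),
    nn.Flatten())
head = nn.Linear(84, 10)             # the only trainable parameters

for p in backbone.parameters():
    p.requires_grad = False          # frozen_stages=4: no backbone gradients
backbone.eval()                      # keep any BN running statistics fixed

optimizer = torch.optim.SGD(head.parameters(), lr=0.1,
                            momentum=0.9, weight_decay=0.)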
+# learning policy +lr_config = dict(policy='step', step=[60, 80]) +checkpoint_config = dict(interval=50) +# runtime settings +total_epochs = 100 diff --git a/configs/benchmarks/linear_classification/fmnist/lenet_rep_fmnist.py b/configs/benchmarks/linear_classification/fmnist/lenet_rep_fmnist.py new file mode 100644 index 00000000..71fdc1f6 --- /dev/null +++ b/configs/benchmarks/linear_classification/fmnist/lenet_rep_fmnist.py @@ -0,0 +1,50 @@ +_base_ = '../../../base.py' +# model settings +model = dict( + type='Representation', + backbone=dict( # mmclassification + type='LeNet5', + activation="LeakyReLU", + mlp_neck=None, + cls_neck=True, + ), + neck=None, +) +# dataset settings +data_source_cfg = dict(type='Fmnist', root='./data/') + +dataset_type = 'ClassificationDataset' +img_norm_cfg = dict(mean=[0.], std=[1.]) # MNIST grayscale + +resizeto = 32 +test_pipeline = [ + dict(type='Resize', size=resizeto), + dict(type='ToTensor'), + dict(type='Normalize', **img_norm_cfg), +] +data = dict( + imgs_per_gpu=5000, + workers_per_gpu=2, + val=dict( + type=dataset_type, + data_source=dict(split='test', **data_source_cfg), + pipeline=test_pipeline), +) +# additional hooks +custom_hooks = [ + dict( + type='ValidateHook', + dataset=data['val'], + initial=True, + interval=10, + imgs_per_gpu=128, + workers_per_gpu=4, + eval_param=dict(topk=(1, 5))) +] +# optimizer +optimizer = dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0005) +# learning policy +lr_config = dict(policy='step', step=[150, 250]) +checkpoint_config = dict(interval=50) +# runtime settings +total_epochs = 350 diff --git a/configs/benchmarks/linear_classification/imagenet/official/r50_last.py b/configs/benchmarks/linear_classification/imagenet/official/r50_last.py new file mode 100644 index 00000000..1d476619 --- /dev/null +++ b/configs/benchmarks/linear_classification/imagenet/official/r50_last.py @@ -0,0 +1,76 @@ +_base_ = '../../../base.py' +# model settings +model = dict( + type='Classification', + pretrained=None, + with_sobel=False, + backbone=dict( + type='ResNet', + depth=50, + in_channels=3, + out_indices=[4], # 0: conv-1, x: stage-x + norm_cfg=dict(type='BN'), + frozen_stages=4), + head=dict( + type='ClsHead', with_avg_pool=True, in_channels=2048, + num_classes=1000)) +# dataset settings +data_source_cfg = dict( + type='ImageNet', + memcached=True, + mclient_path='/mnt/lustre/share/memcached_client') +data_train_list = 'data/imagenet/meta/train_labeled.txt' +data_train_root = 'data/imagenet/train' +data_test_list = 'data/imagenet/meta/val_labeled.txt' +data_test_root = 'data/imagenet/val' +dataset_type = 'ClassificationDataset' +img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) +train_pipeline = [ + dict(type='RandomResizedCrop', size=224), + dict(type='RandomHorizontalFlip'), +] +test_pipeline = [ + dict(type='Resize', size=256), + dict(type='CenterCrop', size=224), +] +# prefetch +prefetch = False +if not prefetch: + train_pipeline.extend([dict(type='ToTensor'), dict(type='Normalize', **img_norm_cfg)]) + test_pipeline.extend([dict(type='ToTensor'), dict(type='Normalize', **img_norm_cfg)]) +data = dict( + imgs_per_gpu=32, # total 32*8=256, 8GPU linear cls + workers_per_gpu=5, + train=dict( + type=dataset_type, + data_source=dict( + list_file=data_train_list, root=data_train_root, + **data_source_cfg), + pipeline=train_pipeline, + prefetch=prefetch), + val=dict( + type=dataset_type, + data_source=dict( + list_file=data_test_list, root=data_test_root, **data_source_cfg), + pipeline=test_pipeline, + 
prefetch=prefetch)) +# additional hooks +custom_hooks = [ + dict( + type='ValidateHook', + dataset=data['val'], + initial=True, + interval=1, + imgs_per_gpu=128, + workers_per_gpu=4, + prefetch=prefetch, + img_norm_cfg=img_norm_cfg, + eval_param=dict(topk=(1, 5))) +] +# optimizer +optimizer = dict(type='SGD', lr=30., momentum=0.9, weight_decay=0.) +# learning policy +lr_config = dict(policy='step', step=[60, 80]) +checkpoint_config = dict(interval=10) +# runtime settings +total_epochs = 100 diff --git a/configs/benchmarks/linear_classification/imagenet/official/r50_last_sobel.py b/configs/benchmarks/linear_classification/imagenet/official/r50_last_sobel.py new file mode 100644 index 00000000..ec4f7723 --- /dev/null +++ b/configs/benchmarks/linear_classification/imagenet/official/r50_last_sobel.py @@ -0,0 +1,76 @@ +_base_ = '../../../base.py' +# model settings +model = dict( + type='Classification', + pretrained=None, + with_sobel=True, + backbone=dict( + type='ResNet', + depth=50, + in_channels=2, + out_indices=[4], # 0: conv-1, x: stage-x + norm_cfg=dict(type='BN'), + frozen_stages=4), + head=dict( + type='ClsHead', with_avg_pool=True, in_channels=2048, + num_classes=1000)) +# dataset settings +data_source_cfg = dict( + type='ImageNet', + memcached=True, + mclient_path='/mnt/lustre/share/memcached_client') +data_train_list = 'data/imagenet/meta/train_labeled.txt' +data_train_root = 'data/imagenet/train' +data_test_list = 'data/imagenet/meta/val_labeled.txt' +data_test_root = 'data/imagenet/val' +dataset_type = 'ClassificationDataset' +img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) +train_pipeline = [ + dict(type='RandomResizedCrop', size=224), + dict(type='RandomHorizontalFlip'), +] +test_pipeline = [ + dict(type='Resize', size=256), + dict(type='CenterCrop', size=224), +] +# prefetch +prefetch = False +if not prefetch: + train_pipeline.extend([dict(type='ToTensor'), dict(type='Normalize', **img_norm_cfg)]) + test_pipeline.extend([dict(type='ToTensor'), dict(type='Normalize', **img_norm_cfg)]) +data = dict( + imgs_per_gpu=32, # total 32*8=256, 8GPU linear cls + workers_per_gpu=5, + train=dict( + type=dataset_type, + data_source=dict( + list_file=data_train_list, root=data_train_root, + **data_source_cfg), + pipeline=train_pipeline, + prefetch=prefetch), + val=dict( + type=dataset_type, + data_source=dict( + list_file=data_test_list, root=data_test_root, **data_source_cfg), + pipeline=test_pipeline, + prefetch=prefetch)) +# additional hooks +custom_hooks = [ + dict( + type='ValidateHook', + dataset=data['val'], + initial=True, + interval=1, + imgs_per_gpu=128, + workers_per_gpu=4, + prefetch=prefetch, + img_norm_cfg=img_norm_cfg, + eval_param=dict(topk=(1, 5))) +] +# optimizer +optimizer = dict(type='SGD', lr=30., momentum=0.9, weight_decay=0.) 
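In the sobel variant, with_sobel=True and in_channels=2 mean the backbone sees a two-channel edge map instead of RGB. A sketch of how such an input can be produced with fixed Sobel kernels (an illustration only; the repo wires its own Sobel layer inside the model):

import torch
import torch.nn as nn

sobel = nn.Conv2d(1, 2, kernel_size=3, padding=1, bias=False)
gx = torch.tensor([[1., 0., -1.],
                   [2., 0., -2.],
                   [1., 0., -1.]])
sobel.weight.data = torch.stack([gx, gx.t()]).unsqueeze(1)  # (2, 1, 3, 3)
for p in sobel.parameters():
    p.requires_grad = False          # fixed filters, never trained

rgb = torch.rand(4, 3, 224, 224)
gray = rgb.mean(dim=1, keepdim=True)  # drop color before edge filtering
edges = sobel(gray)                   # (4, 2, 224, 224), fed to the backbone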
+# learning policy +lr_config = dict(policy='step', step=[60, 80]) +checkpoint_config = dict(interval=10) +# runtime settings +total_epochs = 100 diff --git a/configs/benchmarks/linear_classification/imagenet/official/r50_multihead.py b/configs/benchmarks/linear_classification/imagenet/official/r50_multihead.py new file mode 100644 index 00000000..54c995d1 --- /dev/null +++ b/configs/benchmarks/linear_classification/imagenet/official/r50_multihead.py @@ -0,0 +1,89 @@ +_base_ = '../../../base.py' +# model settings +model = dict( + type='Classification', + pretrained=None, + with_sobel=False, + backbone=dict( + type='ResNet', + depth=50, + in_channels=3, + out_indices=[0, 1, 2, 3, 4], # 0: conv-1, x: stage-x + norm_cfg=dict(type='BN'), + frozen_stages=4), + head=dict( + type='MultiClsHead', + pool_type='specified', + in_indices=[0, 1, 2, 3, 4], + with_last_layer_unpool=False, + backbone='resnet50', + norm_cfg=dict(type='SyncBN', momentum=0.1, affine=False), + num_classes=1000)) +# dataset settings +data_source_cfg = dict( + type='ImageNet', + memcached=True, + mclient_path='/mnt/lustre/share/memcached_client') +data_train_list = 'data/imagenet/meta/train_labeled.txt' +data_train_root = 'data/imagenet/train' +data_test_list = 'data/imagenet/meta/val_labeled.txt' +data_test_root = 'data/imagenet/val' +dataset_type = 'ClassificationDataset' +img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) +train_pipeline = [ + dict(type='RandomResizedCrop', size=224), + dict(type='RandomHorizontalFlip'), + dict( + type='ColorJitter', + brightness=0.4, + contrast=0.4, + saturation=0.4, + hue=0.), + dict(type='ToTensor'), + dict(type='Lighting'), + dict(type='Normalize', **img_norm_cfg), +] +test_pipeline = [ + dict(type='Resize', size=256), + dict(type='CenterCrop', size=224), + dict(type='ToTensor'), + dict(type='Normalize', **img_norm_cfg), +] +data = dict( + imgs_per_gpu=32, # total 32x8=256 + workers_per_gpu=5, + train=dict( + type=dataset_type, + data_source=dict( + list_file=data_train_list, root=data_train_root, + **data_source_cfg), + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_source=dict( + list_file=data_test_list, root=data_test_root, **data_source_cfg), + pipeline=test_pipeline)) +# additional hooks +custom_hooks = [ + dict( + type='ValidateHook', + dataset=data['val'], + initial=True, + interval=10, + imgs_per_gpu=128, + workers_per_gpu=4, + eval_param=dict(topk=(1, ))) +] +# optimizer +optimizer = dict( + type='SGD', + lr=0.01, + momentum=0.9, + weight_decay=0.0001, + paramwise_options=dict(norm_decay_mult=0.), + nesterov=True) +# learning policy +lr_config = dict(policy='step', step=[30, 60, 90]) +checkpoint_config = dict(interval=10) +# runtime settings +total_epochs = 90 diff --git a/configs/benchmarks/linear_classification/imagenet/official/r50_multihead_sobel.py b/configs/benchmarks/linear_classification/imagenet/official/r50_multihead_sobel.py new file mode 100644 index 00000000..bc3638cd --- /dev/null +++ b/configs/benchmarks/linear_classification/imagenet/official/r50_multihead_sobel.py @@ -0,0 +1,89 @@ +_base_ = '../../../base.py' +# model settings +model = dict( + type='Classification', + pretrained=None, + with_sobel=True, + backbone=dict( + type='ResNet', + depth=50, + in_channels=2, + out_indices=[0, 1, 2, 3, 4], # 0: conv-1, x: stage-x + norm_cfg=dict(type='BN'), + frozen_stages=4), + head=dict( + type='MultiClsHead', + pool_type='specified', + in_indices=[0, 1, 2, 3, 4], + with_last_layer_unpool=False, + backbone='resnet50', + 
norm_cfg=dict(type='SyncBN', momentum=0.1, affine=False), + num_classes=1000)) +# dataset settings +data_source_cfg = dict( + type='ImageNet', + memcached=True, + mclient_path='/mnt/lustre/share/memcached_client') +data_train_list = 'data/imagenet/meta/train_labeled.txt' +data_train_root = 'data/imagenet/train' +data_test_list = 'data/imagenet/meta/val_labeled.txt' +data_test_root = 'data/imagenet/val' +dataset_type = 'ClassificationDataset' +img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) +train_pipeline = [ + dict(type='RandomResizedCrop', size=224), + dict(type='RandomHorizontalFlip'), + dict( + type='ColorJitter', + brightness=0.4, + contrast=0.4, + saturation=0.4, + hue=0.), + dict(type='ToTensor'), + dict(type='Lighting'), + dict(type='Normalize', **img_norm_cfg), +] +test_pipeline = [ + dict(type='Resize', size=256), + dict(type='CenterCrop', size=224), + dict(type='ToTensor'), + dict(type='Normalize', **img_norm_cfg), +] +data = dict( + imgs_per_gpu=32, # total 32x8=256 + workers_per_gpu=5, + train=dict( + type=dataset_type, + data_source=dict( + list_file=data_train_list, root=data_train_root, + **data_source_cfg), + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_source=dict( + list_file=data_test_list, root=data_test_root, **data_source_cfg), + pipeline=test_pipeline)) +# additional hooks +custom_hooks = [ + dict( + type='ValidateHook', + dataset=data['val'], + initial=True, + interval=10, + imgs_per_gpu=128, + workers_per_gpu=4, + eval_param=dict(topk=(1, ))) +] +# optimizer +optimizer = dict( + type='SGD', + lr=0.01, + momentum=0.9, + weight_decay=0.0001, + paramwise_options=dict(norm_decay_mult=0.), + nesterov=True) +# learning policy +lr_config = dict(policy='step', step=[30, 60, 90]) +checkpoint_config = dict(interval=10) +# runtime settings +total_epochs = 90 diff --git a/configs/benchmarks/linear_classification/imagenet/r18_last_1gpu.py b/configs/benchmarks/linear_classification/imagenet/r18_last_1gpu.py new file mode 100644 index 00000000..125cfa19 --- /dev/null +++ b/configs/benchmarks/linear_classification/imagenet/r18_last_1gpu.py @@ -0,0 +1,79 @@ +_base_ = '../../../base.py' +# model settings +model = dict( + type='Classification', + pretrained=None, + with_sobel=False, + backbone=dict( + type='ResNet', + depth=18, + in_channels=3, + out_indices=[4], # 0: conv-1, x: stage-x + norm_cfg=dict(type='BN'), + frozen_stages=4), + head=dict( + type='ClsHead', with_avg_pool=True, in_channels=512, + num_classes=1000)) +# dataset settings +data_source_cfg = dict( + type='ImageNet', + memcached=False, + mclient_path='/mnt/lustre/share/memcached_client') + +imagenet_base = "/usr/lsy/src/ImageNet/" +data_train_list = imagenet_base + 'meta/train_labeled_full.txt' +data_train_root = imagenet_base + 'train' +data_test_list = imagenet_base + 'meta/val_labeled.txt' +data_test_root = imagenet_base + 'val/' + +dataset_type = 'ClassificationDataset' +img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) +train_pipeline = [ + dict(type='RandomResizedCrop', size=224), + dict(type='RandomHorizontalFlip'), + dict(type='ToTensor'), + dict(type='Normalize', **img_norm_cfg), +] +test_pipeline = [ + dict(type='Resize', size=256), + dict(type='CenterCrop', size=224), + dict(type='ToTensor'), + dict(type='Normalize', **img_norm_cfg), +] +data = dict( + imgs_per_gpu=256, # total 256*1=256, 1GPU linear cls + workers_per_gpu=8, + train=dict( + type=dataset_type, + data_source=dict( + list_file=data_train_list, root=data_train_root, + 
**data_source_cfg), + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_source=dict( + list_file=data_test_list, root=data_test_root, **data_source_cfg), + pipeline=test_pipeline)) +# additional hooks +custom_hooks = [ + dict( + type='ValidateHook', + dataset=data['val'], + initial=False, + interval=5, + imgs_per_gpu=128, + workers_per_gpu=6, + eval_param=dict(topk=(1, 5))) +] +# optimizer +optimizer = dict(type='SGD', lr=30., momentum=0.9, weight_decay=0.) +# learning policy +lr_config = dict( + policy='step', + step=[60, 80] +) +checkpoint_config = dict(interval=100) +# runtime settings +total_epochs = 100 + +# Test: CUDA_VISIBLE_DEVICES=3 PORT=25010 bash benchmarks/dist_train_linear_1gpu.sh configs/benchmarks/linear_classification/imagenet/r18_last_1gpu.py ./work_dirs/ diff --git a/configs/benchmarks/linear_classification/imagenet/r18_last_2gpu.py b/configs/benchmarks/linear_classification/imagenet/r18_last_2gpu.py new file mode 100644 index 00000000..c8d14689 --- /dev/null +++ b/configs/benchmarks/linear_classification/imagenet/r18_last_2gpu.py @@ -0,0 +1,79 @@ +_base_ = '../../../base.py' +# model settings +model = dict( + type='Classification', + pretrained=None, + with_sobel=False, + backbone=dict( + type='ResNet', + depth=18, + in_channels=3, + out_indices=[4], # 0: conv-1, x: stage-x + norm_cfg=dict(type='BN'), + frozen_stages=4), + head=dict( + type='ClsHead', with_avg_pool=True, in_channels=512, num_classes=1000)) +# dataset settings +data_source_cfg = dict( + type='ImageNet', + memcached=False, + mclient_path='/mnt/lustre/share/memcached_client') +# ImageNet dataset +data_root="/data/public_datasets/ILSVRC2012/" # ori +# data_root = "/data/ImageNet/" # node16 +data_train_list = 'data/meta/imagenet/train_labeled_full.txt' +data_train_root = data_root + 'train' +data_test_list = 'data/meta/imagenet/val_labeled.txt' +data_test_root = "/data/liuzicheng/ImageNet/val/" + +dataset_type = 'ClassificationDataset' +img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) +train_pipeline = [ + dict(type='RandomResizedCrop', size=224), + dict(type='RandomHorizontalFlip'), + dict(type='ToTensor'), + dict(type='Normalize', **img_norm_cfg), +] +test_pipeline = [ + dict(type='Resize', size=256), + dict(type='CenterCrop', size=224), + dict(type='ToTensor'), + dict(type='Normalize', **img_norm_cfg), +] +data = dict( + imgs_per_gpu=128, # total 128*2=256, 2GPU linear cls + workers_per_gpu=4, + train=dict( + type=dataset_type, + data_source=dict( + list_file=data_train_list, root=data_train_root, + **data_source_cfg), + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_source=dict( + list_file=data_test_list, root=data_test_root, **data_source_cfg), + pipeline=test_pipeline)) +# additional hooks +custom_hooks = [ + dict( + type='ValidateHook', + dataset=data['val'], + initial=False, + interval=2, + imgs_per_gpu=100, + workers_per_gpu=4, + eval_param=dict(topk=(1, 5))) +] +# optimizer +optimizer = dict(type='SGD', lr=30., momentum=0.9, weight_decay=0.) 
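The 1-, 2- and 4-GPU variants of this linear probe differ only in how the fixed total batch of 256 is split, so the effective optimization stays comparable across machines:

def imgs_per_gpu(num_gpus, total_batch=256):
    # The configs keep imgs_per_gpu * num_gpus == 256 across variants.
    assert total_batch % num_gpus == 0
    return total_batch // num_gpus

print({g: imgs_per_gpu(g) for g in (1, 2, 4)})  # {1: 256, 2: 128, 4: 64}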
+# learning policy +lr_config = dict( + policy='step', + step=[60, 80] +) +checkpoint_config = dict(interval=100) +# runtime settings +total_epochs = 100 + +# Test: CUDA_VISIBLE_DEVICES=6,7 PORT=25019 bash benchmarks/dist_train_linear_2gpu.sh configs/benchmarks/linear_classification/imagenet/r18_last_2gpu.py ./work_dirs/ diff --git a/configs/benchmarks/linear_classification/imagenet/r18_last_4gpu.py b/configs/benchmarks/linear_classification/imagenet/r18_last_4gpu.py new file mode 100644 index 00000000..f7f1645f --- /dev/null +++ b/configs/benchmarks/linear_classification/imagenet/r18_last_4gpu.py @@ -0,0 +1,78 @@ +_base_ = '../../../base.py' +# model settings +model = dict( + type='Classification', + pretrained=None, + with_sobel=False, + backbone=dict( + type='ResNet', + depth=18, + in_channels=3, + out_indices=[4], # 0: conv-1, x: stage-x + norm_cfg=dict(type='BN'), + frozen_stages=4), + head=dict( + type='ClsHead', with_avg_pool=True, in_channels=512, num_classes=1000)) +# dataset settings +data_source_cfg = dict( + type='ImageNet', + memcached=False, + mclient_path='/mnt/lustre/share/memcached_client') +# ImageNet dataset +data_root="data/ImageNet/" +data_train_list = 'data/meta/imagenet/train_labeled_full.txt' +data_train_root = data_root + 'train' +data_test_list = 'data/meta/imagenet/val_labeled.txt' +data_test_root = data_root + "val" + +dataset_type = 'ClassificationDataset' +img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) +train_pipeline = [ + dict(type='RandomResizedCrop', size=224), + dict(type='RandomHorizontalFlip'), + dict(type='ToTensor'), + dict(type='Normalize', **img_norm_cfg), +] +test_pipeline = [ + dict(type='Resize', size=256), + dict(type='CenterCrop', size=224), + dict(type='ToTensor'), + dict(type='Normalize', **img_norm_cfg), +] +data = dict( + imgs_per_gpu=64, # total 64*4=256, 4GPU linear cls + workers_per_gpu=6, + train=dict( + type=dataset_type, + data_source=dict( + list_file=data_train_list, root=data_train_root, + **data_source_cfg), + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_source=dict( + list_file=data_test_list, root=data_test_root, **data_source_cfg), + pipeline=test_pipeline)) +# additional hooks +custom_hooks = [ + dict( + type='ValidateHook', + dataset=data['val'], + initial=False, + interval=1, + imgs_per_gpu=100, + workers_per_gpu=4, + eval_param=dict(topk=(1, 5))) +] +# optimizer +optimizer = dict(type='SGD', lr=30., momentum=0.9, weight_decay=0.) 
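Every ValidateHook in these configs reports eval_param=dict(topk=(1, 5)). A generic sketch of that metric (not the repo's hook implementation): a prediction counts for top-k if the true class appears among the k highest logits.

import torch

def topk_accuracy(logits, target, ks=(1, 5)):
    pred = logits.topk(max(ks), dim=1).indices   # (N, max_k) class ids
    correct = pred.eq(target.unsqueeze(1))       # broadcast against labels
    return [correct[:, :k].any(dim=1).float().mean().item() for k in ks]

logits = torch.randn(8, 1000)
target = torch.randint(0, 1000, (8,))
top1, top5 = topk_accuracy(logits, target)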
+# learning policy +lr_config = dict( + policy='step', + step=[60, 80] +) +checkpoint_config = dict(interval=100) +# runtime settings +total_epochs = 100 + +# Test: CUDA_VISIBLE_DEVICES=0,1,2,3 PORT=25011 bash benchmarks/dist_train_linear_4gpu.sh configs/benchmarks/linear_classification/imagenet/r18_last_4gpu.py ./work_dirs/ diff --git a/configs/benchmarks/linear_classification/imagenet/r18_rep_imagenet.py b/configs/benchmarks/linear_classification/imagenet/r18_rep_imagenet.py new file mode 100644 index 00000000..43dea2c6 --- /dev/null +++ b/configs/benchmarks/linear_classification/imagenet/r18_rep_imagenet.py @@ -0,0 +1,71 @@ +_base_ = '../../../base.py' +# model settings +model = dict( + type='Representation', # 0802 + pretrained=None, + backbone=dict( + type='ResNet', + # depth=50, + depth=18, + in_channels=3, + out_indices=[4], # 0: conv-1, x: stage-x + norm_cfg=dict(type='BN'), + frozen_stages=4), + neck=dict(type='AvgPoolNeck'), # 7x7x2048 -> 2048 +) +# dataset settings +data_source_cfg = dict( + type='ImageNet', + memcached=False, + mclient_path='/mnt/lustre/share/memcached_client') + +# test: 10 class (1300 for each class) +imagenet_base = "/usr/commondata/public/ImageNet/ILSVRC2012/" +data_test_list = imagenet_base + 'meta/train_labeled_10class_0123_8081_154155_404_407.txt' # 10 class +# data_train_list = imagenet_base + 'meta/train_full.txt' # full +data_test_root = imagenet_base + 'train' + +dataset_type = 'ClassificationDataset' +img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) +# img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]) # coco2017 + +# resizeto = 96 +resizeto = 224 +test_pipeline = [ + dict(type='Resize', size=resizeto), + dict(type='CenterCrop', size=resizeto), + dict(type='ToTensor'), + dict(type='Normalize', **img_norm_cfg), +] +data = dict( + # imgs_per_gpu=32, # total 32*8=256, 8GPU linear cls + imgs_per_gpu=128, + workers_per_gpu=12, # 5, + val=dict( + type=dataset_type, + data_source=dict( + list_file=data_test_list, root=data_test_root, **data_source_cfg), + pipeline=test_pipeline)) +# additional hooks +custom_hooks = [ + dict( + type='ValidateHook', + dataset=data['val'], + initial=True, + interval=1, + imgs_per_gpu=128, + workers_per_gpu=12, + eval_param=dict(topk=(1, 5))) +] +# optimizer +optimizer = dict(type='SGD', lr=30., momentum=0.9, weight_decay=0.) 
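A 'Representation' config has no classification head: the model yields pooled backbone features (512-d for ResNet-18 with AvgPoolNeck). A minimal sketch of that extraction step, with stand-in modules in place of the repo's backbone and neck:

import torch
import torch.nn as nn

backbone = nn.Sequential(nn.Conv2d(3, 512, 3, padding=1))  # stand-in encoder
neck = nn.AdaptiveAvgPool2d(1)                             # AvgPoolNeck analogue

@torch.no_grad()
def extract_features(loader):
    feats = [neck(backbone(images)).flatten(1) for images, _ in loader]
    return torch.cat(feats)                                # (N, 512)

loader = [(torch.rand(2, 3, 64, 64), torch.zeros(2))]      # dummy batch
print(extract_features(loader).shape)                      # torch.Size([2, 512])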
+# learning policy +lr_config = dict(policy='step', step=[60, 80]) +checkpoint_config = dict(interval=10) +# runtime settings +total_epochs = 100 + + +# test baseline +# Test: bash benchmarks/dist_train_linear.sh configs/benchmarks/linear_classification/imagenet/r50_last.py ./pretrains/moco_r50_v2_simclr_neck.pth + diff --git a/configs/benchmarks/linear_classification/imagenet/r50_last_2gpu.py b/configs/benchmarks/linear_classification/imagenet/r50_last_2gpu.py new file mode 100644 index 00000000..b20734dc --- /dev/null +++ b/configs/benchmarks/linear_classification/imagenet/r50_last_2gpu.py @@ -0,0 +1,79 @@ +_base_ = '../../../base.py' +# model settings +model = dict( + type='Classification', + pretrained=None, + with_sobel=False, + backbone=dict( + type='ResNet', + depth=50, + in_channels=3, + out_indices=[4], # 0: conv-1, x: stage-x + norm_cfg=dict(type='BN'), + frozen_stages=4), + head=dict( + type='ClsHead', with_avg_pool=True, in_channels=2048, + num_classes=1000)) +# dataset settings +data_source_cfg = dict( + type='ImageNet', + memcached=False, + mclient_path='/mnt/lustre/share/memcached_client') +data_base = "/usr/lsy/src/ImageNet/" +data_train_list = data_base + 'meta/train_labeled_full.txt' +data_train_root = data_base + 'train' +data_test_list = data_base + 'meta/val_labeled.txt' +data_test_root = data_base + 'val/' +dataset_type = 'ClassificationDataset' +img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) +train_pipeline = [ + dict(type='RandomResizedCrop', size=224), + dict(type='RandomHorizontalFlip'), + dict(type='ToTensor'), + dict(type='Normalize', **img_norm_cfg), +] +test_pipeline = [ + dict(type='Resize', size=256), + dict(type='CenterCrop', size=224), + dict(type='ToTensor'), + dict(type='Normalize', **img_norm_cfg), +] +data = dict( + # imgs_per_gpu=32, # total 32*8=256, 8GPU linear cls + # workers_per_gpu=12, + imgs_per_gpu=128, # total 128*2=256, 2GPU linear cls + workers_per_gpu=10, + train=dict( + type=dataset_type, + data_source=dict( + list_file=data_train_list, root=data_train_root, + **data_source_cfg), + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_source=dict( + list_file=data_test_list, root=data_test_root, **data_source_cfg), + pipeline=test_pipeline)) +# additional hooks +custom_hooks = [ + dict( + type='ValidateHook', + dataset=data['val'], + initial=True, + interval=10, # 1, + imgs_per_gpu=128, + workers_per_gpu=12, # 4, + eval_param=dict(topk=(1, 5))) +] +# optimizer +optimizer = dict(type='SGD', lr=30., momentum=0.9, weight_decay=0.) 
+# learning policy +lr_config = dict( + policy='step', + step=[60, 80] +) +checkpoint_config = dict(interval=50) +# runtime settings +total_epochs = 100 + +# Test: CUDA_VISIBLE_DEVICES=3,5 PORT=25010 bash benchmarks/dist_train_linear_2gpu.sh configs/benchmarks/linear_classification/imagenet/r50_last_2gpu.py ./work_dirs/ diff --git a/configs/benchmarks/linear_classification/imagenet/r50_last_4gpu.py b/configs/benchmarks/linear_classification/imagenet/r50_last_4gpu.py new file mode 100644 index 00000000..6b46cfd4 --- /dev/null +++ b/configs/benchmarks/linear_classification/imagenet/r50_last_4gpu.py @@ -0,0 +1,83 @@ +_base_ = '../../../base.py' +# model settings +model = dict( + type='Classification', + pretrained=None, + with_sobel=False, + backbone=dict( + type='ResNet', + depth=50, + in_channels=3, + out_indices=[4], # 0: conv-1, x: stage-x + norm_cfg=dict(type='BN'), + frozen_stages=4), + head=dict( + type='ClsHead', with_avg_pool=True, in_channels=2048, num_classes=1000)) +# dataset settings +data_source_cfg = dict( + type='ImageNet', + memcached=False, + mclient_path='/mnt/lustre/share/memcached_client') +# ImageNet dataset +data_root="data/ImageNet/" +data_train_list = 'data/meta/imagenet/train_labeled_full.txt' +data_train_root = data_root + 'train' +data_test_list = 'data/meta/imagenet/val_labeled.txt' +data_test_root = data_root + "val" + +dataset_type = 'ClassificationDataset' +img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) +train_pipeline = [ + dict(type='RandomResizedCrop', size=224), + dict(type='RandomHorizontalFlip'), + dict(type='ToTensor'), + dict(type='Normalize', **img_norm_cfg), +] +test_pipeline = [ + dict(type='Resize', size=256), + dict(type='CenterCrop', size=224), + dict(type='ToTensor'), + dict(type='Normalize', **img_norm_cfg), +] +data = dict( + # imgs_per_gpu=32, # total 32*8=256, 8GPU linear cls + # workers_per_gpu=12, + imgs_per_gpu=64, # total 64*4=256 + workers_per_gpu=8, + train=dict( + type=dataset_type, + data_source=dict( + list_file=data_train_list, root=data_train_root, + **data_source_cfg), + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_source=dict( + list_file=data_test_list, root=data_test_root, **data_source_cfg), + pipeline=test_pipeline)) +# additional hooks +custom_hooks = [ + dict( + type='ValidateHook', + dataset=data['val'], + initial=False, + interval=1, + imgs_per_gpu=128, + workers_per_gpu=4, + eval_param=dict(topk=(1, 5))) +] +# optimizer +optimizer = dict(type='SGD', lr=30., momentum=0.9, weight_decay=0.) 
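The ImageNet configs read their samples from *_labeled*.txt meta files. Assuming the common one-pair-per-line layout of "relative/path.JPEG <integer label>" (the exact format lives in the repo's ImageNet data source, which is not part of this patch), a reader is a few lines:

def read_list(list_file, root):
    # Assumed layout: "<relative/path> <integer label>" per line.
    samples = []
    with open(list_file) as f:
        for line in f:
            path, label = line.strip().rsplit(' ', 1)
            samples.append(('%s/%s' % (root, path), int(label)))
    return samples

# e.g. read_list('data/meta/imagenet/val_labeled.txt', 'data/ImageNet/val')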
+# learning policy +lr_config = dict( + policy='step', + step=[60, 80] + # step=[30, 40] +) +checkpoint_config = dict(interval=100) +# runtime settings +total_epochs = 100 +# total_epochs = 50 + +# * 1105: ELIS-1030, baseline, size=224 +# Test: CUDA_VISIBLE_DEVICES=4,5,6,7 PORT=25012 bash benchmarks/dist_train_linear_4gpu.sh configs/benchmarks/linear_classification/imagenet/r50_last_4gpu.py ./work_dirs/my_pretrains/ diff --git a/configs/benchmarks/linear_classification/imagenet/r50_rep_imagenet.py b/configs/benchmarks/linear_classification/imagenet/r50_rep_imagenet.py new file mode 100644 index 00000000..a87e2526 --- /dev/null +++ b/configs/benchmarks/linear_classification/imagenet/r50_rep_imagenet.py @@ -0,0 +1,71 @@ +_base_ = '../../../base.py' +# model settings +model = dict( + type='Representation', # 0802 + pretrained=None, + backbone=dict( + type='ResNet', + depth=50, + # depth=18, + in_channels=3, + out_indices=[4], # 0: conv-1, x: stage-x + norm_cfg=dict(type='BN'), + frozen_stages=4), + neck=dict(type='AvgPoolNeck'), # 7x7x2048 -> 2048 +) +# dataset settings +data_source_cfg = dict( + type='ImageNet', + memcached=False, + mclient_path='/mnt/lustre/share/memcached_client') + +# test: 10 class (1300 for each class) +imagenet_base = "/usr/commondata/public/ImageNet/ILSVRC2012/" +data_test_list = imagenet_base + 'meta/train_labeled_10class_0123_8081_154155_404_407.txt' # 10 class +# data_train_list = imagenet_base + 'meta/train_full.txt' # full +data_test_root = imagenet_base + 'train' + +dataset_type = 'ClassificationDataset' +img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) +# img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]) # coco2017 + +# resizeto = 96 +resizeto = 224 +test_pipeline = [ + dict(type='Resize', size=resizeto), + dict(type='CenterCrop', size=resizeto), + dict(type='ToTensor'), + dict(type='Normalize', **img_norm_cfg), +] +data = dict( + # imgs_per_gpu=32, # total 32*8=256, 8GPU linear cls + imgs_per_gpu=128, + workers_per_gpu=12, # 5, + val=dict( + type=dataset_type, + data_source=dict( + list_file=data_test_list, root=data_test_root, **data_source_cfg), + pipeline=test_pipeline)) +# additional hooks +custom_hooks = [ + dict( + type='ValidateHook', + dataset=data['val'], + initial=True, + interval=1, + imgs_per_gpu=128, + workers_per_gpu=12, + eval_param=dict(topk=(1, 5))) +] +# optimizer +optimizer = dict(type='SGD', lr=30., momentum=0.9, weight_decay=0.) 
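The Normalize step used throughout maps each channel through (x - mean) / std with the ImageNet statistics from img_norm_cfg. Written out for a single mid-gray pixel:

mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
pixel = [0.5, 0.5, 0.5]                 # RGB in [0, 1] after ToTensor
normed = [(p - m) / s for p, m, s in zip(pixel, mean, std)]
print([round(v, 3) for v in normed])    # [0.066, 0.196, 0.418]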
+# learning policy +lr_config = dict(policy='step', step=[60, 80]) +checkpoint_config = dict(interval=10) +# runtime settings +total_epochs = 100 + + +# test baseline +# Test: bash benchmarks/dist_train_linear.sh configs/benchmarks/linear_classification/imagenet/r50_last.py ./pretrains/moco_r50_v2_simclr_neck.pth + diff --git a/configs/benchmarks/linear_classification/mnist/lenet_rep_mnist.py b/configs/benchmarks/linear_classification/mnist/lenet_rep_mnist.py new file mode 100644 index 00000000..caa64756 --- /dev/null +++ b/configs/benchmarks/linear_classification/mnist/lenet_rep_mnist.py @@ -0,0 +1,55 @@ +_base_ = '../../../base.py' +# model settings +model = dict( + type='Representation', + backbone=dict( # mmclassification + type='LeNet5', + activation="LeakyReLU", + mlp_neck=dict( + type='NonLinearNeckV1', + in_channels=120, + hid_channels=120, + out_channels=10, + activation="LeakyReLU", + with_avg_pool=False), + cls_neck=None, + ), + neck=None, +) +# dataset settings +data_source_cfg = dict(type='Mnist', root='./data/') +dataset_type = 'ClassificationDataset' +img_norm_cfg = dict(mean=[0.], std=[1.]) # MNIST grayscale + +resizeto = 32 +test_pipeline = [ + dict(type='Resize', size=resizeto), + dict(type='ToTensor'), + dict(type='Normalize', **img_norm_cfg), +] +data = dict( + imgs_per_gpu=5000, + workers_per_gpu=2, + val=dict( + type=dataset_type, + data_source=dict(split='test', **data_source_cfg), + pipeline=test_pipeline), +) +# additional hooks +custom_hooks = [ + dict( + type='ValidateHook', + dataset=data['val'], + initial=True, + interval=10, + imgs_per_gpu=128, + workers_per_gpu=4, + eval_param=dict(topk=(1, 5))) +] +# optimizer +optimizer = dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0005) +# learning policy +lr_config = dict(policy='step', step=[150, 250]) +checkpoint_config = dict(interval=50) +# runtime settings +total_epochs = 350 diff --git a/configs/benchmarks/linear_classification/pets/r50_last_2gpu_pets.py b/configs/benchmarks/linear_classification/pets/r50_last_2gpu_pets.py new file mode 100644 index 00000000..0b517a38 --- /dev/null +++ b/configs/benchmarks/linear_classification/pets/r50_last_2gpu_pets.py @@ -0,0 +1,90 @@ +_base_ = '../../../base.py' +# model settings +model = dict( + type='Classification', + pretrained=None, + with_sobel=False, + backbone=dict( + type='ResNet', + depth=50, + in_channels=3, + out_indices=[4], # 0: conv-1, x: stage-x + norm_cfg=dict(type='BN'), + frozen_stages=4), + head=dict( + type='ClsHead', with_avg_pool=True, in_channels=2048, + num_classes=37)) # Pets-37 +# dataset settings +data_source_cfg = dict( + type='ImageNet', + memcached=False, + mclient_path='/mnt/lustre/share/memcached_client') + +# test: Pets-37 dataset +base = "/usr/commondata/public/Pets37/" +data_train_list = base + 'classification_meta_0/train_labeled.txt' # Pets-37 labeled train, 3680 +data_train_root = base + "images" +data_test_list = base + 'classification_meta_0/test_labeled.txt' # Pets-37 labeled test +data_test_root = base + "images" + +# resize setting +resizeto = 224 +dataset_type = 'ClassificationDataset' +img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) # imagenet +train_pipeline = [ + dict(type='RandomResizedCrop', size=resizeto), + dict(type='RandomHorizontalFlip'), + dict(type='ToTensor'), + dict(type='Normalize', **img_norm_cfg), +] +test_pipeline = [ + # dict(type='Resize', size=resizeto), + dict(type='Resize', size=256), + dict(type='CenterCrop', size=resizeto), + dict(type='ToTensor'), + dict(type='Normalize', 
**img_norm_cfg), +] +data = dict( + # imgs_per_gpu=32, # total 32*8=256, 8GPU linear cls + # workers_per_gpu=12, + imgs_per_gpu=128, # total 128*2=256, 2GPU linear cls + workers_per_gpu=12, + train=dict( + type=dataset_type, + data_source=dict( + list_file=data_train_list, root=data_train_root, + **data_source_cfg), + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_source=dict( + list_file=data_test_list, root=data_test_root, **data_source_cfg), + pipeline=test_pipeline)) +# additional hooks +custom_hooks = [ + dict( + type='ValidateHook', + dataset=data['val'], + initial=True, + interval=10, # 1, + imgs_per_gpu=128, + workers_per_gpu=8, # 4, + eval_param=dict(topk=(1, 5))) +] +# optimizer +optimizer = dict(type='SGD', lr=30., momentum=0.9, weight_decay=0.) # ImageNet basic lr +# learning policy +lr_config = dict( + policy='step', + step=[60, 80] + # step=[30, 40] + # step=[18, 24] +) +checkpoint_config = dict(interval=50) +# runtime settings +total_epochs = 100 +# total_epochs = 50 + +# try SSL official pretrains +# * 1205: Pets-37, baseline, size=224, try ImageNet basic lr=30.0 +# Test: CUDA_VISIBLE_DEVICES=6,7 PORT=25005 bash benchmarks/dist_train_linear.sh configs/benchmarks/linear_classification/pets/r50_last_2gpu_pets.py ./work_dirs/my_pretrains/ diff --git a/configs/benchmarks/linear_classification/pets/r50_rep_pets.py b/configs/benchmarks/linear_classification/pets/r50_rep_pets.py new file mode 100644 index 00000000..8beed19f --- /dev/null +++ b/configs/benchmarks/linear_classification/pets/r50_rep_pets.py @@ -0,0 +1,70 @@ +_base_ = '../../../base.py' +# model settings +model = dict( + type='Representation', # 0802 + pretrained=None, + backbone=dict( + type='ResNet', + depth=50, + # depth=18, + in_channels=3, + out_indices=[4], # 0: conv-1, x: stage-x + norm_cfg=dict(type='BN'), + frozen_stages=4), + neck=dict(type='AvgPoolNeck'), # 7x7x2048 -> 2048 +) +# dataset settings +data_source_cfg = dict( + type='ImageNet', + memcached=False, + mclient_path='/mnt/lustre/share/memcached_client') + +# test: Pets-37 dataset +base = "/usr/commondata/public/Pets37/" +data_train_list = base + 'classification_meta_0/train_labeled.txt' # Pets-37 labeled train, 3680 +data_train_root = base + "images" +data_test_list = base + 'classification_meta_0/test_labeled.txt' # Pets-37 labeled test +data_test_root = base + "images" + +dataset_type = 'ClassificationDataset' +img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) # imagenet +# img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]) # coco2017 + +resizeto = 224 +test_pipeline = [ + dict(type='Resize', size=256), + dict(type='CenterCrop', size=resizeto), + dict(type='ToTensor'), + dict(type='Normalize', **img_norm_cfg), +] +data = dict( + # imgs_per_gpu=32, # total 32*8=256, 8GPU linear cls + imgs_per_gpu=128, + workers_per_gpu=10, + val=dict( + type=dataset_type, + data_source=dict( + list_file=data_test_list, root=data_test_root, **data_source_cfg + # **data_source_cfg + ), + pipeline=test_pipeline)) +# additional hooks +custom_hooks = [ + dict( + type='ValidateHook', + dataset=data['val'], + initial=True, + interval=1, + imgs_per_gpu=128, + workers_per_gpu=12, + eval_param=dict(topk=(1, 5))) +] +# optimizer +optimizer = dict(type='SGD', lr=30., momentum=0.9, weight_decay=0.) 
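+# NOTE (editor): with type='Representation', no classification head, and
+# frozen_stages=4, this config only extracts frozen 2048-d pooled features; the
+# SGD setting above (lr=30., wd=0.) is inherited MoCo-style linear-eval
+# boilerplate and does not update any backbone weights here.
+# A hypothetical downstream use of the extracted features for the visualization
+# this config targets (assumes scikit-learn is available; `feats` is the
+# (N, 2048) feature matrix dumped by the extraction step):
+#   from sklearn.manifold import TSNE
+#   emb_2d = TSNE(n_components=2).fit_transform(feats)  # (N, 2048) -> (N, 2)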
+# learning policy +lr_config = dict(policy='step', step=[60, 80]) +checkpoint_config = dict(interval=10) +# runtime settings +total_epochs = 100 + +# using for visualize representation of Pets-37 diff --git a/configs/benchmarks/linear_classification/places205/r50_multihead.py b/configs/benchmarks/linear_classification/places205/r50_multihead.py new file mode 100644 index 00000000..8826bb41 --- /dev/null +++ b/configs/benchmarks/linear_classification/places205/r50_multihead.py @@ -0,0 +1,89 @@ +_base_ = '../../../base.py' +# model settings +model = dict( + type='Classification', + pretrained=None, + with_sobel=False, + backbone=dict( + type='ResNet', + depth=50, + in_channels=3, + out_indices=[0, 1, 2, 3, 4], # 0: conv-1, x: stage-x + norm_cfg=dict(type='BN'), + frozen_stages=4), + head=dict( + type='MultiClsHead', + pool_type='specified', + in_indices=[0, 1, 2, 3, 4], + with_last_layer_unpool=False, + backbone='resnet50', + norm_cfg=dict(type='SyncBN', momentum=0.1, affine=False), + num_classes=205)) +# dataset settings +data_source_cfg = dict( + type='Places205', + memcached=True, + mclient_path='/mnt/lustre/share/memcached_client') +data_train_list = 'data/places205/meta/train_labeled.txt' +data_train_root = 'data/places205/train' +data_test_list = 'data/places205/meta/val_labeled.txt' +data_test_root = 'data/places205/val' +dataset_type = 'ClassificationDataset' +img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) +train_pipeline = [ + dict(type='Resize', size=256), + dict(type='CenterCrop', size=256), + dict(type='RandomCrop', size=224), + dict(type='RandomHorizontalFlip'), +] +test_pipeline = [ + dict(type='Resize', size=256), + dict(type='CenterCrop', size=224), +] +# prefetch +prefetch = False +if not prefetch: + train_pipeline.extend([dict(type='ToTensor'), dict(type='Normalize', **img_norm_cfg)]) + test_pipeline.extend([dict(type='ToTensor'), dict(type='Normalize', **img_norm_cfg)]) +data = dict( + imgs_per_gpu=32, # total 32x8=256 + workers_per_gpu=4, + train=dict( + type=dataset_type, + data_source=dict( + list_file=data_train_list, root=data_train_root, + **data_source_cfg), + pipeline=train_pipeline, + prefetch=prefetch), + val=dict( + type=dataset_type, + data_source=dict( + list_file=data_test_list, root=data_test_root, **data_source_cfg), + pipeline=test_pipeline, + prefetch=prefetch)) +# additional hooks +custom_hooks = [ + dict( + type='ValidateHook', + dataset=data['val'], + initial=True, + interval=10, + imgs_per_gpu=32, + workers_per_gpu=4, + prefetch=prefetch, + img_norm_cfg=img_norm_cfg, + eval_param=dict(topk=(1, ))) +] +# optimizer +optimizer = dict( + type='SGD', + lr=0.01, + momentum=0.9, + weight_decay=0.0001, + paramwise_options=dict(norm_decay_mult=0.), + nesterov=True) +# learning policy +lr_config = dict(policy='step', step=[7, 14, 21]) +checkpoint_config = dict(interval=10) +# runtime settings +total_epochs = 28 diff --git a/configs/benchmarks/linear_classification/places205/r50_multihead_sobel.py b/configs/benchmarks/linear_classification/places205/r50_multihead_sobel.py new file mode 100644 index 00000000..7e5cd869 --- /dev/null +++ b/configs/benchmarks/linear_classification/places205/r50_multihead_sobel.py @@ -0,0 +1,89 @@ +_base_ = '../../../base.py' +# model settings +model = dict( + type='Classification', + pretrained=None, + with_sobel=True, + backbone=dict( + type='ResNet', + depth=50, + in_channels=2, + out_indices=[0, 1, 2, 3, 4], # 0: conv-1, x: stage-x + norm_cfg=dict(type='BN'), + frozen_stages=4), + head=dict( + 
type='MultiClsHead', + pool_type='specified', + in_indices=[0, 1, 2, 3, 4], + with_last_layer_unpool=False, + backbone='resnet50', + norm_cfg=dict(type='SyncBN', momentum=0.1, affine=False), + num_classes=205)) +# dataset settings +data_source_cfg = dict( + type='Places205', + memcached=True, + mclient_path='/mnt/lustre/share/memcached_client') +data_train_list = 'data/places205/meta/train_labeled.txt' +data_train_root = 'data/places205/train' +data_test_list = 'data/places205/meta/val_labeled.txt' +data_test_root = 'data/places205/val' +dataset_type = 'ClassificationDataset' +img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) +train_pipeline = [ + dict(type='Resize', size=256), + dict(type='CenterCrop', size=256), + dict(type='RandomCrop', size=224), + dict(type='RandomHorizontalFlip'), +] +test_pipeline = [ + dict(type='Resize', size=256), + dict(type='CenterCrop', size=224), +] +# prefetch +prefetch = False +if not prefetch: + train_pipeline.extend([dict(type='ToTensor'), dict(type='Normalize', **img_norm_cfg)]) + test_pipeline.extend([dict(type='ToTensor'), dict(type='Normalize', **img_norm_cfg)]) +data = dict( + imgs_per_gpu=32, # total 32x8=256 + workers_per_gpu=4, + train=dict( + type=dataset_type, + data_source=dict( + list_file=data_train_list, root=data_train_root, + **data_source_cfg), + pipeline=train_pipeline, + prefetch=prefetch), + val=dict( + type=dataset_type, + data_source=dict( + list_file=data_test_list, root=data_test_root, **data_source_cfg), + pipeline=test_pipeline, + prefetch=prefetch)) +# additional hooks +custom_hooks = [ + dict( + type='ValidateHook', + dataset=data['val'], + initial=True, + interval=10, + imgs_per_gpu=32, + workers_per_gpu=4, + prefetch=prefetch, + img_norm_cfg=img_norm_cfg, + eval_param=dict(topk=(1, ))) +] +# optimizer +optimizer = dict( + type='SGD', + lr=0.01, + momentum=0.9, + weight_decay=0.0001, + paramwise_options=dict(norm_decay_mult=0.), + nesterov=True) +# learning policy +lr_config = dict(policy='step', step=[7, 14, 21]) +checkpoint_config = dict(interval=10) +# runtime settings +total_epochs = 28 diff --git a/configs/benchmarks/linear_classification/stl10/mobilenet_last_1gpu_stl10.py b/configs/benchmarks/linear_classification/stl10/mobilenet_last_1gpu_stl10.py new file mode 100644 index 00000000..9fe8283e --- /dev/null +++ b/configs/benchmarks/linear_classification/stl10/mobilenet_last_1gpu_stl10.py @@ -0,0 +1,79 @@ +_base_ = '../../../base.py' +# model settings +model = dict( + type='Classification', + pretrained=None, + with_sobel=False, + backbone=dict( + type='MobileNetV2', + widen_factor=1.0, + frozen_stages=7), # 0-7 stages + head=dict( + type='ClsHead', with_avg_pool=True, in_channels=1280, + num_classes=10)) # stl 10 +# dataset settings +data_source_cfg = dict( + type='ImageNet', + memcached=False, + mclient_path='/mnt/lustre/share/memcached_client') +# test: STL-10 dataset +data_base = "/usr/lsy/src/OpenSelfSup_v1214/" +data_train_list = data_base + 'data/stl10/meta/train_5k_labeled.txt' # stl10 labeled 5k train +data_train_root = data_base + 'data/stl10/train/' # using labeled train set +data_test_list = data_base + 'data/stl10/meta/test_8k_labeled.txt' # stl10 labeled 8k test +data_test_root = data_base + 'data/stl10/test/' # using labeled test set +# resize setting +resizeto = 96 +dataset_type = 'ClassificationDataset' +img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) # imagenet +train_pipeline = [ + dict(type='RandomResizedCrop', size=resizeto), + 
dict(type='RandomHorizontalFlip'), + dict(type='ToTensor'), + dict(type='Normalize', **img_norm_cfg), +] +test_pipeline = [ + dict(type='Resize', size=resizeto), + dict(type='ToTensor'), + dict(type='Normalize', **img_norm_cfg), +] +data = dict( + imgs_per_gpu=256, # total 256*1=256, 1GPU linear cls + workers_per_gpu=6, + train=dict( + type=dataset_type, + data_source=dict( + list_file=data_train_list, root=data_train_root, + **data_source_cfg), + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_source=dict( + list_file=data_test_list, root=data_test_root, **data_source_cfg), + pipeline=test_pipeline)) +# additional hooks +custom_hooks = [ + dict( + type='ValidateHook', + dataset=data['val'], + # initial=True, + initial=False, + interval=5, + imgs_per_gpu=100, + workers_per_gpu=4, + eval_param=dict(topk=(1, 5))) +] +# optimizer +# optimizer = dict(type='SGD', lr=30., momentum=0.9, weight_decay=0.) # MoCoo ImageNet +optimizer = dict(type='SGD', lr=1.0, momentum=0.9, weight_decay=0.) # [OK] +# learning policy +lr_config = dict( + policy='step', + step=[60, 80] +) +checkpoint_config = dict(interval=100) +# runtime settings +total_epochs = 100 + +# * STL-10, baseline, size=96, lr=1.0 +# Test: CUDA_VISIBLE_DEVICES=0 PORT=25530 bash benchmarks/dist_train_linear_1gpu.sh configs/benchmarks/linear_classification/stl10/mobilenet_last_1gpu_stl10.py ./work_dirs/ diff --git a/configs/benchmarks/linear_classification/stl10/mobilenet_rep_stl10.py b/configs/benchmarks/linear_classification/stl10/mobilenet_rep_stl10.py new file mode 100644 index 00000000..661fe172 --- /dev/null +++ b/configs/benchmarks/linear_classification/stl10/mobilenet_rep_stl10.py @@ -0,0 +1,64 @@ +_base_ = '../../../base.py' +# model settings +model = dict( + type='Representation', + pretrained=None, + backbone=dict( + type='MobileNetV2', + widen_factor=1.0, + frozen_stages=7, # 0-7 stages + ), + neck=dict(type='AvgPoolNeck'), +) +# dataset settings +data_source_cfg = dict( + type='ImageNet', + memcached=False, + mclient_path='/mnt/lustre/share/memcached_client') +# test: STL-10 dataset +data_base = "/usr/lsy/src/OpenSelfSup_v1214/" +data_train_list = data_base + 'data/stl10/meta/train_5k_labeled.txt' # stl10 labeled 5k train +data_train_root = data_base + 'data/stl10/train/' # using labeled train set +data_test_list = data_base + 'data/stl10/meta/test_8k_labeled.txt' # stl10 labeled 8k test +data_test_root = data_base + 'data/stl10/test/' # using labeled test set + +dataset_type = 'ClassificationDataset' +img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) # imagenet +# img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]) # coco2017 +resizeto = 96 +test_pipeline = [ + dict(type='Resize', size=resizeto), + dict(type='CenterCrop', size=resizeto), + dict(type='ToTensor'), + dict(type='Normalize', **img_norm_cfg), +] +data = dict( + # imgs_per_gpu=32, # total 32*8=256, 8GPU linear cls + imgs_per_gpu=128, + workers_per_gpu=10, + val=dict( + type=dataset_type, + data_source=dict( + list_file=data_test_list, root=data_test_root, **data_source_cfg), + pipeline=test_pipeline)) +# additional hooks +custom_hooks = [ + dict( + type='ValidateHook', + dataset=data['val'], + initial=True, + interval=1, + imgs_per_gpu=128, + workers_per_gpu=12, + eval_param=dict(topk=(1, 5))) +] +# optimizer +# optimizer = dict(type='SGD', lr=30., momentum=0.9, weight_decay=0.) +optimizer = dict(type='SGD', lr=1.0, momentum=0.9, weight_decay=0.) 
# [OK] +# learning policy +lr_config = dict(policy='step', step=[60, 80]) +checkpoint_config = dict(interval=10) +# runtime settings +total_epochs = 100 + +# using for visualize representation of STL-10 diff --git a/configs/benchmarks/linear_classification/stl10/r18/r18_lr1_0_bs256_head1.py b/configs/benchmarks/linear_classification/stl10/r18/r18_lr1_0_bs256_head1.py new file mode 100644 index 00000000..4681867f --- /dev/null +++ b/configs/benchmarks/linear_classification/stl10/r18/r18_lr1_0_bs256_head1.py @@ -0,0 +1,78 @@ +_base_ = '../../../../base.py' +# model settings +model = dict( + type='Classification', + pretrained=None, + with_sobel=False, + backbone=dict( + type='ResNet', + depth=18, + in_channels=3, + out_indices=[4], # 0: conv-1, x: stage-x + norm_cfg=dict(type='BN'), + frozen_stages=4), + head=dict( + type='ClsHead', with_avg_pool=True, in_channels=512, num_classes=10)) # stl 10 +# dataset settings +data_source_cfg = dict( + type='ImageNet', + memcached=False, + mclient_path='/mnt/lustre/share/memcached_client') +# test: STL-10 dataset +data_train_list = 'data/stl10/meta/train_5k_labeled.txt' # stl10 labeled 5k train +data_train_root = 'data/stl10/train/' # using labeled train set +data_test_list = 'data/stl10/meta/test_8k_labeled.txt' # stl10 labeled 8k test +data_test_root = 'data/stl10/test/' # using labeled test set +# resize setting +dataset_type = 'ClassificationDataset' +img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) # imagenet +train_pipeline = [ + dict(type='RandomResizedCrop', size=96), + dict(type='RandomHorizontalFlip'), + dict(type='ToTensor'), + dict(type='Normalize', **img_norm_cfg), +] +test_pipeline = [ + dict(type='Resize', size=96), + dict(type='ToTensor'), + dict(type='Normalize', **img_norm_cfg), +] +data = dict( + imgs_per_gpu=256, # total 256, 1GPU linear cls + workers_per_gpu=4, + train=dict( + type=dataset_type, + data_source=dict( + list_file=data_train_list, root=data_train_root, + **data_source_cfg), + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_source=dict( + list_file=data_test_list, root=data_test_root, **data_source_cfg), + pipeline=test_pipeline)) +# additional hooks +custom_hooks = [ + dict( + type='ValidateHook', + dataset=data['val'], + initial=False, + interval=2, + imgs_per_gpu=128, + workers_per_gpu=4, + eval_param=dict(topk=(1, 5))) +] +# optimizer +# optimizer = dict(type='SGD', lr=30., momentum=0.9, weight_decay=0.) # MoCo ImageNet +optimizer = dict(type='SGD', lr=1.0, momentum=0.9, weight_decay=0.) 
# [OK] +# learning policy +lr_config = dict( + policy='step', + step=[60, 80] +) +checkpoint_config = dict(interval=200) +# runtime settings +total_epochs = 100 + +# * STL-10, baseline, size=96, lr=1.0 +# Test: CUDA_VISIBLE_DEVICES=6 PORT=25519 bash benchmarks/dist_train_linear_1gpu.sh configs/benchmarks/linear_classification/stl10/r18_last_1gpu_stl10.py [] diff --git a/configs/benchmarks/linear_classification/stl10/r18/run_stl10_dist_train_linear.sh b/configs/benchmarks/linear_classification/stl10/r18/run_stl10_dist_train_linear.sh new file mode 100644 index 00000000..2fa2adc3 --- /dev/null +++ b/configs/benchmarks/linear_classification/stl10/r18/run_stl10_dist_train_linear.sh @@ -0,0 +1,35 @@ +#!/usr/bin/env bash + +# Usage: +# bash configs/benchmarks/linear_classification/stl10/r18/run_stl10_dist_train_linear.sh $GPU_id $PORT $WEIGHT.pth + +base_path="configs/benchmarks/linear_classification/stl10/r18/" +exp_cfg0=$base_path"r18_lr0_1_bs256_head1.py" +exp_cfg1=$base_path"r18_lr1_0_bs256_head1.py" +exp_cfg2=$base_path"r18_lr10_bs256_head1.py" + +set -e +set -x + +GPU_ID=$1 +PORT_id=$2 +WEIGHT=$3 + + +if [ "$GPU_ID" == "" ] || [ "$PORT_id" == "" ]; then + echo "ERROR: Missing arguments." + exit +fi + +if [ "$WEIGHT" == "" ]; then + echo "train with random init ! ! !" + # random init train +fi + +if [ "$WEIGHT" != "" ]; then + echo "normal linear-supervised training start..." + # normal train with 3 random seeds {0,1,3} + CUDA_VISIBLE_DEVICES=$GPU_ID PORT=$PORT_id bash benchmarks/dist_train_linear_1gpu_sd.sh $exp_cfg1 $WEIGHT 0 + CUDA_VISIBLE_DEVICES=$GPU_ID PORT=$PORT_id bash benchmarks/dist_train_linear_1gpu_sd.sh $exp_cfg1 $WEIGHT 1 + CUDA_VISIBLE_DEVICES=$GPU_ID PORT=$PORT_id bash benchmarks/dist_train_linear_1gpu_sd.sh $exp_cfg1 $WEIGHT 3 +fi diff --git a/configs/benchmarks/linear_classification/stl10/r18_last_1gpu_stl10.py b/configs/benchmarks/linear_classification/stl10/r18_last_1gpu_stl10.py new file mode 100644 index 00000000..db186647 --- /dev/null +++ b/configs/benchmarks/linear_classification/stl10/r18_last_1gpu_stl10.py @@ -0,0 +1,80 @@ +_base_ = '../../../base.py' +# model settings +model = dict( + type='Classification', + pretrained=None, + with_sobel=False, + backbone=dict( + type='ResNet', + depth=18, + in_channels=3, + out_indices=[4], # 0: conv-1, x: stage-x + norm_cfg=dict(type='BN'), + frozen_stages=4), + head=dict( + type='ClsHead', with_avg_pool=True, in_channels=512, + num_classes=10)) # stl 10 +# dataset settings +data_source_cfg = dict( + type='ImageNet', + memcached=False, + mclient_path='/mnt/lustre/share/memcached_client') +# test: STL-10 dataset +data_train_list = 'data/stl10/meta/train_5k_labeled.txt' # stl10 labeled 5k train +data_train_root = 'data/stl10/train/' # using labeled train set +data_test_list = 'data/stl10/meta/test_8k_labeled.txt' # stl10 labeled 8k test +data_test_root = 'data/stl10/test/' # using labeled test set +# resize setting +dataset_type = 'ClassificationDataset' +img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) # imagenet +train_pipeline = [ + dict(type='RandomResizedCrop', size=96), + dict(type='RandomHorizontalFlip'), + dict(type='ToTensor'), + dict(type='Normalize', **img_norm_cfg), +] +test_pipeline = [ + dict(type='Resize', size=96), + dict(type='ToTensor'), + dict(type='Normalize', **img_norm_cfg), +] +data = dict( + imgs_per_gpu=256, # total 256, 1GPU linear cls + workers_per_gpu=4, + train=dict( + type=dataset_type, + data_source=dict( + list_file=data_train_list, root=data_train_root, + 
**data_source_cfg), + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_source=dict( + list_file=data_test_list, root=data_test_root, **data_source_cfg), + pipeline=test_pipeline)) +# additional hooks +custom_hooks = [ + dict( + type='ValidateHook', + dataset=data['val'], + # initial=True, + initial=False, + interval=2, + imgs_per_gpu=128, + workers_per_gpu=4, + eval_param=dict(topk=(1, 5))) +] +# optimizer +# optimizer = dict(type='SGD', lr=30., momentum=0.9, weight_decay=0.) # MoCo ImageNet +optimizer = dict(type='SGD', lr=1.0, momentum=0.9, weight_decay=0.) # [OK] +# learning policy +lr_config = dict( + policy='step', + step=[60, 80] +) +checkpoint_config = dict(interval=100) +# runtime settings +total_epochs = 100 + +# * STL-10, baseline, size=96, lr=1.0 +# Test: CUDA_VISIBLE_DEVICES=6 PORT=25519 bash benchmarks/dist_train_linear_1gpu.sh configs/benchmarks/linear_classification/stl10/r18_last_1gpu_stl10.py [] diff --git a/configs/benchmarks/linear_classification/stl10/r18_rep_stl10.py b/configs/benchmarks/linear_classification/stl10/r18_rep_stl10.py new file mode 100644 index 00000000..8f49541d --- /dev/null +++ b/configs/benchmarks/linear_classification/stl10/r18_rep_stl10.py @@ -0,0 +1,64 @@ +_base_ = '../../../base.py' +# model settings +model = dict( + type='Representation', + pretrained=None, + backbone=dict( + type='ResNet', + depth=18, + in_channels=3, + out_indices=[4], # 0: conv-1, x: stage-x + norm_cfg=dict(type='BN'), + frozen_stages=4), + neck=dict(type='AvgPoolNeck'), # 7x7x2048 -> 2048 +) +# dataset settings +data_source_cfg = dict( + type='ImageNet', + memcached=False, + mclient_path='/mnt/lustre/share/memcached_client') +# test: STL-10 dataset +data_train_list = 'data/STL/stl10/meta/train_5k_labeled.txt' # stl10 labeled 5k train +data_train_root = 'data/STL/stl10/train/' # using labeled train set +data_test_list = 'data/STL/stl10/meta/test_8k_labeled.txt' # stl10 labeled 8k test +data_test_root = 'data/STL/stl10/test/' # using labeled test set + +dataset_type = 'ClassificationDataset' +img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) # imagenet +# img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]) # coco2017 +resizeto = 96 +test_pipeline = [ + dict(type='Resize', size=resizeto), + dict(type='CenterCrop', size=resizeto), + dict(type='ToTensor'), + dict(type='Normalize', **img_norm_cfg), +] +data = dict( + # imgs_per_gpu=32, # total 32*8=256, 8GPU linear cls + imgs_per_gpu=128, + workers_per_gpu=4, + val=dict( + type=dataset_type, + data_source=dict( + list_file=data_test_list, root=data_test_root, **data_source_cfg), + pipeline=test_pipeline)) +# additional hooks +custom_hooks = [ + dict( + type='ValidateHook', + dataset=data['val'], + initial=True, + interval=1, + imgs_per_gpu=128, + workers_per_gpu=12, + eval_param=dict(topk=(1, 5))) +] +# optimizer +optimizer = dict(type='SGD', lr=30., momentum=0.9, weight_decay=0.) 
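+# NOTE (editor): for ResNet-18 the stage-4 output has 512 channels, and at the
+# 96x96 STL-10 input the feature map is 3x3x512, so AvgPoolNeck yields 512-d
+# features here; the '7x7x2048 -> 2048' comment above describes ResNet-50 at
+# 224x224 input.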
+# learning policy +lr_config = dict(policy='step', step=[60, 80]) +checkpoint_config = dict(interval=10) +# runtime settings +total_epochs = 100 + +# using for visualize representation of STL-10 diff --git a/configs/benchmarks/linear_classification/stl10/r50_last_1gpu_stl10.py b/configs/benchmarks/linear_classification/stl10/r50_last_1gpu_stl10.py new file mode 100644 index 00000000..5ece46b3 --- /dev/null +++ b/configs/benchmarks/linear_classification/stl10/r50_last_1gpu_stl10.py @@ -0,0 +1,82 @@ +_base_ = '../../../base.py' +# model settings +model = dict( + type='Classification', + pretrained=None, + with_sobel=False, + backbone=dict( + type='ResNet', + depth=50, + in_channels=3, + out_indices=[4], # 0: conv-1, x: stage-x + norm_cfg=dict(type='BN'), + frozen_stages=4), + head=dict( + type='ClsHead', with_avg_pool=True, in_channels=2048, + num_classes=10)) # stl 10 +# dataset settings +data_source_cfg = dict( + type='ImageNet', + memcached=False, + mclient_path='/mnt/lustre/share/memcached_client') +# test: STL-10 dataset +data_train_list = 'data/stl10/meta/train_5k_labeled.txt' # stl10 labeled 5k train +data_train_root = 'data/stl10/train/' # using labeled train set +data_test_list = 'data/stl10/meta/test_8k_labeled.txt' # stl10 labeled 8k test +data_test_root = 'data/stl10/test/' # using labeled test set +# resize setting +resizeto = 96 +dataset_type = 'ClassificationDataset' +img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) # imagenet +train_pipeline = [ + dict(type='RandomResizedCrop', size=resizeto), + dict(type='RandomHorizontalFlip'), + dict(type='ToTensor'), + dict(type='Normalize', **img_norm_cfg), +] +test_pipeline = [ + dict(type='Resize', size=resizeto), + dict(type='CenterCrop', size=resizeto), + dict(type='ToTensor'), + dict(type='Normalize', **img_norm_cfg), +] +data = dict( + imgs_per_gpu=256, # total 256*1=256, 1GPU linear cls + workers_per_gpu=8, + train=dict( + type=dataset_type, + data_source=dict( + list_file=data_train_list, root=data_train_root, + **data_source_cfg), + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_source=dict( + list_file=data_test_list, root=data_test_root, **data_source_cfg), + pipeline=test_pipeline)) +# additional hooks +custom_hooks = [ + dict( + type='ValidateHook', + dataset=data['val'], + # initial=True, + initial=False, + interval=10, + imgs_per_gpu=128, + workers_per_gpu=4, + eval_param=dict(topk=(1, 5))) +] +# optimizer +# optimizer = dict(type='SGD', lr=30., momentum=0.9, weight_decay=0.) # MoCoo ImageNet +optimizer = dict(type='SGD', lr=1.0, momentum=0.9, weight_decay=0.) 
# [OK] +# learning policy +lr_config = dict( + policy='step', + step=[60, 80] +) +checkpoint_config = dict(interval=50) +# runtime settings +total_epochs = 100 + +# * STL-10, baseline, size=96, lr=1.0 +# Test: CUDA_VISIBLE_DEVICES=3 PORT=25530 bash benchmarks/dist_train_linear_1gpu.sh configs/benchmarks/linear_classification/stl10/r50_last_1gpu_stl10.py ./work_dirs/my_pretrains/stl10_baseline/ diff --git a/configs/benchmarks/linear_classification/stl10/r50_rep_stl10.py b/configs/benchmarks/linear_classification/stl10/r50_rep_stl10.py new file mode 100644 index 00000000..1f4eeab9 --- /dev/null +++ b/configs/benchmarks/linear_classification/stl10/r50_rep_stl10.py @@ -0,0 +1,64 @@ +_base_ = '../../../base.py' +# model settings +model = dict( + type='Representation', + pretrained=None, + backbone=dict( + type='ResNet', + depth=50, + in_channels=3, + out_indices=[4], # 0: conv-1, x: stage-x + norm_cfg=dict(type='BN'), + frozen_stages=4), + neck=dict(type='AvgPoolNeck'), # 7x7x2048 -> 2048 +) +# dataset settings +data_source_cfg = dict( + type='ImageNet', + memcached=False, + mclient_path='/mnt/lustre/share/memcached_client') +# test: STL-10 dataset +data_train_list = 'data/stl10/meta/train_5k_labeled.txt' # stl10 labeled 5k train +data_train_root = 'data/stl10/train/' # using labeled train set +data_test_list = 'data/stl10/meta/test_8k_labeled.txt' # stl10 labeled 8k test +data_test_root = 'data/stl10/test/' # using labeled test set + +dataset_type = 'ClassificationDataset' +img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) # imagenet +# img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]) # coco2017 +resizeto = 96 +test_pipeline = [ + dict(type='Resize', size=resizeto), + dict(type='CenterCrop', size=resizeto), + dict(type='ToTensor'), + dict(type='Normalize', **img_norm_cfg), +] +data = dict( + # imgs_per_gpu=32, # total 32*8=256, 8GPU linear cls + imgs_per_gpu=128, + workers_per_gpu=10, # 5, + val=dict( + type=dataset_type, + data_source=dict( + list_file=data_test_list, root=data_test_root, **data_source_cfg), + pipeline=test_pipeline)) +# additional hooks +custom_hooks = [ + dict( + type='ValidateHook', + dataset=data['val'], + initial=True, + interval=1, + imgs_per_gpu=128, + workers_per_gpu=12, + eval_param=dict(topk=(1, 5))) +] +# optimizer +optimizer = dict(type='SGD', lr=30., momentum=0.9, weight_decay=0.) 
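+# NOTE (editor sketch): AvgPoolNeck amounts to global average pooling over the
+# spatial dimensions. For ResNet-50 at the 96x96 STL-10 input, the stage-4 map
+# is (N, 2048, 3, 3) and pooling reduces it to (N, 2048), e.g. in PyTorch:
+#   feats = x.mean(dim=(2, 3))  # (N, 2048, H, W) -> (N, 2048)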
+# learning policy +lr_config = dict(policy='step', step=[60, 80]) +checkpoint_config = dict(interval=10) +# runtime settings +total_epochs = 100 + +# using for visualize representation of STL-10 diff --git a/configs/benchmarks/linear_classification/tiny_imagenet/r18_last_1gpu_tiny.py b/configs/benchmarks/linear_classification/tiny_imagenet/r18_last_1gpu_tiny.py new file mode 100644 index 00000000..d5a1cd9b --- /dev/null +++ b/configs/benchmarks/linear_classification/tiny_imagenet/r18_last_1gpu_tiny.py @@ -0,0 +1,80 @@ +_base_ = '../../../base.py' +# model settings +model = dict( + type='Classification', + pretrained=None, + with_sobel=False, + backbone=dict( + type='ResNet', + depth=18, + in_channels=3, + out_indices=[4], # 0: conv-1, x: stage-x + norm_cfg=dict(type='BN'), + frozen_stages=4), + head=dict( + type='ClsHead', with_avg_pool=True, in_channels=512, + num_classes=200)) # Tiny ImageNet +# dataset settings +data_source_cfg = dict( + type='ImageNet', + memcached=False, + mclient_path='/mnt/lustre/share/memcached_client') +# tiny imagenet +data_train_list = './data/TinyImagenet200/meta/train_labeled.txt' # unlabeled train 10w +data_train_root = './data/TinyImagenet200/train/' +data_test_list = './data/TinyImagenet200/meta/val_labeled.txt' # val labeled 1w +data_test_root = './data/TinyImagenet200/val/' +# resize setting +resizeto = 64 +dataset_type = 'ClassificationDataset' +img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) # imagenet +train_pipeline = [ + dict(type='RandomResizedCrop', size=resizeto), + dict(type='RandomHorizontalFlip'), + dict(type='ToTensor'), + dict(type='Normalize', **img_norm_cfg), +] +test_pipeline = [ + dict(type='Resize', size=resizeto), + dict(type='ToTensor'), + dict(type='Normalize', **img_norm_cfg), +] +data = dict( + imgs_per_gpu=256, # total 256*1=256, 1GPU linear cls + workers_per_gpu=12, + train=dict( + type=dataset_type, + data_source=dict( + list_file=data_train_list, root=data_train_root, + **data_source_cfg), + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_source=dict( + list_file=data_test_list, root=data_test_root, **data_source_cfg), + pipeline=test_pipeline)) +# additional hooks +custom_hooks = [ + dict( + type='ValidateHook', + dataset=data['val'], + initial=True, + interval=10, # 1, + imgs_per_gpu=128, + workers_per_gpu=8, # 4, + eval_param=dict(topk=(1, 5))) +] +# optimizer +# optimizer = dict(type='SGD', lr=30., momentum=0.9, weight_decay=0.) # Imagenet baseline +optimizer = dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.) 
# [OK] +# learning policy +lr_config = dict( + policy='step', + step=[60, 80] +) +checkpoint_config = dict(interval=50) +# runtime settings +total_epochs = 100 + +# * Tiny Imagenet, baseline, size=64 +# Test: CUDA_VISIBLE_DEVICES=0 PORT=25027 bash benchmarks/dist_train_linear_1gpu.sh configs/benchmarks/linear_classification/tiny_imagenet/r18_last_1gpu_tiny.py ./work_dirs/my_pretrains/ diff --git a/configs/benchmarks/linear_classification/tiny_imagenet/r18_rep_tiny_imagenet.py b/configs/benchmarks/linear_classification/tiny_imagenet/r18_rep_tiny_imagenet.py new file mode 100644 index 00000000..9eac0bb0 --- /dev/null +++ b/configs/benchmarks/linear_classification/tiny_imagenet/r18_rep_tiny_imagenet.py @@ -0,0 +1,66 @@ +_base_ = '../../../base.py' +# model settings +model = dict( + type='Representation', + pretrained=None, + backbone=dict( + type='ResNet', + depth=18, + in_channels=3, + out_indices=[4], # 0: conv-1, x: stage-x + norm_cfg=dict(type='BN'), + frozen_stages=4), + neck=dict(type='AvgPoolNeck'), +) +# dataset settings +data_source_cfg = dict( + type='ImageNet', + memcached=False, + mclient_path='/mnt/lustre/share/memcached_client') +# tiny imagenet +data_train_list = './data/TinyImagenet200/meta/train_unlabeled.txt' # unlabeled train 10w +data_train_root = './data/TinyImagenet200/train/' +# data_test_list = './data/TinyImagenet200/meta/val_labeled.txt' # val labeled 1w +# data_test_root = './data/TinyImagenet200/val/' +data_test_list = './data/TinyImagenet200/meta/train_20class_labeled.txt' # val labeled 20 class +data_test_root = './data/TinyImagenet200/train/' + +dataset_type = 'ClassificationDataset' +img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) # imagenet + +resizeto = 64 +test_pipeline = [ + dict(type='Resize', size=resizeto), + dict(type='CenterCrop', size=resizeto), + dict(type='ToTensor'), + dict(type='Normalize', **img_norm_cfg), +] +data = dict( + imgs_per_gpu=128, + workers_per_gpu=12, # 5, + val=dict( + type=dataset_type, + data_source=dict( + list_file=data_test_list, root=data_test_root, + **data_source_cfg), + pipeline=test_pipeline)) +# additional hooks +custom_hooks = [ + dict( + type='ValidateHook', + dataset=data['val'], + initial=True, + interval=1, + imgs_per_gpu=128, + workers_per_gpu=12, + eval_param=dict(topk=(1, 5))) +] +# optimizer +optimizer = dict(type='SGD', lr=30., momentum=0.9, weight_decay=0.) 
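+# NOTE (editor): at the 64x64 Tiny-ImageNet input, ResNet-18's stage-4 map is
+# 2x2x512, so the pooled representation is 512-d. Also note that the val split
+# above is a 20-class labeled subset of the *train* set, used only for
+# visualizing representations (see the trailing comment), not a held-out
+# evaluation.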
+# learning policy +lr_config = dict(policy='step', step=[60, 80]) +checkpoint_config = dict(interval=10) +# runtime settings +total_epochs = 100 + +# using for visualize representation of tiny-imagenet diff --git a/configs/benchmarks/semi_classification/imagenet_10percent/base.py b/configs/benchmarks/semi_classification/imagenet_10percent/base.py new file mode 100644 index 00000000..48e29a60 --- /dev/null +++ b/configs/benchmarks/semi_classification/imagenet_10percent/base.py @@ -0,0 +1,66 @@ +_base_ = '../../../base.py' +# model settings +model = dict( + type='Classification', + pretrained=None, + backbone=dict( + type='ResNet', + depth=50, + out_indices=[4], # 0: conv-1, x: stage-x + norm_cfg=dict(type='SyncBN')), + head=dict( + type='ClsHead', with_avg_pool=True, in_channels=2048, + num_classes=1000)) +# dataset settings +data_source_cfg = dict( + type='ImageNet', + memcached=True, + mclient_path='/mnt/lustre/share/memcached_client') +data_train_list = 'data/imagenet/meta/train_labeled_10percent.txt' +data_train_root = 'data/imagenet/train' +data_test_list = 'data/imagenet/meta/val_labeled.txt' +data_test_root = 'data/imagenet/val' +dataset_type = 'ClassificationDataset' +img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) +train_pipeline = [ + dict(type='RandomResizedCrop', size=224), + dict(type='RandomHorizontalFlip'), + dict(type='ToTensor'), + dict(type='Normalize', **img_norm_cfg), +] +test_pipeline = [ + dict(type='Resize', size=256), + dict(type='CenterCrop', size=224), + dict(type='ToTensor'), + dict(type='Normalize', **img_norm_cfg), +] +data = dict( + imgs_per_gpu=64, # total 256 + workers_per_gpu=2, + train=dict( + type=dataset_type, + data_source=dict( + list_file=data_train_list, root=data_train_root, + **data_source_cfg), + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_source=dict( + list_file=data_test_list, root=data_test_root, **data_source_cfg), + pipeline=test_pipeline)) +# additional hooks +custom_hooks = [ + dict( + type='ValidateHook', + dataset=data['val'], + initial=False, + interval=20, + imgs_per_gpu=32, + workers_per_gpu=2, + eval_param=dict(topk=(1, 5))) +] +# learning policy +lr_config = dict(policy='step', step=[12, 16], gamma=0.2) +checkpoint_config = dict(interval=20) +# runtime settings +total_epochs = 20 diff --git a/configs/benchmarks/semi_classification/imagenet_10percent/r50_lr0_001_head1.py b/configs/benchmarks/semi_classification/imagenet_10percent/r50_lr0_001_head1.py new file mode 100644 index 00000000..5bd55efc --- /dev/null +++ b/configs/benchmarks/semi_classification/imagenet_10percent/r50_lr0_001_head1.py @@ -0,0 +1,4 @@ +_base_ = 'base.py' +# optimizer +optimizer = dict(type='SGD', lr=0.001, momentum=0.9, weight_decay=0.0001, + paramwise_options={'\Ahead.': dict(lr_mult=1)}) diff --git a/configs/benchmarks/semi_classification/imagenet_10percent/r50_lr0_001_head10.py b/configs/benchmarks/semi_classification/imagenet_10percent/r50_lr0_001_head10.py new file mode 100644 index 00000000..9dc9a79e --- /dev/null +++ b/configs/benchmarks/semi_classification/imagenet_10percent/r50_lr0_001_head10.py @@ -0,0 +1,4 @@ +_base_ = 'base.py' +# optimizer +optimizer = dict(type='SGD', lr=0.001, momentum=0.9, weight_decay=0.0001, + paramwise_options={'\Ahead.': dict(lr_mult=10)}) diff --git a/configs/benchmarks/semi_classification/imagenet_10percent/r50_lr0_001_head100.py b/configs/benchmarks/semi_classification/imagenet_10percent/r50_lr0_001_head100.py new file mode 100644 index 00000000..a1d324d2 --- /dev/null +++ 
b/configs/benchmarks/semi_classification/imagenet_10percent/r50_lr0_001_head100.py @@ -0,0 +1,4 @@ +_base_ = 'base.py' +# optimizer +optimizer = dict(type='SGD', lr=0.001, momentum=0.9, weight_decay=0.0001, + paramwise_options={'\Ahead.': dict(lr_mult=100)}) diff --git a/configs/benchmarks/semi_classification/imagenet_10percent/r50_lr0_01_head1.py b/configs/benchmarks/semi_classification/imagenet_10percent/r50_lr0_01_head1.py new file mode 100644 index 00000000..c3553048 --- /dev/null +++ b/configs/benchmarks/semi_classification/imagenet_10percent/r50_lr0_01_head1.py @@ -0,0 +1,4 @@ +_base_ = 'base.py' +# optimizer +optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001, + paramwise_options={'\Ahead.': dict(lr_mult=1)}) diff --git a/configs/benchmarks/semi_classification/imagenet_10percent/r50_lr0_01_head10.py b/configs/benchmarks/semi_classification/imagenet_10percent/r50_lr0_01_head10.py new file mode 100644 index 00000000..7d5bedaa --- /dev/null +++ b/configs/benchmarks/semi_classification/imagenet_10percent/r50_lr0_01_head10.py @@ -0,0 +1,4 @@ +_base_ = 'base.py' +# optimizer +optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001, + paramwise_options={'\Ahead.': dict(lr_mult=10)}) diff --git a/configs/benchmarks/semi_classification/imagenet_10percent/r50_lr0_01_head100.py b/configs/benchmarks/semi_classification/imagenet_10percent/r50_lr0_01_head100.py new file mode 100644 index 00000000..6b696915 --- /dev/null +++ b/configs/benchmarks/semi_classification/imagenet_10percent/r50_lr0_01_head100.py @@ -0,0 +1,4 @@ +_base_ = 'base.py' +# optimizer +optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001, + paramwise_options={'\Ahead.': dict(lr_mult=100)}) diff --git a/configs/benchmarks/semi_classification/imagenet_10percent/r50_lr0_01_head1_sobel.py b/configs/benchmarks/semi_classification/imagenet_10percent/r50_lr0_01_head1_sobel.py new file mode 100644 index 00000000..53cefa4b --- /dev/null +++ b/configs/benchmarks/semi_classification/imagenet_10percent/r50_lr0_01_head1_sobel.py @@ -0,0 +1,71 @@ +_base_ = '../../../base.py' +# model settings +model = dict( + type='Classification', + pretrained=None, + with_sobel=True, + backbone=dict( + type='ResNet', + depth=50, + in_channels=2, + out_indices=[4], # 0: conv-1, x: stage-x + norm_cfg=dict(type='SyncBN')), + head=dict( + type='ClsHead', with_avg_pool=True, in_channels=2048, + num_classes=1000)) +# dataset settings +data_source_cfg = dict( + type='ImageNet', + memcached=True, + mclient_path='/mnt/lustre/share/memcached_client') +data_train_list = 'data/imagenet/meta/train_labeled_10percent.txt' +data_train_root = 'data/imagenet/train' +data_test_list = 'data/imagenet/meta/val_labeled.txt' +data_test_root = 'data/imagenet/val' +dataset_type = 'ClassificationDataset' +img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) +train_pipeline = [ + dict(type='RandomResizedCrop', size=224), + dict(type='RandomHorizontalFlip'), + dict(type='ToTensor'), + dict(type='Normalize', **img_norm_cfg), +] +test_pipeline = [ + dict(type='Resize', size=256), + dict(type='CenterCrop', size=224), + dict(type='ToTensor'), + dict(type='Normalize', **img_norm_cfg), +] +data = dict( + imgs_per_gpu=64, # total 256 + workers_per_gpu=2, + train=dict( + type=dataset_type, + data_source=dict( + list_file=data_train_list, root=data_train_root, + **data_source_cfg), + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_source=dict( + list_file=data_test_list, root=data_test_root, 
**data_source_cfg), + pipeline=test_pipeline)) +# additional hooks +custom_hooks = [ + dict( + type='ValidateHook', + dataset=data['val'], + initial=False, + interval=20, + imgs_per_gpu=32, + workers_per_gpu=2, + eval_param=dict(topk=(1, 5))) +] +# optimizer +optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001, + paramwise_options={'\Ahead.': dict(lr_mult=1)}) +# learning policy +lr_config = dict(policy='step', step=[12, 16], gamma=0.2) +checkpoint_config = dict(interval=20) +# runtime settings +total_epochs = 20 diff --git a/configs/benchmarks/semi_classification/imagenet_10percent/r50_lr0_1_head1.py b/configs/benchmarks/semi_classification/imagenet_10percent/r50_lr0_1_head1.py new file mode 100644 index 00000000..414d07e8 --- /dev/null +++ b/configs/benchmarks/semi_classification/imagenet_10percent/r50_lr0_1_head1.py @@ -0,0 +1,4 @@ +_base_ = 'base.py' +# optimizer +optimizer = dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0001, + paramwise_options={'\Ahead.': dict(lr_mult=1)}) diff --git a/configs/benchmarks/semi_classification/imagenet_10percent/r50_lr0_1_head10.py b/configs/benchmarks/semi_classification/imagenet_10percent/r50_lr0_1_head10.py new file mode 100644 index 00000000..9b8c6f1c --- /dev/null +++ b/configs/benchmarks/semi_classification/imagenet_10percent/r50_lr0_1_head10.py @@ -0,0 +1,4 @@ +_base_ = 'base.py' +# optimizer +optimizer = dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0001, + paramwise_options={'\Ahead.': dict(lr_mult=10)}) diff --git a/configs/benchmarks/semi_classification/imagenet_10percent/r50_lr0_1_head100.py b/configs/benchmarks/semi_classification/imagenet_10percent/r50_lr0_1_head100.py new file mode 100644 index 00000000..f7cc627a --- /dev/null +++ b/configs/benchmarks/semi_classification/imagenet_10percent/r50_lr0_1_head100.py @@ -0,0 +1,4 @@ +_base_ = 'base.py' +# optimizer +optimizer = dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0001, + paramwise_options={'\Ahead.': dict(lr_mult=100)}) diff --git a/configs/benchmarks/semi_classification/imagenet_1percent/base.py b/configs/benchmarks/semi_classification/imagenet_1percent/base.py new file mode 100644 index 00000000..83a38a04 --- /dev/null +++ b/configs/benchmarks/semi_classification/imagenet_1percent/base.py @@ -0,0 +1,72 @@ +_base_ = '../../../base.py' +# model settings +model = dict( + type='Classification', + pretrained=None, + backbone=dict( + type='ResNet', + depth=50, + out_indices=[4], # 0: conv-1, x: stage-x + norm_cfg=dict(type='SyncBN')), + head=dict( + type='ClsHead', with_avg_pool=True, in_channels=2048, + num_classes=1000)) +# dataset settings +data_source_cfg = dict( + type='ImageNet', + memcached=True, + mclient_path='/mnt/lustre/share/memcached_client') +data_train_list = 'data/imagenet/meta/train_labeled_1percent.txt' +data_train_root = 'data/imagenet/train' +data_test_list = 'data/imagenet/meta/val_labeled.txt' +data_test_root = 'data/imagenet/val' +dataset_type = 'ClassificationDataset' +img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) +train_pipeline = [ + dict(type='RandomResizedCrop', size=224), + dict(type='RandomHorizontalFlip'), + dict(type='ToTensor'), + dict(type='Normalize', **img_norm_cfg), +] +test_pipeline = [ + dict(type='Resize', size=256), + dict(type='CenterCrop', size=224), + dict(type='ToTensor'), + dict(type='Normalize', **img_norm_cfg), +] +data = dict( + imgs_per_gpu=64, # total 256 + workers_per_gpu=2, + train=dict( + type=dataset_type, + data_source=dict( + list_file=data_train_list, 
root=data_train_root, + **data_source_cfg), + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_source=dict( + list_file=data_test_list, root=data_test_root, **data_source_cfg), + pipeline=test_pipeline)) +# additional hooks +custom_hooks = [ + dict( + type='ValidateHook', + dataset=data['val'], + initial=False, + interval=20, + imgs_per_gpu=32, + workers_per_gpu=2, + eval_param=dict(topk=(1, 5))) +] +# learning policy +lr_config = dict(policy='step', step=[12, 16], gamma=0.2) +checkpoint_config = dict(interval=20) +log_config = dict( + interval=10, + hooks=[ + dict(type='TextLoggerHook'), + dict(type='TensorboardLoggerHook') + ]) +# runtime settings +total_epochs = 20 diff --git a/configs/benchmarks/semi_classification/imagenet_1percent/r50_lr0_001_head1.py b/configs/benchmarks/semi_classification/imagenet_1percent/r50_lr0_001_head1.py new file mode 100644 index 00000000..16bc7988 --- /dev/null +++ b/configs/benchmarks/semi_classification/imagenet_1percent/r50_lr0_001_head1.py @@ -0,0 +1,4 @@ +_base_ = 'base.py' +# optimizer +optimizer = dict(type='SGD', lr=0.001, momentum=0.9, weight_decay=0.0005, + paramwise_options={'\Ahead.': dict(lr_mult=1)}) diff --git a/configs/benchmarks/semi_classification/imagenet_1percent/r50_lr0_001_head10.py b/configs/benchmarks/semi_classification/imagenet_1percent/r50_lr0_001_head10.py new file mode 100644 index 00000000..f16c5269 --- /dev/null +++ b/configs/benchmarks/semi_classification/imagenet_1percent/r50_lr0_001_head10.py @@ -0,0 +1,4 @@ +_base_ = 'base.py' +# optimizer +optimizer = dict(type='SGD', lr=0.001, momentum=0.9, weight_decay=0.0005, + paramwise_options={'\Ahead.': dict(lr_mult=10)}) diff --git a/configs/benchmarks/semi_classification/imagenet_1percent/r50_lr0_001_head100.py b/configs/benchmarks/semi_classification/imagenet_1percent/r50_lr0_001_head100.py new file mode 100644 index 00000000..e7e4355d --- /dev/null +++ b/configs/benchmarks/semi_classification/imagenet_1percent/r50_lr0_001_head100.py @@ -0,0 +1,4 @@ +_base_ = 'base.py' +# optimizer +optimizer = dict(type='SGD', lr=0.001, momentum=0.9, weight_decay=0.0005, + paramwise_options={'\Ahead.': dict(lr_mult=100)}) diff --git a/configs/benchmarks/semi_classification/imagenet_1percent/r50_lr0_01_head1.py b/configs/benchmarks/semi_classification/imagenet_1percent/r50_lr0_01_head1.py new file mode 100644 index 00000000..dfb6c97f --- /dev/null +++ b/configs/benchmarks/semi_classification/imagenet_1percent/r50_lr0_01_head1.py @@ -0,0 +1,4 @@ +_base_ = 'base.py' +# optimizer +optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005, + paramwise_options={'\Ahead.': dict(lr_mult=1)}) diff --git a/configs/benchmarks/semi_classification/imagenet_1percent/r50_lr0_01_head10.py b/configs/benchmarks/semi_classification/imagenet_1percent/r50_lr0_01_head10.py new file mode 100644 index 00000000..a8fe6d76 --- /dev/null +++ b/configs/benchmarks/semi_classification/imagenet_1percent/r50_lr0_01_head10.py @@ -0,0 +1,4 @@ +_base_ = 'base.py' +# optimizer +optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005, + paramwise_options={'\Ahead.': dict(lr_mult=10)}) diff --git a/configs/benchmarks/semi_classification/imagenet_1percent/r50_lr0_01_head100.py b/configs/benchmarks/semi_classification/imagenet_1percent/r50_lr0_01_head100.py new file mode 100644 index 00000000..12a80442 --- /dev/null +++ b/configs/benchmarks/semi_classification/imagenet_1percent/r50_lr0_01_head100.py @@ -0,0 +1,4 @@ +_base_ = 'base.py' +# optimizer +optimizer = dict(type='SGD', lr=0.01, 
momentum=0.9, weight_decay=0.0005, + paramwise_options={'\Ahead.': dict(lr_mult=100)}) diff --git a/configs/benchmarks/semi_classification/imagenet_1percent/r50_lr0_01_head1_sobel.py b/configs/benchmarks/semi_classification/imagenet_1percent/r50_lr0_01_head1_sobel.py new file mode 100644 index 00000000..ed16a61f --- /dev/null +++ b/configs/benchmarks/semi_classification/imagenet_1percent/r50_lr0_01_head1_sobel.py @@ -0,0 +1,77 @@ +_base_ = '../../../base.py' +# model settings +model = dict( + type='Classification', + pretrained=None, + with_sobel=True, + backbone=dict( + type='ResNet', + depth=50, + in_channels=2, + out_indices=[4], # 0: conv-1, x: stage-x + norm_cfg=dict(type='SyncBN')), + head=dict( + type='ClsHead', with_avg_pool=True, in_channels=2048, + num_classes=1000)) +# dataset settings +data_source_cfg = dict( + type='ImageNet', + memcached=True, + mclient_path='/mnt/lustre/share/memcached_client') +data_train_list = 'data/imagenet/meta/train_labeled_1percent.txt' +data_train_root = 'data/imagenet/train' +data_test_list = 'data/imagenet/meta/val_labeled.txt' +data_test_root = 'data/imagenet/val' +dataset_type = 'ClassificationDataset' +img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) +train_pipeline = [ + dict(type='RandomResizedCrop', size=224), + dict(type='RandomHorizontalFlip'), + dict(type='ToTensor'), + dict(type='Normalize', **img_norm_cfg), +] +test_pipeline = [ + dict(type='Resize', size=256), + dict(type='CenterCrop', size=224), + dict(type='ToTensor'), + dict(type='Normalize', **img_norm_cfg), +] +data = dict( + imgs_per_gpu=64, # total 256 + workers_per_gpu=2, + train=dict( + type=dataset_type, + data_source=dict( + list_file=data_train_list, root=data_train_root, + **data_source_cfg), + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_source=dict( + list_file=data_test_list, root=data_test_root, **data_source_cfg), + pipeline=test_pipeline)) +# additional hooks +custom_hooks = [ + dict( + type='ValidateHook', + dataset=data['val'], + initial=False, + interval=20, + imgs_per_gpu=32, + workers_per_gpu=2, + eval_param=dict(topk=(1, 5))) +] +# optimizer +optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005, + paramwise_options={'\Ahead.': dict(lr_mult=1)}) +# learning policy +lr_config = dict(policy='step', step=[12, 16], gamma=0.2) +checkpoint_config = dict(interval=20) +log_config = dict( + interval=10, + hooks=[ + dict(type='TextLoggerHook'), + dict(type='TensorboardLoggerHook') + ]) +# runtime settings +total_epochs = 20 diff --git a/configs/benchmarks/semi_classification/imagenet_1percent/r50_lr0_1_head1.py b/configs/benchmarks/semi_classification/imagenet_1percent/r50_lr0_1_head1.py new file mode 100644 index 00000000..9c469652 --- /dev/null +++ b/configs/benchmarks/semi_classification/imagenet_1percent/r50_lr0_1_head1.py @@ -0,0 +1,4 @@ +_base_ = 'base.py' +# optimizer +optimizer = dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0005, + paramwise_options={'\Ahead.': dict(lr_mult=1)}) diff --git a/configs/benchmarks/semi_classification/imagenet_1percent/r50_lr0_1_head10.py b/configs/benchmarks/semi_classification/imagenet_1percent/r50_lr0_1_head10.py new file mode 100644 index 00000000..97d2be11 --- /dev/null +++ b/configs/benchmarks/semi_classification/imagenet_1percent/r50_lr0_1_head10.py @@ -0,0 +1,4 @@ +_base_ = 'base.py' +# optimizer +optimizer = dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0005, + paramwise_options={'\Ahead.': dict(lr_mult=10)}) diff --git 
a/configs/benchmarks/semi_classification/imagenet_1percent/r50_lr0_1_head100.py b/configs/benchmarks/semi_classification/imagenet_1percent/r50_lr0_1_head100.py new file mode 100644 index 00000000..94e75883 --- /dev/null +++ b/configs/benchmarks/semi_classification/imagenet_1percent/r50_lr0_1_head100.py @@ -0,0 +1,4 @@ +_base_ = 'base.py' +# optimizer +optimizer = dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0005, + paramwise_options={'\Ahead.': dict(lr_mult=100)}) diff --git a/configs/benchmarks/semi_classification/stl10/base.py b/configs/benchmarks/semi_classification/stl10/base.py new file mode 100644 index 00000000..934f20ed --- /dev/null +++ b/configs/benchmarks/semi_classification/stl10/base.py @@ -0,0 +1,69 @@ +_base_ = '../../../base.py' +# model settings +model = dict( + type='Classification', + pretrained=None, + backbone=dict( + type='ResNet', + depth=50, + out_indices=[4], # 0: conv-1, x: stage-x + norm_cfg=dict(type='SyncBN')), + head=dict( + type='ClsHead', with_avg_pool=True, in_channels=2048, + num_classes=10)) +# dataset settings +data_source_cfg = dict( + type='ImageNet', + memcached=False, + mclient_path='/mnt/lustre/share/memcached_client') +# test: STL-10 dataset +data_train_list = 'data/stl10/meta/train_5k_labeled.txt' # stl10 labeled 5k train +data_train_root = 'data/stl10/train/' # using labeled train set +data_test_list = 'data/stl10/meta/test_8k_labeled.txt' # stl10 labeled 8k test +data_test_root = 'data/stl10/test/' # using labeled test set +# resize setting +resizeto = 96 +dataset_type = 'ClassificationDataset' +img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) # imagenet +train_pipeline = [ + dict(type='RandomResizedCrop', size=resizeto), + dict(type='RandomHorizontalFlip'), + dict(type='ToTensor'), + dict(type='Normalize', **img_norm_cfg), +] +test_pipeline = [ + dict(type='Resize', size=resizeto+32), + dict(type='CenterCrop', size=resizeto), + dict(type='ToTensor'), + dict(type='Normalize', **img_norm_cfg), +] +data = dict( + imgs_per_gpu=256, # total 256, 1GPU linear cls + workers_per_gpu=8, + train=dict( + type=dataset_type, + data_source=dict( + list_file=data_train_list, root=data_train_root, + **data_source_cfg), + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_source=dict( + list_file=data_test_list, root=data_test_root, **data_source_cfg), + pipeline=test_pipeline)) +# additional hooks +custom_hooks = [ + dict( + type='ValidateHook', + dataset=data['val'], + initial=False, + interval=20, + imgs_per_gpu=128, + workers_per_gpu=4, + eval_param=dict(topk=(1, 5))) +] +# learning policy +lr_config = dict(policy='step', step=[12, 16], gamma=0.2) +checkpoint_config = dict(interval=20) +# runtime settings +total_epochs = 20 diff --git a/configs/benchmarks/semi_classification/stl10/r50_lr0_001_head1.py b/configs/benchmarks/semi_classification/stl10/r50_lr0_001_head1.py new file mode 100644 index 00000000..5bd55efc --- /dev/null +++ b/configs/benchmarks/semi_classification/stl10/r50_lr0_001_head1.py @@ -0,0 +1,4 @@ +_base_ = 'base.py' +# optimizer +optimizer = dict(type='SGD', lr=0.001, momentum=0.9, weight_decay=0.0001, + paramwise_options={'\Ahead.': dict(lr_mult=1)}) diff --git a/configs/benchmarks/semi_classification/stl10/r50_lr0_001_head10.py b/configs/benchmarks/semi_classification/stl10/r50_lr0_001_head10.py new file mode 100644 index 00000000..9dc9a79e --- /dev/null +++ b/configs/benchmarks/semi_classification/stl10/r50_lr0_001_head10.py @@ -0,0 +1,4 @@ +_base_ = 'base.py' +# optimizer +optimizer = 
dict(type='SGD', lr=0.001, momentum=0.9, weight_decay=0.0001, + paramwise_options={'\Ahead.': dict(lr_mult=10)}) diff --git a/configs/benchmarks/semi_classification/stl10/r50_lr0_001_head100.py b/configs/benchmarks/semi_classification/stl10/r50_lr0_001_head100.py new file mode 100644 index 00000000..a1d324d2 --- /dev/null +++ b/configs/benchmarks/semi_classification/stl10/r50_lr0_001_head100.py @@ -0,0 +1,4 @@ +_base_ = 'base.py' +# optimizer +optimizer = dict(type='SGD', lr=0.001, momentum=0.9, weight_decay=0.0001, + paramwise_options={'\Ahead.': dict(lr_mult=100)}) diff --git a/configs/benchmarks/semi_classification/stl10/r50_lr0_01_head1.py b/configs/benchmarks/semi_classification/stl10/r50_lr0_01_head1.py new file mode 100644 index 00000000..c3553048 --- /dev/null +++ b/configs/benchmarks/semi_classification/stl10/r50_lr0_01_head1.py @@ -0,0 +1,4 @@ +_base_ = 'base.py' +# optimizer +optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001, + paramwise_options={'\Ahead.': dict(lr_mult=1)}) diff --git a/configs/benchmarks/semi_classification/stl10/r50_lr0_01_head10.py b/configs/benchmarks/semi_classification/stl10/r50_lr0_01_head10.py new file mode 100644 index 00000000..7d5bedaa --- /dev/null +++ b/configs/benchmarks/semi_classification/stl10/r50_lr0_01_head10.py @@ -0,0 +1,4 @@ +_base_ = 'base.py' +# optimizer +optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001, + paramwise_options={'\Ahead.': dict(lr_mult=10)}) diff --git a/configs/benchmarks/semi_classification/stl10/r50_lr0_01_head100.py b/configs/benchmarks/semi_classification/stl10/r50_lr0_01_head100.py new file mode 100644 index 00000000..6b696915 --- /dev/null +++ b/configs/benchmarks/semi_classification/stl10/r50_lr0_01_head100.py @@ -0,0 +1,4 @@ +_base_ = 'base.py' +# optimizer +optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001, + paramwise_options={'\Ahead.': dict(lr_mult=100)}) diff --git a/configs/benchmarks/semi_classification/stl10/r50_lr0_1_head1.py b/configs/benchmarks/semi_classification/stl10/r50_lr0_1_head1.py new file mode 100644 index 00000000..9ea9112c --- /dev/null +++ b/configs/benchmarks/semi_classification/stl10/r50_lr0_1_head1.py @@ -0,0 +1,6 @@ +_base_ = 'base.py' +# optimizer +optimizer = dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0001, + paramwise_options={'\Ahead.': dict(lr_mult=1)}) + +# CUDA_VISIBLE_DEVICES=1 bash benchmarks/dist_train_semi_1gpu.sh configs/benchmarks/semi_classification/stl10/r50_lr0_1_head1.py ${WEIGHT_FILE} diff --git a/configs/benchmarks/semi_classification/stl10/r50_lr0_1_head10.py b/configs/benchmarks/semi_classification/stl10/r50_lr0_1_head10.py new file mode 100644 index 00000000..9b8c6f1c --- /dev/null +++ b/configs/benchmarks/semi_classification/stl10/r50_lr0_1_head10.py @@ -0,0 +1,4 @@ +_base_ = 'base.py' +# optimizer +optimizer = dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0001, + paramwise_options={'\Ahead.': dict(lr_mult=10)}) diff --git a/configs/benchmarks/semi_classification/stl10/r50_lr0_1_head100.py b/configs/benchmarks/semi_classification/stl10/r50_lr0_1_head100.py new file mode 100644 index 00000000..f7cc627a --- /dev/null +++ b/configs/benchmarks/semi_classification/stl10/r50_lr0_1_head100.py @@ -0,0 +1,4 @@ +_base_ = 'base.py' +# optimizer +optimizer = dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0001, + paramwise_options={'\Ahead.': dict(lr_mult=100)}) diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md new file mode 100644 index 00000000..b3ff4668 --- /dev/null +++ 
@@ -0,0 +1,37 @@
+## Changelog
+
+### v0.3.0 (14/10/2020)
+
+#### Highlights
+* Support Mixed Precision Training
+* Improved GaussianBlur, which doubles the training speed
+* More benchmarking results
+
+#### Bug Fixes
+* Fix bugs in MoCo v2; the results are now reproducible.
+* Fix bugs in BYOL.
+
+#### New Features
+* Mixed Precision Training
+* The improved GaussianBlur doubles the training speed of MoCo v2, SimCLR and BYOL
+* More benchmarking results, including Places, VOC and COCO
+
+### v0.2.0 (26/6/2020)
+
+#### Highlights
+* Support BYOL
+* Support semi-supervised benchmarks
+
+#### Bug Fixes
+* Fix the hash id in publish_model.py
+
+#### New Features
+
+* Support BYOL.
+* Separate train and test scripts in linear/semi evaluation.
+* Support semi-supervised benchmarks: benchmarks/dist_train_semi.sh.
+* Move benchmark-related configs into configs/benchmarks/.
+* Provide benchmarking results and model download links.
+* Support updating the network every several iterations.
+* Support the LARS optimizer with Nesterov momentum.
+* Support excluding specific parameters from LARS adaptation and weight decay, as required by SimCLR and BYOL.
diff --git a/docs/GETTING_STARTED.md b/docs/GETTING_STARTED.md
new file mode 100644
index 00000000..bb3a56fe
--- /dev/null
+++ b/docs/GETTING_STARTED.md
@@ -0,0 +1,287 @@
+# Getting Started
+
+This page provides basic tutorials on the usage of OpenSelfSup.
+For installation instructions, please see [INSTALL.md](INSTALL.md).
+
+## Train existing methods
+
+**Note**: The default learning rate in the config files is for 8 GPUs. If you use a different number of GPUs, the total batch size changes in proportion, so you have to scale the learning rate following `new_lr = old_lr * new_ngpus / old_ngpus`; e.g., a config tuned with lr=0.03 for 8 GPUs should use lr=0.015 on 4 GPUs. We recommend using `tools/dist_train.sh` even with 1 GPU, since some methods do not support non-distributed training.
+
+### Train with single/multiple GPUs
+
+```shell
+bash tools/dist_train.sh ${CONFIG_FILE} ${GPUS} [optional arguments]
+```
+Optional arguments are:
+- `--resume_from ${CHECKPOINT_FILE}`: Resume from a previous checkpoint file.
+- `--pretrained ${PRETRAIN_WEIGHTS}`: Load pretrained weights for the backbone.
+- `--deterministic`: Switch on "deterministic" mode, which slows down training but makes the results reproducible.
+
+An example:
+```shell
+# checkpoints and logs are saved in WORK_DIR=work_dirs/selfsup/odc/r50_v1/
+bash tools/dist_train.sh configs/selfsup/odc/r50_v1.py 8
+```
+**Note**: During training, checkpoints and logs are saved in the same folder structure as the config file under `work_dirs/`. A custom work directory is not recommended, since the evaluation scripts infer work directories from the config file name. If you want to save your weights somewhere else, please use a symlink, for example:
+
+```shell
+ln -s /DATA/xhzhan/openselfsup_workdirs ${OPENSELFSUP}/work_dirs
+```
+
+Alternatively, if you run OpenSelfSup on a cluster managed with [slurm](https://slurm.schedmd.com/):
+```shell
+SRUN_ARGS="${SRUN_ARGS}" bash tools/srun_train.sh ${PARTITION} ${CONFIG_FILE} ${GPUS} [optional arguments]
+```
+
+An example:
+```shell
+SRUN_ARGS="-w xx.xx.xx.xx" bash tools/srun_train.sh Dummy configs/selfsup/odc/r50_v1.py 8 --resume_from work_dirs/selfsup/odc/r50_v1/epoch_100.pth
+```
+
+### Train with multiple machines
+
+If you launch with multiple machines simply connected via Ethernet, you have to modify `tools/dist_train.sh` or create a new script; please refer to the PyTorch [launch utility](https://pytorch.org/docs/stable/distributed.html#launch-utility). Training is usually slow if you do not have high-speed networking like InfiniBand.
+
+If you launch with slurm, the command is the same as single-machine training described above. You only need to change `${GPUS}`, e.g., to 16 for two 8-GPU machines.
+
+### Launch multiple jobs on a single machine
+
+If you launch multiple jobs on a single machine, e.g., 2 jobs of 4-GPU training on a machine with 8 GPUs,
+you need to specify different ports (29500 by default) for each job to avoid communication conflicts.
+
+If you use `dist_train.sh` to launch training jobs:
+```shell
+CUDA_VISIBLE_DEVICES=0,1,2,3 PORT=29500 bash tools/dist_train.sh ${CONFIG_FILE} 4
+CUDA_VISIBLE_DEVICES=4,5,6,7 PORT=29501 bash tools/dist_train.sh ${CONFIG_FILE} 4
+```
+
+If you launch training jobs with slurm:
+```shell
+GPUS_PER_NODE=4 bash tools/srun_train.sh ${PARTITION} ${CONFIG_FILE} 4 --port 29500
+GPUS_PER_NODE=4 bash tools/srun_train.sh ${PARTITION} ${CONFIG_FILE} 4 --port 29501
+```
+
+### What if I do not have so many GPUs?
+
+Assuming that you only have 1 GPU that can hold 64 images in a batch, while you expect the batch size to be 256, you may add the following line to your config file. It performs a network update every 4 iterations, so the equivalent batch size is 64 * 4 = 256. Of course, it is about 4x slower than using 4 GPUs. Note that this workaround is not applicable to methods like SimCLR which require intra-batch communication.
+
+```python
+optimizer_config = dict(update_interval=4)
+```
+
+### Mixed Precision Training (Optional)
+We use [Apex](https://github.com/NVIDIA/apex) to implement Mixed Precision Training.
+If you want to use it, add the following to the config file:
+```python
+use_fp16 = True
+optimizer_config = dict(use_fp16=use_fp16)
+```
+An example:
+```shell
+bash tools/dist_train.sh configs/selfsup/moco/r50_v1_fp16.py 8
+```
+
+### Speeding Up IO (Optional)
+1. Prefetching data helps to speed up IO and makes better use of CUDA stream parallelization.
+If you want to use it, you can activate it in the config file (disabled by default):
+```python
+prefetch = True
+```
+2. The costly `ToTensor` operation is reimplemented along with prefetching.
+
+3. Replace Pillow with [Pillow-SIMD](https://github.com/uploadcare/pillow-simd.git) to make use of SIMD instruction sets on modern CPUs:
+```shell
+pip uninstall pillow
+pip install Pillow-SIMD
+# or, if AVX2 is available:
+CC="cc -mavx2" pip install -U --force-reinstall pillow-simd
+```
+We tested this with MoCo v2 using a total batch size of 256 on Tesla V100 GPUs; the training time per step decreased from 0.23s to 0.17s.
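+
+To make the prefetch idea concrete, here is a rough, hypothetical sketch of
+the pattern behind `prefetch = True` (not the repo's actual implementation):
+the loader yields uint8 batches, and the `ToTensor`/`Normalize` work is done
+on the GPU while the next batch is copied on a side CUDA stream.
+
+```python
+import torch
+
+class DataPrefetcher(object):
+    """Overlap host-to-device copies with compute (illustrative only)."""
+
+    def __init__(self, loader, mean, std):
+        self.loader = iter(loader)
+        self.stream = torch.cuda.Stream()
+        # per-channel statistics, shaped to broadcast over NCHW batches
+        self.mean = torch.tensor(mean).cuda().view(1, 3, 1, 1)
+        self.std = torch.tensor(std).cuda().view(1, 3, 1, 1)
+        self._preload()
+
+    def _preload(self):
+        try:
+            batch = next(self.loader)  # uint8 NCHW tensor from the loader
+        except StopIteration:
+            self.next_batch = None
+            return
+        with torch.cuda.stream(self.stream):
+            # async copy, then ToTensor/Normalize on the GPU
+            batch = batch.cuda(non_blocking=True).float().div_(255.)
+            self.next_batch = (batch - self.mean) / self.std
+
+    def next(self):
+        torch.cuda.current_stream().wait_stream(self.stream)
+        batch = self.next_batch
+        if batch is not None:
+            self._preload()
+        return batch
+```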
+
+## Benchmarks
+
+We provide several standard benchmarks to evaluate representation learning. We do NOT recommend changing the evaluation config files or scripts mentioned below if you want to use this repo in your publications, so that all methods are compared fairly.
+
+### VOC07 Linear SVM & Low-shot Linear SVM
+
+```shell
+# test by epoch (only applicable to experiments trained with OpenSelfSup)
+bash benchmarks/dist_test_svm_epoch.sh ${CONFIG_FILE} ${EPOCH} ${FEAT_LIST} ${GPUS}
+# test a pretrained model (applicable to any pre-trained models)
+bash benchmarks/dist_test_svm_pretrain.sh ${CONFIG_FILE} ${PRETRAIN} ${FEAT_LIST} ${GPUS}
+```
+Arguments:
+- `${CONFIG_FILE}`: the config file of the self-supervised experiment.
+- `${FEAT_LIST}`: a string specifying which of the features from layer1 to layer5 to evaluate; e.g., to evaluate layer5 only, `FEAT_LIST` is `"feat5"`; to evaluate all features, `FEAT_LIST` is `"feat1 feat2 feat3 feat4 feat5"` (separated by spaces). If left empty, the default `FEAT_LIST` is `"feat5"`.
+- `${GPUS}`: the number of GPUs used to extract features.
+
+Working directories:
+The generated features, logs and intermediate files are saved in `$SVM_WORK_DIR/` as follows:
+- `dist_test_svm_epoch.sh`: `SVM_WORK_DIR=$WORK_DIR/` (the same as that mentioned in `Train with single/multiple GPUs` above). Hence, the files will be overwritten to save space when evaluating with a new `$EPOCH`.
+- `dist_test_svm_pretrain.sh`: `SVM_WORK_DIR=$WORK_DIR/$PRETRAIN_NAME/`; e.g., if `PRETRAIN=pretrains/odc_r50_v1-5af5dd0c.pth`, then `PRETRAIN_NAME=odc_r50_v1-5af5dd0c.pth`; if `PRETRAIN=random`, then `PRETRAIN_NAME=random`.
+
+Notes:
+- The evaluation records are saved in `$SVM_WORK_DIR/logs/eval_svm.log`.
+- When using `benchmarks/dist_test_svm_epoch.sh`, DO NOT launch multiple tests of the same experiment with different epochs, since they share the same working directory.
+- The linear SVM takes about 5 minutes; the low-shot linear SVM takes about 1 hour with 32 CPU cores. If you want to save time, you may delete or comment out the low-shot SVM testing command (the last line in the scripts).
+
+### ImageNet / Places205 Linear Classification
+
+**First**, extract backbone weights:
+```shell
+python tools/extract_backbone_weights.py ${CHECKPOINT} ${WEIGHT_FILE}
+```
+Arguments:
+- `CHECKPOINT`: the checkpoint file of a self-supervised method, named `epoch_*.pth`.
+- `WEIGHT_FILE`: the output backbone weights file, e.g., `pretrains/moco_r50_v1-4ad89b5c.pth`.
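+
+Under the hood this is simple checkpoint surgery. The following is a minimal
+sketch of what the extraction amounts to, assuming the usual mmcv checkpoint
+layout (`{'state_dict': ...}`); `tools/extract_backbone_weights.py` is the
+authoritative version:
+
+```python
+import torch
+
+def extract_backbone(checkpoint, weight_file):
+    ckpt = torch.load(checkpoint, map_location='cpu')
+    state_dict = ckpt.get('state_dict', ckpt)
+    # keep only backbone parameters and drop the 'backbone.' prefix
+    backbone = {k[len('backbone.'):]: v
+                for k, v in state_dict.items() if k.startswith('backbone.')}
+    torch.save({'state_dict': backbone}, weight_file)
+```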
+
+**Next**, train and test linear classification:
+```shell
+# train
+bash benchmarks/dist_train_linear.sh ${CONFIG_FILE} ${WEIGHT_FILE} [optional arguments]
+# test (unnecessary if validation is included in training)
+bash tools/dist_test.sh ${CONFIG_FILE} ${GPUS} ${CHECKPOINT}
+```
+Arguments:
+- `CONFIG_FILE`: Use config files under "configs/benchmarks/linear_classification/". Note that to test DeepCluster, which has a Sobel layer before the backbone, you have to use a config file named `*_sobel.py`, e.g., `configs/benchmarks/linear_classification/imagenet/r50_multihead_sobel.py`.
+- Optional arguments include:
+  - `--resume_from ${CHECKPOINT_FILE}`: Resume from a previous checkpoint file.
+  - `--deterministic`: Switch on "deterministic" mode, which slows down training but makes the results reproducible.
+
+Working directories:
+E.g., if you use `configs/benchmarks/linear_classification/imagenet/r50_multihead.py` to evaluate `pretrains/moco_r50_v1-4ad89b5c.pth`, then the working directory for this evaluation is `work_dirs/benchmarks/linear_classification/imagenet/r50_multihead/moco_r50_v1-4ad89b5c.pth/`.
+
+### ImageNet Semi-Supervised Classification
+
+```shell
+# train
+bash benchmarks/dist_train_semi.sh ${CONFIG_FILE} ${WEIGHT_FILE} [optional arguments]
+# test (unnecessary if validation is included in training)
+bash tools/dist_test.sh ${CONFIG_FILE} ${GPUS} ${CHECKPOINT}
+```
+Arguments:
+- `CONFIG_FILE`: Use config files under "configs/benchmarks/semi_classification/". Note that to test DeepCluster, which has a Sobel layer before the backbone, you have to use a config file named `*_sobel.py`, e.g., `configs/benchmarks/semi_classification/imagenet_1percent/r50_sobel.py`.
+- Optional arguments include:
+  - `--resume_from ${CHECKPOINT_FILE}`: Resume from a previous checkpoint file.
+  - `--deterministic`: Switch on "deterministic" mode, which slows down training but makes the results reproducible.
+
+### VOC07+12 / COCO17 Object Detection
+
+For more details on setting up the environment for detection, please refer to the detection [README](https://github.com/open-mmlab/OpenSelfSup/blob/master/benchmarks/detection/README.md).
+
+```shell
+conda activate detectron2  # use the detectron2 environment here, otherwise use the open-mmlab environment
+cd benchmarks/detection
+python convert-pretrain-to-detectron2.py ${WEIGHT_FILE} ${OUTPUT_FILE}  # must use .pkl as the output extension
+bash run.sh ${DET_CFG} ${OUTPUT_FILE}
+```
+Arguments:
+- `WEIGHT_FILE`: the backbone weights extracted as described above.
+- `OUTPUT_FILE`: the converted backbone weights file, e.g., `odc_v1.pkl`.
+- `DET_CFG`: the detectron2 config file; usually we use `configs/pascal_voc_R_50_C4_24k_moco.yaml`.
+
+**Note**:
+- This benchmark must use 8 GPUs, following the default setting from MoCo.
+- Please report the mean of 5 trials in your official paper, following MoCo.
+- DeepCluster, which uses a Sobel layer, is not supported by detectron2.
+
+## Tools and Tips
+
+### Count number of parameters
+
+```shell
+python tools/count_parameters.py ${CONFIG_FILE}
+```
+
+### Publish a model
+
+Compute the hash of the weight file and append the hash id to the filename. The output file is the input file name with a hash suffix.
+
+```shell
+python tools/publish_model.py ${WEIGHT_FILE}
+```
+Arguments:
+- `WEIGHT_FILE`: the backbone weights extracted as described above.
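+
+The operation is just hashing and renaming. A minimal sketch of the idea,
+assuming a sha256 hash truncated to 8 hex characters (which is what released
+filenames such as `-4ad89b5c` suggest; `tools/publish_model.py` is
+authoritative):
+
+```python
+import hashlib
+import shutil
+
+def publish(weight_file):
+    with open(weight_file, 'rb') as f:
+        sha = hashlib.sha256(f.read()).hexdigest()[:8]
+    out = weight_file.replace('.pth', '-%s.pth' % sha)
+    shutil.copy(weight_file, out)  # e.g. moco_r50_v1.pth -> moco_r50_v1-xxxxxxxx.pth
+    return out
+```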
+
+### Reproducibility
+
+If you want to make your performance exactly reproducible, please switch on `--deterministic` when training the final model to be published. Note that this flag switches off `torch.backends.cudnn.benchmark` and slows down the training speed.
+
+## How-to
+
+### Use a new dataset
+
+1. Write a data source file under `openselfsup/datasets/data_sources/`. You may refer to the existing ones.
+
+2. Create new config files for your experiments.
+
+### Design your own methods
+
+#### What you need to do
+
+1. Create a dataset file under `openselfsup/datasets/` (better using existing ones);
+2. Create a model file under `openselfsup/models/`. The model typically contains:
+   i) backbone (required): maps images to deep features from different depths of layers. Your model must contain a `self.backbone` module, otherwise the backbone weights cannot be extracted;
+   ii) neck (optional): maps deep features to compact feature vectors;
+   iii) head (optional): defines loss functions;
+   iv) memory_bank (optional): defines memory banks.
+3. Create a config file under `configs/` and set up the configs;
+4. [Optional] Create a hook file under `openselfsup/hooks/` if your method requires additional operations before the run, every several iterations, every several epochs, or after the run.
+
+You may refer to existing modules under the respective folders.
+
+#### Features that may facilitate your implementation
+
+* Decoupled data source and dataset.
+
+Since a dataset is tied to a specific task while a data source is general, we decouple data sources and datasets in OpenSelfSup.
+
+```python
+data = dict(
+    train=dict(type='ContrastiveDataset',
+        data_source=dict(type='ImageNet', list_file='xx', root='xx'),
+        pipeline=train_pipeline),
+    val=dict(...),
+    ...
+)
+```
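+
+As an example of the decoupling, a hypothetical minimal data source could
+look like the sketch below (the class name and exact method names are
+illustrative; see `openselfsup/datasets/data_sources/` for the real
+interface used by `ImageNet` etc.):
+
+```python
+from PIL import Image
+
+class TxtListSource(object):
+    """Reads 'filename' or 'filename label' lines from a list file."""
+
+    def __init__(self, list_file, root):
+        with open(list_file) as f:
+            lines = [l.strip().split() for l in f if l.strip()]
+        self.fns = [l[0] for l in lines]
+        self.has_labels = len(lines[0]) == 2
+        self.labels = [int(l[1]) for l in lines] if self.has_labels else None
+        self.root = root
+
+    def get_length(self):
+        return len(self.fns)
+
+    def get_sample(self, idx):
+        img = Image.open('%s/%s' % (self.root, self.fns[idx])).convert('RGB')
+        return (img, self.labels[idx]) if self.has_labels else img
+```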
+
+* Configure data augmentations in the config file.
+
+The augmentations are the same as `torchvision.transforms`, except that `torchvision.transforms.RandomApply` corresponds to `RandomAppliedTrans`. `Lighting` and `GaussianBlur` are additionally implemented.
+
+```python
+img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+train_pipeline = [
+    dict(type='RandomResizedCrop', size=224),
+    dict(type='RandomAppliedTrans',
+        transforms=[
+            dict(type='GaussianBlur', sigma_min=0.1, sigma_max=2.0, kernel_size=23)],
+        p=0.5),
+    dict(type='ToTensor'),
+    dict(type='Normalize', **img_norm_cfg)
+]
+```
+
+* Parameter-wise optimization parameters.
+
+You may specify optimization parameters, including lr, momentum and weight_decay, for a certain group of parameters in the config file with `paramwise_options`. `paramwise_options` is a dict whose keys are regular expressions (matched against parameter names) and whose values are options. The options include 7 fields: lr, lr_mult, momentum, momentum_mult, weight_decay, weight_decay_mult, and lars_exclude (the last only works with the LARS optimizer).
+
+```python
+# this config sets all normalization layers with weight_decay_mult=0.1,
+# and the head with `lr_mult=10, momentum=0`.
+paramwise_options = {
+    '(bn|gn)(\d+)?.(weight|bias)': dict(weight_decay_mult=0.1),
+    '\Ahead.': dict(lr_mult=10, momentum=0)}
+optimizer_cfg = dict(type='SGD', lr=0.01, momentum=0.9,
+                     weight_decay=0.0001,
+                     paramwise_options=paramwise_options)
+```
+
+* Configure custom hooks in the config file.
+
+The hooks will be called in order. For hook design, please refer to [odc_hook.py](https://github.com/open-mmlab/OpenSelfSup/blob/master/openselfsup/hooks/odc_hook.py) as an example.
+
+```python
+custom_hooks = [
+    dict(type='DeepClusterHook', ...),
+    dict(type='ODCHook', ...),
+]
+```
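+
+Concretely, a new hook is a class exposing the mmcv `Hook` callbacks. A
+hypothetical skeleton (the class name and hook bodies are illustrative;
+register it with the hook registry the way the existing hooks such as
+odc_hook.py do):
+
+```python
+from mmcv.runner import Hook
+
+class MyHook(Hook):
+
+    def __init__(self, interval=1):
+        self.interval = interval
+
+    def before_run(self, runner):
+        pass  # e.g., build a memory bank before training starts
+
+    def after_train_iter(self, runner):
+        if self.every_n_iters(runner, self.interval):
+            pass  # e.g., update cluster assignments every `interval` iterations
+```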
diff --git a/docs/INSTALL.md b/docs/INSTALL.md
new file mode 100644
index 00000000..7420e0c9
--- /dev/null
+++ b/docs/INSTALL.md
@@ -0,0 +1,160 @@
+## Installation
+
+### Requirements
+
+- Linux (Windows is not officially supported)
+- Python 3.5+
+- PyTorch 1.1 or higher
+- CUDA 9.0 or higher
+- NCCL 2
+- GCC 4.9 or higher
+- [mmcv](https://github.com/open-mmlab/mmcv)
+
+We have tested the following versions of OS and software:
+
+- OS: Ubuntu 16.04/18.04 and CentOS 7.2
+- CUDA: 9.0/9.2/10.0/10.1/11.0
+- NCCL: 2.1.15/2.2.13/2.3.7/2.4.2 (PyTorch 1.1 with NCCL 2.4.2 has a deadlock bug, see [here](https://github.com/open-mmlab/OpenSelfSup/issues/6))
+- GCC(G++): 4.9/5.3/5.4/7.3
+
+### Install openselfsup
+
+a. Create a conda virtual environment and activate it.
+
+```shell
+conda create -n open-mmlab python=3.7 -y
+conda activate open-mmlab
+```
+
+b. Install PyTorch and torchvision following the [official instructions](https://pytorch.org/), e.g.,
+
+```shell
+conda install pytorch torchvision -c pytorch
+# or, assuming CUDA=10.1: pip install torch==1.6.0+cu101 torchvision==0.7.0+cu101 -f https://download.pytorch.org/whl/torch_stable.html
+```
+
+c. Install other third-party libraries (optional).
+
+```shell
+conda install faiss-gpu cudatoolkit=10.1 -c pytorch  # optional for DeepCluster and ODC, assuming CUDA=10.1
+pip install umap-learn  # optional for umap visualization
+pip install opencv-contrib-python  # optional for SaliencyMix (cv2.saliency.StaticSaliencyFineGrained_create())
+```
+
+d. Clone the openselfsup repository.
+
+```shell
+git clone https://github.com/open-mmlab/openselfsup.git
+cd openselfsup
+```
+
+e. Install.
+
+```shell
+pip install -v -e .  # or "python setup.py develop"
+```
+
+f. Install Apex (optional), following the [official instructions](https://github.com/NVIDIA/apex), e.g.
+```shell
+git clone https://github.com/NVIDIA/apex
+cd apex
+pip install -v --disable-pip-version-check --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" ./
+```
+
+Note:
+
+1. The git commit id will be written into the version number in step e, e.g. 0.6.0+2e7045c. The version will also be saved in trained models.
+
+2. Following the above instructions, openselfsup is installed in `dev` mode; any local modifications made to the code will take effect without reinstalling it (unless you submit some commits and want to update the version number).
+
+3. If you would like to use `opencv-python-headless` instead of `opencv-python`,
+you can install it before installing MMCV.
+
+### Prepare datasets
+
+It is recommended to symlink your dataset root (assuming `$YOUR_DATA_ROOT`) to `$OPENSELFSUP/data`.
+If your folder structure is different, you may need to change the corresponding paths in the config files.
+
+#### Prepare PASCAL VOC
+
+Assuming that you usually store datasets in `$YOUR_DATA_ROOT` (e.g., `/home/xhzhan/data/`), the script below automatically downloads PASCAL VOC 2007 into `$YOUR_DATA_ROOT`, prepares the required files, creates a folder `data` under `$OPENSELFSUP` and makes the symlink `VOCdevkit`.
+
+```shell
+cd $OPENSELFSUP
+bash tools/prepare_data/prepare_voc07_cls.sh $YOUR_DATA_ROOT
+```
+
+#### Prepare ImageNet and Places205
+
+Taking ImageNet as an example, you need to: 1) download ImageNet; 2) create the following list files under `$IMAGENET/meta/`, or download them [here](https://drive.google.com/drive/folders/1wYkJU_1qRHEt1LPVjBiG6ddUFV-t9hVJ?usp=sharing): `train.txt` and `val.txt` contain one image file name per line; `train_labeled.txt` and `val_labeled.txt` contain `filename[space]label\n` in each line; `train_labeled_*percent.txt` are the down-sampled lists for semi-supervised evaluation; 3) create a symlink under `$OPENSELFSUP/data/`.
+
+In the end, the folder structure looks like:
+
+```
+OpenSelfSup
+├── openselfsup
+├── benchmarks
+├── configs
+├── data
+│   ├── VOCdevkit
+│   │   ├── VOC2007
+│   │   ├── VOC2012
+│   ├── imagenet
+│   │   ├── meta
+│   │   │   ├── train.txt (for self-sup training, "filename\n" in each line)
+│   │   │   ├── train_labeled.txt (for linear evaluation, "filename[space]label\n" in each line)
+│   │   │   ├── train_labeled_1percent.txt (for semi-supervised evaluation)
+│   │   │   ├── train_labeled_10percent.txt (for semi-supervised evaluation)
+│   │   │   ├── val.txt
+│   │   │   ├── val_labeled.txt (for evaluation)
+│   │   ├── train
+│   │   ├── val
+│   ├── places205
+│   │   ├── meta
+│   │   │   ├── train.txt
+│   │   │   ├── train_labeled.txt
+│   │   │   ├── val.txt
+│   │   │   ├── val_labeled.txt
+│   │   ├── train
+│   │   ├── val
+```
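+
+If you need to build the labeled lists yourself, a small helper along these
+lines would produce the `filename[space]label` format described above (the
+function name and the folder-per-class layout are illustrative assumptions,
+not part of the repo):
+
+```python
+import os
+
+def write_labeled_list(img_root, out_file):
+    """Write 'relative/path.jpg label' lines, one class per sub-folder."""
+    classes = sorted(os.listdir(img_root))
+    with open(out_file, 'w') as f:
+        for label, cls in enumerate(classes):
+            for fn in sorted(os.listdir(os.path.join(img_root, cls))):
+                f.write('%s/%s %d\n' % (cls, fn, label))
+```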
+
+### A from-scratch setup script
+
+Here is a full script that sets up openselfsup with conda and links the dataset paths. The script does not download the ImageNet and Places datasets; you have to prepare them on your own.
+
+```shell
+conda create -n open-mmlab python=3.7 -y
+conda activate open-mmlab
+
+conda install -c pytorch pytorch torchvision -y
+git clone https://github.com/open-mmlab/OpenSelfSup.git
+cd OpenSelfSup
+pip install -v -e .
+
+bash tools/prepare_data/prepare_voc07_cls.sh $YOUR_DATA_ROOT
+ln -s $IMAGENET_ROOT data/imagenet
+ln -s $PLACES_ROOT data/places205
+```
+
+### Using multiple OpenSelfSup versions
+
+If there is more than one openselfsup installation on your machine and you want to use them alternately, the recommended way is to create multiple conda environments and use a different environment for each version.
+
+Another way is to insert the following code into the main scripts (`train.py`, `test.py` or any other scripts you run):
+```python
+import os.path as osp
+import sys
+sys.path.insert(0, osp.join(osp.dirname(osp.abspath(__file__)), '../'))
+```
+
+Or run the following command in the terminal of the corresponding folder to temporarily use the current version:
+```shell
+export PYTHONPATH=`pwd`:$PYTHONPATH
+```
+
+## Common Issues
+
+1. The training hangs / deadlocks in some intermediate iteration. See this [issue](https://github.com/open-mmlab/OpenSelfSup/issues/6).
diff --git a/docs/MODEL_ZOO.md b/docs/MODEL_ZOO.md
new file mode 100644
index 00000000..2d75be82
--- /dev/null
+++ b/docs/MODEL_ZOO.md
@@ -0,0 +1,184 @@
+# Model Zoo
+
+**OpenSelfSup needs your contribution!
+Since we don't have sufficient GPUs to run these large-scale experiments, your contributions, including parameter studies, reproduction of results, implementation of new methods, etc., are essential to make OpenSelfSup better. Your contribution will be recorded in the tables below; top contributors will be included in the author list of OpenSelfSup!**
+
+## Pre-trained model download links and speed test
+
+**Note**
+* Unless otherwise indicated, the testing GPUs are NVIDIA Tesla V100.
+* The tables record the implementors, who implemented the methods (either by themselves or by refactoring other repos), and the experimenters, who performed the experiments and reproduced the results. The experimenters are responsible for the evaluation results on all the benchmarks, and the implementors are responsible for the implementation as well as the results; if no experimenter is indicated, the implementor is the experimenter by default.
+
+| Method (Implementor) | Config (Experimenter) | Remarks | Download link | Batch size | Epochs |
+|---|---|---|---|---|---|
+| ImageNet | - | torchvision | imagenet_r50-21352794.pth | - | - |
+| Random | - | kaiming | random_r50-5d0fa71b.pth | - | - |
+| Relative-Loc (@Jiahao000) | selfsup/relative_loc/r50.py | default | relative_loc_r50-342c9097.pth | 512 | 70 |
+| Rotation-Pred (@XiaohangZhan) | selfsup/rotation_pred/r50.py | default | rotation_r50-cfab8ebb.pth | 128 | 70 |
+| DeepCluster (@XiaohangZhan) | selfsup/deepcluster/r50.py | default | deepcluster_r50-bb8681e2.pth | 512 | 200 |
+| NPID (@XiaohangZhan) | selfsup/npid/r50.py | default | npid_r50-dec3df0c.pth | 256 | 200 |
+| | selfsup/npid/r50_ensure_neg.py | ensure_neg=True | npid_r50_ensure_neg-ce09b7ae.pth | | |
+| ODC (@XiaohangZhan) | selfsup/odc/r50_v1.py (@Jiahao000) | default | odc_r50_v1-5af5dd0c.pth | 512 | 440 |
+| MoCo (@XiaohangZhan) | selfsup/moco/r50_v1.py | default | moco_r50_v1-4ad89b5c.pth | 256 | 200 |
+| MoCo v2 (@XiaohangZhan) | selfsup/moco/r50_v2.py | default | moco_r50_v2-e3b0c442.pth | 256 | 200 |
+| SimCLR (@XiaohangZhan) | selfsup/simclr/r50_bs256_ep200.py | default | simclr_r50_bs256_ep200-4577e9a6.pth | 256 | 200 |
+| | selfsup/simclr/r50_bs256_ep200_mocov2_neck.py | -> MoCo v2 neck | simclr_r50_bs256_ep200_mocov2_neck-0d6e5ff2.pth | | |
+| BYOL (@XiaohangZhan) | selfsup/byol/r50_bs4096_ep200.py (@xieenze) | default | byol_r50-e3b0c442.pth | 4096 | 200 |
+| | selfsup/byol/r50_bs256_accumulate16_ep300.py (@scnuhealthy) | default | byol_r50_bs256_accmulate16_ep300-5df46722.pth | 256 | 300 |
+| | selfsup/byol/r50_bs2048_accumulate2_ep200_fp16.py (@xieenze) | default | byol_r50_bs2048_accmulate2_ep200-e3b0c442.pth | 256 | 200 |
+
+## Benchmarks
+
+### VOC07 SVM & SVM Low-shot
+
+The low-shot columns report VOC07 SVM results with k labeled samples per class.
+
+| Method | Config | Remarks | Best layer | VOC07 SVM | k=1 | k=2 | k=4 | k=8 | k=16 | k=32 | k=64 | k=96 |
+|---|---|---|---|---|---|---|---|---|---|---|---|---|
+| ImageNet | - | torchvision | feat5 | 87.17 | 52.99 | 63.55 | 73.7 | 78.79 | 81.76 | 83.75 | 85.18 | 85.97 |
+| Random | - | kaiming | feat2 | 30.54 | 9.15 | 9.39 | 11.09 | 12.3 | 14.3 | 17.41 | 21.32 | 23.77 |
+| Relative-Loc | selfsup/relative_loc/r50.py | default | feat4 | 64.78 | 18.17 | 22.08 | 29.37 | 35.58 | 41.8 | 48.73 | 55.55 | 58.33 |
+| Rotation-Pred | selfsup/rotation_pred/r50.py | default | feat4 | 67.38 | 18.91 | 23.33 | 30.57 | 38.22 | 45.83 | 52.23 | 58.08 | 61.11 |
+| DeepCluster | selfsup/deepcluster/r50.py | default | feat5 | 74.26 | 29.73 | 37.66 | 45.85 | 55.57 | 62.48 | 66.15 | 70.0 | 71.37 |
+| NPID | selfsup/npid/r50.py | default | feat5 | 74.50 | 24.19 | 31.24 | 39.69 | 50.99 | 59.03 | 64.4 | 68.69 | 70.84 |
+| | selfsup/npid/r50_ensure_neg.py | ensure_neg=True | feat5 | 75.70 | | | | | | | | |
+| ODC | selfsup/odc/r50_v1.py | default | feat5 | 78.42 | 32.42 | 40.27 | 49.95 | 59.96 | 65.71 | 69.99 | 73.64 | 75.13 |
+| MoCo | selfsup/moco/r50_v1.py | default | feat5 | 79.18 | 30.03 | 37.73 | 47.64 | 58.78 | 66.0 | 70.6 | 74.6 | 76.07 |
+| MoCo v2 | selfsup/moco/r50_v2.py | default | feat5 | 84.26 | 43.0 | 52.48 | 63.43 | 71.74 | 76.35 | 78.9 | 81.31 | 82.45 |
+| SimCLR | selfsup/simclr/r50_bs256_ep200.py | default | feat5 | 78.95 | 32.45 | 40.76 | 50.4 | 59.01 | 65.45 | 70.13 | 73.58 | 75.35 |
+| | selfsup/simclr/r50_bs256_ep200_mocov2_neck.py | -> MoCo v2 neck | feat5 | 77.65 | | | | | | | | |
+| BYOL | selfsup/byol/r50_bs4096_ep200.py | default | feat5 | 85.10 | 44.48 | 52.09 | 62.88 | 70.87 | 76.18 | 79.45 | 81.88 | 83.08 |
+| | selfsup/byol/r50_bs256_accumulate16_ep300.py | default | feat5 | 86.58 | | | | | | | | |
+| | selfsup/byol/r50_bs2048_accumulate2_ep200.py | default | feat5 | 85.86 | | | | | | | | |
+
+### ImageNet Linear Classification
+
+**Note**
+* Config: `configs/benchmarks/linear_classification/imagenet/r50_multihead.py` for ImageNet (Multi) and `configs/benchmarks/linear_classification/imagenet/r50_last.py` for ImageNet (Last).
+* For DeepCluster, use the corresponding config with `_sobel`.
+* ImageNet (Multi) evaluates features in around 9k dimensions from different layers. The top-1 result of the last epoch is reported.
+* ImageNet (Last) evaluates the last feature after global average pooling, e.g., 2048 dimensions for ResNet-50. The best top-1 result among all epochs is reported.
+* Usually, we report the best result between ImageNet (Multi) and ImageNet (Last) to ensure fairness, since different methods achieve their best performance on different layers.
+
+| Method | Config | Remarks | feat1 | feat2 | feat3 | feat4 | feat5 | avgpool (Last) |
+|---|---|---|---|---|---|---|---|---|
+| ImageNet | - | torchvision | 15.18 | 33.96 | 47.86 | 67.56 | 76.17 | 74.12 |
+| Random | - | kaiming | 11.37 | 16.21 | 13.47 | 9.07 | 6.54 | 4.35 |
+| Relative-Loc | selfsup/relative_loc/r50.py | default | 14.76 | 31.29 | 45.77 | 49.31 | 40.20 | 38.83 |
+| Rotation-Pred | selfsup/rotation_pred/r50.py | default | 12.89 | 34.30 | 44.91 | 54.99 | 49.09 | 47.01 |
+| DeepCluster | selfsup/deepcluster/r50.py | default | 12.78 | 30.81 | 43.88 | 57.71 | 51.68 | 46.92 |
+| NPID | selfsup/npid/r50.py | default | 14.28 | 31.20 | 40.68 | 54.46 | 56.61 | 56.60 |
+| ODC | selfsup/odc/r50_v1.py | default | 14.76 | 31.82 | 42.44 | 55.76 | 57.70 | 53.42 |
+| MoCo | selfsup/moco/r50_v1.py | default | 15.32 | 33.08 | 44.68 | 57.27 | 60.60 | 61.02 |
+| MoCo v2 | selfsup/moco/r50_v2.py | default | 14.74 | 32.81 | 44.95 | 61.61 | 66.73 | 67.69 |
+| SimCLR | selfsup/simclr/r50_bs256_ep200.py | default | 17.09 | 31.37 | 41.38 | 54.35 | 61.57 | 60.06 |
+| | selfsup/simclr/r50_bs256_ep200_mocov2_neck.py | -> MoCo v2 neck | 16.97 | 31.88 | 41.73 | 54.33 | 59.94 | 58.00 |
+| BYOL | selfsup/byol/r50_bs4096_ep200.py | default | 16.70 | 34.22 | 46.61 | 60.78 | 69.14 | 67.10 |
+| | selfsup/byol/r50_bs256_accumulate16_ep300.py | default | 14.07 | 34.44 | 47.22 | 63.08 | 72.35 | |
+| | selfsup/byol/r50_bs2048_accumulate2_ep200_fp16.py | default | 15.52 | 34.50 | 47.22 | 62.78 | 71.61 | |
+
+### Places205 Linear Classification
+
+**Note**
+* Config: `configs/benchmarks/linear_classification/places205/r50_multihead.py`.
+* For DeepCluster, use the corresponding config with `_sobel`.
+* Places205 evaluates features in around 9k dimensions from different layers. The top-1 result of the last epoch is reported.
+
+| Method | Config | Remarks | feat1 | feat2 | feat3 | feat4 | feat5 |
+|---|---|---|---|---|---|---|---|
+| ImageNet | - | torchvision | 21.27 | 36.10 | 43.03 | 51.38 | 53.05 |
+| Random | - | kaiming | 17.19 | 21.70 | 19.23 | 14.59 | 11.73 |
+| Relative-Loc | selfsup/relative_loc/r50.py | default | 21.07 | 34.86 | 42.84 | 45.71 | 41.45 |
+| Rotation-Pred | selfsup/rotation_pred/r50.py | default | 18.65 | 35.71 | 42.28 | 45.98 | 43.72 |
+| DeepCluster | selfsup/deepcluster/r50.py | default | 18.80 | 33.93 | 41.44 | 47.22 | 42.61 |
+| NPID | selfsup/npid/r50.py | default | 20.53 | 34.03 | 40.48 | 47.13 | 47.73 |
+| ODC | selfsup/odc/r50_v1.py | default | 20.94 | 34.78 | 41.19 | 47.45 | 49.18 |
+| MoCo | selfsup/moco/r50_v1.py | default | 21.13 | 35.19 | 42.40 | 48.78 | 50.70 |
+| MoCo v2 | selfsup/moco/r50_v2.py | default | 21.88 | 35.75 | 43.65 | 49.99 | 52.57 |
+| SimCLR | selfsup/simclr/r50_bs256_ep200.py | default | 22.55 | 34.14 | 40.35 | 47.15 | 51.64 |
+| | selfsup/simclr/r50_bs256_ep200_mocov2_neck.py | -> MoCo v2 neck | | | | | |
+| BYOL | selfsup/byol/r50_bs4096_ep200.py | default | 22.28 | 35.95 | 43.03 | 49.79 | 52.75 |
+
+### ImageNet Semi-Supervised Classification
+
+**Note**
+* In this benchmark, the necks or heads are removed and only the backbone CNN is evaluated by appending a linear classification head. All parameters are fine-tuned.
+* Config: under `configs/benchmarks/semi_classification/imagenet_1percent/` for 1% data, and `configs/benchmarks/semi_classification/imagenet_10percent/` for 10% data.
+* When training with 1% ImageNet, we find that hyper-parameters, especially the learning rate, greatly influence the performance. Hence, we prepare a list of settings with the base learning rate from \{0.001, 0.01, 0.1\} and the learning rate multiplier for the head from \{1, 10, 100\}. We choose the best performing setting for each method.
+* Please use `--deterministic` in this benchmark.
+
+| Method | Config | Remarks | Optimal setting for ImageNet 1% | top-1 | top-5 |
+|---|---|---|---|---|---|
+| ImageNet | - | torchvision | r50_lr0_001_head100.py | 68.68 | 88.87 |
+| Random | - | kaiming | r50_lr0_01_head1.py | 1.56 | 4.99 |
+| Relative-Loc | selfsup/relative_loc/r50.py | default | r50_lr0_01_head100.py | 16.48 | 40.37 |
+| Rotation-Pred | selfsup/rotation_pred/r50.py | default | r50_lr0_01_head100.py | 18.98 | 44.05 |
+| DeepCluster | selfsup/deepcluster/r50.py | default | r50_lr0_01_head1_sobel.py | 33.44 | 58.62 |
+| NPID | selfsup/npid/r50.py | default | r50_lr0_01_head100.py | 27.95 | 54.37 |
+| ODC | selfsup/odc/r50_v1.py | default | r50_lr0_1_head100.py | 32.39 | 61.02 |
+| MoCo | selfsup/moco/r50_v1.py | default | r50_lr0_01_head100.py | 33.15 | 61.30 |
+| MoCo v2 | selfsup/moco/r50_v2.py | default | r50_lr0_01_head100.py | 39.07 | 68.31 |
+| SimCLR | selfsup/simclr/r50_bs256_ep200.py | default | r50_lr0_01_head100.py | 36.09 | 64.50 |
+| | selfsup/simclr/r50_bs256_ep200_mocov2_neck.py | -> MoCo v2 neck | r50_lr0_01_head100.py | 36.31 | 64.68 |
+| BYOL | selfsup/byol/r50_bs4096_ep200.py | default | r50_lr0_01_head10.py | 49.37 | 76.75 |
+
+| Method | Config | Remarks | Optimal setting for ImageNet 10% | top-1 | top-5 |
+|---|---|---|---|---|---|
+| ImageNet | - | torchvision | r50_lr0_001_head10.py | 74.53 | 92.19 |
+| Random | - | kaiming | r50_lr0_01_head1.py | 21.78 | 44.24 |
+| Relative-Loc | selfsup/relative_loc/r50.py | default | r50_lr0_01_head100.py | 53.86 | 79.62 |
+| Rotation-Pred | selfsup/rotation_pred/r50.py | default | r50_lr0_01_head100.py | 54.75 | 80.21 |
+| DeepCluster | selfsup/deepcluster/r50.py | default | r50_lr0_01_head1_sobel.py | 52.94 | 77.96 |
+| NPID | selfsup/npid/r50.py | default | r50_lr0_01_head100.py | 57.22 | 81.39 |
+| ODC | selfsup/odc/r50_v1.py | default | r50_lr0_1_head10.py | 58.15 | 82.55 |
+| MoCo | selfsup/moco/r50_v1.py | default | r50_lr0_01_head100.py | 60.08 | 84.02 |
+| MoCo v2 | selfsup/moco/r50_v2.py | default | r50_lr0_01_head100.py | 61.80 | 85.11 |
+| SimCLR | selfsup/simclr/r50_bs256_ep200.py | default | r50_lr0_01_head100.py | 58.46 | 82.60 |
+| | selfsup/simclr/r50_bs256_ep200_mocov2_neck.py | -> MoCo v2 neck | r50_lr0_01_head100.py | 58.38 | 82.53 |
+| BYOL | selfsup/byol/r50_bs4096_ep200.py | default | r50_lr0_01_head100.py | 65.94 | 87.81 |
+
+### PASCAL VOC07+12 Object Detection
+
+**Note**
+* This benchmark follows the evaluation protocols set up by MoCo.
+* Config: `benchmarks/detection/configs/pascal_voc_R_50_C4_24k_moco.yaml`.
+* Please follow [here](GETTING_STARTED.md#voc0712--coco17-object-detection) to run the evaluation.
+
+| Method | Config | Remarks | AP50 | AP | AP75 |
+|---|---|---|---|---|---|
+| ImageNet | - | torchvision | 81.58 | 54.19 | 59.80 |
+| Random | - | kaiming | 59.02 | 32.83 | 31.60 |
+| Relative-Loc | selfsup/relative_loc/r50.py | default | 80.36 | 55.13 | 61.18 |
+| Rotation-Pred | selfsup/rotation_pred/r50.py | default | 80.91 | 55.52 | 61.39 |
+| NPID | selfsup/npid/r50.py | default | 80.03 | 54.11 | 59.50 |
+| MoCo | selfsup/moco/r50_v1.py | default | 81.38 | 55.95 | 62.23 |
+| MoCo v2 | selfsup/moco/r50_v2.py | default | 82.24 | 56.97 | 63.43 |
+| SimCLR | selfsup/simclr/r50_bs256_ep200.py | default | 79.41 | 51.54 | 55.63 |
+| BYOL | selfsup/byol/r50_bs4096_ep200.py | default | 80.95 | 51.87 | 56.53 |
+
+### COCO2017 Object Detection
+
+**Note**
+* This benchmark follows the evaluation protocols set up by MoCo.
+* Config: `benchmarks/detection/configs/coco_R_50_C4_2x_moco.yaml`.
+* Please follow [here](GETTING_STARTED.md#voc0712--coco17-object-detection) to run the evaluation.
+
+| Method | Config | Remarks | AP50(Box) | AP(Box) | AP75(Box) | AP50(Mask) | AP(Mask) | AP75(Mask) |
+|---|---|---|---|---|---|---|---|---|
+| ImageNet | - | torchvision | 59.9 | 40.0 | 43.1 | 56.5 | 34.7 | 36.9 |
+| Random | - | kaiming | 54.6 | 35.6 | 38.2 | 51.5 | 31.4 | 33.5 |
+| Relative-Loc | selfsup/relative_loc/r50.py | default | 59.6 | 40.0 | 43.5 | 56.5 | 35.0 | 37.3 |
+| Rotation-Pred | selfsup/rotation_pred/r50.py | default | 59.3 | 40.0 | 43.6 | 56.0 | 34.9 | 37.4 |
+| NPID | selfsup/npid/r50.py | default | 59.0 | 39.4 | 42.8 | 55.9 | 34.5 | 36.6 |
+| MoCo | selfsup/moco/r50_v1.py | default | 60.5 | 40.9 | 44.2 | 57.1 | 35.5 | 37.7 |
+| MoCo v2 | selfsup/moco/r50_v2.py | default | 60.6 | 41.0 | 44.5 | 57.2 | 35.6 | 38.0 |
+| SimCLR | selfsup/simclr/r50_bs256_ep200.py | default | 59.1 | 39.6 | 42.9 | 55.9 | 34.6 | 37.1 |
+| BYOL | selfsup/byol/r50_bs4096_ep200.py | default | 60.5 | 40.3 | 43.9 | 56.8 | 35.1 | 37.3 |
diff --git a/docs/relation.jpg b/docs/relation.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..00e039b2c10d6b5489cb38d257ef756b17a9560a
GIT binary patch
literal 330127
(binary image data omitted)
zSOa!`M>&BZ)7Bzp(wpX5W1b%TE#`aWM-FeoD)R@9*a439KO_uyoK_1QfeU)&1r9yf z4d?JX%?j7D&T)SS9CijZb|d)#@35~FQ*1hziDj(&rPj}wiZLGkpsI)Mo{;e{^LCY! z+V=j7kePII|6c{lz#%=G-?)QqP$Pi0Ac?%>yUNhEw6g)~px)11d)cA*-Hqf|i8bEM z36-bCGh;Lj%vR1CULd6P@&mCv#-#Oi66o-9;wyqBs_TQ&AD)UFA%LH{!rW{D{lGLx zcp#Ew#@W#G^LpiNDW@Q-FQuRA1RvebqYOP?JtD&ZJr1HqDCAXp3eJOSjM;!K$Tkk( zgtx7qoe36QhG-C~L#}EU>Lzg1FtY$Ho&OYcQc5C!)%4&$Q6&g}yZHw4<`E0m(~5r`()5J%52Qh|YeQubCVlOctV@G(DO~ z#s)RD=P_mumE3Cy?NkYAEX!oaO4Y z=JXallLjpXr218=$>Y68Cn1L>Z%&k1*Jo8K=STvIU_6Bog#eLV71tGcT@$S4ac5&Y zNLnFh*Y3P{iNvU~zjYt4*s2rAFu1^f*5cI9=x@QlR0EBMy^)9Zbj)tzv3KjjKBbPC zhd%nlSBkqP=$fI#;sg4*HuRQ!)t#jMBZ~bV4d$*mNak zU1-KkP$XaiB~yag-#(PSJrWJnnK$F&HhP-I@EZ&OF+Qlwa4YmTNAs(Q^`5X%+iznH zs{LhGxv=Zy|zWG_3#92@kvfQ#Pn&^u)@#jCF3g(XH z=(gvyJdV@M$-bf`|knJG&phc$u=O(lT~eJZ4iKdIDIZv)3^E z%5@h>*}V#VMCv#d>W+X@d4FuUlZenU#ANOEbVF6$JdBG?kKc8ttF-aO38CJIWI+D>69y>S*=;IRFA2-7 zC(Ai#zyY0b2>F3LJx(OIMX)>FPOgkMR(O0AZag;tY*H@Hl|LX${CIsTZ@9KGsgs<` z$S|)RR_O6ko!iv@3gEBjFX5vp@HG!=b_>d8FDeouQI3$HZ#}&ecuSJbJK9JbMxEpY zb3x8GHQ5gNQ?8{a?|44#pJ1KJnuFe3-NB`WI>pZCCj^C~Ct+wbv$VlqAutK@;#M4d z+6R7-YsgOLDL$GZP2s`8DcGNik+!NMdwd}t$NB9AY-C35qz1L;g~C{w0ObA-CBRmr zoPjg`uG4hLzWwz`!ntkkHOMj_X=v%uEE!=CXt#_2Sd4mHr~W6}a?y;u>C3nwV9dv~ zpM{TrT44>BfGVkACsX_Lx_w}^(vtC+PoOkAxS^jl?Gi&N?M0$RG`(3sQD)ujGr70q zhLi@)(PeKhoO#Dd#8IfsTduB*UCuTw`ANBJdFzpp(I1{bc*8P792UwIV?!x^aE2aR z%kKO2KRlg=@?=ZC;X5AC4@JY_;KjV5Ptz)s(_eOWavGE7gN*zS3f{L&0)E{1KYBT? zB&lJFti|MAT2I`Z*o`J#jOSw?YygS|jrQ!3 z)%u=(SO@zzu?KR6^XB#9v|>=*BRS|~*u9Wvy~7sa_FPU9MHcQxIgX*~8`22$;)#uB$ zNvI?=b_wAFZCv3eo_N&OzCth^#2VM#Y_B^t#y=)pQ(iUq_eWqAD)Yg9-Kl3Y#h)Zpq+6+AOS=U zLY0*KtUi&)67W5@*?>0Ys!c9(8k?pFJP12VEeZ5S`U;tkZ_4@oN$pXoOMfAS9VW$4 z$6bSPGN9RnM-iB^#>!*65j8CAQs2^ywrTf|S2Vw>?eWa`EP6ib3U(+H;$(1D z*a}K|v!;TNYZCrH;Y35AF)kDOAYQf0u;#L73?C0>!eZQ@t6ER?r9==U#eU_Uv>U zUkFqv4MtJ?An$$T*Mys~KkT;FMvzI#lYsTMG@LoES4qR_ljHJGM8sp!QmB5@(_HhQ zT>abkk!Ce);8ZF&(fF?(d$~xoeb#7q&Wf+nn3Z*|zvGu?&-Cbmu4W$IT)+jQW%^&D zQ}=z@A?%kCh=iUIIkKbWw(a#|=x)jx~lGeLi)aJFs1TPNXA zMDo^XKz(9277gq9!y|-t`A3PzTQ}|@$Os2VJ&+5|9(lU5`I0OAx0XG0cj#jvkz~+< z9S4&uTS`?^xD{iW32jx{njnN7+Ey)Q?Y7YXPRKil zzCr2jBi$Xk<}%D(gIYrS5WRzZ;UJ2cqS&gW50;Xl3#2J5%2pQjN2IPWYx^d`C zIsVI;DBYdyh2w@mCGFz6iamtt#vZ6UDuonjOo+2-3sO)NoHD6pHGH22I^Qbc`i^k= zOXlOF@B;vP?U3a8-8r%b_jtUd57|LvM*VHrXCHMlFOugF&;B2RE0>G5=l}KR@4wGW zJgL}%K8ZL;&WW_WO5sG~TK7-fz+Ax4o=F9855U^Fs+=Gc|0?$xm%px_(%t0blEEn& z5m*(^?qX*N(Ju;h!uRXrs29Gh&wn+-G@xtEKfzp=={nH;RF5|34?-*a7&8$fE{wli zW)nlR)J~4ds1A3_fc)BUovTMTL8K^I)B$bNtVw5efY2jXl;#KDUv+XHAoj=#ZzUo$ zf#yKa0uoO$Q5%0$iuy76cbq?;k~Zpzd$oW8`+ry#y6RT{jBZ+x7**)$-Oipyyuag?qezn-J0 zVasxV_`x^umrWpSWD3d&ft+S!8hg(gDQI5_V4l0tN+No0*nQdn{T*Xaj!>iu(5^BD zW<1n3?-ZK~Ixojw9QfcLM4NQ~X@q@tQfIXzT0kNwQAe1V!6q^@HbwX;3xNj>M9x%> zr5oXl&~ep!fs|72Q3;A^oBT;Ec^@>W{K{1*fWRXiEoj!CfBvM$NJ;L}A@TXkRvXxp zg=td_ITjNNOWBn%4&i#cu$vDtX}CH3%J53-g0a?!?UbKIe%pml-aroU*Hg$SUo0O= z9%00Il!JCgK&U6CBF;bQ`PlyXqEvSBF-J}4jEn2is|`p#pRdqKhso#Mb%NBaM%)tp zT|vs7y!>FUWndo@`T@bjti*=1!x#oYZ=n){NCA`YzO8$i{h&XT@B6a~pSD^-&$X2N zoq^eDzz`iOtU_2vht+V3N{_yIW{4HgOAWHH=GRAl(L-{rQ|&yB>DP!Ay&KR}jpR3_ zHd(+zxbFh(D#uJ!?D(Jz(g7OfDpvA;9oy66`M-4y`p@tGL~TP_RaIqaka2#s`C+>U zJsS;=9=Eb5B?@B^1=3dWg#4jExQ$>X4tnRp5IYvtHM z=%fUA{*{rYb{xF$S?Xz{G5-)_v=1~NozBF@IHg|IvptK{0>(S=ba*}!lratLpEK!P zEu!`$bk--1FLeFx&B;l(nmVac*_gSu9k5I>E~w6$j6Lf|e{h`;rJ7&wvbMrlqP%}?7E-=9Ff$z{d60p0ra=!aCDc4PU7ji}>H3z#v{nGR@k^>>#O?*ig-7w^ zkdQ>*d{za!x2S`ym?35?a=)EsrAY8)fP@(!nk0}0Hw(B0xGNw`URPaw993Rnkm)G_ z_|6*Y?%)alsfg)uKmA|MQzHF^uan)C6Mkggh?%T-8@KVw^;ATN##%!)*6ch;XgqOFKvo?nZ0>qM8k z|KYhT+4l(-%016*gNtYjElfQhZYM?QDF2cC*|ic~qG$b0tU 
z(GR`*%;ISQID$6=YZ~iU|Ni@+(hf7xJA+k$e#=qogU18j%+Q&Pih(n)36WNyk9P%E zu0N4W{SrIcGrMwIlXjaUPBCbP??)K*hnCMgb|1GaBYVDW@M~X^j+`ZXH~}NJt>dd9 z)X_j-itxREvxY@n&1DH2w`hnrpi3W`&;p><+$4!C!TYxRcJ}EB-utgFT`zAp2M_uA zx!eP%uJ}vG1f|PHCOB&F>>C7}Ef|ZyL3oN0IAiAdjV&9ZZCCL9;iNnGd*f7XTQ@{U zsxhYj?obn^Yst>8*VY^!v134JVk=_0T?55W$fy%bc0KgAp}MLmt_GBiFP??2^xw`k z$h^%Y(t=+2W#UlEuX~X11Wkwvy1`KU$SfMF&$tEfWRKj(jZsaK&L4xp1h8gQ1qpkUZ1dJ52L|Kop!?zt8#n}X0^Afj|tnGc8PA~wR znFRm?^3)N%nRcmsAKf z{;r`k`Dj5`#YlBq?FtpHT5{-E!J1 zU3y@h)w2*2#dxX1*Y^>Hw5>1R`8qfoKLWm zVEGJda&-M3)-;A1ZJ${7V+gLeRc}w%!J9j6qsj0vTMdSH4#03^jO$hR%SeZ8$I)Ne z=SJ&(^4uuUvs;3kBO>JUbc_64?^pGgAG7~lB=7Lt;nv!cE+_?UI5q`1pe33l8c`4V}^DUM`w1BPB02B%cEdPy>yvK zi^7XVIE!Afx0`%?_z5bswq#1FWforAtnjRM6ClBUX=_eBJ~vtpEtoHPm`U>-=HpI| zqQ@(CoOTQKP?yoWSDAq=t=1X%i`=2{Hm_h+UFn#BN()_txRH#{8% z6@q}+mcNNWTtIo1YQ$ab2C=6W7CV;dFSOm(I<5_R!m}JKX<}yx-tvJufDh*_CMH4~ z(yi=_E+3#@H*po&US3-3)dd8AqY&wT?-Tof=kx#c{U<1#^Cu_AMy!nkk;>NNKeOgy z^$)+#Q?~p(!^8W)bp{H6XO%$73~caUcHp7+_=H#5<5_nBDZj~T9b&+~?`n2g{HlYd zq@sk`=&W-!v>~8AW^RGGA+XPM>}lR*T+5#PD7_*(&$&ka-Cj|#uTWHcNn(QkoJD$+ z!GoS=9-VgFTL{1^Y0$+>`Wg53qt1+Bg7lS8_Tpt@lyr1k zNnVf?TMzYoKm{^BQlm}2MesJQ-!}2EOYQ*!IsU=MLL{h+X3v_HhiO{MQ z<{{=9+xg7;m4z-VLWBCNS@vbKkJH5TIxJdGu(adjjVS5ZQ+atu9A)0{h{w9F_l5vs zWRYlXlX(~PsUm~WGFlTd#Z!J2_kCgwBUQ`aR0CV`Xu`goK8_duJG(JJ+d1HnC=EUR zSRJ=1wh=sW?hy@_-9mMQzX|)%*rN41I${$L7+`hY%oOx0VJlmWS~(8BGF#J|#mo@O zI8a3wl4FEudk1OW}HsXu->u|NI8A62{WoV3j*$&?AF3hMuZuLcU|-i z|2dC`!5pjYU((n3HemywV2h*KB4kfkM50`vJXB(I&&RYuYM^#2%!@BSVjBs6h$y1CY#H?mACa9M5LItu@|L1_n25!heUo*H#B08lmNq=iG|} z-v-yRSidLqOD-XAJb7vGR8t~Hm zXE@aPhZfbRDr;9VfzmvYyn?b$_VWyHBDe-&l^?6cazL|7cIr0>Kjvlxfmb3MXqu;?;>e7eW(wd~BMq%;jSwy~W6w z;E^P*cU>T*vW;*Yhi@XANV-8iFKE7>K0w&*dDNjUJ|iAI1CvQ#P3jF()lkLpO`Cq7 z9#>l`lFxjeb!(;Q-DxIK*lV@|9*wDJfatSaMYfi=D$|NgNVfKGus_=Lx)m4tjTecF zXtTX<=>&1^33S7)Im<&^k*9sMeob<+TL_4;ykV=G!WoCK6Qlz5pI7iEkjX zOJp00aQUHpccleciPD>$m_Nemm0N0l*QY@WGx`JY)g~75)`=8 zj!Vpaa5dsnN-U)GL};#6rt^b{PpKa+`b@`a)iA+i;(81vW5#8|ncoK8s7LZ^*ng+w z|G<7+)D(lo~jk4dTr5KE+8UBV<*r?FQISZayvitU}gXCV4YV1 z0v04S0`FPLyLVYV`trO`aQ{}~*-0?prr}+_$)g%LtXu(3 z6JX}$PMA;)s>SwWg4&w@GULV}&df`}}3#lt_VS`Rdj6F?v znX^z=D^pt>I&s&2Hn#r*kST-t)f(W4wyhA*(b>5cVTEUlk=+G=8N_L`O#P`>{NVE51c?}EO6{j0-@{|Xi#wTe#8 zUZI&eCzTh#DtwPVuyE9iRW}<{{&5cT6g;yTG5sU#owm{9D(IMdSeUACCoT~e;+HP-GIb~bU4_W5?S4$)wx!fE>p;7sa-fe5=0MkO`+ zFAiirMdTVz9{A}S{-^_5#Vb|C8{SGmM8|xWjrI7DQ6I0J;_YJG%MR^{Z`Hm+cDXF~?SV@#9LZF{JNk-ncuZc0%wL2tTSJg(p@C#@=M)LMI zyddg}95JugyB%qhNi#OX3PohyOwPButz=!9t`m*dV9c^y03M|=ElNc1qhEfr=RB&@ z`P|ld&#}O_@elYOnRM|TP_$x?GL~7+2opw)yNU_FKBY1=vxAc`d~2~M1==rj=IP;; zh;pTd6T^5-p18N|sw{&Oe)r35003w_e;1w3>TPBuw-heTM{uXn=^Xe(U^Z~fj%kr_ zt)1T`6^8CuS-ABrS5$}gXuneaa!Fqz>1LPKDGi)pNpbyP5$VT3;ap91N|1q|p{;@a zZ`mB_xFzr^-#d6Qy_xU*O$m7OY@Jd%*p<)eSa+&cqX#x*Gh!B64^ ztPa;*{4P7Cyq-Wq82c&~J(HT}W#tGV^Vf?fOy6`mbKu7W$Cf4dchQv1YU-%RVMom@ z(*n|5Khhx|6~R601JywZp_*-#sp@fgY=bI@_`vuH+OzSC+qg9GOiRGM)azu(K~xl5 zhhYR*LCgYiZ|t*~dQMUbqY7vI6O{p$6xiW?ua@pnt$nsJdMKx4&r6DD*3Hc^4(b2I zb3v`z{a^>*z%q_?GE;F}-i`)6L0Ay-%>Q3@>JOwVzM?mwkJlL&EO*)0RF-`Fdv!>n z^=H$h?=vJgQ7k;_=rx>9dNTT=SMtBSDU|=0H_Z@P)DT{h;=mz>)yX(>Ds#t`qKzyo z##+wJ6d81%H*46xpHnagiw0q)%kbYK@+|%A5_-i-PsYEbd<;OWS&)~8MAkfKI081* zLzN^q^D`SouOC%fJD`*8dlwwMPfneaer5$!6d2z)1^I+@st8s|ppwUai zINv;O1w8JV#fx$6rh>-rO8xJW=OcyN)yf0wVY|f?&^thujC?x&P}H|M>ZnHgi4vn1 zL*xX3-wC1SZX+A}uSftX=RVHYE0T+rE5=$eFB5%W@1?^ZYQoHy40a`SXNbLoGHheF zR_hdRJ@JK{cJC?Y%a?)-&6GPuV;24%IqO+@(cs=s`&`~c&c}JG9yUZ3bDR<0y3!#J znZH=Ux??KR{!%R9J4Jc_C318~{p#SaYsUxnClzF0#g@M#%^w--HD2N_)*@AhI|qIh zGgEn0EX^UPVF8mypVIz4Xf=Jgr~>}@zMeli9HoL#pg4>#bI(&x)QQ~xs;v~3`Zpx{ 
zM}9n{JDdYLi%AO&D{Fpu5968o9OhP9^=nY_&?46u0=d8#?ixhCaM1>DARhb-W4rnS30 z;5F;#3(1v3*&lxlpB(t~(BoPBCedVPf|*PruA8dAt26bS+1$VE>5^FS!2eperVpk^ zT*X=3!7@Dc$b{wro&aJd;xHqQy!rm#s#IH&?DMS=F|R|vE>&)u+g-YQ$+1W6r0ypC z3_^km583I(>|w&$dO5caMs((MN_1t<{b(aN4oBmHrXmrh>P>0ih#s$@!DF})@z%X}AIGjK-WM<2#xVGUvPXz9 z+}6~SePT^}*-?u@n&qbDFEE!9@YrBoR zR3rd1@vdZoft%c;Y_lqtCg(09CR>+OWn{ZrNSLv1$J*agft{A7TyGZ)KWk|y+o7*} z;H9p_2_ra!+f|u*K6&=>_Y{(~b%b@wC4aY5Hm%fOzl(^Gvp=;SLK?=8MGr045Su;r z`}ta6n)VjT-5hede`NBjNz^rk{G{uEcfJy|vaETH&pyhzlF3IxaZ)It||3uahw?ipC+8;Di|spD?E{|w=W#~@(if8WOwuZ)FDc1uJ8xa z374@(t^>@_;e{4BHtz=gT=+tQU09T=)g%6h(Md}-O>-WTChAg_cTuFYrgFM?tj|Uy z0)H7Gxk2I#?*6>`E>3#f^wzxoSqjoKe4z}A`G-rR=TmXr<3r)Flg9m*vEzRQ z@(iS3k@?a6YPk1@$_ndf{7m*}yA0iSQQn4r@GkC0ir*F5D8VMzt|J?`57|an9>s}iO2~2|^h8Ge$CVv}#dc46Sr?r3lOK|hE zC~3clgWl$?J9fX;ZQ@ITc)*%RU_B$3fmjoTb(^`uuXb0u2Xd`sjm^KtI{W9R=Kf>z zFwrE@UHjoe{H=^DzkpCK-JJ_rI6bWujIP#75Z;W4el8Jc#8?D1elF{DDh}*4_9#v~0lCtpK9Kn_q93ic%57LLQGzxh13Lw5ZouC%w`CNx9wCzX0yYOwVS zFf~cU9-3T%5Es|sGGCp1CTo$!a2f1V8Qb_PPdx93;fpp3FBZ;J4srHC;8bEtz{9Md z7JbRR+M2S2%J6CPKvh9l5+j?;*91K{k*vt?7c@0CkJ~H~zl9`! z|J!EItJj5g;B}0!;fV5&1E*dc);U~2(M%N+5Rp^0=tg^IGfxL~{a}2qwp-D6Wfeky zc?-fmx=w>2r~F*j*HlgU$G?y@9Ns>w9y2%~@R!B`-vbM&TFOm!8NDBJ&nRp;qwHH3 zG!QEOR8^^iQznXCyL;eZ%0Ky>JbvqAibq`&=-Zdu-wTM3NSG#`gSFw69?!dmwDjW+ zK6p2nzV|(e)%RUQcKi!xf+M~?tUpJR~4nwH=aF5#EIdkUclE{6&rb^r~;Qg z4zLWKB|k~k^{VaNzLwIwGopmCT4moSgO2(2&YDKx*ubnJw=v0tVt*|3vPtdCbmXu?1?rzWuYf+o-vfAft7AgbR0LS-%zstqH-54xUW z1UFixw8rI(V`BrIkGu1}G+fE9`5OE?oU_dc@t@J+J3Kv!xb3P+*}LQpnaJw{-4J@) z8Arbv)XbT3rKppJYbo@H)Q~moQ8|UOUcSZCS%bBszgB)7o_HS!zNY?4-v^W4qwUg3 zIb(@_-nc8;54QCnI4o?`+lD1JLiM}I+N~`P0_D=2mDrklOxy?B?OxYW7vy9by1|(3 zo#6*rG@>4Q>s-i)RXOIG!+Z6n_4n2Hr|jOZvMO!_*aZ+5rvYR}Qj1Yex}trb%&~Pd zXKo4jH{VOs96fG-L^Sa7*_BtXT;CKW8vmY)?NSc zFAxbQ!y~N;a;P&!>jZ|R(F=42Q$eqZy)RzB=sH35?}C-PJ(<(bvj*BQbg> zO{4L0uDAZC=zpe{C}=NZ+CBclk`m0ea&|y|DwWOb^fa$1R za0hlNVUtK#QlC1vhRST<}aR*sAXox zwe_^(#}Ax3ta+#gJ;BYJ#4$&6x{+mq6~qT;VMi;P=&)U7m;Uf z{!hX)|Fba8|LE`dt};?6fj>baW!N>G=yJr%rNcoh+82kab;zHNNBhgZ%C1Rb#{u2y zIRT4TVY zJx6UmId>*uE$Z=)vDw|%n3zy!w|V0T-{)B$f~9h0J33w+minl>#<}U<|}-qY@-G=xER8Q%@V|(``|B860!~z zW=$0Vpzy@?s~gJKuWAcvoTJ>(hUvn)WE!Ajyd;DFLfm^tHPyZAqOpRCROuxsAkup; zQIRg7AiYE>(xiz%kdP?7BVCFhB3(KGsgW*SiXgrBmQX?<)c$ z2V-QcnKsB=bH4L=pYp>E<}!Z`4Y5mq-!!(tw)=_*%B?rb9c3_XI`lxEmOFIEwW|KfDJcH zH@4TZLiaQolI{VY4$7)DfOuTK@_ z1@2SXd*L-)^HX9oJglN$&%M|)d?U)o z49HP38!TFw{Z!R)k=ge|o)fgKy-2Oyg-C)kks{xaq<&&)x;89#a>MmRnFi>x^M(@) z4k-6aLr+9+B9_!>QdXtit~f)o@=5Rne4tK4J?&)kr{&fs@kDS-4g-)~OM|6ZCIQ=P zh^8g35_Hgvk$P|*|QQT_oe((*hy zCH~JL;ACbOb$+L>uV1e=tV2IY$Ho107o>{!j#-Ca)H4F75aXqdjyYa2i zSTm@w-cj_F+=~?P~+&(#uo%2R$xYnU3IR@zK-eK^C)$tOTU{T z6?wa^_ddgSXBJwP*B7&1Kj1j+FqmTRK>w*bujq^v6-5cJfxN>xoEUV0H9 z(Oy_|EXFKM8?Kmo#E;!Di#eqYeDB61)U5C)riA&sB)!CZ3p<&~Zx2vDJ_ICEcwxvA zlp$iM5|(wqXeynnSXFR#n~}?@dr}V-U_E^fghaQBwt9thI?9)%e`5XpY=QSq-XNoE z!E{UcULdk>f-Fw%XY&hx!yeB}l+9k^8@n|u@-=>)KJth;^am9yg}lN~4qBM%AVCst z+yZ0~T{|9pYb0BfE2%DAc3KeHcKFq9uGPdu1#@k6GGq|TgF}4})Ac7xN%aV(I4Hho zGhyMz&Ke&~skNypVZ!516yVyq^INH=7T!gPFbiqPZ&fL*JghTKjK33CoD7GZ#``Ky z1V)RpT-3V&ijGg&=;RpI(L^scPggllxja zH6S4vO)uRAdasv>JXqWH_Jhtb>(2AnikdiQx(oEQ`g=dmwt95%WB(3^8Z9}eX6`St zJrbS|yLuQORNAkum*yy{N4LIg+$gNe8X$han`1gU=B4p1$VEL1%aE}$ z3yjdi?dqJG*75#ZFUBhRq4|)Tsw^-v;uizVFMw|pyr zJIZn4OhE)n+A{!R3E4;&vCSFBHq;aOm`b~Ed|B(mwxx2}v}drJDg3_j%BK9G<~H=H z*UOcrY`!{5XIM<}eUT@M_KVk68hg$69mN1nnU=h@nordc0P5mKo`xI^5dcpFVPJAZ zO=0$H2vF_in-Vfb2UoKvpG1F}sC(zn*OQ)D{6TgjVKDHf7=3Y{4#cp$;xCXs5G`%D zkiV>=psl%W3<&N3hUjGi`?BWyZwMZ|+!IVJG3-jSu|(h1$A;Z)3D||pujPOK97MQU 
z=wb_TPLqyauP{@O3DCE!wS>xs>wXu-y?(ZtUJ8y=M|^^1cF!|lTxQUJFYKr$;X6`w zb88Y!;K%Vps9lfR!GOo%PbyQDW^VlH=-vs7)A|Fn(r>=OYF>MBB>dR*1(vIyLCj1TIdF^-;R%hD-m3sUkhq34^!)1qG{nv zF1@F^tRQQ*)jT<(3Z(U5&^<;y4qtWzIFHf3(`%3ElSsc8LcMomT%feX8-NJh_MjN% z@|zFwIJM6$NA+vF5&urc8S@JBZaz)uu@agQfBn!h+V!%3xrnHiGZQcC%gEmvzHvI0 zLs!)p+AtC__)?+ZvZoz%`idVw}!OA&>fHd)ox$GU)b&oSiD z4giaO0BEaWBY%Nr$I!>l6;7liguo7$=%`pT`!cKEV0R>khno_;_1>S9d#fiBS)jKc zyr!F>|N2$sTuQ@E>*iOe9B$?tDyod>6|_EHT8TMKwfpd^`{}e%TzuZ&mq>yYH%VWC z8zvH#LvEP)3$*pvc;9#lU7Oz1CdS}M0IV_2jTjN1x{!xLUC2q`=id@02}Pk-O#%IR z1u(rv%{L%b@x(-u5xGMO7*%P3lYnp?ZD%Z^dz&Hh_E^~`@1K4B0*uEryLLdj0Qwm~ zHy;4r)xg7shG6>O=45w^;KWj0mrP2A!m&yA6N5EqH1?uLuxGIc4DpKSt*VDXFp3<| z3}%O8DsayA*n%uoZn$~l_m-APi8rZRw_|qG)@h_JG{s?;=wTQg`<0rH$v1OFV&HX$zm=xRAX++j7yQz3nmjhl6umP^lEO2 z@Ay4O#ks{-ugI?kslvt{0v$$Fub%jlev|ka3}o7+o6GDDhfmU&!0c_`jRn$sEdx!LAEhx{q>O$mbLPl?=IPG*XBRCcPrgG5!bpuD zEwCURVwE}?Z}vo!*$q4sx1tUVGJvxhSU%W~coW-$9O{vV5gQ-EgR-7~Q1L)2R zfU6wk5eY_>bxK+eFQsQIJS!C#qrP6*MiIB%B!n!jZfh3yWHhQv#Cc?p4gwC!>JFuqU^&0lFY@3k(`~78Op%xX6tLWK$ zQ!_k6-wr06g_2RJ0UuX{Fm-IOkl$wGQNLaG245bmc^D}lhTB(wE(h*sH5FnAxvh>v^%@jf=ud6aZut z>R|rxY0C&Azi@(b5~HL{%Wi(JZPe$zlw`3>7h(hFE&~}EoTx_1<;aJ3?zNW`{Q&ub zOyr{A5XWWo&20o58vK2{Z1sJdRQ=lzECI!<0Z}CCim;D-X%l%@1}%V0d$+nZA0afs z|85`&dK2S%!6JrnM_)vtZsv7k$^xl5NjVtjj71Xp(AR3Kqz)QKPMr_W$*VMCe}Qh_ zB6?f@-4kU-w$or1Y_RmI{Sj`{O5O{@nWtU}$1g7vEGA+2|SgXc||oH^Y$ z9IyEEe@D}9s}`B%oT~ev>!6YAsbFfN%GeR_X?13=9kki&<7bwGhG^{e6@N1>v1EDU zWnjJWr))&PcjllfMviIkm!~vC=LOL74^@YUKLwO-lmH8b9r$Wsrp^T1--nC0#mnK9 z0aQ>~`D=hw3iz4KIvMC@CiMW#qssEMzd&D}SG15h@%rFPFdhxeS8hjx`bQt)Vx{1t z^~7ua;Ox_(Reyb= zOq$CNZ2oz~0-r+Q!%H;F z1s21uxxYf`%4eIE4a6#YC$l#vH+H+s1yA0wIA5Ere&;>`Ra>l(@j9g6YUaFd;V29k>*`*0Z zvJeHZjs_Wlv@NK;0@V>`S>jFg?#%~w7r~=W6CNr=W{zmaz^{@SW@t!3vf|<(qylSJILg!4W z>pU&G4DIyE^(khq)W$XS9Mx7E-G3DA9}x6HX$i!$Wdne$HKhZkc$aO>W94lWKMNmFVhi=VwxW?n zJSzsfCvxqjTI4%$d32+to9ZLyrrP&ECi%8iZRGg@||Cv;4s!$f&&#$Ukro)&!Y{odBr zbJ+(-H8Un9z(rPQu36%<`@0m?C(=0foO!`cv3b<@leOzTH^8ErUqg!HvKlhWvXAaRDkRPD2scZaW3J@$@XwO{|bCYzXAB4JXky)}tBfyg5y;k4rj zs%b0O!qQN#{K&p-fY2tNQnq|~TSUdVW5C&`E6@RJ zJ34>56do&bai)lrx`MTRcPvtR)`XWF^237c*Y*%!~;MoS&!8dzmPf8z`B1%6?rBG%#nR}ocSXa1vS zHHR~6v!|NH=hRw^ z+vqDu#|kE*O_Sv>>a?ZJ90sTs8cC7-USF=hm{2;q0V3DnFD(s+w6bn$e}?Jwy5+J+ z{zNzzdI5I%CLX`ufT)EIn``cLuFG)oGx9tB-X}pyeIdE{oOsE7@aO8?Iy&#D>L|P& zS;*&bZhv~`Xrn`H>c$Tn<`E9qv<&uYm2akflJJL_jja0@C;(zC>niU0HHx1Iendaw z4rU><6vmokF_tFm_7m(H5pbb1kMxV3lr(|XaH{xoNM+e?w~Hwm*=_=Amm^5x_g4C0 z!N#+Ytilo8p|!|b72b!}gQ3>2So5Kp01G$ZU^L(Q0S_VQ!B&?1!?j!N%3*=xX!}n? zcvJF6t=FdDIR)tbYlDHGuyTtY!3aK$Q&T#mqbtjq$MYu~+4C+KUa*y1-gV+WJ{%L? 
zopbZxk>X%(_K?ZU%~?>hB*QO9RujnFEAZnu*W(P+4p#)z%QLY& zgKUt&jn~0SG;S-a?h2RpDl(-59ZfK}V%M@w)5NXw(BvR9QEqRJs>WjnKvxAF-Niic zp-P>U_*U7M&&hGs1i(3h+b;WDMFYnor<^}24g}K}@{J#i4T&on6rfWPCj$d0V6E9P zBD8WO9QFh;Msw<86MjrV0n0a_O=Xa&_Rx2a@3UqupAWAGlKJZAQ==mtFw3Za@S8TY4^n$38y*1q1$=SWw&N2`Hv6k zIvS45xkaSIM0XhCOM4WfrY!E7NQ0#(>Fw^w+imY7xfN`$>bX7)*&Z}1WLm}Pdb8mp z41kE~AeF(ru|wk4LX(717Ti`erB&I7A^;$Jjxw-$EDs3XQgqQqEtAm7DrU#nF-8h&R|EZX4k6j z&PaH)jv*Um{8L;QhgJt6c=be-nqu>oc$WwLgNwpm8o9=|t{6WN2VHsx+7y}^#;@NR z%Y6@^R&wfF3x_AlWOmGmvKR%DU+$pZ*Vd}x4d{R^Q@#pc!8#2>j!DZ^N8I-d0t z!T#*RyRC3Y)o`aS>(m=0gSoIso7-4>XFy3QO@|M{H2L>3Rchjo{u?JzUZavR$<~c` zKjDtYnjqqRB^y&Rd_k$~Gh=ZH$@WS0kH%UT!hHjCu*t4NneciClp zv}?0V$*e9+<1q1ewChS z>MYxQRy=4?vv!v|US{3^``gcY9a|Bi2>cyfOuv*_^B#PLxeN-wawBi^8yc9rR1ofI zqSr1jVU=~xd?_t4mBOd-iD~u)!db4M44|MA>LxbyIC7*0z1diHyA$*EqunLY(wSo> z6Q7vp>grXQNfpg9B~J6=zYu)Ry&qY5-p^7+?7PkOvU;43W64Vum8xvSCzzhjxvLA$ zM%?oVHeCe0T#xxS0kC9>azfTp_~kbT*2oBDNFJ;|N)??3t{4aO%hzD>w-=Xs}~Y??$Kg3_zrh8tA>MxmL?8l7=NA1qm3NF1bH`KloN z`j#8mXwT-wXL7f#-b1AWcKoM!%Pq?WGT+ge?hYR&9@RP!HT?`NgU3%gk0FlNy0Ya0By}zMy zX8l=l%t5!rOj)3ig=e{LWXDVMusgLPY>Cm)jea_(Q*#~@-C1Huww_aQsx3|yGJMR; zJ|(8DLpz8%hz#;yW9@NVES+{yXYq6xoSmAa`OuT(f=S`UxrVf)gD=~4mfP`zuyF2ydoP9L`wWva z*0R{^B@Y8}d({ii0&U|Gxf5lnzDZIlTn=Evh{Z#g;6jTPS6$AP(-N#)En;gYDyKF2 z4uu|v%KiMj_VmH9{O|*)R1bj*u8s1ouffVB`11Jcsg)tV2;sp-I-9Oi5+wPbSTIDq6h2nZ?YIOikkwB_*UQaOy-j`%*q zj$c0l6tpcfjP!&~P)W%6JaM$`!nJKW*`^iUhymZEJLZ;Em;FOz&$Mt>_lApks>+pc z)nS1*R;VMF)fl8{oZSSoJcm7`_MZgapEejb-E2+EcLvrjkFQ%yBU4k;pS8Y3KKycT z7X$>xmwDzekeOhv%XMLm8m(LcZv85#0Zsb*yW(2IRE3vbXVX-Hg278CTA1V4&C*H5 zSHEVCDA*i~`KI+P_h2rse9#o`=sUSHp7fSqXztv7^vr4(4q3hl?ow2#Yxz9Kx0W(Z zp-mg4z)6!w9l{0h#D79jXvx9`V!ro#2A)lLH!ZUn}!3W$?Gn1Q!w2nr}0tPbk*g zmmh|vL`2c9fc7>-wNH8&;~b)vIff~WE@NDhk}Z<6##%FqDh}Lycas*1Q|%&`srvsO zi~4tCYB&jin++{N{&|Tc@z+;mLtaUmE_am*w6QrfP>`qlpY)d2mB|Oe{s(`h%7@?RZxq4VAE^KOmv-F{GV{1@6od>lb~wGQOERGTVgf z{hBDJN{8O3OIqffwP>5~&v z%_{^sr^fnDgIGBvE$ZIU&5)#pSyfdvFYoUf!3)A>0en7a)M*$>kG-3Xrn5{z>JqP~ z()$E}4Vum)LO#!i%*x<4f`|-DfN{$$8!k*s5egNPYOBAIFl?F-FWfU(x%eC;WYEb4 z9%(kpo8469uGO8nftumfm*=Uv{F+iZNP!|@@P%53BPm?DHQ!)C$7hM;z#wbM+bUHXJ00i#~l zAr?e-|MNLbVljq;WbkabRR6|Tkm6kmmP8{Fcj81H9#Eap}(4Q*E8dXEzlcoaA7t?A{ z4QDI;WFe0YS7IYq4v;jgg%8E5_M1!)E$X%kXA~d2dJ)poYQ^+nwd&{7+CT#od=9%o z)_2tc{F$ar$ziB-{?D6=nqkrb)|VdES?b>4rR!pQ1A3d(%X_H3nw2yNqlt$&#^d$K zccpC4&#TAE@rToQ6JzvY}m+D(E%ZH`Fl%{}J&uHl@d!7mPNKz~a;^GADlB1|fA& z=4J(tF9rdmOfP3o{-{;;4aPH_>b==?Dw@;Acdk;4Ic=7Z;5xP}A)sgOYQ6oMN13dd zP$xega=&SH(>zjPLq)!#TM(lmvmq)7*-b1eX)*0g8l7ydUeL9ei|-M{*hp+p{|6(U z6@yS#WbX<9oc}QeT9zZ?rd!7F-`~i?;AtRPd zuZDcRvcpvmO+Khda@jI@i_!fPnfkKB}UhEGsSH14hg0+4};)sXA z*6FDCwtqL6lf(T&fQ6gIv)P7Me}OVYwn(byfvzj;NXl8*+g;d_2vqCYU5~mehvRnT zmU(aKhTeKZvlgm9HP^shQUhtVq&MBl`!_14{=8>F26jQ{Y+_vr?64>@Y&a7Byy7C) zBQI?sw__uoch}B20wyN_M`~uEkK`UVZ4j1gJH|OSJD_GDMVf4%IEG?0*PC(mGYcOa}+FQo9_c3-ff?iZErLuU%y@8?w@j|5Yuc@R(C` zT4Gt=3n^vg0NuK$U{L#HW$$)7>A=%Q>dfqz2fo1xn z@|LlS3{)y!GA56|rJ@nJhC-b~Ry?`}<(B+H-QBqxLQi%=Z#cO0cr)3>OMIa9z1PdT z$nZWZBYcrL2JW_Ua0L?8>{Qw9Ug%S6W-D=t>iNYD-VL$uG=(j&o)Dnhexj!7(RS6qAfI3cF%$QAgI5^O^dALV&czwQJ6eV8r*%QQZ!Dd3; zK)!RuMvki`MY@d5#cC2cc(?YLBvMh8FgA#lpN#1dbI=pg$Vih}IMOatfh@Y7d6%jj z?2-s-t)o41f*XB_m zQf_}!Dh36&G4>L*@#agAD2s<%*}U%}pZC@`M1*EsRq;)vD+aUd}+*R9O*(6{Z=kq863FiCcE`=QYM0!&hwD{XT2Sv zga1@l^ci*Y#^(xUQUvlhte><8IkPf#TRfQJ>1ezXmnT|NQde9$KW$~%ArbyccG!zu z!-3BIu7>s0<;oKf2i+#}vMMDI?U$+NfUH9UiwJ5EAREMc)SMwS)BghL@y<03;JTGI zVk-S*eIkc3Q{}RWG4zRRh6V|*Yb(mj>dVqRQq7ce6gnNh!^haQHLhu!YpbgFjkY${ z&DUAS`va~pIJFbZKhr}i{QO*BxXPRy_j-7^1HK+wZFF=R=IQaE3%&gLclSy;Y-g(Z 
zdG87LfL?1?3{*^e;D(si4*-jYw*E>7_Akv7JPJ3a5`V=NR;ATryu-O%2~WtA^#~f>IJ_*N~%!S#d)y6^+I=#lOjcqkCHQ1eQt`WaX3~kWs(wt z0$p(dq!bZl5u67bT>HH!Dj`zjng#o|j8 ziB8BVej{DG-uKqi$BB<3+04!EaPxi40vu-aBrYfNYK``_(OnSkTm&g5x~^A5aH+ca z&uEQ)EYAvGcMr3rfjUrpJ{ZTfQGAI$yL-qsTXyA4Ddr?C2ql(WR{2lWBy}=CwI!Ne&H} z7pA%F9?^IZQlUOzbj0Ej?;=|k14qo^(UoP%zJk`F1*$xPzJea)NqI?=sITfN> z73QpWVhMpBEwl{N+*#f=v4oV`CV5(O?TiuY_UumkqRxUBk$X~06;aOAp=BW>M7Z=Lj@g&)P{Pic=6<1Dy!pQ$W~hGHRRT3Kf!E{{Uy;Wu<@B z5&H+w`Dv1nPRJ?C4lt|R8o)*s)4;xRJ)FDJX=TYEpXzo_;Z(gfInpdQ0BgJxy^?K;?Hu*;cj#! zu+w&V8%~YKu2j6s7BQg5tF8n#cbVYY4H3isv=E;kn;rai@#zLd!i72E_>A(aNYhU1 z4y;YK4b%(A#f^UvGQ5d&S}PMAw3fjP_nzU9{A(o0h>aXZb0zIPSsDKj!U$7bc~!Ni zJ0qY)P|xZN>;-L8U(*B&QD+5FRGaP`3$=e`)N$WBfy7q*x&Fsuv}2ijMZQV;^nv;< zlS1x)FDL&$kZk-PrzKzb$7a2hwa?;&ujJ{ z{k9`uN^#HLPKjzq_DnnzA88`e?=Ps6czzDm49G$sOm_D57y1(T008^(Db2JHt`GzL zVXaaN*aMkit=rw}$L!#*sN_?Z!q01u!T!LB#ZDMP)p{3hgauG}`u^n#j%|^EH)m#f zU|}Zg2GC#FjDkq|+v@9iy)7s$-pPm^s`@sk8yE=N6CN8_*XH$A5FVJ~%`t)+kpU`1 zH97(zq|C*Kl}pdVKcjv0vGequ5ACH96?-1eWQie2o}?+@oP51?jLE9(!n|H+OU8U~M#A-Njd7qb1Kl0kZKRfDP?tIyz*51m1*$9$W zLQ=y_+EtYFQ7l?{oP-*Neu@17ZI!_{#u%bn7pkgkEWF8}9tl0187J|F$%}?v5KKBP z@!M>H3?ub|fNp()>iGW~1GoR(YlM#aajBU4NE9-bO(XZ~7IRu%)$~-U?v>_5jeN%s zAIzV(&T&GXy1@*s&oGHNDe2-fdLrkx?!Tu?mx$(1 za}Q72)#<6V_q8`~QESVCUh4~C^yOC4ZC5d7%=@pV=WTTRkB%1z2JK)9fwklbkF5MX zH(1x6o?qH2`Z&kB)TvRlbXmk?^3kt7`xbPw7l(MKoK)2@**+VziiMVeYk&mvxvy}c zy7*3;+jQGp0_h4#>|biB6tuj5S9L&)em!>t3j80)cWlgUqJK0ib?C^*xP;1KRpNJ zrRaeB@0^-s@LZUviic0vXqN4GrBoYz{G#$wwU89QalBJ&=VrcQD?wi-=Aio=ABZ_a zy^Oblz8-ZAkcsS9^RVV{B5jG`R>ZocNCxt4@UoPjNg&JRkfiWX^W5;kZ}7o4NS0%t4(Q zsZkC7w+WL>{R@Q)4@gTtIUj&fi1-nka?>o6cAew-Po^5Mscv;O_8ZSk0>Q>Wp3YCA zVx0Evgu9`AS|O4^4_cEuNRbZu8F>MKCem-q&eP$m6kzzco8}D;yG$G7$T5BN!mzL? z(Sei!EOrcMWL^xLrd=sVb6qnpGk>wrv;qz3(7=iVg@A7>`jC?XRnv=STFT~XZY2ub zD$S$*91IPVe_j*CQDafjs+#jV{miFo>Ml=)m?jWN(hDquwjw? zo7*WVVgGCfpbPzwa}nZfja;%Habi17FTq=9UgcPKwaT`*7~m(76gB zh>f^4t1UJCJUAwx<~XXdX^MDCD*s6br{Rj71!BvE7Wtibm}@rw0`;DOs{!dA{jULm zc{Xhiu2SM;dIKs1uXj08` zN*XtUXYFYpaFe;={-hkDF-8u`bh@~(X&Qs*D!Q}8%L#9w!01pI4^ggZv-TXccxQ`X z(!$|BON8MY_>x|ko3dxJ+P8ua9e7e4luM~-{TWx+)1OJuwT=S+g#kkOjg_b~JsZ;l z!+@R>>zu{&W7o4YyaCp=8+}#zh1-h3rFnMO?+A0-SGCOINvUD>KWAg(i{u?{zvDUQ zCgnh!pjQ#UvT4~)!!vg?@;^Tte>26xTIRrmSzv#@>nW~z`6^W&rW6-N%7Q<@C|NR# zGmpc?;H&2IE6FPGbZq-i3bh}elHzKDXPO;ik*(r*^_3f9?D#v!Q`=0AmpMKbMkKVd zCh@>M;xvB+v0PNPidQL1{r#q-!n=fIdQyBo%#M+zh!TI={d_gWGNAf^G=(&cJ}V?W z8-YTACQ_|j2=EmtM4NIG981kZ^>p=puUa~5Z7aG*Vsum$RyoAE11~{qUQ~&fT`|mT zbi|rCq)xU?0(y4})~=~SQs8jO-}};^CE&Jv_uugc`A3DO7p-ToJK2B#o?sirC%oS& zSW?u>n+>{DT#`{xT(J4p4Jb{lH6836*mb3k#I;spum02Nd>+jJaY4^8Jj2(>48K)y>--2C&k`Y_2w-N2O75WL^>yAdF_6fDItb-qk#`6P0uK24qMQB9)xxEv?c;qWFf z-_#!xrUE?^JvdR}!IhBe-DSr7F&m3rD-Q~YBQ`s9oYhvoPJ3EwcDwd}GZ{kH&=MnD znw0NqBz{~<)ZP``Ps`s{oX`8aku+*!Gxt4?#ny0te->1ta2IWXbc9S9JjQ#bk|nU? 
zUfW#vxRsxJV$2xEJRFn#5K%CL1MsHt2~{v<@t8$xj*CGPa@3k;QYZ-VfhdLoNi4qP6ct$=hkYc);QHuB1f# z7BvX;=3^#u9s$hix;+1eLvtp;inek&2 zCX)=WiQfIuZMZ>%`4G>AlwpR@l!7du>VVqDoX@IUzsEY+uBKP0vxdqDIk_8O>wPLo zaoqs!$XI0s?eI9`w_yVK-p29d2%SBlok|%8McS--XULE9ft#j^;{WszxO27@gQ8YZ zVqiRjMx(4EI^uYSJw;x^z>mtErm^NCL~v7@q^tkC%!HuR7=@cUsxz~;SBuv$RQDe1 z8qLbOEj_)N(J*YlsPo!0WXv`HcYiGaV3;k%7NoapyaT`%0Kw4S6w0m1tvWWPk?ULt z!yN@?BN?|O`biV5@4!D>d{Pxd!-`*}H98EK`W^;O-S$af-|W+p8PsIg$nz?cp57f( zC|bEU?k>HadFY&Wub@Uy0LZ=7&{hUrc~&!CyQhZyBl@M`5Zlz}6WFlz>FbJo+3cvZ zTyax|{@}Ltmr5~Z)1^Qq!*I=UnP??fLfE6IdCQ1VM`vm}6UnwnTL6=sh@CGk*jh6o z@3Y$yi&E7euZ(%Iew(zww~knw%8&yc4v~YL(+)!aF|RH5!u4FP%AMZI$t{@kwGxK~kV- zxew~72e8tm(ZfTRcOciSH;%`=_x`*?0LHaj~GF+11&Al4Z)bj*;wPOcf6KU+5LVn6Y z*+y%t?rhQIuS={2le(p8MPEv67XECd4hIxlX*zp=MR?13)X!^ZuXqUXmgD4-6HOf& zg%ZY^0X3j4#`)X;n1C|{@^laUs1g}A8O@R`Pu~`#7gpz!R%HiPxOMO3^33p>+yXv} z3#KPCC;h^JS;uQVWXyU@{rujSM(TI}3|HR!H|p|-4}%6gDR?tLLHqj??Gvs$TZ!}Y zb8Omw`}E(Ts*&OX@|}`K(adO<)K_4Ba~fg2Cv#@kRE1ya*Y*RD`TH3}I8s`x-PuFuvUtz+H z{TXVB8OMC2OPE8axVBCJCZvO|UDI=g;cz9F)5g%FkSN}5gVzT4Kfl&qq)O^Sbi%LU z>2isjBXX_%3$*i}2@dOu)UD6VS2bvsdy@WKM}rWToFNp)1}N*@W~|6L4Qlp%0$Vox zqxL`5?>zkPJ_p`3>QI)h>^1$?a*9W`DFX#qvsS8|Tu__RQ%m#)k{ItW={Ir*I1g4s zxUSzT(7y91R`?qoE&YH#uoc~Xca2voAwMPP&PDUcA0Qvz$w34J|L%OJHQ}p*o&q&% z_*Gj74*FoBc=rNea1%jO-n7N0mOS=h>7(aM9)Qh?9@!IXFnpSj9+5NfaIEdLD=xbTRb*uH|*j`%G0hWZ9%w z?17R_``5qJkc=x3Cd3Vx_i}7x#uH(mB2!wMJ4Zc_lD%3W#=CEWcgK5q<5;Hu7dX}< ztaCh1#xS!+Q&#Mnev6!bNwYZ+*CKWWAN}L)Y$Hvih+a!4yp-B5MM>FhB$@n)@-m%u zPupLhSQ|k+q%5#P3yE@0qf(uG!9#beqhM{P`o5w(S0`OppdVJQbJ{VcJhwV-RA6_P zqt>vWt)?a+sL#y7lr!9>uufGKk6P0GbmK|YZzH_`B@rFYUT(cAt?f?E?H}cjGIx+9K%imEE$;AY;|Nb;1VQlP@8OacCsWST+rML>uB+J~cT#DRPjB{2@z-Q5|e1&5;SV>#MHxem!7Ii8h- zaSq83Kj=to@Jo2?5;&er8Qq0o_o$hFP$%-R?GNj#4IU)}y`Qrm7g%0wwk0Dd$Rpqy zToXpFldNxZ+K~|~QQ5*%#PQLg=i)Q){f>3c_Cx3ugcJ7^I)IhQ!xuT;EBYZ6m zH_md#-5A#)ln4=+=8Eb2-DaP~En>fGLba1syyAq(qsMGg8(Lt552LRw1F8<#DcwYENO_{?_iVJh=8m4bB1LWc8?h-m!Wvnx3$S?z_=?W99Kvx zKFNv8=yy`QpO;%e zPMYB;OwbmN(9%O+2Pzf z5`ia1)_2j{`zQI-Ta7U8;F{uVP{iP!g| z{GF1|*BFEvtv5xE6`%D)DXesib}T8(@|c(;r)~>Qq8gjX;p6AvUE_jJGM1gKcQOeN zCWS@ppnM#-D~CfA<;&JgKeXIM~?yl2O zK7XasSwcC#7oW_PeqEy9bOc(>*tJ3w{$AB=s^(B^z$O$V9+S}fL#18{p-={8h1=r! 
zG0`U36Z74yGM!iMMn6VH=ww!Q*{7{OO}rrVJ;%6WL@J|dP%`E&KYPTpVx6HcGns_a zjX=mx(k1y)j$wgzTZ!+XI?3O*MHH{c(5tdY6x*l$~(6l9hMx&;$p)BB2TKn=CeY1b&YI z&(7}?5*dWg002*onLG>?CTh0eLrdUVmA}b+)9g15@s_$A><=mTi0wtWl3g0c-|d=| zvr%U~jHbQ!#_}V7=H43*No!3WcbjVPK=%A19z_Y+*E)}O+IuFuXy`8JxXA9#)}vQ8 z5mopmvbvrtxw#y9Y-zei(;gpz;K77NFN1Hw1X=B|%u#$*^(dz_@9u=Nc{_e+*1?n3 zbdiMj(9(H;s+?eC0n{l%IV7gvXG;Y3O{T{9PLcn?-FrthwXS=kQB+h)06{=XT!JE9 zKsqEU0wN-!0#YOjB25HEsv%KOL0SX^6og1`Lg*brN4nBGB=nw8LPCmnuC>2?_WJgB z)*a{Ed-fjV-akUZgh3`u-giFFuRZ!!mBH7>j3O^Y(|LLtRKflUZ3LGMEuE&}yhQr} z_wBil9|wn)m7SCqJ74-@TnR@+OP?&trv_nU(lYYP4RQ^zP^LORzO?ATt| zxC2wC4p_xl7Yu_#8mf+eB5K-H5pwTZsrQj_*D`7>jyR+=`&6LO9-$qa{wCDi-L-7o z?XwHu^|mzzBcw^V^)pRvbt;1x{T~Sm%R`0OPmSW5oGoWf^@S@BRSLE$SWNTb?3Ib; z@8oLk&oFFW_O7LEF9zW68q8p=r`>=&2|iYikj&j`$e?PNHwqim08A{;AcP!7WW$bM_{bvIz(bDS;!ICy3N@SDg^onH=lbP$kIMUpl*v3DfInsFU|n>x3@=H&$Lh+7XA6r)dF%GwPrGw- zG5UlfocF?}<&uw8YwID`LW|AXu)*`eZ8 zi%1;D?fzgKC&Axe++?MAn2?~~nk|W;G9+eK=&->2p{jStPr__6F7uX|_J#xs?Y)~8?G?5BVm~9M&Y{~Up`qdirwvK% zaC^>egKC2pRo;UaaVK_`UhtL#eoeFmw?XNUl{3pcJrPE={!o>^?7;ifMrxbR1+ zPO!c!2@mq#q*$;Q3$`?tCyh zv)Sx?4s`f6Ft-0cbojq3s;=R~|Ab2XI3OO`BxERk=<1>36R&_oJZwAyHA_`q7P{VN z4e*S=OR6+63b~I1>)|Eu;IW+(ZpMA0L@6~Lj1gn zn#WcGvPddJV_hIUSL=!18PoK&>?e@F`YxKodoB57pX#farZGm!{d+wX=k<<9{`?SO z8doSBGHrSsARdK5nzsjXif-S&*)6hD;2tJ?s_G>b^ARS1WLrhCdCIFB+AMj?>DErE zvtr}q)XZ6MdO*ypyC@q88_a}I-iTUMnb60W%3W04cO=4Ns>8 z-T=m;4&g%iEt{@@(}+#1+in_`56;jkk@3@)(*zvv zRjP#qoc{OiCEL{9Cq|V?NkEug0Meq_TImieEbpOwM`^zgz!cNb{cZu%LRy-n(91N_ zSee~WUC5K7$DMk{LjAO#g-b7AXzh}7=&VF^Xeq#Wx{!g0%CrONpx<88zOVj_ z2|6TNN)sd+&pv)l3T*;pL1p6(-$rvn$Im>!m86>C>o~HbT_x(reA!ZSRz^6{@A%F% z6A)Ol6jNcaT0EzGgo@N#Z)cz=Kew~JQRmV*4#3?MyUm0D23ggg--A=X&E^OZNUUX> zqUf}ErK}p)SzlNp70`&ROH*#3p>Gb!LTJ zHORB&RLEC%h>)=qw*8K!;-nXIYkPANfo?^O=8}U+khSgEk3(zaQ;r(1uaz2tMou(> zIX}-BiW=zb-ga-rrJ`7Ua7}ekVrwFhlrfc&oVHSU?CWO@J}oYNQ+su@I~@h5x1^~r zD4|4JGlq?R-+yMBw_$F|LX}-otA&B5MY$mn8U!GExQ?aIy;wgD1J-rQ{h zGF@%QbJWG7>p|uO*gBeKW=Jx>oq!*Hl^@~*Z8V(erWJpLv4_q@a6HZ@?M& zr-FI$wtP~4x`62sZ~^~B26a6%=e^ZI>?t*R#1z;=%~j!s+9YZBn1iR#t?TwR3V2`f z5~5k+Ia{5NFPF{Jwjz@C>BTxf5Z&9t@|{plqIWRjB|#c?oMH(~17aH*3sEy=+0-o8 z)fFz(K5RMVw5XT2b9Pihr*WJrHGl;8MG-4~XZcG`V<&zuq4_gALLu zEZS~XUCG$r;mpLwob=_Eq~D{r6~LFQk(z8#4`zd!+H67r5jtw9+7G2YZ}C_j*4laE zeOxsP5HS1=LKvs;6Xlz5hY^}&L!bpY>UK=+1LBK=7ic&Pvwv}TsT+Zz zU7;T%dAB5bUmu{Wd)vA4g%@8TY#5k?D#dnCpIuWx-qb1{^xxryUZw;;b>jahho#(y%9_;b0!Z~0Ln9w91 z(oiHp8`Z25=jR`mz7V2Woh>+!DtEBvdS;YHq|GuZXa*gGl)KlwcUuHzUUIu^FSt7R zPH)^9Qmvv#SO8e3omFd=?b8Cq*{8$D`+DuMZ9PS&ZE5M;Wfx|Ww@CuNn0=hXSkRbr z**qEQ*lGl|xA%Vm+*FK}M!zPpW!Y7hMLvFK${qnoX(B&d7Ut5|vC$-92@G^241783 zs9oIv_AK}KSV;og`2~HoL+d83&jwz@r7Y-280lJ~sNxz4g4VtKD{L>Rw!tjZ9qwNt z)+}o-S;Wk>RKk518#p(@b{JO@qFAgWTx2ZX;5#nfcUY>n;4A4zNIzeT`jk{?)58lq znWoF#=PKF2hwLQw(@~N|_0LVu^azSj5{#z*OAZXylaEY~(^ z?IC5}MWpr~Hi!J944H_I9wFuavIXIfbF5^MJ%C5C1UNq!F;=B?8I4)@UL5XQy`F?#JL)z zGWLsufT*Q_g zKSXWrZ@H?^x$_Y+r2P zxz~86I5LUTpw(|rxdGV&XwS9GiBZ2mmQ|#)lQA96CRZXu4aA?bKWX0BK7*!B*eUOf zMx5Xa?cSPro_fAj>Zv(RHR3%LWAN$WvwJn;vTagw3LyzG%Bx8H9Y90^3@F!5BCG~VTzWb0k6N8eWH0%7}KM%z_u_V zMFWOHYrn9R{Zi$Hw#)3%F>pC#qF{5k==`0qnOU*EET3|N%HbNxJV&!9@1d#o9HVY-wzq7(*CzkUMuGBq zmH}(A}4yp1(lU1J3Z>byemoml^qyRHhz^2iTpz!1gZ;FUe)Q zp4~|BcHlm+`0m<3`6Y{Mf^AP-79gXGGY+9hp4C%z$pp=6p--DL5$9XdqNM5YFxU5%1OE;VLY^~|4sUG#%hK4L#GBd|WCoO_=6YVJ>?oYWE7yR>Muq*x+@OWHV z5dGtQA!dUC04y(H`*Nr!$N`kYvyc#`bipt})Ayn4;k>AE#YqDx^=IPf4`Lyg!!B%o zA!ibh=E1G@ipg&SKVn7rqroQ>yY%FxVuUXWr~53`8tX99Y9u@f({HN2o#g)fv}D0> z=?8o4*0Cv&h;ghHOC^?<5yAXzIXu+{p*|pS&n+c!vBtJ(-Z!!(_vpZV^Ra7GE4Anc6|NI3#{}@kCS9@34`MKOfT#~-xUVo&u 
zya!w_(Qq^{BdG@E0z_pPG0QVxpEIq_SXx4lPV!o47P!0jbTw(sgzR&p9%Ccw#HBNQ z`deS`6zI*r?9A<9y;7)V3JX2Hg&AY1-)9Y|F~VbmY9d--;OZyxNdwk%1S(+-us3GQS2%l^KeQokhWn1V||FJ1cQS^&#S*(Rl4rHIY=()Cegv$rcJITIakat{evNQ7FReFf6eZLes&vvM$Zy>voUbp{-%mS;(nGh?gq26IRRPh$R(%%ACqGHnRv;uIU>L_wzb960 z9@Mn|&_aGe_$2E_V-RgK0@qln?G9dR#XPi6zc;=d|KymflhdgdUX$>w;K1WKtd~G7 zNbSZ-p=RUNPvNFJIP*zh11;WnXo0Fl97L0E9vE}A7+-50J3CssCvWQV0?27jK`H!y zSwH*_5-P($+`aWr_AW=zKcN-F-B1{b9={Wwwk&t&rp~sij)F*U${$i^FoeQ1^3&^k z5ht1X8#N*MQ^exWKTr#Pq$pimBl&lEbIT`c3g80kgnRrScCP}2HpS0V2hdQog(__8 z0t)Kby2U@F(mR$}w*V8@%z?4Dx4ue+9jIq1m35G`-f<|9{jP#VaWXf^JuzxoX zI%_bP*SFHM%>KOPbVpIrSM+v73ihXwO5%@nUO-3eO2W-VBojMLG%{jESDVV+g44B1 z*!^@R0l3$S5>)G}A6R2Y03YxP>2DDgtWqf-3jg4jn+^2;^lME4uzU_=+sfEYV18h^ z1{C`=&dl4=R&><;t!(K%6lWvT}$cV;M+SbdMCo4=ZPaJP3fQwtETW?x&Xf1xe# zT>TKZuSl4zmgR4C;#&=~?-vNxMUQ@8@!Ut9;l`Hc<`3-z9DCmxMif<25CAaOOuek?Eb7g5%R^-( zmDq3l{+;duyBDsZ78CY?fSHFuLa*sEgD>**bp|#{Tx}-T<-eu-IJ~=DIvdF;c+W3D zdjAo&w|88J{6bW&{~V=wrt`_F-xG_02VoxGt^&Zi2Ca%;SKs)qFn9^3jIp=*`O?|N z6!)11c_-|)YZ)rvVz~Dlxym%#_k%U;7fu(jt{b6?=!Zy*X_^Md0N{s-y3w{B1O>EK z&s=tgaFOFLLqiRNH}W1iI=qdUjTFdD&Hio=jD?xw?!zTxM#jl8%tG3Ng*FiR}04_rBEzBuzyVzB+W-w6Gx`@l);hG8!9+E($mg$Nc786)o$+W1mj zf|fpchLHm+Nvokq5?8hvJc_nMSX;NcHxwD|1pYm=j&7zThk7_dJ94%T+Wfy@>X+waKsZFn0q{7_(m zfUq}fS`T#36CM|E&wQuk;hXP@yEo&j>GpD)`#&8~N-cJ3p$eW-nsT+NqMg0letVkJ zqvhn6{Y*|r_*i`7+1`-d^KD}p0uP@CVTzmm575@2+u%#m%PZ%xt)!8aiuj^Di^<}> zFr0k)+tgFMrYSgV4S*lc{o5Uy2mb_Da^7i!3W_Qc-{s*w)XQA;j)b2Tv5U?D2GCzP z6hXjg*PI}oAh50t<~xfDlIqaP8wsB0pTs?2%W6w-lRlXLt)hd*9& zhYRYE_l5GiF*@08Sx-VJ8{&A2S?T^(yo-iua!O2oy#Fb{h}{6y39hj5sTv@Cvh0a* zbWHr?I7Vn?x-2fR|7K)~>~W^cR;KL))#Qp$p-hw}m$pjSenja4<_J{Or=Kv~oKC5n ztq|3QN)D}Q1>0GDwrh*}kiE+D(0A^k*Id-Iy~Og@HGKB&a%*&0W`mmgwphtqIJ7z- z@V*hS+pgHC3I?Z}tF}l*W^5!2Y-J2}uVOQ))l|sJQzd?zMCtaTeR}|)r@)CyO z#uQcHGBLy%0>BgKACEdMMgbDgGd5-sjp<@Gq7_>}$s_*gO)_=ewQen%^xbaH4mQ;Z zuF&9Z}fyT_kjF`>iqw9Z(~Z??Y=Qi=H6vKt0##ul0~C=jYQr zMopd@>t5<4Y0$oQHPmN6jwZoVo*v^p`fk9Ee34l;SeG#=O~Ln`53WHE8c^Fj=?$_W$r@ zSqkID-}787D!6*dP(cj@EKw@94KT$(?tAxL3<7G8rVBG&`pnKH&0XzRS}4+i-C=*R zYtQ4K3oCK5H>1}6Mwr0Bbr6+rKUe)G?4NvGIFGhKXaAir88`D83A>tWIC^^o`WHU+ zePvUVE2j_KeQ=;2bSoz-nG2M20W<@WbWy*)nEI|s@5_^bkn`CZ|5z=OQ3k>aC4V|+ zka(1uos<28;8(GL>RZG>dE5A}$Q_n8CM?<}vLx{=;4VWxUV1E6x$}o?_3(5M4tz7q z4zSUa9zz{dU|Yk0ZK;sXO!NwK{V%XSjFUOMfVoT!^skgsBG#N@ML3Z;u~E_g)KEEb=z{oVx@-hXzIzi%nA?zuBD7W#Lt7$U z_8%%)bO&#`9&5DLNlNOeHK~T!Uf##G*oX~ixu8PRCJ(q4j321f^ASd!5M31BS#CkN zki;4-F@ahZzG*9FMz+H5lXb!~B-IaJn+cnH_9H+egrRi4TPz}jSn+9c91f#M%&ON} z6P3-RkDZJ1^F`I~!rW@k5^BM%Owl5eevsVZ!M9s(dLoyXI+}_lzdsxaSy}~#Z+TD_ zJ&AG6sL}lzw8T44{evq|tM4B56_-~u)F8h8*DM)ax6Xy+-jd;F99-`nAP8For~RWC|2J5OG>DEV zuV6qS^aM7#$>fWB55sE^^U9{#R-+Psex*LiAD6MKUl!W>NfyR}f>urdylZ7GhP@4(%<(DdbkbdofF`wM%) zpsAmRG^2|dN8AjNT&PgDuh{f+vY1}icSQ{K&e1S^drjbArVvotxk|T9@g{T^44wg( z04^|A9^KO-qAG`tpDHRIm8iqk!$us)5ir&oKP`SjC|14^Dj6dd`Gp?q9mK}hrX^K= zOma5-+szYvG%7roNcIqZDsq=*s8<7+|1cd>V(vya&=+DjmOJl3SIcuo>6lx<<(h;Z z`p2IB=l%U(^ZHlczpC(>N4U+xz;rEQM&>MxcOvw@()Bl;(!W65ZO4N(s|$~(R&z-$ z-~Kq4eAsK}T1ctg(aP>oSt~6RIhI0AqaP;n#@F4RZ9etE=VG;<`t$|ijv0@6-49j+ z_~&fEwYI6vUtvlNxgm70leOg3td8vofKpcLU!QcD1N#=sl2VcPI-8w zH3E^Vd27_r%2eW!;fil3gkWndSib`vjhoSJYlWQjuf7)^-MFurI|Y+o#F>`stV~*? ztLon5p>DdDUDg+??2dKG~dmoLOSVpFy_6*NAbkYiJL#+Uog8V)TDWuC}8&+ ziyo(2lk%|p!W`8GWgk#^k;ZgH0jx2T0nbHD9$ehnNG>ZYFD?)1={x)GC^sO6IAF@H z2ULYXLhWHA_~%87mK(0#mYMBd`fKV(^bO^ka(MSvXmxWK}vX=Z^*Z(hjHrLFj zR0Fw#=x)+UtW!q5BJ`)?RpTEDhnRf$x-*R2I&Q~=sMd{Hix{7Arvu}dz4@&Hw7Zed zXdL&03#d)gS;)CT=6Wwgfxymrg0WCyH#dfxtlUZGqtfz`V|$yxXb1eK6y{o-N6F{? 
zHh~+c>Blu5){JbZ!m3rQ$Zc6Um6g~JXP7tf6G#p?>nESKsGogb(qTm4ejGIq6@n0$ zM*7#Ee*}DZ5T@ZrhJC)9IVtYV3wD4QbMdnDwMii?sG;Y&IFJe}Ol?vfuEGU*8dqnw zQf%F^N|Ag5t~Zx}3x$*Lu0f6TdHJ960~ESLgv%4C(8B!w*?kOlKW<%+2siwm(EOvL zsf61|1^Mpc!~J2FY~%?1_>-+NcW2eb!jMC{^tnjzGCXcjyEd&iZsPoC-v zWK|x4F|RG8Fmj;Ede8TpZ_||1Sbl*nU$){x4x{)V{sP?_I`IpHd!o+_ST&9#-yrRW zHRU*=ccbiqlh-TC1hj<`y3D+9=YD~ndqMVjjyqf5H(oadPB4LCaUIKdhvta{{-9=_ zL%1^U=1<>gHZkcpaBFQEs*8)<`{s8XXs*uh(d*!2O{&-!ub%)JV^Ztm-@7(RdV1i~ zPF6l_HL*(!kZXG8J8#Zpwphtfr-Z7Wpn5L=aSb7?c(O2=#kfk*Yol(q<)>iFKJ z0(JBk3$h4$JrDo>D6^k9ILCsEzUYBkvbpbV>9+1c44^@rV`;{{^u4l4G;OQTd|`6) zb&X3#z}UOjSytya&wk5+DOTh^{7LW-4lxP3rFS&XiBgHdPv%NS%j&R=q8mS?@Fb1e8L*^E3bpCjFt2TT(SN6tG-bt=$sSq zw-+gMQ4%~?TSR;h33K^}zZ`*ykPI2K9D;tTkls&w?n!EdI1-VB49E6~Db0nA$)$>eZ zUidX^M`I7d$?S))*65VN%5cT!+^7!zLOSSgUw{n=Jv=dAcY1ClPtw*7*|&$<$%VMa zJtG^3^u?Z2U1Ll$q}sB5##>KGywZ})rnEJC_-Y_y*o=N-o@+ZE0ygvd?XI`@ai(9L z1mfy2Qp#rR@tr<4Yxs#<3#gno?)%%7m49p)=L_i zGQ%GA_}R*8vA67L@2&yNWxzbHiUZ*~139z`;nuRs^wGI$JpupDTPb)C$Y~B|cAOXA z-w}?4#{hl|H<6HgGDuxNX}!*vaRR`OI;iWnidA* zwcV`3@}j=9aIY%t!q!l*%=@WA&b>n1!7)xRrL0(+HiNozFZxZD3tF934-p%(TS1L5 zQ8#gVIXUOAf~3MgYM|qM|A_ngXMp%0HXcO-v3Dx5W=DgMJ(d6+7>EX9KJ?jL|707i za{^jv^xSZotAqj2H9|gD2?lTY^Z$OXAOHbq+j8ksiB79DQU`ZUvNwA}L;q+rff?lZ zfWZH=Fu%1a@nmYFVJ;&!g$3dw=|@ z@RY$TuW~N1f;b1H-U3}ECrBjwFI!1zd5fVZ7ln=~i|854o!cgDYN z+-yIenVo_k3lBQN)~ziwMc?bXv{$hu$BBFO-d?0w!pPu zr7R(>s6uQ*T{xAyr>#|}KXAZ2&*bL2b1zn;K6p89X1_|;kb4tD*n{yXyGoe+DAi1a zn7z)!%FH>=hqoRt+6CnB+zrAJ(i_I3klBX5jbw@`VH~rg8QU7C6~wsv4K+IvO_xJ1 zrXq073WLl(7%L2~(3jAP6baOrN`!2tUf#ZcskT7ldtl6lo;fBKr)K(DK!fL7=IMx| zXK+TP?9+$v+K@RKcGVU9-C3VIYW5>OXPL1?Mh&6w?4L@hGiQ8)s)6562|@$=& zy|80!koc8}hh`5~n%+iFbI|WW?waETBj9(~EbmI?neU=L(^{=Q-EJ`n_mkIWSp8lW zJ`?%3;^=TW<{+F!jM6?MQPKAsYx5YF@0x&nzns$9O4!a1aNN)Oxr2kde0&jS-Z(e( zCw1*8e(T8xbF&+B>85@Q3NUtaf#SZ*fjj8q-StUOLnW1ZI<%r%H4fW z;a4uRTi;%6bve`$*XYBjVInM*l5h@4Kf9W)Z!kt;t__ip#tvKl=#{9DIjsY;;j9GX zz>8=RA6qw=(U9wq4_YWrOHoZkM1c3<=xe^=;SH|%k9SixKGUUb-Ynm|nIRO7Gfg{1J_7;RyWp(d=9LF#FBTP=bh6(qDH+(gM6+$ z{ES?_-bB+uH5L-R>C(My6Kf>c`-cxUg1}C;qvp|KAGT&v{5ZOfN-0tNg10oKbaZuAlb6X$I1r-wW!Y^6TZs>y#@ zoMb$A0mMgu6fM>-F?lE#o&CB%J{W7=sI|!0a&(SW;JBN4oB8aAT==b@ty(&}Ra33e zj0qI~G#xjSNM~z;+)iF!m7g*u-pBQBECH(zVX>F|-{d>~>->tZTR8M0LYc__`6Mn+ zhg|Yqr2qm&?DO7O{{?!zF1ovA90njQjGMDvrNonAvB5t4s%>4-qAB8(DRk#QsqJ0?QtR5T?jdkzZidUm8__n znnPV>x?=dvRR}kxos3$@dv^O0+t9nQ77mdapSz;eJAvp_e}jty`jO4gvLD;M0<}ww zm@jj_rYX`D`wrG6bF^a5&n3LSp5Y%W`{gB%%jgUEF^XMN@QKTPc`xYfj|EHw828vQ z&BIj?sDp z#ix&@A(erY>a{%55t_?Nxr(K;XmXmA90x*qlb9nW_FvQ`ck zg($-b&r#Q|8;@z3?$x`c8n&d$GyXcbP_(UvBT10oz>_ zN8_J(o&P|svu&KlkSR$v$u4{y``1Hew;njJ^C^Zz6<_&y$IKyV1$K;iYUOKaFUP(4 zs*+pZWKc6E>##iEg>0GvLKfFv60m9{!St~{*~VbTo1Spot#O`2|8Q zM!)}lK^ga6Pks)&hY?(vE|SeXxX&M)2!2%gN(Gr8Rsq}|Tsn5qqd^2@LghER(m~!A zTN1}d_HT$SE*T*|n7|dnh;5R�DVJz!NvLMD2nHS=@)e#qqo;p3+pm(P*T#j%5Z zvs$^|kT#nICwRGZGj~sk1dm z2-sMx>&z zKm><7XfDcMI#r&es?^juN=V6EskEn{qG4g~qT&npe4pFeX0Cku1p;ruY~~`*=>4f8 z|G(=ZL{;&*y?mpNc^ND5d0jtlu#Y&t?KRjXzsn#1m1NP|=L~%)HVXwzZ@ulS4iE`S#xwQY}kkdKpI9Pl=3j9?zekop+OgNhZbkv=$}puDO579sf20u} z+-#Z-%Dk%pP+LC5kc%b|s>{Y7y;8NF%3zX;7`Id(`ED8buc#wr=-=HwxBku!{d3;n zKPZZCwGI{qKc0vWcnWG&q%OhUClqCE7*h$auNz1g40$GS9BDMtH;L{Hfd*1OS4&0A z{Q@OP&&!|iNw`BoW{7{_7SMd~`nlH}5wy`(xw%uH`j>5r^f=U~;YT)4)ni@sxa@QYk}jxoe2uX22l*udVTA-(K(XL!@@O0!L7#D=%M&wC`R& z|1F_%)g)CwKlV3W-rLH@>5GgEC>NN`IkOEa8u&Ca+khE;3LHCyubz2fpb^nfq}CC_ z5cpQ>qSuemNt;}XZ$3!qAghGj8&5U^{PTZ|gW6RL>$!KmBV{~i11+T8jt~F3Y=}e` zGs|JZbU^~ctsZ%#V}d;T&{SD&hvwA01kv*KBV1 z-c~~PP$#qK+}@oZdjets(xf0lP}8J79vk|!M%(wZEP&EkcIh4Zp`dBCqH$sE@_NXw 
z{Aj|g5<-^rs=xGo2*tz7K4Mj3KH%niwVm;w4D+Pxui^JwMuk1t6`S$9@#Z!z@6-(QaAojP*z zUavw2Zmo)|X*IALrN2uwUhgJ8@F{K}(Qo~jnauTZRhfJXxmb2+E_&smT{WX`ZijE1 z%48YqCr3`fBKI@jez~)&K~1+s`FrO*@@>o1d57LoScKoklbi93yoK9nrrtk_)-UYy`*c+=l9gL9#_ZHjTp61z~nzk8xcTv(sSAw}=26phn&p{|M%$~-C`qFhWe zZjPmu)SkLu#$7>p*|ONRnA6m%4Hu@TnRh_g>O=tGo0smEK~d7DijuZ3j&w8}^bpTu zE-k5ba@86kcYLgxG+==U%}k2brB_zV`;VmA4W`)!3OBl*cfE`xaE;1S0n4TVBtVJE)XZelImC{O8YZa^jx1y9TRpI?l(2RvnA*NWmy2A|p% zm%Mr!W${J-6!>=cD=GW4yz>%|-hcdf$93s`&YrhdERdB5zY$9&2K0)Dar5bW*}Y;N zs#B+g(jNG{)OUI@9W;a)S2c_zXYc7dXEI{&z((K3>TZMprhL4gQl|uYEK+h!V^=Ob z|G-i-1>3W=mh=BEF%VBreUE^5x|?UdW318ZAapitJM7L@j~K~Cu3;)kXs>vSf+EV{ zr1vpjle8FlPzdEwr#=27ihC+UZwVde46gZh%#Cue09r~yN8lW>@0l9gvKa#rF%PJa z$Aw)|`l-+x;gJI;+X}GU7O+|`!d011+h%<>)NtL|EvFz-3mxzQ)l ziS6QZ!wz`2prnhW)VIW`XzhtdGo9s+#8##8to`cPAI7Z6UJNJV9FOnXFHo5qKWlew za|oEGVE2IhUEFWDTwnOvnHHWmS3!Ul1GEIh0-xHU3ohG3`37Rr z!?WGauQ2yAXMo`{(zLUa*Lh}J+^voZCE142pDEZ$;2&6|&J1>~Q_WDm(V{Qye;v630@cqP zw>*!{f`gs(!m(=9s2#q5go$ViE)^(WMhcIrf2TgGl-QHpED2a-`~X|hA+WYi#kQ?}9lhfS!6 zY2#k(Q*PXM5ckA7ro?YVrW%MHHa!#tE)5~e4+q|lX(v8VAZPd)<9?vB3+n4tX>rG_9A$()t!P?N4_7^>3-YLtn(zKf6j8DdJ} zHpda@k)!J!N?qYOhO7jzBQ#?n+ctAxLA;It7 z-~BkJDqr?Vc?i`hccnLhAj*4hA#?qdi+vTX1a@J4Lr(UbVSYhbW#N!L zw`0hkqH08Pr~myp>k=CU_3;a$eXpZ;9}KP{B`BKhjspM6-C8vKiGHogJX$_L*=-q_*i6XP6Vnd|QT zw{74#lZf6S#kFB@Xy*Eb|8=0{cUt9t=O1cM{5bYcF@K2uzpVCu&mRBJ$%+Hsf5A5V z6S4B2Hw!r3od3l{Wc|G#xIOedrdOZiUAWpPkcxNv1@Ph0-yUW*#otC8Rd}2=!jh~_ zb|B5&qj$X}=%h>O;xQvG8r|;#AD&CiYM*a`T&CZcjM0CN>gC&tdtYOeO~Kvaxpp*6 zq~KDe=5rsAx820v{sR+&+X-i+{6KTmK{3`RTyi%jl0YIOd)mxTeCu@};vqkFr(Ean z5R+$HSd*jLK{T7p^2-y6Q6}bEmN938h4(Wl!3^YP|P| zMYN5z6nU$geW>5S%{`pAP(fc$z}{Z%bl{J`A)Nc_HjSJ`v0}tz%Qw586t;BcCVL_e`zW`^bdYs6k+wWMFucN_t@p){_`4Trq>h%p0O^heJ9 zqng;8xjV3|EiGM9@$Z>$TVTHV7T#53@aWx2hKC9KBC{PMI6UjtkSNj2Uwllt`y$F= zB@m`i`C5O|nBjG$$nVhy4fDQ18j}CS@qt-0A!QGWn@XqDOri3Hn8C~={Zu46^LIRzWlE$ruo#3buc7xufTPj<&kAk{Y*u}p4M z;~)jiw&G5W=L{^`povNU#=3V4vcJ?6$!K{>!7gurJ0$0*c38N9a<_aROuoCA>K&%! z_j$;U z;@T+#m~42V^hr(Mpv`NjsadU8(%U_9B{2btz@z;fFJ`CJ0SZGHW6DRTesZ8L@|eCzMjFpYMA~4n-8eC#OjEE@{%<+U zdUQ;u$eCI=JYffF*IzQJV_t(c?#oN7srrOJ^Pa65@<7pL1 z5p};$#Y(h^<%hW&Fg&zOi0x(=pN4`-$Wlz?{!<9Y&5wKAV_a-h{V}(D z-W4AYHfcA4xL(S+%s@t6@O$)gaBjOHH{0nUYSlEg8 zF$VWzV*uHI{DN;-MB>Xl z84lZS-!8Ee=l9T2sF}041( zU`%9)npB7T<(UBK{u*wQcPsL+`R6PS4`Pz+~`U;BWfHFA#3ygTrIqYCX=A zOLd*I;cn?tGeFJUPhcJbmr2hhQVq2rdn%PtK^0q3)V_x8D*U4G z+1^vT?xa!rkS2YYt^w0%Auia(-X&9sm!wV^4echOn1vTpA6 zED?3qLC2fM-;wjB{|dck?P2m0tCmB`hdWNK2eD*@LH;lH-aH=ae*YUMmG;S&NTxzW zh%9B9X_I7`kR{tBA%rCRm?`@{Aqj;@b`#mR$-ZYdc4i1!XDnlwrTbIYIp@00Iaj~? 
zcg}TP-{0f@J?=kj^T318eCGXrzMrq>LQm*p>?n?HQUC`QQMEIMj%$r`w*%+tPwUf% z>s`b?DeuLMO23{j=9Y{QQ;hOUn#jYMZWk7t7L|N0H50%b;IxiwQNdzoDmL?1P(O>g z`ql7|XHW}~CgQ@@`?16?!=1cO`Jokps?@wU1|#{4H}dkTQzAsUk^u_e$xAj1vLboA zD(cOVcyEL6s_(0XzaEPVHhy1lvkSC6P(zUxwKD$~*qCc@z z@Xumcl z{URv^4zsz}!_4^&fgAM3ZwjX_hCzPRRUcwjQ6-m=TZP!LOB%bY4GR@e1S7+?d ziKotCr09qo;PI+ZK;reKyI(ffh6R4E-6`?fiA^Xl6PL_=7!{F2vh9C)%H%l(*1i&q z1eX&#!&cTSvd`Z`svVI&C)fO}K&o-CaJtr;?5`7p6NDP#N^ly*F-J38(bS&ok7gTZ z4o~g6nF?On#gd6prDrgBfqh&P??Ot9J+h>JMaOdA#i%ZTz}Z}+O^vt}A)Sp)M(>O^ zc-RPBRXS8~eeO}~{NtQGbC;)i>5`y*cM?q(t*dh920AyO=Nm=Kw5*rRiU$B;?E z^nKlCDnuy31j+^Bw_ylcX=hxAbRt3<+g?2#bsD^I^3&q6Axw<1q8nlytg94^%B1E8jA!74J%CF7L&>7#D z(rxNNFD^@63`60B54>7`INY*vRs&A%6;4B4lBGpll^N(&b#hqZ%s=!Rmi3)24L|eY>OmIGEz``$i(37r;F(_LVnRBdS7uQ+zj*`c%PR- z-$ze%HLs5wz!)?aqF%P#2blB2P_qB^_WF+#?*D!}2LIXBJD6asgJak%KD{=bn1_bO z=21SW+3@HFgtRl0%jizUB%vIC9IiL`dKoBxDwp15Mhq(ZJnQLM$ zKQrHBb%33W1zJvS-;Z)!Lu^PuWIKvv0puX-Kf`|B3B@hc^thb z5;wN`smBm!`4uS7m2@-fW^5o_Lyz1*FV%~Mo8jK?ctGV(-DRm9@HBVd*Jj!^m*7}RTC7jylr;R=c1eUVK#oyt{VoigwFJ|Sfg zmNmaAH8eDG#zP3`mJnH1BUJC8lu?bwC*sz@0nEALWvYGeOf;t6uHK^8FnSM;KZD7x zVH-eU$a9oT)Xah|JxMD#*r$>Gn$$K2eX~j55pmba%=_&F-p=5n3x(dhd(=l-I@|ax zgpao1awN^Vf28vb#1BS@Y8%cG6n z9UUD@^|5YIm22${<7!7ge&koPx+s`9jr%~8qZ}u!1X)VPKtIa(cvQr}K107oJ%TR) zI`pDMv9IU=_{Zzv$sR^c5jSnipVoCve44?+sR#vI}2fa0d@-wb6>s?}Ee#*2c zmJpyM{^ol2j6>U&{t9qZNWpdRDP6?p%o|nTTg1j@Z+I&oseZ5qW18|e^$cp~%@-?8H>1c?cji=^tK=0D4vGF6+l<@=Scaxp{ z0F8wKo(7M2dHx((hKw$+@?UzmuTZYnU7L-P8IvsH^+?R55+O#HXwKZ%TN}F)EVUJjU$}z@gcp*(A+k2Jrke zEAdR$WxVKjx!sU4bD@(~2ndpaSc){SN6eS>jsi9(reBz0&Z4&9`qp=OoyuasVrnw= zs|4WvFR)$v2W+2LOJ@EBmxuhonxV<(DDFMzqU_b{_7m4EzM?=yP&ONm z&n2pw((g|^P6&pMH(uxdpzMW4#Uo57QEU|HwdR4P=J1x)S&c#+?_;>ZNvgrz;NSkM z+t}Vk7j?C>vPaox#?1pGR1UoJK8TRyenfLN*3xl$;kvYm%!W5_RiDJavLGa#*D-e8 z!t^7BXWobA*)opz3G_cvEIXT*LdEdZG`&5(N4ch1xxxD_5l0T}br}e~PRh_!0O0gv zh_MHZ3OF(Jn;2W0eEZg5{Is3%gX&rCf^jK4Rv(>XwE>y+Abn*M1_6pMW!gAD+$3s+ z^F>DGTi1Si%7QwmN|&}6L_H#XEs3XA)R_r)(M;2tavpH2 zyEqE7@=43PxF`XFgpIH8aw3i!?|cEdG;HTy#W2MsO<0f_e1vuhOVC(_rXx4Dt$dC8 zV<5Sq+P91I{|Bru@K3mAXARC3O9RxkkH|f~#Az~;lyBA)4fLH~Bt6|h> z2bTUzD{z&%i?RKrebUIqLR2cmF0>#i2XB&6HD`;}qq{;Z>Cx39U}oA?O&0A~t4M;* z%bxqR{88&ocCY(UfkOqJ1`D|;rL6X_<9qdWpR4X=z1ql9rcS^hOA2A4HJw5E&uF=#jw53`Z1GKgSqJsJAJtVWpspAJ} z74#$X$4X)L#L`Egtk-uVCL`hddC`(hS&>7EG{B)TjndIhKO}@NyOl|5Rl>A74bjQS zFh;&e9=rx3-c#v?KX|M$UcfPbVCiH<=$Z8(_Gn zqtAEny6gSNyei7sL_NJXJ}1upP$t?fz(;fmZz*x43Z3GDg!UdK$u|->R?$ulaN!L_ zM9(r)L{|14cmcHTGKrPYkVaoOvx>ndmu&Lz=*MWC2&2PiY9_PJedm|k@Gqtr!&#g0 zv$36)n|C}%8KzSQ5NlzxgWAY>MC6tOgNnU|?!^qP;iC|u6nGj1`JmR;O^jTzHagnQ zYzf`yxY7t6>+~eIieOrt4C4l}MT_`zN9j$he*XKkQ#FmG^ns_v;9?KJE&uVVS=~D^ zoE6WrQh~b89o4%j8&nt%qX>>5LO#e6zP#U`cy_{yv8=!D(VR7W-NoWA=ghe0il5`Yw+^1`ETy^3( z>d_<6xRO0I-gCSn$36OX=|Is!nItAJ;Yn2M<95_@mSmQv7g)Gnsw>`~jzY29Euj3s zAHzRmrh&GvoiO`*k4T2Eb3PtQZX4sKCsle)x-g8|KO%4;QX#Jm%8NrN_bN&zI^DXT zKwq6#nvx-yzy*Pa(L*8269%(b%dS;)0C=~OCh4?GnLn~7HT9l)3%74XHan>bdwG-$ zP2t~7@xr45%kTD9D%!HW1}V7JX-XX*!#+RC@*%Ifr)QvcSWN7w&iMGQy?jm?w^jI< zfDkf$F|rodXHl7o+2k4c0?ne;2PFy6!xfM?44?MBU?F~Tv~ z3{_IK0M>;Aw$*3)K8ruewsA1A-sNX&{9x1K)23_}t{z?jjzHx@z&4D1MbY+6L3#46 z%s|V$oykSkUg@Kx1z$4@J9q0>PsD#TH zVAZYn1cXvSvZ`y6vS+@O#Z}#p2`#}#!|x)1a`16(;%NE1PDTtym7^FMFp6l7TUfJp zMtxGXr7WoRO3LQybgK6DwwG7OUpNIfi1Epwbs$^jlJ-~1rVQE?i(pQD9U#^emb3^* zJ(EAT@LBr06Fic5{{)GnUf;mZQ6J?_p}ww^Fuf{X*A}BWAG_K(V0M$dQIgsI`@ohw zx#NdtZXeSxWb7HcrThzVw_}EAFMg$Ae_6(KO?zZ)1n*`fN2z@;0_5{4)ZV5eTD`Awqmlr>-whz<$(xnnjs69JD{B1~frEpA6zJb&j}@p#5lS|y!~I(V zaw{pI-R|e+g=9_jq--xnod3oouG~q50;S#~m;Rs0V~;>|^^phs7lQK)_lw^;Ak+$~ 
zittgHdPG;Xh!E?Y%QJD<{qj^onvs0y#r#(ah9M3y55~@~IX55=;VOADjzf)otrlN> zF}oL|`owVtBv6{EbbQM3u=_DRQ-e#TtAmqj9L}6sP=6dYe_j!&@!np3_Rc+4EBfX@ z?!e5rR#R4DdwPaIJ{JjE@8~y-^||jwRmKe~@}Fjz4OQNm{A7Lf0;DN~W(UD5T{qQT zaK9+el{)~45T4C$CPmI7B!k*!H*y4%LY z-)Sm+0gw8k%5w>_Fq@UA7$MC%N8=*wbjme%^3D!k9tZ^4y3}3qVNGd@A5%%QB!}d+fDu3-%Mz z6@A40<1LbFGS4&hCl`E}Kc8>ZQpg7^l>5k%ffJxu)!PF_?+zmBq#8XA`vR#!CA8|I zf}5g?e`jN_{*1Eq9Q#)V#YyI8`aG;Y2gNy=w**KZKcR>ir-8ca;F8o!VSo?5)(wc5K`e0sGEe}pp>!<5 zGFCxP-nO&#_zMc?)EJJ@@|g={y%5 z`P^YzqzOM+G^=eYo!g-3M5_NLfY13iQxloXIu2o7{`5Lzu<@{Wjfi+)x>e8#5y^iKUl*xiFDRXs9|=am)d&e^+`^-UIVHb@0pEl~5413mdyUt;K=*@}Q+r>9h(M*AhP zR;o-Lz0Wq$#5%O-+&4CgTt3C>?YAP(t?6ZtyaK3?Z$M5T0|Is2QFs-Qd#r$;NekM^+dUYcdzr zEhsKY{DS$Z&Hin1EV!9wQFkCcK277{z_6qN7QYn*$db3DsF(f>rNeFA&Rsd|CZo+C z>HwnoG1qsd83uUsa?#lxx435hIe_*t}dEXijh;qv6pBlLl)`j!vaT z_6yzW=RX2ia5~D?{=7z?Yyw55N_1>FZ~3*L(}}k8{B_&gF0$KDqTOWT8{%?@thcGo zO8t3Er|`J?Bh@q9SStrQ>v7bpN3YH{1(Q}WW@ak}Jz`t;u&@MZ+#|}vx?rgujG2*( z_o)gMWRl$Dq+P&hdckr39wqIm8$Gjzcznr)2TQP9FstXD>$<(dyQ|-)gfRSD3N#=) zeBK6x#}X#oW{^uS?ysMd(L12@5PO7*{Z6D0CG<9D`50W#ey@8pXC$G-l zs!U|8UKMX>4?i2>`LS%Yp00-Js8nfM?MV-Sxw*|1(@+B2)Va7i{$8Gtj(PDjX?wQY z?jU;9Ja23plQjXevLK2h*0@x2pqk%5PwV>SxNl5uhqqThL&q?E!&}B=6=E!zyG|81 zt69Q-qSWTH0BdugR3nJCM-gO&$+GKgK3Cm#kbecfFbxa4xQY8X+$uSizAQ;~L($?s z0GDeBD*5Y5^#29P$$x-o-Ura|S@-&!`K%d6UP)vWP$^yf^q0)OTTH)%BCDGrg*il{ zD#ZVVn2j(@Nfh>Z!mfHkw6#w36Y(-@hOChFe0*ABq}3*_vp{A}F@KS1SyT4d+-XEA z>FQ*2j}>Etn6FpxrnbdlkvaI)#uel$)#Z4B!u`2zM>WgXO?E{EBlwV@%Gv6BO4(y4Gx{)+Tj(JI{ZbK@R-L~_9 z4G0Iq?f8cmR!s5tIKW${dkoC8=8F}?!{Q@SPlOx|6fRSKdc0*(>_{E|ij>TMV&@MpFgx`@ zBH0q}s@=?25QQ4zkJ)%F5J7fF-zUe;6hnj~9>%Am?XQNUTV1uZp#tEGP$g%Mj zYDE#%J6ZT$reBndFUoT4|r^UfnBH*!Ci_&&3_xCW`vhRlty? z)>(SkXAIS3+_@oCQWkC*-gI5{*bDT%Lw2>K{b$LBMP_1Zm{?(z9tzm`CdhwWs+u^fah07BWl? zI)Jp`ZuPV5?}%j)H<|&Fv1r}%%g6U%=H(}?>jf#9#L(JmwdXc9lK zYP6-83ei6ZC!JcfkG$w_c0!l0Wnul1lZ;{1VJM!N!i?GPp;R$(c(Ld1Y&1D% zYhsJ}J^&6nJCJMm-Fj^|^W@a^w+vGH>B|#N6z;G)C9+0_B30}&NQ}y+7=89DXc082 z+(`fj%RAA>%I#CxMhSPCf$X=tZ1c5PwS^YsE(#w)iK30~JWz==?Q?CD?P`MBl+qWI zx0kF$m7ijaFmDJ-bqj{AG5WZIbtB0&2+n`CGwbddZ98Mr?JD>d!UtHv4sQcaCa0EI7PFu>k@7fgiR!U$V?n5`xcd z>9ZT=Tph9OuMS2TiNjce-9rKl{9cMy{QV)V{bV#f*|_0j=OhfMN=vyi;;?)RkYSEw zQnOTP1-cWIPN*BtmVjoj<3=ces3dlmRg^pNX(t1S>M1Hww5qCd)49pc4$zRcv#~K- zeb0R0i-KK-zk}|C956;G_u`BPx~)hDhY?La;*r=d@!%Ddcc7VX1Z?#^$3Zj!(W;k? 
zb<6(B)g!h}ozUH~23i5iB91y^8rxml9lnpP&|qT|pw3NaAN(1h z_;0_1nUu6tMfk?F2=#XzKl+T^Nl>i6Zss>Ka?b#|Iq?$8OH2HaU)HyhTR0-KCLu9> zBNe`)6vm}W(De4FI#MuHYeFt20FS7{Q$@OCd%Tjy%V@WnwhUe&+pCZA3sJD|cytlG zH@vo#D;A&A3x-M}b z&>o)zvPtCs_>D>T{fLS~df&CT&Knn6a?3?9AH#I?Racd5doty99F{7u`oLmw{%`Qp zuKaqqU>FxCyV{TNW~Sr+#LeOS|EAa+cK(fGlLpKGMc11MM4KX>i|Fd2^858h}PO9XZA+$E3Hw&Myi99j!#bau(^n|+SKUizb^%Klw!-r99vm(d8^fT!-=wuo36C0<*t_BQ#7uIwA-L|wwKoV zOCrp;P{GJ-;LB?iFzLDbOA#O;IT%Z@XD5uiLP@Jx3Q~Zf|0!zXYGPSu&&Jl?oomCL z$2$tcIlDeQak*e=d2uIOmv0aD8ss42wr|t6>Y%LfZQR-09&Np!FMo~QBt#q zYZJsB2hBby94z6nV)EQ8pF!qYgy$QCVe=t^X6`z8at%RD=dkCf(abeq-?M@>acx!_ z2Sl6~BX^A%sc+CEH^E7dB2F3d;RZG5MndV(W8Sm#X#lkIORD-S2a><{yTM^eC4oLz z{BcSHcX001h&{pX!mQu{P@yqn<`Q&L>=@WJ2Q#SNG(q%XbeA6h0u_cRF+x43XERHg zlfk+Q*P*;rF*1et2nS)m&I}F` z7dn(Co=W+saj|gV;nIrvEVQCD{64%pI~g-3AlUPO!IpjVR9n zn_n5^sA0Hf6CC-qlEibgzY?Y9jC5x@(2v-mHQCzNk0FHp9yZxI|hyPBUCX!N$4T48QP4 zz34^eQD)AOGR%5n(^_Qkpux1d<>);pnYrE=+q~|O3hyW-G&z*go^(*@A|0L53dIZb zKM#^>DG5#0QpMcw)SrF+&ftqoL5mDp=JWliD?ll@I!B9O;yaw`6zIqTYUf9JIVl;!j$Ir!7G0~gJ= z{$it8@JGYmx9{XamiP;hj&7@_=;?@)pPiJ{;zgf4@#zsOSP%xHLxl!69ET<*ab%%N zh!W}A(LQ05p-WS*1k8ap@VP|=5T1XQWdX38Pdq*w6#x&2;PvTt*e>bEE;Q3igj(h% zACc<1jl?JPsozbMf}e7WvKNTl#P%Gtrmqu?-hRFn45PXOO})P0~eDQoSUbdn%H$aqR1t zpzW)bL4lLcRJUYV$Kw{NKl0)2H+cD0ojQ{4Syg-aJhhRy)pTra+duOflS zW^@?bz_qtj}9Uea{&yYy9=lcxU^4ruDl!RU2!I;R-s2V2uncy;8OjsTBfd_Rq+x|up;r+sQ}*&r)qK9D zA8Idd2?t(0*Us9WK2oY&!#Z4Xlh#4eBeAdGXM=jyUq)+|*Sk4^2pYtBXk}dU#x9Jf_uY6$T5V91T+K#PM%TB!PFa(>Tmz=Kw`9MBo)3 zBX1i?!FF~tEW-`-^1C122V$Rg-m1T-+yW4ZFmw1B%1!@P6V-pbPw{Z7!$A4PnGu^^IFF!Nf(cLx5T{UP%!@X3)r+t-`^+cS30;ij$*7 zxPre9+k3h?r&~r7TDOhxbbZ6$+#3ZOcU^AxiU!1ieEPW=+5Fc|p4BKzdw1(W{OKe- z9iptLg}!goBD|vLW~JN$jAjpsnT3dP41Yg6HA0I){{D^ODU`;YTZ>=pE(Xn8yl}kY z9rFU6ZP0)`KoaWor5UGE-s2!vu`UN2SxILf?w9P2JwfHAn@~#dNfh;JKD%Mw(_8O~ zZOo6icQMH4AUVlMSqc@N{wU4Gc(3UB0UTVIetz|kT%E$vIb6W&7HB*plD^N#)Of#PcBLFVok9kAbo5$22GjK;QgQ0%PHzuHdx11ELd9?`)nn48LEABr5gNcW6BsC%6G00B_UlCFv|@ z;qj)C>#a$_I#_O*Dur2tz9pFip$R81nMc=^&93iDU6ZAA%*AWw`KB5^a22qxe&fhh zc=bIy=N@b_W}@`X0_`)H~3cSs3v5JOx+D^hNWMa3#&NBWmUGqiju)CD>;ZQ+E;#oD4m;6#6llDn>2 zf)$3a>%I`IgDYJPJcu2IQ`q=&k!DTDS{FEPnvOor`X0Z_GJt)fWHq&Wx*$w(ads<% zf%!6nQg&}>=xDLL;5O8`rcH;pwJ$SxDZrvd5Dr;FKy6+;WvtHJJ^$rW`ix`dT=lz% z{_XtK2ivaC(uU{CHBOCx{w&3JqiP3w2BrfOT=l?+Q-p~)!t-$QS`BBo`@tsfGYu#P}5t^VtW>R$UPp^1k;}CI`iOo3^l6POGO^ZLB~W z_>T~4OOEI}n||Ogk>1mYT4`w-sJ06wLE0p;aHfz=$7XuqCzGUwN(0|eqswJjs*@ac z)egM-D_M8mfIRGyF(%uW$qqCE>|>p=+f={Vo7^K)p?tdU-Z2r2XRK7I4DIcGG+zo! z3JP%Y5F6r5+YaJ}6T0^6oj<@3*@bMt&CrCecfHYCo^YdGq|2@cx0mG`n($Jk9s>fY zH?fSUT7cBZg}&YqawhMeVA}&Gw;SE>B7IVP)P6+On{oX<3D)!zi^UzHweO)6UA=zY zBdM>ZpsT&W{`L`Q2lIXnn4B$^Y?uEtZtULwaoiXnz|_C`k16g{rOV_3_?SD=`_#hT zb6ZPanxtA;S?4^@Ei02u!*%*(_iIW3-m9UVPS<~dG@6!x+`8+DRbEj#yr_rZwp(YK*XKBTx?{>9#nUcR;_b)LQ&*eyoufFt zVO+U{gV!ev2l8Y)Kc-c*W~Du44iVODDr-U!xu%R(!X>JQc;|Fwb_}Sv#2q?X&gY~2 z3>=QAW<6W;sOk)XBt4TUmXTIffi&r~ELfu)fPfB0UDumeYt`wN6kFPo1FW2zV4xqN zEj7!|sAqSz`Ku|Vpv%js5eD37qa2Uqqx6h4C2|(x?$v%)jdeZ>5ckf>#{#>Eb%Pae}mo zP9d&qzPAYz?0d(T2&8l-k*inH_eFKdCf}_(`=ya`?ClXWojQi64L?H#7tafe5M52%|8Zg{@ zofd8BjoMA{aAHrVwE<%>$T9Sz*A&Bk;|2zZaS&}y_gB(x3Z`+3t*^T50a|KKYM|Ej zkO8=*H;P{=>`Oy;{SDK+bY+__cwE_AAvhAF+v6}EUGO!9yK0o}W z73405g@5;DQ&N|JH6eM&|I{1yIMi|C0AMdenDkbvu-_AR)(rdT+3|4c7*g#EZqXF! 
[GIT binary patch: base85-encoded binary file data elided — not human-readable]
zJ26_8FHV1V6r<}s>he^#?^B5htZgA z{5BWY-ciJvC&f3rE(DKa2pW6)j|7Rvs@<-9CR3m5d#KC6M!g=i%PfW_=)X5V4e~M_ zK|`eJvvY~yF-`ojSbc-vIw)VHa>U{O*|*`wpe*uv_-K88&U)Z zHnbZE1LwA$9moWp2Pxt9w;K=9we_nGI1XY^BfPY|ZHGR<^9Aj4RaF_9Ve_!XR9J+KF#VkI>0_v51$pz~w3WvnP^@J50#7&}L z>bs}b2`4AYV%vY^_at09lntGzze2=sf^6J$5(&l<1rZ=cTVig$;F(wpDAJ^v`i#t z=&=<|9d~&_mM;aL|LQzpTlS*jSba%(wC~XVjb@6%zP%V9&EbcgCh;t8zNQ#8KmA}; zw+gyDI_ek#fasd<VK)1ChTl25@2Sm)$OFXw_yBslc^nM8k;PQ z(V{R0OHFx0CLnXEk>#-xZPicx$oXo2T!l$28f_AjTnxT8+lNiTb=RMW3dg=T!5Ke9hx#dD-_Hq|l|d zivxvZ*dze6_=J0>Ayn;T#=3eK>-gqnj_6P3uTbk2oj>p|NA_eup$mcra4#-|MDr1*q{kLQuJ*>|5AibKoiAtU0G}L;e=b16D z=~2DY3JIt2fQ1SRB+9$@M(TI{09IJPuxksM-uBW0WKx-g><=xlq@)#qoqp-fLI(s@ za6}Y#IN~+ZIvCL|R_|CC+Tl5do-=q~Co;Phtgngm@K2(#UYZ~ClX!~|_6Zn&USFyE zGU$Db9_H)(0XCCjNcKaxjUKjt;fFxl{Ec1RS2~z#ta-UL<~0M#N+^oHe*0SZIW_6u zQi#XH+=pqDmo!2S|G$-iQ;qH%|Tg((tY_0(Mv709#9&4Dhsx|8&mP7vGV&V{f5 z(ida@Ec9_~@V(a`<&}kYQbdeIp>Ob+_+)q#lu{EhOGV~QF3+&GYel#GH|pLysOc!` z77d`%ML_8gl`2I91QdZFN*6(tUZTXbc0n)JPMzU<0|hZ1}W;+ zk4ZW_lWbOa&peI#`U-tN4lW6d$Y;Rh3Ram@YGbb13Y%2fblO#JQ@(Z_`g97e9OT~E zaGsR8xfl5IqREWUDtgA&zVp$7_6OC6Tu~wFs_epW8dDN`7AY-JiG}qi7@%C^8%gLYUdhNsav%Ndh{eP(x<711>ATAjC;je zqiD)MeZ@Wf+FuCcV^jA@+nKrPc2{Sn>FFDd7MFh3)`~s5_)E5TfMwr zLw->-7xclu@xlGph|jx6&IF@>8sm&9X+LnRbRnurC0_$0aU?NXZQVm*{+0@QZ|O21 z74bK`CK?}!$$nhINjzpJr|1mDK4~wR2fYP@B^yt9H;d?L1alahR zJ}5@){JDS}{Hj#-FtD|nQ`>J?hG7>wfQy6@O>r;j{99Q1*%ftd#d-%`mfkn7wERld zlqLn_p4pFO!EYaVvyBj^>YNShwZ=_T7Y zf-Wun)O1Ba;hXZcYFu+?(|vjPD%|BTB{?+xkIaXL4<~L}#g@X(6@sDCM&8QFo^^29Ks*31TQJCQ%O>h`W&EMp={4D0R0+@!2})Ordb!*_JHZnm;Xph!v_)&`vAJMB(qbDOg~N|Jc&4} zgvA4P4I515>}0U+V$N&S2eaervvy%?3nI?ar$v2#_#T|W9S0ki!w_sIQT8B^&h`wz zTZ*JBv>EZgkej3a`(iJ0{vZVgb~ZVGKMKn*9rDg8uZ(*FOFP0Mo)CE1&HGEXYmD@F zsqd&*C_9ytl<}hm(+rQXbKh%^+)ZMst=P`5+SZIo4*ClT_91oDx+K{Dy7&I>B99Ey zw)%gdm=EMA7ZFA!vc}fGOnN~{L`Mf%ZR^y8ZmA6h zI;Mf0%1XARXa(IwF;Srd0JpwqlJQ+Jw~AugJG%PrmU!TtzkIQBN%jx#Iv0+VEa8QM zwFrtPeQ-!Glmu_$J4`HKe2KpGY_YuIj>9|$QTa-CXVYa@K?z83Hkara3|{~%IUkb? 
z7;diU&!5W91g)>jE9Pc)2u_v%$Pw7vWWwsJH67dviouW*f1UT)M5?9Y>9?Q~hdML_Z?%gA-%lY-H z!>NZ`tV|G*sI^d1;klQ2xesOd_X3%(9#VQV>Ia9VH@5C`Zz z*#;uPD=u)uXK*hJs#cq;-5^0TKPD*7XE$LhE!3JB%lhZn6*628Yn_Dp1wLwPd-L-wyuV+%L2{4H>@r1g4QAi+fI zthd<@jfw9j*EyV{ZVeWTT#9fz6aKy2`&~!hFJ@thBXlCYk$&VSQkdQWXQ#>)whTC( zf2{~mT_LrfdCB|E$9ozAfS3H`CD0Q~;BSUQ5H`f0bS`pAwk;X0|Gm-6t0VOehlU1k z*QNgBxfj;-APA3nU$QL!+HHcPpuas@OqA!IP+etuM8sksH_v7GOtXYfbI#UGd&6aH zx9=45-Utb^{)Pl=1ZNG-dBXo~k$6p5RX}NPLI3BO4z;K=woX@U#5sQDO^v)8(Pcy# z0LmMmj4AP7`sB}ofPJ3OwW*r8H{zoi51sFcSZt?qY47e9G@4CB;bChecLi({lzH-j z#WaU$;P$jrSMIXc%*~-2w>4kRl?O0}UqQc|WvwRr61$t|O!YsdMv8Y7G$w%+FTo() zJ|lX7HQ)VJs+5tsx^^{oX_(Fm##qj=jKEgWfI?D%|MW(DCwA|hr=@EjN9d*zu+3;@ zluJiZNS`6wyo0t=FNqga)#E!dIxp60 z+|XW}nP0EGqgE>n7zp`1?wPXG>E`NIuy>(pWMZ*_@; zvj%dMYL|rZr`q<_`|YZG@D|0$1_-%i{xcOylGZ6Bi6uOFK01~p|4HEKQ|Z@!S)zvb z#$(B#1BuRhj>7Nc8D$>Te33(5+zW53P30HFLZ&aMvo@ZYBGrzjYMim}_&5I4~%6$y=JB|C^W(JNd-sbBYd`-Pai5$0j}w#F*BiL|iWATG2`ki~8ln?OmCxsi z6{|wmMX#d@t~Uv){Uv~zb+1saxyoBX{TPk)PNPpDM53KFWpTvKC$I#FC@eXj#=DVv)Y8!?L(;@4TeFTA@p|a1`;n$oHTx3TSF$~G*_`AXV8NXSau~6Wo5A&hO z8C|{QnCsM|Q1y4!`xhAwoB>d01Xx-A1wgU2)03LvU*Y@+#n1)xOS&r2Dt+k+?!<%i zR17;KgYEhC+!z+=M0U0%BrCEX%8YZ@VgvLe{8$|fT+z@?PgucyKNET zmv_26J=hA{H*X$ScS0+Wmq1~ci)<4+KDhi-9^pxstTXIMviXy$Zg#P~G)6MO%v&k1K#DgjwRWKLzoifdZ-NG zJ&t^!{H=N~Ng&jmc{_t*itr$9wqejhf$N+k zCS8oq5JG2fUr4g{^As(~`ErbA!5PHdJ7{P)lcD_sCJ4%5j&n6>)g=tDjtV6T@1+er^;RUfy=<-1EE2=`-IKHJ z^=6OYU?+~vN5of#C}I@4zixN>Nu=C_1iC+d zWN0biz>s(hBvIjW#OwVBiuB8XTfK+dshn&8RoL?>Fd2T590m$14$PsEBr+DkLqnt7 zfniGv|Li%k>bxS^pu*I6Y%P#Ctbx9XI67*Y;ZrPiYy0f`a4vy96-VE;&50@e^1AQ` z54tG1cOH1W8a`c@#n0I5?bQ~VoxdNcJXItaf$lPF=FuwC5p$$~X504&aGHISfFBBv zjlJmzV*Y{T1>$@&(%{+8zM{%5avo>!b@yKCR)52jtb356(lHZK#=Gel209n>@1xjRaUfFt zl~ZUnwftmSy4kii1Id8OTYzuCHo%X2YUIB-NBL5k*CS46k`0cc)2;iRBtWEi1Px6+ z2{LH25{h2o+XLX*uu{e|Og9lbt|7Pb4S9MAG!JILXN~9zaFVu}@cgV9-1@2>7@Mmg zO9Rmt6jhIM0y#*B$`6t~RBdk4?QCYQo<~OaB||BMI+YeEYl95=wjW9HrN6flZ(+Zy zvcc|gYY$MP?JVns>GQTyT~GqM(<65NcJ#kHzhpA|2YdalTLdJY=#)ts?b)ceL6d#nU%-`r}|H%kT1=kdQ?W z;|6&SA7S?UsK_@I!LO)BEp3RWeZmXRNh2-o{-9!J8sPVDACA3QnIqoVWWQlg;c8c} zd{H&|aG|<=%(>aAMbo&`OL4Xm*D6f6*7P8|XAT5@02?Al(ZM)b_$KW$QrjblkA6N& z=ZYBz?=mS>O?7%4IRzIfQre44(B6k`PY!SZo-^NgKg^oW`f(xX2`y$pOJj3*op0NI zdG%Xw?DmWSoppw^)eUp49`q#*chX7f=QqR^>-jh8e_&fl89mjh zX{|z-OYrTO-8|6U&2{~*_87i98XcAf7|lM(qAAqW<~+rD-AQ;ve0O{Rw(XA%-)LZ@ zJ}T><)P}CEFk4*gPKEb6i|-bPEYG7`GURDxO(Eq1K#(kzpPf2X^a#kGJj1>Q)l&++ zdifi9bNe|MjzYfEql8c;e0OWqDlvu%(V%tYnH@KM27b%|F&feo=AA=N5u)a1G-b(D z{e{3k&KjUM?K^P2UdpCd%wH1hZOwu4Og!xE7vOHbHQ|k-vcO3v%511s07x?%v!s1T zvSpQc7VcJ<{ZY6?pGF;JzCOY=Gy)_4Lh4*pE|}Gan(JK@S7&2AWJ9eShtoE3bO??h zu>_x?;P4o10_7=!-zoL5Hca6Iz0C3)$`|^gbN^z!k6B)QH9e3KCz(35PG3AfrzeGN zo~I!fX!;pXq)@f&m_-@zi5~x|rvrYY<$AAV1&+wOD~6g{m`_LVcr#hQ0yTW)A8MXw zYc*KL;X$@f_Ta2eopf4@2lY+i_J_G2VpnQU+mWAGyv1zT_RMv9A*b-zwky+NDL>M8 zCg@9PPv^Qc(wc%bwWasF=`4RCV-6cA=6NnM^CW1wGk5MNA*yK!nF98~6`(1j-$VOB zzF*9>hlxw9}T%$!B zSFq0-p&h@?ba+RzD(Vly;`uJ-pd;@+NfI7mdU%cc!9Q@W%AhT@F3!tas(ZMr@(r4P z{oP;48$S*7dNBC<=3UqrzCkWxqsK!WnG|2O>J(9c`~fZ3+t40%FYq`3A+rBz{4WG| z)_AkLpDfki8WXM29`n>*PYaRyJu?bF&<3;XTtcWDtly?e(p|seVpgAi-Urt$X-h1R zw7j5Qh^ZitSu)f_7w3jUo6dfy+rCXYR$rvH?r!lN=iKKb6?_wgcNtqz1NHNfuK0Vq zxacE?c@3H9&RMYbvd0R3|Ai25LSI;g!fi%(i}c#fC7%oQ= z@Y*&U3<}0r)0IJVRoe=dkJ&Nk@UygP1V_R6#jFUs>L3R>|%Q@9G4QM0u zMT5}fB=q@HFC+fkIA_)rI{lQw;aFM>33U`YZ;`TFO0!+LS0~bLLaz8wfBrAzI!bL4 zSeF|Sj1Xup59^TJ?`_>S5}9Q?I;NrSQJ`?!oRn*+HH~_P#anHL@U$~cJCwWcfRQAD zdsQ)as}k-f96NRAsJT}7XRl>HC#CyPqKsZhC75%%{rg)M=e;!2YUiN@PTQHJ(hOQN zQSpcQorr5+JTxTSH_xS-#6(e#UD|_E4&XXucRFhUzS{!5z$GxZqSa-4_0-!#w`-Th 
ziO>k-yIXLjc#^bw(TBpWx@79i93ku`SF>hTVsT0eecP941%I~H9|2mufsAQ))0Bi6 zTmEN&rFWa^diXp++f3A9sgE0?2Q=m8QEXFTaZQI>S!mzk?y{=HnM;2mZmPHJe_D4g zs64b1C-1r{T21VZ%YS4C#ZO?Kz@fx*Ap5Gh!-f6}@x_Xwnrdli?Q>q8(rK!r$%ig3 zB5E|t(e_{ajQ4~7#P4C1!1}T!^V3hD$D13syPQzoS4Bj znnNm2w+1vLP-GvyfgB^$hl#Y+y;E2_6W|K*r>%WUr|FTe;okx>L5AqtJpNjSIEubk zAQI?h!y~1=_(mdYIUTZN_rGZ{^`B0f{h!a(bZ(m~tNH;d@<=0uBfeg3i{P+XZKzk@ z?LHc$W{BGSavB7(!<#bIYKRk+v_Dg3TR{B=VW{KF+9)Mc-o=doL);PX5AlMVX$o?z zVwuF3=Jn<3aCgn~%fpyGmm}6Mf>QQ$fYGppZ3UFU#=b1DWLy41m`+7d`=3~1Avx3% z#r6gH8M_Ud5fu;h=OulM?zuvV6^i|@>E3Iq3I-*abdT<2SO)G1D3Iwkv1J}(l+MtebG2K~2qvCrRkLTB9dp6px6yh5QopH4wX$Sc4`SBA3xg^#Pk!SKx=v8 zm@PQdv&VY{aw}L3r;kKyt9wV(_X9(FlUmIp*5E`cY6Pes8 zNR!L5k)Om&V3z6b4o}Xrw|Z;HZGipZY{*Cy^(5RH4V^?Ddyaxj(LR8!J}0?`Vn&uJ zh{QmBh>p|R4UW@~Q5TKLW4CBYI1epFmQXpqp9W~qVaJ<#;%I@7!7a)6-P8dqi9@LhFF^We zgtfpnTUM#9*ZTeZ=C&?63Ao%QD{iGxFH?AMRaHMmSC1Ft>}|kg-p@T+z4o!=Nh`fO zX;v=ljqI%4knaQCxT z>L_&e($`KLJ_pLePt%Vo?m$gOju_Bir&FWiZvK?Ia<3v444f;`bt_L3ed9Jt_8gBN zKKu)LJZVKYF?A)zU0d{sTHr1$zAtQEQEqYb!47zEL5Q z?)1=!PG8V$t!pXYegV^j>4FhC=8h8UMLnyjE2?0IUOav$p{IFvcjo&MP)n>v+9n3oo%Ezv&QZC*Mv=ex8h2# zf-l-!s%ad*CfcCf#!vI~J(@g>%-(DYPOgKZWY1tg2)C{Wat8aEJ%AXu*V4o4YT?}- z*IR?FDi(YN$!RcRlGoT_{szvoCKX_Ja}T|Cs;+~pY)}3YR?vdfDlOaPOSN#I>l>Y> zTdswG70y0ynPWwgj;%;A#NP5bU4CtsBSj=kAk3>9&F=OFc?tJE?EvElJ9Wu~naT>5 z_FU|QwosH()2dG{9qP`03J zMnjGoS{1LAI+LbrKHU%N>w3BQf!e+j0V~RfDF2jD%A0Xt2 zfdpKlnmTA7__9)hGrC*$CTID3n$Hid&Qm+>FG1`2TzOQCv35a$4cUjkapsfLnTM$F z52f#hz2$1cWyi`*HwU32(ImHD`#>zG|3wcyNifsos{+f_;90XB__Kp6pyP!sIhw?s z=nEwiLX)VEJW{)lYp2|B>8Bl@+ARRt_kG`qszxV=Z)9Tyw_h5)Upn?PN>+I>(p&M? zBR{PN?*% zw;yBv`}6-j98w(G+He}(ggiBhxP4KIpe59`wEN+9d39Ant}N^Co6~MJLffc#%$6Yn zZ0mOw%>F`*T(Y^^v1BAyIP7<-CyJ~D4wZ!41aNimh%3qr>P_pGyx$#lkBR^0a6TAo zgAom(QL~KX1k!fvq(O@!r$?4_NfoiDtkw66ZFA_!$8l4ZJhC?vb1F-O!~0l8dr{Z= zDRT54x#bCHbN_;v^i#QfjcX>g9t-FQJ^K-o-6M8G>qs*6c_}&~)+{*=Eg0AtX3v zP7H-l!FkC2h)rSaVD6GCW9|s*Y{P#+Cjz@2=DmyLWs=I? zN!sV)e*qRd#>pJ9xobI85o#ow4rsaid~+66GYMZBlir1kxp1Zm-kD^enGWi?uhcZ8 zWdGS-nf$dTkLmm1vETj%OIH{}9e5CucSG)Rgf^&6q!Xn1-P6{raBIY-M*dv%f>$Gy2`? zCcwC^eyEC(8c}c_y2TMmD!BUagb1Hk5?1D~IX7C(T;T*kP%jvhpnZ`lO7?wR2|q#g&k4H! z>xXOH?Vsxp7w?9gNHP3sFmF#kLE3+YJUy;fh=+3jX^S@J@a*G^AVHOWPP@q%n-&N-RB0y zW83$@Zc{i`QHw)sLx>TymRIDEF&+Dk?+QZGj!tH$S~4{FxSRPKlysn$Cg8tTkojtG zNzCDO`iJh+eP_2aaI8I3f>Yz*d25r5~|73oubcfV1rf2OUSM*do%XP zh~T5IA5faA-#FR(B2l^tfXJA&jC;hs;U9Ls&6&)12HeW8Q|^qu>JNNc{N6!zWA0MN zj}4x}=PIvHJ3}KeMAHr*#8L1OcuD->+M=F3vq8pEq~jR5w2tcGuP&%kWAcG5aEgd* zCy6{VJPjRg{kKCSoGGrz2_fj3|NJH=`Go^Fg+wAI{B3(IT+8olF@$P zDl13T1}1EEQ}_0Mm$ZYS8txPKEbrWfKz=ZV%P@w>Njr?~C~|nDcp)WzdY5NvjJw%q zd{1U^7Aum7xPz3cn`_3D^Z)P_SY@5xWT6@oXRE$+Oz{nM+mwB;cH6O8c=HyMoz3O+ z;hR6|B-y!ntNnO=;l!U(WA?Mt(+2FyiOT;Lnbj`jgH?RCKyii$tSK^4@^ZbX)Q$>)_H>b?Ze#)zQ$@nXr zUy|!KiQqapP}3>=`M2fkR|eIpkN+F$E*n|! z^9;e&B~? 
z94j078GLThmy^X6!N)PK4{P(X2zs$HKLOoxlI!sUM%tCt&En>Z6ZI%lN+zmd${Zj}M7R-o1k;VTh{>s|7Rmd85h_05KG z?=saL2brmSXLc7p*(a#bd1B_)ojikuO;b2$-2xUg0k=)l9$Pj51KKjENaQ)>h(^I$ zl{2gelgxM}FxCazCJ@1#6%2IP@4$Y0%^^qw^{JTFUd+R&~#87(}{I;D_g4J=>rA zHMK>#AFe(4A-0LcAFHV{$c+gekfA5);$JU!=N;LWx?YGezdI9&Y!R$u`4U~^)% zRVewvtPmM9A7*WyR4G}XU*W3RGaDOo?mqH)eO5S|Y{jo%Sr{*cT`FnHUMDX%)ruwh z<7c;r5Ut+ZL;f?o@Mjz43c7Q*BGtzGy_Ii#8Sfi1e|4PYQZ}cZTx$ht(qu1?NdI&w zRu7_RGT7n75ja5UPv|KAz+JxR#Iw%wEs&kBG8b ziQxy19{v{~F)$94uNk=xMl=WC7=Is?JMnwn>yu;q&!gXKRc6kK=mqX{a8AMWiABiW0%DxlAIeKxm%!&-w-J9>V zZgLW@w4af)t4QzKV5XEdK8n12f}&hfYWIh!sVw!aY z$`VCEH3ua3tXy!ZuKZs8vP7#R<~~$M?(ijc$pmQM5JI)g_F{->5$g^{=i}x(W*;37 zFbecnIigMBw*TD}4h!Acj6Z}%pxhkg7W&aXv48gH0%fC{E)M*!x*w#R(lI=`7{C^n zWgET;?_Yk2I7cx?HQF`EuN(plb9Z4L6{Rg&?O?Fo?OO2l-Bxx*)loje`YSghfzOSd zviX<#XXc0&{-boz9@2QZP7tqZ6_Ew!{9V=YF+4RjLCI=|(aVLZgYCslsMFcFf%%yW z*?_Crmx-!UG4?fUZ|Ttf59jE?()$RBgKNHBpFX$mKVVu1wp@+enlcELbL2qM*#pG2 zlh?CH6h3%e{v~?yty(8rRs=4@z(MRyBY0B)C3>30!5NU6u(ewhJ%<~6=zvM5_r4O! zM)@mDZ|^1EPTs4sy>+6~+)nL=oZ4L=1kg?&fbAH_qpr*mvHh?~cCap5Q0O_ai)eHT zS;~xk)$YCxW2(~hrQq^|8%g~&dL3c4*265o;KQEI4mvrcNO%lI2>dc6{DnGCdrN0R z4nV8uQS+M&o?>T!2Gd#cJ)cU(yF!NALnb$yRZ;@cd%;oNDd!5bG39 zS?GzBJ>yDR4lt+m#f#9T!`QUW zs7ebxpJb0rp=jSbA!d|%L46r`oEE6kLJ?|XK6ST=t4*^~0E|Ot!i`Jn(&7>OCnOTu zee7`W0pIlBOQyVLpcqj2k)3?FlI3t!9HSjMhkB=`FMl$rI#kKcS&AiRYme2f|FCXZ zyjZCfrIIB`+HW;u0OH`9X#9Hx>4du5g{yYQ(gB236zOfe`(azWpP^|{j)#%A4wH&2 zjitkJcZ+!Xt-#mKTCEE76fDoY)@tp?sI1vE+=yv(l?WYR9uZ>)4=Z>a&SubXq;^8U z$529%kzMHA+V8UDGdil!I@|*Tf^b|0>QW6yKe>%CqPIDf^W2pR+cL4FlEb^?{tT;RrUzCb^ z13@kYnY09GD-nwm2dLAbuxV$sKi! zj7Sca$kgCS-|vFRZ2>QfW}dsg{-Bhd^k+#NZ+T zG+_MNYE(Zm6id}5j}TZ9SN){ED`a6LzFj>w6&f03+Z8Pyf5Igx^5)Guv9$BqjQ^lt zK%Z}!(gjfCb`${d4B`C#v*N6M?Hc${%(T}H9Q1DF5wZsmJ}UE=Ju`Tw&ZO3V*tdYi zwLZ(GB}unq!r*qZSoJF03e`Mi22+%nLC9NuWZ9DKxbMm>i%EIA!G$ZR;>ofDr5(C47`Rh z%DEpJRC}GM9#QA6HCsjyo(j|M;?48qjydm>Yx6;Olkv+&0F%6qs8MT}EdU1LQJ@i8 zjRb9l@_jdzZnm6ac`^_5X^Z2my z-K*aEy^B8!lK2QimZ@huZVDW`^(shp8Ar5H11(mYi!$M9;Ez2dj;yPNno_jvQZ>;5k{I zsBH=JA=lDblJJUTeRpjLIpNDS!ljQ2?4IUVCEq81>ME3%P1gS!Brn*tJ2~EXs7Pn! 
zoUd2Wqb2UF-|s!)uzJ{#=l^&``!UH-;kDA9p+587u+?3*a9B+LqlGvJUT&rtYj4e7-4OrT0|tNKMD=_gIOm zYi}sNAY`~nococm`nTD$5)~<;tveAY7{{Q-hW_L?{A+fQ1$ zwKrJTjL}!o(}xhc#rkglq%WP-fHXVY9Flo3sww{YTaB0D;*)$YwJUp?Ngq+|^+OEL z{py8%0$Z9Jk}Ve2?O@RlvvK9Gh&R+(M6YpVer0G@G{4H61P5&e zL|ZU2&*@-HCzyiilXZIloq|DG$$kHJEZQBRYR z+a_Nf+XNe?0#Q#ChQzN6Y95?C!K2)CDSTUC!{zoN=-fE!33Q3%NkHJS97~h6YX&zA z_Gd2uRr0H%xBM!&h5ncG?~Tmd@MAi6P$MXjC>>$mE-rJ0>DbY4x#59a2{<=&0;ZzB9eTtA^KGmtGr+{l+hWBC zW0~w><;5v_meJlVGw@=7C-R}pbn8pwfXZeX+^Zw`fnEG#NY~ylBbP3lNG#nOT&S4T zfFVQ}sh5(i(m0*a+*Gqp|t7}DwYcI@GkC^{RCR6`f8hf~CxLR5-cO3Jlw zd^Hh)ptYCutQs$Qu-xy7)`pLQcQc6gEr(AW@u-ZqQV4C)@P6|{R7k>T1?x9xmF~I>^4Qmn1}dC_+N&A`)vJhC6YPo zF5MqMN_*Z~?TWuXrgot|qe1)D;z#*t))hqC=>RSI6*4cd8TeO!_#go87r{>|CfMVS z5%V(-$%txvR417qO+QKA5LhtT*T~OM z!LzzXAYyWk?8*rX8m@6fjfTSv{<%L)FqzWIsiN~?ymX%J`NA;1KbwT_k%PXe^X_qX znf3=EN*#A^dL4YLpd}X-qrBY!y*)4O|I%~F12YA6Ir{CDU|oM%aP_i4`@7Ao5t=Tt zMf*$x$pF}bV61<>?F1wU3oWx&#sA&|oV!JskxipC&W@@*4Ev>m1^u{=?etgm5M(L6 z1@p~6ZO#1?xyrnpl$#~5EgRc+7JFB|4qbY&=+*UYpP40!v*?x6_ z{z7!?uzx<_-k~OV=@wvwmm)v~VVUD(&b%qTZa~p_04q~$G({2ms!r6VHuABvn@*X| zP?sN`ST0>&dVHC?@C2&>tgosO6^<*%P)~6Y_=-p>1nW4l_GTf+=BwH{%H`keuRJ?l zI{dKtOTw2sRXsUZIo39Gd@DilS38|)s3fX4VcGOsY*#cA3qoc0lqarn_@!*{{UYtE z9s(rz$Trwa@tfmQ^ATB@A`kpUFL>UryX$6zUG$iEpZJyObw&#FgR zH}jeh+X<=*L6HJRi3)a;Djt3pe&^vQiJ$*umM#p8B_f;P6`QS_6SWJus8X|a-Y0(N zqolGg+6*1SH5`S_<;BDGl?A;nOy(I~ zbtLZArIg#)3N7_5Js5Qpe}8cQBJ<$CQsz6XN}Za{+=`>{V_`f0ROeh+a1T(4vs?|d zL?p{BXrj_s`lww`FtzC*J-fF!H5u*lplwr8_KDE@o}MoOA4*^Yxb@pb)8w>0W>SCj z=CSjt@M9U@FSYk;)fi3(Ebd^R!SGR&lJB)y$OQz|TYI(j2Bzh1?Psq9xhCmoFF6C~ zmc0mEyQ^d$O)Z5Qgy~3zqYR=p#T8P{m8fpNq0F6C{nigIPj7~wKq}DNQI#L*U1C>$ zN_-#i_tx79Ej;zQ{=>}l>Eu58g#E_5t7zEM^2Z2#=_ly0HwvV?6%0zK#`|>_NQij4 zEK%DK|Gwz?dzt~%7(!3~uRu+xi~m~kUAVwhhnNDnYylf9^d_?CD|!Ept@p2zU?@%K zvirMSl+ua98{00~GYK!2u!A-KlL_Ph+2rwm!QZ)niiT`|Gxh{X@!XryQ(p7sNUI9` z6va!y=(UKl1gKplnFMubU3hr+Uq)xj?as~nw!pcl4&*2E{V`d5DLq3}6X&Blmrs|n z-r-9iQT-}!wYwd$RE&pnT;u}6`v@~4wxv7sO3SG7H+l!mnb)=cS8VqG z4<3X59sREXv=-~avS)(|e`JXh+pQ7i6%2buw^WYyt3GADi$G8H)FoG!A`bL)C_>kR7dhYqoR{^P} znmo!3?G;bsz|%3b8aHjnxmIu9P9I^R*>6--&BJh5CImTx1csH^M&F5)X8w@=7SVuX zBgs;sT70vXVftG-+II}E%73}Zkn}@uBZP^%3RVIOX_I!M&hEO3h;|qNu2-(52gBPO(I=WugmG#Vp*#3I#WF!pnx&H`qD*^)H z%0S^i0y(B#Gl1fjfF`kW?_=XLE9uuCdGV1xNoG*AEmI$5yQHcS%(EZgWDfa<=gd>C z1{qk9O~NSJo>hZDCPJs7y6#(5s-9JSX)SH_?MZyuwbLji{YPJm2Hqn_Pp~v%Ti~G0 zh*fO%4C(7%$Vr?lmb(!@W8u*|JlZCKR|Wy!D;#i;We35wOM)MjVicXMPN%+h<8iTqcVN2xvktc3y;1}=CD#m0KszChO*=0&x!+XjBh602GbZk2COv-QL_ zhC^k%dwxI%?<7kY4s|thGsw5#AsWeMp7h%|U#@5m$^-h85G-8#M88;xv)3Nr3)8F) zf_x-{?M&jro4*=Kd}T+!ZWkj4ydN7l+*||Xjvl3f#*gaO+Ta})rNKY=Uvu*M#whx&ES zN0z&j7D#7htBzFHSjjSfV!Nr8fHL(b8&Gsnjgm;Ips0nLwWL6xt@kT3f>s-*oqrma zywMjbmyxe{A#!Q-^1GzW_t>t8J1-APs>6{R*4v6;jqU-*%XdNT^x7bhg&GUtoKg2s z;h=+(4nbTVLnx6}TL;L^Ha}IZ>MFiR(znfRif(Z0iz_>|@_wF;5BMb4_axct7iIzk zE`^kak6M3*r4NY7jXq_^ae0<{nVsab=C^Qq_NmJy?y-r-NoNR^{0hGVjQoC#;jvVH zX}ZoFGir6g0dz&m7w&(-slf0bf&OhP*W_P_8Szv>v*~SvVR+NfirG{D;EglK75S#6 z-|U5k#xHbvPgU*;^k0G;j_rX4?-V=oWl#{fN8xs6j)v|AM>Rr^dg3@6ID(kxM{DI( zR-E{VK;YET=wZ)*=AK$oqxki{Pp#}{WNq&x#fJxB&9b?oL0BYi(gnZK8Jn3w9CwR3*Hj}kw;3U8}g-=Bv+6FWi6L+Itv`)ghY*6^;UZn57ja&`SwtCTf4swbgo{OHl8ggE_6 zH@F7Qq3$b^Wusbzp=4czKgrkz0aM&4P;Zc^$vSsm3(*?kl6-6Z^y2Q1C4kY=QgQfq z@S_ckLGCtD?X1uiF7BYzHdC1S_quS&uU2L8Mx42q6~yMlhfORTg!>g=Vvqy9k)H^Pel<|U{me%*h+99znF3V3wh;r2<8!UJduJ#=Eypg;=VMy#FTv7i5cVg zzGTxQtRnG(O=ZViCHNp&cp(VY4!qk%`XEvur7@&^ZH6F6E>c~!|5KqQbnbX_ptlB- z-$@r@KYPxr;=ognbbTtH!O_Ys4lGIz|ncmO#Sm=t;s7!`vm0_+q^Rn+cjdIlkUXET@ z#0JeZ^M-wF4tZ$)N8?7Vv7ontI`4S_rWiNig&wZ^?TCx@t}Vp^p-@8}p=gjP^O5g! 
zb~&FIy^|kF9P#SVNgwAO7ffQ%ia@D>F)j2n3|9jy!VH*es#B%(ziS{9cxU>4Ic==o z8c~$A*nSNOj!$Z@c*L0h<5DZi_VnHRK!+DimVmCa&u{)8%)NOy)bHOnJfaYieTykX zg+wIlRI(*eDZ7ct7Ao1oj0oAc5Q?&-vW#8UX{@1=oe^fNS%w*Ee3<2XpWom09QX0u zzw37$&vjkLbsYEeM}IKWjL&wS@AvEdTApF~=vHKfImML%UXd16>;9mbOFbVZvtmgu zvxykhHj(q%^PK%>gcu6r3z)xy8Q7}H$t3nU?b$f^L4WH|$$2G%gsdL}66bQ?Uw!hG zX|2rmZFE|wBQ-5IUK8q8o+x1+Y<}l4OMV6z$O5OM6@lf?0BK(9@ zLAcf|q=+TKi4LgYj{F*Ql(O@PZ*4QB%uAW=X;;~xfvhkUQ=7WPrlqbnJ|!D151R}-YEEK6edB_GWb5O} z4PBW?0T5>)={eP{#mqm`SU0RBG<7X$@!Dce@V#4U#Eiy+*F|KMux}Wz|KV@M`wb+^ z{J)W`F*KcDhIw@y+$WXYK!#elKa7R;FqDOoOj4oguFi7OIfI>?N@pD=?PZ%@q;=gF z#yiPGyE|uZU6|(xUNyMQh(fzz#Zh08FvT{ZAnJ}ecC>tR^K_1bs)=jE$p^$22Xamz zAF$@9OFjF+K4v>he~S4Fwl;%gz{a-$?C}qU_Tv0sFq#M$mzf^JtjJ$)2x-+@9a3fe zCQy(*tXRZ7zR*AyXrS(gI%u+?MifMXp3Iu~p<=gk59xNnA)nLlGF%0EPTQM%?Tf;f zq;xJH34FslZp(DVx?^^))`J@NE;0g)inIO}*ka)*1vCzUQiay2%waIcESI zT?3I}LUQ}mpN!q@m=IBkJKL2P#T354;D&$0>r6h+d{MNEudqkc{$_{O znIrD%Q|=c0Lt)R-wca1eq-mShvZPJ0mWL{UBlBVu>72+r4CeW>{4*U+{d+mZ+Isy|KJcMvpPD1fRXUXN`ohp#V z_genR^RIpWcwZ-4MMyr2+3LLh?&4oC>+h|No0|w{1at};`+F9;*m99_iuhapfIQ7A z&Znp$O+&)Qg|p2@0N&nbsCAH5`j0x)8B(Bnjw>oJFS4LrgST5Dq9jKoX^1R^sSxk+|r{r?9 z?wC&|mTc4QmA-g@V*kcxUd~jGbDY+h@MH=5u8Ho>s0DMIypzx-C2ey)88;v@`AhtJ zh0+Xl`1xfPXJT3VeD;H@-<_1GyUr{ZM)j;8wTqtY%`L2OMXcYpE~N2 z75-5x5tB*76{z7HYRwz_AiEo*Ee2@Gy1K66)qP6VKSfy$sUHrzZd(QMr<{3Umk{m% zE<-n6dQ``sro95^Q?m68$mH+r7r-D;xfmjjDfAUnj-C&>!L3|Xv*Niy?!dc?U ztY(l(NC;0uJe_?BDDM~c*_G=qcsPBEZw7F=pg4vewX+*~_MlEY%NmCDOgi!yDjWgixB*PJ~Evp8U) z!-DKXlG5ifkk7zW?iI3CyR$lvrucCCp7&3quSr3~91|a3Lsgv&57)5yqn%G_gb-#q z4FF>}^EL&1NhC^+ZsZZF_wY#R2PkQE6@QssyD0j2N}thP>GZt$a?~qk7nW>}jX_pl zR)uoBI_C1FkR1!(KAMPR2-TWJkU@aOiLwhn*G=0HQ6{#q-^o*^X0?M@=h--My}Ue! z^5p2#M`Qh7S9OWrf`{*G17%voF;Gze|9enTG$R>u7(f%j>l51=DUk%DB$NnrFT~5+ zsuS{zl{YA5kBI zx13jOq4j)B|FLgft&Kc+FWK$_WHWAjTZwi9-RG>M^!G~h6q^wRdT@wF$^XY4G^b2j zD`1Y~$kn{LNHo*7BQF&nFYoFaT0J0n#CCVJs<^b%0gwT3FIiJRVa~%C5&uFd|9_Z{ z|Nr%MOuI64LMr|vvkzW3-r+kHh#MPVWR?Exxr^yJ{5jM5?pCqkFAWFl4a)(eXN}83 zv}U9Za}>eycouI(u!?QtpD~fFn~IVA{K!4&a6fy1>kbYybAP`EUKR`i`iRC&?4@C#{Ha z^y`#+8Np@Nb-#tB3}O>Z+lUU4s6QDG&K=p#+0$x6Gl~&zysXR->>&{%DC|i_=0jst zuD@U!rkSD@Iuk;phAYI^=Teqs?mYpaHv?dgM^3?U==wv6bGlx*CaJdt2E^v)N=tUz zd>-ONF`W6ypLg~zpT|@xEW#&|R9(o8c5FqQ8Bdj**Ut2>b1v88@S+HiCxU zB+_R#mGAmt24M~t;F(A+ETI(1i8>W}cTkVpL+{YI;vXg78_aJ>OMc!9vbk{^-qo2? zJx)0cGY}gZJxji_34DiqyUOA7@-cM%x%fn{%Oi-|x6!K`T_oH4nfTSr6N{=(*x6^W zrtet^%J^ZxX(z8y4Jd7?1Yh6O>yk{2IO|nhJHSD>lwosc&16jKH1>R1X?7yP*Cnl%CEo zH{`JI?|7Dy%QXxk|A3t+BBibXjz|bT|5(8tH2kB=4Spvgl@8IB} zCtfdHKQz;1^5Bl z{WohIEb~8$Jh$Y|H*~N;gXEAdESK%>?_M1PC#6~qlUGwe!ywmrwr8ogp0n|@hzYH! 
z0lS&~WPNH9<1Lzyjnbs>cJL;lcv{9o)k_^XxzvWFYX{$7^e=JvQZ6{yE2Z%kM*ql} znDK&<4blaaN^tLWG&kIq& z@T$6c+g;b~Tn(1EFQ9vrrc~nfSGRYK}Lt)|~sR<@w~BBjsg3Km5A%O*)O& zNAnG@QNbr(_E0Oxj>2yg5Ag7W$G!qOUaeVIN0c_Nl$dp1*nGO_zyR0K(P$2x415hk z1wu~e&MQBkIr0N@)I05BSn1@{l_afF*|gGq+7TyM$4^u!;AZ*Tj?gbh+vUeFPYt!0 z3yN;TP1_>=+|<<&mHbTidOxDBq-|W2etoo}pt>$bVhOb8;|~w2e!UlV&FHfr$jE!d#`5t&9jfkAp5YLiJ$m)+pG7HiuLRDh6gmibUjvs zS#=}otw^bFnLUnDfB~;Qz+(@k>KOT}e>C!akhvstkX_NV)aA&098K$AP@lq&0tP1m zVr9nOD*^b1C>jE=2y~jgqeeQmO~mtq7(xZ8+m42?iViF({6c_ulh3^2OcgLpJQ9@N zHOoU+ukhTdM2c-H*M?~4W?%6XIq}ZKHTHGoM?sfYF!qZ`XC$f@T|HCmGuD9S2$jB$ zZG1b?-SMmUuoEN>+s!8+=t3gYJJ9mn2^}e)%@(0@2O-m-KMT=ZA=%ahBcaY4{Zey5 z`}D<}FIboh?P(yt@zRA9s1m>?lHKG7Q!VsJt=8%IGEewqPIRW^LbOD-b6U{7h8dew zQ3L5FVSI_tXO^R=4m3`uO?mAvm;}OQU_XTE`aAH!q1rC-G}W)ak9t<^x^{YR>KneA zD%w{QU{ZMhx;-N~UPtM}DsRzB)3=Q2-ysbbg*qQyX+D3ypvozU{?K7ROz`$O3=p6N z1zpB7KX|>1-ICEbTck~$*A7g5<>Dq)7pA=KTIFy};rYzBc>zTn?s=ynfw-OcX<4&d z^9`4!6=;gN-JLuiI2LJgNKiEAC9*s$9lCPBpO69)`iZpL&dYO`{AIP1i z!Yc1s${!>~u*}I%pu-EM>!NEaCamq>vvoc67rX)c8F6dkh|~zn(4ak%u9zPKaJ+p!MRe!xzMv=qOYI8n zrHP^NPk2eG@1W8fKjDMfzEQJ0 zndhL`c^-KC+?}gvd`?trCK=JZ+~h`nb7(J)7%Bz~6-zoFXChlXk8* z6mO3fqRWlPkMUJZh_U>hS_bly4YgZd+Q6J+Oks_4XbDf$?@_!eDJcrtu!X}$mri~A zAKMZBr*@YAo1cUJ>&Jhm(n<0Fy3LK#lFo6*6*oFheSH6K%B?pC931Ry>`a40PT5U; zPLxeE#_{nP+GHoeVBBdz``Uz{r(h7I1abnUJXJkJ^7P_uTjI5|j2dz+;*;9v1kb?R z^*51;vvs~CBT5W)7g_|4nl!yy5}~2R%T&QD(EX<6R+0<0?IG}h`6c)zuEX;vw`JpJ z1{#3>cDZT5ECT$MX zoq(IiyIOjOADM``I_0VIpzhH@@f&do)$6DKLKA> z%V3E$?{WmzF6*fuyikY7l+|<%)VUfPLg>$^A9{`IO-k@3ogs{J^t)_ zDkE(}BK_rP%IkKeR>QNvX9gXG6+ubRy~x9WLwB#4{duj+r;yu?4Tlb!i{5^`J9_Pc zzpnOp;rJJ16=J>TJ+l|f?glx0^coCG@~E?WZA7y8JhmzvGxV> z?#n?Y)m@qovk%Qnzy2Xexf{t>%TTr(T=EKOwz=_$+}C=mzT42K3iHuxR|HxnC`rua z(*?&JAn&=8Zjq)Gqa>x@Qy-MFZ}>;Ay3hMbXf;K%ByrvrTjhbFK{zORbE26UR{^l< zP%wg{d_)k5vsHl%A&0X}s1e9EZ*5`en!Z#^z>A9ShC^^C5K$?J?#KiLN%K9?7VzU! zDxCiG0{m@0;Q3%9&{8g=(vbdKKT<&N`7tlt{c2uWdtW}EZ9dda=0x|C+iuZlPcU^= zfwKL`ejR#hH6uE1Zo1R8O)g$1vgN9#sn3q5$laZo+--iCvpoQsPkG3kcFcUvE;-sZ zsR}i7($1iXIr=H$LBZF)yIb`5AeDU02Dg9*DoK72=AYrY{~=1we@yFXr11&|*;%%T zqq55;9j1>kmn5Dqq%W>*l?wDcf&Eh01@(4B7r=~Wz(eM|lnX#`dCY3gzo@L=ts~~U zrl!b^wUjUq29gTNE+C*d|NE?tgx?AKSEL{1# zTfoIaIzsQ$_mDqtcsr645#%dy5jy7}n)nD1ue3%h=%&Q0Gv%+C_8J36?qqq{J!iY~ z$)35Gz7wCp^GYW4HI0LAGLAZf#+kNAP|kL)@Ov_k_3b_0b)@6_!!(~g6-LJ3G}+eBBe zl1x=-$D%<`>8w4@+vS7Olwox(%D-#0=Oxz{i~1iiToHD%ADOMFpV;dAh`QN!4!Vfz zhn~Wq7{g}q=nQ|=nv2V9?s%~Xdb{SWN}G$PE>-eGl#tgx{{;ikJfUEgY%XEaa{NQk zjn!@3?VHB^2j05L6&Aj;uBoeAtl^b4HVG@ZC3ZYp<6gMdCVrX8nOuSDmtxPKpT!|0 zW^}B2^S}LYvnX)VS(>gWkUVNF%AeK07CSV9->$7z+Nf)l1E>ENOA<&sHY z(a+-F_J8g*ymoTE{*zPo{4dTqtaBTGs0HLkNygpI(}T=P?Yy*jc3o}CN4jrcCKhI% zJ4pG`;_9FeyV%X4#sUGLdcP&3v`yX?48e^UX-u9Odyg!wqT6Y!S?3+mS1@mbgg}DF=gMEjps!$ zQ9k5>)qJweamw*^yf2P7B4;+Ph@E3cVD}OCZzW$3oJSD7 zoLz#pF`YlW*;n8B5=~c12H4*39J{43iTS@N&1v7$$BTpfdKqS$1_n zP2mVd1rrhA0}r>cuQivuj47t=op&V^FJ+vJcK^J-o}M-{7-B{8B|W6TnL^6zKta

#;|z`XKqZbsJF!D)Qt_nUysn4%<$>9nVT=BwJ+%WYZ+N?BCyYAx z#&OE{1_F?$sK3k`Fs4Lx*mTKIB)i_dRouI&iI`4WG|~KZw|5R(<$QrHJzQb839P75 zr~wV+0j#U}B(_W3(#-W^klc)}H1o2ke{0)wX*7A-^FCp|1U`HmrH00dF!|8!%49?*Rs=(w3Y-r!T_5{yWGAWo z*y#mz`oXpW%lMt&^Hj(1gZ}{;p(7sxxIR4{9Sx@n1G2@?<2SqjPb1{u>oa_5d~}uQ zxh>HmyDF!PKOzIMcW#K33_==?a$Kggo{qRIn*Uz=P63 zfBrk#wH@?L(lr#Tt^oma%04?08|UH75$lz%k%i#3B{SK+Lx-k+l@uzRzQ{+JuYT=YkR zARi;r4X-lq$meLq)j9YQH&UJNakX@xe*Cgvwm~Lfda~Ov%m4)nA>1SM0uMQm*`_|g z*w=@(L%8zps$(Masd7PJuxthxr3+c~^HX3`FX%xqIlw$9-Kzubo(~qBvVBc?Q3Cc{Fq=ilg)ItPBT#-0bnOM9k@e}tw);aF0XGH znrMl`U>oRRuPtxvy?!*DIfNt+U~5pV@|~jJdM2Z5j=!&|)3c(7{v^ug3RiB&28Ker zf*TiRtI*_|@F`QB70NfXkKR@>TtP?+DKvRQJ8gcKsrdx?{N;X44*) zPh&%M?J2KklPAo|dW4ocmHE2UyfjLq4=1R5`W}RBZ{qEkoEVBe zRey+=w4bI6;dgG;lB-0l3^?aqgvxS$gnDHmM63OeK?QQ^QMD{HOQN6%eGq3so1wjFSZSPa(6KX6Zsx^I6Yp!GGjkojss< zuWE#}SofNil4_k#Hs}pCc|#8aQmi@&(^3UVs}8dVm#cIhwotP&*WijU zMgCwFQVwhz9Xq28fxmpe9HZtqbj*8Bh6pqwlgQaAYrEGZ53jrLvbm;B_Em0IxN_#1 z&)j*MalmM598JD#{tQzfs;DRHmFgvN=JfIU`m`yO7UT6Ly`R+Wn$4LO3F-%M;=`C= z3*FDuK%xfG)%UZWl!@ddc9#fh*-U+0x%o3Q_%5FSGnG}ul|_R;=I&eg+1um>IuKY= z(;$Z@d18z=dg4=_R+$qv`aKWdda@r8kTy>1X`XbtIF(e(vW%=jCbykM;}D4ZQ|QT> z?xqrIR#e^VYyDSS8V-?M20gFlyN)Q``m*&pbX4`uBaK4Wj4QyfS7>*brU9NPWAwcN z{LRjDsKb`|?RhG#PLg#=7{L&qqV51Y2SeL)K7E+QW+@>vRQ!M1!;Hn?Z=W=wK4=XI zK9ZBd8|Uq8vo!d7t+_LCO9($BoT6~|75}I6Zw`F~lC8Vo1mQ#PzaYgEzeg`!EYx*y%HmT($xpK^*5dE>dxY7)g*hC5DLUfwS9zK+Sut2e5_qX>BgDF>x1rliXh^ zrW&iNCcUlvN8Pyc7=%pxYs?8~I>vw(V+n3{^7QEA*Fn8=%3=;3%eKD)u9eDl?6@?( zh^VJj{W5m*`Ix|Rl$Ernu-kwjTf(QX^E``RXIYuz?f6IMy3CXfTgUDt{OJV7WOgh3 zj1xm>Lg{S1M!Rmn*zo*LN2$lUD>{$so7U##Hh*M`bZTz`U!ETqN|1#idmVWS)eqi_ z!{B@L8`~iNV5N$xcOUDRsiDH!Q+Fmnch5@BhMtBr=ksH2 zU26lg@`MJ}C2xqz$r>qU25hPpX5jE$4<3F$!8-g4^Bn`p50LWb;Zp*}l#0T+P2LH2 zWn%EUbJTs?+x+px*)Op<--i1|dp=+WZU4!^qwQuqb(oP?0xhB4ME|S|Bz4VOhb6CT z=ppg^XHOy#{aaPFZrW9Nux|Z}N#OqmB>peI#`@Rbe;x0`L?Z}A2!TzM5VNOEdsHnO zKYL?rF2K!&% zVauG!(f?s$2GwWVnpn-2*U=@?tEbhhTUQjf%-n}V6V&%6FYHIzqjC0af*yKWj2Fr+ zfwBkv(J?#q4P!%5U-Jxp4&v>*^1ifRPL1*7-?LiP4F$N%{zD_{kMk#*=gP+lp3a4- z9JP27+@PVN&$kyVX}t4>Hw{flKnrY!f?PT%hWNc$`zn)TbYSx7iezR{-Xkz|sbA4X ze=c2w<8M-bLBnJXDxGp5K6vh%p>K?%_(+*cu70>dwHLY)clL8}y3E%Aoy>X76~`E` z~8n`z9;73|W~hJLBY!%wCiZ*yB7VMlJq0qWZa$lsgd$2^$-k$$$#%@n~8CF@r= zx-<8tOg-G?TzB|3WM;y3mpkxTbRlKxS~U9k36@=#d#c34+7>pR zbx=zT`6%GG$sPH? z{1pnEgGZy3o|zXxX? zN-`zsxv(swtC*_?&T^ktN$q*^3r$FoQXFexrQL=KiMk63O8%tN8(hyXwlf;8ywveA z`uO&>5$(^O2e{;cpw3huMXaAK24zUc1vqF5sv9EEgm+rAxM_q1BimoQ)R(2T>qotD z9nZPx%l#(>=vk*_;d)OJq<&g$Sdl0c-Z|cO^w>>gYn@$g;@eKX4xcai((gZ??kRb)oe!Nr(gkl}?|Hp|%*h2@ zVMcR2Ke=?jb$#CU{{3DK-mD}l6V{RZny>AaLm4h|zdW6Ms@8D5`UO@m0AfW?~bOrXjsCjU0L%N~B$5_G5Yef_d?>;DcD_ zE;rN%)Qb$xPEC7Y1g#!kc}_{esNFo>V|DCBiH_78aoC^Dm^NK{c?^9hzU^GT5uCvaU8s`AAJ&aI_*4`P;uQ0OjC&H(mDAPfzJa9n-TsLuM5S9oQy~EgFA3V^IGO$5Py8ajJ5CV$&3ua(y3aXS) z(9k{#M-+T1jtd&Iwl_}v;;P9O0(k$k>#-m&&!6JPNI;zfY7dOU4k(=bA**1rJ|Q`h zDX=O+SSdr&X@Y8)-3STvFRK?U4DnC{D3XYX z(mOELCJim^J|=Nps1GUqkekLAbg&S5J@L%oNX_BGG=<$^3WC%{dw`lk@K?0KnSIp2 zJkdcbKcfRL??$gEJ>7M~juaieU|!@azOXJa#l`@S*#2@T>VrG-fDRvdwflB7TxfOM ze%kx|w$m_3_VvBB?Pj$122+e6WaV?zBBBx1=23r|wY_tCl7#-eHVmL`i8-WAUR>on zYKd>vIID46jb%4>4nDcVJV+v=ub`g`>*VS$%bhz zj}Nr9fPxAQz)p4EBuAM@LQK!gH&v9{j~&-W-d^-QR6yZw5in@)@(Z^KfF(z~y&e(6 zNWm;7gUnRIOW>0_FlA&4y(TYS%kslqbentT4jKP(_i?dtfHAk*(;7;frJy&wWBJf2 z^90m+P<2Qag<3)<=W+)Q>BW3J-u6-Hl2F>vZfI@lPAR(d_1#-5%*yc(Xq|zj*V}gP&KsU@zOq+?j-)$&qXgo_1A)cr`JhPL zjJ0*k!#A0A+Z@s-m!Fw=F8pTvY?dHnx(6WXtWMK%co!HI?Dw*JlV?mEQzE1<6x)hJh5Qk2<-NK9+1!3#t**Qbp@&1p>=C< zuk>;7(b$~x%nG7qNDdt=;Sty9R*XWFj;EtGWw~opd=`rAz^mQf^UrAg3?%&xJj

zC*lGP#E-5em<|sQBM6m9K?Lz2BV!mRT)o~xSt|8*u{@$NslHYtZ&zr)o79&MH z@hm}vF8EGKq+4WP`_MRYT#fVI%V%7&Kt@m>UUeTP<*VrS?$8sfKk>*kY(pSjKy`rn zf^xj4Yc}dh9X5vkP)DUF*gK%e+Eb3NPS!)_cF;S{XG(RW5$}&(vkphqS#1S_YjtK^ zk*SKBwgL!;OQA*H=ws}mlh1s4o|q4a&u^-lsw6z6UorCBfkQ6IkV0m9Niyq7+nLd# zL7~f6$WW!?Nl}SQCxeIl&06js6glGD*2Vx7cS4p3d|&88S4#eTK$S;1RC0A%F1N&b zBXelT?FQC8)8m%7jit)OzWs)~k(zi*R_Z0uXJ?8|J(95~)awwUe`Efi^BC;AI64Xf zogo_o>L>S5SH=+@-$HDDoCcH*&lzcK5*|Od@`pvZ%!iw$FvRk?>TYQZr?KdHrS1zl zKtJh0PPc4K{tQ2&wib}n_f7ss9lM2jnnT;5yPc#^>`VR!XKVePkVS|}Ba#R)881K$ zB7Ymrpx=OUJ8qxD-)p>gNG@V&TBkT8WxJYq^Q)nlu)oWJ)OZ3j-a!-fZ|E4dSjR4ciQ4)cJ#F_3Y}L%|XU67EsV*9$4e9x0l> zvW^bZ9#3BIHPCBBZl9P#ez0!KdjqdSNa|?>@lrz}uhLL6C=xCCIyzs1!&xcTXX8q` zP_06*hpY^Y<#YczArtslI9&kKrB#vyq<(sbC>5Xj@X61W=?3rZGemFd&Saz2o*nA& zJ7P86#sQv@>!DAe+_`?EaT;p3D;*>`Yly}s`qSF_SeZi?W2Zwdbt7%BK-Oz=MM zL_k!(PKqrBrL?jvS-0#=P)J8pN$jyYQwzNT$&A;D-=0aR^kgMIHC{`-058KfWRZ&J zk|$;caQKQ;!l%$%PIJM^cXB*@W=8DH?n}E5FUDQyz7;4_@>Bhf(P#4nM@eXh9Nz&K zYSBU1hsN&SGKT!&i6W$|*YpL^uMVK+#XK+JWz0eV%{R zukE9GI$ZGzMQjFstcL@>Wb1t`D&1Fhk&Zw^a`L2RGTfo{6C($8((rL}gon~TX2RT}gnXnW2lO0ZfZCMnm|Pc2yPc#DAQrO%JK2eVmz zS)ceGkYLjcPIn+0r_a>+=3U7A0T;Rf=Z)r-waO{~UUWFJkJ0K z`IC2v1MB%@QYbIM#jAO$-lop%hAWrfd;9lCwZn~#fAGsB@E;cFSGjkGcNsIcKQEU8 zxVz;c;Ezw*ImCo#bya&D5G!{7F)^8Gs^EGwBKE;87QAJ;%$Z}k>I@^uw?j&T9FXWu zoaOWgXgZg5@9nX=ny91E>es^opY#at6?!(K2|;*yllcR6{uA_#>}yx|L&xq3sEXZf zkiqdDA&RH?{phc{RJqmH$Wk5i(D&tU?8G?pU~d?k9&a;L<5&EP;;iz3Dc_8j zVHM@sSBJAWf6!3-t{}@e*hM_`)F5RGG9ae4?f1|aL z7lT8`?!+t|eEXc`MgCwiB(KRs6QnB~U1lENPVU=!0x3Iyv#RAmrn8N!O>1=a&haLQ zCw7YTmm-5xCL_h}Dt%;GI?um({N01E-`_uo|76vG#+?V6l4N{8^-1e!ZGIwt!JAMT z!d^e57E~wPoHVm!m#UW5a~P>4m`LCFZclHidc!LXzGRdxqczasyPxhg{Px>Gv~Rv? ztLqU)CL^$O9o>9n&|7M2yjKqay+Zh4@R3w|@}+*pTW{~e6@IVl%Yl17^ZsSL-4agY z!#T`wtcP+WIW!a!nm2#;sO7rZoRXe#xofVTrBzj`Qdx5CS#Og;u}aR*=fyBz^Cx7O zoB;2pxr?NpjB716 z#Q73(*tFlJ4UH&{J-4D&o1p6vHxIC1VSe(L)El@xyir?jcuco=-JE-{VLE8xMp?;3 zgaPURa{x^?#>VAygPSAyAYGp_gC&woacyFw=-SJ|4I+<_rqmllHqmjW#&ZKXRoU#h z;Tm@XVd1F3P&UZPgN!6#BbdTApw5l+HLb7BNH5Ad75>xYFPOI+h>1#kvIvkgWDJ6A z2cj3{K==n@T`3I2qO2f4jqZYqdA|rF1$7)Xj^ZZ=M3Sd&45>{e^CInk{ybN%-g%-| z?Mj@?IqD8uijgJ^2tNW4WHa7fl}+$%e>UHvSNc}l)-buiF)y-B4q~BcLK;Nggo?6F z^<-Xy$65RnI%#$)c^e+%{T_E?zYAYDWZ{?hGr#E9-;fRK?lVU6pV!JnU%HTKCmtSG zA>2>GZ^qUJxx84v{V6B-Dyw|U*QYmWT`vw5U`v|l;f!W9;pH~-N1JN?JObBxZ|XAT z^<1Yqm-gJ?q@(Cz>8oj-Nfh(CTi0%7kPlfbH1un^{hKfQ=aNjEf`3EAV%mxCbMJdJox5Vs!{5$m zG4g@d4_jH+us%rqX4}`(eHglFOr|-CEys2CyMZ+x*=n3Hb2auz9Kt{IcE<^}aF$&z znk_vHdJ3*5FSK3E>;n$G>9sSQmCbT?b&5)xF5z?DmxwW@+7HZMAQDnOpKbBPHeqY> z*J%XgWIpj;78(0FWl7@V(u_mXQ!o;Gy7cp^5k&=dqL<^T#e)JY``;>Kws=N;3$LI0 zazvXfur+j6Rn`sa9yY-;zDnQ6*U2T z(QW1kLbMGLG(5KyGxrya$G{8YS>tG(JH@!@Lui<0CM8yPUx=1H_(mb+4Xgxa!477e zI90S$KQ_AQ32F!}?(K+%Ly1z}S~J_q=U#h1;tXD#O{X@8ch;j^^lOQ)KQj_B%V?%# zIXL0Rz^rWu@xM5HSI~72GhNDsau2D6g*(KOiK-*em3_px_EmcZrH2MyDutF~e- zicZ`)5DIFcUYbPcHAc!~UrCQy{yJUwjr7hPVg4ye>5$kA;ZZrOZM{KXpNn!A8{B<% zyI`ot;LGjl$1!vj#(R*_{3FNV9Zh1*?wrkm3tE|p4_r>HvrIL~#rOoDzt8((d@~C- z5YJ?XR><4NPzIT10hG9=qi;8pWffh~AV7W3?arlLWp7mb67m(pcadDD?)o_UIN(_!3f)?;YjI z$x%99#l3H#e#7eV$16{Hy~BJZzbbOz`mU6f$l$Rju=ml>@uUn2w!0x;RF8A5p2TuJ@`Aal@{+ttXF-yw_7P%qw!UEw%Uj;)Nk@MRNaYkjj*!%RM?-Q zjNl?0?{7{I>lFN;1gKf^uHQrfI0mwUzr^1KyF%qx#I5!1Qi~EdlC% z@Tpg)-5n8iP&3Uk=p+2d+Tp&_-?R8~mN+Y(?w4(8Zk#&)+1{mRG0OSCl1pC$De6C% zqx|dniqO(uFfqhJ3N+9BY@5$a>vc?Z^rfUTO?Z-#@|NvPrr7WyU{C-4_+VP3}r zaNde$_mCdQ;fpL!*<7SX>HXj;{ks>-WxGwBjoX~0Yg3`e(+^Ha&up7EcGw|L z@dHkqLH2_{ zh8Xk0UN36_`E4a9z3|+4V^d>YOmECXb{@5@Q!YtdF5|@f^#PhjC_u*|a_HA6@E#1F zwKFiFO>lT4sz%}t>o++aoYNinql?@eeGc_iyD`8d?hU+J%ZM&mN|he_qds(>dBV=% 
z?57Jm=d_~AG<96Q=~f%Ee^7+2y|TjRoU1DgO1@G*uA__g{piu~UN&NN%rcKN<+uC7 zrCTrdK=ET6G|c4Kq@7yJ6U(|vj$D%x8xxkUT;&QHzx$0TlEWE2j=ZHWWps&Goou!4 zM6sI7T0cv(`-%E8D~u6G9$agR^T3H^mlKYic$!Hl2V_c|*6u>`h5Ekx^(3Hb-?Vjh@wEES zLvNp@TzL8}c6kzWSL=0e$j07d)8rUy?1^9nwcB3m#7t@d{51514T4rOI zUKE<&<`-3XDEREpDG!3}+tj}NQ+V~{{zRD$JuaW?i3E75o;W!E6lNz=VH7y;=$)9v z>6BO5?v3r3N9w%`X$~s5zVTgwWGAtvbghra)a+Bz#?Xqe^*xZ((JPTqDs%GKi^!CV zIHBERl;po}I-Q{0c9Dmnl4(1*8Tn|{v9xc2N{cV5lVTsG`=%&7o|f2Gw1FqwM+l*D z>S*_Y=jfUp61yJ_9%`F+d9Y6M(z0nZp18wv);8+piKt|Hb)%~a3HhQ~rb4wV91(!^ zKqlJyu5XjIyRahuf+qQwYUPGFIk_H&sPNf#KKwqO{ohnQfeI$;?NOf{%* zKC9=$(pac=u_M=O{jbg?w?;O2U)C>t*zc66=XR@Gq(AXN`4)o#ijI#IgPB|~K+|s@ z8@itiG+riWBptO)J)YkrwHf%n7a}3aCiz?j#LbDC4%-|fTlzO}eA4q_WfwS*-V!p#Tv9afcIAu6U8}*-?FJG+*W^kl;Q8SJFu3u(H@toM(6IqJC`CjNYuiSD!3G0w z?a?R*u*)itZCBfS<{A<}nZxzd^ZAx-@p_w0w#-p&}$vrciiy|k=i)V#pN zVXeW$_1#Lq*0h89v37kn4o4GF>MaWss!$*O)L*bPBmfm?tSf+bb1mZwYG`)L2PuSJ zw;V_Oj_`yHK@OBz!cO-92?1F$l`1LbRE>1%KxO|@Fx-81(f9Z4VjE&w9Us)(op+dg zAO;cJ;#Y11p^X95Y!isq3HYJ>M)}yS-X2y>-BNNPljH92yc?q*O-sYPLU@BgN)PoN zxDesCr~y6c@hN?BMRvs`gf<5!?54Z?R_5HY(<#}U+s>A0oDu>@xqq;){~4zX_EV*5 zM-VlxP(G{a@Z%=|#j;>RSnIvb?o(rng;cF=@3^DpO zAipgov_@rqpjdTM9Yt$LT;07FThse(TyFjBD`{lSVY-25(w6Jq{2>55S(Ecu91c2@1N;!8zXBGOVKlyJ_ zl14z@Tc|QbBj263#3y3glSMt4C(=!<^GNrLx49KmoWxM)e_pTpt-iVw7?3GkgG7GK z92@&JWU83J$0z8T{gMD60t8S-vT~0fG+aet9K{}c#t=o~av-u1J zu7Z8x@$ljZ7#v{DDT)E}XN|m1pFYM$5cB@2I2iFnPQQte;tLZtOlSQdpF<}w%7E5C8IHzj(fx@7cmU8+lz!(O@fzg5 zX_j0&Vb9lMJN>e~xU03j^pM3q79%nIR~ClWs(4gpx|83>)tgGK#jniOh8o*J%9hPU z{sk7fmL6hYPjVzy;?O%i0jR#C3o$WcKBd>8oV^fMh|a(!Y3%wY@0bN^M55F^5VacX z{(cm+NrGJ>y2Cc!-YzoLH<+8h!;fP?R`v_dWHF0p2Ah>X=?khkv1_q&o0ByJ9sXoN zx@*%=x%iS;XjW3sHPfEP1e4UoviLKqdO@+WqQC@ZFJlDZ?K|mZ4#mHt1GptB#tEb{ zIPT4w)z?(nY(_fTe>s*UoP*aqme^2vduFWxszTndnzveC2K{ve*|#0b2Sh5!RNWUr7>(~QWzg-~S6TC#6h$1cgf?=yp}lNn_iX6gBMz4!YZ&vRebeH_nw zU&s5te{|I0n4jkNT|b}mbAHaV{XCZ`tm3~f9sW*AfsRzNRU`a-%(_$mI+Rg!9=0GN2 zu7)Z$TE>uwo}iUhY&;2hueQzJ)s=3V^3&(LenfON=9&d{gN6dO$zk2vUR#UmEpHWn zl^(N$s@xdK_dYpe9pA(q1D>mNonqZ{Ep6MPDufYL-dj=CO&M?K%1={7x;{K& z&nfxjA-XyK3cWo&udAUoOiVd$;doR3xCv?pto~MGxOZGE;M_-|j+6LYg@ktXJ`bHs zYuFe(dWV>BKUoU$7}3l2H1ufSGmcGg>+P>4mlQ2p>6}MT-V`m$gcFBWE%_NG>pxa& z$Z7>@)>l`)E3@BQ*>?%zbO-twi%VJ9w*w18RP-mQhkH&>*psWjrS6R0JUw+H($|pa zc;}(P4@si~T1A?y!&nemxyFUxhH-+6u&DMAgm=gP!7n0fNB!}!Me*6TF;B()ONUmz zD!!P`-fCHp7Fwf@Wt_lNI;@eu_KNgrzw1>>Y6`b- zH9IS>%q1L3U^jsvr%xjhHnH0V;%&!kcx!^k#V0xnISHR_*s?JRJl;36rtVIM> z7Q2~^7uFG=%HvO#Kg+r3r`qvomg)u+GIL+YoiWfZWQl*{Mf-Nvh=U#Lgu%TR6CxLM zC{4z7iNQ!EAXV{1Xa?7o~pOS zh~A51hF6#N#!Yfq1XBR4927o_WbGWF2-Y=>Ysr0EQ6@Ks-Q|P%+dTv?9$Sy8N)lH)W2lpaTOi#4 zXrN_OvjVaj`v5UT6lk>R<#(&M`J%YgkT9nk`(s>R?!BLdg_E?u=fMK>?$1TU-nvj#>QyrP)QVNYEE36;f}jjZ*s-yAAho{&q^a z_4Fmqd*xToa4Rpr*}1O;oLW!T?*T#9>tsuM-ZYdZD}rGrT}F(>V9sSoQF+O27^<8W z{O}#XB$L%r(84$({hWuD4rNlz>mpJyRpE`L3J{~`GrGck5RCC(`l@}_#0q7=w;<#j`i z7rpMJc!>OB}Py7j0kf$wHs1q%Z{vAqah^*%WlE(Fk9qA0%f#S(L@f%GHO zCHu|2-O7)1vqq9va{WAvxejZbmkZc$nTrIfI{?OHNgb=H#p|L0UR5#t zH&Si-tO`R^Q%5EXJN&#hod4S1PI{@M zytz3=;b2R-lgYQ%{@*7?6(Q&mr12$FZlt9?>6@37y~Nk~Z)c7kR9v)b?QWiC`U4j? 
zP~ZXSHrtbt>MJlO((l+Esm};}OmX8?5zdHBKKbz{#{;9E%kNz*?ZQyyYd1F*OCxNc zfjV+@84@Lj06dr;Z`^q<4LfvS-(To?8gC@7U8Hy)$pmGrwmZ zyXgW9f)PWCdBxQ+pVZVIDX9H*auqo!Cvi|NPy@maDO~|8$(W{svK!ISbjKb=t08aM zkZ4qde%STl{yN7;&Qb~eK~o1|u2)XiP+nDCQQ|7eiDkoFGXJl_rhn(>EW2A^wnf1u z;zKh;iB^Ju)Ak98+R$cs7GYz{8NF{$SW;fQ?4d578gGumrR66H>Rzx^Y$ZT~MLV#S zE-yc!+0esG*0GhewjPS@v%>E958Ynab$nfQqwrH?AqalsrNBo_;csfZ&~uQdBqnY+tLE ze7aYmqQBtH^4lcHCD!*J=7N?D)>lH%0`xN^g|3xW9q15UTUpe24d?{rIF6io|IYH+ ze{|{t|Ka~XwDSY7U8cKII-p8kCRhN9tQD5ImT7b+00UfJjH9u*VfKurA%E=v!JWN( zen12>`9YnTGHv;51kno&Z)!W3qNG|P$0GLv|N3~UUSqRR>bc}?Bys57g3a;lT<85j z0sk{-K1U5(}D19X~i5Hc_6mt7XOgpSTyuT5X;_47Kpn*V)W z;$F-V#_~a@O`*^9XN-_$4!R+u3hR~@H!V+y_V&RuU=||dMbD(1Z08ob+KitaV8MNS z74phSO76(wV<*;M@=UJa9CwF(;2=OF;jMzZ^7#u$jB)cM8X-5_e`Da|?vOzo$`Szg zn?>XtCYLA(25>u}VYCZKk;U62+u1o2o>>jyCTo{~18?4j&CB)HPM$hm^t+p~MO81N z`!SLleQjvSxfM+IL~_DR7O-rjhR@E6#P0*6a1qeKhT=(!N+J~&_X z`ExwiwJ#YLAF{34zE?*x-P^cHd}(x3Z}vs_LgPnzZxBl_rriwFdI!LisS6nV*|BYC zmv!W>5PyT=fYtI0p$vsDzdwUBp}Hx|B=wcL77-LHkU8e=0=pOKco zw!JbF!Xc!kM%7>EUKsJ_(Nyk$YJB5o5wcXWYy!LneWoyIVuO5zpY$!+A%0&&!9P>AU< zHNxR`+KhwUQy+v1osYT;4g+mcTpNN5m|4KudyBB_Q&v~%?a0W05g)f4HjAG#7ud4`n2Ef^Dm@A&Ejm3IrhWa3Efia7j>D0FDJbzj;bq}Bx5Cz z6XCa_ncS-w7L-NNjH`|+DYD1a-TNWak`fYBj&ypF$u+G1$r0H)_i`a77st9hKy`ys z6<`d0+b#M@n?`PYX zR+7TXd9B28Y{3>kwW!^JbFhSNy2x{TMsHZ*aL*(E0J)0?uL8RO$g`)E(r)b32!%?; z)6AGtmYIi|k6$b(Xmb}X`H``_=|sCTOf<{Ac`io$z;6|g7H780i^4V#(g5`|oF2x6 zf}_3!WINeY6U2f|GSD*;YEcTE)`_d!yqzIBEE%5FRuN^$^ri~U%hX*^rKW(<1#rMC zkh=jmZ)Y6D{hAsyz%hxZ-J!}5R$Ai@p+w2-#R8debYZ7d%+yHFVb@452W$RgQ*== zh7#R?nuPOynE@Tu3Lpzv1z#Tg_2pZl-Sx@U^H&_Km%Jh)xs`GI+;_vNS5+5gW|&-; zVIAic@8je_-}A`N$>} z-X_qF8@ZF~@pCiR8k*CFgx1@dESa1_gc%Gkx@%fL;z*kjO6lYJ6FzbHIkd-urYK(&D(ymoU+vA}OuaZ5tN);0VX{-mA)hxB93iJvYs0p@VC z4EQ{TjwJuJp^tg$HQp;%LC?C9rZ7)p_X>_$|_5;Hd>oc-piSs=`k!V;wx z3@@FHY|u_Z7T_ns6&m&O?BptDt@;yB&8k#W5^i++-8m|Dfc^7A47Q4+4KcOS=7Ve} zV8g|S70qEM4{kNoqlac%uv*d2Lpg5CWxZng$w5Sb>Ex%{)1>LTBfwj$vVAYV;R9lA zXkD-BC;ON8$=3JuFltvcBO+oUYr*qPK$l<~gO_AP>zt&9Z4O&w+>k=I>$j!5-wI{s zf1$yIMN`h7KmVn4#l5B8GUgu7U{^NZi!n(IT! 
zS2wBCqOhUgMBk5@6>(ihbOkJ1_f|nevEbQ3j%CDu?(O^ur=Et=<1k#{#1#F2SzUDp zyxTDNwmjVqD8To@0MUKU@&b6N4pI|21+N|>5iBz)$Mtl}1MY4Hfpq6Y2W^MmiAG!xFL>DIK;5dTlH*gGn z(XL?Q$|`J-I=ijrnC&k~2}UK;j|3rp4*TTXP4epf!*>3FumwN^$kYqM2S39M@%KZKCQw2`RMT%rTJ(U zuytG-l?e831MP4a{iN1uuXG9K#E6vZ=Hip{irS6XrbyLwYloZghFEvX;?=RLCV$gl z%ORx`qY7THwbTW-70M06awN0m+GjCb)Izqs-3GiUK_Wma+4 zvc=dWs%PPbh-ENXx8;>?^Rf+5#^q6+R~mnxF254=eZWI6JshI!*DBKl!I)am9Z?W+ zH}M2Mb6VY=oYGRvjyR0EEORRWCum@nteW9r*RvdpPjmH}0g~5F(@$!1+*x81G2YM0 z_RU}UaFS&$J$F%+-nn7Z2lAhvOs=~hk=V}LH!*FJ#ct{;=zIDdIuRwB7ZMI%@Nt*) z6hByb$@~y7C;*{(Xcky6;Pf3*1&p0G=b|2_8WNm&F~Ckv6H7&qW$^d&{Ib{2UQ`Hz z9%?*WH1oA0M=J7cfqd{f=kbj_Ba(U?U4X3A{sgT=Vy|^CUH!dUaoNXJ?(0i+$5@N7 zOP^R}_a8nWfdy$UE*n-0nh>#gdBh=<>ZmXAGU0ZfkC;L}%+*$2bvG~d`#HZ;h-JgE zaK+DM`(?ABYK%Y>Cs`d|u&GD}LWu%e7YdOsH>y8R6f@1psT1B8JN z%w)zE0oqkaB+^X?rck%qE^gZ=HL;@BcVAD(M1S)?r|Be>N$7T?mx13bhWWBWc4W5h zSu$fjgKF0QvB2s7sSxVF`8nvHk^kpl@&DT437n$|O7#3EBh0QQK-@6&EZTcengrBx zYAFF@%nLF^Q4D@JvpatNB1%bbXSkaZ^JcuixIj^Eu4N;H<6Y6eP&bbJs$P>ourra2#3rIguUfYKpkcS zOFkiq)Xi)aTG%rK7SyIRb@Uj56YiGIVM*p_OH2D8G5F)=_`2vMdO+h`L=)`J{oP|; zTs}Rwj}n?eE6z_QXU?LDH^q%no{3wC&ZTL`b#Nb5JY-}w9gt?sbd#O}*+g+;rU zz@iJHPS`REu^!N)s}B5$L=kwqw#<#1n2Kwzhl0ITzh|H57IqysOu|O-#$D8f4FZBA z?Jf$&_yosGg3Q%yIvCTYNP*Wpub%{L)$ZTv%O35`TRYHWMh|_W3t%fM7zP({w$Mx|n;9hE``aJ|y)}<6`JF{gvjG5R3@vE_^N&292 zIz51A=$as}VTrD`O4YmDLUE0*@9F9!c({%@HSwE>Y3QfZ)$&+r|DGkAE5kkIkJZ)2 z&I=2xQdvPRp$$|Nwg{z+VBZDCaaHs#-Gp>9lWc(Rj?@uZpWvIeMiyOb99~J~Y4&(D ze`&BXA7}YERLY4r<9(o(I~et1CLemN#%2ecT*=r6jTfXFEfHQdf-8Pso|pt`u7uBM zD|bHEL>vR~TX{+l+;I-xG3_6;GiP9H%HQj*&~3KuBV27Qa4Ep>9qi1?g;n`YH^`}+ zf|K!9XATyKZy(5F`@ih-t^e9k;fq;9g+M0gV~v z@gOPXHhy+U}~(RK6Qmv6ft-{^9zwUEkGOd}V*~8LHB@Xn?*l z28h>)sgzbMAKj>u_Jkn&X@~C}M}7HQ;X7A$->%f5%zh zSx=z@qLDZpDEL1QLfx-Z(-&J?yB&@H1AQ2C_B;6UjP`-0TZMq}eCblqw}%{3sSuk} z?*qC1{*nFobzS(tou|BC3e4E#=ZsB9ED&b(kz;oFYbAx8zeK}}4iE5rGkWAL(R(CV zZuPP}>ntxYV~e9IuFw-QL$MT*dLT99#yHFf#Vif#@cw~JHUR`M%Z$UCrq3M4@?e~? z>{iGvLPVp;{XBH42jX6!ozY6qk;;P~oZE;Ibpv)!C%j3)gwhbSI$eE8N0uty@2icz zAYDVdhBlvZOd@#pBzJ!ct0Od~QOX%fbZMF$3O=GEM(wt09`g9rbY1(#$+GRnu7vX^ z10@3r7hU=JTl#gsHwoe3tqdWYP#fY9{RC-;*p*X&d@Oq}l(6BFE}rc2CS_9K@xhk| zSz5B7_J0S{!Rkd~>~j-{;y+NEn|w#WFmLH{>xqj8Ui@WfNHOq(s9$EPjx z0ZoQ77SorKUiodCIl^y%^|JhjV$M@1OCvo98Uot#qA}|=m}QXbEyyJ(QBS%=8AiEZhJ~2l37(DM?k&x>5F7QeGi8*CZ zy6CUo!{T8aRLd5fC z0K$o+Ob+Y=7_S2rgpaB3GNE*f!b;;O6v~FrU4NCwP6;!<&QdQ3}mEUXK8NjEhmv5&brapky0Z(gy`>pAiGo9U1*&`cizXU#vQ*}vnCV_FrM zLd-z~(BLGiwCv3`L&)gttThz}tux$FqII3`5KTzdD??v9oO2CYFWie>N(8`#b!jM; z7lkQ7$tJdB2#lc&lW$MAyj!bLnLltX&b7?&uE9)$YuBT9$9zktFu{T;Pcf}q`z8@> zlnJ%zLvN3iU(J(>y;2=xZEeeDZP?23>H#b1JD8L@vbw_e2mB>d*y?=huTQC=dA@DNGizpdZLEQS%Ht>c zFK#?G6uvLBx_$~$3Xpp@_+%VVen2=LV``KqnvkV)9W3%|?_BYZ7ncfOn&mPPG}1h7 zB&;dSDa8N^>=+FVfE&6m!o#Yqz0gNdw??F1!sd+qkI$#+Ea=nDe-;+Y`Z>Xx=Y>>Z z9w=CR?r3ddu&CZC@58=)wj^c$gg^hO_4kCWUfxW7&e5J-;*$C^T)^%Y>%B;&1gdJCSv3rMQw z<38R1j2PI~`BG7$Oi7EOHO|-BIW{BrE-x0eV8bv~i1wnbP&mIw#+Ff|bCQ_Qc8Ow% zw3CU?OH5e%k)8+_XA&%lrZ^u+J`xu#&%)Nlm3s@p- zlQ^@W<%g-x+>|FhqkOIfvZ6KGpKU}<9NJydmlHqhJ&esY;j6j&LGG*3^hRAJ zB^^kL?O@v9G%G*wQO$hGRI$}7_-gtF-Rt+D<5}mCVOgEdqSb+^pUeZlNdkteMz=@{?^mCZT)dG%44|aJO@wEYFJJYkSvRMA1Ly%F#9muKUB+(hoBw* zO45+7!9sy~Fh>Vxv+N`?W1}@g8XI}4bY9-6sk;7*cA5z`HtRr((5f~87swGD*MpR6Glbiw$H$%mL< zoj%uA(9TqjybSkeeHbG(@F-Nc)#>ukPzbu?hVHSIs74*FTX;qHwj%W@_EJY5C+R8d zSK1X5G2&bM8GjdH;k9_g0dzaIOr5TrQAy}SF!@WV*Wmb5wbj8c6c-96p5hW;>q4{? 
z{q5a#=5se{Vm0Z=zf;8df8$w}KcoNM^ol%ij5+uR!qP14P?JLDAc(!d(|!r7)utvj zs*69}RbaX>HFd?h^^hDdF>V7>1>Gq_YYu$WO#Gbw-o5=#dOp~G`eD?lM_{;G({kA_dP(ixcr7p40;wvS1fwdE~6(GDt)S{h6G|&#y zvUDVb|4NSVn*M5f=F$keav09>NmuvaAV<-^%X<13iC7XtbmCv7XT6x#E6k(o^Q@%W z%`MRLprYmB0TjpBO5e)M5^Pn~FUNsN?>7>0oFbF_Q|e5~mieO%#AQtD1N0gDc^C`24=r*(IZNORE=kW(p=M9iTi@jEw9diUN%y^B4B2ZVyaU!AF3Q;b3uCyA_-nAnY zC+-}Y9J0vJ@8-&dzA1z!f>Z7?Y_~Y`x8fhjkE@L|Cb!R`D6KRSd#D-q_CUqRIqMzW zz<0vlB{usye5Di%A(S2<2`otxYA>BBU~;(^fn#n$%i?4jZt~=VZxgpn+~0Rf;20VG z{RboLSw!D_I1g(~c_h50c8AZmvlMraO^&muOE%5c%_s*Lc0I$$HVM)Es-6@ zwXlbV$HTay(Kd{qjCVT6*5I7YtW^%fZ|_ z8j=Y#H)}h$!apfCKv%R(`hUxi?QQmU~NSt%sRwVs9BiFYO z?gA|Gv_vL??}~W7B8x?Q>O6AgrSc{Ae^6T;d(HYH=sb=Ukq)d87Iwv$fLJ}L z4wAv45lf=bKa<110R@kRXp}u+iw9+q*qORasiI6gH4LAh?S1UXbG#oHdfT(ErC)>4 z{FTx42jVdUo#MwgecDrPw8@Osy6zliP?dJ_REKUrtS;U)o~<)~|>VpnDNU z1Vwr_jw(zPj9<%BCljBx#%*J_arWE2cTS;dA}DeFc-f<0cu&B6%jaUCk+3p+wX+co3l2vGp$1X!?RP$vI>xcUsw$xJ${p%uo0Os6#PBaSa$@a zi0{2pHeB`WL9wDD;i%!A7yI-hwFPwdRsHZR-cnqyYKU+N@^wm3H~X^Me(+O4&;eZV zcpzLAi%Un;UdycZ4ppKbUt9_uA;^TDF!H)4$ET?O?N!i^^W6u&E-8G`G5hxEGf%lz z6q8$tFaQX|xRt;kboo#7t0M!Yh6zT_kEAQ>pC@0gPF4Hc7h2knnJW3l9Kez+l&BZA zH1a15!#bwB!j7@TikjB*OIIkbKMCdsr#D|DZIQ_hcDd9o{BK6|SI@ zwNSedL{}w+;l5Ws!u^4SWv~x@I@jlIUEh$*TDX2=MkbRI@qR&T`Azz(5=*a%6t)y$ z1SqB95@A&`t)-j}baSfT;%Agm(dxq{VrNt2tQFAyG5#`M!QmrcD9ru({?aMM5O^8r zan9s)&ERE3F^}mLw8i%p*_wW^t3NGbUGhWfdH=JD{Ei-`0Be@OZxy-v*AIoQ?gW1k zAN(u4%2!G0Bo`1l@c<7$M=eHA*G8hmUYU9p8fF>$7Z}s93uo}lp}MRWS|6@ow^D(P z?=kPgaWRL+7pmi8eOC|ldQW0CZ|STX9jwFuY$FVz zc{4e)L8}D7OPEbNP{sv&%Q-)K5qcoA0+btB3GmjfL&yo-N}QK3hQrNO)0OgpVB9MZ z;Rt#(A+=FU*S?k@H{R{2Zun=CO#jj?e|+Gq9A!$G_^>jT(mMTzWeB;jWnHnq#?Wk; z)J0r`&T*tSPA##p^*O;+6}jWSD&ZXeiCCPKDrH^fb1b2;B}*&mwhSbi@d#|4BGUaF zFmIFXZ9Z5&p`U$&*MP-B7jg)a1zonpaS-1Upi_YK)a88)*=~y^bJh-5r)l#WH#KCqyH;QD zv%Rrz)8)kN^Wz5h&Y-HDZzZt#J%N`4<)|YnlxbwM0$pzu-R}7DLPnpK-ZRBqdF~oK z#rPbRqTCxY$ZJUQ+6drU(q(c~-DvwTO> z;1Vg9V=u8yoANGdpH-SxXL5B1;JpPqRFA9DoqX4bxl&yejicN15}NYe9+z1rWa>)S zvn_6166gAO27-ftT*{rAM!UcS9kQhBdbQMaoRV-K2Co@#izxKeyrSv#wQsyaW@YaJ zo*jGjvD2RQu)2KGzz^uAI?Z7fWEBf-1>_?&Lu+WoK`CQSe_^)d8$l6M$W0;y?3l8G$7{o8P2^Ls^oMAv0KjUygUdEW(H*uU?B039F$ zttV9H99zI|{?SC*#(#;rJ+#BE`KICt<9*wMC4=tA*-|$)zX$3hfZ-WIE_9)JC(!&d zFLxN1^=yG8(N~3%yc)hE&rI^@`Sz1NT~D1Rf{~&|jB3D!(U<3ecHE*W;SnK3Mc;w5 z0V}cn0gCuo_1P0FTP3;TUupecetXW=nK1!z=AQuyMl>TqM{EtwjlCZiI?ppKF1Y5M zJ!N;DgFH8X<@9MgTI28qO24KV2R@|(N|o9os*p7GG70LfP#C67uV_uqOJu>Ac0K9i z83(KB*YWi3W9bcJZw{Wif5LPeMlu2J;6hP4O749$Z4?qPSE6u{mcxD*%nel6XHMU* zt}}zE^S-YPYdQUKhUwSK9GTdzqgk!h0E$mH{m9--rw%l3Q*GOQ`gF78dVPf7LRpl z-z*-p3c{k{To}j985IhZL`MiS6;M|buMPSI5qhdGoP4_yW9$0t*#XpNiNLZuXKqP3 zow#|FxeH{66iKXviLl~o=rsS;3ha=U^7q-?y^z`DCzOl_76}IN(h<5R%$@r=ysF8` z1z?a>0PSf1svr#7$Xe*&p6U{I2N>0uuy+~V4L!x)aV3L@i@o^bcSl@CLqrzzn^G){ zbiH)ifTB(cS?F2_h4I?nC>z|@^Gq#Kx+}5J_R6;-UwNJGkUvPGrDj?BhqJ>O;F<|? 
zWsZr-GYXnH=X=Qqa+$KE=hLne{tvRzGu$0w++jD`zE1j~Ah{(t_DyI%u zW^!pENToO+FK*&dZK8o!HMzd^d)sC$+vnkTZs&MOF{?YnS`B7Si{~ZBK$SjoTN-5v zSYe@nL-M{L3tIg*SU}XEU={H8XBY8b?qbOck=M%B&Y1kv^NMHrKKCs!s8?*++%D#a z0jbDx3Dy=j8AWMhH2g}U_K*|1Cq(pPiue^@*7utFsD8Wd&#CBP)_vr5H}#C;t+B%& zcmRj#+G#0jF&IS7!DgYk{yulN4A@|h*R-U_%P7tGRnzGT(~&{TU(0!}PoIS4O|91J z)!SIPlDfUU4?4Q(9lXlRYjEcygcWcF-XZuNRrJXDKMEU2kx|sTNmdbBtRv%7r z6lz5>JutW^%C-%9TuiY#MHEj$XT=1^CyiE-9zGl7Ai{|gwA*@s&LNQ4CF1!JLCv(y zq^au%neG@rCRbK%M+e;tv6O-~c3O%n-=(&b3MdyCm7oU$+DQgg(~b}4`Z%mkE2P~@ zaLquD)uhyxh4luV;$~-MJF9<5;6&V3X7^Um0z^j<`EUmE=zS zmg>2CvNgJ7nf1vK$)6|>7RA*u_yf}rF(s-cNgbb&Ut^P>{xCJaz@N6O=sQ^V*@vy- zOMINd8CNm;!n2Rw@dEFO%LBW7&EV0=%f=w?zBS0PqD0K3Nuq~fm0=-t%@UL=c?TP< z;1|{7NxU1%+}muIGiZ8OJo}(1X;IFov>5J`ISHqfP52IkAVkiUMhez6A58F6va&z5 z{xso>g5z812UB{?<}$4toMd5y{3bku54_AnP|Bhwy6SQn-`=wX6J?y z1LTFeVEc9>b2{ev8Sk`Y3z4>#jYB1QWalbV#iWkp?Q{3rb|M91UMB%-!MCi#&1vA{ z0d9LWO*9dd%2;*p)Q;#zlyRkE=eL-16}-ch70t-irQKvsaa(QIeax z{eCHhTU0EU^Z4syXLyptENJ^(&CmdjA>EK|3|Ks1dGoM?yO?aebnmHjr;~Hwkj}Ve zEGoXr727Vruo&dnZeN5!nR9S}>hZ7H%wZ#>#!y23XS+o^&rPKA%)GKME~rU!4Z3e3 zb(r_dLC074at05&ccIEO1nve4o;K}7aO-=D;Hqn~o%)(KOYyAzFVa{aiYCREsY}Wq z<`j2kP6Am?-^@r1XtM7p<#MZuc(O2=Qk(oDOnJfj%7kElL7{}X-D2SvEk5Xy4E-=r zl~-RY1`2Q(sgTXQ+L0Z@SBc@vF-t-EH%3mEyojUi3)E6t93W~hDh0wxH=)AR;l*2{ z)%HT|{u4Zcag?ds!`4p2^v}HS7s?D)G%9gU2VZgDu`IVm1DoJK5Ms8sP^am%41teo zbKL1I(KijN^Q1aRmur7nL;|;;vGM1RO+pA)7-bKWcsp#>lUw z#2n?3E+)QvWAV7L^rpUz`oRM4Uo#|2LTDnn=&$yt{Db&6idv=x?MMkP8{uR3tRj9m zzB2<^*^2Y|_zRrO$%y|(%Qph&iJ9h$l?DU~5$ zpn!xVM$&8kArAB!w5KT*_7ou1uTE&)D_>#A&Ce+i5sVSvHT9JPSy5s{9 znSuxZSy{5Qe<1>Fuv?(G9EOK=@c82kqB3nn2Ex1?hBr1T`wSaLyPm*P&tBMgVF#n@ z@Kbcau1f_l95>kfVp-79b@`v|Z3vP%XtwY4k8qTU{yw;z$Z|ceVD7T8OnDOTBBJPw z*v{{i(8e41QsLE_c_~t8v1>69o3H0?kR2v)3XMrcS7f#fvJ?N+B>&@Q7Ce>~!@soE zR;kf#fh)N*@b4@mc$wM=X1~rc&udid^@h<;dHgo}tAAEiihu5C=r{P0<~lQQTz(T+ zQvPo{+wh|bdHvZEHU<)yYFOHL=x-koXPpAk zQrwKsq70FR9L?;(lrPoFbJitZZyfbqnR@i$`n&7pb_W9lAL47j3i}6sv+bqLZGrAC ziMSVp;S>|>~uVv_`w4~pQ2v|$CM2H*yy>m6sqmwb8o$| z@;CY)D9>n@+NoB00*xP`CNq<<4W?Ij6LBzL7^;hD)oDI|k*3ZZ72SeQ}PW4hh>nnhAxVItDprzpTkAl%Mf^$W?Y>zt~F=!13YM5`9M6!k30L zvJ|fOmCPc&O{zvx8H8;IK;J3nv{r8x4hmXOUXf7+4$}{Sba5^fzIdO?Pt#tFr}h$# zCV7@P`z8>k`@dh>H)F70O1p1B>tmU@hwS`Jye{1CRB1~C61YU9GG39LgdVNE*L8|A z^(Fl}%cd2wog0m#U7hJhyBAYAz%J4OG;M4|4^XQokcO#T8Va@C;y) zK6q9v<#3YjnJ2G%1EI4|Wd)K(H4cAm7mvNjjs@IF@C!~*Lx6ew3R$VmJG^$`R;o@m{gWNB7Hnby!aI^`7u ze0TLr^QD_<_<)N>wBn{K4Qi2Oz;Ofd^K`{R!I;~j!gDM(MIh(RPw65FZHC~i{N~X7pOA@GN@DRj z-QqDn9JIU>4B7)8c7OJIo||x~P>Q^3f%lHakSrIby?V4Tq>3&W7xgwx>49U-hzD`$ z#@9D#w|;u%-5S(L6n#3#@k?G0qcg;u%dDN)j3PW&F=bTH1y#2WxtZ1yqu4(2^Db;o zJPn|U)<>6zOavV5x%Q)UPAPPO7X>4Gw@j;dU|0|(1z8(|+)>t}yh&0;^0LeJN00bv z743sc1ZqB&-_E3f?;jKk5_IHO7&uosv~)7$sZoz#?an_;Fn%U`x|z7Nuu+4di5PC{8n&0?@s&VAjg)o0M(e} z&E&Eq&)}6h8{k2(T12ShkW)ka;SDj}%GA+oSG&)B5ZnkLM#bb?JqZy_Q>uz&*fKsa zrI2xWp-uAQKtu-gfqo^PsAgPrOCz8>Ao45w(wEr_YpIamd-5cu78uE5A!0A3l}D#- z;bINByBm7MUHyrT!&^sBiJ4ZHBM;x?XB@EVk<_eu!7J{?ilg5p2u;##(I1Yga@1|?Sf5dGPw|oJP1N0x__a1J}`ry9G$}L z=vjEcq0fd)lB{j4LG60WO70o+d#PUVFE+P@tI%%$A1nU(ulr+XrNrs%}81rFtkf+GFP2ya3}B3ht6OY28hL ziEYH6OBQjjGt3U;!@AWw@4mEcwYV7iDu~5QIBcc+rPkk0iZtq+3$z2u-RK}VXoF7D^M092;Dr=d(knT}GLpkF3KA{l!eWKdzSZ3wmX z4+LQ&!+a57{~8WF22Bql9lI7P!-$>uE2Pp$kcb`Jkj-{~^-(&xKyK=)R>6aG;jz@4UI1>*(nf6R? zjJ$A*-wo|K`td2pO-FAV2s7kC>B=;GrjoM9h)0>`y8`bLL{L`~zR!rlD8Ex3bWf9! 
zx@8J?^uLjOrhZFOST=o(mPeW)C|uNzk1Vy*I0W zBtAB@P7y%?m=CZa9Iu_2F5Ps*wc-(BpZB6q2?#QWd8poyV%*NqkrS9nzeg{~@gJM*q2+!|k*QXr4j{0hyO~~bAS+BBO$s~d{ajGfBV5GQFHOJb3 zfC#r~)bT5M_v0qF6{lRSx(8NmxaDQc!-3$R;-W=3ZD=QkWP^y-K%uor-;D9aC&_G` zd7qM}Rl}XV>-j{NpS|P#@-z0p63|`z#jA(rpn{G`b=oa-AC}icM-)9Tc1X5hL4d;| zbo%`19DQ-sHqlGtDUVK~{Q~6=1t_S&=H`T~ha2^BCaJ-f8e)O@-zYBj`!qoidTj7o z_G0|ehZe7cpxIeG8vi%l^uPL>`52C?UD&?a3~KC?60|>%t9;m;r}W!kAr&xt;8idp zY%y-1JB(cm;6I!^`7s6d7oYrrc$HUeFC{oT+u09rdIVlK=iShSeazekUkamxuAXv# zBfw2PU$nbBwq!a9>JF)@7mm6%bCN?AQa4RgxYjgiyeG6@-cGrbnJa{JR`uvh!>fv#@tbFJgD2w=?|>~<+~ig_wyHE#>o_4U zxc=;h&Kc73x0*^@#{2oP{E?EB07vO>@%q=!b~RW&c_bOH3#bVF%Eq*ZAPhFCj*GTj z^=eea+t#VR@eeMuaSidT%5BA|7)m6iG*3Z4l+G^9a5GIVFG3lSHWSSFUW6j1RT=KI za1{Lw!_|1vpKN^9UZ$&Aew1}q_hisgyW{;l#hkOYjqwTA%ddz&?S<#XBWaS91X5f$ z>N!mZWkiC%EY?{l-e4XdQ}8IJcSQBoeXcwEKuffdL z0-A$d$KSvJr8b&FhP7u#kuD7s47fhZwln1DJu>)Z!c11DLLm|0t9E_Q)d4h>$4Jr9 zME7AxRT#V<)rHABKs5w&pwr4sa|u{^9!H$_r$04Sn``#PV1x&RTf5f`x(0&9*r*`y zow1|>{kXx|HIaIj`m=)10$%-yMcM_|7|tY}7vKKGm2iOLExHSUcF@>`N=#6r%lp}g zlHy;~bHu9>7ZZ3ceN~P0eKg|(RaO}jwrzS2KR0CC@pD$A`-q$MT)2e=t5gCKaws30 z3b3lXrPNbOo9>J5pdpFbJKfkUK3qb|bRAx*g`U$*i1N>$Pl(>==lYcW_+h)$ExI!v zPL-b^8Wh0Ww?tj1L*L>z=05lxDZ2Z9TBCG-|IlWtxlAIgA8PjQAC(AKK;k;)%=K3{TJs4B zJ)g5yd+Wv*<$8lm+#3D7)WnRG+Nekd{<_dJ;Bx*8)9@?gayf!)8o`5@72xeRGjZ5i z60Mu47qn}+&^FG0FI_ak(m5uzHi66yY|x7G6)gx*^t<${a@qYT7J$~&RBiPy8gxNF zsB8UWy(q~cG-rOvv2c3d`SP09^c<=lw$rsNKi9gFgkcT$u>ZC}ucqNv$QYZ~y>r3x zTy*kQ=We>9Zs1-y_e+}ZV42?x+t@E3mqXu+p}Uh<0qs>yThx85b{_YqPn5qmt-*4- zmRcqsXOo}|r6qXZ#>FN0I#1L^@XR^1w#HxQkUD#FZ(xgFZ*c2!)do?I{0g>QjH-64 zEwKxy&++l@Ir8^)8~;$>UfU!yw$y#Vn{uUzc}OIr^F<^Jpk}0nEtf$XZci=o5}LUI zMesYtFM}oZMVZA0{yl>}4`?S|{;1sg;kskwK@&-S?o$t)gHsrR*!bh|IopG#Ku7mJ z8!D{*(vs0j`~HEnG}+N2sx1a#nSCfh4oyn4h{ND74FVp3belzZH5#bUrp_{Ta)uxc zQAFq7oWT@FXX^m=`rfoC4)uxj7sgpLAJR?TBqeD&Vap$3wT=C-IkgpJbeksD9i+e5 z7e6(uYmRIbA}XgQhMvv0-D`9lVnn@q8Y4~wON=q(r|2KwbU2YS9P(tAU%$Q_58(~5 z&_o?x$}o%-0VQk<5S8Gg2Ti;Ca&0q3zYDGJ+kB15Q~mYbHZ|--H4^sPmmr2Dy#*Qw zfS(XF<_@-trvWiR?7!*b%qBjCi~@@i03u5`THSnx{1hN()^jwadIa z=u>H#+n$HMOVp(gD0>LLif}%T^y&y~4WArM!VcC0;epfO)_&UTu@DRw0jml0Sj?(w zE;(&gn@6y>wf@h$rr$XZvm1IFy1$KPjc@&~?tc{exvE_1e{j!dfh%y11n195Sv;T) zcLs)4)S%>e14R@rathjGKN{Hpi>FAFOt7hV zqeAsk;!dIcOK6U*$nQ6s$Qj7x;@_mJ(|2=;LWJ1)-gj$ItrNtDU`M`z$K@0K^g&m& zf1SJtx*|Wb(uYfdFH?gU*p`ge;j>@#qVOl! zguJg&4eQT4)~ZvuL!(8N`t`QBL7vH=G8Gw!$q?FCQlA3yMpK+a-^A%1WhR@~w?tKX zH`k+RM{KgLlyq!LLO9_g*(COYPL1hAmZ906iW~fV*OcIxXwc`ukV$_`5n|5h2SAlG(mAqI6b5(vY_0bSlCyC@u#6>()K_ z7C9QB6-AoH|i0K7}3E4=wKhr10o6T;;~q*A!ln!(+Su-DeEd zlAj~JQ)el1G;57JYO4$v<+Lo{Z%+renC8(n8RsmXPghI$Ll*eSww@HK z2l*LY6`^+RbhynjDk>}Z=6r~kqw=p{%d^ti!Sa6M>Rb#M-(fioGfAV}iPKlSN6F2+1qHiFuIG z4)GAhUHO1y#FS=2A8JVGu;sCz(bN4lU~B=f1g_pwiyF5kKyYs@%ppY+_Y z?zBd=dzVl1SsFD%6%qv`58UyvdgBBJK$D>gN`vbffe|#z`!kyOVe&_vDn6xxbLr^UVdF4JY@o z8}HAWOfv{re4K4F$$td7Ey}8=E4)eKL9dP^oyEbU5R7(g1Z5ZHPZKP4GtY+P(42ow4v!eZ~ELlX3;9a zr2O6c2R?(Y4^Atfkhhx04}K7}U*yeSpPZUgKDKghahCj? 
ze~){iG=+<*4}xe%EaqLn+5i^z&DCqcD7SH+L!WcMjH?^tt#)95a2?Tk{_Y)|Q`32I2_1QD9o!+PI4web0LD=uo1AbQf z3BHDK!Dq$dl+*ldNVe{z^W+XHmJ}?&J?X z_6mUx=ZlXg@UD!|hYO9OS45{|wTyj{%22O{y4eR=!G22nEm`8~p*QX-JoHbZHbx_Q z(JUlSY$DzP%i4o_9u|zZ0K(tiul9Dsw~Bw~m^Bw#{-24$uaxG08z_ixbBDz{|9V@{ z)=?c%CC@p-%8HV$V9pNinD z3J0+J-8l~nKW+t`zE`Iwf~wHt5aArmBn?{f5-guv331URK*&$YXo|82ln&{dD)dvJ z;&ZAc?mWu>ou}UgYCB&QJ}0&tBBKS$R0qR?+p!a2zvpR4oN2pPDCc%GCmCuZ6L zgP%LomRYr@w6niHqgEdjE2-ii`wO`XpKrx?;tsQuU|zdkJ~ZZ$wBE@^-k}d~LX5U{ z%H>!y86Fa!nW0|AYC*SPE3j@f9f^zZCZCnERTR!Wc1wj@=$_YyQ3vqS=7WpANbN3&^{jq{YEc}QcfHeAc z-GHH*Bj`xC$?st96`Gxx)5;dRF)# z0vG{PTQC-mx4^3PSj309<6-g2zPqscxzN#tP^8CXWu+K-y>(FVVTz$|W{OgD)M*G5 z{$KYGkwyM?-CrHC~4Lz}IowcT}D@>;AJ{RZi zR5YN94XzB8ES8eD-uS(}*8TJqg^!{?e4iq)9mZ8dv%a|X=whZHQ(ELtpC~c*Sr=otl?s6$ zr`8D*LM~?_>w>aZk+fbTA47tznDI6jRULj#NeykzIvFU^rtRHHo46p&Ey+Z_YqPOW z;`Uuxdb@_4S?g_Cz=r0;MOT&wI;hcX+}bRxTR5D%`*u*60fOhE!jE+ACm{F;^VoqN z6nD6wm`=v3nPKyD9kt$B8Cm%hwZ3z8zDdbn9{D+Cz!Smh81y-B&Sn=}C|sj8t!!xnn9RP@Le>~!=Vp%3%I0b z9XxS%=bt#9-yeBfarXuFQ`vJaQHv)1Z0OfZdg#l?s8x6m>m1+^^jm9Z>T?pn*|v-Z z1_w4a)p^3NwRv=U%K1L{(Os6g9+s{+*5sxK^j3h!x@S}*VAt`HMXU`w;{_(dO}{gc_Z34S19;Jo$PiK_rkk)ep>gEsy9d% ziN56pP+b-tvdH<(x0zoWYvV;%slenI&sU>u9BQQGL-&dyt9gwUZ4JW7G1>bWI zmh~Iv;*{F8E?kZ0R1)t(C>!=s&p<{M%K1zH3i;EQ4kwIvW_NkLh)#F;lXDh+%^hl`<7hdRqCEF zZIixs6PUawibE<-U72qnFwYQ3kggOl6@w1IUb#@lS%SLJsxG)@b$ z@RmI9w>&?ue8_#0YZp|<@$#nCB+b*`CXYIihG02ofRzz3hGbA?O+>IRa_7~u|M7Vn zZRYLI$(6^Tu%t#y^R)R<=e-yRQW3_}CbqH<2;FZULTN)9CU#HGx3A5c$;?0AhradI z!imRssM0#bCWu6;Im}jeK!&lrY2|_GBQGTo?c@GDQB0p zKz3&;T?p|rONob*D36SMV8f(bVJQ9Rpf5KB(Mpi%7ohKj(r~O-@5P7dM$3K7^ZhOxEEUo&-PP(+TI9P@rBCjHuZ7#_u5AtI_|V ziGJVImG**|Wd32F9P%>gtkZgt^TYM;hg+_q6T*B+68=7!>3X4o#D0FCqvW^+R@&$D zI`u<K}qd!RuN2mTH%Cqxk}x-d4r5ze%!`uN#WhOuDKk$FN3~ecyDH z=EUiJsa}cgqOEbM!yRL}H)9Oxu6Lb1UI)`Nt4rIw2y@P%1R*XXW}tQ1yIr`NO?((R z<>A8s6ytnd!gPbb;N_~jF;Qj_{y~?p_AZfW@94UyqFc+o0B_Yp$N9%fNoyS!ypIPu zKp0X8538TZ?X`FB6TZ0FC+7d^%VKXi;d9^S@Ojg{`y%ydqA+3(BaC+6Pw}ICfz=-z z1@lbvT6&Hy{(O9Vl;gTUG;H|wet6T~tXcQ|S1!etPr7;#N~}lK2!d76;4b&FyfYje zT;i=U;dHH*DfFw?`_-e3tfeWowr84!?o!-M)(}dq+Qr>w!Rj}`;8AF3RFT;I*RIaM zo+|_L+LOTj1ci+UIZP#Kg+Gnw)wJHcA@$35H~Y>$!DGnzFc)+(15pPtmCbwPB+Prk z_3>O??34Dhg`&p(+(SH`6_@m6nlpxuSH%SH(45#*ZJ6PIlRxN)Q)W{Zi; zT1w6zlP}76INbNjd2`X)`}M*jpJi8{*I(%L6@IGzh3KN{+*2Rw@RO(rF*v5{j~l~8 zqDvg@Yx1%gXhP{>hZDXk1TkPgR#@RIG_G*~${u(cG3K|Dd`{*`@Wb%t`pYI4S&xz) zX>5+=f^{&^*l`C^W2rlf*wmh>$0UQXcZB%W2Hl61hoU1cHJ1d_m%gH8hUg1SDmN={ z3l}VryoskMpLI0H-NpmuW5PtE3m{Ifl$>YiRzS?7PacowV2-|VSHBw?iK_-h-1RhJ0R5;y-!YdF!K)KRZ^@n7+WAz^)?!t zkALk6u>R)kDc%yG1$&wAqu&*DXI-C~>d0HMM0No3*tD=}Lw@;aM}}yH063fm!Wzzb z^~tQK#vhXh4!1%Nnc@o)jFMXH%LceE?OUIIm#zL;x6t-SxH~x&?G4-wK0!w@T!oOR zvL7cao*>g&ao)2c4>SBIEw}7~nW4DsPU+!LrEwOz{%Q zhu&BkAmNouQ9*nYk-%;w0yY3PLidqvUC!#bt?$LXUIk-OqnpznNzD%*k`^gHAtm2MTzTMWJV#>H?B! 
[unrecoverable GIT binary patch data omitted]
zYG)xNtwH>p{!*dnqEnO?eBEH`SNZ~7AqHe|7|8)xij>fvTJ-8%-1XSGSIg} zJAwcG&a@VXou2^Lcc$kLR=W6ayzF-{2#ioNdik9%{T8i~-W;UUdOk#krRajo{4fsp z(i~zw=j*99oql}(2O>*yfT<7O6%s5Rg7rNFk=Gjd9{SNZ>?h+FW8Yw*NIebVjW;hH zB0sFNT6aTohW#nerrrgYF$4`R(XfTcPboGb?$|+P#(8ELvK_`J_sM_k%+O4lYE!!X ztoQBOvM(nOvwdV8H+XX+M1;bS6f=FI^-vF1f=L^S(NXd7lI4p%hdPncsLioe<8W}0 z1R^@31%?yJqMLoQW4QE&_`8tuirjZhQ`q+x+}FQ*W+vLBTxQH{wEM(7qQ~w}aVCKL zH-r2~eYfLh|NlEC^5TEciTzjntZWv_;tBl;ky9ciGb7aS8+l6GI{*8ZhuPYY$KA4i z$#O00kael){qAj(d=o)Zo+RcxADuSJl+nAr%)C_&V}}+^T4k$1LNyUC_3ks5|zF(Fs6&x(rAj1P% z+H7>fLliIpgRQ=y=nG$z->9ujv!+FQsw<@k*9O0FhiqHRqvhcbwDg$FdJaaA#?`6JarW!el9$~Q$mv2{UjB`fHPZQ`op*|p&<(R+@y4);9I49W z$i^KZd&#TuE1Gi(l<@@RL;YG3hUxbxEdeiFOQ)P?1T+sHRAqb5b1<=%?lCFaf@WjD z$zZ6$kuq~%x2aLDREIFPulk^kuKq~vUwczUk@I_@2`IcKk#kTD%O{BzGbe@I5ZtM10&$HD2O{ z10IJO_qjMhQ<{h5h(=tVk^{7OvKMyzn+A*9K(xgKEBf@Dm-Alk_H-q&E(ocvHj*O1Q zyDr}zP#9Q!Q_xeLc}Hw``0UHUW5xLwm!REOD$3_(VGY7tvmpDIMP81fN|JfL9qiPs zZx{*M8YjY)x)~7FQx%B3M-ccVNOGsZ@4cmzwZVud*QW*vcu}`pf#7+JmPm9!k$_=| z?bJQW<7>$|sg4RKj)~}R($5$|483Q!_?fR=pSq4$$@x3v)Xj>|XERcmuVE9V_JPW>3Ukw`tKm1NmbPO+7G~Gd8;7v97`5VgkoCSlh{)82SZJeqWBf zURQxXiHziE4t(gPRbQGu=H>Y+-M8di$0Nn16N;xfp$ZIPGO9nMPg26s4<7=glekWKY=QL>@rVJb zVy2>85xW^SG==27ZjJKJ{na#;WAVic*&eMfd`av;!qKSho z-tmZiKOydAY#P^ZcF9`#jG_?=R3>~T`><`_HrL8D#z;1$jsR&QSy9jp|>#SKK{ z{$Ol(6hp;dGG4p+e!SOl>}QO;F?U;Q`D_1Su?=B*GX18(-Wt$y5HvO_a`*)~y7Fb* z&XSTL69|wcmEb^Hpa`NYzMCLl>oW^OE@JNbhaTzibjix{zf}WYo8DAz7v7`0D!zy) zmBNWKvSZq%=t9^sCNwh)1_cFAG;H1Wf3a8t6&NCe#`s78W^8T=M`g@Lvpx!1eDUa6=n2Clk9&UROFPDCn!5{a{o>R`Fh#nJKWYM0ZRFBaK3Sh}bnGNJ zDT*$v0{nt(gNVM56s@nK@skqGEGV*bB0M?HVwh5|p%~Rpp>E|;^vISFda8q+ZIX8w z=|@>*2=7wkrMaJcnr{JmaA;sC&{f@gl z(CqG+*$q)Mjh`%+ou_)vO&V=*nVt6dg&@i+EJaZSJAU;JO zRlGYEUx2aQhwHEQOJnkCr#g@Z54)TA5UYw#CJh~EiD2y0V#@hXkF331XOQu{N zr&iN|U-MmwsCUB)M9-jzRNvR`J{76nb_e#zBz9*foL-l5yYc)4WP2DizL4+&I!kAs z7Zu*wj#>#Xo$FnI>p5GA^bPA_!Z^D%O9CK{aZBL-Q-WAIuUQ@UZJZvTx%S z>bkhBU!9fxVx<$?H~1LhfW9CtcnaN|EXa_fT)t{XQF%J7S(2B%+ecFTizJVMVrd+& zxOq0qSzjRJ3Jn~yvA>~BmC}qmSO>j~Tk88{ZrWx#;`*h-lQV*gM;x3t;d*=l@+Wqd zb3kCPvl+Y~O-?Qn@AT;l&~C5?Q=h`$D!}yalnfgk)7>rhEa(xn%wquDY4v!OT}{m8 zycaY?>macK>RFM=bDhtGbLaWCPvKW`b!6%~bc5mFi^KFK8TY_7i;#UP6lgontSI0l z(Zs%Al?1m{fnBcA&1~RgEZRX$!&N*>Z<$C(t7D;Yz`i=a+!C^59`5=NYi9l{imm@o z{wLeduYZADe4Nrj7=M5gyzS+Ul%+`-P@LN0H1Dsu1BH8fbAr#u12>Fj&gi8a-Cbt< zB24l6_3QIiz%2NQw?t0PGq*i_XCsckh&w9G&wrY`hu;_S75uA2cup+!-6qaed&tGt9@ExH^;tzU69M(P0G0N z)~9{qb^eh<8R6`A4BgC6OQI$a0CVTj7iBiEgo`8ZdGAKKZ6Q2Z@B1*N`aRPtYmD)# z_NP?u(1xlSUk7omIly<4G@~d?7!#5cl%yCcyzKM(@@ae`C(1A~nF0O2Sb3%Ihi6}u z3(w~A0TQE=+0F&?MM-g*?epRBv;M}KxOg85&bMw4%ehxqm>}_bzWy+T?cgt+T&0`V zM0b(OZ{tl_+JoE{CWTk#=2|$cZKWelMj~WsWmI-%Qixlqc0Puo*eekDc~<+Zou1&x za#flz`wwZ~u|_{ZTmB|LwZJ*Ddt#Q2uX*75<6oi|uzxX0vTI(bnk~kz+xe%Zq^% zDDE2zDZ>OzAuj&8!EIZ!#@@G0GO2y`0?IuSy`Rpjr+st$2Cxp}4bbTQ2v4?LjYm}L zGIe<lg0q`dnBU%#Krs% z2J)>AY5}u=p+Q$~mQPq!qXG+)BebQBvl6G6E4)?*FrS(QzyK| zXdaedSFoWc{KRI;k8<^DXPexwc`73@l#sjYs9~rr*>*BFol-+WM6;CUtbFIGlJZ#5 zI|SdF&xxkOElbLEQx%#!7XFha<-g)tQNO3P_up+Hl%an@+{}hrnt)KPT1ynop0bN> z-nW5$2PS)0?@Y8(vNngGd3}J(rGgXhhjO3AZSMryg2q%h&;$@?y$>?NzhG$hZ-!pr zq>AfLh`1hXQcCEC zhmCw5vv7|N{JxrdpCfMYT7~hE^e@NtH@EE!ff&o+vN1?Y09w+a+Xj5RKF>vh z;CjdE&)=C7!#TS4YO16U2?fIofEoBd+n@3G-~W$eDD``o{Qqui 0: + indice = np.where(self.flag == i)[0] + assert len(indice) == size + indice = indice[list(torch.randperm(int(size), + generator=g))].tolist() + extra = int( + math.ceil( + size * 1.0 / self.samples_per_gpu / self.num_replicas) + ) * self.samples_per_gpu * self.num_replicas - len(indice) + # pad indice + tmp = indice.copy() + for _ in range(extra // size): + 
indice.extend(tmp) + indice.extend(tmp[:extra % size]) + indices.extend(indice) + + assert len(indices) == self.total_size + + indices = [ + indices[j] for i in list( + torch.randperm( + len(indices) // self.samples_per_gpu, generator=g)) + for j in range(i * self.samples_per_gpu, (i + 1) * + self.samples_per_gpu) + ] + + # subsample + offset = self.num_samples * self.rank + indices = indices[offset:offset + self.num_samples] + assert len(indices) == self.num_samples + + return iter(indices) + + def __len__(self): + return self.num_samples + + def set_epoch(self, epoch): + self.epoch = epoch + + +class DistributedGivenIterationSampler(Sampler): + + def __init__(self, + dataset, + total_iter, + batch_size, + num_replicas=None, + rank=None, + last_iter=-1): + rank, world_size = get_dist_info() + assert rank < world_size + self.dataset = dataset + self.total_iter = total_iter + self.batch_size = batch_size + self.world_size = world_size + self.rank = rank + self.last_iter = last_iter + + self.total_size = self.total_iter * self.batch_size + + self.indices = self.gen_new_list() + + def __iter__(self): + return iter(self.indices[(self.last_iter + 1) * self.batch_size:]) + + def set_uniform_indices(self, labels, num_classes): + np.random.seed(0) + assert (len(labels) == len(self.dataset)) + N = len(labels) + size_per_label = int(N / num_classes) + 1 + indices = [] + images_lists = [[] for i in range(num_classes)] + for i, l in enumerate(labels): + images_lists[l].append(i) + for i, l in enumerate(images_lists): + if len(l) == 0: + continue + indices.extend( + np.random.choice( + l, size_per_label, replace=(len(l) <= size_per_label))) + indices = np.array(indices) + np.random.shuffle(indices) + indices = indices[:N].astype(np.int) + # repeat + all_size = self.total_size * self.world_size + indices = indices[:all_size] + num_repeat = (all_size - 1) // indices.shape[0] + 1 + indices = np.tile(indices, num_repeat) + indices = indices[:all_size] + np.random.shuffle(indices) + # slice + beg = self.total_size * self.rank + indices = indices[beg:beg + self.total_size] + assert len(indices) == self.total_size + # set + self.indices = indices + + def gen_new_list(self): + + # each process shuffle all list with same seed, and pick one piece according to rank + np.random.seed(0) + + all_size = self.total_size * self.world_size + indices = np.arange(len(self.dataset)) + indices = indices[:all_size] + num_repeat = (all_size - 1) // indices.shape[0] + 1 + indices = np.tile(indices, num_repeat) + indices = indices[:all_size] + + np.random.shuffle(indices) + beg = self.total_size * self.rank + indices = indices[beg:beg + self.total_size] + + assert len(indices) == self.total_size + + return indices + + def __len__(self): + # note here we do not take last iter into consideration, since __len__ + # should only be used for displaying, the correct remaining size is + # handled by dataloader + #return self.total_size - (self.last_iter+1)*self.batch_size + return self.total_size + + def set_epoch(self, epoch): + pass diff --git a/openmixup/datasets/multi_view.py b/openmixup/datasets/multi_view.py new file mode 100644 index 00000000..76f12aa0 --- /dev/null +++ b/openmixup/datasets/multi_view.py @@ -0,0 +1,65 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
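A note on the iteration-based sampler defined in the hunk above: every rank seeds NumPy identically, shuffles the same padded index list, and then takes its own contiguous slice, so no inter-process communication is needed. The following is a minimal standalone sketch of `gen_new_list`, not part of the patch; the dataset size, `total_iter`, `batch_size`, `world_size`, and `rank` values are illustrative only:

    import numpy as np

    def given_iteration_indices(dataset_len, total_iter, batch_size,
                                world_size, rank, seed=0):
        # every rank shuffles the identical full list with the same seed,
        # then slices its own contiguous piece
        np.random.seed(seed)
        total_size = total_iter * batch_size
        all_size = total_size * world_size
        indices = np.arange(dataset_len)[:all_size]
        num_repeat = (all_size - 1) // indices.shape[0] + 1
        indices = np.tile(indices, num_repeat)[:all_size]
        np.random.shuffle(indices)
        beg = total_size * rank
        return indices[beg:beg + total_size]

    # e.g. 2 ranks, 4 iterations of batch 8 over a 100-sample dataset
    sched = given_iteration_indices(100, total_iter=4, batch_size=8,
                                    world_size=2, rank=0)
    assert len(sched) == 4 * 8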
+import torch
+from mmcv.utils import build_from_cfg
+from torchvision.transforms import Compose
+
+from .base import BaseDataset
+from .registry import DATASETS, PIPELINES
+from .builder import build_datasource
+from .utils import to_numpy
+
+
+@DATASETS.register_module()
+class MultiViewDataset(BaseDataset):
+    """The dataset outputs multiple views of an image.
+
+    The number of views in the output dict depends on `num_views`. The
+    image can be processed by one pipeline or multiple pipelines.
+
+    Args:
+        data_source (dict): Data source defined in
+            `mmselfsup.datasets.data_sources`.
+        num_views (list): The number of different views.
+        pipelines (list[list[dict]]): A list of pipelines, where each pipeline
+            contains elements that represent an operation defined in
+            `mmselfsup.datasets.pipelines`.
+        prefetch (bool, optional): Whether to prefetch data. Defaults to False.
+
+    Examples:
+        >>> dataset = MultiViewDataset(data_source, [2], [pipeline])
+        >>> output = dataset[idx]
+        The output contains 2 views processed by one pipeline.
+
+        >>> dataset = MultiViewDataset(
+        >>>     data_source, [2, 6], [pipeline1, pipeline2])
+        >>> output = dataset[idx]
+        The output contains 8 views processed by two pipelines: the first two
+        views come from pipeline1 and the remaining six from pipeline2.
+    """
+
+    def __init__(self, data_source, num_views, pipelines, prefetch=False):
+        assert len(num_views) == len(pipelines)
+        self.data_source = build_datasource(data_source)
+        self.pipelines = []
+        for pipe in pipelines:
+            pipeline = Compose([build_from_cfg(p, PIPELINES) for p in pipe])
+            self.pipelines.append(pipeline)
+        self.prefetch = prefetch
+
+        trans = []
+        assert isinstance(num_views, list)
+        for i in range(len(num_views)):
+            trans.extend([self.pipelines[i]] * num_views[i])
+        self.trans = trans
+
+    def __getitem__(self, idx):
+        img = self.data_source.get_img(idx)
+        multi_views = list(map(lambda trans: trans(img), self.trans))
+        if self.prefetch:
+            multi_views = [
+                torch.from_numpy(to_numpy(img)) for img in multi_views
+            ]
+        return dict(img=multi_views)
+
+    def evaluate(self, results, logger=None):
+        raise NotImplementedError
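As a concrete instance of the two-pipeline form in the docstring above, a training config could look as follows. This is only a sketch: the `ImageNet` data-source name, path, and crop sizes are illustrative rather than taken from this patch, while `RandomResizedCrop` and `RandomHorizontalFlip` are assumed to be available through the torchvision registration loop in `pipelines/transforms.py` below:

    # two global 224px views and six local 96px views, multi-crop style
    pipeline1 = [
        dict(type='RandomResizedCrop', size=224),
        dict(type='RandomHorizontalFlip'),
    ]
    pipeline2 = [
        dict(type='RandomResizedCrop', size=96),
        dict(type='RandomHorizontalFlip'),
    ]
    data = dict(
        train=dict(
            type='MultiViewDataset',
            data_source=dict(
                type='ImageNet', data_prefix='data/imagenet/train'),
            num_views=[2, 6],
            pipelines=[pipeline1, pipeline2]))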
diff --git a/openmixup/datasets/npid.py b/openmixup/datasets/npid.py
new file mode 100644
index 00000000..39858cf5
--- /dev/null
+++ b/openmixup/datasets/npid.py
@@ -0,0 +1,25 @@
+from PIL import Image
+from .registry import DATASETS
+from .base import BaseDataset
+
+
+@DATASETS.register_module
+class NPIDDataset(BaseDataset):
+    """Dataset for NPID."""
+
+    def __init__(self, data_source, pipeline):
+        super(NPIDDataset, self).__init__(data_source, pipeline)
+
+    def __getitem__(self, idx):
+        img = self.data_source.get_sample(idx)
+        assert isinstance(img, Image.Image), \
+            'The output from the data source must be an Image, got: {}. ' \
+            'Please ensure that the list file does not contain labels.'.format(
+                type(img))
+        img = self.pipeline(img)
+        return dict(img=img, idx=idx)
+
+    def evaluate(self, scores, keyword, logger=None):
+        raise NotImplementedError
diff --git a/openmixup/datasets/pipelines/__init__.py b/openmixup/datasets/pipelines/__init__.py
new file mode 100644
index 00000000..370e3456
--- /dev/null
+++ b/openmixup/datasets/pipelines/__init__.py
@@ -0,0 +1,3 @@
+from .auto_augment import *
+from .compose import *
+from .transforms import *
diff --git a/openmixup/datasets/pipelines/auto_augment.py b/openmixup/datasets/pipelines/auto_augment.py
new file mode 100644
index 00000000..27ff7bcf
--- /dev/null
+++ b/openmixup/datasets/pipelines/auto_augment.py
@@ -0,0 +1,1053 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+# refer to mmclassification: https://github.com/open-mmlab/mmclassification/tree/master/mmcls/datasets/pipelines/auto_augment.py
+import copy
+import inspect
+import random
+from numbers import Number
+from typing import Sequence
+from PIL import Image
+import cv2
+import mmcv
+import numpy as np
+
+from ..registry import PIPELINES
+from .compose import BuildCompose
+# from torchvision.transforms import Compose
+
+# Default hyperparameters for all Ops
+_HPARAMS_DEFAULT = dict(pad_val=128)
+
+
+def random_negative(value, random_negative_prob):
+    """Randomly negate value based on random_negative_prob."""
+    return -value if np.random.rand() < random_negative_prob else value
+
+
+def merge_hparams(policy: dict, hparams: dict):
+    """Merge hyperparameters into policy config.
+
+    Only the hyperparameters required by the policy are merged.
+
+    Args:
+        policy (dict): Original policy config dict.
+        hparams (dict): Hyperparameters that need to be merged.
+
+    Returns:
+        dict: Policy config dict after adding ``hparams``.
+    """
+    op = PIPELINES.get(policy['type'])
+    assert op is not None, f'Invalid policy type "{policy["type"]}".'
+    for key, value in hparams.items():
+        if policy.get(key, None) is not None:
+            continue
+        if key in inspect.getfullargspec(op.__init__).args:
+            policy[key] = value
+    return policy
+
+
+@PIPELINES.register_module()
+class AutoAugment(object):
+    """Auto augmentation.
+
+    This data augmentation is proposed in `AutoAugment: Learning Augmentation
+    Policies from Data <https://arxiv.org/abs/1805.09501>`_.
+
+    Args:
+        policies (list[list[dict]]): The policies of auto augmentation. Each
+            policy in ``policies`` is a specific augmentation policy, and is
+            composed by several augmentations (dict). When AutoAugment is
+            called, a random policy in ``policies`` will be selected to
+            augment images.
+        hparams (dict): Configs of hyperparameters. Hyperparameters will be
+            used in policies that require these arguments if these arguments
+            are not set in policy dicts. Defaults to use _HPARAMS_DEFAULT.
+    """
+
+    def __init__(self, policies, hparams=_HPARAMS_DEFAULT):
+        assert isinstance(policies, list) and len(policies) > 0, \
+            'Policies must be a non-empty list.'
+        for policy in policies:
+            assert isinstance(policy, list) and len(policy) > 0, \
+                'Each policy in policies must be a non-empty list.'
+            for augment in policy:
+                assert isinstance(augment, dict) and 'type' in augment, \
+                    'Each specific augmentation must be a dict with key' \
+                    ' "type".'
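+        # merge the default hparams into each op and pre-build one compose
+        # per sub-policy, so __call__ only has to pick one at random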
+ + self.hparams = hparams + policies = copy.deepcopy(policies) + self.policies = [] + for sub in policies: + merged_sub = [merge_hparams(policy, hparams) for policy in sub] + self.policies.append(merged_sub) + + self.sub_policy = [BuildCompose(policy) for policy in self.policies] + + def __call__(self, img): + sub_policy = random.choice(self.sub_policy) + img = sub_policy(np.array(img)) + return Image.fromarray(img.astype(np.uint8)) + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(policies={self.policies})' + return repr_str + + +@PIPELINES.register_module() +class RandAugment(object): + r"""Random augmentation. + + This data augmentation is proposed in `RandAugment: Practical automated + data augmentation with a reduced search space + `_. + + Args: + policies (list[dict]): The policies of random augmentation. Each + policy in ``policies`` is one specific augmentation policy (dict). + The policy shall at least have key `type`, indicating the type of + augmentation. For those which have magnitude, (given to the fact + they are named differently in different augmentation, ) + `magnitude_key` and `magnitude_range` shall be the magnitude + argument (str) and the range of magnitude (tuple in the format of + (val1, val2)), respectively. Note that val1 is not necessarily + less than val2. + num_policies (int): Number of policies to select from policies each + time. + magnitude_level (int | float): Magnitude level for all the augmentation + selected. + total_level (int | float): Total level for the magnitude. Defaults to + 30. + magnitude_std (Number | str): Deviation of magnitude noise applied. + + - If positive number, magnitude is sampled from normal distribution + (mean=magnitude, std=magnitude_std). + - If 0 or negative number, magnitude remains unchanged. + - If str "inf", magnitude is sampled from uniform distribution + (range=[min, magnitude]). + hparams (dict): Configs of hyperparameters. Hyperparameters will be + used in policies that require these arguments if these arguments + are not set in policy dicts. Defaults to use _HPARAMS_DEFAULT. + + Note: + `magnitude_std` will introduce some randomness to policy, modified by + https://github.com/rwightman/pytorch-image-models. + + When magnitude_std=0, we calculate the magnitude as follows: + + .. math:: + \text{magnitude} = \frac{\text{magnitude\_level}} + {\text{total\_level}} \times (\text{val2} - \text{val1}) + + \text{val1} + """ + + def __init__(self, + policies, + num_policies, + magnitude_level, + magnitude_std=0., + total_level=30, + hparams=_HPARAMS_DEFAULT): + assert isinstance(num_policies, int), 'Number of policies must be ' \ + f'of int type, got {type(num_policies)} instead.' + assert isinstance(magnitude_level, (int, float)), \ + 'Magnitude level must be of int or float type, ' \ + f'got {type(magnitude_level)} instead.' + assert isinstance(total_level, (int, float)), 'Total level must be ' \ + f'of int or float type, got {type(total_level)} instead.' + assert isinstance(policies, list) and len(policies) > 0, \ + 'Policies must be a non-empty list.' + + assert isinstance(magnitude_std, (Number, str)), \ + 'Magnitude std must be of number or str type, ' \ + f'got {type(magnitude_std)} instead.' + if isinstance(magnitude_std, str): + assert magnitude_std == 'inf', \ + 'Magnitude std must be of number or "inf", ' \ + f'got "{magnitude_std}" instead.' + + assert num_policies > 0, 'num_policies must be greater than 0.' + assert magnitude_level >= 0, 'magnitude_level must be no less than 0.' 
+ assert total_level > 0, 'total_level must be greater than 0.' + + self.num_policies = num_policies + self.magnitude_level = magnitude_level + self.magnitude_std = magnitude_std + self.total_level = total_level + self.hparams = hparams + policies = copy.deepcopy(policies) + self._check_policies(policies) + self.policies = [merge_hparams(policy, hparams) for policy in policies] + + def _check_policies(self, policies): + for policy in policies: + assert isinstance(policy, dict) and 'type' in policy, \ + 'Each policy must be a dict with key "type".' + type_name = policy['type'] + + magnitude_key = policy.get('magnitude_key', None) + if magnitude_key is not None: + assert 'magnitude_range' in policy, \ + f'RandAugment policy {type_name} needs `magnitude_range`.' + magnitude_range = policy['magnitude_range'] + assert (isinstance(magnitude_range, Sequence) + and len(magnitude_range) == 2), \ + f'`magnitude_range` of RandAugment policy {type_name} ' \ + f'should be a Sequence with two numbers.' + + def _process_policies(self, policies): + processed_policies = [] + for policy in policies: + processed_policy = copy.deepcopy(policy) + magnitude_key = processed_policy.pop('magnitude_key', None) + if magnitude_key is not None: + magnitude = self.magnitude_level + # if magnitude_std is positive number or 'inf', move + # magnitude_value randomly. + if self.magnitude_std == 'inf': + magnitude = random.uniform(0, magnitude) + elif self.magnitude_std > 0: + magnitude = random.gauss(magnitude, self.magnitude_std) + magnitude = min(self.total_level, max(0, magnitude)) + + val1, val2 = processed_policy.pop('magnitude_range') + magnitude = (magnitude / self.total_level) * (val2 - + val1) + val1 + + processed_policy.update({magnitude_key: magnitude}) + processed_policies.append(processed_policy) + return processed_policies + + def __call__(self, img): + if self.num_policies == 0: + return img + sub_policy = random.choices(self.policies, k=self.num_policies) + sub_policy = self._process_policies(sub_policy) + sub_policy = BuildCompose(sub_policy) + img = sub_policy(np.array(img)) + return Image.fromarray(img.astype(np.uint8)) + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(policies={self.policies}, ' + repr_str += f'num_policies={self.num_policies}, ' + repr_str += f'magnitude_level={self.magnitude_level}, ' + repr_str += f'total_level={self.total_level})' + return repr_str + + +@PIPELINES.register_module() +class Shear(object): + """Shear images. + + Args: + magnitude (int | float): The magnitude used for shear. + pad_val (int, Sequence[int]): Pixel pad_val value for constant fill. + If a sequence of length 3, it is used to pad_val R, G, B channels + respectively. Defaults to 128. + prob (float): The probability for performing Shear therefore should be + in range [0, 1]. Defaults to 0.5. + direction (str): The shearing direction. Options are 'horizontal' and + 'vertical'. Defaults to 'horizontal'. + random_negative_prob (float): The probability that turns the magnitude + negative, which should be in range [0,1]. Defaults to 0.5. + interpolation (str): Interpolation method. Options are 'nearest', + 'bilinear', 'bicubic', 'area', 'lanczos'. Defaults to 'bicubic'. + """ + + def __init__(self, + magnitude, + pad_val=128, + prob=0.5, + direction='horizontal', + random_negative_prob=0.5, + interpolation='bicubic'): + assert isinstance(magnitude, (int, float)), 'The magnitude type must '\ + f'be int or float, but got {type(magnitude)} instead.' 
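+        # normalize pad_val into a 3-tuple of ints (one border value per
+        # channel) before handing it to mmcv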
+ if isinstance(pad_val, int): + pad_val = tuple([pad_val] * 3) + elif isinstance(pad_val, Sequence): + assert len(pad_val) == 3, 'pad_val as a tuple must have 3 ' \ + f'elements, got {len(pad_val)} instead.' + assert all(isinstance(i, int) for i in pad_val), 'pad_val as a '\ + 'tuple must got elements of int type.' + else: + raise TypeError('pad_val must be int or tuple with 3 elements.') + assert 0 <= prob <= 1.0, 'The prob should be in range [0,1], ' \ + f'got {prob} instead.' + assert direction in ('horizontal', 'vertical'), 'direction must be ' \ + f'either "horizontal" or "vertical", got {direction} instead.' + assert 0 <= random_negative_prob <= 1.0, 'The random_negative_prob ' \ + f'should be in range [0,1], got {random_negative_prob} instead.' + + self.magnitude = magnitude + self.pad_val = tuple(pad_val) + self.prob = prob + self.direction = direction + self.random_negative_prob = random_negative_prob + self.interpolation = interpolation + + def __call__(self, img): + if np.random.rand() > self.prob: + return img + magnitude = random_negative(self.magnitude, self.random_negative_prob) + img_sheared = mmcv.imshear( + img, + magnitude, + direction=self.direction, + border_value=self.pad_val, + interpolation=self.interpolation) + return img_sheared.astype(img.dtype) + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(magnitude={self.magnitude}, ' + repr_str += f'pad_val={self.pad_val}, ' + repr_str += f'prob={self.prob}, ' + repr_str += f'direction={self.direction}, ' + repr_str += f'random_negative_prob={self.random_negative_prob}, ' + repr_str += f'interpolation={self.interpolation})' + return repr_str + + +@PIPELINES.register_module() +class Translate(object): + """Translate images. + + Args: + magnitude (int | float): The magnitude used for translate. Note that + the offset is calculated by magnitude * size in the corresponding + direction. With a magnitude of 1, the whole image will be moved out + of the range. + pad_val (int, Sequence[int]): Pixel pad_val value for constant fill. + If a sequence of length 3, it is used to pad_val R, G, B channels + respectively. Defaults to 128. + prob (float): The probability for performing translate therefore should + be in range [0, 1]. Defaults to 0.5. + direction (str): The translating direction. Options are 'horizontal' + and 'vertical'. Defaults to 'horizontal'. + random_negative_prob (float): The probability that turns the magnitude + negative, which should be in range [0,1]. Defaults to 0.5. + interpolation (str): Interpolation method. Options are 'nearest', + 'bilinear', 'bicubic', 'area', 'lanczos'. Defaults to 'nearest'. + """ + + def __init__(self, + magnitude, + pad_val=128, + prob=0.5, + direction='horizontal', + random_negative_prob=0.5, + interpolation='nearest'): + assert isinstance(magnitude, (int, float)), 'The magnitude type must '\ + f'be int or float, but got {type(magnitude)} instead.' + if isinstance(pad_val, int): + pad_val = tuple([pad_val] * 3) + elif isinstance(pad_val, Sequence): + assert len(pad_val) == 3, 'pad_val as a tuple must have 3 ' \ + f'elements, got {len(pad_val)} instead.' + assert all(isinstance(i, int) for i in pad_val), 'pad_val as a '\ + 'tuple must got elements of int type.' + else: + raise TypeError('pad_val must be int or tuple with 3 elements.') + assert 0 <= prob <= 1.0, 'The prob should be in range [0,1], ' \ + f'got {prob} instead.' 
+ assert direction in ('horizontal', 'vertical'), 'direction must be ' \ + f'either "horizontal" or "vertical", got {direction} instead.' + assert 0 <= random_negative_prob <= 1.0, 'The random_negative_prob ' \ + f'should be in range [0,1], got {random_negative_prob} instead.' + + self.magnitude = magnitude + self.pad_val = tuple(pad_val) + self.prob = prob + self.direction = direction + self.random_negative_prob = random_negative_prob + self.interpolation = interpolation + + def __call__(self, img): + if np.random.rand() > self.prob: + return img + magnitude = random_negative(self.magnitude, self.random_negative_prob) + height, width = img.shape[:2] + if self.direction == 'horizontal': + offset = magnitude * width + else: + offset = magnitude * height + img_translated = mmcv.imtranslate( + img, + offset, + direction=self.direction, + border_value=self.pad_val, + interpolation=self.interpolation) + return img_translated.astype(img.dtype) + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(magnitude={self.magnitude}, ' + repr_str += f'pad_val={self.pad_val}, ' + repr_str += f'prob={self.prob}, ' + repr_str += f'direction={self.direction}, ' + repr_str += f'random_negative_prob={self.random_negative_prob}, ' + repr_str += f'interpolation={self.interpolation})' + return repr_str + + +@PIPELINES.register_module() +class Rotate(object): + """Rotate images. + + Args: + angle (float): The angle used for rotate. Positive values stand for + clockwise rotation. + center (tuple[float], optional): Center point (w, h) of the rotation in + the source image. If None, the center of the image will be used. + Defaults to None. + scale (float): Isotropic scale factor. Defaults to 1.0. + pad_val (int, Sequence[int]): Pixel pad_val value for constant fill. + If a sequence of length 3, it is used to pad_val R, G, B channels + respectively. Defaults to 128. + prob (float): The probability for performing Rotate therefore should be + in range [0, 1]. Defaults to 0.5. + random_negative_prob (float): The probability that turns the angle + negative, which should be in range [0,1]. Defaults to 0.5. + interpolation (str): Interpolation method. Options are 'nearest', + 'bilinear', 'bicubic', 'area', 'lanczos'. Defaults to 'nearest'. + """ + + def __init__(self, + angle, + center=None, + scale=1.0, + pad_val=128, + prob=0.5, + random_negative_prob=0.5, + interpolation='nearest'): + assert isinstance(angle, float), 'The angle type must be float, but ' \ + f'got {type(angle)} instead.' + if isinstance(center, tuple): + assert len(center) == 2, 'center as a tuple must have 2 ' \ + f'elements, got {len(center)} elements instead.' + else: + assert center is None, 'The center type' \ + f'must be tuple or None, got {type(center)} instead.' + assert isinstance(scale, float), 'the scale type must be float, but ' \ + f'got {type(scale)} instead.' + if isinstance(pad_val, int): + pad_val = tuple([pad_val] * 3) + elif isinstance(pad_val, Sequence): + assert len(pad_val) == 3, 'pad_val as a tuple must have 3 ' \ + f'elements, got {len(pad_val)} instead.' + assert all(isinstance(i, int) for i in pad_val), 'pad_val as a '\ + 'tuple must got elements of int type.' + else: + raise TypeError('pad_val must be int or tuple with 3 elements.') + assert 0 <= prob <= 1.0, 'The prob should be in range [0,1], ' \ + f'got {prob} instead.' + assert 0 <= random_negative_prob <= 1.0, 'The random_negative_prob ' \ + f'should be in range [0,1], got {random_negative_prob} instead.' 
+ + self.angle = angle + self.center = center + self.scale = scale + self.pad_val = tuple(pad_val) + self.prob = prob + self.random_negative_prob = random_negative_prob + self.interpolation = interpolation + + def __call__(self, img): + if np.random.rand() > self.prob: + return img + angle = random_negative(self.angle, self.random_negative_prob) + img_rotated = mmcv.imrotate( + img, + angle, + center=self.center, + scale=self.scale, + border_value=self.pad_val, + interpolation=self.interpolation) + return img_rotated.astype(img.dtype) + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(angle={self.angle}, ' + repr_str += f'center={self.center}, ' + repr_str += f'scale={self.scale}, ' + repr_str += f'pad_val={self.pad_val}, ' + repr_str += f'prob={self.prob}, ' + repr_str += f'random_negative_prob={self.random_negative_prob}, ' + repr_str += f'interpolation={self.interpolation})' + return repr_str + + +def auto_contrast(img, cutoff=0): + """Auto adjust image contrast. + This function maximize (normalize) image contrast by first removing cutoff + percent of the lightest and darkest pixels from the histogram and remapping + the image so that the darkest pixel becomes black (0), and the lightest + becomes white (255). + Args: + img (ndarray): Image to be contrasted. BGR order. + cutoff (int | float | tuple): The cutoff percent of the lightest and + darkest pixels to be removed. If given as tuple, it shall be + (low, high). Otherwise, the single value will be used for both. + Defaults to 0. + Returns: + ndarray: The contrasted image. + """ + + def _auto_contrast_channel(im, c, cutoff): + im = im[:, :, c] + # Compute the histogram of the image channel. + histo = np.histogram(im, 256, (0, 255))[0] + # Remove cut-off percent pixels from histo + histo_sum = np.cumsum(histo) + cut_low = histo_sum[-1] * cutoff[0] // 100 + cut_high = histo_sum[-1] - histo_sum[-1] * cutoff[1] // 100 + histo_sum = np.clip(histo_sum, cut_low, cut_high) - cut_low + histo = np.concatenate([[histo_sum[0]], np.diff(histo_sum)], 0) + + # Compute mapping + low, high = np.nonzero(histo)[0][0], np.nonzero(histo)[0][-1] + # If all the values have been cut off, return the origin img + if low >= high: + return im + scale = 255.0 / (high - low) + offset = -low * scale + lut = np.array(range(256)) + lut = lut * scale + offset + lut = np.clip(lut, 0, 255) + return lut[im] + + if isinstance(cutoff, (int, float)): + cutoff = (cutoff, cutoff) + else: + assert isinstance(cutoff, tuple), 'cutoff must be of type int, ' \ + f'float or tuple, but got {type(cutoff)} instead.' + # Auto adjusts contrast for each channel independently and then stacks + # the result. + s1 = _auto_contrast_channel(img, 0, cutoff) + s2 = _auto_contrast_channel(img, 1, cutoff) + s3 = _auto_contrast_channel(img, 2, cutoff) + contrasted_img = np.stack([s1, s2, s3], axis=-1) + return contrasted_img.astype(img.dtype) + + +@PIPELINES.register_module() +class AutoContrast(object): + """Auto adjust image contrast. + + Args: + prob (float): The probability for performing invert therefore should + be in range [0, 1]. Defaults to 0.5. + """ + + def __init__(self, prob=0.5): + assert 0 <= prob <= 1.0, 'The prob should be in range [0,1], ' \ + f'got {prob} instead.' 
+ + self.prob = prob + + def __call__(self, img): + if np.random.rand() > self.prob: + return img + img_contrasted = auto_contrast(img) + return img_contrasted.astype(img.dtype) + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(prob={self.prob})' + return repr_str + + +@PIPELINES.register_module() +class Identity(object): + """Identity Mapping (do nothing). + + Args: + prob (float): The probability for performing identity mapping. + """ + + def __init__(self, prob=0.5): + self.prob = prob + + def __call__(self, img): + return img + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(prob={self.prob})' + return repr_str + + +@PIPELINES.register_module() +class Invert(object): + """Invert images. + + Args: + prob (float): The probability for performing invert therefore should + be in range [0, 1]. Defaults to 0.5. + """ + + def __init__(self, prob=0.5): + assert 0 <= prob <= 1.0, 'The prob should be in range [0,1], ' \ + f'got {prob} instead.' + + self.prob = prob + + def __call__(self, img): + if np.random.rand() > self.prob: + return img + img_inverted = mmcv.iminvert(img) + return img_inverted.astype(img.dtype) + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(prob={self.prob})' + return repr_str + + +@PIPELINES.register_module() +class Equalize(object): + """Equalize the image histogram. + + Args: + prob (float): The probability for performing invert therefore should + be in range [0, 1]. Defaults to 0.5. + """ + + def __init__(self, prob=0.5): + assert 0 <= prob <= 1.0, 'The prob should be in range [0,1], ' \ + f'got {prob} instead.' + + self.prob = prob + + def __call__(self, img): + if np.random.rand() > self.prob: + return img + img_equalized = mmcv.imequalize(img) + return img_equalized.astype(img.dtype) + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(prob={self.prob})' + return repr_str + + +@PIPELINES.register_module() +class Solarize(object): + """Solarize images (invert all pixel values above a threshold). + + Args: + thr (int | float): The threshold above which the pixels value will be + inverted. + prob (float): The probability for solarizing therefore should be in + range [0, 1]. Defaults to 0.5. + """ + + def __init__(self, thr, prob=0.5): + assert isinstance(thr, (int, float)), 'The thr type must '\ + f'be int or float, but got {type(thr)} instead.' + assert 0 <= prob <= 1.0, 'The prob should be in range [0,1], ' \ + f'got {prob} instead.' + + self.thr = thr + self.prob = prob + + def __call__(self, img): + if np.random.rand() > self.prob: + return img + img_solarized = mmcv.solarize(img, thr=self.thr) + return img_solarized.astype(img.dtype) + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(thr={self.thr}, ' + repr_str += f'prob={self.prob})' + return repr_str + + +@PIPELINES.register_module() +class SolarizeAdd(object): + """SolarizeAdd images (add a certain value to pixels below a threshold). + + Args: + magnitude (int | float): The value to be added to pixels below the thr. + thr (int | float): The threshold below which the pixels value will be + adjusted. + prob (float): The probability for solarizing therefore should be in + range [0, 1]. Defaults to 0.5. + """ + + def __init__(self, magnitude, thr=128, prob=0.5): + assert isinstance(magnitude, (int, float)), 'The thr magnitude must '\ + f'be int or float, but got {type(magnitude)} instead.' 
+ assert isinstance(thr, (int, float)), 'The thr type must '\ + f'be int or float, but got {type(thr)} instead.' + assert 0 <= prob <= 1.0, 'The prob should be in range [0,1], ' \ + f'got {prob} instead.' + + self.magnitude = magnitude + self.thr = thr + self.prob = prob + + def __call__(self, img): + if np.random.rand() > self.prob: + return img + img_solarized = np.where(img < self.thr, + np.minimum(img + self.magnitude, 255), + img) + return img_solarized.astype(img.dtype) + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(magnitude={self.magnitude}, ' + repr_str += f'thr={self.thr}, ' + repr_str += f'prob={self.prob})' + return repr_str + + +@PIPELINES.register_module() +class Posterize(object): + """Posterize images (reduce the number of bits for each color channel). + + Args: + bits (int | float): Number of bits for each pixel in the output img, + which should be less or equal to 8. + prob (float): The probability for posterizing therefore should be in + range [0, 1]. Defaults to 0.5. + """ + + def __init__(self, bits, prob=0.5): + assert bits <= 8, f'The bits must be less than 8, got {bits} instead.' + assert 0 <= prob <= 1.0, 'The prob should be in range [0,1], ' \ + f'got {prob} instead.' + + self.bits = int(bits) + self.prob = prob + + def __call__(self, img): + if np.random.rand() > self.prob: + return img + img_posterized = mmcv.posterize(img, bits=self.bits) + return img_posterized.astype(img.dtype) + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(bits={self.bits}, ' + repr_str += f'prob={self.prob})' + return repr_str + + +@PIPELINES.register_module() +class Contrast(object): + """Adjust images contrast. + + Args: + magnitude (int | float): The magnitude used for adjusting contrast. A + positive magnitude would enhance the contrast and a negative + magnitude would make the image grayer. A magnitude=0 gives the + origin img. + prob (float): The probability for performing contrast adjusting + therefore should be in range [0, 1]. Defaults to 0.5. + random_negative_prob (float): The probability that turns the magnitude + negative, which should be in range [0,1]. Defaults to 0.5. + """ + + def __init__(self, magnitude, prob=0.5, random_negative_prob=0.5): + assert isinstance(magnitude, (int, float)), 'The magnitude type must '\ + f'be int or float, but got {type(magnitude)} instead.' + assert 0 <= prob <= 1.0, 'The prob should be in range [0,1], ' \ + f'got {prob} instead.' + assert 0 <= random_negative_prob <= 1.0, 'The random_negative_prob ' \ + f'should be in range [0,1], got {random_negative_prob} instead.' + + self.magnitude = magnitude + self.prob = prob + self.random_negative_prob = random_negative_prob + + def __call__(self, img): + if np.random.rand() > self.prob: + return img + magnitude = random_negative(self.magnitude, self.random_negative_prob) + img_contrasted = mmcv.adjust_contrast(img, factor=1 + magnitude) + return img_contrasted.astype(img.dtype) + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(magnitude={self.magnitude}, ' + repr_str += f'prob={self.prob}, ' + repr_str += f'random_negative_prob={self.random_negative_prob})' + return repr_str + + +@PIPELINES.register_module() +class ColorTransform(object): + """Adjust images color balance. + + Args: + magnitude (int | float): The magnitude used for color transform. A + positive magnitude would enhance the color and a negative magnitude + would make the image grayer. A magnitude=0 gives the origin img. 
+ prob (float): The probability for performing ColorTransform therefore + should be in range [0, 1]. Defaults to 0.5. + random_negative_prob (float): The probability that turns the magnitude + negative, which should be in range [0,1]. Defaults to 0.5. + """ + + def __init__(self, magnitude, prob=0.5, random_negative_prob=0.5): + assert isinstance(magnitude, (int, float)), 'The magnitude type must '\ + f'be int or float, but got {type(magnitude)} instead.' + assert 0 <= prob <= 1.0, 'The prob should be in range [0,1], ' \ + f'got {prob} instead.' + assert 0 <= random_negative_prob <= 1.0, 'The random_negative_prob ' \ + f'should be in range [0,1], got {random_negative_prob} instead.' + + self.magnitude = magnitude + self.prob = prob + self.random_negative_prob = random_negative_prob + + def __call__(self, img): + if np.random.rand() > self.prob: + return img + magnitude = random_negative(self.magnitude, self.random_negative_prob) + img_color_adjusted = mmcv.adjust_color(img, alpha=1 + magnitude) + return img_color_adjusted.astype(img.dtype) + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(magnitude={self.magnitude}, ' + repr_str += f'prob={self.prob}, ' + repr_str += f'random_negative_prob={self.random_negative_prob})' + return repr_str + + +@PIPELINES.register_module() +class Brightness(object): + """Adjust images brightness. + + Args: + magnitude (int | float): The magnitude used for adjusting brightness. A + positive magnitude would enhance the brightness and a negative + magnitude would make the image darker. A magnitude=0 gives the + origin img. + prob (float): The probability for performing contrast adjusting + therefore should be in range [0, 1]. Defaults to 0.5. + random_negative_prob (float): The probability that turns the magnitude + negative, which should be in range [0,1]. Defaults to 0.5. + """ + + def __init__(self, magnitude, prob=0.5, random_negative_prob=0.5): + assert isinstance(magnitude, (int, float)), 'The magnitude type must '\ + f'be int or float, but got {type(magnitude)} instead.' + assert 0 <= prob <= 1.0, 'The prob should be in range [0,1], ' \ + f'got {prob} instead.' + assert 0 <= random_negative_prob <= 1.0, 'The random_negative_prob ' \ + f'should be in range [0,1], got {random_negative_prob} instead.' + + self.magnitude = magnitude + self.prob = prob + self.random_negative_prob = random_negative_prob + + def __call__(self, img): + if np.random.rand() > self.prob: + return img + magnitude = random_negative(self.magnitude, self.random_negative_prob) + img_brightened = mmcv.adjust_brightness(img, factor=1 + magnitude) + return img_brightened.astype(img.dtype) + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(magnitude={self.magnitude}, ' + repr_str += f'prob={self.prob}, ' + repr_str += f'random_negative_prob={self.random_negative_prob})' + return repr_str + + +def adjust_sharpness(img, factor=1., kernel=None): + """Adjust image sharpness. + This function controls the sharpness of an image. An + enhancement factor of 0.0 gives a blurred image. A + factor of 1.0 gives the original image. And a factor + of 2.0 gives a sharpened image. It blends the source + image and the degenerated mean image: + .. math:: + output = img * factor + degenerated * (1 - factor) + Args: + img (ndarray): Image to be sharpened. BGR order. + factor (float): Same as :func:`mmcv.adjust_brightness`. + kernel (np.ndarray, optional): Filter kernel to be applied on the img + to obtain the degenerated img. Defaults to None. 
+    Note:
+        No value sanity check is enforced on the kernel set by users, so with
+        an inappropriate kernel, ``adjust_sharpness`` may fail to perform the
+        function its name indicates and instead apply whatever transform the
+        kernel determines.
+    Returns:
+        ndarray: The sharpened image.
+    """
+
+    if kernel is None:
+        # adopted from PIL.ImageFilter.SMOOTH
+        kernel = np.array([[1., 1., 1.], [1., 5., 1.], [1., 1., 1.]]) / 13
+    assert isinstance(kernel, np.ndarray), \
+        f'kernel must be of type np.ndarray, but got {type(kernel)} instead.'
+    assert kernel.ndim == 2, \
+        f'kernel must have a dimension of 2, but got {kernel.ndim} instead.'
+
+    degenerated = cv2.filter2D(img, -1, kernel)
+    sharpened_img = cv2.addWeighted(
+        img.astype(np.float32), factor, degenerated.astype(np.float32),
+        1 - factor, 0)
+    sharpened_img = np.clip(sharpened_img, 0, 255)
+    return sharpened_img.astype(img.dtype)
+
+
+@PIPELINES.register_module()
+class Sharpness(object):
+    """Adjust images sharpness.
+
+    Args:
+        magnitude (int | float): The magnitude used for adjusting sharpness. A
+            positive magnitude would enhance the sharpness and a negative
+            magnitude would make the image blurrier. A magnitude=0 gives the
+            origin img.
+        prob (float): The probability for performing sharpness adjusting
+            therefore should be in range [0, 1]. Defaults to 0.5.
+        random_negative_prob (float): The probability that turns the magnitude
+            negative, which should be in range [0,1]. Defaults to 0.5.
+    """
+
+    def __init__(self, magnitude, prob=0.5, random_negative_prob=0.5):
+        assert isinstance(magnitude, (int, float)), 'The magnitude type must '\
+            f'be int or float, but got {type(magnitude)} instead.'
+        assert 0 <= prob <= 1.0, 'The prob should be in range [0,1], ' \
+            f'got {prob} instead.'
+        assert 0 <= random_negative_prob <= 1.0, 'The random_negative_prob ' \
+            f'should be in range [0,1], got {random_negative_prob} instead.'
+
+        self.magnitude = magnitude
+        self.prob = prob
+        self.random_negative_prob = random_negative_prob
+
+    def __call__(self, img):
+        if np.random.rand() > self.prob:
+            return img
+        magnitude = random_negative(self.magnitude, self.random_negative_prob)
+        img_sharpened = adjust_sharpness(img, factor=1 + magnitude)
+        return img_sharpened.astype(img.dtype)
+
+    def __repr__(self):
+        repr_str = self.__class__.__name__
+        repr_str += f'(magnitude={self.magnitude}, '
+        repr_str += f'prob={self.prob}, '
+        repr_str += f'random_negative_prob={self.random_negative_prob})'
+        return repr_str
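To make the blending formula above concrete: factor 1.0 must return the input untouched, factor 0.0 returns the kernel-smoothed image, and factor 2.0 exaggerates edges before clipping. A small sanity-check sketch (the toy image is illustrative only, and it assumes `adjust_sharpness` from this file is importable):

    import numpy as np
    from openmixup.datasets.pipelines.auto_augment import adjust_sharpness

    img = np.zeros((8, 8, 3), dtype=np.uint8)
    img[2:6, 2:6] = 255  # hard-edged white square

    assert np.array_equal(adjust_sharpness(img, factor=1.), img)  # identity
    blurred = adjust_sharpness(img, factor=0.)  # pure kernel smoothing
    sharper = adjust_sharpness(img, factor=2.)  # edges exaggerated, clipped to [0, 255]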
+    else:
+        raise TypeError(f'Invalid type {type(pad_val)} for `pad_val`')
+
+    img_h, img_w = img.shape[:2]
+    y0 = np.random.uniform(img_h)
+    x0 = np.random.uniform(img_w)
+
+    y1 = int(max(0, y0 - cut_h / 2.))
+    x1 = int(max(0, x0 - cut_w / 2.))
+    y2 = min(img_h, y1 + cut_h)
+    x2 = min(img_w, x1 + cut_w)
+
+    if img.ndim == 2:
+        patch_shape = (y2 - y1, x2 - x1)
+    else:
+        patch_shape = (y2 - y1, x2 - x1, channels)
+
+    img_cutout = img.copy()
+    patch = np.array(
+        pad_val, dtype=img.dtype) * np.ones(
+            patch_shape, dtype=img.dtype)
+    img_cutout[y1:y2, x1:x2, ...] = patch
+
+    return img_cutout
+
+
+@PIPELINES.register_module()
+class Cutout(object):
+    """Cutout images.
+
+    Args:
+        shape (int | float | tuple(int | float)): Expected cutout shape
+            (h, w). If given as a single value, the value will be used for
+            both h and w.
+        pad_val (int | Sequence[int]): Pixel value for constant fill. If it
+            is a sequence, it must have the same length as the image
+            channels. Defaults to 128.
+        prob (float): The probability of performing cutout, which should be
+            in range [0, 1]. Defaults to 0.5.
+    """
+
+    def __init__(self, shape, pad_val=128, prob=0.5):
+        if isinstance(shape, float):
+            shape = int(shape)
+        elif isinstance(shape, tuple):
+            shape = tuple(int(i) for i in shape)
+        elif not isinstance(shape, int):
+            raise TypeError(
+                'shape must be of '
+                f'type int, float or tuple, got {type(shape)} instead')
+        if isinstance(pad_val, int):
+            pad_val = tuple([pad_val] * 3)
+        elif isinstance(pad_val, Sequence):
+            assert len(pad_val) == 3, 'pad_val as a tuple must have 3 ' \
+                f'elements, got {len(pad_val)} instead.'
+        assert 0 <= prob <= 1.0, 'The prob should be in range [0,1], ' \
+            f'got {prob} instead.'
+
+        self.shape = shape
+        self.pad_val = tuple(pad_val)
+        self.prob = prob
+
+    def __call__(self, img):
+        """Assume the input img is a numpy array or a PIL Image."""
+        if np.random.rand() > self.prob:
+            return img
+        if isinstance(img, Image.Image):
+            img = np.array(img)
+            img = cutout(img, self.shape, pad_val=self.pad_val)
+            return Image.fromarray(img.astype(np.uint8))
+        else:
+            img_cutout = cutout(img, self.shape, pad_val=self.pad_val)
+            return img_cutout.astype(img.dtype)
+
+    def __repr__(self):
+        repr_str = self.__class__.__name__
+        repr_str += f'(shape={self.shape}, '
+        repr_str += f'pad_val={self.pad_val}, '
+        repr_str += f'prob={self.prob})'
+        return repr_str
diff --git a/openmixup/datasets/pipelines/compose.py b/openmixup/datasets/pipelines/compose.py
new file mode 100644
index 00000000..4f2dcf3d
--- /dev/null
+++ b/openmixup/datasets/pipelines/compose.py
@@ -0,0 +1,42 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from collections.abc import Sequence
+
+from openmixup.utils import build_from_cfg
+from ..registry import PIPELINES
+
+
+class BuildCompose(object):
+    """Compose a data pipeline with a sequence of transforms.
+    *** Modified torchvision Compose ***
+
+    Args:
+        transforms (list[dict | callable]):
+            Either config dicts of transforms or transform objects.
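+
+    Example:
+        A minimal sketch (assuming ``Cutout`` above is registered in
+        ``PIPELINES``; the shape value is illustrative)::
+
+            >>> pipeline = BuildCompose([dict(type='Cutout', shape=8)])
+            >>> out = pipeline(img)  # transforms are applied in order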
+ """ + + def __init__(self, transforms): + assert isinstance(transforms, Sequence) + self.transforms = [] + for transform in transforms: + if isinstance(transform, dict): + transform = build_from_cfg(transform, PIPELINES) + self.transforms.append(transform) + elif callable(transform): + self.transforms.append(transform) + else: + raise TypeError('transform must be callable or a dict, but got' + f' {type(transform)}') + + def __call__(self, data): + for t in self.transforms: + data = t(data) + if data is None: + return None + return data + + def __repr__(self): + format_string = self.__class__.__name__ + '(' + for t in self.transforms: + format_string += f'\n {t}' + format_string += '\n)' + return format_string diff --git a/openmixup/datasets/pipelines/transforms.py b/openmixup/datasets/pipelines/transforms.py new file mode 100644 index 00000000..d3d49bdc --- /dev/null +++ b/openmixup/datasets/pipelines/transforms.py @@ -0,0 +1,146 @@ +import inspect +import numpy as np +from PIL import Image, ImageFilter + +import torch +from torchvision import transforms as _transforms + +from openmixup.utils import build_from_cfg + +from ..registry import PIPELINES + + +# register all existing transforms in torchvision +_EXCLUDED_TRANSFORMS = ['GaussianBlur'] +for m in inspect.getmembers(_transforms, inspect.isclass): + if m[0] not in _EXCLUDED_TRANSFORMS: + PIPELINES.register_module(m[1]) + + +@PIPELINES.register_module +class RandomAppliedTrans(object): + """Randomly applied transformations. + + Args: + transforms (list[dict]): List of transformations in dictionaries. + p (float): Probability. + """ + + def __init__(self, transforms, p=0.5): + t = [build_from_cfg(t, PIPELINES) for t in transforms] + self.trans = _transforms.RandomApply(t, p=p) + + def __call__(self, img): + return self.trans(img) + + def __repr__(self): + repr_str = self.__class__.__name__ + return repr_str + + +# custom transforms +@PIPELINES.register_module +class Lighting(object): + """Lighting noise(AlexNet - style PCA - based noise).""" + + _IMAGENET_PCA = { + 'eigval': + torch.Tensor([0.2175, 0.0188, 0.0045]), + 'eigvec': + torch.Tensor([ + [-0.5675, 0.7192, 0.4009], + [-0.5808, -0.0045, -0.8140], + [-0.5836, -0.6948, 0.4203], + ]) + } + + def __init__(self): + self.alphastd = 0.1 + self.eigval = self._IMAGENET_PCA['eigval'] + self.eigvec = self._IMAGENET_PCA['eigvec'] + + def __call__(self, img): + assert isinstance(img, torch.Tensor), \ + "Expect torch.Tensor, got {}".format(type(img)) + if self.alphastd == 0: + return img + + alpha = img.new().resize_(3).normal_(0, self.alphastd) + rgb = self.eigvec.type_as(img).clone()\ + .mul(alpha.view(1, 3).expand(3, 3))\ + .mul(self.eigval.view(1, 3).expand(3, 3))\ + .sum(1).squeeze() + + return img.add(rgb.view(3, 1, 1).expand_as(img)) + + def __repr__(self): + repr_str = self.__class__.__name__ + return repr_str + + +@PIPELINES.register_module +class GaussianBlur(object): + """Gaussian blur augmentation in SimCLR https://arxiv.org/abs/2002.05709.""" + + def __init__(self, sigma_min, sigma_max): + self.sigma_min = sigma_min + self.sigma_max = sigma_max + + def __call__(self, img): + sigma = np.random.uniform(self.sigma_min, self.sigma_max) + img = img.filter(ImageFilter.GaussianBlur(radius=sigma)) + return img + + def __repr__(self): + repr_str = self.__class__.__name__ + return repr_str + + +@PIPELINES.register_module +class Solarization(object): + """Solarization augmentation in BYOL https://arxiv.org/abs/2006.07733.""" + + def __init__(self, threshold=128): + self.threshold = 
+
+    def __call__(self, img):
+        img = np.array(img)
+        img = np.where(img < self.threshold, img, 255 - img)
+        return Image.fromarray(img.astype(np.uint8))
+
+    def __repr__(self):
+        repr_str = self.__class__.__name__
+        return repr_str
+
+
+@PIPELINES.register_module
+class PlaceCrop(object):
+    """Crops the given Image at the particular index list in Co-Tuning,
+    https://proceedings.neurips.cc//paper/2020/file/c8067ad1937f728f51288b3eb986afaa-Paper.pdf.
+    Usually used as a test time augmentation.
+
+    Args:
+        size (tuple or int): Desired output size of the crop. If size is an
+            int instead of a sequence like (w, h), a square crop of
+            (size, size) is made.
+        start (tuple or list or int): The start coordinate for CenterCrop.
+            If start is a sequence or list, it will be uniformly sampled
+            each time.
+    """
+
+    def __init__(self, size, start):
+        self.size = size
+        self.start = start
+        if isinstance(size, int):
+            self.size = (int(size), int(size))
+
+    def __call__(self, img):
+        start_x = self.start
+        start_y = self.start
+        if isinstance(self.start, list) or isinstance(self.start, tuple):
+            start_x = self.start[np.random.randint(len(self.start))]
+            start_y = self.start[np.random.randint(len(self.start))]
+        th, tw = self.size
+        return img.crop((start_x, start_y, start_x + tw, start_y + th))
+
+    def __repr__(self):
+        repr_str = self.__class__.__name__
+        return repr_str
diff --git a/openmixup/datasets/registry.py b/openmixup/datasets/registry.py
new file mode 100644
index 00000000..77ab5d65
--- /dev/null
+++ b/openmixup/datasets/registry.py
@@ -0,0 +1,5 @@
+from openmixup.utils import Registry
+
+DATASOURCES = Registry('datasource')
+DATASETS = Registry('dataset')
+PIPELINES = Registry('pipeline')
diff --git a/openmixup/datasets/relative_loc.py b/openmixup/datasets/relative_loc.py
new file mode 100644
index 00000000..2552d223
--- /dev/null
+++ b/openmixup/datasets/relative_loc.py
@@ -0,0 +1,65 @@
+from openmixup.utils import build_from_cfg
+
+import torch
+from PIL import Image
+from torchvision.transforms import Compose, RandomCrop
+import torchvision.transforms.functional as TF
+
+from .registry import DATASETS, PIPELINES
+from .base import BaseDataset
+
+
+def image_to_patches(img):
+    """Crop split_per_side x split_per_side patches from input image.
+
+    Args:
+        img (PIL Image): input image.
+
+    Returns:
+        list[PIL Image]: A list of cropped patches.
+    """
+    split_per_side = 3  # split of patches per image side
+    patch_jitter = 21  # jitter of each patch from each grid
+    h, w = img.size
+    h_grid = h // split_per_side
+    w_grid = w // split_per_side
+    h_patch = h_grid - patch_jitter
+    w_patch = w_grid - patch_jitter
+    assert h_patch > 0 and w_patch > 0
+    patches = []
+    for i in range(split_per_side):
+        for j in range(split_per_side):
+            p = TF.crop(img, i * h_grid, j * w_grid, h_grid, w_grid)
+            p = RandomCrop((h_patch, w_patch))(p)
+            patches.append(p)
+    return patches
+
+
+@DATASETS.register_module
+class RelativeLocDataset(BaseDataset):
+    """Dataset for relative patch location."""
+
+    def __init__(self, data_source, pipeline, format_pipeline):
+        super(RelativeLocDataset, self).__init__(data_source, pipeline)
+        format_pipeline = [build_from_cfg(p, PIPELINES) for p in format_pipeline]
+        self.format_pipeline = Compose(format_pipeline)
+
+    def __getitem__(self, idx):
+        img = self.data_source.get_sample(idx)
+        assert isinstance(img, Image.Image), \
+            'The output from the data source must be an Image, got: {}. \
+            Please ensure that the list file does not contain labels.'.format(
+                type(img))
+        img = self.pipeline(img)
+        patches = image_to_patches(img)
+        patches = [self.format_pipeline(p) for p in patches]
+        perms = []
+        # create a list of patch pairs (each surrounding patch with the center)
+        for i in range(9):
+            if i != 4:
+                perms.append(torch.cat((patches[i], patches[4]), dim=0))
+        # create corresponding labels for patch pairs
+        patch_labels = torch.LongTensor([0, 1, 2, 3, 4, 5, 6, 7])
+        return dict(img=torch.stack(perms), patch_label=patch_labels)  # 8(2C)HW, 8
+
+    def evaluate(self, scores, keyword, logger=None):
+        raise NotImplementedError
diff --git a/openmixup/datasets/rotation_pred.py b/openmixup/datasets/rotation_pred.py
new file mode 100644
index 00000000..2a20ac2f
--- /dev/null
+++ b/openmixup/datasets/rotation_pred.py
@@ -0,0 +1,45 @@
+import torch
+from PIL import Image
+
+from .registry import DATASETS
+from .base import BaseDataset
+
+
+def rotate(img):
+    """Rotate input image with 0, 90, 180, and 270 degrees.
+
+    Args:
+        img (Tensor): input image of shape (C, H, W).
+
+    Returns:
+        list[Tensor]: A list of four rotated images.
+    """
+    return [
+        img,
+        torch.flip(img.transpose(1, 2), [1]),
+        torch.flip(img, [1, 2]),
+        torch.flip(img, [1]).transpose(1, 2)
+    ]
+
+
+@DATASETS.register_module
+class RotationPredDataset(BaseDataset):
+    """Dataset for rotation prediction."""
+
+    def __init__(self, data_source, pipeline):
+        super(RotationPredDataset, self).__init__(data_source, pipeline)
+
+    def __getitem__(self, idx):
+        img = self.data_source.get_sample(idx)
+        assert isinstance(img, Image.Image), \
+            'The output from the data source must be an Image, got: {}. \
+            Please ensure that the list file does not contain labels.'.format(
+                type(img))
+        img = self.pipeline(img)
+        img = torch.stack(rotate(img), dim=0)
+        rotation_labels = torch.LongTensor([0, 1, 2, 3])
+        return dict(img=img, rot_label=rotation_labels)
+
+    def evaluate(self, scores, keyword, logger=None):
+        raise NotImplementedError
diff --git a/openmixup/datasets/semi_supervised.py b/openmixup/datasets/semi_supervised.py
new file mode 100644
index 00000000..a7db6aee
--- /dev/null
+++ b/openmixup/datasets/semi_supervised.py
@@ -0,0 +1,157 @@
+import torch
+from PIL import Image
+from torch.utils.data import Dataset
+from torchvision.transforms import Compose
+import time
+
+from .utils import to_numpy
+from openmixup.utils import print_log, build_from_cfg
+from .registry import DATASETS, PIPELINES
+from .builder import build_datasource
+
+
+@DATASETS.register_module
+class SemiSupervisedDataset(Dataset):
+    """Dataset for semi-supervised methods
+    *** using contrastive-based (SSL) augmentation (2N) ***
+
+    Args:
+        data_source_labeled (dict): Data source for labeled data.
+        data_source_unlabeled (dict): Data source for unlabeled data.
+        pipeline_labeled (list[dict]): A list of dicts for the labeled (L)
+            dataset, where each element represents an operation defined in
+            `datasets.pipelines`.
+        pipeline_unlabeled (list[dict]): A list of dicts for the unlabeled
+            (UL) dataset.
+        pipeline_strong (list[dict]): A list of dicts for additional strong
+            augmentations. If 'pipeline_strong' is not None, apply it to the
+            second sample of L and UL.
+        ret_samples (dict): Choice of the returned samples' shape; 'x_l_2'
+            denotes the second labeled samples and 'x_ul_2' denotes the
+            second unlabeled samples. For example, 'FixMatch' requires
+            strong & weak augmented unlabeled pairs, as
+            ret_samples=dict(x_l_2=False, x_ul_2=True).
+            'Self-Tuning' requires two views of both L and UL samples for
+            the PGC loss, as ret_samples=dict(x_l_2=True, x_ul_2=True).
+            Defaults to dict(x_l_2=True, x_ul_2=True), i.e., a returned
+            shape of [N, 4, C, H, W].
+    """
+
+    def __init__(self,
+                 data_source_labeled,
+                 data_source_unlabeled,
+                 pipeline_labeled=None,
+                 pipeline_unlabeled=None,
+                 pipeline_strong=None,
+                 ret_samples=dict(x_l_2=True, x_ul_2=True),
+                 prefetch=False):
+        self.data_source_labeled = build_datasource(data_source_labeled)
+        self.data_source_unlabeled = build_datasource(data_source_unlabeled)
+        # labeled
+        pipeline_labeled = [build_from_cfg(p, PIPELINES) for p in pipeline_labeled]
+        self.pipeline_labeled = Compose(pipeline_labeled)
+        # unlabeled
+        pipeline_unlabeled = [build_from_cfg(p, PIPELINES) for p in pipeline_unlabeled]
+        self.pipeline_unlabeled = Compose(pipeline_unlabeled)
+        # strong aug
+        self.pipeline_strong = None
+        if pipeline_strong is not None:
+            pipeline_strong = [build_from_cfg(p, PIPELINES) for p in pipeline_strong]
+            self.pipeline_strong = Compose(pipeline_strong)
+
+        self.pseudo_labels = [-1 for _ in range(self.data_source_unlabeled.get_length())]
+        # the length depends on the larger dataset
+        if self.data_source_labeled.get_length() >= self.data_source_unlabeled.get_length():
+            self.length = self.data_source_labeled.get_length()
+            # self.length_gap = self.length - self.data_source_unlabeled.get_length()
+            self.unlabeled_large = False
+        else:
+            self.length = self.data_source_unlabeled.get_length()
+            # self.length_gap = self.length - self.data_source_labeled.get_length()
+            self.unlabeled_large = True
+        self.ret_samples = ret_samples
+        self.x_l_2 = ret_samples.get("x_l_2", True)
+        self.x_ul_2 = ret_samples.get("x_ul_2", True)
+        self.prefetch = prefetch
+
+    def __len__(self):
+        return self.length
+
+    def assign_labels(self, labels):
+        """Assign pseudo-labels for the unlabeled data."""
+        assert len(self.pseudo_labels) == len(labels), \
+            "Inconsistent length of assigned labels for unlabeled dataset, \
+            {} vs {}".format(len(self.pseudo_labels), len(labels))
+        self.pseudo_labels = labels[:]
+
+    def __getitem__(self, idx):
+        # Warning: the seed might be different within a mini-batch!!!
+        seed = time.localtime()[3] * 10 + time.localtime()[4] % 10  # hour in [0,23], last digit of the minute in [0,9]
+        # idx for labeled and unlabeled data
+        if self.unlabeled_large:
+            idx_labeled = (idx + seed) % self.data_source_labeled.get_length()
+            idx_unlabeled = idx
+        else:
+            idx_labeled = idx
+            idx_unlabeled = (idx + seed) % self.data_source_unlabeled.get_length()
+        # labeled data: img + gt_labels
+        img_labeled, gt_labels = self.data_source_labeled.get_sample(idx_labeled)
+        # unlabeled data: img + pseudo labels
+        img_unlabeled = self.data_source_unlabeled.get_sample(idx_unlabeled)
+        pseudo_labels = self.pseudo_labels[idx_unlabeled]
+        assert isinstance(img_labeled, Image.Image) and isinstance(img_unlabeled, Image.Image), \
+            'The output from the data source must be an Image, got: {}, {}. \
+            Please ensure that the list file does not contain labels.'.format(
+                type(img_labeled), type(img_unlabeled))
+
+        # contrastive-based pipelines
+        img1_labeled = self.pipeline_labeled(img_labeled)
+        if self.x_l_2:
+            if self.pipeline_strong is not None:
+                img2_labeled = self.pipeline_strong(img_labeled)
+            else:
+                img2_labeled = self.pipeline_labeled(img_labeled)
+        img1_unlabeled = self.pipeline_unlabeled(img_unlabeled)
+        if self.x_ul_2:
+            if self.pipeline_strong is not None:
+                img2_unlabeled = self.pipeline_strong(img_unlabeled)
+            else:
+                img2_unlabeled = self.pipeline_unlabeled(img_unlabeled)
+
+        # prefetch as numpy
+        if self.prefetch:
+            img1_labeled = torch.from_numpy(to_numpy(img1_labeled))
+            if self.x_l_2:
+                img2_labeled = torch.from_numpy(to_numpy(img2_labeled))
+            img1_unlabeled = torch.from_numpy(to_numpy(img1_unlabeled))
+            if self.x_ul_2:
+                img2_unlabeled = torch.from_numpy(to_numpy(img2_unlabeled))
+
+        # return samples
+        if self.x_l_2:
+            cat_list = [img1_labeled.unsqueeze(0), img2_labeled.unsqueeze(0)]
+        else:
+            cat_list = [img1_labeled.unsqueeze(0),]
+        cat_list.append(img1_unlabeled.unsqueeze(0))
+        if self.x_ul_2:
+            cat_list.append(img2_unlabeled.unsqueeze(0))
+        img = torch.cat(cat_list, dim=0)
+        # provide data + labels
+        return dict(img=img, gt_labels=gt_labels, ps_labels=pseudo_labels, gt_idx=idx_labeled)
+
+    def evaluate(self, scores, keyword, logger=None, topk=(1, 5)):
+        """(original supervised) classification evaluation"""
+        eval_res = {}
+
+        target = torch.LongTensor(self.data_source_labeled.labels)
+        assert scores.size(0) == target.size(0), \
+            "Inconsistent length for results and labels, {} vs {}".format(
+                scores.size(0), target.size(0))
+        num = scores.size(0)
+        _, pred = scores.topk(max(topk), dim=1, largest=True, sorted=True)
+        pred = pred.t()
+        correct = pred.eq(target.view(1, -1).expand_as(pred))  # KxN
+        for k in topk:
+            correct_k = correct[:k].contiguous().view(-1).float().sum(0).item()
+            acc = correct_k * 100.0 / num
+            eval_res["{}_top{}".format(keyword, k)] = acc
+            if logger is not None and logger != 'silent':
+                print_log(
+                    "{}_top{}: {:.03f}".format(keyword, k, acc),
+                    logger=logger)
+        return eval_res
diff --git a/openmixup/datasets/utils.py b/openmixup/datasets/utils.py
new file mode 100644
index 00000000..d39dee0a
--- /dev/null
+++ b/openmixup/datasets/utils.py
@@ -0,0 +1,9 @@
+import numpy as np
+
+
+def to_numpy(pil_img):
+    np_img = np.array(pil_img, dtype=np.uint8)
+    if np_img.ndim < 3:
+        np_img = np.expand_dims(np_img, axis=-1)
+    np_img = np.rollaxis(np_img, 2)  # HWC to CHW
+    return np_img
diff --git a/openmixup/hooks/__init__.py b/openmixup/hooks/__init__.py
new file mode 100644
index 00000000..0c1b154a
--- /dev/null
+++ b/openmixup/hooks/__init__.py
@@ -0,0 +1,12 @@
+from .addtional_scheduler import *
+from .builder import build_hook, build_addtional_scheduler, build_optimizer
+from .byol_hook import BYOLHook
+from .deepcluster_hook import DeepClusterHook
+from .deepcluster_automix_hook import DeepClusterAutoMixHook
+from .extractor import Extractor
+from .momentum_hook import CosineHook, StepHook, CosineScheduleHook, StepScheduleHook
+from .odc_hook import ODCHook
+from .optimizer_hook import DistOptimizerHook
+from .registry import HOOKS
+from .save_hook import SAVEHook
+from .validate_hook import ValidateHook
diff --git a/openmixup/hooks/addtional_scheduler.py b/openmixup/hooks/addtional_scheduler.py
new file mode 100644
index 00000000..54967db4
--- /dev/null
+++ b/openmixup/hooks/addtional_scheduler.py
@@ -0,0 +1,645 @@
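+# Usage sketch (illustrative; see `build_addtional_scheduler` in
+# hooks/builder.py): parameters whose names match the regexps in
+# `paramwise_options` receive their own LR schedule, e.g.
+#   addtional_scheduler = dict(
+#       policy='CosineAnnealing', min_lr=0., by_epoch=False,
+#       paramwise_options=['mix_block'])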
+from mmcv.runner import Hook
+from math import cos, pi
+from .registry import HOOKS
+
+
+class LrAddtionalSchedulerHook(Hook):
+    """LR Additional Scheduler.
+
+    Args:
+        addtional_indice (list): A list of indices for the selected params.
+        by_epoch (bool): LR changes epoch by epoch.
+        warmup (string): Type of warmup used. It can be None (use no warmup),
+            'constant', 'linear' or 'exp'.
+        warmup_iters (int): The number of iterations or epochs that warmup
+            lasts.
+        warmup_ratio (float): LR used at the beginning of warmup equals to
+            warmup_ratio * initial_lr.
+        warmup_by_epoch (bool): When warmup_by_epoch == True, warmup_iters
+            means the number of epochs that warmup lasts, otherwise means the
+            number of iterations that warmup lasts.
+    """
+
+    def __init__(self,
+                 addtional_indice=None,
+                 by_epoch=True,
+                 warmup=None,
+                 warmup_iters=0,
+                 warmup_ratio=0.1,
+                 warmup_by_epoch=False,
+                 **kwargs):
+        # validate the "warmup" argument
+        if warmup is not None:
+            if warmup not in ['constant', 'linear', 'exp']:
+                raise ValueError(
+                    f'"{warmup}" is not a supported type for warming up, valid'
+                    ' types are "constant", "linear" and "exp"')
+        if warmup is not None:
+            assert warmup_iters > 0, \
+                '"warmup_iters" must be a positive integer'
+            assert 0 < warmup_ratio <= 1.0, \
+                '"warmup_ratio" must be in range (0,1]'
+
+        # optional indices
+        self.addtional_indice = addtional_indice
+        assert addtional_indice is not None
+        # basic lr scheduler args
+        self.by_epoch = by_epoch
+        self.warmup = warmup
+        self.warmup_iters = warmup_iters
+        self.warmup_ratio = warmup_ratio
+        self.warmup_by_epoch = warmup_by_epoch
+
+        if self.warmup_by_epoch:
+            self.warmup_epochs = self.warmup_iters
+            self.warmup_iters = None
+        else:
+            self.warmup_epochs = None
+
+        self.base_lr = []  # initial lr for optional param groups
+        self.regular_lr = []  # expected lr if no warming up is performed
+
+    def _set_lr(self, runner, lr_groups):
+        if isinstance(runner.optimizer, dict):
+            for k, optim in runner.optimizer.items():
+                j = 0
+                for i, param_group in enumerate(optim.param_groups):
+                    if i in self.addtional_indice:
+                        lr = lr_groups[k][j]
+                        param_group['lr'] = lr
+                        j += 1
+        else:
+            j = 0
+            for i, param_group in enumerate(runner.optimizer.param_groups):
+                if i in self.addtional_indice:
+                    lr = lr_groups[j]
+                    param_group['lr'] = lr
+                    j += 1
+
+    def get_lr(self, runner, base_lr):
+        raise NotImplementedError
+
+    def get_regular_lr(self, runner):
+        if isinstance(runner.optimizer, dict):
+            lr_groups = {}
+            for k in runner.optimizer.keys():
+                _lr_group = [
+                    self.get_lr(runner, _base_lr)
+                    for _base_lr in self.base_lr[k]
+                ]
+                lr_groups.update({k: _lr_group})
+            return lr_groups
+        else:
+            return [self.get_lr(runner, _base_lr) for _base_lr in self.base_lr]
+
+    def get_warmup_lr(self, cur_iters):
+        if self.warmup == 'constant':
+            warmup_lr = [_lr * self.warmup_ratio for _lr in self.regular_lr]
+        elif self.warmup == 'linear':
+            k = (1 - cur_iters / self.warmup_iters) * (1 - self.warmup_ratio)
+            warmup_lr = [_lr * (1 - k) for _lr in self.regular_lr]
+        elif self.warmup == 'exp':
+            k = self.warmup_ratio**(1 - cur_iters / self.warmup_iters)
+            warmup_lr = [_lr * k for _lr in self.regular_lr]
+        return warmup_lr
+
+    def before_run(self, runner):
+        # NOTE: when resuming from a checkpoint, if 'initial_lr' is not saved,
+        # it will be set according to the optimizer params
+        if isinstance(runner.optimizer, dict):
+            self.base_lr = {}
+            for k, optim in runner.optimizer.items():
+                _base_lr = list()
+                for i, group in enumerate(optim.param_groups):
+                    if i in self.addtional_indice:
+                        group.setdefault('initial_lr', group['lr'])
+                        _base_lr.append(group['initial_lr'])
+                self.base_lr.update({k: _base_lr})
+        else:
+            self.base_lr = list()
+            for i, group in enumerate(runner.optimizer.param_groups):
+                if i in self.addtional_indice:
+                    group.setdefault('initial_lr', group['lr'])
+                    self.base_lr.append(group['initial_lr'])
+
+    def before_train_epoch(self, runner):
+        if not self.by_epoch:
+            return
+        if self.warmup_by_epoch:
+            epoch_len = len(runner.data_loader)
+            self.warmup_iters = self.warmup_epochs * epoch_len
+
+        self.regular_lr = self.get_regular_lr(runner)
+        self._set_lr(runner, self.regular_lr)
+
+    def before_train_iter(self, runner):
+        cur_iter = runner.iter
+        if not self.by_epoch:
+            self.regular_lr = self.get_regular_lr(runner)
+            if self.warmup is None or cur_iter >= self.warmup_iters:
+                self._set_lr(runner, self.regular_lr)
+            else:
+                warmup_lr = self.get_warmup_lr(cur_iter)
+                self._set_lr(runner, warmup_lr)
+        elif self.by_epoch:
+            if self.warmup is None or cur_iter > self.warmup_iters:
+                return
+            elif cur_iter == self.warmup_iters:
+                self._set_lr(runner, self.regular_lr)
+            else:
+                warmup_lr = self.get_warmup_lr(cur_iter)
+                self._set_lr(runner, warmup_lr)
+
+
+@HOOKS.register_module()
+class FixedLrAdditionalHook(LrAddtionalSchedulerHook):
+
+    def __init__(self, **kwargs):
+        super(FixedLrAdditionalHook, self).__init__(**kwargs)
+
+    def get_lr(self, runner, base_lr):
+        return base_lr
+
+
+@HOOKS.register_module()
+class StepLrAdditionalHook(LrAddtionalSchedulerHook):
+
+    def __init__(self, step, gamma=0.1, **kwargs):
+        assert isinstance(step, (list, int))
+        if isinstance(step, list):
+            for s in step:
+                assert isinstance(s, int) and s > 0
+        elif isinstance(step, int):
+            assert step > 0
+        else:
+            raise TypeError('"step" must be a list or integer')
+        self.step = step
+        self.gamma = gamma
+        super(StepLrAdditionalHook, self).__init__(**kwargs)
+
+    def get_lr(self, runner, base_lr):
+        progress = runner.epoch if self.by_epoch else runner.iter
+
+        if isinstance(self.step, int):
+            return base_lr * (self.gamma**(progress // self.step))
+
+        exp = len(self.step)
+        for i, s in enumerate(self.step):
+            if progress < s:
+                exp = i
+                break
+        return base_lr * self.gamma**exp
+
+
+@HOOKS.register_module()
+class ExpLrAdditionalHook(LrAddtionalSchedulerHook):
+
+    def __init__(self, gamma, **kwargs):
+        self.gamma = gamma
+        super(ExpLrAdditionalHook, self).__init__(**kwargs)
+
+    def get_lr(self, runner, base_lr):
+        progress = runner.epoch if self.by_epoch else runner.iter
+        return base_lr * self.gamma**progress
+
+
+@HOOKS.register_module()
+class PolyLrAdditionalHook(LrAddtionalSchedulerHook):
+
+    def __init__(self, power=1., min_lr=0., **kwargs):
+        self.power = power
+        self.min_lr = min_lr
+        super(PolyLrAdditionalHook, self).__init__(**kwargs)
+
+    def get_lr(self, runner, base_lr):
+        if self.by_epoch:
+            progress = runner.epoch
+            max_progress = runner.max_epochs
+        else:
+            progress = runner.iter
+            max_progress = runner.max_iters
+        coeff = (1 - progress / max_progress)**self.power
+        return (base_lr - self.min_lr) * coeff + self.min_lr
+
+
+@HOOKS.register_module()
+class InvLrAdditionalHook(LrAddtionalSchedulerHook):
+
+    def __init__(self, gamma, power=1., **kwargs):
+        self.gamma = gamma
+        self.power = power
+        super(InvLrAdditionalHook, self).__init__(**kwargs)
+
+    def get_lr(self, runner, base_lr):
+        progress = runner.epoch if self.by_epoch else runner.iter
+        return base_lr * (1 + self.gamma * progress)**(-self.power)
+
+
+@HOOKS.register_module()
+class CosineAnnealingLrAdditionalHook(LrAddtionalSchedulerHook):
+
+    def __init__(self, min_lr=None, min_lr_ratio=None, **kwargs):
+        assert (min_lr is None) ^ (min_lr_ratio is None)
+        self.min_lr = min_lr
+        self.min_lr_ratio = min_lr_ratio
+        super(CosineAnnealingLrAdditionalHook, self).__init__(**kwargs)
+
+    def get_lr(self, runner, base_lr):
+        if self.by_epoch:
+            progress = runner.epoch
+            max_progress = runner.max_epochs
+        else:
+            progress = runner.iter
+            max_progress = runner.max_iters
+
+        if self.min_lr_ratio is not None:
+            target_lr = base_lr * self.min_lr_ratio
+        else:
+            target_lr = self.min_lr
+        return annealing_cos(base_lr, target_lr, progress / max_progress)
+
+
+@HOOKS.register_module()
+class CosineRestartLrAdditionalHook(LrAddtionalSchedulerHook):
+    """Cosine annealing with restarts learning rate scheme.
+
+    Args:
+        periods (list[int]): Periods for each cosine annealing cycle.
+        restart_weights (list[float], optional): Restart weights at each
+            restart iteration. Default: [1].
+        min_lr (float, optional): The minimum lr. Default: None.
+        min_lr_ratio (float, optional): The ratio of minimum lr to the base
+            lr. Either `min_lr` or `min_lr_ratio` should be specified.
+            Default: None.
+    """
+
+    def __init__(self,
+                 periods,
+                 restart_weights=[1],
+                 min_lr=None,
+                 min_lr_ratio=None,
+                 **kwargs):
+        assert (min_lr is None) ^ (min_lr_ratio is None)
+        self.periods = periods
+        self.min_lr = min_lr
+        self.min_lr_ratio = min_lr_ratio
+        self.restart_weights = restart_weights
+        assert (len(self.periods) == len(self.restart_weights)
+                ), 'periods and restart_weights should have the same length.'
+        super(CosineRestartLrAdditionalHook, self).__init__(**kwargs)
+
+        self.cumulative_periods = [
+            sum(self.periods[0:i + 1]) for i in range(0, len(self.periods))
+        ]
+
+    def get_lr(self, runner, base_lr):
+        if self.by_epoch:
+            progress = runner.epoch
+        else:
+            progress = runner.iter
+
+        if self.min_lr_ratio is not None:
+            target_lr = base_lr * self.min_lr_ratio
+        else:
+            target_lr = self.min_lr
+
+        idx = get_position_from_periods(progress, self.cumulative_periods)
+        current_weight = self.restart_weights[idx]
+        nearest_restart = 0 if idx == 0 else self.cumulative_periods[idx - 1]
+        current_periods = self.periods[idx]
+
+        alpha = min((progress - nearest_restart) / current_periods, 1)
+        return annealing_cos(base_lr, target_lr, alpha, current_weight)
+
+
+def get_position_from_periods(iteration, cumulative_periods):
+    """Get the position from a period list.
+
+    It will return the index of the right-closest number in the period list.
+    For example, if cumulative_periods = [100, 200, 300, 400]:
+    if iteration == 50, return 0;
+    if iteration == 210, return 2;
+    if iteration == 300, return 2.
+
+    Args:
+        iteration (int): Current iteration.
+        cumulative_periods (list[int]): Cumulative period list.
+
+    Returns:
+        int: The position of the right-closest number in the period list.
+    """
+    for i, period in enumerate(cumulative_periods):
+        if iteration <= period:
+            return i
+    raise ValueError(f'Current iteration {iteration} exceeds '
+                     f'cumulative_periods {cumulative_periods}')
+
+
+@HOOKS.register_module()
+class CyclicLrAdditionalHook(LrAddtionalSchedulerHook):
+    """Cyclic LR Scheduler.
+
+    Implement the cyclical learning rate policy (CLR) described in
+    https://arxiv.org/pdf/1506.01186.pdf
+
+    Different from the original paper, we use cosine annealing rather than
+    the triangular policy inside a cycle. This improves the performance in
+    the 3D detection area.
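+
+    For example (illustrative numbers): with ``target_ratio=(10, 1e-4)`` and
+    ``step_ratio_up=0.4``, the LR is cosine-annealed from ``base_lr`` up to
+    ``10 * base_lr`` over the first 40% of each cycle, then back down to
+    ``1e-4 * base_lr`` over the remaining 60%.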
+
+    Attributes:
+        target_ratio (tuple[float]): Relative ratio of the highest LR and the
+            lowest LR to the initial LR.
+        cyclic_times (int): Number of cycles during training.
+        step_ratio_up (float): The ratio of the increasing process of LR in
+            the total cycle.
+        by_epoch (bool): Whether to update LR by epoch.
+    """
+
+    def __init__(self,
+                 by_epoch=False,
+                 target_ratio=(10, 1e-4),
+                 cyclic_times=1,
+                 step_ratio_up=0.4,
+                 **kwargs):
+        if isinstance(target_ratio, float):
+            target_ratio = (target_ratio, target_ratio / 1e5)
+        elif isinstance(target_ratio, tuple):
+            target_ratio = (target_ratio[0], target_ratio[0] / 1e5) \
+                if len(target_ratio) == 1 else target_ratio
+        elif isinstance(target_ratio, list):
+            target_ratio = (target_ratio[0], target_ratio[0] / 1e5) \
+                if len(target_ratio) == 1 else target_ratio
+        else:
+            raise ValueError('target_ratio should be either float '
+                             f'or tuple (list), got {type(target_ratio)}')
+
+        assert len(target_ratio) == 2, \
+            '"target_ratio" must be list or tuple of two floats'
+        assert 0 <= step_ratio_up < 1.0, \
+            '"step_ratio_up" must be in range [0,1)'
+
+        self.target_ratio = target_ratio
+        self.cyclic_times = cyclic_times
+        self.step_ratio_up = step_ratio_up
+        self.lr_phases = []  # init lr_phases
+
+        assert not by_epoch, \
+            'currently only support "by_epoch" = False'
+        super(CyclicLrAdditionalHook, self).__init__(**kwargs)
+
+    def before_run(self, runner):
+        super(CyclicLrAdditionalHook, self).before_run(runner)
+        # initiate lr_phases
+        # total lr_phases are separated as up and down
+        max_iter_per_phase = runner.max_iters // self.cyclic_times
+        iter_up_phase = int(self.step_ratio_up * max_iter_per_phase)
+        self.lr_phases.append(
+            [0, iter_up_phase, max_iter_per_phase, 1, self.target_ratio[0]])
+        self.lr_phases.append([
+            iter_up_phase, max_iter_per_phase, max_iter_per_phase,
+            self.target_ratio[0], self.target_ratio[1]
+        ])
+
+    def get_lr(self, runner, base_lr):
+        curr_iter = runner.iter
+        for (start_iter, end_iter, max_iter_per_phase, start_ratio,
+             end_ratio) in self.lr_phases:
+            curr_iter %= max_iter_per_phase
+            if start_iter <= curr_iter < end_iter:
+                progress = curr_iter - start_iter
+                return annealing_cos(base_lr * start_ratio,
+                                     base_lr * end_ratio,
+                                     progress / (end_iter - start_iter))
+
+
+def annealing_cos(start, end, factor, weight=1):
+    """Calculate annealing cos learning rate.
+
+    Cosine anneal from `weight * start + (1 - weight) * end` to `end` as
+    percentage goes from 0.0 to 1.0.
+
+    Args:
+        start (float): The starting learning rate of the cosine annealing.
+        end (float): The ending learning rate of the cosine annealing.
+        factor (float): The coefficient of `pi` when calculating the current
+            percentage. Range from 0.0 to 1.0.
+        weight (float, optional): The combination factor of `start` and `end`
+            when calculating the actual starting learning rate. Default to 1.
+    """
+    cos_out = cos(pi * factor) + 1
+    return end + 0.5 * weight * (start - end) * cos_out
+
+
+
+class CustomSchedulerHook(Hook):
+    """Custom Scheduler Hook.
+
+    Args:
+        attr_name (str): Name of the attribute to be adjusted.
+        attr_base (float): The initial value of the attribute.
+        by_epoch (bool): Attr changes epoch by epoch.
+        warmup (string): Type of warmup used. It can be None (use no
+            warmup), 'constant', 'linear' or 'exp'.
+        warmup_iters (int): The number of iterations or epochs that warmup
+            lasts.
+        warmup_ratio (float): Attr used at the beginning of warmup equals to
+            warmup_ratio * initial_attr.
+        warmup_by_epoch (bool): When warmup_by_epoch == True, warmup_iters
+            means the number of epochs that warmup lasts, otherwise means the
+            number of iterations that warmup lasts.
+    """
+
+    def __init__(self,
+                 attr_name="",
+                 attr_base=None,
+                 by_epoch=True,
+                 warmup=None,
+                 warmup_iters=0,
+                 warmup_ratio=0.1,
+                 warmup_by_epoch=False,
+                 **kwargs):
+        # validate the "warmup" argument
+        if warmup is not None:
+            if warmup not in ['constant', 'linear', 'exp']:
+                raise ValueError(
+                    f'"{warmup}" is not a supported type for warming up, valid'
+                    ' types are "constant", "linear" and "exp"')
+        if warmup is not None:
+            assert warmup_iters > 0, \
+                '"warmup_iters" must be a positive integer'
+            assert 0 < warmup_ratio <= 1.0, \
+                '"warmup_ratio" must be in range (0,1]'
+
+        # basic custom scheduler args
+        self.attr_name = attr_name
+        self.attr_base = attr_base  # initial attr for optional param groups
+        if attr_base is None or attr_name == "":
+            raise ValueError(
+                f'invalid attr_name="{attr_name}" or attr_base="{attr_base}"')
+        self.by_epoch = by_epoch
+        self.warmup = warmup
+        self.warmup_iters = warmup_iters
+        self.warmup_ratio = warmup_ratio
+        self.warmup_by_epoch = warmup_by_epoch
+
+        if self.warmup_by_epoch:
+            self.warmup_epochs = self.warmup_iters
+            self.warmup_iters = None
+        else:
+            self.warmup_epochs = None
+
+        self.regular_attr = attr_base  # expected attr if no warming up is performed
+
+    def _set_attr(self, runner, attr):
+        setattr(runner.model.module, self.attr_name, attr)
+
+    def get_attr(self, runner, base_attr):
+        raise NotImplementedError
+
+    def get_regular_attr(self, runner):
+        return self.get_attr(
+            runner, getattr(runner.model.module, self.attr_name))
+
+    def get_warmup_attr(self, cur_iters):
+        if self.warmup == 'constant':
+            warmup_attr = self.warmup_ratio * self.attr_base
+        elif self.warmup == 'linear':
+            k = (1 - cur_iters / self.warmup_iters) * (1 - self.warmup_ratio)
+            warmup_attr = (1 - k) * self.attr_base
+        elif self.warmup == 'exp':
+            k = self.warmup_ratio**(1 - cur_iters / self.warmup_iters)
+            warmup_attr = k * self.attr_base
+        return warmup_attr
+
+    def before_run(self, runner):
+        # notice: we can only get the attr_name attribute of model.module;
+        # we cannot adjust attributes of submodules, such as an attribute
+        # in runner.model.head.
+        assert hasattr(runner.model.module, self.attr_name), \
+            "The runner must have attribute:" + self.attr_name
+        attr = getattr(runner.model.module, self.attr_name)
+        assert isinstance(attr, float)
+
+    def before_train_epoch(self, runner):
+        if not self.by_epoch:
+            return
+        if self.warmup_by_epoch:
+            epoch_len = len(runner.data_loader)
+            self.warmup_iters = self.warmup_epochs * epoch_len
+
+        # self.regular_attr = self.get_regular_attr(runner)
+        self._set_attr(runner, self.regular_attr)
+
+    def before_train_iter(self, runner):
+        cur_iter = runner.iter
+        if not self.by_epoch:
+            if self.warmup is None or cur_iter > self.warmup_iters:
+                # using get_regular_attr() after finishing the warmup stage
+                self.regular_attr = self.get_regular_attr(runner)
+                self._set_attr(runner, self.regular_attr)
+            else:
+                warmup_attr = self.get_warmup_attr(cur_iter)
+                self._set_attr(runner, warmup_attr)
+        elif self.by_epoch:
+            if self.warmup is None or cur_iter > self.warmup_iters:
+                return
+            elif cur_iter == self.warmup_iters:
+                self._set_attr(runner, self.regular_attr)
+            else:
+                warmup_attr = self.get_warmup_attr(cur_iter)
+                self._set_attr(runner, warmup_attr)
+
+
+@HOOKS.register_module()
+class CustomFixedHook(CustomSchedulerHook):
+
+    def __init__(self, **kwargs):
+        super(CustomFixedHook, self).__init__(**kwargs)
+
+    def get_attr(self, runner, base_attr):
+        return base_attr
+
+
+@HOOKS.register_module()
+class CustomStepHook(CustomSchedulerHook):
+
+    def __init__(self, step, gamma=0.1, **kwargs):
+        assert isinstance(step, (list, int))
+        if isinstance(step, list):
+            for s in step:
+                assert isinstance(s, int) and s > 0
+        elif isinstance(step, int):
+            assert step > 0
+        else:
+            raise TypeError('"step" must be a list or integer')
+        self.step = step
+        self.gamma = gamma
+        super(CustomStepHook, self).__init__(**kwargs)
+
+    def get_attr(self, runner, base_attr):
+        progress = runner.epoch if self.by_epoch else runner.iter
+
+        if isinstance(self.step, int):
+            return base_attr * (self.gamma**(progress // self.step))
+
+        exp = len(self.step)
+        for i, s in enumerate(self.step):
+            if progress < s:
+                exp = i
+                break
+        return base_attr * self.gamma**exp
+
+
+@HOOKS.register_module()
+class CustomExpHook(CustomSchedulerHook):
+
+    def __init__(self, gamma, **kwargs):
+        self.gamma = gamma
+        super(CustomExpHook, self).__init__(**kwargs)
+
+    def get_attr(self, runner, base_attr):
+        progress = runner.epoch if self.by_epoch else runner.iter
+        return base_attr * self.gamma**progress
+
+
+@HOOKS.register_module()
+class CustomPolyHook(CustomSchedulerHook):
+
+    def __init__(self, power=1., min_attr=0., **kwargs):
+        self.power = power
+        self.min_attr = min_attr
+        super(CustomPolyHook, self).__init__(**kwargs)
+
+    def get_attr(self, runner, base_attr):
+        if self.by_epoch:
+            progress = runner.epoch
+            max_progress = runner.max_epochs
+        else:
+            progress = runner.iter
+            max_progress = runner.max_iters
+        coeff = (1 - progress / max_progress)**self.power
+        return (base_attr - self.min_attr) * coeff + self.min_attr
+
+
+@HOOKS.register_module()
+class CustomCosineAnnealingHook(CustomSchedulerHook):
+
+    def __init__(self, min_attr=None, min_attr_ratio=None, **kwargs):
+        assert (min_attr is None) ^ (min_attr_ratio is None)
+        self.min_attr = min_attr
+        self.min_attr_ratio = min_attr_ratio
+        super(CustomCosineAnnealingHook, self).__init__(**kwargs)
+
+    def get_attr(self, runner, base_attr):
+        if self.by_epoch:
+            progress = runner.epoch
+            max_progress = runner.max_epochs
+        else:
+            progress = runner.iter
+            max_progress = runner.max_iters
+
+        if self.min_attr_ratio is not None:
+            target_attr = base_attr * self.min_attr_ratio
+        else:
+            target_attr = self.min_attr
+        return annealing_cos(base_attr, target_attr, progress / max_progress)
diff --git a/openmixup/hooks/builder.py b/openmixup/hooks/builder.py
new file mode 100644
index 00000000..2d0ff0d0
--- /dev/null
+++ b/openmixup/hooks/builder.py
@@ -0,0 +1,113 @@
+import re
+import torch.distributed as dist
+from openmixup.utils import build_from_cfg, optimizers, print_log
+
+from mmcv.runner import obj_from_dict
+
+from .registry import HOOKS
+
+
+def build_hook(cfg, default_args=None):
+    return build_from_cfg(cfg, HOOKS, default_args)
+
+
+def build_addtional_scheduler(param_names, hook_cfg):
+    """Build an additional scheduler from configs.
+
+    Args:
+        param_names (list): Names of parameters in the model.
+        hook_cfg (dict): The config dict of the optimizer.
+
+    Returns:
+        obj: The constructed object.
+    """
+    hook_cfg = hook_cfg.copy()
+    paramwise_options = hook_cfg.pop('paramwise_options', None)
+    # you must use paramwise_options in optimizer_cfg
+    assert isinstance(paramwise_options, list)
+    addtional_indice = list()
+    for i, name in enumerate(param_names):
+        for regexp in paramwise_options:
+            if re.search(regexp, name):
+                # additional scheduler for selected params
+                addtional_indice.append(i)
+                if not dist.is_initialized() or dist.get_rank() == 0:
+                    print_log('optional_scheduler -- {}: {}'.format(name, 'lr'))
+    # build type
+    assert 'policy' in hook_cfg
+    policy_type = hook_cfg.pop('policy')
+    # if the type of policy is all in lower case, capitalize it
+    if policy_type == policy_type.lower():
+        policy_type = policy_type.title()
+    hook_cfg['type'] = policy_type + 'LrAdditionalHook'
+    # required args
+    hook_cfg['addtional_indice'] = addtional_indice
+    return build_hook(hook_cfg, dict(dist_mode=True))
+
+
+def build_optimizer(model, optimizer_cfg):
+    """Build optimizer from configs.
+
+    Args:
+        model (:obj:`nn.Module`): The model with parameters to be optimized.
+        optimizer_cfg (dict): The config dict of the optimizer.
+            Positional fields are:
+                - type: class name of the optimizer.
+                - lr: base learning rate.
+            Optional fields are:
+                - any arguments of the corresponding optimizer type, e.g.,
+                  weight_decay, momentum, etc.
+                - paramwise_options: a dict with regular expression as keys
+                  to match parameter names and a dict containing options as
+                  values. Options include 6 fields: lr, lr_mult, momentum,
+                  momentum_mult, weight_decay, weight_decay_mult.
+
+    Returns:
+        torch.optim.Optimizer: The initialized optimizer.
+
+    Example:
+        >>> model = torch.nn.modules.Conv1d(1, 1, 1)
+        >>> paramwise_options = {
+        >>>     '(bn|gn)(\d+)?.(weight|bias)': dict(weight_decay_mult=0.1),
+        >>>     '\Ahead.': dict(lr_mult=10, momentum=0)}
+        >>> optimizer_cfg = dict(type='SGD', lr=0.01, momentum=0.9,
+        >>>                      weight_decay=0.0001,
+        >>>                      paramwise_options=paramwise_options)
+        >>> optimizer = build_optimizer(model, optimizer_cfg)
+    """
+    if hasattr(model, 'module'):
+        model = model.module
+
+    optimizer_cfg = optimizer_cfg.copy()
+    paramwise_options = optimizer_cfg.pop('paramwise_options', None)
+    # if no paramwise option is specified, just use the global setting
+    if paramwise_options is None:
+        return obj_from_dict(optimizer_cfg, optimizers,
+                             dict(params=model.parameters()))
+    else:
+        assert isinstance(paramwise_options, dict)
+        params = []
+        for name, param in model.named_parameters():
+            param_group = {'params': [param]}
+            if not param.requires_grad:
+                params.append(param_group)
+                continue
+
+            for regexp, options in paramwise_options.items():
+                if re.search(regexp, name):
+                    for key, value in options.items():
+                        if key.endswith('_mult'):  # is a multiplier
+                            key = key[:-5]
+                            assert key in optimizer_cfg, \
+                                "{} not in optimizer_cfg".format(key)
+                            value = optimizer_cfg[key] * value
+                        param_group[key] = value
+                        if not dist.is_initialized() or dist.get_rank() == 0:
+                            print_log('paramwise_options -- {}: {}={}'.format(
+                                name, key, value))
+
+            # otherwise use the global settings
+            params.append(param_group)
+
+        optimizer_cls = getattr(optimizers, optimizer_cfg.pop('type'))
+        return optimizer_cls(params, **optimizer_cfg)
diff --git a/openmixup/hooks/byol_hook.py b/openmixup/hooks/byol_hook.py
new file mode 100644
index 00000000..09dd4815
--- /dev/null
+++ b/openmixup/hooks/byol_hook.py
@@ -0,0 +1,43 @@
+from math import cos, pi
+from mmcv.runner import Hook
+from mmcv.parallel import is_module_wrapper
+
+from .registry import HOOKS
+
+
+@HOOKS.register_module
+class BYOLHook(Hook):
+    """Hook for BYOL.
+
+    This hook includes momentum adjustment in BYOL following:
+        m = 1 - (1 - m_0) * (cos(pi * k / K) + 1) / 2,
+    where k is the current step and K is the total number of steps.
+
+    Args:
+        end_momentum (float): The final momentum coefficient
+            for the target network. Default: 1.
+    """
+
+    def __init__(self, end_momentum=1., update_interval=1, **kwargs):
+        self.end_momentum = end_momentum
+        self.update_interval = update_interval
+
+    def before_train_iter(self, runner):
+        assert hasattr(runner.model.module, 'momentum'), \
+            "The runner must have attribute \"momentum\" in BYOLHook."
+        assert hasattr(runner.model.module, 'base_momentum'), \
+            "The runner must have attribute \"base_momentum\" in BYOLHook."
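+        # Illustrative numbers for the schedule above: with base_momentum
+        # m_0 = 0.996 and end_momentum = 1.0, m starts at 0.996 at iter 0
+        # (cos(0) = 1) and rises to 1.0 at the final iter (cos(pi) = -1),
+        # following m = 1 - (1 - m_0) * (cos(pi * k / K) + 1) / 2.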
+        if self.every_n_iters(runner, self.update_interval):
+            cur_iter = runner.iter
+            max_iter = runner.max_iters
+            base_m = runner.model.module.base_momentum
+            m = self.end_momentum - (self.end_momentum - base_m) * (
+                cos(pi * cur_iter / float(max_iter)) + 1) / 2
+            runner.model.module.momentum = m
+
+    def after_train_iter(self, runner):
+        if self.every_n_iters(runner, self.update_interval):
+            if is_module_wrapper(runner.model):
+                runner.model.module.momentum_update()
+            else:
+                runner.model.momentum_update()
diff --git a/openmixup/hooks/deepcluster_automix_hook.py b/openmixup/hooks/deepcluster_automix_hook.py
new file mode 100644
index 00000000..b94a3b52
--- /dev/null
+++ b/openmixup/hooks/deepcluster_automix_hook.py
@@ -0,0 +1,162 @@
+import os
+import numpy as np
+
+from mmcv.runner import Hook
+
+import torch
+import torch.distributed as dist
+
+from openmixup.third_party import clustering as _clustering
+from openmixup.utils import print_log
+from .registry import HOOKS
+from .extractor import Extractor
+
+
+@HOOKS.register_module
+class DeepClusterAutoMixHook(Hook):
+    """Hook for AutoMix SSL (from DeepCluster).
+
+    Args:
+        extractor (dict): Config dict for feature extraction.
+        clustering (dict): Config dict that specifies the clustering algorithm.
+        unif_sampling (bool): Whether to apply uniform sampling.
+        reweight (bool): Whether to apply loss re-weighting.
+        reweight_pow (float): The power of re-weighting.
+        init_memory (bool): Whether to initialize memory banks for ODC.
+            Default: False.
+        init_params (bool): Whether to re-initialize the cluster head and the
+            mix block. Default: False. (init only once)
+        initial (bool): Whether to call the hook initially. Default: True.
+        interval (int): Frequency of epochs to call the hook. Default: 1.
+        dist_mode (bool): Use distributed training or not. Default: True.
+        save_cluster (bool): Whether to save clustering results. Default: True.
+        pretrained_labels (str): Whether to use predefined clustering labels,
+            which are generated by DeepCluster or ODC. Default: None.
+        data_loaders (DataLoader): A PyTorch dataloader. Default: None.
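+
+    Example:
+        An illustrative config sketch (field values are made up; 'Kmeans'
+        assumes such an algorithm exists in `third_party.clustering`, and
+        `data_loaders` is passed in by the training script)::
+
+            dict(type='DeepClusterAutoMixHook',
+                 extractor=dict(dataset=data_cfg, imgs_per_gpu=128,
+                                workers_per_gpu=4),
+                 clustering=dict(type='Kmeans', k=100),
+                 unif_sampling=True, reweight=False, reweight_pow=0.5)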
+ """ + + def __init__( + self, + extractor, + clustering, + unif_sampling, + reweight, + reweight_pow, + init_memory=False, # for ODC + init_params=False, # restart params + initial=True, + interval=1, + dist_mode=True, + save_cluster=True, + pretrained_labels=None, + data_loaders=None): + self.extractor = Extractor(dist_mode=dist_mode, **extractor) + self.clustering_type = clustering.pop('type') + self.clustering_cfg = clustering + self.unif_sampling = unif_sampling + self.reweight = reweight + self.reweight_pow = reweight_pow + self.init_memory = init_memory + self.init_params = init_params + self.initial = initial + self.interval = interval + self.dist_mode = dist_mode + self.save_cluster = save_cluster + self.pretrained_labels = pretrained_labels + self.data_loaders = data_loaders + assert data_loaders is not None + + def before_run(self, runner): + if self.initial: + self.deepcluster(runner) + + def after_train_epoch(self, runner): + if not self.every_n_epochs(runner, self.interval): + return + self.deepcluster(runner) + + def deepcluster(self, runner): + # step 1: get features + runner.model.eval() + if self.init_memory or self.pretrained_labels is None: + features = self.extractor(runner) + runner.model.train() + + # step 2: get labels + if not self.dist_mode or (self.dist_mode and runner.rank == 0): + clustering_algo = _clustering.__dict__[self.clustering_type]( + **self.clustering_cfg) + # useing pretrained labels + if self.pretrained_labels is not None: + assert os.path.exists(self.pretrained_labels), \ + "invalid path={}".format(self.pretrained_labels) + new_labels = np.load(self.pretrained_labels) + else: + # Features are normalized during clustering + clustering_algo.cluster(features, verbose=True) + assert isinstance(clustering_algo.labels, np.ndarray) + new_labels = clustering_algo.labels.astype(np.int64) + # mkdir and save + if self.save_cluster and runner.rank == 0: + if not os.path.exists("{}/cluster".format(runner.work_dir)): + try: + os.mkdir("{}/cluster".format(runner.work_dir)) + except: + print("mkdir error: {}/cluster".format(runner.work_dir)) + # save cluster labels + np.save( + "{}/cluster/cluster_epoch_{}.npy".format(runner.work_dir, + runner.epoch), new_labels) + self.evaluate(runner, new_labels) + else: + new_labels = np.zeros((len(self.data_loaders[0].dataset), ), + dtype=np.int64) + + if self.dist_mode: + new_labels_tensor = torch.from_numpy(new_labels).cuda() + dist.broadcast(new_labels_tensor, 0) + new_labels = new_labels_tensor.cpu().numpy() + new_labels_list = list(new_labels) + + # step 3: assign new labels + self.data_loaders[0].dataset.assign_labels(new_labels_list) + + # step 4 (a): set uniform sampler + if self.unif_sampling: + self.data_loaders[0].sampler.set_uniform_indices( + new_labels_list, self.clustering_cfg.k) + + # step 4 (b): set loss reweight + if self.reweight: + runner.model.module.set_reweight(new_labels, self.reweight_pow) + + # step 5: randomize cluster head and mixblock + if self.init_params: + runner.model.module.mix_block.init_weights(init_linear='normal') + runner.model.module.head_clst.init_weights(init_linear='normal') + for param_q, param_k in zip(runner.model.module.head_clst.parameters(), + runner.model.module.head_clst_off.parameters()): + param_k.data.copy_(param_q.data) + + if self.dist_mode: + for p in runner.model.module.mix_block.state_dict().values(): + dist.broadcast(p, 0) + for p in runner.model.module.head_clst.state_dict().values(): + dist.broadcast(p, 0) + for p in 
+            for p in runner.model.module.head_clst_off.state_dict().values():
+                dist.broadcast(p, 0)
+
+        # step 6: init memory for ODC
+        if self.init_memory:
+            runner.model.module.memory_bank.init_memory(features, new_labels)
+
+    def evaluate(self, runner, new_labels):
+        hist = np.bincount(new_labels, minlength=self.clustering_cfg.k)
+        empty_cls = (hist == 0).sum()
+        minimal_cls_size, maximal_cls_size = hist.min(), hist.max()
+        if runner.rank == 0:
+            print_log(
+                "cluster empty_num: {}\tmin_cluster: {}\tmax_cluster: {}".format(
+                    empty_cls.item(), minimal_cls_size.item(),
+                    maximal_cls_size.item()),
+                logger='root')
diff --git a/openmixup/hooks/deepcluster_hook.py b/openmixup/hooks/deepcluster_hook.py
new file mode 100644
index 00000000..e29b3e2b
--- /dev/null
+++ b/openmixup/hooks/deepcluster_hook.py
@@ -0,0 +1,135 @@
+import os
+import numpy as np
+
+from mmcv.runner import Hook
+
+import torch
+import torch.distributed as dist
+
+from openmixup.third_party import clustering as _clustering
+from openmixup.utils import print_log
+from .registry import HOOKS
+from .extractor import Extractor
+
+
+@HOOKS.register_module
+class DeepClusterHook(Hook):
+    """Hook for DeepCluster.
+
+    Args:
+        extractor (dict): Config dict for feature extraction.
+        clustering (dict): Config dict that specifies the clustering algorithm.
+        unif_sampling (bool): Whether to apply uniform sampling.
+        reweight (bool): Whether to apply loss re-weighting.
+        reweight_pow (float): The power of re-weighting.
+        init_memory (bool): Whether to initialize memory banks for ODC.
+            Default: False.
+        initial (bool): Whether to call the hook initially. Default: True.
+        interval (int): Frequency of epochs to call the hook. Default: 1.
+        dist_mode (bool): Use distributed training or not. Default: True.
+        data_loaders (DataLoader): A PyTorch dataloader. Default: None.
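+
+    Note:
+        On each call, the hook (1) extracts features, (2) clusters them into
+        ``clustering.k`` pseudo-classes on rank 0 and broadcasts the labels,
+        (3) re-assigns the dataset labels, (4) optionally resets the uniform
+        sampler and the loss re-weighting, and (5) re-initializes the
+        classifier head (plus the ODC memory bank when ``init_memory=True``).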
+ """ + + def __init__( + self, + extractor, + clustering, + unif_sampling, + reweight, + reweight_pow, + init_memory=False, # for ODC + initial=True, + interval=1, + dist_mode=True, + save_cluster=True, + data_loaders=None): + self.extractor = Extractor(dist_mode=dist_mode, **extractor) + self.clustering_type = clustering.pop('type') + self.clustering_cfg = clustering + self.unif_sampling = unif_sampling + self.reweight = reweight + self.reweight_pow = reweight_pow + self.init_memory = init_memory + self.initial = initial + self.interval = interval + self.dist_mode = dist_mode + self.save_cluster = save_cluster + self.data_loaders = data_loaders + + def before_run(self, runner): + if self.initial: + self.deepcluster(runner) + + def after_train_epoch(self, runner): + if not self.every_n_epochs(runner, self.interval): + return + self.deepcluster(runner) + + def deepcluster(self, runner): + # step 1: get features + runner.model.eval() + features = self.extractor(runner) + runner.model.train() + + # step 2: get labels + if not self.dist_mode or (self.dist_mode and runner.rank == 0): + clustering_algo = _clustering.__dict__[self.clustering_type]( + **self.clustering_cfg) + # Features are normalized during clustering + clustering_algo.cluster(features, verbose=True) + assert isinstance(clustering_algo.labels, np.ndarray) + new_labels = clustering_algo.labels.astype(np.int64) + # mkdir and save + if self.save_cluster: + if not os.path.exists("{}/cluster".format(runner.work_dir)): + try: + os.mkdir("{}/cluster".format(runner.work_dir)) + except: + print("mkdir error: {}/cluster".format(runner.work_dir)) + # save cluster labels + np.save( + "{}/cluster/cluster_epoch_{}.npy".format(runner.work_dir, + runner.epoch), new_labels) + self.evaluate(runner, new_labels) + else: + new_labels = np.zeros((len(self.data_loaders[0].dataset), ), + dtype=np.int64) + + if self.dist_mode: + new_labels_tensor = torch.from_numpy(new_labels).cuda() + dist.broadcast(new_labels_tensor, 0) + new_labels = new_labels_tensor.cpu().numpy() + new_labels_list = list(new_labels) + + # step 3: assign new labels + self.data_loaders[0].dataset.assign_labels(new_labels_list) + + # step 4 (a): set uniform sampler + if self.unif_sampling: + self.data_loaders[0].sampler.set_uniform_indices( + new_labels_list, self.clustering_cfg.k) + + # step 4 (b): set loss reweight + if self.reweight: + runner.model.module.set_reweight(new_labels, self.reweight_pow) + + # step 5: randomize classifier + runner.model.module.head.init_weights(init_linear='normal') + if self.dist_mode: + for p in runner.model.module.head.state_dict().values(): + dist.broadcast(p, 0) + + # step 6: init memory for ODC + if self.init_memory: + runner.model.module.memory_bank.init_memory(features, new_labels) + + def evaluate(self, runner, new_labels): + hist = np.bincount(new_labels, minlength=self.clustering_cfg.k) + empty_cls = (hist == 0).sum() + minimal_cls_size, maximal_cls_size = hist.min(), hist.max() + if runner.rank == 0: + print_log( + "empty_num: {}\tmin_cluster: {}\tmax_cluster:{}".format( + empty_cls.item(), minimal_cls_size.item(), + maximal_cls_size.item()), + logger='root') diff --git a/openmixup/hooks/extractor.py b/openmixup/hooks/extractor.py new file mode 100644 index 00000000..c5d8e3bb --- /dev/null +++ b/openmixup/hooks/extractor.py @@ -0,0 +1,61 @@ +import torch.nn as nn +from torch.utils.data import Dataset + +from openmixup.utils import nondist_forward_collect, dist_forward_collect + + +class Extractor(object): + """Feature extractor. 
+
+    Args:
+        dataset (Dataset | dict): A PyTorch dataset or dict that indicates
+            the dataset.
+        imgs_per_gpu (int): Number of images on each GPU, i.e., batch size of
+            each GPU.
+        workers_per_gpu (int): How many subprocesses to use for data loading
+            for each GPU.
+        dist_mode (bool): Use distributed extraction or not. Default: False.
+    """
+
+    def __init__(self,
+                 dataset,
+                 imgs_per_gpu,
+                 workers_per_gpu,
+                 dist_mode=False):
+        from openmixup import datasets
+        if isinstance(dataset, Dataset):
+            self.dataset = dataset
+        elif isinstance(dataset, dict):
+            self.dataset = datasets.build_dataset(dataset)
+        else:
+            raise TypeError(
+                'dataset must be a Dataset object or a dict, not {}'.format(
+                    type(dataset)))
+        self.data_loader = datasets.build_dataloader(
+            self.dataset,
+            imgs_per_gpu,
+            workers_per_gpu,
+            dist=dist_mode,
+            shuffle=False)
+        self.dist_mode = dist_mode
+        self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
+
+    def _forward_func(self, runner, **x):
+        backbone_feat = runner.model(mode='extract', **x)
+        last_layer_feat = runner.model.module.neck([backbone_feat[-1]])[0]
+        last_layer_feat = last_layer_feat.view(last_layer_feat.size(0), -1)
+        return dict(feature=last_layer_feat.cpu())
+
+    def __call__(self, runner):
+        func = lambda **x: self._forward_func(runner, **x)
+        if self.dist_mode:
+            feats = dist_forward_collect(
+                func,
+                self.data_loader,
+                runner.rank,
+                len(self.dataset),
+                ret_rank=-1)['feature']  # NxD
+        else:
+            feats = nondist_forward_collect(func, self.data_loader,
+                                            len(self.dataset))['feature']
+        return feats
diff --git a/openmixup/hooks/momentum_hook.py b/openmixup/hooks/momentum_hook.py
new file mode 100644
index 00000000..09f3b735
--- /dev/null
+++ b/openmixup/hooks/momentum_hook.py
@@ -0,0 +1,261 @@
+from math import cos, pi
+from mmcv.runner import Hook
+
+from .registry import HOOKS
+
+
+@HOOKS.register_module
+class CosineHook(Hook):
+    """Hook for Momentum update: Cosine.
+
+    This hook includes momentum adjustment with a cosine scheduler:
+        m = 1 - (1 - m_0) * (cos(pi * k / K) + 1) / 2,
+    where k is the current step and K is the max number of adjusting steps.
+
+    Args:
+        end_momentum (float): The final momentum coefficient for the target
+            network. Default: 1.
+        adjust_scope (float): Ranging from (0, 1], only adjust momentum in
+            this scope. Default: 1.0.
+        restart_step (int): Set the momentum to 0 when hitting the
+            restart_step (by interval), i.e., cur_iter % restart_step == 0.
+            Default: 1e11 (clamped to 1e10, i.e., never restart).
+    """
+
+    def __init__(self,
+                 end_momentum=1.,
+                 adjust_scope=1.,
+                 restart_step=1e11,
+                 update_interval=1, **kwargs):
+        self.end_momentum = end_momentum
+        self.adjust_scope = adjust_scope
+        self.update_interval = update_interval
+        self.restart_step = int(min(max(restart_step, 1), 1e10))
+        assert adjust_scope >= 0.
+
+    def before_train_iter(self, runner):
+        assert hasattr(runner.model.module, 'momentum'), \
+            "The runner must have attribute \"momentum\" in CosineHook."
+        assert hasattr(runner.model.module, 'base_momentum'), \
+            "The runner must have attribute \"base_momentum\" in CosineHook."
+        if self.every_n_iters(runner, self.update_interval):
+            cur_iter = runner.iter
+            if self.adjust_scope < 1:
+                max_iter = int(runner.max_iters * self.adjust_scope)
+            else:
+                max_iter = runner.max_iters
+            if cur_iter <= max_iter:
+                if cur_iter % self.restart_step == 0:
+                    m = 0
+                else:
+                    base_m = runner.model.module.base_momentum
+                    m = self.end_momentum - (self.end_momentum - base_m) * (
+                        cos(pi * cur_iter / float(max_iter)) + 1) / 2
+                runner.model.module.momentum = m
+
+
+@HOOKS.register_module
+class StepHook(Hook):
+    """Hook for Momentum update: Step.
+
+    This hook includes momentum adjustment with a step scheduler.
+
+    Args:
+        step (list): The milestones (as fractions of max_iter) for the target
+            network. Default: [0.6, 0.9].
+        gamma (float): Decay factor for each step. Default: 0.1.
+        adjust_scope (float): Ranging from (0, 1], only adjust momentum in
+            this scope. Default: 1.0.
+        restart_step (int): Set the momentum to 0 when hitting the
+            restart_step (by interval), i.e., cur_iter % restart_step == 0.
+            Default: 1e11 (clamped into [1, 1e10], i.e., never restart).
+    """
+
+    def __init__(self,
+                 step=[0.6, 0.9],
+                 gamma=0.1,
+                 adjust_scope=1.,
+                 restart_step=1e11,
+                 update_interval=1, **kwargs):
+        self.step = step
+        self.gamma = gamma
+        self.adjust_scope = adjust_scope
+        self.restart_step = int(min(max(restart_step, 1), 1e10))
+        self.update_interval = update_interval
+        assert 0 <= adjust_scope and 0 < gamma < 1
+
+    def before_train_iter(self, runner):
+        assert hasattr(runner.model.module, 'momentum'), \
+            "The runner must have attribute \"momentum\" in StepHook."
+        assert hasattr(runner.model.module, 'base_momentum'), \
+            "The runner must have attribute \"base_momentum\" in StepHook."
+        if self.every_n_iters(runner, self.update_interval):
+            cur_iter = runner.iter
+            if self.adjust_scope < 1:
+                max_iter = int(runner.max_iters * self.adjust_scope)
+            else:
+                max_iter = runner.max_iters
+            if cur_iter <= max_iter:
+                if cur_iter % self.restart_step == 0:
+                    runner.model.module.momentum = 0
+                else:
+                    base_m = runner.model.module.base_momentum
+                    for i in range(len(self.step)):
+                        if int(self.step[i] * max_iter) >= cur_iter:
+                            m = base_m * (1. - pow(self.gamma, i + 1))
+                            runner.model.module.momentum = m
+                            break
+
+
+@HOOKS.register_module
+class CosineScheduleHook(Hook):
+    """Hook for Momentum update: Cosine.
+
+    This hook includes momentum adjustment with a cosine scheduler:
+        m = 1 - (1 - m_0) * (cos(pi * k / K) + 1) / 2,
+        k: current step, K: max adjust steps.
+
+    Args:
+        end_momentum (float): The final momentum coefficient for the target
+            network. Default: 1.
+        adjust_scope (list[float]): Only adjust momentum within the
+            [adjust_scope[0], adjust_scope[1]] fraction of total iterations.
+            Default: [0, 1].
+        warming_up (string): Warm-up mode before adjust_scope[0], going from
+            end_momentum to base_momentum, chosen from {"linear", "constant"}.
+            Default: "linear".
+        restart_step (int): Set the momentum to 0 when hitting the
+            restart_step (by interval), i.e., cur_iter % restart_step == 0.
+            Default: 1e11 (clamped into [1, 1e10], i.e., never restart).
+    """
+
+    def __init__(self,
+                 end_momentum=1.,
+                 adjust_scope=[0, 1],
+                 warming_up="linear",
+                 restart_step=1e11,
+                 update_interval=1, **kwargs):
+        self.end_momentum = end_momentum
+        self.adjust_scope = adjust_scope
+        self.warming_up = warming_up
+        self.restart_step = int(min(max(restart_step, 1), 1e10))
+        self.update_interval = update_interval
+        assert len(adjust_scope) == 2 and adjust_scope[0] <= adjust_scope[1]
+
+    def before_train_iter(self, runner):
+        assert hasattr(runner.model.module, 'momentum'), \
+            "The runner must have attribute \"momentum\" in CosineScheduleHook."
+        assert hasattr(runner.model.module, 'base_momentum'), \
+            "The runner must have attribute \"base_momentum\" in CosineScheduleHook."
+        if self.every_n_iters(runner, self.update_interval):
+            cur_iter = runner.iter
+            base_m = runner.model.module.base_momentum
+            assert base_m <= self.end_momentum
+            if self.adjust_scope[1] < 1:
+                max_iter = int(runner.max_iters * self.adjust_scope[1])
+            else:
+                max_iter = runner.max_iters
+            if self.adjust_scope[0] > 0:
+                min_iter = int(runner.max_iters * self.adjust_scope[0])
+            else:
+                min_iter = 0
+
+            if min_iter <= cur_iter <= max_iter:
+                if cur_iter % self.restart_step == 0:
+                    m = 0
+                else:
+                    m = self.end_momentum - (self.end_momentum - base_m) * (
+                        cos(pi * cur_iter / float(max_iter)) + 1) / 2
+                runner.model.module.momentum = m
+            elif cur_iter < min_iter:  # warm up from end_m to base_m
+                if self.warming_up == "linear":
+                    m = self.end_momentum - (self.end_momentum - base_m) * (
+                        (min_iter - cur_iter) / min_iter)
+                    runner.model.module.momentum = m
+                elif self.warming_up == "constant":
+                    runner.model.module.momentum = base_m
+                else:
+                    assert self.warming_up in ["linear", "constant"]
+
+
+@HOOKS.register_module
+class StepScheduleHook(Hook):
+    """Hook for Momentum update: Step.
+
+    This hook includes momentum adjustment with a step scheduler.
+
+    Args:
+        end_momentum (float): The final momentum coefficient for the
+            target network. Default: 1.
+        step (list): The milestones (as fractions of max_iter) for the target
+            network. Default: [0.6, 0.9].
+        gamma (float): Decay factor for each step. Default: 0.1.
+        adjust_scope (list[float]): Only adjust momentum within the
+            [adjust_scope[0], adjust_scope[1]] fraction of total iterations.
+            Default: [0, 1].
+        warming_up (string): Warm-up mode before adjust_scope[0], going from
+            end_momentum to base_momentum, chosen from {"linear", "constant"}.
+            Default: "linear".
+        restart_step (int): Set the momentum to 0 when hitting the
+            restart_step (by interval), i.e., cur_iter % restart_step == 0.
+            Default: 1e11 (clamped into [1, 1e10], i.e., never restart).
+    """
+
+    def __init__(self,
+                 end_momentum=1.,
+                 step=[0.6, 0.9],
+                 gamma=0.1,
+                 adjust_scope=[0, 1],
+                 warming_up="linear",
+                 restart_step=1e11,
+                 update_interval=1, **kwargs):
+        self.end_momentum = end_momentum
+        self.step = step
+        self.gamma = gamma
+        self.adjust_scope = adjust_scope
+        self.warming_up = warming_up
+        self.restart_step = int(min(max(restart_step, 1), 1e10))
+        self.update_interval = update_interval
+        assert len(adjust_scope) == 2 and adjust_scope[0] <= adjust_scope[1]
+        assert 0 < gamma < 1
+
+    def before_train_iter(self, runner):
+        assert hasattr(runner.model.module, 'momentum'), \
+            "The runner must have attribute \"momentum\" in StepScheduleHook."
+        assert hasattr(runner.model.module, 'base_momentum'), \
+            "The runner must have attribute \"base_momentum\" in StepScheduleHook."
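+        # Sketch of the behavior (values are illustrative): with
+        # adjust_scope=[0.1, 1.0] the momentum is first warmed up from
+        # end_momentum to base_momentum over the first 10% of iterations;
+        # afterwards each milestone i in ``step`` sets
+        # m = base_m * (end_momentum - gamma^(i+1)).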
+ if self.every_n_iters(runner, self.update_interval): + cur_iter = runner.iter + base_m = runner.model.module.base_momentum + assert base_m < self.end_momentum + if self.adjust_scope[1] < 1: + max_iter = int(runner.max_iters * self.adjust_scope[1]) + else: + max_iter = runner.max_iters + if self.adjust_scope[0] > 0: + min_iter = int(runner.max_iters * self.adjust_scope[0]) + else: + min_iter = 0 + + if min_iter <= cur_iter and cur_iter <= max_iter: + if cur_iter % self.restart_step == 0: + runner.model.module.momentum = 0 + else: + base_m = runner.model.module.base_momentum + for i in range(len(self.step)): + if int(self.step[i] * max_iter) >= cur_iter: + m = base_m * (self.end_momentum - pow(self.gamma, i+1)) + runner.model.module.momentum = m + break + else: + if cur_iter < min_iter: # end_m to base_m + if self.warming_up == "linear": + m = self.end_momentum - (self.end_momentum - base_m) * ( + (min_iter - cur_iter) / min_iter) + runner.model.module.momentum = m + elif self.warming_up == "constant": + runner.model.module.momentum = base_m + else: + assert self.warming_up in ["linear", "constant"] + else: + pass diff --git a/openmixup/hooks/odc_hook.py b/openmixup/hooks/odc_hook.py new file mode 100644 index 00000000..cb11828f --- /dev/null +++ b/openmixup/hooks/odc_hook.py @@ -0,0 +1,90 @@ +import os +import numpy as np + +from mmcv.runner import Hook + +from openmixup.utils import print_log +from .registry import HOOKS + + +@HOOKS.register_module +class ODCHook(Hook): + """Hook for ODC. + + Args: + centroids_update_interval (int): Frequency of iterations + to update centroids. + deal_with_small_clusters_interval (int): Frequency of iterations + to deal with small clusters. + evaluate_interval (int): Frequency of iterations to evaluate clusters. + reweight (bool): Whether to perform loss re-weighting. + reweight_pow (float): The power of re-weighting. + save_cluster (bool): Whether to save cluster labels every 10 epochs. + dist_mode (bool): Use distributed training or not. Default: True. 
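+
+    Example (an illustrative config sketch; the interval values are assumptions):
+        >>> hook = ODCHook(
+        ...     centroids_update_interval=10,
+        ...     deal_with_small_clusters_interval=20,
+        ...     evaluate_interval=50,
+        ...     reweight=True,
+        ...     reweight_pow=0.5)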
+ """ + + def __init__(self, + centroids_update_interval, + deal_with_small_clusters_interval, + evaluate_interval, + reweight, + reweight_pow, + save_cluster=True, + dist_mode=True): + assert dist_mode, "non-dist mode is not implemented" + self.centroids_update_interval = centroids_update_interval + self.deal_with_small_clusters_interval = \ + deal_with_small_clusters_interval + self.evaluate_interval = evaluate_interval + self.reweight = reweight + self.reweight_pow = reweight_pow + self.save_cluster = save_cluster + + def after_train_iter(self, runner): + # centroids update + if self.every_n_iters(runner, self.centroids_update_interval): + runner.model.module.memory_bank.update_centroids_memory() + + # deal with small clusters + if self.every_n_iters(runner, self.deal_with_small_clusters_interval): + runner.model.module.memory_bank.deal_with_small_clusters() + + # reweight + runner.model.module.set_reweight() + + # evaluate + if self.every_n_iters(runner, self.evaluate_interval): + new_labels = runner.model.module.memory_bank.label_bank + if new_labels.is_cuda: + new_labels = new_labels.cpu() + self.evaluate(runner, new_labels.numpy()) + + def after_train_epoch(self, runner): + # save cluster + if self.every_n_epochs(runner, 10) and runner.rank == 0: + new_labels = runner.model.module.memory_bank.label_bank + if new_labels.is_cuda: + new_labels = new_labels.cpu() + # mkdir and save + if self.save_cluster: + if not os.path.exists("{}/cluster".format(runner.work_dir)): + try: + os.mkdir("{}/cluster".format(runner.work_dir)) + except: + print("mkdir error: {}/cluster".format(runner.work_dir)) + np.save( + "{}/cluster/cluster_epoch_{}.npy".format(runner.work_dir, + runner.epoch), + new_labels.numpy()) + + def evaluate(self, runner, new_labels): + hist = np.bincount( + new_labels, minlength=runner.model.module.memory_bank.num_classes) + empty_cls = (hist == 0).sum() + minimal_cls_size, maximal_cls_size = hist.min(), hist.max() + if runner.rank == 0: + print_log( + "empty_num: {}\tmin_cluster: {}\tmax_cluster:{}".format( + empty_cls.item(), minimal_cls_size.item(), + maximal_cls_size.item()), + logger='root') diff --git a/openmixup/hooks/optimizer_hook.py b/openmixup/hooks/optimizer_hook.py new file mode 100644 index 00000000..4658da5a --- /dev/null +++ b/openmixup/hooks/optimizer_hook.py @@ -0,0 +1,73 @@ +import re +from mmcv.runner import OptimizerHook +try: + import apex + has_apex = True +except: + has_apex = False + print('Optimizer: apex is not installed') + + +class DistOptimizerHook(OptimizerHook): + """Optimizer hook for distributed training. + + Args: + update_interval (int): Frequency of epochs to call the hook. Default: 1. + cancel_grad (dict): Config dict for cancelling gradients for selected parameters, + e.g., cancel_grad=dict(regexp=cancel_iter), 'regexp' stands for param_name. + Default: None. + grad_clip (dict): Gradient clip tricks. Default: None. + use_fp16 (bool): Whether to use fp16 training skills. Defalut: False. 
+ """ + + def __init__(self, + update_interval=1, + cancel_grad=None, + grad_clip=None, + coalesce=True, + bucket_size_mb=-1, + use_fp16=False): + self.grad_clip = grad_clip + self.coalesce = coalesce + self.bucket_size_mb = bucket_size_mb + self.update_interval = update_interval + self.use_fp16 = use_fp16 + # basic args + if use_fp16: + assert has_apex + if cancel_grad is not None: + assert isinstance(cancel_grad, dict) + self.cancel_grad = cancel_grad + else: + self.cancel_grad = None + + def before_run(self, runner): + runner.optimizer.zero_grad() + + def after_train_iter(self, runner): + runner.outputs['loss'] /= self.update_interval + if self.use_fp16: + with apex.amp.scale_loss(runner.outputs['loss'], runner.optimizer) as scaled_loss: + scaled_loss.backward() + else: + runner.outputs['loss'].backward() + if self.every_n_iters(runner, self.update_interval): + # clip gradients + if self.grad_clip is not None: + self.clip_grads(runner.model.parameters()) + # cancel gradients of selected params + if self.cancel_grad is not None: + cur_iter = runner.iter + cur_dict = dict() + for name, p in runner.model.named_parameters(): + for regexp, cancel_iter in self.cancel_grad.items(): + if cancel_iter > cur_iter: + if re.search(regexp, name): + p.grad = None + cur_dict[regexp] = cancel_iter + self.cancel_grad = cur_dict + if not self.cancel_grad: + self.cancel_grad = None + # update + runner.optimizer.step() + runner.optimizer.zero_grad() diff --git a/openmixup/hooks/registry.py b/openmixup/hooks/registry.py new file mode 100644 index 00000000..6772c923 --- /dev/null +++ b/openmixup/hooks/registry.py @@ -0,0 +1,3 @@ +from openmixup.utils import Registry + +HOOKS = Registry('hook') diff --git a/openmixup/hooks/save_hook.py b/openmixup/hooks/save_hook.py new file mode 100644 index 00000000..2eeb9b06 --- /dev/null +++ b/openmixup/hooks/save_hook.py @@ -0,0 +1,39 @@ +import os +from mmcv.runner import Hook + +from .registry import HOOKS + + +@HOOKS.register_module +class SAVEHook(Hook): + """Hook for saving. + + Args: + save_interval (float): Default: 1. + iter_per_epoch (int): The iter number of each epoch. 
+ """ + + def __init__(self, save_interval=1., iter_per_epoch=500, update_interval=1, **kwargs): + self.save_interval = save_interval + self.iter_per_epoch = iter_per_epoch + self.update_interval = update_interval + + def before_train_iter(self, runner): + if self.every_n_iters(runner, self.update_interval): + cur_iter = runner.iter + if cur_iter % self.save_interval == 0: + runner.model.module.save = True + runner.model.module.save_name = '{}/epoch_{}.png'.format( + runner.work_dir, int(cur_iter/self.iter_per_epoch)) + save_name = os.path.join(runner.work_dir, "MixedSamples") + if not os.path.exists(save_name): + try: + os.mkdir(save_name) + except: + if not os.path.exists(save_name): + save_name = runner.work_dir + print("mkdir error") + runner.model.module.save_name = os.path.join( + save_name, "epoch_{}.png".format(int(cur_iter/self.iter_per_epoch))) + else: + runner.model.module.save = False \ No newline at end of file diff --git a/openmixup/hooks/validate_hook.py b/openmixup/hooks/validate_hook.py new file mode 100644 index 00000000..19452cfc --- /dev/null +++ b/openmixup/hooks/validate_hook.py @@ -0,0 +1,93 @@ +from mmcv.runner import Hook + +import numpy as np +import torch +from torch.utils.data import Dataset + +from openmixup.utils import nondist_forward_collect, dist_forward_collect +from .registry import HOOKS + + +@HOOKS.register_module +class ValidateHook(Hook): + """Validation hook. + + Args: + dataset (Dataset | dict): A PyTorch dataset or dict that indicates + the dataset. + dist_mode (bool): Use distributed evaluation or not. Default: True. + initial (bool): Whether to evaluate before the training starts. + Default: True. + interval (int): Evaluation interval (by epochs). Default: 1. + save_val (bool): Whether to save evaluation results. Default: False. + **eval_kwargs: Evaluation arguments fed into the evaluate function of + the dataset. 
+ """ + + def __init__(self, + dataset, + dist_mode=True, + initial=True, + interval=1, + save_val=False, + **eval_kwargs): + from openmixup import datasets + if isinstance(dataset, Dataset): + self.dataset = dataset + elif isinstance(dataset, dict): + self.dataset = datasets.build_dataset(dataset) + else: + raise TypeError( + 'dataset must be a Dataset object or a dict, not {}'.format( + type(dataset))) + self.data_loader = datasets.build_dataloader( + self.dataset, + eval_kwargs['imgs_per_gpu'], + eval_kwargs['workers_per_gpu'], + dist=dist_mode, + shuffle=False, + prefetch=eval_kwargs.get('prefetch', False), + img_norm_cfg=eval_kwargs.get('img_norm_cfg', dict()), + ) + self.dist_mode = dist_mode + self.initial = initial + self.interval = interval + self.save_val = save_val + self.eval_kwargs = eval_kwargs + + def before_run(self, runner): + if self.initial: + self._run_validate(runner) + + def after_train_epoch(self, runner): + if not self.every_n_epochs(runner, self.interval): + return + self._run_validate(runner) + + def _run_validate(self, runner): + runner.model.eval() + func = lambda **x: runner.model(mode='test', **x) + if self.dist_mode: + results = dist_forward_collect( + func, self.data_loader, runner.rank, + len(self.dataset)) # dict{key: np.ndarray} + else: + results = nondist_forward_collect(func, self.data_loader, + len(self.dataset)) + if runner.rank == 0: + for name, val in results.items(): + self._evaluate(runner, torch.from_numpy(val), name) + if self.save_val: + np.save( + "{}/val_epoch_{}.npy".format(runner.work_dir, runner.epoch), val) + runner.model.train() + + def _evaluate(self, runner, results, keyword): + eval_res = self.dataset.evaluate( + results, + keyword=keyword, + logger=runner.logger, + **self.eval_kwargs['eval_param']) + for name, val in eval_res.items(): + runner.log_buffer.output[name] = val + runner.log_buffer.ready = True diff --git a/openmixup/models/__init__.py b/openmixup/models/__init__.py new file mode 100644 index 00000000..90cb9c75 --- /dev/null +++ b/openmixup/models/__init__.py @@ -0,0 +1,10 @@ +from .backbones import * +from .builder import (build_backbone, build_model, build_head, build_loss) +from .heads import * +from .classifiers import * +from .necks import * +from .losses import * +from .memories import * +from .selfsup import * +from .semisup import * +from .registry import (BACKBONES, MODELS, NECKS, MEMORIES, HEADS, LOSSES) diff --git a/openmixup/models/backbones/__init__.py b/openmixup/models/backbones/__init__.py new file mode 100644 index 00000000..915de799 --- /dev/null +++ b/openmixup/models/backbones/__init__.py @@ -0,0 +1,18 @@ +from .lenet import LeNet5 +from .alexnet import AlexNet +from .mobilenet_v2 import MobileNetV2 +from .mobilenet_v3 import MobileNetV3 +from .resnet_mmcls import ResNet_mmcls, ResNet_CIFAR, ResNetV1d, ResNet_Mix, ResNet_Mix_CIFAR +from .resnext import ResNeXt, ResNeXt_CIFAR, ResNeXt_Mix, ResNeXt_CIFAR_Mix +from .seresnet import SEResNet, SEResNet_CIFAR +from .shufflenet_v2 import ShuffleNetV2 +from .wide_resnet import WideResNet, WideResNet_Mix + + +__all__ = [ + 'LeNet5', 'AlexNet', 'MobileNetV2', 'MobileNetV3', + 'ResNet_mmcls', 'ResNet_CIFAR', 'ResNetV1d', 'ResNet_Mix', 'ResNet_Mix_CIFAR', + 'ResNeXt', 'ResNeXt_CIFAR', 'ResNeXt_Mix', 'ResNeXt_CIFAR_Mix', + 'SEResNet', 'SEResNet_CIFAR', 'ShuffleNetV2', + 'WideResNet', 'WideResNet_Mix' +] diff --git a/openmixup/models/backbones/alexnet.py b/openmixup/models/backbones/alexnet.py new file mode 100644 index 00000000..a9bc4f64 --- /dev/null +++ 
b/openmixup/models/backbones/alexnet.py @@ -0,0 +1,81 @@ +# reference: https://github.com/open-mmlab/mmclassification/tree/master/mmcls/models/backbones +# copy from mmclassification alexnet.py +import torch.nn as nn + +from mmcv.cnn import kaiming_init, normal_init + +from .. import builder +from ..registry import BACKBONES +from .base_backbone import BaseBackbone + + +@BACKBONES.register_module() +class AlexNet(BaseBackbone): + """`AlexNet `_ backbone. + + The input for AlexNet is a 224x224 RGB image. + + Args: + mlp_neck (dict): additional MLP neck in SSL. Default is None. + cls_neck (dict): the original classifier MLP in AlexNet, + "Dropout-fc-ReLU-Dropout-fc-ReLU-4096-class_num". + """ + + def __init__(self, + mlp_neck=None, + cls_neck=None, + pretrained=None): + super(AlexNet, self).__init__() + assert mlp_neck is None or cls_neck is None + self.mlp_neck = mlp_neck + self.cls_neck = cls_neck + self.features = nn.Sequential( + nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2), + nn.ReLU(inplace=True), + nn.MaxPool2d(kernel_size=3, stride=2), + nn.Conv2d(64, 192, kernel_size=5, padding=2), + nn.ReLU(inplace=True), + nn.MaxPool2d(kernel_size=3, stride=2), + nn.Conv2d(192, 384, kernel_size=3, padding=1), + nn.ReLU(inplace=True), + nn.Conv2d(384, 256, kernel_size=3, padding=1), + nn.ReLU(inplace=True), + nn.Conv2d(256, 256, kernel_size=3, padding=1), + nn.ReLU(inplace=True), + nn.MaxPool2d(kernel_size=3, stride=2), + ) + if mlp_neck is not None: # additional mlp neck for AlexNet + self.mlp_neck = builder.build_neck(mlp_neck) + if cls_neck is not None: # original cls neck in AlexNet + self.cls_neck = nn.Sequential( + nn.Dropout(), + nn.Linear(256 * 6 * 6, 4096), + nn.ReLU(inplace=True), + nn.Dropout(), + nn.Linear(4096, 4096), + nn.ReLU(inplace=True), + # nn.Linear(4096, num_classes), # ori + ) + self.init_weights(pretrained=pretrained) + + def init_weights(self, pretrained=None): + for m in self.features.modules(): + if isinstance(m, nn.Conv2d): + kaiming_init(m, mode='fan_in', nonlinearity='relu') + if self.mlp_neck is not None: + self.mlp_neck.init_weights(init_linear='normal') + if self.cls_neck is not None: + for m in self.cls_neck: + if isinstance(m, nn.Linear): + kaiming_init(m, mode='fan_in', nonlinearity='relu') + + def forward(self, x): + x = self.features(x) + if self.mlp_neck is not None: + x = [x.view(x.size(0), 256 * 6 * 6)] + x = self.mlp_neck(x)[0] + if self.cls_neck is not None: + x = x.view(x.size(0), 256 * 6 * 6) + x = self.cls_neck(x) + + return [x] diff --git a/openmixup/models/backbones/base_backbone.py b/openmixup/models/backbones/base_backbone.py new file mode 100644 index 00000000..35a5ef55 --- /dev/null +++ b/openmixup/models/backbones/base_backbone.py @@ -0,0 +1,57 @@ +# reference: https://github.com/open-mmlab/mmclassification/tree/master/mmcls/models/backbones +# copy from mmclassification base_backbone.py +import logging +from abc import ABCMeta, abstractmethod + +import torch.nn as nn +from mmcv.runner import load_checkpoint + + +class BaseBackbone(nn.Module, metaclass=ABCMeta): + """Base backbone. + + This class defines the basic functions of a backbone. + Any backbone that inherits this class should at least + define its own `forward` function. + + """ + + def __init__(self): + super(BaseBackbone, self).__init__() + + def init_weights(self, pretrained=None): + """Init backbone weights + + Args: + pretrained (str | None): If pretrained is a string, then it + initializes backbone weights by loading the pretrained + checkpoint. 
If pretrained is None, then it follows default + initializer or customized initializer in subclasses. + """ + if isinstance(pretrained, str): + logger = logging.getLogger() + load_checkpoint(self, pretrained, strict=False, logger=logger) + elif pretrained is None: + # use default initializer or customized initializer in subclasses + pass + else: + raise TypeError('pretrained must be a str or None.' + f' But received {type(pretrained)}.') + + @abstractmethod + def forward(self, x): + """Forward computation + + Args: + x (tensor | tuple[tensor]): x could be a Torch.tensor or a tuple of + Torch.tensor, containing input data for forward computation. + """ + pass + + def train(self, mode=True): + """Set module status before forward computation + + Args: + mode (bool): Whether it is train_mode or test_mode + """ + super(BaseBackbone, self).train(mode) diff --git a/openmixup/models/backbones/lenet.py b/openmixup/models/backbones/lenet.py new file mode 100644 index 00000000..286ea420 --- /dev/null +++ b/openmixup/models/backbones/lenet.py @@ -0,0 +1,75 @@ +# reference: https://github.com/open-mmlab/mmclassification/tree/master/mmcls/models/backbones +# copy from mmclassification lenet.py +import torch.nn as nn + +from mmcv.cnn import kaiming_init, normal_init + +from .. import builder +from ..registry import BACKBONES +from .base_backbone import BaseBackbone + + +@BACKBONES.register_module() +class LeNet5(BaseBackbone): + """`LeNet5 `_ backbone. + 12.29 version + + The input for LeNet-5 is a 32×32 grayscale image. + + Args: + activation (str): choose your activation func, default is Tanh. + mlp_neck (dict): additional MLP neck in SSL. + cls_neck (dict): the original classifier MLP in LeNet, + "120-tanh-84-tanh-class_num". Default is None. + """ + + def __init__(self, + activation="Tanh", + mlp_neck=None, + cls_neck=None, + pretrained=None): + super(LeNet5, self).__init__() + assert activation in ["ReLU", "LeakyReLU", "Tanh", "ELU", "Sigmoid"] + assert mlp_neck is None or cls_neck is None + self.activation = activation + self.mlp_neck = mlp_neck + self.cls_neck = cls_neck + self.features = nn.Sequential( + nn.Conv2d(1, 6, kernel_size=5, stride=1), eval("nn.{}()".format(activation)), + nn.AvgPool2d(kernel_size=2), + nn.Conv2d(6, 16, kernel_size=5, stride=1), eval("nn.{}()".format(activation)), + nn.AvgPool2d(kernel_size=2), + nn.Conv2d(16, 120, kernel_size=5, stride=1), eval("nn.{}()".format(activation)), + ) + if mlp_neck is not None: # additional mlp neck for LeNet + self.mlp_neck = builder.build_neck(mlp_neck) + if cls_neck is not None: # original cls neck in LeNet + self.cls_neck = nn.Sequential( + nn.Linear(120, 84), + eval("nn.{}()".format(activation)), + # nn.Linear(84, num_classes), # ori LeNet + ) + self.init_weights(pretrained=pretrained) + + def init_weights(self, pretrained=None): + for m in self.features.modules(): + if isinstance(m, nn.Conv2d): + kaiming_init(m, mode='fan_in', nonlinearity='relu') + if self.mlp_neck is not None: + self.mlp_neck.init_weights(init_linear='normal') + if self.cls_neck is not None: + for m in self.cls_neck: + if isinstance(m, nn.Linear): + if self.activation not in ['LeakyReLU', "ReLU"]: + normal_init(m, std=0.01, bias=0.) 
+ else: + kaiming_init(m, mode='fan_in', nonlinearity='relu') + + def forward(self, x): + x = self.features(x) + if self.mlp_neck is not None: + x = self.mlp_neck( [x.squeeze()] )[0] + if self.cls_neck is not None: + x = self.cls_neck(x.squeeze()) + + return [x] diff --git a/openmixup/models/backbones/mobilenet_v2.py b/openmixup/models/backbones/mobilenet_v2.py new file mode 100644 index 00000000..cec9920d --- /dev/null +++ b/openmixup/models/backbones/mobilenet_v2.py @@ -0,0 +1,274 @@ +# reference: https://github.com/open-mmlab/mmclassification/tree/master/mmcls/models/backbones +# copy from mmclassification mobilenet_v2.py +import logging + +import torch.nn as nn +import torch.utils.checkpoint as cp +from mmcv.cnn import ConvModule, constant_init, kaiming_init +from mmcv.runner import load_checkpoint +from torch.nn.modules.batchnorm import _BatchNorm + +from ..utils import make_divisible +from ..registry import BACKBONES +from .base_backbone import BaseBackbone + + + +class InvertedResidual(nn.Module): + """InvertedResidual block for MobileNetV2. + + Args: + in_channels (int): The input channels of the InvertedResidual block. + out_channels (int): The output channels of the InvertedResidual block. + stride (int): Stride of the middle (first) 3x3 convolution. + expand_ratio (int): adjusts number of channels of the hidden layer + in InvertedResidual by this amount. + conv_cfg (dict): Config dict for convolution layer. + Default: None, which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='BN'). + act_cfg (dict): Config dict for activation layer. + Default: dict(type='ReLU6'). + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + + Returns: + Tensor: The output tensor + """ + + def __init__(self, + in_channels, + out_channels, + stride, + expand_ratio, + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU6'), + with_cp=False): + super(InvertedResidual, self).__init__() + self.stride = stride + assert stride in [1, 2], f'stride must in [1, 2]. ' \ + f'But received {stride}.' + self.with_cp = with_cp + self.use_res_connect = self.stride == 1 and in_channels == out_channels + hidden_dim = int(round(in_channels * expand_ratio)) + + layers = [] + if expand_ratio != 1: + layers.append( + ConvModule( + in_channels=in_channels, + out_channels=hidden_dim, + kernel_size=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg)) + layers.extend([ + ConvModule( + in_channels=hidden_dim, + out_channels=hidden_dim, + kernel_size=3, + stride=stride, + padding=1, + groups=hidden_dim, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg), + ConvModule( + in_channels=hidden_dim, + out_channels=out_channels, + kernel_size=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=None) + ]) + self.conv = nn.Sequential(*layers) + + def forward(self, x): + + def _inner_forward(x): + if self.use_res_connect: + return x + self.conv(x) + else: + return self.conv(x) + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + + return out + + +@BACKBONES.register_module() +class MobileNetV2(BaseBackbone): + """MobileNetV2 backbone. + + Args: + widen_factor (float): Width multiplier, multiply number of + channels in each layer by this amount. Default: 1.0. + out_indices (None or Sequence[int]): Output from which stages. + Default: (7, ). + frozen_stages (int): Stages to be frozen (all param fixed). 
+ Default: -1, which means not freezing any parameters. + conv_cfg (dict): Config dict for convolution layer. + Default: None, which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='BN'). + act_cfg (dict): Config dict for activation layer. + Default: dict(type='ReLU6'). + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Default: False. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + """ + + # Parameters to build layers. 4 parameters are needed to construct a + # layer, from left to right: expand_ratio, channel, num_blocks, stride. + arch_settings = [[1, 16, 1, 1], [6, 24, 2, 2], [6, 32, 3, 2], + [6, 64, 4, 2], [6, 96, 3, 1], [6, 160, 3, 2], + [6, 320, 1, 1]] + + def __init__(self, + widen_factor=1., + out_indices=(7, ), + frozen_stages=-1, + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU6'), + norm_eval=False, + with_cp=False): + super(MobileNetV2, self).__init__() + self.widen_factor = widen_factor + self.out_indices = out_indices + for index in out_indices: + if index not in range(0, 8): + raise ValueError('the item in out_indices must in ' + f'range(0, 8). But received {index}') + + if frozen_stages not in range(-1, 8): + raise ValueError('frozen_stages must be in range(-1, 8). ' + f'But received {frozen_stages}') + self.out_indices = out_indices + self.frozen_stages = frozen_stages + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + self.norm_eval = norm_eval + self.with_cp = with_cp + + self.in_channels = make_divisible(32 * widen_factor, 8) + + self.conv1 = ConvModule( + in_channels=3, + out_channels=self.in_channels, + kernel_size=3, + stride=2, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + + self.layers = [] + + for i, layer_cfg in enumerate(self.arch_settings): + expand_ratio, channel, num_blocks, stride = layer_cfg + out_channels = make_divisible(channel * widen_factor, 8) + inverted_res_layer = self.make_layer( + out_channels=out_channels, + num_blocks=num_blocks, + stride=stride, + expand_ratio=expand_ratio) + layer_name = f'layer{i + 1}' + self.add_module(layer_name, inverted_res_layer) + self.layers.append(layer_name) + + if widen_factor > 1.0: + self.out_channel = int(1280 * widen_factor) + else: + self.out_channel = 1280 + + layer = ConvModule( + in_channels=self.in_channels, + out_channels=self.out_channel, + kernel_size=1, + stride=1, + padding=0, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + self.add_module('conv2', layer) + self.layers.append('conv2') + + def make_layer(self, out_channels, num_blocks, stride, expand_ratio): + """ Stack InvertedResidual blocks to build a layer for MobileNetV2. + + Args: + out_channels (int): out_channels of block. + num_blocks (int): number of blocks. + stride (int): stride of the first block. Default: 1 + expand_ratio (int): Expand the number of channels of the + hidden layer in InvertedResidual by this ratio. Default: 6. 
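+
+        Example (illustrative; mirrors how ``__init__`` builds stage 2 for
+            widen_factor=1.0):
+            >>> layer = self.make_layer(
+            ...     out_channels=24, num_blocks=2, stride=2, expand_ratio=6)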
+ """ + layers = [] + for i in range(num_blocks): + if i >= 1: + stride = 1 + layers.append( + InvertedResidual( + self.in_channels, + out_channels, + stride, + expand_ratio=expand_ratio, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg, + with_cp=self.with_cp)) + self.in_channels = out_channels + + return nn.Sequential(*layers) + + def init_weights(self, pretrained=None): + if isinstance(pretrained, str): + logger = logging.getLogger() + load_checkpoint(self, pretrained, strict=False, logger=logger) + elif pretrained is None: + for m in self.modules(): + if isinstance(m, nn.Conv2d): + kaiming_init(m) + elif isinstance(m, (_BatchNorm, nn.GroupNorm)): + constant_init(m, 1) + else: + raise TypeError('pretrained must be a str or None') + + def forward(self, x): + x = self.conv1(x) + + outs = [] + for i, layer_name in enumerate(self.layers): + layer = getattr(self, layer_name) + x = layer(x) + if i in self.out_indices: + outs.append(x) + if len(self.out_indices) == 1: + return outs + return outs + + def _freeze_stages(self): + if self.frozen_stages >= 0: + for param in self.conv1.parameters(): + param.requires_grad = False + for i in range(1, self.frozen_stages + 1): + layer = getattr(self, f'layer{i}') + layer.eval() + for param in layer.parameters(): + param.requires_grad = False + + def train(self, mode=True): + super(MobileNetV2, self).train(mode) + self._freeze_stages() + if mode and self.norm_eval: + for m in self.modules(): + if isinstance(m, _BatchNorm): + m.eval() diff --git a/openmixup/models/backbones/mobilenet_v3.py b/openmixup/models/backbones/mobilenet_v3.py new file mode 100644 index 00000000..a411f481 --- /dev/null +++ b/openmixup/models/backbones/mobilenet_v3.py @@ -0,0 +1,204 @@ +# reference: https://github.com/open-mmlab/mmclassification/tree/master/mmcls/models/backbones +# copy from mmclassification mobilenet_v3.py +import logging + +import torch.nn as nn +import torch.utils.checkpoint as cp +from mmcv.cnn import ConvModule, constant_init, kaiming_init +from mmcv.runner import load_checkpoint +from torch.nn.modules.batchnorm import _BatchNorm + +from ..utils import make_divisible +from ..utils import InvertedResidual +from ..registry import BACKBONES +from .base_backbone import BaseBackbone + + +@BACKBONES.register_module() +class MobileNetV3(BaseBackbone): + """MobileNetV3 backbone. + + Args: + arch (str): Architechture of mobilnetv3, from {small, large}. + Default: small. + conv_cfg (dict, optional): Config dict for convolution layer. + Default: None, which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='BN'). + out_indices (None or Sequence[int]): Output from which stages. + Default: None, which means output tensors from final stage. + frozen_stages (int): Stages to be frozen (all param fixed). + Defualt: -1, which means not freezing any parameters. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Default: False. + with_cp (bool): Use checkpoint or not. Using checkpoint will save + some memory while slowing down the training speed. + Defualt: False. 
+ """ + # Parameters to build each block: + # [kernel size, mid channels, out channels, with_se, act type, stride] + arch_settings = { + 'small': [[3, 16, 16, True, 'ReLU', 2], + [3, 72, 24, False, 'ReLU', 2], + [3, 88, 24, False, 'ReLU', 1], + [5, 96, 40, True, 'HSwish', 2], + [5, 240, 40, True, 'HSwish', 1], + [5, 240, 40, True, 'HSwish', 1], + [5, 120, 48, True, 'HSwish', 1], + [5, 144, 48, True, 'HSwish', 1], + [5, 288, 96, True, 'HSwish', 2], + [5, 576, 96, True, 'HSwish', 1], + [5, 576, 96, True, 'HSwish', 1]], + 'large': [[3, 16, 16, False, 'ReLU', 1], + [3, 64, 24, False, 'ReLU', 2], + [3, 72, 24, False, 'ReLU', 1], + [5, 72, 40, True, 'ReLU', 2], + [5, 120, 40, True, 'ReLU', 1], + [5, 120, 40, True, 'ReLU', 1], + [3, 240, 80, False, 'HSwish', 2], + [3, 200, 80, False, 'HSwish', 1], + [3, 184, 80, False, 'HSwish', 1], + [3, 184, 80, False, 'HSwish', 1], + [3, 480, 112, True, 'HSwish', 1], + [3, 672, 112, True, 'HSwish', 1], + [5, 672, 160, True, 'HSwish', 2], + [5, 960, 160, True, 'HSwish', 1], + [5, 960, 160, True, 'HSwish', 1]] + } # yapf: disable + + def __init__(self, + arch='small', + conv_cfg=None, + norm_cfg=dict(type='BN', eps=0.001, momentum=0.01), + out_indices=None, + frozen_stages=-1, + norm_eval=False, + with_cp=False, + init_cfg=[ + dict( + type='Kaiming', + layer=['Conv2d'], + nonlinearity='leaky_relu'), + dict(type='Normal', layer=['Linear'], std=0.01), + dict(type='Constant', layer=['BatchNorm2d'], val=1) + ]): + super(MobileNetV3, self).__init__() + assert arch in self.arch_settings + if out_indices is None: + out_indices = (12, ) if arch == 'small' else (16, ) + for order, index in enumerate(out_indices): + if index not in range(0, len(self.arch_settings[arch]) + 2): + raise ValueError( + 'the item in out_indices must in ' + f'range(0, {len(self.arch_settings[arch]) + 2}). ' + f'But received {index}') + + if frozen_stages not in range(-1, len(self.arch_settings[arch]) + 2): + raise ValueError('frozen_stages must be in range(-1, ' + f'{len(self.arch_settings[arch]) + 2}). 
' + f'But received {frozen_stages}') + self.arch = arch + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.out_indices = out_indices + self.frozen_stages = frozen_stages + self.norm_eval = norm_eval + self.with_cp = with_cp + self.init_cfg = init_cfg + + self.layers = self._make_layer() + self.feat_dim = self.arch_settings[arch][-1][1] + + def _make_layer(self): + layers = [] + layer_setting = self.arch_settings[self.arch] + in_channels = 16 + + layer = ConvModule( + in_channels=3, + out_channels=in_channels, + kernel_size=3, + stride=2, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=dict(type='HSwish')) + self.add_module('layer0', layer) + layers.append('layer0') + + for i, params in enumerate(layer_setting): + (kernel_size, mid_channels, out_channels, with_se, act, + stride) = params + if with_se: + se_cfg = dict( + channels=mid_channels, + ratio=4, + act_cfg=(dict(type='ReLU'), + dict( + type='HSigmoid', + bias=3, + divisor=6, + min_value=0, + max_value=1))) + else: + se_cfg = None + + layer = InvertedResidual( + in_channels=in_channels, + out_channels=out_channels, + mid_channels=mid_channels, + kernel_size=kernel_size, + stride=stride, + se_cfg=se_cfg, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=dict(type=act), + with_cp=self.with_cp) + in_channels = out_channels + layer_name = 'layer{}'.format(i + 1) + self.add_module(layer_name, layer) + layers.append(layer_name) + + # Build the last layer before pooling + # TODO: No dilation + layer = ConvModule( + in_channels=in_channels, + out_channels=576 if self.arch == 'small' else 960, + kernel_size=1, + stride=1, + padding=0, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=dict(type='HSwish')) + layer_name = 'layer{}'.format(len(layer_setting) + 1) + self.add_module(layer_name, layer) + layers.append(layer_name) + + return layers + + def forward(self, x): + outs = [] + for i, layer_name in enumerate(self.layers): + layer = getattr(self, layer_name) + x = layer(x) + if i in self.out_indices: + outs.append(x) + if len(self.out_indices) == 1: + return outs + return outs + + def _freeze_stages(self): + for i in range(0, self.frozen_stages + 1): + layer = getattr(self, f'layer{i}') + layer.eval() + for param in layer.parameters(): + param.requires_grad = False + + def train(self, mode=True): + super(MobileNetV3, self).train(mode) + self._freeze_stages() + if mode and self.norm_eval: + for m in self.modules(): + if isinstance(m, _BatchNorm): + m.eval() diff --git a/openmixup/models/backbones/resnet_mmcls.py b/openmixup/models/backbones/resnet_mmcls.py new file mode 100644 index 00000000..33b0e48f --- /dev/null +++ b/openmixup/models/backbones/resnet_mmcls.py @@ -0,0 +1,965 @@ +# reference: https://github.com/open-mmlab/mmclassification/tree/master/mmcls/models/backbones +# copy from mmclassification resnet.py, resnet_cifar.py +from openmixup.models.utils.gather_layer import grad_batch_unshuffle_ddp +import torch.nn as nn +import torch.utils.checkpoint as cp +from mmcv.cnn import (ConvModule, build_conv_layer, build_norm_layer, + constant_init, kaiming_init) +from mmcv.utils.parrots_wrapper import _BatchNorm +import random +from ..registry import BACKBONES +from .base_backbone import BaseBackbone +from ..utils import grad_batch_shuffle_ddp + + +class BasicBlock(nn.Module): + """BasicBlock for ResNet. + + Args: + in_channels (int): Input channels of this block. + out_channels (int): Output channels of this block. 
+ expansion (int): The ratio of ``out_channels/mid_channels`` where + ``mid_channels`` is the output channels of conv1. This is a + reserved argument in BasicBlock and should always be 1. Default: 1. + stride (int): stride of the block. Default: 1 + dilation (int): dilation of convolution. Default: 1 + downsample (nn.Module): downsample operation on identity branch. + Default: None. + style (str): `pytorch` or `caffe`. It is unused and reserved for + unified API with Bottleneck. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. + conv_cfg (dict): dictionary to construct and config conv layer. + Default: None + norm_cfg (dict): dictionary to construct and config norm layer. + Default: dict(type='BN') + """ + + def __init__(self, + in_channels, + out_channels, + expansion=1, + stride=1, + dilation=1, + downsample=None, + style='pytorch', + with_cp=False, + conv_cfg=None, + norm_cfg=dict(type='BN')): + super(BasicBlock, self).__init__() + self.in_channels = in_channels + self.out_channels = out_channels + self.expansion = expansion + assert self.expansion == 1 + assert out_channels % expansion == 0 + self.mid_channels = out_channels // expansion + self.stride = stride + self.dilation = dilation + self.style = style + self.with_cp = with_cp + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + + self.norm1_name, norm1 = build_norm_layer( + norm_cfg, self.mid_channels, postfix=1) + self.norm2_name, norm2 = build_norm_layer( + norm_cfg, out_channels, postfix=2) + + self.conv1 = build_conv_layer( + conv_cfg, + in_channels, + self.mid_channels, + 3, + stride=stride, + padding=dilation, + dilation=dilation, + bias=False) + self.add_module(self.norm1_name, norm1) + self.conv2 = build_conv_layer( + conv_cfg, + self.mid_channels, + out_channels, + 3, + padding=1, + bias=False) + self.add_module(self.norm2_name, norm2) + + self.relu = nn.ReLU(inplace=True) + self.downsample = downsample + + @property + def norm1(self): + return getattr(self, self.norm1_name) + + @property + def norm2(self): + return getattr(self, self.norm2_name) + + def forward(self, x): + + def _inner_forward(x): + identity = x + + out = self.conv1(x) + out = self.norm1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.norm2(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + + return out + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + + out = self.relu(out) + + return out + + +class Bottleneck(nn.Module): + """Bottleneck block for ResNet. + + Args: + in_channels (int): Input channels of this block. + out_channels (int): Output channels of this block. + expansion (int): The ratio of ``out_channels/mid_channels`` where + ``mid_channels`` is the input/output channels of conv2. Default: 4. + stride (int): stride of the block. Default: 1 + dilation (int): dilation of convolution. Default: 1 + downsample (nn.Module): downsample operation on identity branch. + Default: None. + style (str): ``"pytorch"`` or ``"caffe"``. If set to "pytorch", the + stride-two layer is the 3x3 conv layer, otherwise the stride-two + layer is the first 1x1 conv layer. Default: "pytorch". + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. + conv_cfg (dict): dictionary to construct and config conv layer. + Default: None + norm_cfg (dict): dictionary to construct and config norm layer. 
+ Default: dict(type='BN') + """ + + def __init__(self, + in_channels, + out_channels, + expansion=4, + stride=1, + dilation=1, + downsample=None, + style='pytorch', + with_cp=False, + conv_cfg=None, + norm_cfg=dict(type='BN')): + super(Bottleneck, self).__init__() + assert style in ['pytorch', 'caffe'] + + self.in_channels = in_channels + self.out_channels = out_channels + self.expansion = expansion + assert out_channels % expansion == 0 + self.mid_channels = out_channels // expansion + self.stride = stride + self.dilation = dilation + self.style = style + self.with_cp = with_cp + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + + if self.style == 'pytorch': + self.conv1_stride = 1 + self.conv2_stride = stride + else: + self.conv1_stride = stride + self.conv2_stride = 1 + + self.norm1_name, norm1 = build_norm_layer( + norm_cfg, self.mid_channels, postfix=1) + self.norm2_name, norm2 = build_norm_layer( + norm_cfg, self.mid_channels, postfix=2) + self.norm3_name, norm3 = build_norm_layer( + norm_cfg, out_channels, postfix=3) + + self.conv1 = build_conv_layer( + conv_cfg, + in_channels, + self.mid_channels, + kernel_size=1, + stride=self.conv1_stride, + bias=False) + self.add_module(self.norm1_name, norm1) + self.conv2 = build_conv_layer( + conv_cfg, + self.mid_channels, + self.mid_channels, + kernel_size=3, + stride=self.conv2_stride, + padding=dilation, + dilation=dilation, + bias=False) + + self.add_module(self.norm2_name, norm2) + self.conv3 = build_conv_layer( + conv_cfg, + self.mid_channels, + out_channels, + kernel_size=1, + bias=False) + self.add_module(self.norm3_name, norm3) + + self.relu = nn.ReLU(inplace=True) + self.downsample = downsample + + @property + def norm1(self): + return getattr(self, self.norm1_name) + + @property + def norm2(self): + return getattr(self, self.norm2_name) + + @property + def norm3(self): + return getattr(self, self.norm3_name) + + def forward(self, x): + + def _inner_forward(x): + identity = x + + out = self.conv1(x) + out = self.norm1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.norm2(out) + out = self.relu(out) + + out = self.conv3(out) + out = self.norm3(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + + return out + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + + out = self.relu(out) + + return out + + +def get_expansion(block, expansion=None): + """Get the expansion of a residual block. + + The block expansion will be obtained by the following order: + + 1. If ``expansion`` is given, just return it. + 2. If ``block`` has the attribute ``expansion``, then return + ``block.expansion``. + 3. Return the default value according the the block type: + 1 for ``BasicBlock`` and 4 for ``Bottleneck``. + + Args: + block (class): The block class. + expansion (int | None): The given expansion ratio. + + Returns: + int: The expansion of the block. + """ + if isinstance(expansion, int): + assert expansion > 0 + elif expansion is None: + if hasattr(block, 'expansion'): + expansion = block.expansion + elif issubclass(block, BasicBlock): + expansion = 1 + elif issubclass(block, Bottleneck): + expansion = 4 + else: + raise TypeError(f'expansion is not specified for {block.__name__}') + else: + raise TypeError('expansion must be an integer or None') + + return expansion + + +class ResLayer(nn.Sequential): + """ResLayer to build ResNet style backbone. + + Args: + block (nn.Module): Residual block used to build ResLayer. 
+ num_blocks (int): Number of blocks. + in_channels (int): Input channels of this block. + out_channels (int): Output channels of this block. + expansion (int, optional): The expansion for BasicBlock/Bottleneck. + If not specified, it will firstly be obtained via + ``block.expansion``. If the block has no attribute "expansion", + the following default values will be used: 1 for BasicBlock and + 4 for Bottleneck. Default: None. + stride (int): stride of the first block. Default: 1. + avg_down (bool): Use AvgPool instead of stride conv when + downsampling in the bottleneck. Default: False + conv_cfg (dict): dictionary to construct and config conv layer. + Default: None + norm_cfg (dict): dictionary to construct and config norm layer. + Default: dict(type='BN') + """ + + def __init__(self, + block, + num_blocks, + in_channels, + out_channels, + expansion=None, + stride=1, + avg_down=False, + conv_cfg=None, + norm_cfg=dict(type='BN'), + **kwargs): + self.block = block + self.expansion = get_expansion(block, expansion) + + downsample = None + if stride != 1 or in_channels != out_channels: + downsample = [] + conv_stride = stride + if avg_down and stride != 1: + conv_stride = 1 + downsample.append( + nn.AvgPool2d( + kernel_size=stride, + stride=stride, + ceil_mode=True, + count_include_pad=False)) + downsample.extend([ + build_conv_layer( + conv_cfg, + in_channels, + out_channels, + kernel_size=1, + stride=conv_stride, + bias=False), + build_norm_layer(norm_cfg, out_channels)[1] + ]) + downsample = nn.Sequential(*downsample) + + layers = [] + layers.append( + block( + in_channels=in_channels, + out_channels=out_channels, + expansion=self.expansion, + stride=stride, + downsample=downsample, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + **kwargs)) + in_channels = out_channels + for _ in range(1, num_blocks): + layers.append( + block( + in_channels=in_channels, + out_channels=out_channels, + expansion=self.expansion, + stride=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + **kwargs)) + super(ResLayer, self).__init__(*layers) + + +@BACKBONES.register_module() +class ResNet_mmcls(BaseBackbone): + """ResNet backbone. + + Please refer to the `paper `_ for + details. + + Args: + depth (int): Network depth, from {18, 34, 50, 101, 152}. + in_channels (int): Number of input image channels. Default: 3. + stem_channels (int): Output channels of the stem layer. Default: 64. + base_channels (int): Middle channels of the first stage. Default: 64. + num_stages (int): Stages of the network. Default: 4. + strides (Sequence[int]): Strides of the first block of each stage. + Default: ``(1, 2, 2, 2)``. + dilations (Sequence[int]): Dilation of each stage. + Default: ``(1, 1, 1, 1)``. + out_indices (Sequence[int]): Output from which stages. If only one + stage is specified, a single tensor (feature map) is returned, + otherwise multiple stages are specified, a tuple of tensors will + be returned. Default: ``(0, 1, 2, 3,)``. + style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two + layer is the 3x3 conv layer, otherwise the stride-two layer is + the first 1x1 conv layer. + deep_stem (bool): Replace 7x7 conv in input stem with 3 3x3 conv. + Default: False. + avg_down (bool): Use AvgPool instead of stride conv when + downsampling in the bottleneck. Default: False. + frozen_stages (int): Stages to be frozen (stop grad and set eval mode). + -1 means not freezing any parameters. Default: -1. + conv_cfg (dict | None): The config dict for conv layers. Default: None. 
+ norm_cfg (dict): The config dict for norm layers. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Default: False. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + zero_init_residual (bool): Whether to use zero init for last norm layer + in resblocks to let them behave as identity. Default: True. + + Example: + >>> from mmcls.models import ResNet + >>> import torch + >>> self = ResNet(depth=18) + >>> self.eval() + >>> inputs = torch.rand(1, 3, 32, 32) + >>> level_outputs = self.forward(inputs) + >>> for level_out in level_outputs: + ... print(tuple(level_out.shape)) + (1, 64, 8, 8) + (1, 128, 4, 4) + (1, 256, 2, 2) + (1, 512, 1, 1) + """ + + arch_settings = { + 18: (BasicBlock, (2, 2, 2, 2)), + 34: (BasicBlock, (3, 4, 6, 3)), + 50: (Bottleneck, (3, 4, 6, 3)), + 101: (Bottleneck, (3, 4, 23, 3)), + 152: (Bottleneck, (3, 8, 36, 3)) + } + + def __init__(self, + depth, + in_channels=3, + stem_channels=64, + base_channels=64, + expansion=None, + num_stages=4, + strides=(1, 2, 2, 2), + dilations=(1, 1, 1, 1), + out_indices=(0, 1, 2, 3,), + style='pytorch', + deep_stem=False, + avg_down=False, + frozen_stages=-1, + conv_cfg=None, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=False, + with_cp=False, + zero_init_residual=True): + super(ResNet_mmcls, self).__init__() + if depth not in self.arch_settings: + raise KeyError(f'invalid depth {depth} for resnet') + self.depth = depth + self.stem_channels = stem_channels + self.base_channels = base_channels + self.num_stages = num_stages + assert num_stages >= 1 and num_stages <= 4 + self.strides = strides + self.dilations = dilations + assert len(strides) == len(dilations) == num_stages + self.out_indices = out_indices + assert max(out_indices) < num_stages + self.style = style + self.deep_stem = deep_stem + self.avg_down = avg_down + self.frozen_stages = frozen_stages + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.with_cp = with_cp + self.norm_eval = norm_eval + self.zero_init_residual = zero_init_residual + self.block, stage_blocks = self.arch_settings[depth] + self.stage_blocks = stage_blocks[:num_stages] + self.expansion = get_expansion(self.block, expansion) + + self._make_stem_layer(in_channels, stem_channels) + + self.res_layers = [] + _in_channels = stem_channels + _out_channels = base_channels * self.expansion + for i, num_blocks in enumerate(self.stage_blocks): + stride = strides[i] + dilation = dilations[i] + res_layer = self.make_res_layer( + block=self.block, + num_blocks=num_blocks, + in_channels=_in_channels, + out_channels=_out_channels, + expansion=self.expansion, + stride=stride, + dilation=dilation, + style=self.style, + avg_down=self.avg_down, + with_cp=with_cp, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg) + _in_channels = _out_channels + _out_channels *= 2 + layer_name = f'layer{i + 1}' + self.add_module(layer_name, res_layer) + self.res_layers.append(layer_name) + + self._freeze_stages() + + self.feat_dim = res_layer[-1].out_channels + + def make_res_layer(self, **kwargs): + return ResLayer(**kwargs) + + @property + def norm1(self): + return getattr(self, self.norm1_name) + + def _make_stem_layer(self, in_channels, stem_channels): + if self.deep_stem: + self.stem = nn.Sequential( + ConvModule( + in_channels, + stem_channels // 2, + kernel_size=3, + stride=2, + padding=1, + conv_cfg=self.conv_cfg, + 
norm_cfg=self.norm_cfg, + inplace=True), + ConvModule( + stem_channels // 2, + stem_channels // 2, + kernel_size=3, + stride=1, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + inplace=True), + ConvModule( + stem_channels // 2, + stem_channels, + kernel_size=3, + stride=1, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + inplace=True)) + else: + self.conv1 = build_conv_layer( + self.conv_cfg, + in_channels, + stem_channels, + kernel_size=7, + stride=2, + padding=3, + bias=False) + self.norm1_name, norm1 = build_norm_layer( + self.norm_cfg, stem_channels, postfix=1) + self.add_module(self.norm1_name, norm1) + self.relu = nn.ReLU(inplace=True) + self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + + def _freeze_stages(self): + if self.frozen_stages >= 0: + if self.deep_stem: + # self.stem.eval() + for param in self.stem.parameters(): + param.requires_grad = False + else: + # self.norm1.eval() + for m in [self.conv1, self.norm1]: + for param in m.parameters(): + param.requires_grad = False + + for i in range(1, self.frozen_stages + 1): + m = getattr(self, f'layer{i}') + # m.eval() + for param in m.parameters(): + param.requires_grad = False + + def _freeze_bn(self): + """ keep normalization layer freezed. """ + for m in self.modules(): + # trick: eval have effect on BatchNorm only + if isinstance(m, (_BatchNorm, nn.SyncBatchNorm)): + m.eval() + + def _unfreeze_bn(self): + for m in self.modules(): + if isinstance(m, (_BatchNorm, nn.SyncBatchNorm)): + m.train() + + def init_weights(self, pretrained=None): + super(ResNet_mmcls, self).init_weights(pretrained) + if pretrained is None: + for m in self.modules(): + if isinstance(m, nn.Conv2d): + kaiming_init(m) + elif isinstance(m, (_BatchNorm, nn.GroupNorm)): + constant_init(m, 1) + + if self.zero_init_residual: + for m in self.modules(): + if isinstance(m, Bottleneck): + constant_init(m.norm3, 0) + elif isinstance(m, BasicBlock): + constant_init(m.norm2, 0) + + def forward(self, x): + if self.deep_stem: + x = self.stem(x) + else: + x = self.conv1(x) + x = self.norm1(x) + x = self.relu(x) + x = self.maxpool(x) + outs = [] + for i, layer_name in enumerate(self.res_layers): + res_layer = getattr(self, layer_name) + x = res_layer(x) + if i in self.out_indices: + outs.append(x) + if len(self.out_indices) == 1: + return outs + return outs + + def train(self, mode=True): + super(ResNet_mmcls, self).train(mode) + self._freeze_stages() + if mode and self.norm_eval: + for m in self.modules(): + # trick: eval have effect on BatchNorm only + if isinstance(m, _BatchNorm): + m.eval() + + +@BACKBONES.register_module() +class ResNetV1d(ResNet_mmcls): + """ResNetV1d variant described in + `Bag of Tricks `_. + + Compared with default ResNet(ResNetV1b), ResNetV1d replaces the 7x7 conv + in the input stem with three 3x3 convs. And in the downsampling block, + a 2x2 avg_pool with stride 2 is added before conv, whose stride is + changed to 1. + """ + + def __init__(self, **kwargs): + super(ResNetV1d, self).__init__( + deep_stem=True, avg_down=True, **kwargs) + + +@BACKBONES.register_module() +class ResNet_CIFAR(ResNet_mmcls): + """ResNet backbone for CIFAR. + + Compared to standard ResNet, it uses `kernel_size=3` and `stride=1` in + conv1, and does not apply MaxPoolinng after stem. It has been proven to + be more efficient than standard ResNet in other public codebase, e.g., + `https://github.com/kuangliu/pytorch-cifar/blob/master/models/resnet.py`. + + Args: + depth (int): Network depth, from {18, 34, 50, 101, 152}. 
+ in_channels (int): Number of input image channels. Default: 3. + stem_channels (int): Output channels of the stem layer. Default: 64. + base_channels (int): Middle channels of the first stage. Default: 64. + num_stages (int): Stages of the network. Default: 4. + strides (Sequence[int]): Strides of the first block of each stage. + Default: ``(1, 2, 2, 2)``. + dilations (Sequence[int]): Dilation of each stage. + Default: ``(1, 1, 1, 1)``. + out_indices (Sequence[int]): Output from which stages. If only one + stage is specified, a single tensor (feature map) is returned, + otherwise multiple stages are specified, a tuple of tensors will + be returned. Default: ``(3, )``. + style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two + layer is the 3x3 conv layer, otherwise the stride-two layer is + the first 1x1 conv layer. + deep_stem (bool): This network has specific designed stem, thus it is + asserted to be False. + avg_down (bool): Use AvgPool instead of stride conv when + downsampling in the bottleneck. Default: False. + frozen_stages (int): Stages to be frozen (stop grad and set eval mode). + -1 means not freezing any parameters. Default: -1. + conv_cfg (dict | None): The config dict for conv layers. Default: None. + norm_cfg (dict): The config dict for norm layers. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Default: False. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + zero_init_residual (bool): Whether to use zero init for last norm layer + in resblocks to let them behave as identity. Default: True. + """ + + def __init__(self, depth, deep_stem=False, **kwargs): + super(ResNet_CIFAR, self).__init__( + depth, deep_stem=deep_stem, **kwargs) + assert not self.deep_stem, 'ResNet_CIFAR do not support deep_stem' + + def _make_stem_layer(self, in_channels, base_channels): + self.conv1 = build_conv_layer( + self.conv_cfg, + in_channels, + base_channels, + kernel_size=3, + stride=1, + padding=1, + bias=False) + self.norm1_name, norm1 = build_norm_layer( + self.norm_cfg, base_channels, postfix=1) + self.add_module(self.norm1_name, norm1) + self.relu = nn.ReLU(inplace=True) + + def forward(self, x): + x = self.conv1(x) + x = self.norm1(x) + x = self.relu(x) + outs = [] + for i, layer_name in enumerate(self.res_layers): + res_layer = getattr(self, layer_name) + x = res_layer(x) + if i in self.out_indices: + outs.append(x) + if len(self.out_indices) == 1: + return outs + return outs + + +@BACKBONES.register_module() +class ResNet_Mix(ResNet_mmcls): + """ResNet Support ManifoldMix and its variants + v09.13 + + Provide a port to mixup the latent space. + """ + + def __init__(self, **kwargs): + super(ResNet_Mix, self).__init__(**kwargs) + + def _feature_mixup(self, x, mask, dist_shuffle=False, idx_shuffle_mix=None, cross_view=False, + BN_shuffle=False, idx_shuffle_BN=None, idx_unshuffle_BN=None, **kwargs): + """ mixup two feature maps with the pixel-wise mask + + Args: + x, mask (tensor): Input x [N,C,H,W] and mixup mask [N, \*, H, W]. + dist_shuffle (bool): Whether to shuffle cross gpus. + idx_shuffle_mix (tensor): Shuffle indice of [N,1] to generate x_. + cross_view (bool): Whether to view the input x as two views [2N, C, H, W], + which is usually adopted in self-supervised and semi-supervised settings. + BN_shuffle (bool): Whether to do shuffle cross gpus for shuffle_BN. 
+ idx_shuffle_BN (tensor): Shuffle indice to utilize shuffle_BN cross gpus. + idx_unshuffle_BN (tensor): Unshuffle indice for the shuffle_BN (in pair). + """ + # adjust mixup mask + assert mask.dim() == 4 and mask.size(1) <= 2 + if mask.size(1) == 1: + mask = [mask, 1 - mask] + else: + mask = [ + mask[:, 0, :, :].unsqueeze(1), mask[:, 1, :, :].unsqueeze(1)] + # undo shuffle_BN for ssl mixup + if BN_shuffle: + assert idx_unshuffle_BN is not None and idx_shuffle_BN is not None + x = grad_batch_unshuffle_ddp(x, idx_unshuffle_BN) # 2N index if cross_view + + # shuffle input + if dist_shuffle==True: # cross gpus shuffle + assert idx_shuffle_mix is not None + if cross_view: + N = x.size(0) // 2 + detach_p = random.random() + x_ = x[N:, ...].clone().detach() if detach_p < 0.5 else x[N:, ...] + x = x[:N, ...] if detach_p < 0.5 else x[:N, ...].detach() + x_, _, _ = grad_batch_shuffle_ddp(x_, idx_shuffle_mix) + else: + x_, _, _ = grad_batch_shuffle_ddp(x, idx_shuffle_mix) + else: # within each gpu + if cross_view: + # default: the input image is shuffled + N = x.size(0) // 2 + detach_p = random.random() + x_ = x[N:, ...].clone().detach() if detach_p < 0.5 else x[N:, ...] + x = x[:N, ...] if detach_p < 0.5 else x[:N, ...].detach() + else: + x_ = x[idx_shuffle_mix, :] + assert x.size(3) == mask[0].size(3), \ + "mismatching mask x={}, mask={}.".format(x.size(), mask[0].size()) + mix = x * mask[0] + x_ * mask[1] + + # redo shuffle_BN for ssl mixup + if BN_shuffle: + mix, _, _ = grad_batch_shuffle_ddp(mix, idx_shuffle_BN) # N index + + return mix + + def forward(self, x, mix_args=None): + """ only support mask-based mixup policy """ + # latent space mixup + if mix_args is not None: + assert isinstance(mix_args, dict) + mix_layer = mix_args["layer"] # {0, 1, 2, 3} + if mix_args["BN_shuffle"]: + x, _, idx_unshuffle = grad_batch_shuffle_ddp(x) # 2N index if cross_view + else: + idx_unshuffle = None + else: + mix_layer = -1 + + # input mixup + if mix_layer == 0: + x = self._feature_mixup(x, idx_unshuffle_BN=idx_unshuffle, **mix_args) + # normal resnet stem + if self.deep_stem: + x = self.stem(x) + else: + x = self.conv1(x) + x = self.norm1(x) + x = self.relu(x) + x = self.maxpool(x) + + outs = [] + # stage 1 to 4 + for i, layer_name in enumerate(self.res_layers): + # res block + res_layer = getattr(self, layer_name) + x = res_layer(x) + if i in self.out_indices: + outs.append(x) + if len(self.out_indices) == 1: + return outs + if i+1 == mix_layer: + x = self._feature_mixup(x, idx_unshuffle_BN=idx_unshuffle, **mix_args) + return outs + + +@BACKBONES.register_module() +class ResNet_Mix_CIFAR(ResNet_mmcls): + """ResNet backbone for CIFAR, support ManifoldMix and its variants + v09.13 + + Provide a port to mixup the latent space. 
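+
+    Example:
+        >>> # A minimal usage sketch (not from the original source): with
+        >>> # ``mix_args=None`` the forward pass reduces to a plain CIFAR
+        >>> # ResNet; the 32x32 input size is an assumption.
+        >>> import torch
+        >>> self = ResNet_Mix_CIFAR(depth=18, out_indices=(3,))
+        >>> self.eval()
+        >>> outs = self.forward(torch.rand(1, 3, 32, 32))
+        >>> print(tuple(outs[0].shape))
+        (1, 512, 4, 4)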
+ """ + def __init__(self, depth, deep_stem=False, **kwargs): + super(ResNet_Mix_CIFAR, self).__init__( + depth, deep_stem=deep_stem, **kwargs) + assert not self.deep_stem, 'ResNet_CIFAR do not support deep_stem' + + def _make_stem_layer(self, in_channels, base_channels): + self.conv1 = build_conv_layer( + self.conv_cfg, + in_channels, + base_channels, + kernel_size=3, + stride=1, + padding=1, + bias=False) + self.norm1_name, norm1 = build_norm_layer( + self.norm_cfg, base_channels, postfix=1) + self.add_module(self.norm1_name, norm1) + self.relu = nn.ReLU(inplace=True) + + def _feature_mixup(self, x, mask, dist_shuffle=False, idx_shuffle_mix=None, cross_view=False, + BN_shuffle=False, idx_shuffle_BN=None, idx_unshuffle_BN=None, **kwargs): + """ mixup two feature maps with the pixel-wise mask + + Args: + x, mask (tensor): Input x [N,C,H,W] and mixup mask [N, \*, H, W]. + dist_shuffle (bool): Whether to shuffle cross gpus. + idx_shuffle_mix (tensor): Shuffle indice of [N,1] to generate x_. + cross_view (bool): Whether to view the input x as two views [2N, C, H, W], + which is usually adopted in self-supervised and semi-supervised settings. + BN_shuffle (bool): Whether to do shuffle cross gpus for shuffle_BN. + idx_shuffle_BN (tensor): Shuffle indice to utilize shuffle_BN cross gpus. + idx_unshuffle_BN (tensor): Unshuffle indice for the shuffle_BN (in pair). + """ + # adjust mixup mask + assert mask.dim() == 4 and mask.size(1) <= 2 + if mask.size(1) == 1: + mask = [mask, 1 - mask] + else: + mask = [ + mask[:, 0, :, :].unsqueeze(1), mask[:, 1, :, :].unsqueeze(1)] + # undo shuffle_BN for ssl mixup + if BN_shuffle: + assert idx_unshuffle_BN is not None and idx_shuffle_BN is not None + x = grad_batch_unshuffle_ddp(x, idx_unshuffle_BN) # 2N index if cross_view + + # shuffle input + if dist_shuffle==True: # cross gpus shuffle + assert idx_shuffle_mix is not None + if cross_view: + N = x.size(0) // 2 + detach_p = random.random() + x_ = x[N:, ...].clone().detach() if detach_p < 0.5 else x[N:, ...] + x = x[:N, ...] if detach_p < 0.5 else x[:N, ...].detach() + x_, _, _ = grad_batch_shuffle_ddp(x_, idx_shuffle_mix) + else: + x_, _, _ = grad_batch_shuffle_ddp(x, idx_shuffle_mix) + else: # within each gpu + if cross_view: + # default: the input image is shuffled + N = x.size(0) // 2 + detach_p = random.random() + x_ = x[N:, ...].clone().detach() if detach_p < 0.5 else x[N:, ...] + x = x[:N, ...] 
if detach_p < 0.5 else x[:N, ...].detach() + else: + x_ = x[idx_shuffle_mix, :] + assert x.size(3) == mask[0].size(3), \ + "mismatching mask x={}, mask={}.".format(x.size(), mask[0].size()) + mix = x * mask[0] + x_ * mask[1] + + # redo shuffle_BN for ssl mixup + if BN_shuffle: + mix, _, _ = grad_batch_shuffle_ddp(mix, idx_shuffle_BN) # N index + + return mix + + def forward(self, x, mix_args=None): + """ only support mask-based mixup policy """ + # latent space mixup + if mix_args is not None: + assert isinstance(mix_args, dict) + mix_layer = mix_args["layer"] # {0, 1, 2, 3} + if mix_args["BN_shuffle"]: + x, _, idx_unshuffle = grad_batch_shuffle_ddp(x) # 2N index if cross_view + else: + idx_unshuffle = None + else: + mix_layer = -1 + + # input mixup + if mix_layer == 0: + x = self._feature_mixup(x, idx_unshuffle_BN=idx_unshuffle, **mix_args) + # CIFAR stem + x = self.conv1(x) + x = self.norm1(x) + x = self.relu(x) + + outs = [] + # stage 1 to 4 + for i, layer_name in enumerate(self.res_layers): + # res block + res_layer = getattr(self, layer_name) + x = res_layer(x) + if i in self.out_indices: + outs.append(x) + if len(self.out_indices) == 1: + return outs + if i+1 == mix_layer: + x = self._feature_mixup(x, idx_unshuffle_BN=idx_unshuffle, **mix_args) + return outs diff --git a/openmixup/models/backbones/resnext.py b/openmixup/models/backbones/resnext.py new file mode 100644 index 00000000..a0a0c899 --- /dev/null +++ b/openmixup/models/backbones/resnext.py @@ -0,0 +1,494 @@ +# reference: https://github.com/open-mmlab/mmclassification/tree/master/mmcls/models/backbones +# copy from mmclassification resnext.py +import random +import torch.nn as nn +from mmcv.cnn import build_conv_layer, build_norm_layer + +from ..registry import BACKBONES +from .resnet_mmcls import Bottleneck as _Bottleneck +from .resnet_mmcls import ResLayer, ResNet_mmcls +from ..utils import grad_batch_shuffle_ddp, grad_batch_unshuffle_ddp + + +class Bottleneck(_Bottleneck): + """Bottleneck block for ResNeXt. + + Args: + in_channels (int): Input channels of this block. + out_channels (int): Output channels of this block. + groups (int): Groups of conv2. + width_per_group (int): Width per group of conv2. 64x4d indicates + ``groups=64, width_per_group=4`` and 32x8d indicates + ``groups=32, width_per_group=8``. + stride (int): stride of the block. Default: 1 + dilation (int): dilation of convolution. Default: 1 + downsample (nn.Module): downsample operation on identity branch. + Default: None + style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two + layer is the 3x3 conv layer, otherwise the stride-two layer is + the first 1x1 conv layer. + conv_cfg (dict): dictionary to construct and config conv layer. + Default: None + norm_cfg (dict): dictionary to construct and config norm layer. + Default: dict(type='BN') + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. + """ + + def __init__(self, + in_channels, + out_channels, + base_channels=64, + groups=32, + width_per_group=4, + **kwargs): + super(Bottleneck, self).__init__(in_channels, out_channels, **kwargs) + self.groups = groups + self.width_per_group = width_per_group + + # For ResNet bottleneck, middle channels are determined by expansion + # and out_channels, but for ResNeXt bottleneck, it is determined by + # groups and width_per_group and the stage it is located in. 
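+        # An illustrative sketch of the widening below (example values
+        # assumed): for ResNeXt-50 32x4d, stage 1 has out_channels=256 and
+        # expansion=4, so the inherited mid_channels is 256 // 4 = 64,
+        # which becomes 32 * 4 * 64 // 64 = 128 grouped channels.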
+ if groups != 1: + assert self.mid_channels % base_channels == 0 + self.mid_channels = ( + groups * width_per_group * self.mid_channels // base_channels) + + self.norm1_name, norm1 = build_norm_layer( + self.norm_cfg, self.mid_channels, postfix=1) + self.norm2_name, norm2 = build_norm_layer( + self.norm_cfg, self.mid_channels, postfix=2) + self.norm3_name, norm3 = build_norm_layer( + self.norm_cfg, self.out_channels, postfix=3) + + self.conv1 = build_conv_layer( + self.conv_cfg, + self.in_channels, + self.mid_channels, + kernel_size=1, + stride=self.conv1_stride, + bias=False) + self.add_module(self.norm1_name, norm1) + self.conv2 = build_conv_layer( + self.conv_cfg, + self.mid_channels, + self.mid_channels, + kernel_size=3, + stride=self.conv2_stride, + padding=self.dilation, + dilation=self.dilation, + groups=groups, + bias=False) + + self.add_module(self.norm2_name, norm2) + self.conv3 = build_conv_layer( + self.conv_cfg, + self.mid_channels, + self.out_channels, + kernel_size=1, + bias=False) + self.add_module(self.norm3_name, norm3) + + +@BACKBONES.register_module() +class ResNeXt(ResNet_mmcls): + """ResNeXt backbone. + mmclassification version + + Please refer to the `paper `_ for + details. + + Args: + depth (int): Network depth, from {50, 101, 152}. + groups (int): Groups of conv2 in Bottleneck. Default: 32. + width_per_group (int): Width per group of conv2 in Bottleneck. + Default: 4. + in_channels (int): Number of input image channels. Default: 3. + stem_channels (int): Output channels of the stem layer. Default: 64. + num_stages (int): Stages of the network. Default: 4. + strides (Sequence[int]): Strides of the first block of each stage. + Default: ``(1, 2, 2, 2)``. + dilations (Sequence[int]): Dilation of each stage. + Default: ``(1, 1, 1, 1)``. + out_indices (Sequence[int]): Output from which stages. If only one + stage is specified, a single tensor (feature map) is returned, + otherwise multiple stages are specified, a tuple of tensors will + be returned. Default: ``(3, )``. + style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two + layer is the 3x3 conv layer, otherwise the stride-two layer is + the first 1x1 conv layer. + deep_stem (bool): Replace 7x7 conv in input stem with 3 3x3 conv. + Default: False. + avg_down (bool): Use AvgPool instead of stride conv when + downsampling in the bottleneck. Default: False. + frozen_stages (int): Stages to be frozen (stop grad and set eval mode). + -1 means not freezing any parameters. Default: -1. + conv_cfg (dict | None): The config dict for conv layers. Default: None. + norm_cfg (dict): The config dict for norm layers. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Default: False. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + zero_init_residual (bool): Whether to use zero init for last norm layer + in resblocks to let them behave as identity. Default: True. 
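+
+    Example:
+        >>> # A minimal usage sketch (not in the original docstring); the
+        >>> # shapes assume the constructor default ``out_indices=(0, 1,
+        >>> # 2, 3)`` and a 224x224 input.
+        >>> import torch
+        >>> self = ResNeXt(depth=50, groups=32, width_per_group=4)
+        >>> self.eval()
+        >>> level_outputs = self.forward(torch.rand(1, 3, 224, 224))
+        >>> for level_out in level_outputs:
+        ...     print(tuple(level_out.shape))
+        (1, 256, 56, 56)
+        (1, 512, 28, 28)
+        (1, 1024, 14, 14)
+        (1, 2048, 7, 7)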
+ """ + + arch_settings = { + 50: (Bottleneck, (3, 4, 6, 3)), + 101: (Bottleneck, (3, 4, 23, 3)), + 152: (Bottleneck, (3, 8, 36, 3)) + } + + def __init__(self, depth, groups=32, width_per_group=4, **kwargs): + self.groups = groups + self.width_per_group = width_per_group + super(ResNeXt, self).__init__(depth, **kwargs) + + def make_res_layer(self, **kwargs): + return ResLayer( + groups=self.groups, + width_per_group=self.width_per_group, + base_channels=self.base_channels, + **kwargs) + + +@BACKBONES.register_module() +class ResNeXt_CIFAR(ResNeXt): + """ResNeXt backbone for CIFAR. + + Compared to standard ResNeXt, it uses `kernel_size=3` and `stride=1` in + conv1, and does not apply MaxPoolinng after stem. It has been proven to + be more efficient than standard ResNet in other public codebase, e.g., + `https://github.com/kuangliu/pytorch-cifar/blob/master/models/resnet.py`. + + """ + + def __init__(self, depth, deep_stem=False, **kwargs): + super(ResNeXt_CIFAR, self).__init__( + depth, deep_stem=deep_stem, **kwargs) + assert not self.deep_stem, 'ResNeXt_CIFAR do not support deep_stem' + + def _make_stem_layer(self, in_channels, base_channels): + self.conv1 = build_conv_layer( + self.conv_cfg, + in_channels, + base_channels, + kernel_size=3, + stride=1, + padding=1, + bias=False) + self.norm1_name, norm1 = build_norm_layer( + self.norm_cfg, base_channels, postfix=1) + self.add_module(self.norm1_name, norm1) + self.relu = nn.ReLU(inplace=True) + + def forward(self, x): + x = self.conv1(x) + x = self.norm1(x) + x = self.relu(x) + outs = [] + for i, layer_name in enumerate(self.res_layers): + res_layer = getattr(self, layer_name) + x = res_layer(x) + if i in self.out_indices: + outs.append(x) + if len(self.out_indices) == 1: + return outs + return outs + + +@BACKBONES.register_module() +class ResNeXt_Mix(ResNet_mmcls): + """ResNeXt backbone, Supporting ManifoldMix and its variants. + mmclassification version + v09.13 + + *** Provide a port to mixup the latent space *** + Please refer to the `paper `_ for + details. + + Args: + depth (int): Network depth, from {50, 101, 152}. + groups (int): Groups of conv2 in Bottleneck. Default: 32. + width_per_group (int): Width per group of conv2 in Bottleneck. + Default: 4. + in_channels (int): Number of input image channels. Default: 3. + stem_channels (int): Output channels of the stem layer. Default: 64. + num_stages (int): Stages of the network. Default: 4. + strides (Sequence[int]): Strides of the first block of each stage. + Default: ``(1, 2, 2, 2)``. + dilations (Sequence[int]): Dilation of each stage. + Default: ``(1, 1, 1, 1)``. + out_indices (Sequence[int]): Output from which stages. If only one + stage is specified, a single tensor (feature map) is returned, + otherwise multiple stages are specified, a tuple of tensors will + be returned. Default: ``(3, )``. + style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two + layer is the 3x3 conv layer, otherwise the stride-two layer is + the first 1x1 conv layer. + deep_stem (bool): Replace 7x7 conv in input stem with 3 3x3 conv. + Default: False. + avg_down (bool): Use AvgPool instead of stride conv when + downsampling in the bottleneck. Default: False. + frozen_stages (int): Stages to be frozen (stop grad and set eval mode). + -1 means not freezing any parameters. Default: -1. + conv_cfg (dict | None): The config dict for conv layers. Default: None. + norm_cfg (dict): The config dict for norm layers. 
+ norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Default: False. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + zero_init_residual (bool): Whether to use zero init for last norm layer + in resblocks to let them behave as identity. Default: True. + """ + + arch_settings = { + 50: (Bottleneck, (3, 4, 6, 3)), + 101: (Bottleneck, (3, 4, 23, 3)), + 152: (Bottleneck, (3, 8, 36, 3)) + } + + def __init__(self, depth, groups=32, width_per_group=4, **kwargs): + """ ResNeXt init """ + self.groups = groups + self.width_per_group = width_per_group + super(ResNeXt_Mix, self).__init__(depth, **kwargs) + + def make_res_layer(self, **kwargs): + """ ResNeXt res_layer """ + return ResLayer( + groups=self.groups, + width_per_group=self.width_per_group, + base_channels=self.base_channels, + **kwargs) + + def _feature_mixup(self, x, mask, dist_shuffle=False, idx_shuffle_mix=None, cross_view=False, + BN_shuffle=False, idx_shuffle_BN=None, idx_unshuffle_BN=None, **kwargs): + """ mixup two feature maps with the pixel-wise mask + + Args: + x, mask (tensor): Input x [N,C,H,W] and mixup mask [N, \*, H, W]. + dist_shuffle (bool): Whether to shuffle cross gpus. + idx_shuffle_mix (tensor): Shuffle indice of [N,1] to generate x_. + cross_view (bool): Whether to view the input x as two views [2N, C, H, W], + which is usually adopted in self-supervised and semi-supervised settings. + BN_shuffle (bool): Whether to do shuffle cross gpus for shuffle_BN. + idx_shuffle_BN (tensor): Shuffle indice to utilize shuffle_BN cross gpus. + idx_unshuffle_BN (tensor): Unshuffle indice for the shuffle_BN (in pair). + """ + # adjust mixup mask + assert mask.dim() == 4 and mask.size(1) <= 2 + if mask.size(1) == 1: + mask = [mask, 1 - mask] + else: + mask = [ + mask[:, 0, :, :].unsqueeze(1), mask[:, 1, :, :].unsqueeze(1)] + # undo shuffle_BN for ssl mixup + if BN_shuffle: + assert idx_unshuffle_BN is not None and idx_shuffle_BN is not None + x = grad_batch_unshuffle_ddp(x, idx_unshuffle_BN) # 2N index if cross_view + + # shuffle input + if dist_shuffle==True: # cross gpus shuffle + assert idx_shuffle_mix is not None + if cross_view: + N = x.size(0) // 2 + detach_p = random.random() + x_ = x[N:, ...].clone().detach() if detach_p < 0.5 else x[N:, ...] + x = x[:N, ...] if detach_p < 0.5 else x[:N, ...].detach() + x_, _, _ = grad_batch_shuffle_ddp(x_, idx_shuffle_mix) + else: + x_, _, _ = grad_batch_shuffle_ddp(x, idx_shuffle_mix) + else: # within each gpu + if cross_view: + # default: the input image is shuffled + N = x.size(0) // 2 + detach_p = random.random() + x_ = x[N:, ...].clone().detach() if detach_p < 0.5 else x[N:, ...] + x = x[:N, ...] 
if detach_p < 0.5 else x[:N, ...].detach() + else: + x_ = x[idx_shuffle_mix, :] + assert x.size(3) == mask[0].size(3), \ + "mismatching mask x={}, mask={}.".format(x.size(), mask[0].size()) + mix = x * mask[0] + x_ * mask[1] + + # redo shuffle_BN for ssl mixup + if BN_shuffle: + mix, _, _ = grad_batch_shuffle_ddp(mix, idx_shuffle_BN) # N index + + return mix + + def forward(self, x, mix_args=None): + """ only support mask-based mixup policy """ + # latent space mixup + if mix_args is not None: + assert isinstance(mix_args, dict) + mix_layer = mix_args["layer"] # {0, 1, 2, 3} + if mix_args["BN_shuffle"]: + x, _, idx_unshuffle = grad_batch_shuffle_ddp(x) # 2N index if cross_view + else: + idx_unshuffle = None + else: + mix_layer = -1 + + # input mixup + if mix_layer == 0: + x = self._feature_mixup(x, idx_unshuffle_BN=idx_unshuffle, **mix_args) + # normal resnet stem + if self.deep_stem: + x = self.stem(x) + else: + x = self.conv1(x) + x = self.norm1(x) + x = self.relu(x) + x = self.maxpool(x) + + outs = [] + # stage 1 to 4 + for i, layer_name in enumerate(self.res_layers): + # res block + res_layer = getattr(self, layer_name) + x = res_layer(x) + if i in self.out_indices: + outs.append(x) + if len(self.out_indices) == 1: + return outs + if i+1 == mix_layer: + x = self._feature_mixup(x, idx_unshuffle_BN=idx_unshuffle, **mix_args) + return outs + + +@BACKBONES.register_module() +class ResNeXt_CIFAR_Mix(ResNet_mmcls): + """ResNeXt backbone for CIFAR, support ManifoldMix and its variants + v09.13 + + Provide a port to mixup the latent space. + """ + + arch_settings = { + 50: (Bottleneck, (3, 4, 6, 3)), + 101: (Bottleneck, (3, 4, 23, 3)), + 152: (Bottleneck, (3, 8, 36, 3)) + } + + def __init__(self, depth, groups=32, width_per_group=4, deep_stem=False, **kwargs): + self.groups = groups + self.width_per_group = width_per_group + super(ResNeXt_CIFAR_Mix, self).__init__( + depth, deep_stem=deep_stem, **kwargs) + assert not self.deep_stem, 'ResNet_CIFAR do not support deep_stem' + + def _make_stem_layer(self, in_channels, base_channels): + self.conv1 = build_conv_layer( + self.conv_cfg, + in_channels, + base_channels, + kernel_size=3, + stride=1, + padding=1, + bias=False) + self.norm1_name, norm1 = build_norm_layer( + self.norm_cfg, base_channels, postfix=1) + self.add_module(self.norm1_name, norm1) + self.relu = nn.ReLU(inplace=True) + + def make_res_layer(self, **kwargs): + return ResLayer( + groups=self.groups, + width_per_group=self.width_per_group, + base_channels=self.base_channels, + **kwargs) + + def _feature_mixup(self, x, mask, dist_shuffle=False, idx_shuffle_mix=None, cross_view=False, + BN_shuffle=False, idx_shuffle_BN=None, idx_unshuffle_BN=None, **kwargs): + """ mixup two feature maps with the pixel-wise mask + + Args: + x, mask (tensor): Input x [N,C,H,W] and mixup mask [N, \*, H, W]. + dist_shuffle (bool): Whether to shuffle cross gpus. + idx_shuffle_mix (tensor): Shuffle indice of [N,1] to generate x_. + cross_view (bool): Whether to view the input x as two views [2N, C, H, W], + which is usually adopted in self-supervised and semi-supervised settings. + BN_shuffle (bool): Whether to do shuffle cross gpus for shuffle_BN. + idx_shuffle_BN (tensor): Shuffle indice to utilize shuffle_BN cross gpus. + idx_unshuffle_BN (tensor): Unshuffle indice for the shuffle_BN (in pair). 
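+
+        Example:
+            >>> # A sketch of the within-GPU path (shapes are assumed; the
+            >>> # distributed branches require DDP and are not shown).
+            >>> import torch
+            >>> self = ResNeXt_CIFAR_Mix(depth=50)
+            >>> x = torch.rand(4, 64, 8, 8)
+            >>> mask = torch.full((4, 1, 8, 8), 0.7)  # lam = 0.7
+            >>> idx = torch.randperm(4)
+            >>> mix = self._feature_mixup(x, mask, idx_shuffle_mix=idx)
+            >>> tuple(mix.shape)
+            (4, 64, 8, 8)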
+ """ + # adjust mixup mask + assert mask.dim() == 4 and mask.size(1) <= 2 + if mask.size(1) == 1: + mask = [mask, 1 - mask] + else: + mask = [ + mask[:, 0, :, :].unsqueeze(1), mask[:, 1, :, :].unsqueeze(1)] + # undo shuffle_BN for ssl mixup + if BN_shuffle: + assert idx_unshuffle_BN is not None and idx_shuffle_BN is not None + x = grad_batch_unshuffle_ddp(x, idx_unshuffle_BN) # 2N index if cross_view + + # shuffle input + if dist_shuffle==True: # cross gpus shuffle + assert idx_shuffle_mix is not None + if cross_view: + N = x.size(0) // 2 + detach_p = random.random() + x_ = x[N:, ...].clone().detach() if detach_p < 0.5 else x[N:, ...] + x = x[:N, ...] if detach_p < 0.5 else x[:N, ...].detach() + x_, _, _ = grad_batch_shuffle_ddp(x_, idx_shuffle_mix) + else: + x_, _, _ = grad_batch_shuffle_ddp(x, idx_shuffle_mix) + else: # within each gpu + if cross_view: + # default: the input image is shuffled + N = x.size(0) // 2 + detach_p = random.random() + x_ = x[N:, ...].clone().detach() if detach_p < 0.5 else x[N:, ...] + x = x[:N, ...] if detach_p < 0.5 else x[:N, ...].detach() + else: + x_ = x[idx_shuffle_mix, :] + assert x.size(3) == mask[0].size(3), \ + "mismatching mask x={}, mask={}.".format(x.size(), mask[0].size()) + mix = x * mask[0] + x_ * mask[1] + + # redo shuffle_BN for ssl mixup + if BN_shuffle: + mix, _, _ = grad_batch_shuffle_ddp(mix, idx_shuffle_BN) # N index + + return mix + + def forward(self, x, mix_args=None): + """ only support mask-based mixup policy """ + # latent space mixup + if mix_args is not None: + assert isinstance(mix_args, dict) + mix_layer = mix_args["layer"] # {0, 1, 2, 3} + if mix_args["BN_shuffle"]: + x, _, idx_unshuffle = grad_batch_shuffle_ddp(x) # 2N index if cross_view + else: + idx_unshuffle = None + else: + mix_layer = -1 + + # input mixup + if mix_layer == 0: + x = self._feature_mixup(x, idx_unshuffle_BN=idx_unshuffle, **mix_args) + # CIFAR stem + x = self.conv1(x) + x = self.norm1(x) + x = self.relu(x) + + outs = [] + # stage 1 to 4 + for i, layer_name in enumerate(self.res_layers): + # res block + res_layer = getattr(self, layer_name) + x = res_layer(x) + if i in self.out_indices: + outs.append(x) + if len(self.out_indices) == 1: + return outs + if i+1 == mix_layer: + x = self._feature_mixup(x, idx_unshuffle_BN=idx_unshuffle, **mix_args) + return outs diff --git a/openmixup/models/backbones/seresnet.py b/openmixup/models/backbones/seresnet.py new file mode 100644 index 00000000..30f5f717 --- /dev/null +++ b/openmixup/models/backbones/seresnet.py @@ -0,0 +1,223 @@ +# reference: https://github.com/open-mmlab/mmclassification/tree/master/mmcls/models/backbones +# copy from mmclassification seresnet.py +import mmcv +import torch.nn as nn +from mmcv.cnn import ConvModule, build_conv_layer, build_norm_layer +import torch.utils.checkpoint as cp + +from ..registry import BACKBONES +from .resnet_mmcls import Bottleneck, ResLayer, ResNet_mmcls + + +class SELayer(nn.Module): + """Squeeze-and-Excitation Module. + + Args: + channels (int): The input (and output) channels of the SE layer. + ratio (int): Squeeze ratio in SELayer, the intermediate channel will be + ``int(channels/ratio)``. Default: 16. + conv_cfg (None or dict): Config dict for convolution layer. + Default: None, which means using conv2d. + act_cfg (dict or Sequence[dict]): Config dict for activation layer. + If act_cfg is a dict, two activation layers will be configurated + by this dict. 
If act_cfg is a sequence of dicts, the first + activation layer will be configurated by the first dict and the + second activation layer will be configurated by the second dict. + Default: (dict(type='ReLU'), dict(type='Sigmoid')) + """ + + def __init__(self, + channels, + ratio=16, + conv_cfg=None, + act_cfg=(dict(type='ReLU'), dict(type='Sigmoid'))): + super(SELayer, self).__init__() + if isinstance(act_cfg, dict): + act_cfg = (act_cfg, act_cfg) + assert len(act_cfg) == 2 + assert mmcv.is_tuple_of(act_cfg, dict) + self.global_avgpool = nn.AdaptiveAvgPool2d(1) + self.conv1 = ConvModule( + in_channels=channels, + out_channels=int(channels / ratio), + kernel_size=1, + stride=1, + conv_cfg=conv_cfg, + act_cfg=act_cfg[0]) + self.conv2 = ConvModule( + in_channels=int(channels / ratio), + out_channels=channels, + kernel_size=1, + stride=1, + conv_cfg=conv_cfg, + act_cfg=act_cfg[1]) + + def forward(self, x): + out = self.global_avgpool(x) + out = self.conv1(out) + out = self.conv2(out) + return x * out + + +class SEBottleneck(Bottleneck): + """SEBottleneck block for SEResNet. + + Args: + in_channels (int): The input channels of the SEBottleneck block. + out_channels (int): The output channel of the SEBottleneck block. + se_ratio (int): Squeeze ratio in SELayer. Default: 16 + """ + + def __init__(self, in_channels, out_channels, se_ratio=16, **kwargs): + super(SEBottleneck, self).__init__(in_channels, out_channels, **kwargs) + self.se_layer = SELayer(out_channels, ratio=se_ratio) + + def forward(self, x): + + def _inner_forward(x): + identity = x + + out = self.conv1(x) + out = self.norm1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.norm2(out) + out = self.relu(out) + + out = self.conv3(out) + out = self.norm3(out) + + out = self.se_layer(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + + return out + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + + out = self.relu(out) + + return out + + +@BACKBONES.register_module() +class SEResNet(ResNet_mmcls): + """SEResNet backbone. + + Please refer to the `paper `_ for + details. + + Args: + depth (int): Network depth, from {50, 101, 152}. + se_ratio (int): Squeeze ratio in SELayer. Default: 16. + in_channels (int): Number of input image channels. Default: 3. + stem_channels (int): Output channels of the stem layer. Default: 64. + num_stages (int): Stages of the network. Default: 4. + strides (Sequence[int]): Strides of the first block of each stage. + Default: ``(1, 2, 2, 2)``. + dilations (Sequence[int]): Dilation of each stage. + Default: ``(1, 1, 1, 1)``. + out_indices (Sequence[int]): Output from which stages. If only one + stage is specified, a single tensor (feature map) is returned, + otherwise multiple stages are specified, a tuple of tensors will + be returned. Default: ``(3, )``. + style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two + layer is the 3x3 conv layer, otherwise the stride-two layer is + the first 1x1 conv layer. + deep_stem (bool): Replace 7x7 conv in input stem with 3 3x3 conv. + Default: False. + avg_down (bool): Use AvgPool instead of stride conv when + downsampling in the bottleneck. Default: False. + frozen_stages (int): Stages to be frozen (stop grad and set eval mode). + -1 means not freezing any parameters. Default: -1. + conv_cfg (dict | None): The config dict for conv layers. Default: None. + norm_cfg (dict): The config dict for norm layers. 
+        norm_eval (bool): Whether to set norm layers to eval mode, namely,
+            freeze running stats (mean and var). Note: Effect on Batch Norm
+            and its variants only. Default: False.
+        with_cp (bool): Use checkpoint or not. Using checkpoint will save some
+            memory while slowing down the training speed. Default: False.
+        zero_init_residual (bool): Whether to use zero init for last norm layer
+            in resblocks to let them behave as identity. Default: True.
+
+    Example:
+        >>> from mmcls.models import SEResNet
+        >>> import torch
+        >>> self = SEResNet(depth=50)
+        >>> self.eval()
+        >>> inputs = torch.rand(1, 3, 224, 224)
+        >>> level_outputs = self.forward(inputs)
+        >>> for level_out in level_outputs:
+        ...     print(tuple(level_out.shape))
+        (1, 256, 56, 56)
+        (1, 512, 28, 28)
+        (1, 1024, 14, 14)
+        (1, 2048, 7, 7)
+    """
+
+    arch_settings = {
+        50: (SEBottleneck, (3, 4, 6, 3)),
+        101: (SEBottleneck, (3, 4, 23, 3)),
+        152: (SEBottleneck, (3, 8, 36, 3))
+    }
+
+    def __init__(self, depth, se_ratio=16, **kwargs):
+        if depth not in self.arch_settings:
+            raise KeyError(f'invalid depth {depth} for SEResNet')
+        self.se_ratio = se_ratio
+        super(SEResNet, self).__init__(depth, **kwargs)
+
+    def make_res_layer(self, **kwargs):
+        return ResLayer(se_ratio=self.se_ratio, **kwargs)
+
+
+@BACKBONES.register_module()
+class SEResNet_CIFAR(SEResNet):
+    """SEResNet backbone for CIFAR.
+
+    Compared to standard SEResNet, it uses `kernel_size=3` and `stride=1` in
+    conv1, and does not apply MaxPooling after the stem. This CIFAR-style
+    stem is the common practice in public codebases, e.g.,
+    `https://github.com/kuangliu/pytorch-cifar/blob/master/models/resnet.py`.
+    """
+
+    def __init__(self, depth, deep_stem=False, **kwargs):
+        super(SEResNet_CIFAR, self).__init__(
+            depth, deep_stem=deep_stem, **kwargs)
+        assert not self.deep_stem, 'SEResNet_CIFAR does not support deep_stem'
+
+    def _make_stem_layer(self, in_channels, base_channels):
+        self.conv1 = build_conv_layer(
+            self.conv_cfg,
+            in_channels,
+            base_channels,
+            kernel_size=3,
+            stride=1,
+            padding=1,
+            bias=False)
+        self.norm1_name, norm1 = build_norm_layer(
+            self.norm_cfg, base_channels, postfix=1)
+        self.add_module(self.norm1_name, norm1)
+        self.relu = nn.ReLU(inplace=True)
+
+    def forward(self, x):
+        x = self.conv1(x)
+        x = self.norm1(x)
+        x = self.relu(x)
+        outs = []
+        for i, layer_name in enumerate(self.res_layers):
+            res_layer = getattr(self, layer_name)
+            x = res_layer(x)
+            if i in self.out_indices:
+                outs.append(x)
+                if len(self.out_indices) == 1:
+                    return outs
+        return outs
diff --git a/openmixup/models/backbones/shufflenet_v2.py b/openmixup/models/backbones/shufflenet_v2.py
new file mode 100644
index 00000000..d49d4ded
--- /dev/null
+++ b/openmixup/models/backbones/shufflenet_v2.py
@@ -0,0 +1,299 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import torch
+import torch.nn as nn
+import torch.utils.checkpoint as cp
+from mmcv.cnn import ConvModule, constant_init, normal_init
+# from mmcv.runner import BaseModule
+from torch.nn.modules.batchnorm import _BatchNorm
+
+from ..utils import channel_shuffle
+from ..builder import BACKBONES
+from .base_backbone import BaseBackbone
+
+
+# class InvertedResidual(BaseModule):
+class InvertedResidual(nn.Module):
+    """InvertedResidual block for ShuffleNetV2 backbone.
+
+    Args:
+        in_channels (int): The input channels of the block.
+        out_channels (int): The output channels of the block.
+        stride (int): Stride of the 3x3 convolution layer.
            Default: 1
+        conv_cfg (dict, optional): Config dict for convolution layer.
+            Default: None, which means using conv2d.
+        norm_cfg (dict): Config dict for normalization layer.
+            Default: dict(type='BN').
+        act_cfg (dict): Config dict for activation layer.
+            Default: dict(type='ReLU').
+        with_cp (bool): Use checkpoint or not. Using checkpoint will save some
+            memory while slowing down the training speed. Default: False.
+
+    Returns:
+        Tensor: The output tensor.
+    """
+
+    def __init__(self,
+                 in_channels,
+                 out_channels,
+                 stride=1,
+                 conv_cfg=None,
+                 norm_cfg=dict(type='BN'),
+                 act_cfg=dict(type='ReLU'),
+                 with_cp=False,
+                 init_cfg=None):
+        # the parent nn.Module takes no constructor arguments (BaseModule is
+        # commented out above), so init_cfg must not be forwarded to super();
+        # it is kept in the signature only for config compatibility
+        super(InvertedResidual, self).__init__()
+        self.stride = stride
+        self.with_cp = with_cp
+
+        branch_features = out_channels // 2
+        if self.stride == 1:
+            assert in_channels == branch_features * 2, (
+                f'in_channels ({in_channels}) should equal '
+                f'branch_features * 2 ({branch_features * 2}) '
+                'when stride is 1')
+
+        if in_channels != branch_features * 2:
+            assert self.stride != 1, (
+                f'stride ({self.stride}) should not equal 1 when '
+                f'in_channels != branch_features * 2')
+
+        if self.stride > 1:
+            self.branch1 = nn.Sequential(
+                ConvModule(
+                    in_channels,
+                    in_channels,
+                    kernel_size=3,
+                    stride=self.stride,
+                    padding=1,
+                    groups=in_channels,
+                    conv_cfg=conv_cfg,
+                    norm_cfg=norm_cfg,
+                    act_cfg=None),
+                ConvModule(
+                    in_channels,
+                    branch_features,
+                    kernel_size=1,
+                    stride=1,
+                    padding=0,
+                    conv_cfg=conv_cfg,
+                    norm_cfg=norm_cfg,
+                    act_cfg=act_cfg),
+            )
+
+        self.branch2 = nn.Sequential(
+            ConvModule(
+                in_channels if (self.stride > 1) else branch_features,
+                branch_features,
+                kernel_size=1,
+                stride=1,
+                padding=0,
+                conv_cfg=conv_cfg,
+                norm_cfg=norm_cfg,
+                act_cfg=act_cfg),
+            ConvModule(
+                branch_features,
+                branch_features,
+                kernel_size=3,
+                stride=self.stride,
+                padding=1,
+                groups=branch_features,
+                conv_cfg=conv_cfg,
+                norm_cfg=norm_cfg,
+                act_cfg=None),
+            ConvModule(
+                branch_features,
+                branch_features,
+                kernel_size=1,
+                stride=1,
+                padding=0,
+                conv_cfg=conv_cfg,
+                norm_cfg=norm_cfg,
+                act_cfg=act_cfg))
+
+    def forward(self, x):
+
+        def _inner_forward(x):
+            if self.stride > 1:
+                out = torch.cat((self.branch1(x), self.branch2(x)), dim=1)
+            else:
+                x1, x2 = x.chunk(2, dim=1)
+                out = torch.cat((x1, self.branch2(x2)), dim=1)
+
+            out = channel_shuffle(out, 2)
+
+            return out
+
+        if self.with_cp and x.requires_grad:
+            out = cp.checkpoint(_inner_forward, x)
+        else:
+            out = _inner_forward(x)
+
+        return out
+
+
+@BACKBONES.register_module()
+class ShuffleNetV2(BaseBackbone):
+    """ShuffleNetV2 backbone.
+
+    Args:
+        widen_factor (float): Width multiplier - adjusts the number of
+            channels in each layer by this amount. Default: 1.0.
+        out_indices (Sequence[int]): Output from which stages.
+            Default: (3, ).
+        frozen_stages (int): Stages to be frozen (all param fixed).
+            Default: -1, which means not freezing any parameters.
+        conv_cfg (dict, optional): Config dict for convolution layer.
+            Default: None, which means using conv2d.
+        norm_cfg (dict): Config dict for normalization layer.
+            Default: dict(type='BN').
+        act_cfg (dict): Config dict for activation layer.
+            Default: dict(type='ReLU').
+        norm_eval (bool): Whether to set norm layers to eval mode, namely,
+            freeze running stats (mean and var). Note: Effect on Batch Norm
+            and its variants only. Default: False.
+        with_cp (bool): Use checkpoint or not. Using checkpoint will save some
+            memory while slowing down the training speed. Default: False.
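+
+    Example:
+        >>> # A minimal usage sketch (not in the original docstring); with
+        >>> # the default ``out_indices=(3, )`` only the final 1x1 conv
+        >>> # output is returned; a 224x224 input is assumed.
+        >>> import torch
+        >>> self = ShuffleNetV2(widen_factor=1.0)
+        >>> self.eval()
+        >>> outs = self.forward(torch.rand(1, 3, 224, 224))
+        >>> print(tuple(outs[0].shape))
+        (1, 1024, 7, 7)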
+ """ + + def __init__(self, + widen_factor=1.0, + out_indices=(3, ), + frozen_stages=-1, + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + norm_eval=False, + with_cp=False, + init_cfg=None): + super(ShuffleNetV2, self).__init__(init_cfg) + self.stage_blocks = [4, 8, 4] + for index in out_indices: + if index not in range(0, 4): + raise ValueError('the item in out_indices must in ' + f'range(0, 4). But received {index}') + + if frozen_stages not in range(-1, 4): + raise ValueError('frozen_stages must be in range(-1, 4). ' + f'But received {frozen_stages}') + self.out_indices = out_indices + self.frozen_stages = frozen_stages + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + self.norm_eval = norm_eval + self.with_cp = with_cp + + if widen_factor == 0.5: + channels = [48, 96, 192, 1024] + elif widen_factor == 1.0: + channels = [116, 232, 464, 1024] + elif widen_factor == 1.5: + channels = [176, 352, 704, 1024] + elif widen_factor == 2.0: + channels = [244, 488, 976, 2048] + else: + raise ValueError('widen_factor must be in [0.5, 1.0, 1.5, 2.0]. ' + f'But received {widen_factor}') + + self.in_channels = 24 + self.conv1 = ConvModule( + in_channels=3, + out_channels=self.in_channels, + kernel_size=3, + stride=2, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + + self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + + self.layers = nn.ModuleList() + for i, num_blocks in enumerate(self.stage_blocks): + layer = self._make_layer(channels[i], num_blocks) + self.layers.append(layer) + + output_channels = channels[-1] + self.layers.append( + ConvModule( + in_channels=self.in_channels, + out_channels=output_channels, + kernel_size=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg)) + + def _make_layer(self, out_channels, num_blocks): + """Stack blocks to make a layer. + + Args: + out_channels (int): out_channels of the block. + num_blocks (int): number of blocks. + """ + layers = [] + for i in range(num_blocks): + stride = 2 if i == 0 else 1 + layers.append( + InvertedResidual( + in_channels=self.in_channels, + out_channels=out_channels, + stride=stride, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg, + with_cp=self.with_cp)) + self.in_channels = out_channels + + return nn.Sequential(*layers) + + def _freeze_stages(self): + if self.frozen_stages >= 0: + for param in self.conv1.parameters(): + param.requires_grad = False + + for i in range(self.frozen_stages): + m = self.layers[i] + m.eval() + for param in m.parameters(): + param.requires_grad = False + + def init_weights(self): + super(ShuffleNetV2, self).init_weights() + + if (isinstance(self.init_cfg, dict) + and self.init_cfg['type'] == 'Pretrained'): + # Suppress default init if use pretrained model. 
+            return
+
+        for name, m in self.named_modules():
+            if isinstance(m, nn.Conv2d):
+                if 'conv1' in name:
+                    normal_init(m, mean=0, std=0.01)
+                else:
+                    normal_init(m, mean=0, std=1.0 / m.weight.shape[1])
+            elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
+                # constant_init expects the module itself, not its weight
+                constant_init(m, val=1, bias=0.0001)
+                if isinstance(m, _BatchNorm):
+                    if m.running_mean is not None:
+                        nn.init.constant_(m.running_mean, 0)
+
+    def forward(self, x):
+        x = self.conv1(x)
+        x = self.maxpool(x)
+
+        outs = []
+        for i, layer in enumerate(self.layers):
+            x = layer(x)
+            if i in self.out_indices:
+                outs.append(x)
+                if len(self.out_indices) == 1:
+                    return outs
+        return outs
+
+    def train(self, mode=True):
+        super(ShuffleNetV2, self).train(mode)
+        self._freeze_stages()
+        if mode and self.norm_eval:
+            for m in self.modules():
+                if isinstance(m, nn.BatchNorm2d):
+                    m.eval()
diff --git a/openmixup/models/backbones/wide_resnet.py b/openmixup/models/backbones/wide_resnet.py
new file mode 100644
index 00000000..9499c51b
--- /dev/null
+++ b/openmixup/models/backbones/wide_resnet.py
@@ -0,0 +1,302 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from mmcv.runner import load_checkpoint
+from mmcv.cnn import kaiming_init, constant_init
+import random
+from openmixup.utils import get_root_logger
+from openmixup.models.utils.gather_layer import grad_batch_unshuffle_ddp
+from ..registry import BACKBONES
+from .base_backbone import BaseBackbone
+from ..utils import grad_batch_shuffle_ddp
+
+
+class BasicBlock(nn.Module):
+    """BasicBlock for Wide ResNet.
+
+    Args:
+        in_channels (int): Input channels of this block.
+        out_channels (int): Output channels of this block.
+        stride (int): Stride of the block. Default: 1.
+        drop_rate (float): Dropout ratio in the residual block. Default: 0.
+        activate_before_residual (bool): Since the first conv in WRN is not
+            followed by BN and ReLU, block1 reuses its bn1 and relu1 to make
+            up the ``conv1-bn1-relu1`` structure. Default: False.
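+
+    Example:
+        >>> # A sketch with assumed shapes: a stride-2 block halves the
+        >>> # spatial size and projects the identity via a 1x1 conv.
+        >>> import torch
+        >>> blk = BasicBlock(16, 32, stride=2)
+        >>> tuple(blk(torch.rand(2, 16, 32, 32)).shape)
+        (2, 32, 16, 16)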
+ """ + + def __init__(self, + in_channels, + out_channels, + stride, + drop_rate=0.0, + activate_before_residual=False): + super(BasicBlock, self).__init__() + self.bn1 = nn.BatchNorm2d(in_channels, momentum=0.001, eps=0.001) + self.relu1 = nn.LeakyReLU(negative_slope=0.1, inplace=False) + self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=stride, + padding=1, bias=True) + self.bn2 = nn.BatchNorm2d(out_channels, momentum=0.001, eps=0.001) + self.relu2 = nn.LeakyReLU(negative_slope=0.1, inplace=False) + self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, + padding=1, bias=True) + self.drop_rate = drop_rate + self.equalInOut = (in_channels == out_channels) + self.convShortcut = None + if stride != 1 or not self.equalInOut: + self.convShortcut = nn.Conv2d(in_channels, out_channels, + kernel_size=1, stride=stride, padding=0, bias=True) + self.activate_before_residual = activate_before_residual + + def forward(self, x): + if not self.equalInOut and self.activate_before_residual == True: + x = self.relu1(self.bn1(x)) + out = self.relu2(self.bn2(self.conv1(x))) + else: + out = self.relu1(self.bn1(x)) + out = self.relu2(self.bn2(self.conv1(out))) + if self.drop_rate > 0: + out = F.dropout(out, p=self.drop_rate, training=self.training) + out = self.conv2(out) + return torch.add(x if self.equalInOut else self.convShortcut(x), out) + + +class NetworkBlock(nn.Module): + """" Network Block (stage) in Wide ResNet """ + + def __init__(self, + num_layers, + in_channels, + out_channels, + block, + stride, + drop_rate=0.0, + activate_before_residual=False): + super(NetworkBlock, self).__init__() + layers = [] + for i in range(int(num_layers)): + layers.append(block(i == 0 and in_channels or out_channels, out_channels, + i == 0 and stride or 1, drop_rate, activate_before_residual)) + self.layer = nn.Sequential(*layers) + + def forward(self, x): + return self.layer(x) + + +@BACKBONES.register_module() +class WideResNet(BaseBackbone): + """Wide Residual Networks backbone. + + Please refer to the `paper `_ for + details. + https://github.com/szagoruyko/wide-residual-networks + + Args: + first_stride (int): Stride of the first 3x3 conv. Default: 1. + in_channels (int): Number of input image channels. Default: 3. + depth (int): Network depth, from {10, 28, 37}, total 3 stages. + widen_factor (int): Width of each stage convolution block. Default: 2. + drop_rate (float): Dropout ratio in residual blocks. Default: 0. + out_indices (Sequence[int]): Output from which stages. If only one + stage is specified, a single tensor (feature map) is returned, + otherwise multiple stages are specified, a tuple of tensors will + be returned. Default: ``(2, )``. + frozen_stages (int): Stages to be frozen (stop grad and set eval mode). + -1 means not freezing any parameters. Default: -1. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Default: False. 
+ """ + + def __init__(self, + first_stride, + in_channels=3, + depth=28, + widen_factor=2, + drop_rate=0.0, + out_indices=(0, 1, 2,), + frozen_stages=-1, + norm_eval=False): + super(WideResNet, self).__init__() + channels = [16, 16 * widen_factor, 32 * widen_factor, 64 * widen_factor] + assert ((depth - 4) % 6 == 0) + n = (depth - 4) / 6 + + # 1st conv before any network block, 3x3 + self.conv1 = nn.Conv2d(in_channels, channels[0], kernel_size=3, stride=1, + padding=1, bias=True) + # 1st block + self.block1 = NetworkBlock( + n, channels[0], channels[1], BasicBlock, first_stride, + drop_rate, activate_before_residual=True) + # 2nd block + self.block2 = NetworkBlock( + n, channels[1], channels[2], BasicBlock, 2, drop_rate) + # 3rd block + self.block3 = NetworkBlock( + n, channels[2], channels[3], BasicBlock, 2, drop_rate) + # original: global average pooling and classifier (in head) + self.bn1 = nn.BatchNorm2d(channels[3], momentum=0.001, eps=0.001) + self.relu = nn.LeakyReLU(negative_slope=0.1, inplace=False) + self.channels = channels[3] + self.out_indices = out_indices + self.frozen_stages = frozen_stages + self.norm_eval = norm_eval + + self._freeze_stages() + + def init_weights(self, pretrained=None): + if isinstance(pretrained, str): + logger = get_root_logger() + load_checkpoint(self, pretrained, strict=True, logger=logger) + elif pretrained is None: + for m in self.modules(): + if isinstance(m, nn.Conv2d): + kaiming_init(m, mode='fan_out', nonlinearity='leaky_relu') + elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)): + constant_init(m, 1) + else: + raise TypeError('pretrained must be a str or None') + + def _freeze_stages(self): + if self.frozen_stages >= 0: + for m in [self.conv1]: + for param in m.parameters(): + param.requires_grad = False + for i in range(self.frozen_stages + 1): + m = getattr(self, 'block{}'.format(i+1)) + m.eval() + for param in m.parameters(): + param.requires_grad = False + if self.frozen_stages == 2: + for m in [self.bn1]: + for param in m.parameters(): + param.requires_grad = False + + def forward(self, x): + outs = [] + x = self.conv1(x) + for i in range(3): + block_i = getattr(self, 'block{}'.format(i+1)) + x = block_i(x) + if i == 2: # after block3 + x = self.relu(self.bn1(x)) + # x = F.adaptive_avg_pool2d(x, 1) + # x = x.view(-1, self.channels) # Nxd + if i in self.out_indices: + outs.append(x) + if len(self.out_indices) == 1: + return tuple(outs) + return tuple(outs) + + def train(self, mode=True): + super(WideResNet, self).train(mode) + self._freeze_stages() + if mode and self.norm_eval: + for m in self.modules(): + # trick: eval have effect on BatchNorm only + if isinstance(m, nn.BatchNorm2d): + m.eval() + + +@BACKBONES.register_module() +class WideResNet_Mix(WideResNet): + """Wide-ResNet Support ManifoldMix and its variants + v12.10 + + Provide a port to mixup the latent space. + """ + + def __init__(self, **kwargs): + super(WideResNet_Mix, self).__init__(**kwargs) + + def _feature_mixup(self, x, mask, dist_shuffle=False, idx_shuffle_mix=None, cross_view=False, + BN_shuffle=False, idx_shuffle_BN=None, idx_unshuffle_BN=None, **kwargs): + """ mixup two feature maps with the pixel-wise mask + + Args: + x, mask (tensor): Input x [N,C,H,W] and mixup mask [N, \*, H, W]. + dist_shuffle (bool): Whether to shuffle cross gpus. + idx_shuffle_mix (tensor): Shuffle indice of [N,1] to generate x_. + cross_view (bool): Whether to view the input x as two views [2N, C, H, W], + which is usually adopted in self-supervised and semi-supervised settings. 
+ BN_shuffle (bool): Whether to do shuffle cross gpus for shuffle_BN. + idx_shuffle_BN (tensor): Shuffle indice to utilize shuffle_BN cross gpus. + idx_unshuffle_BN (tensor): Unshuffle indice for the shuffle_BN (in pair). + """ + # adjust mixup mask + assert mask.dim() == 4 and mask.size(1) <= 2 + if mask.size(1) == 1: + mask = [mask, 1 - mask] + else: + mask = [ + mask[:, 0, :, :].unsqueeze(1), mask[:, 1, :, :].unsqueeze(1)] + # undo shuffle_BN for ssl mixup + if BN_shuffle: + assert idx_unshuffle_BN is not None and idx_shuffle_BN is not None + x = grad_batch_unshuffle_ddp(x, idx_unshuffle_BN) # 2N index if cross_view + + # shuffle input + if dist_shuffle==True: # cross gpus shuffle + assert idx_shuffle_mix is not None + if cross_view: + N = x.size(0) // 2 + detach_p = random.random() + x_ = x[N:, ...].clone().detach() if detach_p < 0.5 else x[N:, ...] + x = x[:N, ...] if detach_p < 0.5 else x[:N, ...].detach() + x_, _, _ = grad_batch_shuffle_ddp(x_, idx_shuffle_mix) + else: + x_, _, _ = grad_batch_shuffle_ddp(x, idx_shuffle_mix) + else: # within each gpu + if cross_view: + # default: the input image is shuffled + N = x.size(0) // 2 + detach_p = random.random() + x_ = x[N:, ...].clone().detach() if detach_p < 0.5 else x[N:, ...] + x = x[:N, ...] if detach_p < 0.5 else x[:N, ...].detach() + else: + x_ = x[idx_shuffle_mix, :] + assert x.size(3) == mask[0].size(3), \ + "mismatching mask x={}, mask={}.".format(x.size(), mask[0].size()) + mix = x * mask[0] + x_ * mask[1] + + # redo shuffle_BN for ssl mixup + if BN_shuffle: + mix, _, _ = grad_batch_shuffle_ddp(mix, idx_shuffle_BN) # N index + + return mix + + def forward(self, x, mix_args=None): + """ only support mask-based mixup policy """ + # latent space mixup + if mix_args is not None: + assert isinstance(mix_args, dict) + mix_layer = mix_args["layer"] # {0, 1, 2,} + if mix_args["BN_shuffle"]: + x, _, idx_unshuffle = grad_batch_shuffle_ddp(x) # 2N index if cross_view + else: + idx_unshuffle = None + else: + mix_layer = -1 + + # input mixup + if mix_layer == 0: + x = self._feature_mixup(x, idx_unshuffle_BN=idx_unshuffle, **mix_args) + # normal conv1 + x = self.conv1(x) + + outs = [] + # block 1 to 3 + for i in range(3): + block_i = getattr(self, 'block{}'.format(i+1)) + x = block_i(x) + if i == 2: # after block3 + x = self.relu(self.bn1(x)) + # x = F.adaptive_avg_pool2d(x, 1) + # x = x.view(-1, self.channels) # Nxd + if i in self.out_indices: + outs.append(x) + if len(self.out_indices) == 1: + return tuple(outs) + if i+1 == mix_layer: + x = self._feature_mixup(x, idx_unshuffle_BN=idx_unshuffle, **mix_args) + return tuple(outs) diff --git a/openmixup/models/builder.py b/openmixup/models/builder.py new file mode 100644 index 00000000..434c1cba --- /dev/null +++ b/openmixup/models/builder.py @@ -0,0 +1,56 @@ +from torch import nn + +from openmixup.utils import build_from_cfg +from .registry import (BACKBONES, MODELS, NECKS, HEADS, MEMORIES, LOSSES) + + +def build(cfg, registry, default_args=None): + """Build a module. + + Args: + cfg (dict, list[dict]): The config of modules, it is either a dict + or a list of configs. + registry (:obj:`Registry`): A registry the module belongs to. + default_args (dict, optional): Default arguments to build the module. + Default: None. + + Returns: + nn.Module: A built nn module. 
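+
+    Example:
+        >>> # A sketch with assumed config values: a single dict builds
+        >>> # one module; a list of dicts is built into an nn.Sequential.
+        >>> backbone = build(dict(type='ResNeXt', depth=50), BACKBONES)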
+ """ + if isinstance(cfg, list): + modules = [ + build_from_cfg(cfg_, registry, default_args) for cfg_ in cfg + ] + return nn.Sequential(*modules) + else: + return build_from_cfg(cfg, registry, default_args) + + +def build_backbone(cfg): + """Build backbone.""" + return build(cfg, BACKBONES) + + +def build_neck(cfg): + """Build neck.""" + return build(cfg, NECKS) + + +def build_memory(cfg): + """Build memory.""" + return build(cfg, MEMORIES) + + +def build_head(cfg): + """Build head.""" + return build(cfg, HEADS) + + +def build_loss(cfg): + """Build loss.""" + return build(cfg, LOSSES) + + +def build_model(cfg): + """Build model.""" + return build(cfg, MODELS) diff --git a/openmixup/models/classifiers/__init__.py b/openmixup/models/classifiers/__init__.py new file mode 100644 index 00000000..0f0b578c --- /dev/null +++ b/openmixup/models/classifiers/__init__.py @@ -0,0 +1,11 @@ +from .classification import Classification +from .mixup_classification import MixUpClassification +from .mixup_momentum_V1plus import AutoMixup_V1plus +from .mixup_momentum_V2 import AutoMixup_V2 +from .representation import Representation + + +__all__ = [ + 'Classification', 'Representation', + 'MixUpClassification', 'AutoMixup_V1plus', 'AutoMixup_V2', +] diff --git a/openmixup/models/classifiers/classification.py b/openmixup/models/classifiers/classification.py new file mode 100644 index 00000000..bbd5bbf2 --- /dev/null +++ b/openmixup/models/classifiers/classification.py @@ -0,0 +1,107 @@ +import torch.nn as nn + +from openmixup.utils import print_log + +from .. import builder +from ..registry import MODELS +from ..utils import Sobel + + +@MODELS.register_module +class Classification(nn.Module): + """Simple image classification. + + Args: + backbone (dict): Config dict for module of backbone ConvNet. + with_sobel (bool): Whether to apply a Sobel filter on images. Default: False. + head (dict): Config dict for module of loss functions. Default: None. + pretrained (str, optional): Path to pre-trained weights. Default: None. + """ + + def __init__(self, + backbone, + with_sobel=False, + head=None, + pretrained=None): + super(Classification, self).__init__() + self.with_sobel = with_sobel + if with_sobel: + self.sobel_layer = Sobel() + self.backbone = builder.build_backbone(backbone) + self.head = head + if head is not None: + self.head = builder.build_head(head) + self.init_weights(pretrained=pretrained) + + def init_weights(self, pretrained=None): + """Initialize the weights of model. + + Args: + pretrained (str, optional): Path to pre-trained weights. + Default: None. + """ + if pretrained is not None: + print_log('load model from: {}'.format(pretrained), logger='root') + self.backbone.init_weights(pretrained=pretrained) + if self.head is not None: + self.head.init_weights() + + def forward_backbone(self, img): + """Forward backbone. + + Args: + img (Tensor): Input images of shape (N, C, H, W). + Typically these should be mean centered and std scaled. + + Returns: + tuple[Tensor]: backbone outputs. + """ + if self.with_sobel: + img = self.sobel_layer(img) + x = self.backbone(img) + return x + + def forward_train(self, img, gt_label, **kwargs): + """Forward computation during training. + + Args: + img (Tensor): Input images of shape (N, C, H, W). + Typically these should be mean centered and std scaled. + gt_label (Tensor): Ground-truth labels. + kwargs: Any keyword arguments to be used to forward. + + Returns: + dict[str, Tensor]: A dictionary of loss components. 
+ """ + x = self.forward_backbone(img) + outs = self.head(x) + loss_inputs = (outs, gt_label) + losses = self.head.loss(*loss_inputs) + return losses + + def forward_test(self, img, **kwargs): + x = self.forward_backbone(img) # tuple + outs = self.head(x) + keys = ['head{}'.format(i) for i in range(len(outs))] + out_tensors = [out.cpu() for out in outs] # NxC + return dict(zip(keys, out_tensors)) + + def forward_calibration(self, img, **kwargs): + x = self.backbone(img) + preds_one_k = self.head(x) + return preds_one_k + + def aug_test(self, imgs): + raise NotImplementedError + + def forward(self, img, mode='train', **kwargs): + if mode == 'train': + return self.forward_train(img, **kwargs) + elif mode == 'test': + return self.forward_test(img, **kwargs) + elif mode == 'calibration': + return self.forward_calibration(img, **kwargs) + elif mode == 'extract': + return self.forward_backbone(img) + else: + raise Exception("No such mode: {}".format(mode)) diff --git a/openmixup/models/classifiers/mixup_classification.py b/openmixup/models/classifiers/mixup_classification.py new file mode 100644 index 00000000..affb2606 --- /dev/null +++ b/openmixup/models/classifiers/mixup_classification.py @@ -0,0 +1,155 @@ +import numpy as np +import torch +import torch.nn as nn + +from openmixup.utils import print_log + +from .. import builder +from ..registry import MODELS +from ..utils import cutmix, mixup, saliencymix, resizemix, fmix + + +@MODELS.register_module +class MixUpClassification(nn.Module): + """MixUp classification. + v09.14 + + Args: + backbone (dict): Config dict for module of backbone ConvNet. + head (dict): Config dict for module of loss functions. Default: None. + alpha (float): To sample Beta distribution in MixUp methods. + mix_mode (str): Basice mixUp methods in input space. Default: "mixup". + mix_args (dict): Args for manifoldmix, resizeMix, fmix mode. + pretrained (str, optional): Path to pre-trained weights. Default: None. + """ + + def __init__(self, + backbone, + head=None, + alpha=1.0, + mix_mode="mixup", + mix_args=dict( + manifoldmix=dict(layer=(0, 3)), + resizemix=dict(scope=(0.1, 0.8), use_alpha=False), + fmix=dict(decay_power=3, size=(32,32), max_soft=0., reformulate=False) + ), + pretrained=None): + super(MixUpClassification, self).__init__() + assert mix_mode in ["mixup", "manifoldmix", "cutmix", "saliencymix", "resizemix", "fmix"] + if mix_mode in ["manifoldmix"]: + assert 0 == min(mix_args[mix_mode]["layer"]) and max(mix_args[mix_mode]["layer"]) < 4 + if mix_mode == "resizemix": + assert 0 <= min(mix_args[mix_mode]["scope"]) and max(mix_args[mix_mode]["scope"]) <= 1 + self.backbone = builder.build_backbone(backbone) + self.head = head + self.mix_mode = mix_mode + self.alpha = alpha + self.mix_args = mix_args + if head is not None: + self.head = builder.build_head(head) + self.init_weights(pretrained=pretrained) + + def init_weights(self, pretrained=None): + """Initialize the weights of model. + + Args: + pretrained (str, optional): Path to pre-trained weights. + Default: None. 
+ """ + if pretrained is not None: + print_log('load model from: {}'.format(pretrained), logger='root') + self.backbone.init_weights(pretrained=pretrained) + if self.head is not None: + self.head.init_weights() + + def _manifoldmix(self, img, gt_label): + """ pixel-wise manifoldmix for the latent space mixup backbone """ + # manifoldmix + lam = np.random.beta(self.alpha, self.alpha) + bs = img.size(0) + rand_index = torch.randperm(bs).cuda() + # mixup labels + y_a = gt_label + y_b = gt_label[rand_index] + gt_label = (y_a, y_b, lam) + + _layer = np.random.randint( + min(self.mix_args[self.mix_mode]["layer"]), max(self.mix_args[self.mix_mode]["layer"]), dtype=int) + # generate mixup mask + _mask = None + if img.size(3) > 64: # normal version of resnet + scale_factor = 2**(1 + _layer) if _layer > 0 else 1 + else: # CIFAR version + scale_factor = 2**(_layer - 1) if _layer > 1 else 1 + _mask_size = img.size(3) // scale_factor + _mask = torch.zeros(img.size(0), 1, _mask_size, _mask_size).cuda() + _mask[:] = lam + + return rand_index, _layer, _mask, gt_label + + def forward_train(self, img, gt_label, **kwargs): + """Forward computation during training. + + Args: + img (Tensor): Input images of shape (N, C, H, W). + Typically these should be mean centered and std scaled. + gt_label (Tensor): Ground-truth labels. + kwargs: Any keyword arguments to be used to forward. + + Returns: + dict[str, Tensor]: A dictionary of loss components. + """ + if self.mix_mode not in ["manifoldmix"]: + if self.mix_mode in ["mixup", "cutmix", "saliencymix"]: + img, gt_label = eval(self.mix_mode)(img, gt_label, self.alpha, dist_mode=False) + elif self.mix_mode in ["resizemix", "fmix"]: + mix_args = dict(alpha=self.alpha, dist_mode=False, **self.mix_args[self.mix_mode]) + img, gt_label = eval(self.mix_mode)(img, gt_label, **mix_args) + else: + raise NotImplementedError + x = self.backbone(img) + else: + # manifoldmix + rand_index, _layer, _mask, gt_label = self._manifoldmix(img, gt_label) + + # args for mixup backbone + mix_args = dict( + layer=_layer, cross_view=False, mask=_mask, + BN_shuffle=False, idx_shuffle_BN=None, idx_shuffle_mix=rand_index, dist_shuffle=False) + x = self.backbone(img, mix_args) + + outs = self.head(x) + + loss_inputs = (outs, gt_label) + losses = self.head.loss(*loss_inputs) + return losses + + def forward_test(self, img, **kwargs): + x = self.backbone(img) # tuple + outs = self.head(x) + keys = ['head{}'.format(i) for i in range(len(outs))] + out_tensors = [out.cpu() for out in outs] # NxC + return dict(zip(keys, out_tensors)) + + def forward_calibration(self, img, **kwargs): + img, gt_label = img[0], img[1] + + inputs = (img, False) + x = self.backbone(inputs) + outs = self.head(x) + return outs + + def aug_test(self, imgs): + raise NotImplementedError + + def forward(self, img, mode='train', **kwargs): + if mode == 'train': + return self.forward_train(img, **kwargs) + elif mode == 'test': + return self.forward_test(img, **kwargs) + elif mode == 'calibration': + return self.forward_calibration(img, **kwargs) + elif mode == 'extract': + return self.self.backbone(img) + else: + raise Exception("No such mode: {}".format(mode)) diff --git a/openmixup/models/classifiers/mixup_momentum_V1plus.py b/openmixup/models/classifiers/mixup_momentum_V1plus.py new file mode 100644 index 00000000..2c6a9f1c --- /dev/null +++ b/openmixup/models/classifiers/mixup_momentum_V1plus.py @@ -0,0 +1,336 @@ +import torch +import numpy as np +import torch.nn as nn +import matplotlib.pyplot as plt +import os +import 
torchvision + +from torchvision import transforms +from openmixup.utils import print_log +from .. import builder +from ..registry import MODELS + + +@MODELS.register_module +class AutoMixup_V1plus(nn.Module): + """ AutoMix V0707 + + Implementation of "AutoMix: Unveiling the Power of Mixup + (https://arxiv.org/abs/2103.13027)". + + Args: + backbone (dict): Config dict for module of backbone ConvNet. + mix_block (dict): Config dict for the mixblock. + head_mix (dict): Config dict for module of mixup classification loss. + head_one (dict): Config dict for module of onehot classification loss. + head_indices (tuple): Indices of the cls head. + Default: ("head_mix_q", "head_one_q", "head_mix_k", "head_one_k") + mask_layer (int): Number of the feature layer indix in the backbone. + alpha (int): Beta distribution '$\beta(\alpha, \alpha)$'. + momentum (float): Momentum coefficient for the momentum-updated encoder. + Default: 0.999. + mask_loss (float): Loss weight for the mixup mask. Default: 0. + lam_margin (int): Margin of lambda to stop using AutoMix to train backbone + when lam is small. If lam > lam_margin: AutoMix; else: vanilla mixup. + Default: -1 (or 0). + pretrained (str, optional): Path to pre-trained weights. Default: None. + """ + + def __init__(self, + backbone, + mix_block, + head_mix, + head_one=None, + head_indices=( + "head_mix_q", "head_one_q", + "head_mix_k", "head_one_k"), + mask_layer=2, + alpha=1.0, + momentum=0.999, + mask_loss=0., + lam_margin=-1, + save=False, + save_name='mixup_samples', + pretrained=None): + super(AutoMixup_V1plus, self).__init__() + # basic params + self.alpha = alpha + self.mask_layer = mask_layer + self.momentum = momentum + self.base_momentum = momentum + self.mask_loss = mask_loss + self.lam_margin = lam_margin + self.save = save + self.save_name = save_name + assert lam_margin < 1. and mask_layer <= 4 + # mixblock + self.mix_block = builder.build_head(mix_block) + # backbone + self.backbone_q = builder.build_backbone(backbone) + self.backbone_k = builder.build_backbone(backbone) + for param in self.backbone_k.parameters(): # stop grad k + param.requires_grad = False + # mixup cls head + assert "head_mix_q" in head_indices and "head_mix_k" in head_indices + self.head_mix_q = builder.build_head(head_mix) + self.head_mix_k = builder.build_head(head_mix) + for param in self.head_mix_k.parameters(): # stop grad k + param.requires_grad = False + # onehot cls head + if "head_one_q" in head_indices: + self.head_one_q = builder.build_head(head_one) + else: + self.head_one_q = None + if "head_one_k" in head_indices and "head_one_q" in head_indices: + self.head_one_k = builder.build_head(head_one) + for param in self.head_one_k.parameters(): # stop grad k + param.requires_grad = False + else: + self.head_one_k = None + + self.init_weights(pretrained=pretrained) + + def init_weights(self, pretrained=None): + """Initialize the weights of model. + + Args: + pretrained (str, optional): Path to pre-trained weights. + Default: None. 
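+
+        Example (a sketch of the q-to-k copy this method performs; purely
+            illustrative)::
+
+            >>> model.init_weights()
+            >>> all(torch.equal(q, k) for q, k in zip(
+            ...     model.backbone_q.parameters(),
+            ...     model.backbone_k.parameters()))
+            True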
+ """ + # init params in q + if pretrained is not None: + print_log('load model from: {}'.format(pretrained), logger='root') + self.backbone_q.init_weights(pretrained=pretrained) + if self.head_mix_q is not None: + self.head_mix_q.init_weights(init_linear='kaiming') + if self.head_one_q is not None: + self.head_one_q.init_weights(init_linear='kaiming') + + # copy backbone param from q to k + for param_q, param_k in zip(self.backbone_q.parameters(), + self.backbone_k.parameters()): + param_k.data.copy_(param_q.data) + + # copy head one param from q to k + if self.head_one_q is not None and self.head_one_k is not None: + for param_one_q, param_one_k in zip(self.head_one_q.parameters(), + self.head_one_k.parameters()): + param_one_k.data.copy_(param_one_q.data) + # copy head mix param from q to k + if self.head_mix_q is not None and self.head_mix_k is not None: + for param_mix_q, param_mix_k in zip(self.head_mix_q.parameters(), + self.head_mix_k.parameters()): + param_mix_k.data.copy_(param_mix_q.data) + + # init mixblock + if self.mix_block is not None: + self.mix_block.init_weights(init_linear='normal') + + @torch.no_grad() + def _momentum_update(self): + """Momentum update of the k form q, including the backbone and heads """ + # update k's backbone and cls head from q + for param_q, param_k in zip(self.backbone_q.parameters(), + self.backbone_k.parameters()): + param_k.data = param_k.data * self.momentum + \ + param_q.data * (1 - self.momentum) + + if self.head_one_q is not None and self.head_one_k is not None: + for param_one_q, param_one_k in zip(self.head_one_q.parameters(), + self.head_one_k.parameters()): + param_one_k.data = param_one_k.data * self.momentum + \ + param_one_q.data * (1 - self.momentum) + + if self.head_mix_q is not None and self.head_mix_k is not None: + for param_mix_q, param_mix_k in zip(self.head_mix_q.parameters(), + self.head_mix_k.parameters()): + param_mix_k.data = param_mix_k.data * self.momentum + \ + param_mix_q.data * (1 - self.momentum) + + def forward_train(self, img, gt_label, **kwargs): + """Forward computation during training. + + Args: + img (Tensor): Input of a batch of images, (N, C, H, W). + gt_label (Tensor): Groundtruth onehot labels. + + Returns: + dict[str, Tensor]: A dictionary of loss components. + """ + batch_size = img.size()[0] + lam = np.random.beta(self.alpha, self.alpha, 2) + index_q = torch.randperm(batch_size).cuda() + index_k = torch.randperm(batch_size).cuda() + + with torch.no_grad(): + self._momentum_update() + + # auto Mixup + indices = [index_k, index_q] + feature = self.backbone_k(img)[0] + mixed_x_q, mixed_x_k, loss_mask = self.pixel_mixup(img, lam, indices, feature) + + # mixed sample visualization + if self.save: + self.plot_mix(mixed_x_k, img, img[index_k, :], lam[0]) + + # k: the mix block training + loss_mix_k = self.forward_k(mixed_x_k, gt_label, index_k, lam[0]) + # q: the encoder training + loss_one_q, loss_mix_q = self.forward_q(img, mixed_x_q, gt_label, index_q, lam[1]) + + # loss summary + losses = { + 'loss': loss_mix_q['loss'] + loss_mix_k['loss'], + 'acc_mix_k': loss_mix_k['acc'], + 'acc_mix_q': loss_mix_q['acc'], + } + if loss_one_q is not None: + losses['loss'] += loss_one_q['loss'] + losses['acc_one_q'] = loss_one_q['acc'] + if loss_mask is not None and self.mask_loss > 0: + losses["loss"] += loss_mask["loss"] * self.mask_loss + return losses + + @torch.no_grad() + def plot_mix(self, img_mixed, img, img_, lam): + invTrans = transforms.Compose([ + transforms.Normalize( + mean=[ 0., 0., 0. 
], std=[1/0.2023, 1/0.1994, 1/0.201]), + transforms.Normalize( + mean=[-0.4914, -0.4822, -0.4465], std=[ 1., 1., 1. ])]) + imgs = torch.cat((img[:4], img_[:4], img_mixed[:4]), dim=0) + img_grid = torchvision.utils.make_grid(imgs, nrow=4, pad_value=0) + imgs = np.transpose(invTrans(img_grid).detach().cpu().numpy(), (1, 2, 0)) + fig = plt.figure() + plt.imshow(imgs) + plt.title('lambda k: {}'.format(lam)) + if not os.path.exists(self.save_name): + plt.savefig(self.save_name) + plt.close() + + def forward_q(self, x, mixed_x, y, index, lam): + """ + Args: + x (Tensor): Input of a batch of images, (N, C, H, W). + mixed_x (Tensor): Mixup images of x, (N, C, H, W). + y (Tensor): Groundtruth onehot labels, coresponding to x. + index (List): Input list of shuffle index (tensor) for mixup. + lam (List): Input list of lambda (scalar). + + Returns: + dict[str, Tensor]: loss_one_q and loss_mix_q are losses from q. + """ + # onehot q + if self.head_one_q is not None: + out_one_q = self.backbone_q(x)[-1] + pred_one_q = self.head_one_q([out_one_q]) + # loss + error_one_q = (pred_one_q, y) + loss_one_q = self.head_one_q.loss(*error_one_q) + else: + loss_one_q = None + + # mixup q + out_mix_q = self.backbone_q(mixed_x)[-1] + pred_mix_q = self.head_mix_q([out_mix_q]) + # mixup loss + y_mix_q = (y, y[index], lam) + error_mix_q = (pred_mix_q, y_mix_q) + loss_mix_q = self.head_mix_q.loss(*error_mix_q) + return loss_one_q, loss_mix_q + + def forward_k(self, mixed_x, y, index, lam): + """ forward k with the mixup sample """ + # mixed_x forward + out_mix_k = self.backbone_k(mixed_x)[-1] + pred_mix_k = self.head_mix_k([out_mix_k]) + # k mixup loss + y_mix_k = (y, y[index], lam) + error_mix_k = (pred_mix_k, y_mix_k) + loss_mix_k = self.head_mix_k.loss(*error_mix_k) + return loss_mix_k + + def pixel_mixup(self, x, lam, index, feature): + """ pixel-wise input space mixup, v07.07 + + Args: + x (Tensor): Input of a batch of images, (N, C, H, W). + lam (List): Input list of lambda (scalar). + index (List): Input list of shuffle index (tensor) for mixup. + feature (Tensor): The feature map of x, (N, C, H', W'). + + Returns: + mixed_x_q, mixed_x_k: Mixup samples for q (training the backbone) + and k (training the mixblock). + mask_loss (Tensor): Output loss of mixup masks. + """ + # lam info + lam_k = lam[0] # lam is a scalar + lam_q = lam[1] + + # mask upsampling factor + if x.shape[3] > 64: # normal version of resnet + scale_factor = 2**(2 + self.mask_layer) + else: # CIFAR version + scale_factor = 2**self.mask_layer + + # get mixup mask + mask_k = self.mix_block(feature, lam_k, index[0], scale_factor=scale_factor) + mask_q = self.mix_block(feature, lam_q, index[1], scale_factor=scale_factor).clone().detach() + # lam_margin for backbone training + if self.lam_margin >= lam_q or self.lam_margin >= 1-lam_q: + mask_q[:, 0, :, :] = lam_q + mask_q[:, 1, :, :] = 1 - lam_q + # loss of mixup mask + if self.mask_loss > 0.: + mask_loss = self.mix_block.loss(mask_k, lam_k) + else: + mask_loss = None + + # mix, apply mask on x and x_ + # mixed_x_k = x * (1 - mask_k) + x[index[0], :] * mask_k + assert mask_k.shape[1] == 2 and mask_q.shape[1] == 2 + mixed_x_k = x * mask_k[:, 0, :, :].unsqueeze(1) + x[index[0], :] * mask_k[:, 1, :, :].unsqueeze(1) + + # mixed_x_q = x * (1 - mask_q) + x[index[1], :] * mask_q + mixed_x_q = x * mask_q[:, 0, :, :].unsqueeze(1) + x[index[1], :] * mask_q[:, 1, :, :].unsqueeze(1) + return mixed_x_q, mixed_x_k, mask_loss + + def forward_test(self, img, **kwargs): + """Forward computation during testing. 
+ + Args: + img (Tensor): Input of a batch of images, (N, C, H, W). + + Returns: + dict[key, Tensor]: A dictionary of head names (key) and predictions. + """ + keys = list() # 'acc_mix_k', 'acc_one_k', 'acc_mix_q', 'acc_one_q' + pred = list() + # backbone + last_k = self.backbone_k(img)[-1] + last_q = self.backbone_q(img)[-1] + # head k + pred.append(self.head_mix_k([last_k])) + keys.append('acc_mix_k') + if self.head_one_k is not None: + pred.append(self.head_one_k([last_k])) + keys.append('acc_one_k') + # head q + pred.append(self.head_mix_q([last_q])) + keys.append('acc_mix_q') + if self.head_one_q is not None: + pred.append(self.head_one_q([last_q])) + keys.append('acc_one_q') + + out_tensors = [p[0].cpu() for p in pred] # NxC + return dict(zip(keys, out_tensors)) + + def forward(self, img, mode='train', **kwargs): + if mode == 'train': + return self.forward_train(img, **kwargs) + elif mode == 'test': + return self.forward_test(img, **kwargs) + else: + raise Exception('No such mode: {}'.format(mode)) diff --git a/openmixup/models/classifiers/mixup_momentum_V2.py b/openmixup/models/classifiers/mixup_momentum_V2.py new file mode 100644 index 00000000..fd2baf84 --- /dev/null +++ b/openmixup/models/classifiers/mixup_momentum_V2.py @@ -0,0 +1,525 @@ +import os +import torch +import numpy as np +import torch.nn as nn +import matplotlib.pyplot as plt +import torchvision +from torchvision import transforms + +import logging +from mmcv.runner import load_checkpoint +from openmixup.utils import print_log +from .. import builder +from ..registry import MODELS + + +@MODELS.register_module +class AutoMixup_V2(nn.Module): + """ AutoMix V2 + v0824 version (based on V1plus v0707) + V1219 version (adding freezed backbone_k and mixblock) + + Official implementation of "AutoMix: Unveiling the Power of Mixup + (https://arxiv.org/abs/2103.13027)". + + Args: + backbone (dict): Config dict for module of backbone ConvNet (main). + backbone_k (dict): Config dict for module of momentum backbone ConvNet. Default: None. + mix_block (dict): Config dict for the mixblock. Default: None. + head_mix (dict): Config dict for module of mixup classification loss (backbone). + head_one (dict): Config dict for module of onehot classification loss (backbone). + head_mix (dict): Config dict for mixup classification loss (mixblock). Default: None. + head_one (dict): Config dict for onehot classification loss (mixblock). Default: None. + head_weights (dict): Dict of the used cls heads names and loss weights, + which determines the cls or mixup head in used. + Default: dict(head_mix_q=1, head_one_q=1, head_mix_k=1, head_one_k=1) + alpha (int): Beta distribution '$\beta(\alpha, \alpha)$'. + momentum (float): Momentum coefficient for the momentum-updated encoder. + Default: 0.999. + mask_layer (int): Number of the feature layer indix in the backbone. + mask_loss (float): Loss weight for the mixup mask. Default: 0. + mask_adjust (float): Probrobality (in [0, 1]) of adjusting the mask (q) in terms + of lambda (q), which only affect the backbone training. + Default: False (or 0.). + pre_one_loss (float): Loss weight for the pre-MixBlock head as onehot classification. + Default: 0. (requires a pre_head in MixBlock) + pre_mix_loss (float): Loss weight for the pre-MixBlock head as mixup classification. + Default: 0. (requires a pre_head in MixBlock) + lam_margin (int): Margin of lambda to stop using AutoMix to train backbone + when lam is small. If lam > lam_margin: AutoMix; else: vanilla mixup. + Default: -1 (or 0). 
+ mix_shuffle_no_repeat (bool): Whether to use 'no_repeat' mode to generate + mixup shuffle idx. We can ignore this issue in supervised learning. + Default: False. + pretrained (str, optional): Path to pre-trained weights. Default: None. + pretrained_k (str, optional): Path to pre-trained weights for en_k. Default: None. + """ + + def __init__(self, + backbone, + backbone_k=None, + mix_block=None, + head_mix=None, + head_one=None, + head_mix_k=None, + head_one_k=None, + head_weights=dict( + head_mix_q=1, head_one_q=1, head_mix_k=1, head_one_k=1), + alpha=1.0, + momentum=0.999, + mask_layer=2, + mask_loss=0., + mask_adjust=0., + pre_one_loss=0., + pre_mix_loss=0., + lam_margin=-1, + save=False, + save_name='mixup_samples', + debug=False, + mix_shuffle_no_repeat=False, + pretrained=None, + pretrained_k=None): + super(AutoMixup_V2, self).__init__() + # basic params + self.alpha = float(alpha) + self.mask_layer = int(mask_layer) + self.momentum = float(momentum) + self.base_momentum = float(momentum) + self.mask_loss = float(mask_loss) if float(mask_loss) > 0 else 0 + self.mask_adjust = float(mask_adjust) + self.pre_one_loss = float(pre_one_loss) if float(pre_one_loss) > 0 else 0 + self.pre_mix_loss = float(pre_mix_loss) if float(pre_mix_loss) > 0 else 0 + self.lam_margin = float(lam_margin) if float(lam_margin) > 0 else 0 + self.save = bool(save) + self.save_name = str(save_name) + self.debug = bool(debug) + self.mix_shuffle_no_repeat = bool(mix_shuffle_no_repeat) + assert 0 <= self.momentum and self.lam_margin < 1 and self.mask_adjust <= 1 + + # network + assert isinstance(mix_block, dict) and isinstance(backbone, dict) + assert backbone_k is None or isinstance(backbone_k, dict) + assert head_mix is None or isinstance(head_mix, dict) + assert head_one is None or isinstance(head_one, dict) + assert head_mix_k is None or isinstance(head_mix_k, dict) + assert head_one_k is None or isinstance(head_one_k, dict) + head_mix_k = head_mix if head_mix_k is None else head_mix_k + head_one_k = head_one if head_one_k is None else head_one_k + # mixblock + self.mix_block = builder.build_head(mix_block) + # backbone + self.backbone_q = builder.build_backbone(backbone) + if backbone_k is not None: + self.backbone_k = builder.build_backbone(backbone_k) + assert self.momentum >= 1. and pretrained_k is not None + else: + self.backbone_k = builder.build_backbone(backbone) + self.backbone = self.backbone_k # for feature extract + for param in self.backbone_k.parameters(): # stop grad k + param.requires_grad = False + # mixup cls head + assert "head_mix_q" in head_weights.keys() and "head_mix_k" in head_weights.keys() + self.head_mix_q = builder.build_head(head_mix) + self.head_mix_k = builder.build_head(head_mix_k) + for param in self.head_mix_k.parameters(): # stop grad k + param.requires_grad = False + # onehot cls head + if "head_one_q" in head_weights.keys(): + self.head_one_q = builder.build_head(head_one) + else: + self.head_one_q = None + if "head_one_k" in head_weights.keys() and "head_one_q" in head_weights.keys(): + self.head_one_k = builder.build_head(head_one_k) + for param in self.head_one_k.parameters(): # stop grad k + param.requires_grad = False + else: + self.head_one_k = None + # for feature extract + self.fc = self.head_one_k if self.head_one_k is not None else self.head_one_q + # onehot and mixup heads for training + self.weight_mix_q = head_weights.get("head_mix_q", 1.) + self.weight_mix_k = head_weights.get("head_mix_k", 1.) + self.weight_one_q = head_weights.get("head_one_q", 1.) 
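+        # NOTE: the scalars above act as loss weights in forward_train():
+        # e.g., head_weights=dict(head_mix_q=1, head_one_q=1, head_mix_k=1)
+        # trains the backbone (q) with both mixup and one-hot losses, while
+        # the momentum branch (k) only contributes the mixblock mixup loss;
+        # head_one_k, when built, is used for evaluation in forward_test() only.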
+ assert self.weight_mix_q > 0 and (self.weight_mix_k > 0 or backbone_k is not None) + + self.init_weights(pretrained=pretrained, pretrained_k=pretrained_k) + + def init_weights(self, pretrained=None, pretrained_k=None): + """Initialize the weights of model. + + Args: + pretrained (str, optional): Path to pre-trained weights. + Default: None. + pretrained_k (str, optional): Path to pre-trained weights to initialize the + backbone_k and mixblock. Default: None. + """ + # init mixblock + if self.mix_block is not None: + self.mix_block.init_weights(init_linear='normal') + # init pretrained backbone_k and mixblock + if pretrained_k is not None: + print_log('load pretrained classifier k from: {}'.format(pretrained_k), logger='root') + # load full ckpt to backbone and fc + logger = logging.getLogger() + load_checkpoint(self, pretrained_k, strict=False, logger=logger) + # head_mix_k and head_one_k should share the same initalization + if self.head_mix_k is not None and self.head_one_k is not None: + for param_one_k, param_mix_k in zip(self.head_one_k.parameters(), + self.head_mix_k.parameters()): + param_mix_k.data.copy_(param_one_k.data) + + # init backbone, based on params in q + if pretrained is not None: + print_log('load encoder_q from: {}'.format(pretrained), logger='root') + self.backbone_q.init_weights(pretrained=pretrained) + # copy backbone param from q to k + if pretrained_k is None and self.momentum < 1: + for param_q, param_k in zip(self.backbone_q.parameters(), + self.backbone_k.parameters()): + param_k.data.copy_(param_q.data) + + # init head + if self.head_mix_q is not None: + self.head_mix_q.init_weights(init_linear='kaiming') + if self.head_one_q is not None: + self.head_one_q.init_weights(init_linear='kaiming') + + # copy head one param from q to k + if (self.head_one_q is not None and self.head_one_k is not None) and \ + (pretrained_k is None and self.momentum < 1): + for param_one_q, param_one_k in zip(self.head_one_q.parameters(), + self.head_one_k.parameters()): + param_one_k.data.copy_(param_one_q.data) + # copy head mix param from q to k + if (self.head_mix_q is not None and self.head_mix_k is not None) and \ + (pretrained_k is None and self.momentum < 1): + for param_mix_q, param_mix_k in zip(self.head_mix_q.parameters(), + self.head_mix_k.parameters()): + param_mix_k.data.copy_(param_mix_q.data) + + @torch.no_grad() + def _momentum_update(self): + """Momentum update of the k form q, including the backbone and heads """ + # we don't update q to k when momentum > 1 + if self.momentum >= 1.: + return + # update k's backbone and cls head from q + for param_q, param_k in zip(self.backbone_q.parameters(), + self.backbone_k.parameters()): + param_k.data = param_k.data * self.momentum + \ + param_q.data * (1. 
- self.momentum) + + if self.head_one_q is not None and self.head_one_k is not None: + for param_one_q, param_one_k in zip(self.head_one_q.parameters(), + self.head_one_k.parameters()): + param_one_k.data = param_one_k.data * self.momentum + \ + param_one_q.data * (1 - self.momentum) + + if self.head_mix_q is not None and self.head_mix_k is not None: + for param_mix_q, param_mix_k in zip(self.head_mix_q.parameters(), + self.head_mix_k.parameters()): + param_mix_k.data = param_mix_k.data * self.momentum + \ + param_mix_q.data * (1 - self.momentum) + + def _no_repeat_shuffle_idx(self, batch_size_this, ignore_failure=False): + """ generate no repeat shuffle idx within a gpu """ + idx_shuffle = torch.randperm(batch_size_this).cuda() + idx_original = torch.tensor([i for i in range(batch_size_this)]).cuda() + idx_repeat = False + for i in range(10): # try 10 times + if (idx_original == idx_shuffle).any() == True: + idx_repeat = True + idx_shuffle = torch.randperm(batch_size_this).cuda() + else: + idx_repeat = False + break + # hit: prob < 1.2e-3 + if idx_repeat == True and ignore_failure == False: + # way 2: repeat prob = 0, but too simple! + idx_shift = np.random.randint(1, batch_size_this-1) + idx_shuffle = torch.tensor( # shift the original idx + [(i+idx_shift) % batch_size_this for i in range(batch_size_this)]).cuda() + return idx_shuffle + + def forward_train(self, img, gt_label, **kwargs): + """Forward computation during training. + + Args: + img (Tensor): Input of a batch of images, (N, C, H, W). + gt_label (Tensor): Groundtruth onehot labels. + + Returns: + dict[str, Tensor]: A dictionary of loss components. + """ + batch_size = img.size()[0] + lam = np.random.beta(self.alpha, self.alpha, 2) # 0: mb, 1: bb + if self.mix_shuffle_no_repeat: + index_bb = self._no_repeat_shuffle_idx(batch_size, ignore_failure=True) + index_mb = self._no_repeat_shuffle_idx(batch_size, ignore_failure=False) + else: + index_bb = torch.randperm(batch_size).cuda() + index_mb = torch.randperm(batch_size).cuda() + + with torch.no_grad(): + self._momentum_update() + + # auto Mixup + indices = [index_mb, index_bb] + feature = self.backbone_k(img)[0] + results = self.pixel_mixup(img, gt_label, lam, indices, feature) + + # save img bb, mixed sample visualization + if self.save and self.mask_adjust > 0: + self.plot_mix( + results["img_mix_bb"], img, img[index_bb, :], lam[1], results["debug_plot"], "backbone") + # save img mb + if self.save and self.mask_adjust <= 0: + self.plot_mix( + results["img_mix_mb"], img, img[index_mb, :], lam[0], results["debug_plot"], "mixblock") + + # k (mb): the mix block training + loss_mix_k = self.forward_k(results["img_mix_mb"], gt_label, index_mb, lam[0]) + # q (bb): the encoder training + loss_one_q, loss_mix_q = self.forward_q(img, results["img_mix_bb"], gt_label, index_bb, lam[1]) + + # loss summary + losses = { + 'loss': loss_mix_q['loss'] * self.weight_mix_q, + 'acc_mix_q': loss_mix_q['acc'], + } + # onehot loss + if loss_one_q is not None and self.weight_one_q > 0: + losses['loss'] += loss_one_q['loss'] * self.weight_one_q + losses['acc_one_q'] = loss_one_q['acc'] + # mixblock loss + if self.weight_mix_k > 0: + losses["loss"] += loss_mix_k['loss'] * self.weight_mix_k + losses['acc_mix_k'] = loss_mix_k['acc'] + if results["mask_loss"] is not None and self.mask_loss > 0: + losses["loss"] += results["mask_loss"] + if results["pre_one_loss"] is not None and self.pre_one_loss > 0: + losses["loss"] += results["pre_one_loss"] + if loss_mix_k["pre_mix_loss"] is not None and 
self.pre_mix_loss > 0: + losses["loss"] += loss_mix_k["pre_mix_loss"] + + return losses + + @torch.no_grad() + def plot_mix(self, im_mixed, im_q, im_k, lam, debug_plot=None, name="k"): + """ visualize mixup results, supporting 'debug' mode """ + invTrans = transforms.Compose([ + transforms.Normalize( + mean=[ 0., 0., 0. ], std=[1/0.2023, 1/0.1994, 1/0.201]), + transforms.Normalize( + mean=[-0.4914, -0.4822, -0.4465], std=[ 1., 1., 1. ])]) + # plot mixup results + imgs = torch.cat((im_q[:4], im_k[:4], im_mixed[:4]), dim=0) + img_grid = torchvision.utils.make_grid(imgs, nrow=4, pad_value=0) + imgs = np.transpose(invTrans(img_grid).detach().cpu().numpy(), (1, 2, 0)) + fig = plt.figure() + plt.imshow(imgs) + plt.title('lambda {}={}'.format(name, lam)) + assert self.save_name.find(".png") != -1 + if not os.path.exists(self.save_name): + plt.savefig(self.save_name) + # debug: plot intermediate results + if self.debug: + assert isinstance(debug_plot, dict) + for key,value in debug_plot.items(): + n, h, w = value.size() + imgs = value[:4].view(h, 4 * w).detach().cpu().numpy() + fig = plt.figure() + plt.imshow(imgs) + # plt.title('debug {}, lambda k={}'.format(str(key), lam)) + _debug_path = self.save_name.split(".png")[0] + "_{}.png".format(str(key)) + if not os.path.exists(_debug_path): + plt.savefig(_debug_path, bbox_inches='tight') + plt.close() + + def forward_q(self, x, mixed_x, y, index, lam): + """ + Args: + x (Tensor): Input of a batch of images, (N, C, H, W). + mixed_x (Tensor): Mixup images of x, (N, C, H, W). + y (Tensor): Groundtruth onehot labels, coresponding to x. + index (List): Input list of shuffle index (tensor) for mixup. + lam (List): Input list of lambda (scalar). + + Returns: + dict[str, Tensor]: loss_one_q and loss_mix_q are losses from q. + """ + # onehot q + loss_one_q = None + if self.head_one_q is not None and self.weight_one_q > 0: + out_one_q = self.backbone_q(x)[-1] + pred_one_q = self.head_one_q([out_one_q]) + # loss + error_one_q = (pred_one_q, y) + loss_one_q = self.head_one_q.loss(*error_one_q) + + # mixup q + loss_mix_q = None + if self.weight_mix_q > 0: + out_mix_q = self.backbone_q(mixed_x)[-1] + pred_mix_q = self.head_mix_q([out_mix_q]) + # mixup loss + y_mix_q = (y, y[index], lam) + error_mix_q = (pred_mix_q, y_mix_q) + loss_mix_q = self.head_mix_q.loss(*error_mix_q) + return loss_one_q, loss_mix_q + + def forward_k(self, mixed_x, y, index, lam): + """ forward k with the mixup sample """ + loss_mix_k = dict() + if self.weight_mix_k > 0: + # mixed_x forward + out_mix_k = self.backbone_k(mixed_x) + pred_mix_k = self.head_mix_k([out_mix_k[-1]]) + # k mixup loss + y_mix_k = (y, y[index], lam) + error_mix_k = (pred_mix_k, y_mix_k) + loss_mix_k = self.head_mix_k.loss(*error_mix_k) + + # mixup loss, short cut of pre-mixblock + if self.pre_mix_loss > 0: + out_mb = out_mix_k[0] + # pre FFN + if self.mix_block.pre_attn is not None: + out_mb = self.mix_block.pre_attn(out_mb) # non-local + if self.mix_block.pre_conv is not None: + out_mb = self.mix_block.pre_conv([out_mb]) # neck + # pre mixblock mixup loss + pred_mix_mb = self.mix_block.pre_head(out_mb) + error_mix_mb = (pred_mix_mb, y_mix_k) + loss_mix_k["pre_mix_loss"] = \ + self.mix_block.pre_head.loss(*error_mix_mb)["loss"] * self.pre_mix_loss + else: + loss_mix_k["pre_mix_loss"] = None + + return loss_mix_k + + def pixel_mixup(self, x, y, lam, index, feature): + """ pixel-wise input space mixup, v08.24 + + Args: + x (Tensor): Input of a batch of images, (N, C, H, W). + y (Tensor): A batch of gt_labels, (N, 1). 
+ lam (List): Input list of lambda (scalar). + index (List): Input list of shuffle index (tensor) for mixup. + feature (Tensor): The feature map of x, (N, C, H', W'). + + Returns: dict includes following + mixed_x_bb, mixed_x_mb: Mixup samples for bb (training the backbone) + and mb (training the mixblock). + mask_loss (Tensor): Output loss of mixup masks. + pre_one_loss (Tensor): Output onehot cls loss of pre-mixblock. + """ + results = dict() + # lam info + lam_mb = lam[0] # lam is a scalar + lam_bb = lam[1] + + # mask upsampling factor + if x.shape[3] > 64: # normal version of resnet + scale_factor = 2**(2 + self.mask_layer) + else: # CIFAR version + scale_factor = 2**self.mask_layer + + # get mixup mask + mask_mb = self.mix_block(feature, lam_mb, index[0], scale_factor=scale_factor, debug=self.debug) + mask_bb = self.mix_block(feature, lam_bb, index[1], scale_factor=scale_factor, debug=False) + if self.debug: + results["debug_plot"] = mask_mb["debug_plot"] + else: + results["debug_plot"] = None + + # pre mixblock loss + results["pre_one_loss"] = None + if self.pre_one_loss > 0.: + pred_one = self.mix_block.pre_head([mask_mb["x_lam"]]) + y_one = (y, y, 1) + error_one = (pred_one, y_one) + results["pre_one_loss"] = \ + self.mix_block.pre_head.loss(*error_one)["loss"] * self.pre_one_loss + + mask_mb = mask_mb["mask"] + mask_bb = mask_bb["mask"].clone().detach() + + # adjust mask_bb with lambd + if self.mask_adjust > np.random.rand(): # [0,1) + epsilon = 1e-8 + _mask = mask_bb[:, 0, :, :].squeeze() # [N, H, W], _mask for lam + _mask = _mask.clamp(min=epsilon, max=1-epsilon) + _mean = _mask.mean(dim=[1, 2]).squeeze() # [N, 1, 1] -> [N] + idx_larg = _mean[:] > lam[0] + epsilon # index of mean > lam_bb + idx_less = _mean[:] < lam[0] - epsilon # index of mean < lam_bb + # if mean > lam_bb + mask_bb[idx_larg==True, 0, :, :] = \ + _mask[idx_larg==True, :, :] * (lam[0] / _mean[idx_larg==True].view(-1, 1, 1)) + mask_bb[idx_larg==True, 1, :, :] = 1 - mask_bb[idx_larg==True, 0, :, :] + # elif mean < lam_bb + mask_bb[idx_less==True, 1, :, :] = \ + (1 - _mask[idx_less==True, :, :]) * ((1 - lam[0]) / (1 - _mean[idx_less==True].view(-1, 1, 1))) + mask_bb[idx_less==True, 0, :, :] = 1 - mask_bb[idx_less==True, 1, :, :] + # lam_margin for backbone training + if self.lam_margin >= lam_bb or self.lam_margin >= 1-lam_bb: + mask_bb[:, 0, :, :] = lam_bb + mask_bb[:, 1, :, :] = 1 - lam_bb + + # loss of mixup mask + results["mask_loss"] = None + if self.mask_loss > 0.: + results["mask_loss"] = self.mix_block.mask_loss(mask_mb, lam_mb)["loss"] + if results["mask_loss"] is not None: + results["mask_loss"] *= self.mask_loss + + # mix, apply mask on x and x_ + # img_mix_mb = x * (1 - mask_mb) + x[index[0], :] * mask_mb + assert mask_mb.shape[1] == 2 and mask_bb.shape[1] == 2 + results["img_mix_mb"] = \ + x * mask_mb[:, 0, :, :].unsqueeze(1) + x[index[0], :] * mask_mb[:, 1, :, :].unsqueeze(1) + + # img_mix_bb = x * (1 - mask_bb) + x[index[1], :] * mask_bb + results["img_mix_bb"] = \ + x * mask_bb[:, 0, :, :].unsqueeze(1) + x[index[1], :] * mask_bb[:, 1, :, :].unsqueeze(1) + + return results + + def forward_test(self, img, **kwargs): + """Forward computation during testing. + + Args: + img (Tensor): Input of a batch of images, (N, C, H, W). + + Returns: + dict[key, Tensor]: A dictionary of head names (key) and predictions. 
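+
+        Example (illustrative; the exact keys depend on which heads were
+            built for this model)::
+
+            >>> preds = model(img, mode='test')
+            >>> list(preds.keys())
+            ['acc_mix_k', 'acc_one_k', 'acc_mix_q', 'acc_one_q']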
+ """ + keys = list() # 'acc_mix_k', 'acc_one_k', 'acc_mix_q', 'acc_one_q' + pred = list() + # backbone + last_k = self.backbone_k(img)[-1] + last_q = self.backbone_q(img)[-1] + # head k + if self.weight_mix_k > 0: + pred.append(self.head_mix_k([last_k])) + keys.append('acc_mix_k') + if self.head_one_k is not None: + pred.append(self.head_one_k([last_k])) + keys.append('acc_one_k') + # head q + pred.append(self.head_mix_q([last_q])) + keys.append('acc_mix_q') + if self.head_one_q is not None: + pred.append(self.head_one_q([last_q])) + keys.append('acc_one_q') + + out_tensors = [p[0].cpu() for p in pred] # NxC + return dict(zip(keys, out_tensors)) + + def forward(self, img, mode='train', **kwargs): + if mode == 'train': + return self.forward_train(img, **kwargs) + elif mode == 'test': + return self.forward_test(img, **kwargs) + else: + raise Exception('No such mode: {}'.format(mode)) diff --git a/openmixup/models/classifiers/representation.py b/openmixup/models/classifiers/representation.py new file mode 100644 index 00000000..6e1fd3c5 --- /dev/null +++ b/openmixup/models/classifiers/representation.py @@ -0,0 +1,71 @@ +import torch.nn as nn + +from openmixup.utils import print_log + +from .. import builder +from ..registry import MODELS + + +@MODELS.register_module +class Representation(nn.Module): + + def __init__(self, + backbone, + neck=None, + head=None, + head_keys=["head0"], + pretrained=None, + ): + super(Representation, self).__init__() + self.backbone = builder.build_backbone(backbone) + if neck is not None: + self.neck = builder.build_neck(neck) + else: + self.neck = None + assert head is None + self.head_keys = head_keys + self.init_weights(pretrained=pretrained) + + def init_weights(self, pretrained=None): + if pretrained is not None: + print_log('load model ckpt from {}.'.format(pretrained), logger='root') + self.backbone.init_weights(pretrained=pretrained) + + def forward_backbone(self, img): + """Forward backbone + + Returns: + x (tuple): backbone outputs + """ + x = self.backbone(img) + return x + + def forward_train(self, img, gt_label, **kwargs): + raise NotImplementedError + + def forward_test(self, img, **kwargs): + x = self.forward_backbone(img) # tuple + if self.neck is not None: + x = self.neck(x) + if len(self.head_keys) > 1: + assert len(self.head_keys) == len(x) + else: + keys = ['head0'] + # out_tensors = [out.cpu() for out in outs] # NxC + out_tensors = [out.cpu() for out in x] # NxC + return dict(zip(keys, out_tensors)) + + def aug_test(self, imgs): + raise NotImplementedError + + def forward(self, img, mode='train', **kwargs): + if mode == 'train': + # return self.forward_train(img, **kwargs) + raise Exception("No such mode: {} in Reprensentation.".format(mode)) + elif mode == 'test': + return self.forward_test(img, **kwargs) + elif mode == 'extract': + # return self.forward_backbone(img) + raise Exception("No such mode: {} in Reprensentation.".format(mode)) + else: + raise Exception("No such mode: {} in Reprensentation.".format(mode)) diff --git a/openmixup/models/heads/__init__.py b/openmixup/models/heads/__init__.py new file mode 100644 index 00000000..3493f96e --- /dev/null +++ b/openmixup/models/heads/__init__.py @@ -0,0 +1,12 @@ +from .cls_head import ClsHead +from .cls_mixup_head import ClsMixupHead +from .contrastive_head import ContrastiveHead +from .latent_pred_head import LatentPredictHead +from .multi_cls_head import MultiClsHead +from .pmix_block_V2 import PixelMixBlock_V2 + + +__all__ = [ + 'ContrastiveHead', 'ClsHead', 'ClsMixupHead', 
'LatentPredictHead', 'MultiClsHead', + 'PixelMixBlock_V2', +] diff --git a/openmixup/models/heads/cls_head.py b/openmixup/models/heads/cls_head.py new file mode 100644 index 00000000..8c840be0 --- /dev/null +++ b/openmixup/models/heads/cls_head.py @@ -0,0 +1,146 @@ +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import kaiming_init, normal_init + +from ..utils import accuracy, accuracy_mixup +from ..registry import HEADS +from ..builder import build_loss + + +@HEADS.register_module +class ClsHead(nn.Module): + """Simplest classifier head, with only one fc layer. + *** Mixup and multi-label classification are supported *** + + Args: + with_avg_pool (bool): Whether to use GAP before this head. + loss (dict): Config of classification loss. + in_channels (int): Number of channels in the input feature map. + num_classes (int): Number of categories excluding the category. + multi_label (bool): Whether to use one_hot like labels (requiring the + multi-label classification loss). Notice that we support the + single-label cls task to use the multi-label cls loss. + frozen (bool): Whether to freeze the parameters. + """ + + def __init__(self, + with_avg_pool=False, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + in_channels=2048, + num_classes=1000, + multi_label=False, + frozen=False): + super(ClsHead, self).__init__() + self.with_avg_pool = with_avg_pool + self.in_channels = in_channels + self.num_classes = num_classes + self.multi_label = multi_label + + # loss + if loss is not None: + assert isinstance(loss, dict) + self.criterion = build_loss(loss) + else: + assert multi_label == False + loss = dict(type='CrossEntropyLoss', loss_weight=1.0) + self.criterion = build_loss(loss) + # pooling + if self.with_avg_pool: + self.avg_pool = nn.AdaptiveAvgPool2d((1, 1)) + # fc layer + self.fc_cls = nn.Linear(in_channels, num_classes) + if frozen: + self.frozen() + + def frozen(self): + self.fc_cls.eval() + for param in self.fc_cls.parameters(): + param.requires_grad = False + + def init_weights(self, init_linear='normal', std=0.01, bias=0.): + assert init_linear in ['normal', 'kaiming'], \ + "Undefined init_linear: {}".format(init_linear) + for m in self.modules(): + if isinstance(m, nn.Linear): + if init_linear == 'normal': + normal_init(m, std=std, bias=bias) + else: + kaiming_init(m, mode='fan_in', nonlinearity='relu') + elif isinstance(m, + (nn.BatchNorm2d, nn.GroupNorm, nn.SyncBatchNorm)): + if m.weight is not None: + nn.init.constant_(m.weight, 1) + if m.bias is not None: + nn.init.constant_(m.bias, 0) + + def forward(self, x): + assert isinstance(x, (tuple, list)) and len(x) == 1 + x = x[0] + if self.with_avg_pool: + assert x.dim() == 4, \ + "Tensor must has 4 dims, got: {}".format(x.dim()) + x = self.avg_pool(x) + x = x.view(x.size(0), -1) + cls_score = self.fc_cls(x) + return [cls_score] + + def loss(self, cls_score, labels, **kwargs): + """" cls loss forward + + Args: + cls_score (list): Score should be [tensor]. + labels (tuple or tensor): Labels should be tensor [N, \*] by default. + If labels as tuple, it's used for CE mixup, (gt_a, gt_b, lambda). 
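+
+        Example (a minimal mixup-loss sketch with illustrative shapes)::
+
+            >>> head = ClsHead(in_channels=8, num_classes=5)
+            >>> cls_score = [torch.randn(4, 5)]
+            >>> y_a = torch.randint(0, 5, (4,))
+            >>> y_b = torch.randint(0, 5, (4,))
+            >>> losses = head.loss(cls_score, (y_a, y_b, 0.7))  # lam=0.7
+            >>> sorted(losses.keys())
+            ['acc', 'acc_mix', 'loss']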
+ """ + single_label = False + losses = dict() + assert isinstance(cls_score, (tuple, list)) and len(cls_score) == 1 + + # computing loss + if not isinstance(labels, tuple): + # whether is the single label cls [N,] or multi-label cls [N,C] + single_label = \ + labels.dim() == 1 or (labels.dim() == 2 and labels.shape[1] == 1) + # Notice: we allow the single-label cls using multi-label loss, thus + # * For single-label cls, loss = loss.sum() / N + # * For multi-label cls, loss = loss.sum() or loss.mean() + avg_factor = labels.size(0) if single_label else None + + target = labels.clone() + if self.multi_label: + # convert to onehot labels + if single_label: + target = F.one_hot(target, num_classes=self.num_classes) + # default onehot cls + losses['loss'] = self.criterion( + cls_score[0], target, avg_factor=avg_factor, **kwargs) + # compute accuracy + losses['acc'] = accuracy(cls_score[0], labels) + else: + # mixup classification + y_a, y_b, lam = labels + # whether is the single label cls [N,] or multi-label cls [N,C] + single_label = \ + y_a.dim() == 1 or (y_a.dim() == 2 and y_a.shape[1] == 1) + # Notice: we allow the single-label cls using multi-label loss, thus + # * For single-label cls, loss = loss.sum() / N + # * For multi-label cls, loss = loss.sum() or loss.mean() + avg_factor = y_a.size(0) if single_label else None + + if not self.multi_label: + losses['loss'] = \ + self.criterion(cls_score[0], y_a, avg_factor=avg_factor, **kwargs) * lam + \ + self.criterion(cls_score[0], y_b, avg_factor=avg_factor, **kwargs) * (1 - lam) + else: + # convert to onehot labels + if single_label: + y_a = F.one_hot(y_a, num_classes=self.num_classes) + y_b = F.one_hot(y_b, num_classes=self.num_classes) + # mixup onehot like labels, using a multi-label loss + y_mixed = lam * y_a + (1 - lam) * y_b + losses['loss'] = self.criterion( + cls_score[0], y_mixed, avg_factor=avg_factor, **kwargs) + # compute accuracy + losses['acc'] = accuracy(cls_score[0], labels[0]) + losses['acc_mix'] = accuracy_mixup(cls_score[0], labels) + return losses diff --git a/openmixup/models/heads/cls_mixup_head.py b/openmixup/models/heads/cls_mixup_head.py new file mode 100644 index 00000000..12033445 --- /dev/null +++ b/openmixup/models/heads/cls_mixup_head.py @@ -0,0 +1,266 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from builtins import NotImplementedError +from mmcv.cnn import kaiming_init, normal_init + +from ..utils import accuracy, accuracy_mixup +from ..registry import HEADS +from ..builder import build_loss + + +@HEADS.register_module +class ClsMixupHead(nn.Module): + """Simplest classifier head, with only one fc layer. + *** Mixup and multi-label classification are supported *** + V1218, IP89, fix 'neg_weight' and 'eta_weight' usages + + Args: + with_avg_pool (bool): Whether to use GAP before this head. + loss (dict): Config of classification loss. + in_channels (int): Number of channels in the input feature map. + num_classes (int): Number of categories excluding the category. + multi_label (bool): Whether to use one_hot like labels (requiring the + multi-label classification loss). Notice that we support the + single-label cls task to use the multi-label cls loss. + two_hot (bool): Whether to use multi-hot label (two hot). + two_hot_scale (float): Rescale the sum of labels, in (0, 1]. The sum of + softmax labels is 1, while that of the two-hot labels is 2. This scalar + is used to rescale the sum of labels to (0, 2]. 
+        lam_scale_mode (str): The mode of rescaling two-hot or soft mixup labels,
+            in {'pow', 'exp', 'none'}. If mode != 'none', rescale the labels with
+            lam_thr and lam_idx. Default: "none".
+        lam_thr (float): Rescale threshold for two-hot labels, in [0, 1].
+        lam_idx (float): Rescale factor for the exp or power function.
+        eta_weight (dict): The lambda threshold that decides whether to use the eta
+            weights. It contains 'eta_weight=dict(eta=1, mode="both", thr=1)', where
+            'eta' denotes the basic rescale factor of each lam term and 'mode' is
+            the selection method.
+            If eta_weight['mode']=="both", add the eta_weight to both lam terms.
+            If eta_weight['mode']=="less", add the eta_weight for lam < thr.
+            If eta_weight['mode']=="more", add the eta_weight for lam > thr.
+            Default: dict(eta=1, mode="both", thr=0).
+        neg_weight (bool or float): Whether to remove (or reweight) the negative
+            part of the loss according to gt_label (requires a BCE multi-label loss).
+            Default: 1 (True).
+        frozen (bool): Whether to freeze the parameters.
+    """
+
+    def __init__(self,
+                 with_avg_pool=False,
+                 loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
+                 in_channels=2048,
+                 num_classes=1000,
+                 multi_label=False,
+                 two_hot=False,
+                 two_hot_scale=1,
+                 lam_scale_mode='none',
+                 lam_thr=1,
+                 lam_idx=1,
+                 eta_weight=dict(eta=1, mode="both", thr=0.5),
+                 neg_weight=1,
+                 frozen=False):
+        super(ClsMixupHead, self).__init__()
+        self.with_avg_pool = bool(with_avg_pool)
+        self.in_channels = int(in_channels)
+        self.num_classes = int(num_classes)
+        self.multi_label = bool(multi_label)
+        self.two_hot = bool(two_hot)
+        self.two_hot_scale = float(two_hot_scale)
+        self.lam_scale_mode = str(lam_scale_mode)
+        self.lam_thr = float(lam_thr)
+        self.lam_idx = float(lam_idx)
+        self.eta_weight = eta_weight
+        self.neg_weight = float(neg_weight) if float(neg_weight) != 1 else 1
+        assert lam_scale_mode in ['none', 'pow', 'exp']
+        assert eta_weight["mode"] in ['more', 'less', 'both'] and \
+            0 <= eta_weight["thr"] <= 1 and eta_weight["eta"] < 100
+        assert 0 < lam_thr <= 1 and -100 < lam_idx < 100
+        assert 0 < two_hot_scale <= 1 and 0 <= neg_weight <= 1
+
+        # loss
+        if loss is not None:
+            assert isinstance(loss, dict)
+            self.criterion = build_loss(loss)
+        else:
+            assert multi_label == False
+            loss = dict(type='CrossEntropyLoss', loss_weight=1.0)
+            self.criterion = build_loss(loss)
+        if self.neg_weight != 1:
+            assert 0 <= self.neg_weight <= 1, "the weight of the negative parts should not be \
+                larger than the positive part."
+ assert multi_label == True and loss['type'] == 'CrossEntropyLoss' + # global pooling + if self.with_avg_pool: + self.avg_pool = nn.AdaptiveAvgPool2d((1, 1)) + # fc layer + self.fc_cls = nn.Linear(in_channels, num_classes) + if frozen: + self.frozen() + + def frozen(self): + self.fc_cls.eval() + for param in self.fc_cls.parameters(): + param.requires_grad = False + + def init_weights(self, init_linear='normal', std=0.01, bias=0.): + assert init_linear in ['normal', 'kaiming'], \ + "Undefined init_linear: {}".format(init_linear) + for m in self.modules(): + if isinstance(m, nn.Linear): + if init_linear == 'normal': + normal_init(m, std=std, bias=bias) + else: + kaiming_init(m, mode='fan_in', nonlinearity='relu') + elif isinstance(m, + (nn.BatchNorm2d, nn.GroupNorm, nn.SyncBatchNorm)): + if m.weight is not None: + nn.init.constant_(m.weight, 1) + if m.bias is not None: + nn.init.constant_(m.bias, 0) + + def forward(self, x): + assert isinstance(x, (tuple, list)) and len(x) == 1 + x = x[0] + if self.with_avg_pool: + assert x.dim() == 4, \ + "Tensor must has 4 dims, got: {}".format(x.dim()) + x = self.avg_pool(x) + x = x.view(x.size(0), -1) + cls_score = self.fc_cls(x) + return [cls_score] + + def lambda_adjust(self, lam, mode="pow", thr=1, idx=1): + """ rescale lambda for two-hot label mixup classification + + Args: + lam (float): The original lambda in [0,1]. + mode (str): The rescale function, {'pow', 'exp'}. + thr (float): If lam < threshold, do rescale; else + lam=1. Threshold in (0,1]. + idx (float): The index for power or exp functions. + """ + if lam >= thr: + lam = 1 + else: + if mode == "pow": + lam = (thr ** (-abs(idx))) * (lam ** abs(idx)) + elif mode == "exp": + b = (abs(idx)** (-thr*2)) * 1 + k = 1 / (1 - b) + lam = ((abs(idx)** (lam - thr*2)) * (abs(idx) ** lam) - b) * k + else: + raise NotImplementedError + return lam + + def loss(self, cls_score, labels, label_mask=None, **kwargs): + r"""" mixup classification loss forward + + Args: + cls_score (list): Score should be [tensor] of [N, d]. + labels (tuple or tensor): Labels should be tensor [N, \*] by default. + If labels as tuple, it's used for CE mixup, (gt_a, gt_b, lambda). + label_mask (tensor): Mask (N,1) to indicate whether this idx is a + ground truth or pseudo label. + """ + single_label = False + losses = dict() + assert isinstance(cls_score, (tuple, list)) and len(cls_score) == 1 + + # 1. original one-hot classification + if not isinstance(labels, tuple): + # whether is the single label cls [N,] or multi-label cls [N,C] + single_label = \ + labels.dim() == 1 or (labels.dim() == 2 and labels.shape[1] == 1) + # Notice: we allow the single-label cls using multi-label loss, thus + # * For single-label cls, loss = loss.sum() / N + # * For multi-label cls, loss = loss.sum() or loss.mean() + avg_factor = labels.size(0) if single_label else None + + target = labels.clone() + if self.multi_label: + # convert to onehot labels + if single_label: + target = F.one_hot(target, num_classes=self.num_classes) + # default onehot cls + losses['loss'] = self.criterion( + cls_score[0], target, avg_factor=avg_factor, **kwargs) + # compute accuracy + losses['acc'] = accuracy(cls_score[0], labels) + # 2. 
mixup classification + else: + y_a, y_b, lam = labels + # whether is the single label cls [N,] or multi-label cls [N,C] + single_label = \ + y_a.dim() == 1 or (y_a.dim() == 2 and y_a.shape[1] == 1) + # Notice: we allow the single-label cls using multi-label loss, thus + # * For single-label cls, loss = loss.sum() / N + # * For multi-label cls, loss = loss.sum() or loss.mean() + avg_factor = y_a.size(0) if single_label else None + + # 2.1 mixup (hard ce) cls (using softmax) + if not self.multi_label: + assert self.two_hot == False + losses['loss'] = \ + self.criterion(cls_score[0], y_a, avg_factor=avg_factor, **kwargs) * lam + \ + self.criterion(cls_score[0], y_b, avg_factor=avg_factor, **kwargs) * (1 - lam) + else: + # convert to onehot (binary) for multi-label mixup cls + if single_label: + y_a = F.one_hot(y_a, num_classes=self.num_classes) + y_b = F.one_hot(y_b, num_classes=self.num_classes) + # basic mixup labels: sumed to 1 + y_mixed = lam * y_a + (1 - lam) * y_b + use_eta_weight = None + class_weight = None + + # 2.2 mixup (sigmoid) multi-lalebl sumed to 2 (using two-hot loss) + if self.two_hot: + if self.lam_scale_mode != 'none': + lam_a = self.lambda_adjust( + lam, mode=self.lam_scale_mode, thr=self.lam_thr, idx=self.lam_idx) + lam_b = self.lambda_adjust( + 1-lam, mode=self.lam_scale_mode, thr=self.lam_thr, idx=self.lam_idx) + if label_mask is not None: + lam_a = lam_a if label_mask[0] else lam + lam_b = lam_b if label_mask[1] else 1-lam + y_mixed = lam_a * y_a + lam_b * y_b + else: + y_mixed = y_a + y_b + # 2.3 mixup (soft) single-label sumed to 1 (using softmax) + else: + if self.eta_weight["eta"] != 0: + # whether to use eta + below_thr = lam < self.eta_weight["thr"] + if self.eta_weight["mode"] == 'less': + use_eta_weight = [lam, 0] if below_thr else [0, 1-lam] + elif self.eta_weight["mode"] == 'more': + use_eta_weight = [lam, 0] if not below_thr else [0, 1-lam] + else: + use_eta_weight = [lam, 1-lam] # 'both' + # eta rescale by lam + for i in range(len(use_eta_weight)): + if use_eta_weight[i] > 0: + if self.lam_scale_mode != 'none': + use_eta_weight[i] = self.eta_weight["eta"] * \ + self.lambda_adjust( + use_eta_weight[i], mode=self.lam_scale_mode, + thr=self.lam_thr, idx=self.lam_idx) + else: + use_eta_weight[i] = self.eta_weight["eta"] + assert use_eta_weight[0] > 0 or use_eta_weight[1] > 0, \ + "one of eta should be non-zero, lam={}, lam_={}".format(lam, 1-lam) + # rescale the sum of labels, each hot <= 1 + if self.two_hot_scale < 1: + y_mixed = (y_mixed * self.two_hot_scale).clamp(max=1) + # remove neg in BCE loss + if self.neg_weight < 1: + class_weight = (y_mixed > 0).type(torch.float) + class_weight = class_weight.clamp(min=self.neg_weight) + losses['loss'] = self.criterion( + cls_score[0], y_mixed, + avg_factor=avg_factor, class_weight_override=class_weight, + eta_weight=use_eta_weight, **kwargs) + # compute accuracy + losses['acc'] = accuracy(cls_score[0], labels[0]) + losses['acc_mix'] = accuracy_mixup(cls_score[0], labels) + return losses diff --git a/openmixup/models/heads/contrastive_head.py b/openmixup/models/heads/contrastive_head.py new file mode 100644 index 00000000..88e4f1b7 --- /dev/null +++ b/openmixup/models/heads/contrastive_head.py @@ -0,0 +1,38 @@ +import torch +import torch.nn as nn + +from ..registry import HEADS + + +@HEADS.register_module +class ContrastiveHead(nn.Module): + """Head for contrastive learning. + + Args: + temperature (float): The temperature hyper-parameter that + controls the concentration level of the distribution. + Default: 0.1. 
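+
+    Example (an InfoNCE-style sketch with random similarities; assumes a
+        CUDA device since the labels are created with ``.cuda()``)::
+
+        >>> head = ContrastiveHead(temperature=0.1)
+        >>> pos = torch.randn(4, 1).cuda()   # Nx1 positive similarities
+        >>> neg = torch.randn(4, 16).cuda()  # Nxk negative similarities
+        >>> head(pos, neg)['loss'].dim()
+        0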
+ """ + + def __init__(self, temperature=0.1): + super(ContrastiveHead, self).__init__() + self.criterion = nn.CrossEntropyLoss() + self.temperature = temperature + + def forward(self, pos, neg): + """Forward head. + + Args: + pos (Tensor): Nx1 positive similarity. + neg (Tensor): Nxk negative similarity. + + Returns: + dict[str, Tensor]: A dictionary of loss components. + """ + N = pos.size(0) + logits = torch.cat((pos, neg), dim=1) + logits /= self.temperature + labels = torch.zeros((N, ), dtype=torch.long).cuda() + losses = dict() + losses['loss'] = self.criterion(logits, labels) + return losses diff --git a/openmixup/models/heads/latent_pred_head.py b/openmixup/models/heads/latent_pred_head.py new file mode 100644 index 00000000..89cf8459 --- /dev/null +++ b/openmixup/models/heads/latent_pred_head.py @@ -0,0 +1,37 @@ +import torch.nn as nn +from mmcv.cnn import normal_init + +from ..registry import HEADS +from .. import builder + + +@HEADS.register_module +class LatentPredictHead(nn.Module): + """Head for contrastive learning. + """ + + def __init__(self, predictor, size_average=True): + super(LatentPredictHead, self).__init__() + self.predictor = builder.build_neck(predictor) + self.size_average = size_average + + def init_weights(self, init_linear='normal'): + self.predictor.init_weights(init_linear=init_linear) + + def forward(self, input, target): + """Forward head. + + Args: + input (Tensor): NxC input features. + target (Tensor): NxC target features. + + Returns: + dict[str, Tensor]: A dictionary of loss components. + """ + pred = self.predictor([input])[0] + pred_norm = nn.functional.normalize(pred, dim=1) + target_norm = nn.functional.normalize(target, dim=1) + loss = -2 * (pred_norm * target_norm).sum() + if self.size_average: + loss /= input.size(0) + return dict(loss=loss) diff --git a/openmixup/models/heads/multi_cls_head.py b/openmixup/models/heads/multi_cls_head.py new file mode 100644 index 00000000..168bdeec --- /dev/null +++ b/openmixup/models/heads/multi_cls_head.py @@ -0,0 +1,83 @@ +import torch.nn as nn + +from ..utils import accuracy +from ..registry import HEADS +from ..utils import build_norm_layer, MultiPooling + + +@HEADS.register_module +class MultiClsHead(nn.Module): + """Multiple classifier heads. 
+ """ + + FEAT_CHANNELS = { + 'resnet18': [64, 64, 128, 256, 512], + 'resnet50': [64, 256, 512, 1024, 2048] + } + FEAT_LAST_UNPOOL = { + 'resnet50': 2048 * 7 * 7 + } + + def __init__(self, + pool_type='adaptive', + in_indices=(0, ), + with_last_layer_unpool=False, + backbone='resnet50', + norm_cfg=dict(type='BN'), + num_classes=1000): + super(MultiClsHead, self).__init__() + assert norm_cfg['type'] in ['BN', 'SyncBN', 'GN', 'null'] + + self.with_last_layer_unpool = with_last_layer_unpool + self.with_norm = norm_cfg['type'] != 'null' + + self.criterion = nn.CrossEntropyLoss() + + self.multi_pooling = MultiPooling(pool_type, in_indices, backbone) + + if self.with_norm: + self.norms = nn.ModuleList([ + build_norm_layer(norm_cfg, self.FEAT_CHANNELS[backbone][l])[1] + for l in in_indices + ]) + + self.fcs = nn.ModuleList([ + nn.Linear(self.multi_pooling.POOL_DIMS[backbone][l], num_classes) + for l in in_indices + ]) + if with_last_layer_unpool: + self.fcs.append( + nn.Linear(self.FEAT_LAST_UNPOOL[backbone], num_classes)) + + def init_weights(self): + for m in self.modules(): + if isinstance(m, nn.Linear): + nn.init.normal_(m.weight, 0, 0.01) + nn.init.constant_(m.bias, 0) + elif isinstance(m, + (nn.BatchNorm2d, nn.GroupNorm, nn.SyncBatchNorm)): + if m.weight is not None: + nn.init.constant_(m.weight, 1) + if m.bias is not None: + nn.init.constant_(m.bias, 0) + + def forward(self, x): + assert isinstance(x, (list, tuple)) + if self.with_last_layer_unpool: + last_x = x[-1] + x = self.multi_pooling(x) + if self.with_norm: + x = [n(xx) for n, xx in zip(self.norms, x)] + if self.with_last_layer_unpool: + x.append(last_x) + x = [xx.view(xx.size(0), -1) for xx in x] + x = [fc(xx) for fc, xx in zip(self.fcs, x)] + return x + + def loss(self, cls_score, labels): + losses = dict() + for i, s in enumerate(cls_score): + # keys must contain "loss" + losses['loss.{}'.format(i + 1)] = self.criterion(s, labels) + losses['acc.{}'.format(i + 1)] = accuracy(s, labels) + return losses diff --git a/openmixup/models/heads/pmix_block_V2.py b/openmixup/models/heads/pmix_block_V2.py new file mode 100644 index 00000000..f76b636c --- /dev/null +++ b/openmixup/models/heads/pmix_block_V2.py @@ -0,0 +1,486 @@ +import torch +import torch.nn as nn +import math +from ..registry import HEADS +from mmcv.cnn import NonLocal2d, kaiming_init, normal_init +from ..necks import ConvNeck +from .. import builder +from ..utils import build_norm_layer +from openmixup.utils import print_log + + +@HEADS.register_module +class PixelMixBlock_V2(nn.Module): + """Pixel-wise MixBlock V2. + version v08.24 + add pre_attn and pre_conv + version v10.09 + add learnable lam mult + + Args: + in_channels (int): Channels of the input feature map. + reduction (int): Channel reduction ratio. Default: 2. + use_scale (bool): Whether to scale pairwise_weight by + `1/sqrt(inter_channels)` when the mode is `embedded_gaussian`. + Default: True. + double_norm (bool): Whether to scale pairwise_weight again by L1 norm. + Default: False + attention_mode (str): Options (non-local) are `gaussian`, `concatenation`, + `embedded_gaussian` and `dot_product`. Default: embedded_gaussian. + unsampling_mode (str): Unsampling mode {'nearest', 'bilinear', etc}. + Default: 'nearest'. + pre_norm_cfg (dict): Config dict for a norm before q,k,v input of MixBlock. + e.g., pre_norm_cfg=dict(type='BN', requires_grad=True). + Default: None. + pre_conv_cfg (dict): Config dict for a before MixBlock convolution neck. 
+            e.g., pre_conv_cfg=dict(
+                type="ConvNeck", in_channels=256, hid_channels=128, out_channels=256,
+                num_layers=2, kernel_size=3, with_bias=True, with_residual=True).
+            Default: None.
+        pre_attn_cfg (dict): Config dict for a self-attention block before MixBlock.
+            e.g., pre_attn_cfg=dict(in_channels=256, mode='gaussian').
+            Default: None.
+        pre_neck_cfg (dict): Config dict for a Neck parallel to MixBlock, which converts
+            feature maps to flattened vectors for the pre_head (directly supervised by loss).
+            E.g., pre_neck_cfg=dict(
+                type='LinearNeck', in_channels=256, out_channels=128, with_avg_pool=True)
+            Default: None.
+        pre_head_cfg (dict): Config dict for a loss head parallel to MixBlock, e.g., infoNCE
+            or classification CE loss, which is used to train pre_conv and pre_attn.
+            Default: None.
+        lam_concat (bool): Whether to concat lam as a channel in all input q, k, v.
+            Default: False. (lam_concat=False if lam_concat_v=True)
+        lam_concat_v (bool): Whether to concat lam as a channel in v but not in q, k.
+            Default: False. (lam_concat_v=False if lam_concat=True)
+        lam_mul (bool or float): Whether to multiply lam to x_lam and (1-lam) to x_lam_
+            to get the pair-wise weight.
+            Default: False.
+        lam_mul_k (float): Rescale lambda before multiplying to x, adjusted by k.
+            Default: -1.
+        lam_residual (bool): Whether to use residual addition for lam_mult.
+            Default: False.
+        value_neck_cfg (dict): Config dict for a non-linear value embedding network.
+            E.g., value_neck_cfg=dict(
+                type="ConvNeck", in_channels=256, hid_channels=128, out_channels=1, act_cfg=dict(type='ELU'),
+                num_layers=2, kernel_size=1, with_bias=True, with_residual=False).
+            Default: None. (default value network is 1x1 conv)
+        x_qk_concat (bool): Whether to concat x and x_ in the q, k pair-wise weight embedding.
+            Default: False.
+        x_v_concat (bool): Whether to concat x and x_ in the value embedding.
+            Default: False.
+        mask_loss_mode (str): Which mode in {'L1', 'L2', 'none', 'Variance'} to calculate loss.
+            Default: "none".
+        mask_loss_margin (int): Margin of the loss for the grid mask patterns. Default: 0.
+        mask_mode (str): Which mode to normalize mixup masks to sum=1. Default: "none".
+ """ + + def __init__(self, + in_channels, + reduction=2, + use_scale=True, + double_norm=False, + attention_mode='embedded_gaussian', + unsampling_mode='bilinear', + pre_norm_cfg=None, + pre_conv_cfg=None, + pre_attn_cfg=None, + pre_neck_cfg=None, + pre_head_cfg=None, + lam_concat=False, + lam_concat_v=False, + lam_mul=0., + lam_mul_k=-1, + lam_residual=False, + value_neck_cfg=None, + x_qk_concat=False, + x_v_concat=False, + mask_loss_mode="none", + mask_loss_margin=0, + mask_mode="none", + frozen=False): + super(PixelMixBlock_V2, self).__init__() + # non-local args + self.in_channels = int(in_channels) + self.reduction = int(reduction) + self.use_scale = bool(use_scale) + self.double_norm = bool(double_norm) + self.inter_channels = max(in_channels // reduction, 1) + self.attention_mode = str(attention_mode) + self.unsampling_mode = str(unsampling_mode) + assert self.attention_mode in ['gaussian', 'embedded_gaussian'] + assert self.unsampling_mode in [ + 'nearest', 'linear', 'bilinear', 'bicubic', 'trilinear', + ] + + # pre MixBlock or parallel to MixBlock + assert pre_norm_cfg is None or isinstance(pre_norm_cfg, dict) + assert pre_conv_cfg is None or isinstance(pre_conv_cfg, dict) + assert pre_attn_cfg is None or isinstance(pre_attn_cfg, dict) + assert pre_neck_cfg is None or isinstance(pre_neck_cfg, dict) + assert pre_head_cfg is None or isinstance(pre_head_cfg, dict) + self.pre_norm = pre_norm_cfg + self.pre_conv = pre_conv_cfg + self.pre_attn = pre_attn_cfg + self.pre_neck = pre_neck_cfg + self.pre_head = pre_head_cfg + if pre_norm_cfg is not None: + _, self.pre_norm = build_norm_layer(pre_norm_cfg, in_channels) + if pre_conv_cfg is not None: + self.pre_conv = ConvNeck(**pre_conv_cfg) + if pre_attn_cfg is not None: + self.pre_attn = NonLocal2d(**pre_attn_cfg) + if pre_neck_cfg is not None: + self.pre_neck = builder.build_neck(pre_neck_cfg) + if pre_head_cfg is not None: + self.pre_head = builder.build_head(pre_head_cfg) + + # mixblock args + self.lam_concat = bool(lam_concat) + self.lam_concat_v = bool(lam_concat_v) + self.lam_mul = float(lam_mul) if float(lam_mul) > 0 else 0 + self.lam_mul_k = float(lam_mul_k) if float(lam_mul_k) > 0 else -1 + self.lam_residual = bool(lam_residual) + assert value_neck_cfg is None or isinstance(value_neck_cfg, dict) + self.value_neck_cfg = value_neck_cfg + self.x_qk_concat = bool(x_qk_concat) + self.x_v_concat = bool(x_v_concat) + self.mask_loss_mode = str(mask_loss_mode) + self.mask_loss_margin = max(mask_loss_margin, 0.) 
+ self.mask_mode = str(mask_mode) + self.frozen = bool(frozen) + assert 0 <= lam_mul and lam_mul <= 1 + assert lam_mul_k == -1 or (lam_mul_k <= 10 and lam_mul_k >= 0) + assert mask_loss_mode in [ + "none", "L2", "L1", "Variance", "L1+Variance", "L2+Variance", "Sparsity"] + assert mask_mode in [ + "none", "none_v_", "sum", "softmax"] + if self.lam_concat or self.lam_concat_v: + assert self.lam_concat != self.lam_concat_v, \ + "lam_concat can be adopted on q,k,v or only on v" + if self.lam_concat or self.lam_mul: + assert self.lam_concat != self.lam_mul, \ + "both lam_concat and lam_mul change q,k,v in terms of lam" + if self.lam_concat or self.x_qk_concat: + assert self.lam_concat != self.x_qk_concat, \ + "x_lam=x_lam_=cat(x,x_) if x_qk_concat=True, it's no use to concat lam" + + # concat all as k,q,v + self.qk_in_channels = int(in_channels + 1) \ + if self.lam_concat else int(in_channels) + self.v_in_channels = int(in_channels + 1) \ + if self.lam_concat or self.lam_concat_v else int(in_channels) + if self.x_qk_concat: + self.qk_in_channels = int(2 * self.in_channels) + if self.x_v_concat: + self.v_in_channels = int(2 * self.in_channels) + + # MixBlock, conv value + if value_neck_cfg is None: + self.value = nn.Conv2d( + self.v_in_channels, + 1, + kernel_size=1, + stride=1) + else: + value_neck_cfg["in_channels"] = self.v_in_channels + self.value = builder.build_neck(value_neck_cfg) + # MixBlock, conv q,k + if self.attention_mode == 'embedded_gaussian': + self.key = None + if self.x_qk_concat: # sym conv q and k + # conv key + self.key = nn.Conv2d( + self.qk_in_channels, + self.inter_channels, + kernel_size=1, + stride=1) + # conv query + self.query = nn.Conv2d( + self.qk_in_channels, + self.inter_channels, + kernel_size=1, + stride=1) + + self.init_weights() + if self.frozen: + self._freeze() + + def init_weights(self, init_linear='normal', std=0.01, bias=0.): + assert init_linear in ['normal', 'kaiming'], \ + "Undefined init_linear: {}".format(init_linear) + # init mixblock + for m in self.modules(): + if isinstance(m, (nn.Linear, nn.Conv2d)): + if init_linear == 'normal': + normal_init(m, std=std, bias=bias) + else: + kaiming_init(m, mode='fan_in', nonlinearity='relu') + elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm, nn.SyncBatchNorm)): + if m.weight is not None: + nn.init.constant_(m.weight, 1) + if m.bias is not None: + nn.init.constant_(m.bias, 0) + + def _freeze(self): + # before mixblock + if self.pre_norm is not None: + self.pre_norm.eval() + if self.pre_conv is not None: + self.pre_conv.eval() + if self.pre_attn is not None: + self.pre_attn.eval() + if self.pre_neck is not None: + self.pre_neck.eval() + if self.pre_head is not None: + self.pre_head.eval() + # mixblock + self.value.eval() + if self.attention_mode == 'embedded_gaussian': + self.query.eval() + if self.key is not None: + self.key.eval() + # detach + if self.frozen: + # before mixblock + if self.pre_norm is not None: + for param in self.pre_norm.parameters(): + param.requires_grad = False + if self.pre_conv is not None: + for param in self.pre_conv.parameters(): + param.requires_grad = False + if self.pre_attn is not None: + for param in self.pre_attn.parameters(): + param.requires_grad = False + if self.pre_neck is not None: + for param in self.pre_neck.parameters(): + param.requires_grad = False + if self.pre_head is not None: + for param in self.pre_head.parameters(): + param.requires_grad = False + # mixblock + if self.attention_mode == 'embedded_gaussian': + for param in self.query.parameters(): + 
param.requires_grad = False
+            if self.key is not None:
+                for param in self.key.parameters():
+                    param.requires_grad = False
+            for param in self.value.parameters():
+                param.requires_grad = False
+
+    def gaussian(self, q_x, k_x):
+        """ non-local similarity func """
+        # NonLocal2d pairwise_weight: [N, HxW, HxW]
+        pairwise_weight = torch.matmul(q_x, k_x)
+        if torch.any(torch.isnan(pairwise_weight)):
+            print_log("Warning: attention map is nan, P: {}".format(pairwise_weight), logger='root')
+            raise ValueError
+        if self.use_scale:
+            pairwise_weight /= q_x.shape[-1]**0.5
+        pairwise_weight = pairwise_weight.softmax(dim=-1)
+        return pairwise_weight
+
+    def embedded_gaussian(self, q_x, k_x):
+        """ learnable non-local similarity func """
+        # NonLocal2d pairwise_weight: [N, HxW, HxW]
+        pairwise_weight = torch.matmul(q_x, k_x)
+        if torch.any(torch.isnan(pairwise_weight)):
+            print_log("Warning: attention map is nan, P: {}".format(pairwise_weight), logger='root')
+            raise ValueError
+        if self.use_scale:
+            # q_x.shape[-1] is `self.inter_channels`
+            pairwise_weight /= q_x.shape[-1]**0.5
+        pairwise_weight = pairwise_weight.softmax(dim=-1)
+        if self.double_norm:
+            pairwise_weight = pairwise_weight / (1e-8 + pairwise_weight.sum(dim=1, keepdim=True))
+        return pairwise_weight
+
+    def rescale_lam_mult(self, lam, k=1):
+        """ adjust lam against y=x in terms of k """
+        assert k >= 0
+        k += 1
+        if not isinstance(lam, float):
+            lam = float(lam)
+        return 1 / (k - 2/3) * (4/3 * math.pow(lam, 3) - 2 * lam**2 + k * lam)
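+
+    # Note (added for clarity): a quick sanity check of `rescale_lam_mult`
+    # with illustrative values. After the internal `k += 1`, the cubic is
+    #   f(lam) = (4/3 * lam^3 - 2 * lam^2 + k * lam) / (k - 2/3),
+    # which satisfies f(0)=0, f(0.5)=0.5 and f(1)=1 for any k, so it only
+    # bends lambda towards or away from y=x in between, e.g., k=1 (internal
+    # k=2) gives f(0.25) = (4/3*0.015625 - 2*0.0625 + 0.5) / (4/3) ~= 0.297.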
+    def forward(self, x, lam, index, scale_factor, debug=False):
+        """ v08.23, add pre_conv and pre_attn
+
+            x (tensor): Input feature map [N, C, H, W].
+            lam (float): Mixup ratio lambda.
+            index (tensor): Random shuffle index in current mini-batch.
+            scale_factor (int): Upsampling factor (assert scale_factor % 2 == 0).
+            debug (bool): Whether to use debug mode.
+        """
+        results = dict()
+        # pre-step 0: input 2d feature map x, [N, C, H, W]
+        if isinstance(x, list) and index is None:
+            assert len(x) == 2  # only for SSL mixup
+            x = torch.cat(x)
+        n, _, h, w = x.size()
+        # pre-step 1: before mixblock, add pre conv and attn
+        if self.pre_attn is not None:
+            x = self.pre_attn(x)
+        if self.pre_conv is not None:
+            x = self.pre_conv([x])[0]
+        if self.pre_norm is not None:
+            x = self.pre_norm(x)
+
+        if index is None:  # only for SSL mixup, [2N, C, H, W]
+            n = n // 2
+            x_lam = x[:n, ...]
+            x_lam_ = x[n:, ...]
+        else:  # supervised cls
+            x_lam = x
+            x_lam_ = x[index, :]  # shuffle within a gpu
+        results = dict(x_lam=x_lam, x_lam_=x_lam_)
+
+        # pre-step 2: lambda encoding
+        if self.lam_mul > 0:  # multiply lam to x_lam
+            assert self.lam_concat == False
+            # rescale lam
+            if self.lam_mul_k >= 0:
+                lam_rescale = self.rescale_lam_mult(lam, self.lam_mul_k)
+            else:
+                lam_rescale = lam
+            # using residual
+            if self.lam_residual:
+                x_lam = x_lam * (1 + lam_rescale * self.lam_mul)
+                x_lam_ = x_lam_ * (1 + (1 - lam_rescale) * self.lam_mul)
+            else:
+                x_lam = x_lam * lam_rescale
+                x_lam_ = x_lam_ * (1 - lam_rescale)
+        if self.lam_concat:  # concat lam as a new channel
+            # assert self.lam_mul > 0 and self.x_qk_concat == False
+            lam_block = torch.zeros(n, 1, h, w).cuda()
+            lam_block[:] = lam
+            x_lam = torch.cat([x_lam, lam_block], dim=1)
+            x_lam_ = torch.cat([x_lam_, 1-lam_block], dim=1)
+
+        # step 1: compute 1x1 conv value, v: [N, HxW, 1].
+        v, v_ = x_lam, x_lam_
+        if self.x_v_concat:
+            v = torch.cat([x_lam, x_lam_], dim=1)
+            v_ = v
+        if self.lam_concat_v:
+            lam_block = torch.zeros(n, 1, h, w).cuda()
+            lam_block[:] = lam
+            v = torch.cat([x_lam, lam_block], dim=1)
+            v_ = torch.cat([x_lam_, 1-lam_block], dim=1)
+        if self.mask_mode != "none":  # compute both v and v_
+            if self.value_neck_cfg is None:
+                v_ = self.value(v_).view(n, 1, -1)  # [N, 1, HxW]
+            else:
+                v_ = self.value([v_])[0].view(n, 1, -1)  # [N, 1, HxW]
+            v_ = v_.permute(0, 2, 1)  # v_ for 1-lam: [N, HxW, 1]
+        if self.value_neck_cfg is None:
+            v = self.value(v).view(n, 1, -1)  # [N, 1, HxW]
+        else:
+            v = self.value([v])[0].view(n, 1, -1)  # [N, 1, HxW]
+        v = v.permute(0, 2, 1)  # v for lam: [N, HxW, 1]
+        # debug mode
+        if debug:
+            debug_plot = dict(value=v_.view(n, h, -1).clone().detach())
+
+        # step 2: compute 1x1 conv q & k, q_x: [N, HxW, C], k_x: [N, C, HxW].
+        if self.x_qk_concat:
+            x_lam = torch.cat([x_lam, x_lam_], dim=1)
+            x_lam_ = x_lam
+        if self.attention_mode == 'gaussian':
+            q_x = x_lam.view(n, self.qk_in_channels, -1)
+            q_x = q_x.permute(0, 2, 1)  # q for lam: [N, HxW, C]
+            k_x = x_lam_.view(n, self.qk_in_channels, -1)  # k for 1-lam: [N, C, HxW]
+        else:
+            # query
+            q_x = self.query(x_lam).view(n, self.inter_channels, -1)
+            q_x = q_x.permute(0, 2, 1)  # q for lam: [N, HxW, C/r]
+            # key
+            if self.key is not None:
+                k_x = self.key(x_lam_).view(n, self.inter_channels, -1)  # [N, C/r, HxW]
+            else:
+                k_x = self.query(x_lam_).view(n, self.inter_channels, -1)  # [N, C/r, HxW]
+
+        # step 3: 2d pairwise_weight: [N, HxW, HxW]
+        pairwise_func = getattr(self, self.attention_mode)
+        pairwise_weight = pairwise_func(q_x, k_x)  # x_lam [N, HxW, C/r] x [N, C/r, HxW] x_lam_
+
+        # debug mode
+        if debug:
+            debug_plot["pairwise_weight"] = pairwise_weight.clone().detach()
+            results["debug_plot"] = debug_plot
+
+        # step 4: generate mask and upsampling
+        mask_lam = torch.matmul(  # P^T x v_lam = mask_lam
+            pairwise_weight.permute(0, 2, 1), v).view(n, 1, h, w)  # mask for lam
+        if torch.any(torch.isnan(mask_lam)):
+            print_log("Warning: mask_lam is nan, P: {}, v: {}".format(pairwise_weight, v), logger='root')
+            pairwise_weight = pairwise_weight.clamp(min=-1e20, max=1e20)
+            mask_lam = torch.matmul(  # P^T x v_lam = mask_lam
+                pairwise_weight.permute(0, 2, 1), v.clamp(min=-1e20, max=1e20)
+            ).view(n, 1, h, w)
+        upsampling = nn.Upsample(scale_factor=scale_factor, mode=self.unsampling_mode)
+        mask_lam = upsampling(mask_lam)
+        mask_lam = torch.sigmoid(mask_lam)  # mask for lam, in [0, 1]
+
+        if self.mask_mode != "none":
+            # P x v_lam_ = mask_lam_
+            mask_lam_ = torch.matmul(pairwise_weight, v_).view(n, 1, h, w)  # 1 - lam
+            if torch.any(torch.isnan(mask_lam_)):
+                print_log("Warning: mask_lam_ is nan, P: {}, v: {}".format(pairwise_weight, v_), logger='root')
+                mask_lam_ = torch.matmul(  # fix: assign to mask_lam_, not mask_lam
+                    pairwise_weight, v_.clamp(min=-1e20, max=1e20)
+                ).view(n, 1, h, w)
+            mask_lam_ = upsampling(mask_lam_)
+            mask_lam_ = torch.sigmoid(mask_lam_)  # mask for 1-lam
+            if self.mask_mode == "sum":
+                # stop grad of one side [try]
+                mask = torch.cat([mask_lam.clone().detach(), mask_lam_], dim=1)
+                # sum to 1
+                sum_masks = mask.sum(1, keepdim=True)
+                mask /= sum_masks
+            elif self.mask_mode == "softmax":
+                # stop grad of one side [try]
+                mask = torch.cat([mask_lam.clone().detach(), mask_lam_], dim=1)
+                # sum to 1 by softmax
+                mask = mask.softmax(dim=1)
+            elif self.mask_mode == "none_v_":
+                mask_lam = None
+                mask = torch.cat([1 - mask_lam_, mask_lam_], dim=1)
+            else:
+                raise NotImplementedError
+        else:
+            mask = torch.cat([mask_lam, 1 - mask_lam], dim=1)
+
+        results["mask"] = mask
+        return results
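+
+    # Usage sketch (illustrative; the caller names below are hypothetical):
+    # a trainer mixes two images with the upsampled mask returned above, e.g.,
+    #   results = mix_block(feat, lam, index, scale_factor)
+    #   mask = results["mask"]  # [N, 2, H*scale, W*scale], channels sum to 1
+    #   img_mix = img * mask[:, :1, ...] + img[index] * mask[:, 1:, ...]
+    # where channel 0 weights the lam sample and channel 1 the 1-lam sample.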
+    def mask_loss(self, mask, lam):
+        """ loss for mixup masks """
+        losses = dict()
+        assert mask.dim() == 4
+        n, k, h, w = mask.size()  # mixup mask [N, 2, H, W]
+        if k > 1:  # take the second (1-lam) mask; the first one was detached
+            mask = mask[:, 1, :, :].unsqueeze(1)
+        m_mean = mask.sum() / (n * h * w)  # mask mean in [0, 1]
+        zero = torch.tensor(0.).cuda()
+
+        if self.mask_loss_mode == "L1":  # [0, 1-m]
+            losses['loss'] = torch.max(torch.abs(1 - m_mean - lam) - self.mask_loss_margin, zero).mean()
+        elif self.mask_loss_mode == "L2":  # [0, 1-m^2]
+            losses['loss'] = torch.max((1 - m_mean - lam) ** 2 - self.mask_loss_margin ** 2, zero).mean()
+        elif self.mask_loss_mode == "Variance":  # [0, 0.5]
+            losses['loss'] = -torch.max((torch.sum((mask - m_mean)**2) / (n * h * w)), zero)
+        elif self.mask_loss_mode == "Sparsity":  # [0, 0.25-m]
+            losses['loss'] = torch.max(torch.abs(mask * (mask - 1)).sum() / (n * h * w) - self.mask_loss_margin, zero)
+        elif self.mask_loss_mode == "L1+Variance":  # [0, 1-m] + [0, 1]
+            losses['loss'] = torch.max(torch.abs(1 - m_mean - lam) - self.mask_loss_margin, zero).mean() - \
+                2 * torch.max((torch.sum((mask - m_mean)**2) / (n * h * w)), zero)
+        elif self.mask_loss_mode == "L2+Variance":  # [0, 1-m^2] + [0, 1]
+            losses['loss'] = torch.max((1 - m_mean - lam) ** 2 - self.mask_loss_margin ** 2, zero).mean() - \
+                2 * torch.max((torch.sum((mask - m_mean)**2) / (n * h * w)), zero)
+        else:
+            raise NotImplementedError
+        if torch.isnan(losses['loss']):
+            print_log("Warning: mask loss is nan, loss: {}, mask sum: {}".format(losses['loss'], mask), logger='root')
+            losses['loss'] = None
+            # raise ValueError
+        return losses
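+
+
+# Note on `mask_loss` (summary of the modes above, added for clarity): with
+# m = mean(mask[:, 1]), the mean of the second (1-lam) mask, the "L1" mode
+# penalizes max(|1 - m - lam| - margin, 0), pushing m towards 1 - lam so the
+# learned mixing weights match the sampled lambda, while the "Variance" terms
+# reward spatially non-constant masks to avoid trivial constant solutions.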
diff --git a/openmixup/models/losses/__init__.py b/openmixup/models/losses/__init__.py
new file mode 100644
index 00000000..3a75a7d9
--- /dev/null
+++ b/openmixup/models/losses/__init__.py
@@ -0,0 +1,13 @@
+from .asymmetric_loss import AsymmetricLoss, asymmetric_loss
+from .cross_entropy_loss import CrossEntropyLoss, binary_cross_entropy, cross_entropy
+from .focal_loss import FocalLoss, sigmoid_focal_loss
+from .label_smooth_loss import LabelSmoothLoss
+from .utils import convert_to_one_hot, weight_reduce_loss, weighted_loss
+
+
+__all__ = [
+    'asymmetric_loss', 'AsymmetricLoss',
+    'cross_entropy', 'binary_cross_entropy', 'CrossEntropyLoss',
+    'weight_reduce_loss', 'LabelSmoothLoss', 'weighted_loss', 'FocalLoss',
+    'sigmoid_focal_loss', 'convert_to_one_hot'
+]
diff --git a/openmixup/models/losses/asymmetric_loss.py b/openmixup/models/losses/asymmetric_loss.py
new file mode 100644
index 00000000..f2d12678
--- /dev/null
+++ b/openmixup/models/losses/asymmetric_loss.py
@@ -0,0 +1,207 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import torch
+import torch.nn as nn
+
+from ..registry import LOSSES
+from .utils import weight_reduce_loss, convert_to_one_hot
+
+
+def asymmetric_loss(pred,
+                    target,
+                    weight=None,
+                    gamma_pos=1.0,
+                    gamma_neg=4.0,
+                    clip=0.05,
+                    disable_grad_focal=True,
+                    reduction='mean',
+                    avg_factor=None):
+    r"""asymmetric loss.
+
+    Please refer to the `paper <https://arxiv.org/abs/2009.14119>`__ for
+    details.
+
+    Args:
+        pred (torch.Tensor): The predicted logits with shape (N, \*).
+        target (torch.Tensor): The ground truth label of the prediction with
+            shape (N, \*), (multi-label binarized vector).
+        weight (torch.Tensor, optional): Sample-wise loss weight with shape
+            (N, ). Defaults to None.
+        gamma_pos (float): positive focusing parameter. Defaults to 1.0.
+        gamma_neg (float): Negative focusing parameter. We usually set
+            gamma_neg > gamma_pos. Defaults to 4.0.
+        clip (float, optional): Probability margin. Defaults to 0.05.
+        disable_grad_focal (bool): Whether to disable grad when calculating the
+            gradient-related weights for ACL.
+        reduction (str): The method used to reduce the loss.
+            Options are "none", "mean" and "sum". If reduction is 'none' , loss
+            is same shape as pred and label. Defaults to 'mean'.
+        avg_factor (int, optional): Average factor that is used to average
+            the loss. Defaults to None.
+
+    Returns:
+        torch.Tensor: Loss.
+    """
+    assert pred.shape == \
+        target.shape, 'pred and target should be in the same shape.'
+
+    eps = 1e-8
+    pred_sigmoid = pred.sigmoid()
+    target = target.type_as(pred)
+
+    if clip and clip > 0:
+        pt = (1 - pred_sigmoid +
+              clip).clamp(max=1) * (1 - target) + pred_sigmoid * target
+    else:
+        pt = (1 - pred_sigmoid) * (1 - target) + pred_sigmoid * target
+    asymmetric_weight = (1 - pt).pow(gamma_pos * target + gamma_neg *
+                                     (1 - target))
+    loss = -torch.log(pt.clamp(min=eps)) * asymmetric_weight
+    if weight is not None:
+        assert weight.dim() == 1
+        weight = weight.float()
+        if pred.dim() > 1:
+            weight = weight.reshape(-1, 1)
+    loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
+    return loss
+
+
+@LOSSES.register_module()
+class AsymmetricLoss(nn.Module):
+    """asymmetric loss.
+
+    Args:
+        gamma_pos (float): positive focusing parameter.
+            Defaults to 0.0.
+        gamma_neg (float): Negative focusing parameter. We
+            usually set gamma_neg > gamma_pos. Defaults to 4.0.
+        clip (float, optional): Probability margin. Defaults to 0.05.
+        reduction (str): The method used to reduce the loss into
+            a scalar.
+        loss_weight (float): Weight of loss. Defaults to 1.0.
+    """
+
+    def __init__(self,
+                 gamma_pos=0.0,
+                 gamma_neg=4.0,
+                 clip=0.05,
+                 reduction='mean',
+                 loss_weight=1.0,
+                 **kwargs):
+        super(AsymmetricLoss, self).__init__()
+        self.gamma_pos = gamma_pos
+        self.gamma_neg = gamma_neg
+        self.clip = clip
+        self.reduction = reduction
+        self.loss_weight = loss_weight
+
+    def forward(self,
+                pred,
+                target,
+                weight=None,
+                avg_factor=None,
+                reduction_override=None):
+        """asymmetric loss."""
+        assert reduction_override in (None, 'none', 'mean', 'sum')
+        reduction = (
+            reduction_override if reduction_override else self.reduction)
+        # should be onehot targets
+        num_classes = pred.size(-1)
+        target = convert_to_one_hot(target, num_classes)
+
+        loss_cls = self.loss_weight * asymmetric_loss(
+            pred,
+            target,
+            weight,
+            gamma_pos=self.gamma_pos,
+            gamma_neg=self.gamma_neg,
+            clip=self.clip,
+            reduction=reduction,
+            avg_factor=avg_factor)
+        return loss_cls
+
+
+@LOSSES.register_module()
+class ASLSingleLabel(nn.Module):
+    """ ASL extended version for single-label classification problems
+     *** using 'exp' instead of 'sigmoid' ***
+
+    Args:
+        gamma_pos (float): positive focusing parameter.
+            Defaults to 0.0.
+        gamma_neg (float): Negative focusing parameter. We
+            usually set gamma_neg > gamma_pos. Defaults to 4.0.
+        clip (float, optional): Probability margin. Defaults to 0.05.
+        label_smooth_val (float): The degree of label smoothing.
+        disable_grad_focal (bool): Whether to disable grad when calculating
+            gradient-related weights for ACL.
+        reduction (str): The method used to reduce the loss into
+            a scalar.
+        loss_weight (float): Weight of loss. Defaults to 1.0.
+ """ + def __init__(self, + gamma_pos=0.0, + gamma_neg=4.0, + clip=None, + label_smooth_val=0, + disable_grad_focal=True, + reduction='mean', + loss_weight=1.0, + **kwargs): + super(ASLSingleLabel, self).__init__() + + self.logsoftmax = nn.LogSoftmax(dim=-1) + self.targets_classes = [] + self.gamma_pos = gamma_pos + self.gamma_neg = gamma_neg + self.clip = clip + self.label_smooth_val = label_smooth_val + self.disable_grad_focal = disable_grad_focal + self.reduction = reduction + self.loss_weight = loss_weight + + def forward(self, + pred, + target, + weight=None, + avg_factor=None, + reduction_override=None): + """ + pred (tensor): The predicted logits of [N, C]. + target (tensor): The onehot labels of [N, C] (binarized vector). + """ + assert reduction_override in (None, 'none', 'mean', 'sum') + reduction = ( + reduction_override if reduction_override else self.reduction) + + # should be onehot targets + num_classes = pred.size(-1) + target = convert_to_one_hot(target, num_classes) + + # Calculating Probabilities + log_preds = self.logsoftmax(pred) + anti_target = 1 - target + xs_pos = torch.exp(log_preds) # using 'exp' instead of 'sigmoid' + xs_neg = 1 - xs_pos + + # no Asymmetric Clipping for single labels + + # ASL weights + if self.disable_grad_focal: + torch.set_grad_enabled(False) + xs_pos = xs_pos * target + xs_neg = xs_neg * anti_target + asymmetric_w = torch.pow(1 - xs_pos - xs_neg, + self.gamma_pos * target + self.gamma_neg * anti_target) + if self.disable_grad_focal: + torch.set_grad_enabled(True) + log_preds = log_preds * asymmetric_w + + # label smoothing + if self.label_smooth_val > 0: + target = target.mul(1 - self.label_smooth_val).add( + self.label_smooth_val / num_classes) + + # loss calculation + loss = - target.mul(log_preds) + loss = weight_reduce_loss(loss, weight, reduction, avg_factor) + return loss diff --git a/openmixup/models/losses/cross_entropy_loss.py b/openmixup/models/losses/cross_entropy_loss.py new file mode 100644 index 00000000..71cb5da9 --- /dev/null +++ b/openmixup/models/losses/cross_entropy_loss.py @@ -0,0 +1,328 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + +from ..registry import LOSSES +from .utils import weight_reduce_loss, convert_to_one_hot + + +def cross_entropy(pred, + label, + weight=None, + reduction='mean', + avg_factor=None, + class_weight=None, + **kwargs): + r"""Calculate the CrossEntropy loss. + + Args: + pred (torch.Tensor): The prediction with shape (N, C), C is the number + of classes. + label (torch.Tensor): The gt label of the prediction. + weight (torch.Tensor, optional): Sample-wise loss weight. + reduction (str): The method used to reduce the loss. + avg_factor (int, optional): Average factor that is used to average + the loss. Defaults to None. + class_weight (torch.Tensor, optional): The weight for each class with + shape (C), C is the number of classes. Default None. + + Returns: + torch.Tensor: The calculated loss + """ + # element-wise losses + loss = F.cross_entropy(pred, label, weight=class_weight, reduction='none') + + # apply weights and do the reduction + if weight is not None: + weight = weight.float() + loss = weight_reduce_loss( + loss, weight=weight, reduction=reduction, avg_factor=avg_factor) + + return loss + + +def soft_cross_entropy(pred, + label, + weight=None, + reduction='mean', + class_weight=None, + avg_factor=None, + **kwargs): + r"""Calculate the Soft CrossEntropy loss. The label can be float. 
+
+    Args:
+        pred (torch.Tensor): The prediction with shape (N, C), C is the number
+            of classes.
+        label (torch.Tensor): The gt label of the prediction with shape (N, C).
+            When using "mixup", the label can be float.
+        weight (torch.Tensor, optional): Sample-wise loss weight.
+        reduction (str): The method used to reduce the loss.
+        avg_factor (int, optional): Average factor that is used to average
+            the loss. Defaults to None.
+        class_weight (torch.Tensor, optional): The weight for each class with
+            shape (C), C is the number of classes. Default None.
+
+    Returns:
+        torch.Tensor: The calculated loss
+    """
+    # element-wise losses
+    loss = -label * F.log_softmax(pred, dim=-1)
+    if class_weight is not None:
+        loss *= class_weight
+    loss = loss.sum(dim=-1)
+
+    # apply weights and do the reduction
+    if weight is not None:
+        weight = weight.float()
+    loss = weight_reduce_loss(
+        loss, weight=weight, reduction=reduction, avg_factor=avg_factor)
+
+    return loss
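+
+
+# Quick example of `soft_cross_entropy` (illustrative values): with C=3 and
+#   label = [0.7, 0.3, 0.0]  (a lam=0.7 mixup of classes 0 and 1),
+# the per-sample loss is -(0.7 * log p_0 + 0.3 * log p_1), i.e., the
+# lam-weighted sum of per-class CE terms, reduced by `weight_reduce_loss`.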
+
+
+def soft_mix_cross_entropy(pred,
+                           label,
+                           weight=None,
+                           reduction='mean',
+                           class_weight=None,
+                           avg_factor=None,
+                           eta_weight=None,
+                           **kwargs):
+    r"""Calculate the Soft Decoupled Mixup CrossEntropy loss using softmax.
+        The label can be a float mixup label (class-wise sum to 1, k-mixup, k>=2).
+        *** Warning: this mixup and label-smoothing cannot be set simultaneously ***
+
+    Args:
+        pred (torch.Tensor): The prediction with shape (N, C), C is the number
+            of classes.
+        label (torch.Tensor): The gt label of the prediction with shape (N, C).
+            When using "mixup", the label can be float (mixup one-hot label).
+        weight (torch.Tensor, optional): Sample-wise loss weight.
+        reduction (str): The method used to reduce the loss.
+        avg_factor (int, optional): Average factor that is used to average
+            the loss. Defaults to None.
+        class_weight (torch.Tensor, optional): The weight for each class with
+            shape (C), C is the number of classes. Default None.
+        eta_weight (list): Reweight the global loss in mixup cls loss as,
+            loss = loss_local + eta_weight[i] * loss_global[i]. Defaults to None.
+
+    Returns:
+        torch.Tensor: The calculated loss
+    """
+    # *** Assume k-mixup in C classes, k >= 2 and k << C ***
+    # step 1: remove labels that have fewer than k hots (samples mixed within
+    #   the same class reduce to the original one-hot label)
+    mask_one = (label > 0).sum(dim=-1)
+    mix_num = max(mask_one)
+    mask_one = mask_one >= mix_num
+    if mask_one.sum() < label.size(0):
+        pred_one = pred[mask_one==False, :]
+        label_one = label[mask_one==False, :]
+        pred = pred[mask_one, :]
+        label = label[mask_one, :]
+        weight_one = None
+        if weight is not None:
+            weight_one = weight[mask_one==False, ...].float()
+            weight = weight[mask_one, ...].float()
+    else:
+        if weight is not None:
+            weight = weight.float()
+        pred_one, label_one, weight_one = None, None, None
+
+    # step 2: select k-mixup for the local and global losses
+    bs, cls_num = label.size()  # N, C
+    assert isinstance(eta_weight, list)
+    # local: between k classes
+    mask_lam_k = label > 0  # [N, C], top k entries are true
+    lam_k = label[0, label[0, :] > 0]  # [k,] k-mix relevant classes
+
+    # local: original mixup CE loss between C classes
+    loss = -label * F.log_softmax(pred, dim=-1)  # [N, C]
+    if class_weight is not None:
+        loss *= class_weight
+    loss = loss.sum(dim=-1)  # reduce class
+
+    # global: between lam_i and the other C-k classes
+    if len(set(lam_k.cpu().numpy())) == lam_k.size(0) and lam_k.size(0) > 1:
+        # *** trivial solution: lam=0.5, lam=1.0 ***
+        assert len(eta_weight) == lam_k.size(0), \
+            "eta weight={}, lam_k={}".format(eta_weight, lam_k)
+        for i in range(lam_k.size(0)):
+            # selected (C-k+1) classes, except lam_k[j], where j != i (k-1 classes)
+            mask_lam_i = (label == lam_k[i]) | ~mask_lam_k  # [N, C]
+            pred_lam_i = pred.reshape([1, bs, -1])[:, mask_lam_i].reshape(
+                [-1, cls_num+1-lam_k.size(0)])  # [N, C-k+1]
+            label_lam_i = label.reshape([1, bs, -1])[:, mask_lam_i].reshape(
+                [-1, cls_num+1-lam_k.size(0)])  # [N, C-k+1]
+            # convert to onehot
+            label_lam_i = (label_lam_i > 0).type(torch.float)
+            # element-wise losses
+            loss_global = -label_lam_i * F.log_softmax(pred_lam_i, dim=-1)  # [N, C-k+1]
+            if class_weight is not None:
+                loss_global *= class_weight
+            # eta reweight
+            loss += eta_weight[i] * loss_global.sum(dim=-1)  # reduce class
+    # apply weight and do the reduction
+    loss = weight_reduce_loss(
+        loss, weight=weight, reduction=reduction, avg_factor=avg_factor)
+
+    # step 3: original soft CE loss for the non-mixed samples
+    if label_one is not None:
+        loss_one = -label_one * F.log_softmax(pred_one, dim=-1)
+        if class_weight is not None:
+            loss_one *= class_weight
+        loss_one = loss_one.sum(dim=-1)  # reduce class
+        loss_one = weight_reduce_loss(
+            loss_one, weight=weight_one, reduction=reduction, avg_factor=avg_factor)
+        loss += loss_one
+
+    return loss
+
+
+def binary_cross_entropy(pred,
+                         label,
+                         weight=None,
+                         reduction='mean',
+                         avg_factor=None,
+                         class_weight=None,
+                         **kwargs):
+    r"""Calculate the binary CrossEntropy loss with logits.
+
+    Args:
+        pred (torch.Tensor): The prediction with shape (N, \*).
+        label (torch.Tensor): The gt label with shape (N, \*).
+        weight (torch.Tensor, optional): Element-wise weight of loss with shape
+            (N, ). Defaults to None.
+        reduction (str): The method used to reduce the loss.
+            Options are "none", "mean" and "sum". If reduction is 'none' , loss
+            is same shape as pred and label. Defaults to 'mean'.
+        avg_factor (int, optional): Average factor that is used to average
+            the loss. Defaults to None.
+        class_weight (torch.Tensor, optional): The weight for each class with
+            shape [C] or [N, C], C is the number of classes. Default None.
+
+    Returns:
+        torch.Tensor: The calculated loss
+    """
+    assert pred.dim() == label.dim()
+    # Ensure that the size of class_weight is consistent with pred and label
+    # to avoid automatic broadcasting.
+    if class_weight is not None:
+        if class_weight.dim() == 1:
+            N = pred.size()[0]
+            class_weight = class_weight.repeat(N, 1)
+    loss = F.binary_cross_entropy_with_logits(
+        pred, label, weight=class_weight, reduction='none')
+
+    # apply weights and do the reduction
+    if weight is not None:
+        assert weight.dim() == 1
+        weight = weight.float()
+        if pred.dim() > 1:
+            weight = weight.reshape(-1, 1)
+    loss = weight_reduce_loss(
+        loss, weight=weight, reduction=reduction, avg_factor=avg_factor)
+    return loss
+
+
+@LOSSES.register_module()
+class CrossEntropyLoss(nn.Module):
+    r"""Cross entropy loss.
+
+    Args:
+        use_sigmoid (bool): Whether the prediction uses sigmoid
+            instead of softmax. Defaults to False.
+        use_soft (bool): Whether to use the soft version of CrossEntropyLoss.
+            Defaults to False.
+        use_mix_decouple (bool): Whether to use the decoupled mixup version of
+            CrossEntropyLoss with the 'soft' CE implementation. Defaults to False.
+        reduction (str): The method used to reduce the loss.
+            Options are "none", "mean" and "sum". Defaults to 'mean'.
+        loss_weight (float): Weight of the loss. Defaults to 1.0.
+        class_weight (List[float], optional): The weight for each class with
+            shape (C), C is the number of classes. Default None.
+    """
+
+    def __init__(self,
+                 use_sigmoid=False,
+                 use_soft=False,
+                 use_mix_decouple=False,
+                 reduction='mean',
+                 loss_weight=1.0,
+                 class_weight=None,
+                 **kwargs):
+        super(CrossEntropyLoss, self).__init__()
+        self.use_sigmoid = use_sigmoid
+        self.use_soft = use_soft
+        self.use_mix_decouple = use_mix_decouple
+        assert not (
+            self.use_soft and self.use_sigmoid
+        ), 'use_sigmoid and use_soft could not be set simultaneously'
+        if self.use_mix_decouple:
+            assert use_soft, \
+                "use_mix_decouple requires 'use_soft' to be true"
+
+        self.reduction = reduction
+        self.loss_weight = loss_weight
+        self.class_weight = class_weight
+        # loss func
+        if self.use_sigmoid:
+            self.criterion = binary_cross_entropy
+        elif self.use_soft:
+            self.criterion = soft_mix_cross_entropy \
+                if self.use_mix_decouple else soft_cross_entropy
+        else:
+            self.criterion = cross_entropy
+
+    def forward(self,
+                cls_score,
+                label,
+                weight=None,
+                eta_weight=None,
+                avg_factor=None,
+                reduction_override=None,
+                class_weight_override=None,
+                **kwargs):
+        r"""Calculate the loss.
+
+        Args:
+            cls_score (tensor): Predicted logits of (N, C).
+            label (tensor): Ground-truth label of (N, \*).
+            weight (tensor): Loss weight for each sample of (N,).
+            eta_weight (list): Rescale weight for the global loss when 'use_mix_decouple'=true,
+                loss = loss_local + eta_weight[i] * loss_global[i]. Default: None.
+ """ + assert reduction_override in (None, 'none', 'mean', 'sum') + reduction = ( + reduction_override if reduction_override is not None else self.reduction) + + class_weight = \ + class_weight_override if class_weight_override is not None else self.class_weight + if class_weight is not None: + if isinstance(class_weight, list): # [C] + class_weight = cls_score.new_tensor(class_weight) + else: + if class_weight.dim() == 1: # (C,) + assert class_weight.size(0) == cls_score.size(1) + else: # (N, C) + assert class_weight.shape == cls_score.shape + # BCE version requires onehot targets + num_classes = cls_score.size(-1) + if self.use_sigmoid: + label = convert_to_one_hot(label, num_classes) + label = label.float() + # use_mix_decouple version requires eta weight + if self.use_mix_decouple: + assert eta_weight is not None, \ + "use_mix_decouple requires 'eta_weight' to be not None" + + loss_cls = self.loss_weight * self.criterion( + cls_score, + label, + weight, + class_weight=class_weight, + reduction=reduction, + avg_factor=avg_factor, + eta_weight=eta_weight, + **kwargs) + return loss_cls diff --git a/openmixup/models/losses/focal_loss.py b/openmixup/models/losses/focal_loss.py new file mode 100644 index 00000000..0082762b --- /dev/null +++ b/openmixup/models/losses/focal_loss.py @@ -0,0 +1,118 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch.nn as nn +import torch.nn.functional as F + +from ..registry import LOSSES +from .utils import weight_reduce_loss, convert_to_one_hot + + +def sigmoid_focal_loss(pred, + target, + weight=None, + gamma=2.0, + alpha=0.25, + reduction='mean', + avg_factor=None): + r"""Sigmoid focal loss. + + Args: + pred (torch.Tensor): The prediction with shape (N, \*). + target (torch.Tensor): The ground truth label of the prediction with + shape (N, \*). + weight (torch.Tensor, optional): Sample-wise loss weight with shape + (N, ). Dafaults to None. + gamma (float): The gamma for calculating the modulating factor. + Defaults to 2.0. + alpha (float): A balanced form for Focal Loss. Defaults to 0.25. + reduction (str): The method used to reduce the loss. + Options are "none", "mean" and "sum". If reduction is 'none' , + loss is same shape as pred and label. Defaults to 'mean'. + avg_factor (int, optional): Average factor that is used to average + the loss. Defaults to None. + + Returns: + torch.Tensor: Loss. + """ + assert pred.shape == \ + target.shape, 'pred and target should be in the same shape.' + pred_sigmoid = pred.sigmoid() + target = target.type_as(pred) + pt = (1 - pred_sigmoid) * target + pred_sigmoid * (1 - target) + focal_weight = (alpha * target + (1 - alpha) * (1 - target)) * pt.pow(gamma) + loss = F.binary_cross_entropy_with_logits( + pred, target, reduction='none') * focal_weight + if weight is not None: + assert weight.dim() == 1 + weight = weight.float() + if pred.dim() > 1: + weight = weight.reshape(-1, 1) + loss = weight_reduce_loss(loss, weight, reduction, avg_factor) + return loss + + +@LOSSES.register_module() +class FocalLoss(nn.Module): + """Focal loss. + + Args: + gamma (float): Focusing parameter in focal loss. + Defaults to 2.0. + alpha (float): The parameter in balanced form of focal + loss. Defaults to 0.25. + reduction (str): The method used to reduce the loss into + a scalar. Options are "none" and "mean". Defaults to 'mean'. + loss_weight (float): Weight of loss. Defaults to 1.0. 
+ """ + + def __init__(self, + gamma=2.0, + alpha=0.25, + reduction='mean', + loss_weight=1.0, + **kwargs): + + super(FocalLoss, self).__init__() + self.gamma = gamma + self.alpha = alpha + self.reduction = reduction + self.loss_weight = loss_weight + + def forward(self, + pred, + target, + weight=None, + avg_factor=None, + reduction_override=None): + r"""Sigmoid focal loss. + + Args: + pred (torch.Tensor): The prediction with shape (N, \*). + target (torch.Tensor): The ground truth label of the prediction + with shape (N, \*). + weight (torch.Tensor, optional): Sample-wise loss weight with shape + (N, \*). Dafaults to None. + avg_factor (int, optional): Average factor that is used to average + the loss. Defaults to None. + reduction_override (str, optional): The method used to reduce the + loss into a scalar. Options are "none", "mean" and "sum". + Defaults to None. + + Returns: + torch.Tensor: Loss. + """ + assert reduction_override in (None, 'none', 'mean', 'sum') + reduction = ( + reduction_override if reduction_override else self.reduction) + # should be onehot targets + num_classes = pred.size(-1) + target = convert_to_one_hot(target, num_classes) + + loss_cls = self.loss_weight * sigmoid_focal_loss( + pred, + target, + weight, + gamma=self.gamma, + alpha=self.alpha, + reduction=reduction, + avg_factor=avg_factor) + return loss_cls diff --git a/openmixup/models/losses/label_smooth_loss.py b/openmixup/models/losses/label_smooth_loss.py new file mode 100644 index 00000000..a6285ef9 --- /dev/null +++ b/openmixup/models/losses/label_smooth_loss.py @@ -0,0 +1,169 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import warnings + +import torch +import torch.nn as nn + +from ..registry import LOSSES +from .cross_entropy_loss import CrossEntropyLoss +from .utils import convert_to_one_hot + + +@LOSSES.register_module() +class LabelSmoothLoss(nn.Module): + r"""Intializer for the label smoothed cross entropy loss. + + Refers to `Rethinking the Inception Architecture for Computer Vision + `_ + + This decreases gap between output scores and encourages generalization. + Labels provided to forward can be one-hot like vectors (NxC) or class + indices (Nx1). + And this accepts linear combination of one-hot like labels from mixup or + cutmix except multi-label task. + + Args: + label_smooth_val (float): The degree of label smoothing. + num_classes (int, optional): Number of classes. Defaults to None. + mode (str): Refers to notes, Options are 'original', 'classy_vision', + 'multi_label'. Defaults to 'classy_vision' + reduction (str): The method used to reduce the loss. + Options are "none", "mean" and "sum". Defaults to 'mean'. + loss_weight (float): Weight of the loss. Defaults to 1.0. + + Notes: + if the mode is "original", this will use the same label smooth method + as the original paper as: + + .. math:: + (1-\epsilon)\delta_{k, y} + \frac{\epsilon}{K} + + where epsilon is the `label_smooth_val`, K is the num_classes and + delta(k,y) is Dirac delta, which equals 1 for k=y and 0 otherwise. + + if the mode is "classy_vision", this will use the same label smooth + method as the facebookresearch/ClassyVision repo as: + + .. math:: + \frac{\delta_{k, y} + \epsilon/K}{1+\epsilon} + + if the mode is "multi_label", this will accept labels from multi-label + task and smoothing them as: + + .. 
diff --git a/openmixup/models/losses/label_smooth_loss.py b/openmixup/models/losses/label_smooth_loss.py
new file mode 100644
index 00000000..a6285ef9
--- /dev/null
+++ b/openmixup/models/losses/label_smooth_loss.py
@@ -0,0 +1,169 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import warnings
+
+import torch
+import torch.nn as nn
+
+from ..registry import LOSSES
+from .cross_entropy_loss import CrossEntropyLoss
+from .utils import convert_to_one_hot
+
+
+@LOSSES.register_module()
+class LabelSmoothLoss(nn.Module):
+    r"""Initializer for the label smoothed cross entropy loss.
+
+    Refers to `Rethinking the Inception Architecture for Computer Vision
+    <https://arxiv.org/abs/1512.00567>`_.
+
+    This decreases gap between output scores and encourages generalization.
+    Labels provided to forward can be one-hot like vectors (NxC) or class
+    indices (Nx1).
+    And this accepts linear combination of one-hot like labels from mixup or
+    cutmix except multi-label task.
+
+    Args:
+        label_smooth_val (float): The degree of label smoothing.
+        num_classes (int, optional): Number of classes. Defaults to None.
+        mode (str): Refers to notes, Options are 'original', 'classy_vision',
+            'multi_label'. Defaults to 'classy_vision'.
+        reduction (str): The method used to reduce the loss.
+            Options are "none", "mean" and "sum". Defaults to 'mean'.
+        loss_weight (float): Weight of the loss. Defaults to 1.0.
+
+    Notes:
+        if the mode is "original", this will use the same label smooth method
+        as the original paper as:
+
+        .. math::
+            (1-\epsilon)\delta_{k, y} + \frac{\epsilon}{K}
+
+        where epsilon is the `label_smooth_val`, K is the num_classes and
+        delta(k,y) is Dirac delta, which equals 1 for k=y and 0 otherwise.
+
+        if the mode is "classy_vision", this will use the same label smooth
+        method as the facebookresearch/ClassyVision repo as:
+
+        .. math::
+            \frac{\delta_{k, y} + \epsilon/K}{1+\epsilon}
+
+        if the mode is "multi_label", this will accept labels from multi-label
+        task and smoothing them as:
+
+        .. math::
+            (1-2\epsilon)\delta_{k, y} + \epsilon
+    """
+
+    def __init__(self,
+                 label_smooth_val,
+                 num_classes=None,
+                 mode=None,
+                 reduction='mean',
+                 loss_weight=1.0,
+                 **kwargs):
+        super().__init__()
+        self.num_classes = num_classes
+        self.loss_weight = loss_weight
+
+        assert (isinstance(label_smooth_val, float)
+                and 0 <= label_smooth_val < 1), \
+            f'LabelSmoothLoss accepts a float label_smooth_val ' \
+            f'over [0, 1), but gets {label_smooth_val}'
+        self.label_smooth_val = label_smooth_val
+
+        accept_reduction = {'none', 'mean', 'sum'}
+        assert reduction in accept_reduction, \
+            f'LabelSmoothLoss supports reduction {accept_reduction}, ' \
+            f'but gets {reduction}.'
+        self.reduction = reduction
+
+        if mode is None:
+            warnings.warn(
+                'LabelSmoothLoss mode is not set, use "classy_vision" '
+                'by default. The default value will be changed to '
+                '"original" in the future. Please set mode manually if you '
+                'want to keep "classy_vision".', UserWarning)
+            mode = 'classy_vision'
+
+        accept_mode = {'original', 'classy_vision', 'multi_label'}
+        assert mode in accept_mode, \
+            f'LabelSmoothLoss supports mode {accept_mode}, but gets {mode}.'
+        self.mode = mode
+
+        self._eps = label_smooth_val
+        if mode == 'classy_vision':
+            self._eps = label_smooth_val / (1 + label_smooth_val)
+        if mode == 'multi_label':
+            self.ce = CrossEntropyLoss(use_sigmoid=True)
+            self.smooth_label = self.multilabel_smooth_label
+        else:
+            self.ce = CrossEntropyLoss(use_soft=True)
+            self.smooth_label = self.original_smooth_label
+
+    def generate_one_hot_like_label(self, label):
+        """This function takes one-hot or index label vectors and computes
+        one-hot like label vectors (float)."""
+        # check if targets are inputted as class integers
+        if label.dim() == 1 or (label.dim() == 2 and label.shape[1] == 1):
+            label = convert_to_one_hot(label.view(-1, 1), self.num_classes)
+        return label.float()
+
+    def original_smooth_label(self, one_hot_like_label):
+        assert self.num_classes > 0
+        smooth_label = one_hot_like_label * (1 - self._eps)
+        smooth_label += self._eps / self.num_classes
+        return smooth_label
+
+    def multilabel_smooth_label(self, one_hot_like_label):
+        assert self.num_classes > 0
+        smooth_label = torch.full_like(one_hot_like_label, self._eps)
+        smooth_label.masked_fill_(one_hot_like_label > 0, 1 - self._eps)
+        return smooth_label
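+
+    # Example of the two smoothing branches (added for clarity; values are
+    # illustrative): with eps = label_smooth_val, C = 4 and label = 2,
+    #   original_smooth_label   -> [eps/4, eps/4, 1 - eps + eps/4, eps/4]
+    #   multilabel_smooth_label -> [eps, eps, 1 - eps, eps]
+    # (for mode='classy_vision', eps is first rescaled to eps / (1 + eps)).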
+ """ + if self.num_classes is None: + assert cls_score.dim() == 2 + self.num_classes = cls_score.shape[1] + else: + assert self.num_classes == cls_score.shape[1], \ + f'num_classes should equal to cls_score.shape[1], ' \ + f'but got num_classes: {self.num_classes} and ' \ + f'cls_score.shape[1]: {cls_score.shape[1]}' + + one_hot_like_label = self.generate_one_hot_like_label(label=label) + assert one_hot_like_label.shape == cls_score.shape, \ + f'LabelSmoothLoss requires output and target ' \ + f'to be same shape, but got output.shape: {cls_score.shape} ' \ + f'and target.shape: {one_hot_like_label.shape}' + + smoothed_label = self.smooth_label(one_hot_like_label) + return self.ce.forward( + cls_score, + smoothed_label, + weight=weight, + avg_factor=avg_factor, + reduction_override=reduction_override, + **kwargs) diff --git a/openmixup/models/losses/utils.py b/openmixup/models/losses/utils.py new file mode 100644 index 00000000..1037953b --- /dev/null +++ b/openmixup/models/losses/utils.py @@ -0,0 +1,112 @@ +import functools + +import torch +import torch.nn.functional as F + + +def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None): + """Apply element-wise weight and reduce loss. + + Args: + loss (Tensor): Element-wise loss tensor. + weight (Tensor): Element-wise weights. + reduction (str): Same as built-in losses of PyTorch. Options are "none", + "mean" and "sum". + avg_factor (float): Avarage factor when computing the mean of losses. + + Returns: + Tensor: Processed loss values. + """ + # if weight is specified, apply element-wise weight + if weight is not None: + loss = loss * weight + + reduction_enum = F._Reduction.get_enum(reduction) + # if avg_factor is not specified, just reduce the loss + if avg_factor is None: + # none: 0, elementwise_mean:1, sum: 2 + if reduction_enum == 1: + loss = loss.mean() + elif reduction_enum == 2: + loss = loss.sum() + else: + # if reduction is 'mean', then average the loss by avg_factor + if reduction_enum == 1: + loss = loss.sum() / avg_factor + # if reduction is 'none', then do nothing; otherwise raise an error + elif reduction != 0: + raise ValueError('avg_factor can not be used with reduction="sum"') + return loss + + +def weighted_loss(loss_func): + """Create a weighted version of a given loss function. + + To use this decorator, the loss function must have the signature like + ``loss_func(pred, target, **kwargs)``. The function only needs to compute + element-wise loss without any reduction. This decorator will add weight + and reduction arguments to the function. The decorated function will have + the signature like ``loss_func(pred, target, weight=None, reduction='mean', + avg_factor=None, **kwargs)``. + + :Example: + + >>> import torch + >>> @weighted_loss + >>> def l1_loss(pred, target): + >>> return (pred - target).abs() + + >>> pred = torch.Tensor([0, 2, 3]) + >>> target = torch.Tensor([1, 1, 1]) + >>> weight = torch.Tensor([1, 0, 1]) + + >>> l1_loss(pred, target) + tensor(1.3333) + >>> l1_loss(pred, target, weight) + tensor(1.) 
+
+
+def weighted_loss(loss_func):
+    """Create a weighted version of a given loss function.
+
+    To use this decorator, the loss function must have the signature like
+    ``loss_func(pred, target, **kwargs)``. The function only needs to compute
+    element-wise loss without any reduction. This decorator will add weight
+    and reduction arguments to the function. The decorated function will have
+    the signature like ``loss_func(pred, target, weight=None, reduction='mean',
+    avg_factor=None, **kwargs)``.
+
+    :Example:
+
+    >>> import torch
+    >>> @weighted_loss
+    >>> def l1_loss(pred, target):
+    >>>     return (pred - target).abs()
+
+    >>> pred = torch.Tensor([0, 2, 3])
+    >>> target = torch.Tensor([1, 1, 1])
+    >>> weight = torch.Tensor([1, 0, 1])
+
+    >>> l1_loss(pred, target)
+    tensor(1.3333)
+    >>> l1_loss(pred, target, weight)
+    tensor(1.)
+    >>> l1_loss(pred, target, reduction='none')
+    tensor([1., 1., 2.])
+    >>> l1_loss(pred, target, weight, avg_factor=2)
+    tensor(1.5000)
+    """
+
+    @functools.wraps(loss_func)
+    def wrapper(pred,
+                target,
+                weight=None,
+                reduction='mean',
+                avg_factor=None,
+                **kwargs):
+        # get element-wise loss
+        loss = loss_func(pred, target, **kwargs)
+        loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
+        return loss
+
+    return wrapper
+
+
+def convert_to_one_hot(targets, num_classes=None):
+    """This function converts target class indices to one-hot vectors, given
+    the number of classes.
+
+    Args:
+        targets (Tensor): The ground truth label of the prediction
+            with shape (N,) or (N, 1).
+        num_classes (int): the number of classes.
+
+    Returns:
+        Tensor: One-hot encoded targets of shape (N, C).
+    """
+    # already as onehot [N, C]
+    if targets.dim() != 1 and not (targets.dim() == 2 and targets.shape[1] == 1):
+        return targets
+    # targets are inputted as class integers: convert to onehot
+    assert num_classes is not None
+    assert (torch.max(targets).item() <
+            num_classes), 'Class Index must be less than number of classes'
+
+    # flatten (N, 1) indices to (N,) so that F.one_hot returns [N, C]
+    one_hot_targets = F.one_hot(targets.reshape(-1).long(), num_classes=num_classes)
+    # one_hot_targets = torch.zeros((targets.reshape(-1, 1).shape[0], num_classes),
+    #     dtype=torch.long, device=targets.device)
+    # one_hot_targets.scatter_(1, targets.long(), 1)
+    return one_hot_targets
diff --git a/openmixup/models/memories/__init__.py b/openmixup/models/memories/__init__.py
new file mode 100644
index 00000000..b4250c7a
--- /dev/null
+++ b/openmixup/models/memories/__init__.py
@@ -0,0 +1,7 @@
+from .odc_memory import ODCMemory
+from .simple_memory import SimpleMemory
+
+
+__all__ = [
+    'ODCMemory', 'SimpleMemory',
+]
diff --git a/openmixup/models/memories/odc_memory.py b/openmixup/models/memories/odc_memory.py
new file mode 100644
index 00000000..2a7e454f
--- /dev/null
+++ b/openmixup/models/memories/odc_memory.py
@@ -0,0 +1,233 @@
+import numpy as np
+from sklearn.cluster import KMeans
+
+import torch
+import torch.nn as nn
+import torch.distributed as dist
+from mmcv.runner import get_dist_info
+
+from ..registry import MEMORIES
+
+
+@MEMORIES.register_module
+class ODCMemory(nn.Module):
+    """Memory modules for ODC.
+
+    Args:
+        length (int): Number of features stored in samples memory.
+        feat_dim (int): Dimension of stored features.
+        momentum (float): Momentum coefficient for updating features.
+        num_classes (int): Number of clusters.
+        min_cluster (int): Minimal cluster size.
+ """ + + def __init__(self, length, feat_dim, momentum, num_classes, min_cluster, + **kwargs): + super(ODCMemory, self).__init__() + self.rank, self.num_replicas = get_dist_info() + if self.rank == 0: + self.feature_bank = torch.zeros((length, feat_dim), + dtype=torch.float32) + self.label_bank = torch.zeros((length, ), dtype=torch.long) + self.centroids = torch.zeros((num_classes, feat_dim), + dtype=torch.float32).cuda() + self.kmeans = KMeans(n_clusters=2, random_state=0, max_iter=20) + self.feat_dim = feat_dim + self.initialized = False + self.momentum = momentum + self.num_classes = num_classes + self.min_cluster = min_cluster + self.debug = kwargs.get('debug', False) + + def init_memory(self, feature, label): + """Initialize memory modules.""" + self.initialized = True + self.label_bank.copy_(torch.from_numpy(label).long()) + # make sure no empty clusters + assert (np.bincount(label, minlength=self.num_classes) != 0).all() + if self.rank == 0: + feature /= (np.linalg.norm(feature, axis=1).reshape(-1, 1) + 1e-10) + self.feature_bank.copy_(torch.from_numpy(feature)) + centroids = self._compute_centroids() + self.centroids.copy_(centroids) + dist.broadcast(self.centroids, 0) + + def _compute_centroids_ind(self, cinds): + """Compute a few centroids.""" + assert self.rank == 0 + num = len(cinds) + centroids = torch.zeros((num, self.feat_dim), dtype=torch.float32) + for i, c in enumerate(cinds): + ind = np.where(self.label_bank.numpy() == c)[0] + centroids[i, :] = self.feature_bank[ind, :].mean(dim=0) + return centroids + + def _compute_centroids(self): + """Compute all non-empty centroids.""" + assert self.rank == 0 + l = self.label_bank.numpy() + argl = np.argsort(l) + sortl = l[argl] + diff_pos = np.where(sortl[1:] - sortl[:-1] != 0)[0] + 1 + start = np.insert(diff_pos, 0, 0) + end = np.insert(diff_pos, len(diff_pos), len(l)) + class_start = sortl[start] + # keep empty class centroids unchanged + centroids = self.centroids.cpu().clone() + for i, st, ed in zip(class_start, start, end): + centroids[i, :] = self.feature_bank[argl[st:ed], :].mean(dim=0) + return centroids + + def _gather(self, ind, feature): + """Gather indices and features.""" + # if not hasattr(self, 'ind_gathered'): + # self.ind_gathered = [torch.ones_like(ind).cuda() + # for _ in range(self.num_replicas)] + # if not hasattr(self, 'feature_gathered'): + # self.feature_gathered = [torch.ones_like(feature).cuda() + # for _ in range(self.num_replicas)] + ind_gathered = [ + torch.ones_like(ind).cuda() for _ in range(self.num_replicas) + ] + feature_gathered = [ + torch.ones_like(feature).cuda() for _ in range(self.num_replicas) + ] + dist.all_gather(ind_gathered, ind) + dist.all_gather(feature_gathered, feature) + ind_gathered = torch.cat(ind_gathered, dim=0) + feature_gathered = torch.cat(feature_gathered, dim=0) + return ind_gathered, feature_gathered + + def update_samples_memory(self, ind, feature): + """Update samples memory.""" + assert self.initialized + feature_norm = feature / (feature.norm(dim=1).view(-1, 1) + 1e-10 + ) # normalize + ind, feature_norm = self._gather( + ind, feature_norm) # ind: (N*w), feature: (N*w)xk, cuda tensor + ind = ind.cpu() + if self.rank == 0: + feature_old = self.feature_bank[ind, ...].cuda() + feature_new = (1 - self.momentum) * feature_old + \ + self.momentum * feature_norm + feature_norm = feature_new / ( + feature_new.norm(dim=1).view(-1, 1) + 1e-10) + self.feature_bank[ind, ...] 
= feature_norm.cpu() + dist.barrier() + dist.broadcast(feature_norm, 0) + # compute new labels + similarity_to_centroids = torch.mm(self.centroids, + feature_norm.permute(1, 0)) # CxN + newlabel = similarity_to_centroids.argmax(dim=0) # cuda tensor + newlabel_cpu = newlabel.cpu() + change_ratio = (newlabel_cpu != + self.label_bank[ind]).sum().float().cuda() \ + / float(newlabel_cpu.shape[0]) + self.label_bank[ind] = newlabel_cpu.clone() # copy to cpu + return change_ratio + + def deal_with_small_clusters(self): + """Deal with small clusters.""" + # check empty class + hist = np.bincount(self.label_bank.numpy(), minlength=self.num_classes) + small_clusters = np.where(hist < self.min_cluster)[0].tolist() + if self.debug and self.rank == 0: + print("mincluster: {}, num of small class: {}".format( + hist.min(), len(small_clusters))) + if len(small_clusters) == 0: + return + # re-assign samples in small clusters to make them empty + for s in small_clusters: + ind = np.where(self.label_bank.numpy() == s)[0] + if len(ind) > 0: + inclusion = torch.from_numpy( + np.setdiff1d( + np.arange(self.num_classes), + np.array(small_clusters), + assume_unique=True)).cuda() + if self.rank == 0: + target_ind = torch.mm( + self.centroids[inclusion, :], + self.feature_bank[ind, :].cuda().permute( + 1, 0)).argmax(dim=0) + target = inclusion[target_ind] + else: + target = torch.zeros((ind.shape[0], ), + dtype=torch.int64).cuda() + dist.all_reduce(target) + self.label_bank[ind] = torch.from_numpy(target.cpu().numpy()) + # deal with empty cluster + self._redirect_empty_clusters(small_clusters) + + def update_centroids_memory(self, cinds=None): + """Update centroids memory.""" + if self.rank == 0: + if self.debug: + print("updating centroids ...") + if cinds is None: + center = self._compute_centroids() + self.centroids.copy_(center) + else: + center = self._compute_centroids_ind(cinds) + self.centroids[ + torch.LongTensor(cinds).cuda(), :] = center.cuda() + dist.broadcast(self.centroids, 0) + + def _partition_max_cluster(self, max_cluster): + """Partition the largest cluster into two sub-clusters.""" + assert self.rank == 0 + max_cluster_inds = np.where(self.label_bank == max_cluster)[0] + + assert len(max_cluster_inds) >= 2 + max_cluster_features = self.feature_bank[max_cluster_inds, :] + if np.any(np.isnan(max_cluster_features.numpy())): + raise Exception("Has nan in features.") + kmeans_ret = self.kmeans.fit(max_cluster_features) + sub_cluster1_ind = max_cluster_inds[kmeans_ret.labels_ == 0] + sub_cluster2_ind = max_cluster_inds[kmeans_ret.labels_ == 1] + if not (len(sub_cluster1_ind) > 0 and len(sub_cluster2_ind) > 0): + print( + "Warning: kmeans partition fails, resort to random partition.") + sub_cluster1_ind = np.random.choice( + max_cluster_inds, len(max_cluster_inds) // 2, replace=False) + sub_cluster2_ind = np.setdiff1d( + max_cluster_inds, sub_cluster1_ind, assume_unique=True) + return sub_cluster1_ind, sub_cluster2_ind + + def _redirect_empty_clusters(self, empty_clusters): + """Re-direct empty clusters.""" + for e in empty_clusters: + assert (self.label_bank != e).all().item(), \ + "Cluster #{} is not an empty cluster.".format(e) + max_cluster = np.bincount( + self.label_bank, minlength=self.num_classes).argmax().item() + # gather partitioning indices + if self.rank == 0: + sub_cluster1_ind, sub_cluster2_ind = self._partition_max_cluster( + max_cluster) + size1 = torch.LongTensor([len(sub_cluster1_ind)]).cuda() + size2 = torch.LongTensor([len(sub_cluster2_ind)]).cuda() + sub_cluster1_ind_tensor = 
+            else:
+                size1 = torch.LongTensor([0]).cuda()
+                size2 = torch.LongTensor([0]).cuda()
+            dist.all_reduce(size1)
+            dist.all_reduce(size2)
+            if self.rank != 0:
+                # cast the reduced sizes to python ints before allocating
+                sub_cluster1_ind_tensor = torch.zeros(
+                    (int(size1), ), dtype=torch.int64).cuda()
+                sub_cluster2_ind_tensor = torch.zeros(
+                    (int(size2), ), dtype=torch.int64).cuda()
+            dist.broadcast(sub_cluster1_ind_tensor, 0)
+            dist.broadcast(sub_cluster2_ind_tensor, 0)
+            if self.rank != 0:
+                sub_cluster1_ind = sub_cluster1_ind_tensor.cpu().numpy()
+                sub_cluster2_ind = sub_cluster2_ind_tensor.cpu().numpy()
+
+            # reassign samples in partition #2 to the empty class
+            self.label_bank[sub_cluster2_ind] = e
+            # update centroids of max_cluster and e
+            self.update_centroids_memory([max_cluster, e])
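+
+
+# A minimal usage sketch (illustrative only; the sizes, variable names and
+# data loading are hypothetical, and the distributed process group is
+# assumed to be initialized by the runner):
+#
+#   memory = ODCMemory(length=50000, feat_dim=128, momentum=0.5,
+#                      num_classes=100, min_cluster=20)
+#   memory.init_memory(init_feature, init_label)  # numpy arrays
+#   for ind, feature in batches:                  # cuda tensors
+#       change_ratio = memory.update_samples_memory(ind, feature)
+#       memory.deal_with_small_clusters()
+#       memory.update_centroids_memory()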
+ """ + ind_gathered = [ + torch.ones_like(ind).cuda() for _ in range(self.num_replicas) + ] + feature_gathered = [ + torch.ones_like(feature).cuda() for _ in range(self.num_replicas) + ] + dist.all_gather(ind_gathered, ind) + dist.all_gather(feature_gathered, feature) + ind_gathered = torch.cat(ind_gathered, dim=0) + feature_gathered = torch.cat(feature_gathered, dim=0) + return ind_gathered, feature_gathered diff --git a/openmixup/models/necks/__init__.py b/openmixup/models/necks/__init__.py new file mode 100644 index 00000000..d3dace17 --- /dev/null +++ b/openmixup/models/necks/__init__.py @@ -0,0 +1,10 @@ +from .conv_necks import ConvNeck +from .fpn_automix import FPN_AutoMix +from .mlp_necks import (AvgPoolNeck, LinearNeck, RelativeLocNeck, ODCNeck, + MoCoV2Neck, NonLinearNeck, SwAVNeck, DenseCLNeck) + + +__all__ = [ + 'AvgPoolNeck', 'ConvNeck', 'DenseCLNeck', 'FPN_AutoMix', 'LinearNeck', + 'MoCoV2Neck', 'NonLinearNeck', 'ODCNeck', 'RelativeLocNeck', 'SwAVNeck', +] diff --git a/openmixup/models/necks/conv_necks.py b/openmixup/models/necks/conv_necks.py new file mode 100644 index 00000000..ac3ba074 --- /dev/null +++ b/openmixup/models/necks/conv_necks.py @@ -0,0 +1,120 @@ +import torch +import torch.nn as nn +from mmcv.cnn import kaiming_init, normal_init, ConvModule +from ..registry import NECKS + + +@NECKS.register_module +class ConvNeck(nn.Module): + """The N layers conv neck: [conv-norm-act] - conv-{norm}. + + Args: + in_channels (int): Channels of the input feature map. + hid_channels (int): Channels of the hidden feature channel. + out_channels (int): Channels of the output feature channel. + num_layers (int): The number of convolution layers. + kernel_size (int): Kernel size of the convolution layer. + conv_cfg (dict): Config dict for convolution layer. + Default: None, which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='BN'). + act_cfg (dict): Config dict for activation layer. + Default: dict(type='GELU'). + with_bias (bool): Whether to use 'bias' in each conv layer. Default: False. + with_avg_pool (bool): Whether to add a global average pooling layer in the + output. Default: False. + with_last_norm (bool): Whether to add a norm layer in the output. Default: False. + with_last_dropout (float or dict): Probability of an element to be zeroed in + the output, or dict config for dropout. + Default: 0.0. + with_residual (bool, optional): Add resudual connection. + Default: False. 
+ """ + + def __init__(self, + in_channels, + hid_channels, + out_channels, + num_layers=2, + kernel_size=1, + conv_cfg=None, + norm_cfg=dict(type='BN', requires_grad=True), + act_cfg=dict(type='ELU'), + with_bias=False, + with_avg_pool=False, + with_last_norm=False, + with_last_dropout=0., + with_residual=True, + **kwargs): + super(ConvNeck, self).__init__() + # basic args + self.in_channels = int(in_channels) + self.hid_channels = int(hid_channels) + self.out_channels = int(out_channels) + self.num_layers = int(num_layers) + self.kernel_size = int(kernel_size) + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + self.dropout = None + assert conv_cfg is None or isinstance(conv_cfg, dict) + assert norm_cfg is None or isinstance(norm_cfg, dict) + assert act_cfg is None or isinstance(act_cfg, dict) + assert kernel_size in [1, 3] + # specific for ssl + self.with_bias = bool(with_bias) + self.with_avg_pool = bool(with_avg_pool) + self.with_last_norm = bool(with_last_norm) + self.with_residual = bool(with_residual) + if isinstance(with_last_dropout, dict): + _type = with_last_dropout.pop('type', None) + _prob = with_last_dropout.pop('prob', 0.) + assert 0 < _prob and _prob < 1 and \ + _type in ["Dropout", "AlphaDropout", "FeatureAlphaDropout"] + self.dropout = eval("nn.{}".format(_type))(_prob) + elif float(with_last_dropout) > 0: + assert float(with_last_dropout) < 1. + self.dropout = nn.Dropout(float(with_last_dropout)) + + # build FFN + layers = [] + for i in range(num_layers): + layers.append( + ConvModule( + in_channels=in_channels if i == 0 else hid_channels, + out_channels=hid_channels if i != num_layers-1 else out_channels, + kernel_size=kernel_size, + stride=1, + padding=1 if kernel_size == 3 else 0, + bias=with_bias, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg if i != num_layers-1 or with_last_norm else None, + act_cfg=act_cfg if i != num_layers-1 else None + )) + self.conv = nn.Sequential(*layers) + self.init_weights() + + def init_weights(self, init_linear='normal', std=0.01, bias=0.): + assert init_linear in ['normal', 'kaiming'], \ + "Undefined init_linear: {}".format(init_linear) + for m in self.modules(): + if isinstance(m, (nn.Linear, nn.Conv2d)): + if init_linear == 'normal': + normal_init(m, std=std, bias=bias) + else: + kaiming_init(m, mode='fan_in', nonlinearity='relu') + elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm, nn.SyncBatchNorm)): + if m.weight is not None: + nn.init.constant_(m.weight, 1) + if m.bias is not None: + nn.init.constant_(m.bias, 0) + + def forward(self, x): + assert len(x) == 1, "Got: {}".format(len(x)) + res = x[0] + x = self.conv(x[0]) + if self.dropout is not None: + x = self.dropout(x) + if self.with_residual: + x = x + res + return [x] diff --git a/openmixup/models/necks/fpn_automix.py b/openmixup/models/necks/fpn_automix.py new file mode 100644 index 00000000..9aee02ab --- /dev/null +++ b/openmixup/models/necks/fpn_automix.py @@ -0,0 +1,41 @@ +import torch.nn as nn +from ..registry import NECKS +from mmcv.cnn import ConvModule + + +@NECKS.register_module() +class FPN_AutoMix(nn.Module): + def __init__(self, + in_channels, + out_channels, + conv_cfg=None, + act_cfg=None): + super(FPN_AutoMix, self).__init__() + self.l_conv = ConvModule( + in_channels, + out_channels, + 1, + conv_cfg=conv_cfg, + act_cfg=act_cfg, + inplace=False) + + def forward(self, input): + ''' + input: feature of two layers, 0 for target + ''' + assert len(input) == 2 + n, c, w, h = input[0].shape # target shape + + if w > input[1].shape[-1]: + # 
diff --git a/openmixup/models/necks/fpn_automix.py b/openmixup/models/necks/fpn_automix.py
new file mode 100644
index 00000000..9aee02ab
--- /dev/null
+++ b/openmixup/models/necks/fpn_automix.py
@@ -0,0 +1,41 @@
+import torch.nn as nn
+from ..registry import NECKS
+from mmcv.cnn import ConvModule
+
+
+@NECKS.register_module()
+class FPN_AutoMix(nn.Module):
+
+    def __init__(self,
+                 in_channels,
+                 out_channels,
+                 conv_cfg=None,
+                 act_cfg=None):
+        super(FPN_AutoMix, self).__init__()
+        self.l_conv = ConvModule(
+            in_channels,
+            out_channels,
+            1,
+            conv_cfg=conv_cfg,
+            act_cfg=act_cfg,
+            inplace=False)
+
+    def forward(self, input):
+        """Forward features of two layers; input[0] is the target layer."""
+        assert len(input) == 2
+        n, c, h, w = input[0].shape  # target shape
+
+        if h > input[1].shape[2]:
+            # upsample the source feature to the target size
+            m = nn.Upsample(scale_factor=2, mode='nearest')
+            out = m(input[-1])
+        else:
+            # average pool the source feature to the target size
+            m = nn.AdaptiveAvgPool2d((h, w))
+            out = m(input[-1])
+        last_feature = self.l_conv(out)
+        out_feature = input[0] + last_feature
+
+        return out_feature
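+
+
+# Shape sketch (sizes are hypothetical; the 2x nearest upsampling assumes
+# adjacent backbone stages, i.e. the target is exactly twice as large):
+#
+#   neck = FPN_AutoMix(in_channels=512, out_channels=256)
+#   x0 = torch.randn(2, 256, 28, 28)   # target feature
+#   x1 = torch.randn(2, 512, 14, 14)   # deeper feature, resized + 1x1 conv
+#   out = neck([x0, x1])               # -> tensor of shape (2, 256, 28, 28)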
+ """ + + def __init__(self, + in_channels, + out_channels, + sync_bn=False, + with_avg_pool=True): + super(RelativeLocNeck, self).__init__() + self.with_avg_pool = with_avg_pool + if with_avg_pool: + self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) + + if version.parse(torch.__version__) < version.parse("1.4.0"): + self.expand_for_syncbn = True + else: + self.expand_for_syncbn = False + + self.fc = nn.Linear(in_channels * 2, out_channels) + if sync_bn: + _, self.bn = build_norm_layer( + dict(type='SyncBN', momentum=0.003), + out_channels) + else: + self.bn = nn.BatchNorm1d( + out_channels, momentum=0.003) + self.relu = nn.ReLU(inplace=True) + self.drop = nn.Dropout() + self.sync_bn = sync_bn + + def init_weights(self, init_linear='normal'): + _init_weights(self, init_linear, std=0.005, bias=0.1) + + def _forward_syncbn(self, module, x): + assert x.dim() == 2 + if self.expand_for_syncbn: + x = module(x.unsqueeze(-1).unsqueeze(-1)).squeeze(-1).squeeze(-1) + else: + x = module(x) + return x + + def forward(self, x): + assert len(x) == 1 + x = x[0] + if self.with_avg_pool: + x = self.avgpool(x) + x = x.view(x.size(0), -1) + x = self.fc(x) + if self.sync_bn: + x = self._forward_syncbn(self.bn, x) + else: + x = self.bn(x) + x = self.relu(x) + x = self.drop(x) + return [x] + + +@NECKS.register_module +class ODCNeck(nn.Module): + """The non-linear neck in ODC, fc-bn-relu-dropout-fc-relu. + """ + + def __init__(self, + in_channels, + hid_channels, + out_channels, + sync_bn=False, + with_avg_pool=True): + super(ODCNeck, self).__init__() + self.with_avg_pool = with_avg_pool + if with_avg_pool: + self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) + + if version.parse(torch.__version__) < version.parse("1.4.0"): + self.expand_for_syncbn = True + else: + self.expand_for_syncbn = False + + self.fc0 = nn.Linear(in_channels, hid_channels) + if sync_bn: + _, self.bn0 = build_norm_layer( + dict(type='SyncBN', momentum=0.001, affine=False), + hid_channels) + else: + self.bn0 = nn.BatchNorm1d( + hid_channels, momentum=0.001, affine=False) + + self.fc1 = nn.Linear(hid_channels, out_channels) + self.relu = nn.ReLU(inplace=True) + self.drop = nn.Dropout() + self.sync_bn = sync_bn + + def init_weights(self, init_linear='normal'): + _init_weights(self, init_linear) + + def _forward_syncbn(self, module, x): + assert x.dim() == 2 + if self.expand_for_syncbn: + x = module(x.unsqueeze(-1).unsqueeze(-1)).squeeze(-1).squeeze(-1) + else: + x = module(x) + return x + + def forward(self, x): + assert len(x) == 1 + x = x[0] + if self.with_avg_pool: + x = self.avgpool(x) + x = x.view(x.size(0), -1) + x = self.fc0(x) + if self.sync_bn: + x = self._forward_syncbn(self.bn0, x) + else: + x = self.bn0(x) + x = self.relu(x) + x = self.drop(x) + x = self.fc1(x) + x = self.relu(x) + return [x] + + +@NECKS.register_module +class MoCoV2Neck(nn.Module): + """The non-linear neck in MoCo v2: fc-relu-fc. + v12.29: add activation choices. + + Args: + in_channels (int): Number of input channels. + hid_channels (int): Number of hidden channels. + out_channels (int): Number of output channels. + with_avg_pool (bool): Whether to apply the global + average pooling after backbone. Defaults to True. 
+ """ + + def __init__(self, + in_channels, + hid_channels, + out_channels, + activation="ReLU", # add 12.29 + with_avg_pool=True): + super(MoCoV2Neck, self).__init__() + self.with_avg_pool = with_avg_pool + if with_avg_pool: + self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) + if activation != "ReLU": + assert activation in ["LeakyReLU", "Tanh", "ELU", "Sigmoid"] + if activation == "Tanh" or activation == "Sigmoid": # add 12.30 + inplace_choice = "" + else: + inplace_choice = "inplace=True" + self.mlp = nn.Sequential( # using my activation func + nn.Linear(in_channels, hid_channels), eval( "nn.{}({})".format(activation, inplace_choice) ), + nn.Linear(hid_channels, out_channels)) + else: # ori in MoCo.v2 (ReLU) + self.mlp = nn.Sequential( + nn.Linear(in_channels, hid_channels), nn.ReLU(inplace=True), + nn.Linear(hid_channels, out_channels)) + + def init_weights(self, init_linear='normal'): + _init_weights(self, init_linear) + + def forward(self, x): + assert len(x) == 1 + x = x[0] + if self.with_avg_pool: + x = self.avgpool(x) + return [self.mlp(x.view(x.size(0), -1))] + + +@NECKS.register_module() +class NonLinearNeck(nn.Module): + """The non-linear neck for SimCLR and BYOL. + + Structure: fc-bn-[relu-fc-bn] where the substructure in [] can be repeated. + For the default setting, the repeated time is 1. + The neck can be used in many algorithms, e.g., SimCLR, BYOL, SimSiam. + + Args: + in_channels (int): Number of input channels. + hid_channels (int): Number of hidden channels. + out_channels (int): Number of output channels. + num_layers (int): Number of fc layers. Defaults to 2. + with_bias (bool): Whether to use bias in fc layers (except for the + last). Defaults to False. + with_last_bn (bool): Whether to add the last BN layer. + Defaults to True. + with_last_bn_affine (bool): Whether to have learnable affine parameters + in the last BN layer (set False for SimSiam). Defaults to True. + with_last_bias (bool): Whether to use bias in the last fc layer. + Defaults to False. + with_avg_pool (bool): Whether to apply the global average pooling + after backbone. Defaults to True. + norm_cfg (dict): Dictionary to construct and config norm layer. + Defaults to dict(type='SyncBN'). + init_cfg (dict or list[dict], optional): Initialization config dict. 
+ """ + + def __init__(self, + in_channels, + hid_channels, + out_channels, + num_layers=2, + with_bias=False, + with_last_bn=True, + with_last_bn_affine=True, + with_last_bias=False, + with_avg_pool=True, + norm_cfg=dict(type='SyncBN'), + ): + super(NonLinearNeck, self).__init__() + self.with_avg_pool = with_avg_pool + if with_avg_pool: + self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) + self.relu = nn.ReLU(inplace=True) + self.fc0 = nn.Linear(in_channels, hid_channels, bias=with_bias) + self.bn0 = build_norm_layer(norm_cfg, hid_channels)[1] + + self.fc_names = [] + self.bn_names = [] + for i in range(1, num_layers): + this_channels = out_channels if i == num_layers - 1 \ + else hid_channels + if i != num_layers - 1: + self.add_module( + f'fc{i}', + nn.Linear(hid_channels, this_channels, bias=with_bias)) + self.add_module(f'bn{i}', + build_norm_layer(norm_cfg, this_channels)[1]) + self.bn_names.append(f'bn{i}') + else: + self.add_module( + f'fc{i}', + nn.Linear( + hid_channels, this_channels, bias=with_last_bias)) + if with_last_bn: + self.add_module( + f'bn{i}', + build_norm_layer( + dict(**norm_cfg, affine=with_last_bn_affine), + this_channels)[1]) + self.bn_names.append(f'bn{i}') + else: + self.bn_names.append(None) + self.fc_names.append(f'fc{i}') + + def forward(self, x): + assert len(x) == 1 + x = x[0] + if self.with_avg_pool: + x = self.avgpool(x) + x = x.view(x.size(0), -1) + x = self.fc0(x) + x = self.bn0(x) + for fc_name, bn_name in zip(self.fc_names, self.bn_names): + fc = getattr(self, fc_name) + x = self.relu(x) + x = fc(x) + if bn_name is not None: + bn = getattr(self, bn_name) + x = bn(x) + return [x] + + +@NECKS.register_module() +class SwAVNeck(nn.Module): + """The non-linear neck of SwAV: fc-bn-relu-fc-normalization. + + Args: + in_channels (int): Number of input channels. + hid_channels (int): Number of hidden channels. + out_channels (int): Number of output channels. + with_avg_pool (bool): Whether to apply the global average pooling after + backbone. Defaults to True. + with_l2norm (bool): whether to normalize the output after projection. + Defaults to True. + norm_cfg (dict): Dictionary to construct and config norm layer. + Defaults to dict(type='SyncBN'). 
+ """ + + def __init__(self, + in_channels, + hid_channels, + out_channels, + with_avg_pool=True, + with_l2norm=True, + norm_cfg=dict(type='SyncBN'), + ): + super(SwAVNeck, self).__init__() + self.with_avg_pool = with_avg_pool + self.with_l2norm = with_l2norm + if with_avg_pool: + self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) + if out_channels == 0: + self.projection_neck = None + elif hid_channels == 0: + self.projection_neck = nn.Linear(in_channels, out_channels) + else: + self.bn = build_norm_layer(norm_cfg, hid_channels)[1] + self.projection_neck = nn.Sequential( + nn.Linear(in_channels, hid_channels), self.bn, + nn.ReLU(inplace=True), nn.Linear(hid_channels, out_channels)) + + def forward_projection(self, x): + if self.projection_neck is not None: + x = self.projection_neck(x) + if self.with_l2norm: + x = nn.functional.normalize(x, dim=1, p=2) + return x + + def forward(self, x): + # forward computing + # x: list of feature maps, len(x) according to len(num_crops) + avg_out = [] + for _x in x: + _x = _x[0] + if self.with_avg_pool: + _out = self.avgpool(_x) + avg_out.append(_out) + feat_vec = torch.cat(avg_out) # [sum(num_crops) * N, C] + feat_vec = feat_vec.view(feat_vec.size(0), -1) + output = self.forward_projection(feat_vec) + return [output] + + +@NECKS.register_module() +class DenseCLNeck(nn.Module): + """The non-linear neck of DenseCL. + + Single and dense neck in parallel: fc-relu-fc, conv-relu-conv. + Borrowed from the authors' code: `