From 1bc33aa909dd488666cfb08f630b611d4565ac2f Mon Sep 17 00:00:00 2001
From: salvacarrion
Date: Mon, 25 Nov 2019 11:37:10 +0100
Subject: [PATCH 01/10] Add debugging code

---
 examples/NN/2_CIFAR10/2_cifar_conv_da.cpp | 132 +++++++++++-----------
 src/layers/da/layer_crop_random.cpp       |  14 +++
 src/layers/da/layer_crop_scale_random.cpp |  14 +++
 src/layers/da/layer_cutout_random.cpp     |  14 +++
 src/layers/da/layer_flip_random.cpp       |  14 +++
 src/layers/da/layer_rotate_random.cpp     |  14 +++
 src/layers/da/layer_scale_random.cpp      |  14 +++
 src/layers/da/layer_shift_random.cpp      |  18 ++-
 8 files changed, 166 insertions(+), 68 deletions(-)

diff --git a/examples/NN/2_CIFAR10/2_cifar_conv_da.cpp b/examples/NN/2_CIFAR10/2_cifar_conv_da.cpp
index 9ad0ff814..e9face1e8 100644
--- a/examples/NN/2_CIFAR10/2_cifar_conv_da.cpp
+++ b/examples/NN/2_CIFAR10/2_cifar_conv_da.cpp
@@ -25,70 +25,74 @@ using namespace eddl;
 
 int main(int argc, char **argv){
-    // download CIFAR data
-    download_cifar10();
-
-    // Settings
-    int epochs = 25;
-    int batch_size = 100;
-    int num_classes = 10;
-
-    // network
-    layer in=Input({3,32,32});
-    layer l=in;
-
-    // Data augmentation
-    l = FlipRandom(l, 1);
-    l = ShiftRandom(l, {-0.1f, +0.1f}, {-0.1f, +0.1f});
-    l = ScaleRandom(l, {0.9f, 1.1f});
-
-    l=MaxPool(ReLu(Conv(l,32,{3,3},{1,1})),{2,2});
-    l=MaxPool(ReLu(Conv(l,64,{3,3},{1,1})),{2,2});
-    l=MaxPool(ReLu(Conv(l,128,{3,3},{1,1})),{2,2});
-    l=MaxPool(ReLu(Conv(l,256,{3,3},{1,1})),{2,2});
-
-    l=Reshape(l,{-1});
-
-    l=Activation(Dense(l,128),"relu");
-
-    layer out=Activation(Dense(l,num_classes),"softmax");
-
-    // net define input and output layers list
-    model net=Model({in},{out});
-
-
-    // Build model
-    build(net,
-          sgd(0.01, 0.9),           // Optimizer
-          {"soft_cross_entropy"},   // Losses
-          {"categorical_accuracy"}, // Metrics
-          CS_CPU()                  // CPU with maximum threads availables
-          //CS_GPU({1})             // GPU with only one gpu
-    );
-
-    // plot the model
-    plot(net,"model.pdf");
-
-    // get some info from the network
-    summary(net);
-
-    // Load and preprocess training data
-    tensor x_train = eddlT::load("cifar_trX.bin");
-    tensor y_train = eddlT::load("cifar_trY.bin");
-    eddlT::div_(x_train, 255.0);
-
-    // Load and preprocess test data
-    tensor x_test = eddlT::load("cifar_tsX.bin");
-    tensor y_test = eddlT::load("cifar_tsY.bin");
-    eddlT::div_(x_test, 255.0);
-
-    for(int i=0;i<epochs;i++) {
diff --git a/src/layers/da/layer_crop_random.cpp b/src/layers/da/layer_crop_random.cpp
 void LCropRandom::forward() {
+    auto *A=new Tensor({1, input->shape[1], input->shape[2], input->shape[3]}, input->device);
+    int idx = (int)uniform(0.0f, (float)input->shape[0]-1.0f);
+    A->ToGPU();
+    Tensor::select(input, A, {idx}, 0, 1);
+    A->ToCPU();
+    A->save("images/test_f_" + to_string(idx) + "_0.jpg");
+
+    // Method
     Tensor::crop_random(this->input, this->output);
+
+    auto *B=new Tensor({1, output->shape[1], output->shape[2], output->shape[3]}, output->device);
+    B->ToGPU();
+    Tensor::select(output, B, {idx}, 0, 1);
+    B->ToCPU();
+    B->save("images/test_f_" + to_string(idx) + "_1.jpg");
 }
 
 void LCropRandom::backward(){
diff --git a/src/layers/da/layer_crop_scale_random.cpp b/src/layers/da/layer_crop_scale_random.cpp
index 2f9b161c5..138d607b9 100644
--- a/src/layers/da/layer_crop_scale_random.cpp
+++ b/src/layers/da/layer_crop_scale_random.cpp
@@ -46,7 +46,21 @@ void LCropAndScaleRandom::resize(int batch){
 }
 
 void LCropAndScaleRandom::forward() {
+    auto *A=new Tensor({1, input->shape[1], input->shape[2], input->shape[3]}, input->device);
+    int idx = (int)uniform(0.0f, (float)input->shape[0]-1.0f);
+    A->ToGPU();
+    Tensor::select(input, A, {idx}, 0, 1);
+    A->ToCPU();
+    A->save("images/test_da_" + to_string(idx) + "_0.jpg");
+
+    // Method
     Tensor::crop_scale_random(this->input, this->output, this->factor, this->da_mode);
+
+    auto *B=new Tensor({1, output->shape[1], output->shape[2], output->shape[3]}, output->device);
+    B->ToGPU();
+    Tensor::select(output, B, {idx}, 0, 1);
+    B->ToCPU();
+    B->save("images/test_da_" + to_string(idx) + "_1.jpg");
 }
 
 void LCropAndScaleRandom::backward() {
diff --git a/src/layers/da/layer_cutout_random.cpp b/src/layers/da/layer_cutout_random.cpp
index 563eede01..af22365d3 100644
--- a/src/layers/da/layer_cutout_random.cpp
+++ b/src/layers/da/layer_cutout_random.cpp
@@ -45,7 +45,21 @@ void LCutoutRandom::resize(int batch){
 }
 
 void LCutoutRandom::forward() {
+    auto *A=new Tensor({1, input->shape[1], input->shape[2], input->shape[3]}, input->device);
+    int idx = (int)uniform(0.0f, (float)input->shape[0]-1.0f);
+    A->ToGPU();
+    Tensor::select(input, A, {idx}, 0, 1);
+    A->ToCPU();
+    A->save("images/test_da_" + to_string(idx) + "_0.jpg");
+
+    // Method
     Tensor::cutout_random(this->input, this->output, this->factor_x, this->factor_y, this->constant);
+
+    auto *B=new Tensor({1, output->shape[1], output->shape[2], output->shape[3]}, output->device);
+    B->ToGPU();
+    Tensor::select(output, B, {idx}, 0, 1);
+    B->ToCPU();
+    B->save("images/test_da_" + to_string(idx) + "_1.jpg");
 }
 
 void LCutoutRandom::backward() {
diff --git a/src/layers/da/layer_flip_random.cpp b/src/layers/da/layer_flip_random.cpp
index 2e3045f7f..fc2f95785 100644
--- a/src/layers/da/layer_flip_random.cpp
+++ b/src/layers/da/layer_flip_random.cpp
@@ -45,7 +45,21 @@ void LFlipRandom::resize(int batch){
 }
 
 void LFlipRandom::forward() {
+    auto *A=new Tensor({1, input->shape[1], input->shape[2], input->shape[3]}, input->device);
+    int idx = (int)uniform(0.0f, (float)input->shape[0]-1.0f);
+    A->ToGPU();
+    Tensor::select(input, A, {idx}, 0, 1);
+    A->ToCPU();
+    A->save("images/test_da_" + to_string(idx) + "_0.jpg");
+
+    // Method
     Tensor::flip_random(this->input, this->output, this->axis);
+
+    auto *B=new Tensor({1, output->shape[1], output->shape[2], output->shape[3]}, output->device);
+    B->ToGPU();
+    Tensor::select(output, B, {idx}, 0, 1);
+    B->ToCPU();
+    B->save("images/test_da_" + to_string(idx) + "_1.jpg");
 }
 
 void LFlipRandom::backward() {
diff --git a/src/layers/da/layer_rotate_random.cpp b/src/layers/da/layer_rotate_random.cpp
index 6add6654d..d30d84996 100644
--- a/src/layers/da/layer_rotate_random.cpp
+++ b/src/layers/da/layer_rotate_random.cpp
@@ -48,7 +48,21 @@ void LRotateRandom::resize(int batch){
 }
 
 void LRotateRandom::forward() {
+    auto *A=new Tensor({1, input->shape[1], input->shape[2], input->shape[3]}, input->device);
+    int idx = (int)uniform(0.0f, (float)input->shape[0]-1.0f);
+    A->ToGPU();
+    Tensor::select(input, A, {idx}, 0, 1);
+    A->ToCPU();
+    A->save("images/test_da_" + to_string(idx) + "_0.jpg");
+
+    // Method
     Tensor::rotate_random(this->input, this->output, this->factor, this->offset_center, this->da_mode, this->constant);
+
+    auto *B=new Tensor({1, output->shape[1], output->shape[2], output->shape[3]}, output->device);
+    B->ToGPU();
+    Tensor::select(output, B, {idx}, 0, 1);
+    B->ToCPU();
+    B->save("images/test_da_" + to_string(idx) + "_1.jpg");
 }
 
 void LRotateRandom::backward() {
diff --git a/src/layers/da/layer_scale_random.cpp b/src/layers/da/layer_scale_random.cpp
index fb06f7287..913c82a7a 100644
--- a/src/layers/da/layer_scale_random.cpp
+++ b/src/layers/da/layer_scale_random.cpp
@@ -47,7 +47,21 @@ void LScaleRandom::resize(int batch){
 }
 
 void LScaleRandom::forward() {
+    auto *A=new Tensor({1, input->shape[1], input->shape[2], input->shape[3]}, input->device);
+    int idx = (int)uniform(0.0f, (float)input->shape[0]-1.0f);
+    A->ToGPU();
+    Tensor::select(input, A, {idx}, 0, 1);
+    A->ToCPU();
+    A->save("images/test_da_" + to_string(idx) + "_0.jpg");
+
+    // Method
     Tensor::scale_random(this->input, this->output, this->factor, this->da_mode, this->constant);
+
+    auto *B=new Tensor({1, output->shape[1], output->shape[2], output->shape[3]}, output->device);
+    B->ToGPU();
+    Tensor::select(output, B, {idx}, 0, 1);
+    B->ToCPU();
+    B->save("images/test_da_" + to_string(idx) + "_1.jpg");
 }
 
 void LScaleRandom::backward() {
diff --git a/src/layers/da/layer_shift_random.cpp b/src/layers/da/layer_shift_random.cpp
index 76bb0188f..4c4ff972b 100644
--- a/src/layers/da/layer_shift_random.cpp
+++ b/src/layers/da/layer_shift_random.cpp
@@ -49,11 +49,21 @@ void LShiftRandom::resize(int batch){
 }
 
 void LShiftRandom::forward() {
-    if (TRMODE) {
+    auto *A=new Tensor({1, input->shape[1], input->shape[2], input->shape[3]}, input->device);
+    int idx = (int)uniform(0.0f, (float)input->shape[0]-1.0f);
+    A->ToGPU();
+    Tensor::select(input, A, {idx}, 0, 1);
+    A->ToCPU();
+    A->save("images/test_da_" + to_string(idx) + "_0.jpg");
+
+    // Method
     Tensor::shift_random(input, output, factor_x, factor_y);
-    }
-    else
-        Tensor::copy(input,output);
+
+    auto *B=new Tensor({1, output->shape[1], output->shape[2], output->shape[3]}, output->device);
+    B->ToGPU();
+    Tensor::select(output, B, {idx}, 0, 1);
+    B->ToCPU();
+    B->save("images/test_da_" + to_string(idx) + "_1.jpg");
 }
 
 void LShiftRandom::backward() {
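The six-line dump sequence above is pasted verbatim into each of the seven forward() methods. A small helper would keep the instrumentation in one place; the sketch below is hypothetical (save_sample is not an EDDL function), reuses only the Tensor calls visible in the patch, and adds the delete that the pasted blocks omit:

    // Hypothetical helper for the debugging pattern repeated above.
    // Dumps sample `idx` of tensor `t` to disk for visual inspection.
    static void save_sample(Tensor *t, int idx, const string &path) {
        auto *s = new Tensor({1, t->shape[1], t->shape[2], t->shape[3]}, t->device);
        s->ToGPU();                          // select() runs on the device holding t
        Tensor::select(t, s, {idx}, 0, 1);   // copy sample idx into s
        s->ToCPU();                          // save() needs host memory
        s->save(path);
        delete s;                            // the pasted blocks leak A and B
    }

With it, each forward() would shrink to one save_sample(input, ...) call before the Tensor:: method and one save_sample(output, ...) call after it.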
From 08fddc1a7f0b6d5296b47c71f15b6a3be17d3c6e Mon Sep 17 00:00:00 2001
From: salvacarrion
Date: Mon, 25 Nov 2019 13:27:46 +0100
Subject: [PATCH 02/10] Fix DA methods (CropScale not working)

---
 examples/NN/2_CIFAR10/2_cifar_conv_da.cpp |  6 +++---
 src/hardware/cpu/cpu_da.cpp              |  7 ++++---
 src/hardware/gpu/gpu_da_kernels.cu        | 21 ++++++++++++---------
 3 files changed, 19 insertions(+), 15 deletions(-)

diff --git a/examples/NN/2_CIFAR10/2_cifar_conv_da.cpp b/examples/NN/2_CIFAR10/2_cifar_conv_da.cpp
index e9face1e8..a2e5f05ee 100644
--- a/examples/NN/2_CIFAR10/2_cifar_conv_da.cpp
+++ b/examples/NN/2_CIFAR10/2_cifar_conv_da.cpp
@@ -38,12 +38,12 @@ int main(int argc, char **argv){
     layer l=in;
 
     // Data augmentation
-    l = ShiftRandom(l, {-0.2f, +0.2f}, {-0.2f, +0.2f});
+//    l = ShiftRandom(l, {-0.2f, +0.2f}, {-0.2f, +0.2f});
 //    l = RotateRandom(l, {-30.0f, +30.0f});
-//    l = ScaleRandom(l, {0.8f, 1.5f});
+//    l = ScaleRandom(l, {0.85f, 2.0f});
 //    l = FlipRandom(l, 1);
 //    l = CropRandom(l, {28, 28});
-//    l = CropAndScaleRandom(l, {0.8f, 1.0f});
+    l = CropAndScaleRandom(l, {0.8f, 1.0f});
 //    l = CutoutRandom(l, {0.0f, 0.3f}, {0.0f, 0.3f});
 
     l=MaxPool(ReLu(Conv(l,32,{3,3},{1,1})),{2,2});
diff --git a/src/hardware/cpu/cpu_da.cpp b/src/hardware/cpu/cpu_da.cpp
index b304248c7..776644c21 100644
--- a/src/hardware/cpu/cpu_da.cpp
+++ b/src/hardware/cpu/cpu_da.cpp
@@ -69,6 +69,7 @@ void cpu_single_rotate(int b, Tensor *A, Tensor *B, float angle, vector<int> off
 
 void cpu_single_scale(int b, int* offsets, Tensor *A, Tensor *B, vector<int> new_shape, int mode, float constant){
     for(int c=0; c<B->shape[1]; c++) {
+
         for(int Bi=0; Bi<B->shape[2];Bi++) {
             for(int Bj=0; Bj<B->shape[3];Bj++) {
@@ -263,7 +264,6 @@ void cpu_scale_random(Tensor *A, Tensor *B, vector<float> factor, int mode, floa
     // https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.zoom.html
     // I use "new_shape" because I might want to keep the shape of B, but thinking of it as a bigger/smaller matrix
     // If the factor is less than 1.0f, performs a downscale with padding
-    int offsets[2] = {0, 0};
 
     #pragma omp parallel for
     for(int b=0; b<A->shape[0]; b++) {
@@ -272,6 +272,7 @@ void cpu_scale_random(Tensor *A, Tensor *B, vector<float> factor, int mode, floa
         int new_shape_x = (int)(A->shape[3] * scale);
 
         // Center crop (if the if the crop is smaller than B)
+        int offsets[2] = {0, 0};
         offsets[0] = (new_shape_y - A->shape[2])/2.0f;
         offsets[1] = (new_shape_x - A->shape[3])/2.0f;
@@ -292,7 +293,6 @@ void cpu_flip_random(Tensor *A, Tensor *B, int axis){
 
 void cpu_crop_random(Tensor *A, Tensor *B){
     // Performs a crop with padding (Keeps the original size)
-    int offsets[2] = {0, 0};
 
     #pragma omp parallel for
     for(int b=0; b<A->shape[0]; b++) {
@@ -308,6 +308,7 @@ void cpu_crop_random(Tensor *A, Tensor *B){
         int coords_from_y = y;
         int coords_to_y = y+h;
 
+        int offsets[2] = {0, 0};
         offsets[0] = coords_from_y;
         offsets[1] = coords_from_x;
@@ -339,7 +340,6 @@ void cpu_crop_scale_random(Tensor *A, Tensor *B, vector<float> factor, int mode,
 
 void cpu_cutout_random(Tensor *A, Tensor *B, vector<float> factor_x, vector<float> factor_y, float constant){
     // Performs a crop with padding (Keeps the original size)
-    int offsets[2] = {0, 0};
 
     #pragma omp parallel for
     for(int b=0; b<A->shape[0]; b++) {
@@ -355,6 +355,7 @@ void cpu_cutout_random(Tensor *A, Tensor *B, vector<float> factor_x, vector<float
diff --git a/src/hardware/gpu/gpu_da_kernels.cu b/src/hardware/gpu/gpu_da_kernels.cu
-    if (rnd[b] >= 0.5f){ // Apply?
-        gpu_single_flip(thread_id_x, A, B, batch, depth, irows, icols, axis);
-    }
+    bool apply = rnd[b] >= 0.5f;
+    gpu_single_flip(thread_id_x, A, B, batch, depth, irows, icols, axis, apply);
 }
 }
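The cpu_da.cpp hunks above all make the same fix: `int offsets[2]` used to be declared once, before `#pragma omp parallel for`, so every thread wrote to one shared array and the crop/scale coordinates of different batch items could overwrite each other. Declaring the array inside the loop body makes it private to each iteration. A minimal illustration of the pattern (generic sketch, not EDDL code):

    #include <omp.h>

    void racy(int n, int *out) {
        int offsets[2] = {0, 0};        // shared across all threads
        #pragma omp parallel for
        for (int b = 0; b < n; b++) {
            offsets[0] = b;             // data race: concurrent writes
            out[b] = offsets[0];        // may read another thread's value
        }
    }

    void fixed(int n, int *out) {
        #pragma omp parallel for
        for (int b = 0; b < n; b++) {
            int offsets[2] = {b, 0};    // private per iteration: no race
            out[b] = offsets[0];
        }
    }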
eddlT::load("cifar_tsY.bin"); eddlT::div_(x_test, 255.0); + + for(int i=0;i sind, int ini, int end) { int s = A->size / A->shape[0]; + #pragma omp parallel for for (int i = ini; i < end; i++) { int p = sind[i] * s; diff --git a/src/layers/merge/layer_concat.cpp b/src/layers/merge/layer_concat.cpp index d53ce9a89..7c7c5e5f7 100644 --- a/src/layers/merge/layer_concat.cpp +++ b/src/layers/merge/layer_concat.cpp @@ -36,7 +36,7 @@ LConcat::LConcat(vector parent, string name, int dev) : MLayer(name, de } else if (ndim == 4) { for (int i = 0; i < parent.size() - 1; ++i) { if (parent[i]->output->shape[0] != parent[i + 1]->output->shape[0]) - msg("Error: LConcat layers with different size in dim 1"); + msg("Error: LConcat layers with different size in dim 1, batch size"); else if (parent[i]->output->shape[2] != parent[i + 1]->output->shape[2]) msg("Error: LConcat layers with different size in dim 3, rows of 4D"); else if (parent[i]->output->shape[3] != parent[i + 1]->output->shape[3]) diff --git a/src/layers/normalization/layer_batchnorm.cpp b/src/layers/normalization/layer_batchnorm.cpp index 7eab2eea9..83e7506cb 100644 --- a/src/layers/normalization/layer_batchnorm.cpp +++ b/src/layers/normalization/layer_batchnorm.cpp @@ -128,52 +128,14 @@ void LBatchNorm::resize(int batch){ if (momentum!=0.0) { if (!init) { - Tensor *nmean=new Tensor(mean->output->getShape(),dev); - Tensor *nvar=new Tensor(variance->output->getShape(),dev); - - Tensor::copy(mean->output,nmean); - Tensor::copy(variance->output,nvar); + Tensor *nmean=mean->output->clone(); + Tensor *nvar=variance->output->clone(); mean->resize(batch); variance->resize(batch); - int msize=mean->output->shape[0]; - int nsize=nmean->shape[0]; - - if (msize>nsize) { - //from nmean to mean with deselect - vector sind(msize); - int start,end; - for(int i=0;ioutput, sind, start, end); - Tensor::deselect(nvar, variance->output, sind, start, end); - } - if (msize%nsize) { - Tensor::deselect(nmean, mean->output, sind, end, end+(msize%nsize)); - Tensor::deselect(nvar, variance->output, sind,end, end+(msize%nsize)); - } - } - else { - //from nmean to mean with select - vector sind(nsize); - int start,end; - for(int i=0;ioutput, sind, start, end); - Tensor::select(nvar, variance->output, sind, start, end); - } - if (nsize%msize) { - Tensor::select(nmean, mean->output, sind, end, end+(nsize%msize)); - Tensor::select(nvar, variance->output, sind,end, end+(nsize%msize)); - } - - } - + Tensor::tile(nmean,mean->output); + Tensor::tile(nvar,variance->output); delete nmean; delete nvar; diff --git a/src/net/net_api.cpp b/src/net/net_api.cpp index c4fee0aa6..fd3c9da6d 100644 --- a/src/net/net_api.cpp +++ b/src/net/net_api.cpp @@ -453,7 +453,8 @@ void Net::fit(vtensor tin, vtensor tout, int batch, int epochs) { // Set some parameters int num_batches = n / batch_size; - + num_batches=10; + // Train network fprintf(stdout, "%d epochs of %d batches of size %d\n", epochs, num_batches, batch_size); for (i = 0; i < epochs; i++) { diff --git a/src/tensor/tensor.h b/src/tensor/tensor.h index f0bdb5d7f..7b8ac3a6d 100644 --- a/src/tensor/tensor.h +++ b/src/tensor/tensor.h @@ -333,6 +333,7 @@ class Tensor { static void fill(Tensor *A, int aini, int aend, Tensor *B, int bini, int bend, int inc); static void select(Tensor *A, Tensor *B, vector sind, int ini, int end); static void deselect(Tensor *A, Tensor *B, vector sind, int ini, int end); + static void tile(Tensor *A, Tensor *B); // Generators (In-place) ************************************* // Rethink names diff --git 
a/src/tensor/tensor_core.cpp b/src/tensor/tensor_core.cpp index 807023672..6c3497748 100644 --- a/src/tensor/tensor_core.cpp +++ b/src/tensor/tensor_core.cpp @@ -258,6 +258,7 @@ void Tensor::select(Tensor *A, Tensor *B, vector sind, int ini, int end) { Bc->ToCPU(); cpu_select(Ac, Bc, sind, ini, end); + Tensor::copy(Bc,B); delete Ac; @@ -317,3 +318,49 @@ void Tensor::deselect(Tensor *A, Tensor *B, vector sind, int ini, int end) } //B->tsem->unlock(); } + +void Tensor::tile(Tensor *A, Tensor *B) +{ + + int Asize=A->shape[0]; + int Bsize=B->shape[0]; + + + if (Bsize>Asize) { + vector sind(Bsize); + int start,end; + for(int i=0;i sind(Bsize); + for(int i=0;i Date: Tue, 26 Nov 2019 11:02:44 +0100 Subject: [PATCH 05/10] tensor tile --- src/net/net_api.cpp | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/net/net_api.cpp b/src/net/net_api.cpp index fd3c9da6d..fd752aa08 100644 --- a/src/net/net_api.cpp +++ b/src/net/net_api.cpp @@ -452,8 +452,6 @@ void Net::fit(vtensor tin, vtensor tout, int batch, int epochs) { // Set some parameters int num_batches = n / batch_size; - - num_batches=10; // Train network fprintf(stdout, "%d epochs of %d batches of size %d\n", epochs, num_batches, batch_size); From ddf92f9cc588634c26fae1a4ee3acd6483816452 Mon Sep 17 00:00:00 2001 From: salvacarrion Date: Tue, 26 Nov 2019 11:25:56 +0100 Subject: [PATCH 06/10] Fix DA + minor refactor --- examples/NN/2_CIFAR10/2_cifar_conv_da.cpp | 2 +- examples/Tensor/eddl_tensor.cpp | 6 ++-- src/apis/eddl.cpp | 4 +-- src/apis/eddl.h | 2 +- src/apis/eddlT.cpp | 8 ++--- src/layers/da/layer_crop_random.cpp | 16 ++++------ src/layers/da/layer_crop_scale.cpp | 4 +-- src/layers/da/layer_crop_scale_random.cpp | 38 ++++++++++------------- src/layers/da/layer_cutout.cpp | 8 ++--- src/layers/da/layer_cutout_random.cpp | 16 ++++------ src/layers/da/layer_da.h | 6 ++-- src/layers/da/layer_flip.cpp | 8 ++--- src/layers/da/layer_flip_random.cpp | 16 ++++------ src/layers/da/layer_rotate.cpp | 8 ++--- src/layers/da/layer_rotate_random.cpp | 16 ++++------ src/layers/da/layer_scale.cpp | 8 ++--- src/layers/da/layer_scale_random.cpp | 16 ++++------ src/layers/da/layer_shift.cpp | 8 ++--- src/layers/da/layer_shift_random.cpp | 12 +++---- src/tensor/tensor.cpp | 4 +-- src/tensor/tensor.h | 5 ++- src/tensor/tensor_core.cpp | 16 +++++----- src/tensor/tensor_serialization.cpp | 2 +- tests/dev/aux_tests.cpp | 32 +++++++++---------- tests/test_main.cpp | 2 +- 25 files changed, 107 insertions(+), 156 deletions(-) diff --git a/examples/NN/2_CIFAR10/2_cifar_conv_da.cpp b/examples/NN/2_CIFAR10/2_cifar_conv_da.cpp index a2e5f05ee..ce40f8b48 100644 --- a/examples/NN/2_CIFAR10/2_cifar_conv_da.cpp +++ b/examples/NN/2_CIFAR10/2_cifar_conv_da.cpp @@ -43,7 +43,7 @@ int main(int argc, char **argv){ // l = ScaleRandom(l, {0.85f, 2.0f}); // l = FlipRandom(l, 1); // l = CropRandom(l, {28, 28}); - l = CropAndScaleRandom(l, {0.8f, 1.0f}); + l = CropScaleRandom(l, {0.5f, 1.0f}); // l = CutoutRandom(l, {0.0f, 0.3f}, {0.0f, 0.3f}); l=MaxPool(ReLu(Conv(l,32,{3,3},{1,1})),{2,2}); diff --git a/examples/Tensor/eddl_tensor.cpp b/examples/Tensor/eddl_tensor.cpp index c1983dc20..5f44512b7 100644 --- a/examples/Tensor/eddl_tensor.cpp +++ b/examples/Tensor/eddl_tensor.cpp @@ -52,7 +52,7 @@ int main(int argc, char **argv) { //t0->reshape_({1, 1, 100, 100}); Tensor *t0 = Tensor::load("images/cow.jpg"); - t0->ToGPU(); + t0->toGPU(); t0->info(); // float* ptr = new float[3*4*2]{ // 255.0f, 255.0f, 255.0f, 255.0f, 255.0f, 255.0f, 255.0f, 255.0f, @@ -97,7 +97,7 @@ int main(int argc, 
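The tile() addition above replaces roughly forty lines of select/deselect bookkeeping in LBatchNorm::resize() with one reusable primitive. Judging from the implementation and its call site, Tensor::tile(A, B) fills B's batch dimension from A's: when B has more rows than A, A's rows are repeated chunk by chunk (plus a remainder); otherwise the first rows of A are selected. A usage sketch under that reading, with made-up shapes:

    // Grow per-batch running statistics from batch size 32 to 100.
    Tensor *nmean   = new Tensor({32, 64}, DEV_CPU);   // old statistics
    Tensor *resized = new Tensor({100, 64}, DEV_CPU);  // new batch size
    Tensor::tile(nmean, resized);   // rows 0..31 copied, then repeated to row 99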
Date: Tue, 26 Nov 2019 11:02:44 +0100
Subject: [PATCH 05/10] tensor tile

---
 src/net/net_api.cpp | 2 --
 1 file changed, 2 deletions(-)

diff --git a/src/net/net_api.cpp b/src/net/net_api.cpp
index fd3c9da6d..fd752aa08 100644
--- a/src/net/net_api.cpp
+++ b/src/net/net_api.cpp
@@ -452,8 +452,6 @@ void Net::fit(vtensor tin, vtensor tout, int batch, int epochs) {
 
     // Set some parameters
     int num_batches = n / batch_size;
-
-    num_batches=10;
 
     // Train network
     fprintf(stdout, "%d epochs of %d batches of size %d\n", epochs, num_batches, batch_size);

From ddf92f9cc588634c26fae1a4ee3acd6483816452 Mon Sep 17 00:00:00 2001
From: salvacarrion
Date: Tue, 26 Nov 2019 11:25:56 +0100
Subject: [PATCH 06/10] Fix DA + minor refactor

---
 examples/NN/2_CIFAR10/2_cifar_conv_da.cpp |  2 +-
 examples/Tensor/eddl_tensor.cpp           |  6 ++--
 src/apis/eddl.cpp                         |  4 +--
 src/apis/eddl.h                           |  2 +-
 src/apis/eddlT.cpp                        |  8 ++---
 src/layers/da/layer_crop_random.cpp       | 16 ++++------
 src/layers/da/layer_crop_scale.cpp        |  4 +--
 src/layers/da/layer_crop_scale_random.cpp | 38 ++++++++++-------------
 src/layers/da/layer_cutout.cpp            |  8 ++---
 src/layers/da/layer_cutout_random.cpp     | 16 ++++------
 src/layers/da/layer_da.h                  |  6 ++--
 src/layers/da/layer_flip.cpp              |  8 ++---
 src/layers/da/layer_flip_random.cpp       | 16 ++++------
 src/layers/da/layer_rotate.cpp            |  8 ++---
 src/layers/da/layer_rotate_random.cpp     | 16 ++++------
 src/layers/da/layer_scale.cpp             |  8 ++---
 src/layers/da/layer_scale_random.cpp      | 16 ++++------
 src/layers/da/layer_shift.cpp             |  8 ++---
 src/layers/da/layer_shift_random.cpp      | 12 +++----
 src/tensor/tensor.cpp                     |  4 +--
 src/tensor/tensor.h                       |  5 ++-
 src/tensor/tensor_core.cpp                | 16 +++++-----
 src/tensor/tensor_serialization.cpp       |  2 +-
 tests/dev/aux_tests.cpp                   | 32 +++++++++----------
 tests/test_main.cpp                       |  2 +-
 25 files changed, 107 insertions(+), 156 deletions(-)

diff --git a/examples/NN/2_CIFAR10/2_cifar_conv_da.cpp b/examples/NN/2_CIFAR10/2_cifar_conv_da.cpp
index a2e5f05ee..ce40f8b48 100644
--- a/examples/NN/2_CIFAR10/2_cifar_conv_da.cpp
+++ b/examples/NN/2_CIFAR10/2_cifar_conv_da.cpp
@@ -43,7 +43,7 @@ int main(int argc, char **argv){
 //    l = ScaleRandom(l, {0.85f, 2.0f});
 //    l = FlipRandom(l, 1);
 //    l = CropRandom(l, {28, 28});
-    l = CropAndScaleRandom(l, {0.8f, 1.0f});
+    l = CropScaleRandom(l, {0.5f, 1.0f});
 //    l = CutoutRandom(l, {0.0f, 0.3f}, {0.0f, 0.3f});
 
     l=MaxPool(ReLu(Conv(l,32,{3,3},{1,1})),{2,2});
diff --git a/examples/Tensor/eddl_tensor.cpp b/examples/Tensor/eddl_tensor.cpp
index c1983dc20..5f44512b7 100644
--- a/examples/Tensor/eddl_tensor.cpp
+++ b/examples/Tensor/eddl_tensor.cpp
@@ -52,7 +52,7 @@ int main(int argc, char **argv) {
     //t0->reshape_({1, 1, 100, 100});
 
     Tensor *t0 = Tensor::load("images/cow.jpg");
-    t0->ToGPU();
+    t0->toGPU();
     t0->info();
 //    float* ptr = new float[3*4*2]{
 //            255.0f, 255.0f, 255.0f, 255.0f, 255.0f, 255.0f, 255.0f, 255.0f,
@@ -97,7 +97,7 @@ int main(int argc, char **argv) {
 //        Tensor::cutout(t1, t2, {80, 80}, {100, 200});
 //        t1 = t2->clone();
 
-//        t2->ToCPU();
+//        t2->toCPU();
 //        t2->save("images/new_cow_single.jpg");
 //        cout << "Image saved!" << endl;
 
@@ -133,7 +133,7 @@ int main(int argc, char **argv) {
         t1 = t2->clone();
 
         // Save result
-        t2->ToCPU();
+        t2->toCPU();
         t2->save("images/new_cow_" + to_string(i) + ".jpg");
         cout << "Image saved! #" << i << endl;
     }
diff --git a/src/apis/eddl.cpp b/src/apis/eddl.cpp
index 1b410e3e3..afbd2fa42 100644
--- a/src/apis/eddl.cpp
+++ b/src/apis/eddl.cpp
@@ -145,8 +145,8 @@ namespace eddl {
         return new LCropRandom(parent, new_shape, name, DEV_CPU);
     }
 
-    layer CropAndScaleRandom(layer parent, vector<float> factor, string da_mode, string name){
-        return new LCropAndScaleRandom(parent, factor, name, da_mode, DEV_CPU);
+    layer CropScaleRandom(layer parent, vector<float> factor, string da_mode, string name){
+        return new LCropScaleRandom(parent, factor, da_mode, name, DEV_CPU);
     }
 
     layer CutoutRandom(layer parent, vector<float> factor_x, vector<float> factor_y, float constant, string name){
diff --git a/src/apis/eddl.h b/src/apis/eddl.h
index af62f5ab0..175d757de 100644
--- a/src/apis/eddl.h
+++ b/src/apis/eddl.h
@@ -88,7 +88,7 @@ namespace eddl {
     layer ScaleRandom(layer parent, vector<float> factor, string da_mode="nearest", float constant=0.0f, string name="");
     layer FlipRandom(layer parent, int axis, string name="");
     layer CropRandom(layer parent, vector<int> new_shape, string name="");
-    layer CropAndScaleRandom(layer parent, vector<float> factor, string da_mode="nearest", string name="");
+    layer CropScaleRandom(layer parent, vector<float> factor, string da_mode= "nearest", string name= "");
     layer CutoutRandom(layer parent, vector<float> factor_x, vector<float> factor_y, float constant=0.0f, string name="");
 
     // ---- LOSSES ----
diff --git a/src/apis/eddlT.cpp b/src/apis/eddlT.cpp
index eb1fcebf0..351ae30c8 100644
--- a/src/apis/eddlT.cpp
+++ b/src/apis/eddlT.cpp
@@ -71,21 +71,21 @@ namespace eddlT {
     // Copy data ********************************
     void ToCPU_(Tensor *A)
     {
-        A->ToCPU();
+        A->toCPU();
     }
     void ToGPU_(Tensor *A)
     {
-        A->ToGPU();
+        A->toGPU();
     }
     Tensor * ToCPU(Tensor *A){
         Tensor *B=A->clone();
-        B->ToCPU();
+        B->toCPU();
         return B;
     }
     Tensor * ToGPU(Tensor *A)
     {
         Tensor *B=A->clone();
-        B->ToGPU();
+        B->toGPU();
         return B;
     }
     Tensor* clone(Tensor *A)
diff --git a/src/layers/da/layer_crop_random.cpp b/src/layers/da/layer_crop_random.cpp
index b4dd08068..53a79dcc4 100644
--- a/src/layers/da/layer_crop_random.cpp
+++ b/src/layers/da/layer_crop_random.cpp
@@ -46,18 +46,18 @@ void LCropRandom::resize(int batch){
 void LCropRandom::forward() {
     auto *A=new Tensor({1, input->shape[1], input->shape[2], input->shape[3]}, input->device);
     int idx = (int)uniform(0.0f, (float)input->shape[0]-1.0f);
-    A->ToGPU();
+    A->toGPU();
     Tensor::select(input, A, {idx}, 0, 1);
-    A->ToCPU();
+    A->toCPU();
     A->save("images/test_f_" + to_string(idx) + "_0.jpg");
 
     // Method
     Tensor::crop_random(this->input, this->output);
 
     auto *B=new Tensor({1, output->shape[1], output->shape[2], output->shape[3]}, output->device);
-    B->ToGPU();
+    B->toGPU();
     Tensor::select(output, B, {idx}, 0, 1);
-    B->ToCPU();
+    B->toCPU();
     B->save("images/test_f_" + to_string(idx) + "_1.jpg");
 }
 
@@ -67,20 +67,16 @@ void LCropRandom::backward(){
 
 Layer *LCropRandom::share(int c, int bs, vector<Layer *> p) {
-    LCropRandom *n = new LCropRandom(p[0], this->new_shape, "share_" + to_string(c) + name, dev);
+    auto *n = new LCropRandom(p[0], this->new_shape, "share_" + to_string(c) + name, dev);
     n->orig = this;
 
-    // TODO: Implement
-
     return n;
 }
 
 Layer *LCropRandom::clone(int c, int bs, vector<Layer *> p, int todev) {
-    LCropRandom *n = new LCropRandom(p[0], this->new_shape, "clone_" + to_string(todev) + name, todev);
+    auto *n = new LCropRandom(p[0], this->new_shape, "clone_" + to_string(todev) + name, todev);
     n->orig = this;
 
-    // TODO: Implement
-
     return n;
 }
diff --git a/src/layers/da/layer_crop_scale.cpp b/src/layers/da/layer_crop_scale.cpp
index 02404e949..a1222949f 100644
--- a/src/layers/da/layer_crop_scale.cpp
+++ b/src/layers/da/layer_crop_scale.cpp
@@ -21,7 +21,7 @@ int LCropAndScale::total_layers = 0;
 LCropAndScale::LCropAndScale(Layer *parent, vector<int> from_coords, vector<int> to_coords, string da_mode, float constant, string name, int dev) : LCrop(parent, from_coords, to_coords, false, constant, name, dev) {
     if(name.empty()) this->name = "crop_scale" + to_string(++total_layers);
-    this->da_mode=da_mode;
+    this->da_mode=std::move(da_mode);
 }
 
 LCropAndScale::~LCropAndScale()
@@ -30,5 +30,5 @@ LCropAndScale::~LCropAndScale()
 }
 
 void LCropAndScale::forward() {
-    //Tensor::crop_scale(this->input, this->output, this->from_coords, this->to_coords, this->da_mode, this->constant);
+    Tensor::crop_scale(this->input, this->output, this->from_coords, this->to_coords, this->da_mode, this->constant);
 }
diff --git a/src/layers/da/layer_crop_scale_random.cpp b/src/layers/da/layer_crop_scale_random.cpp
index 138d607b9..ce189e88a 100644
--- a/src/layers/da/layer_crop_scale_random.cpp
+++ b/src/layers/da/layer_crop_scale_random.cpp
@@ -18,9 +18,9 @@
 using namespace std;
 
-int LCropAndScaleRandom::total_layers = 0;
+int LCropScaleRandom::total_layers = 0;
 
-LCropAndScaleRandom::LCropAndScaleRandom(Layer *parent, vector<float> factor, string da_mode, string name, int dev) : LinLayer(name, dev) {
+LCropScaleRandom::LCropScaleRandom(Layer *parent, vector<float> factor, string da_mode, string name, int dev) : LinLayer(name, dev) {
     if(name.empty()) this->name = "crop_scale" + to_string(++total_layers);
 
     input = parent->output;
@@ -28,66 +28,62 @@ LCropAndScaleRandom::LCropAndScaleRandom(Layer *parent, vector<float> factor, st
     delta = parent->delta;
 
     // Params
-    this->factor=factor;
-    this->da_mode=da_mode;
+    this->factor=std::move(factor);
+    this->da_mode=std::move(da_mode);
 
     parent->addchild(this);
     addparent(parent);
 }
 
-LCropAndScaleRandom::~LCropAndScaleRandom()
+LCropScaleRandom::~LCropScaleRandom()
 {
     delta=nullptr;
 }
 
 // virtual
-void LCropAndScaleRandom::resize(int batch){
+void LCropScaleRandom::resize(int batch){
     output->resize(batch);
 }
 
-void LCropAndScaleRandom::forward() {
+void LCropScaleRandom::forward() {
     auto *A=new Tensor({1, input->shape[1], input->shape[2], input->shape[3]}, input->device);
     int idx = (int)uniform(0.0f, (float)input->shape[0]-1.0f);
-    A->ToGPU();
+    A->toGPU();
     Tensor::select(input, A, {idx}, 0, 1);
-    A->ToCPU();
+    A->toCPU();
     A->save("images/test_da_" + to_string(idx) + "_0.jpg");
 
     // Method
     Tensor::crop_scale_random(this->input, this->output, this->factor, this->da_mode);
 
     auto *B=new Tensor({1, output->shape[1], output->shape[2], output->shape[3]}, output->device);
-    B->ToGPU();
+    B->toGPU();
     Tensor::select(output, B, {idx}, 0, 1);
-    B->ToCPU();
+    B->toCPU();
     B->save("images/test_da_" + to_string(idx) + "_1.jpg");
 }
 
-void LCropAndScaleRandom::backward() {
+void LCropScaleRandom::backward() {
 
 }
 
-Layer *LCropAndScaleRandom::share(int c, int bs, vector<Layer *> p) {
-    LCropAndScaleRandom *n = new LCropAndScaleRandom(p[0], this->factor, this->da_mode, "share_" + to_string(c) + name, dev);
+Layer *LCropScaleRandom::share(int c, int bs, vector<Layer *> p) {
+    auto *n = new LCropScaleRandom(p[0], this->factor, this->da_mode, "share_" + to_string(c) + name, dev);
     n->orig = this;
 
-    // TODO: Implement
-
     return n;
 }
 
-Layer *LCropAndScaleRandom::clone(int c, int bs, vector<Layer *> p, int todev) {
-    LCropAndScaleRandom *n = new LCropAndScaleRandom(p[0], this->factor, this->da_mode, "clone_" + to_string(todev) + name, todev);
+Layer *LCropScaleRandom::clone(int c, int bs, vector<Layer *> p, int todev) {
+    auto *n = new LCropScaleRandom(p[0], this->factor, this->da_mode, "clone_" + to_string(todev) + name, todev);
     n->orig = this;
 
-    // TODO: Implement
-
     return n;
 }
 
-string LCropAndScaleRandom::plot(int c) {
+string LCropScaleRandom::plot(int c) {
     string s;
 
     if (c) s = name + " [label=" + "\"" + name + "\",style=filled,fontsize=12,fillcolor=bisque4,shape=box]";
diff --git a/src/layers/da/layer_cutout.cpp b/src/layers/da/layer_cutout.cpp
index 3b4722d32..40ef551fd 100644
--- a/src/layers/da/layer_cutout.cpp
+++ b/src/layers/da/layer_cutout.cpp
@@ -56,20 +56,16 @@ void LCutout::backward() {
 
 Layer *LCutout::share(int c, int bs, vector<Layer *> p) {
-    LCutout *n = new LCutout(p[0], this->from_coords, this->to_coords, this->constant, "share_" + to_string(c) + name, dev);
+    auto *n = new LCutout(p[0], this->from_coords, this->to_coords, this->constant, "share_" + to_string(c) + name, dev);
     n->orig = this;
 
-    // TODO: Implement
-
     return n;
 }
 
 Layer *LCutout::clone(int c, int bs, vector<Layer *> p, int todev) {
-    LCutout *n = new LCutout(p[0], this->from_coords, this->to_coords, this->constant, "clone_" + to_string(todev) + name, todev);
+    auto *n = new LCutout(p[0], this->from_coords, this->to_coords, this->constant, "clone_" + to_string(todev) + name, todev);
     n->orig = this;
 
-    // TODO: Implement
-
     return n;
 }
diff --git a/src/layers/da/layer_cutout_random.cpp b/src/layers/da/layer_cutout_random.cpp
index af22365d3..7700aea08 100644
--- a/src/layers/da/layer_cutout_random.cpp
+++ b/src/layers/da/layer_cutout_random.cpp
@@ -47,18 +47,18 @@ void LCutoutRandom::resize(int batch){
 void LCutoutRandom::forward() {
     auto *A=new Tensor({1, input->shape[1], input->shape[2], input->shape[3]}, input->device);
     int idx = (int)uniform(0.0f, (float)input->shape[0]-1.0f);
-    A->ToGPU();
+    A->toGPU();
     Tensor::select(input, A, {idx}, 0, 1);
-    A->ToCPU();
+    A->toCPU();
     A->save("images/test_da_" + to_string(idx) + "_0.jpg");
 
     // Method
     Tensor::cutout_random(this->input, this->output, this->factor_x, this->factor_y, this->constant);
 
     auto *B=new Tensor({1, output->shape[1], output->shape[2], output->shape[3]}, output->device);
-    B->ToGPU();
+    B->toGPU();
     Tensor::select(output, B, {idx}, 0, 1);
-    B->ToCPU();
+    B->toCPU();
     B->save("images/test_da_" + to_string(idx) + "_1.jpg");
 }
 
@@ -68,20 +68,16 @@ void LCutoutRandom::backward() {
 
 Layer *LCutoutRandom::share(int c, int bs, vector<Layer *> p) {
-    LCutoutRandom *n = new LCutoutRandom(p[0], this->factor_x, this->factor_y, this->constant, "share_" + to_string(c) + name, dev);
+    auto *n = new LCutoutRandom(p[0], this->factor_x, this->factor_y, this->constant, "share_" + to_string(c) + name, dev);
     n->orig = this;
 
-    // TODO: Implement
-
     return n;
 }
 
 Layer *LCutoutRandom::clone(int c, int bs, vector<Layer *> p, int todev) {
-    LCutoutRandom *n = new LCutoutRandom(p[0], this->factor_x, this->factor_y, this->constant, "clone_" + to_string(todev) + name, todev);
+    auto *n = new LCutoutRandom(p[0], this->factor_x, this->factor_y, this->constant, "clone_" + to_string(todev) + name, todev);
     n->orig = this;
 
-    // TODO: Implement
-
     return n;
 }
diff --git a/src/layers/da/layer_da.h b/src/layers/da/layer_da.h
index a01e2455f..b8f594637 100644
--- a/src/layers/da/layer_da.h
+++ b/src/layers/da/layer_da.h
@@ -311,14 +311,14 @@ class LCropRandom : public LinLayer {
 
 /// Crop Layer
-class LCropAndScaleRandom : public LinLayer {
+class LCropScaleRandom : public LinLayer {
 public:
     static int total_layers;
     vector<float> factor;
     string da_mode;
 
-    LCropAndScaleRandom(Layer *parent, vector<float> factor, string da_mode, string name, int dev);
-    ~LCropAndScaleRandom();
+    LCropScaleRandom(Layer *parent, vector<float> factor, string da_mode, string name, int dev);
+    ~LCropScaleRandom();
 
     Layer *share(int c, int bs, vector<Layer *> p) override;
diff --git a/src/layers/da/layer_flip.cpp b/src/layers/da/layer_flip.cpp
index 312a4765c..92c6759a5 100644
--- a/src/layers/da/layer_flip.cpp
+++ b/src/layers/da/layer_flip.cpp
@@ -54,20 +54,16 @@ void LFlip::backward() {
 
 Layer *LFlip::share(int c, int bs, vector<Layer *> p) {
-    LFlip *n = new LFlip(p[0], this->axis, "share_" + to_string(c) + name, dev);
+    auto *n = new LFlip(p[0], this->axis, "share_" + to_string(c) + name, dev);
     n->orig = this;
 
-    // TODO: Implement
-
     return n;
 }
 
 Layer *LFlip::clone(int c, int bs, vector<Layer *> p, int todev) {
-    LFlip *n = new LFlip(p[0], this->axis, "clone_" + to_string(todev) + name, todev);
+    auto *n = new LFlip(p[0], this->axis, "clone_" + to_string(todev) + name, todev);
     n->orig = this;
 
-    // TODO: Implement
-
     return n;
 }
diff --git a/src/layers/da/layer_flip_random.cpp b/src/layers/da/layer_flip_random.cpp
index fc2f95785..377ff86fb 100644
--- a/src/layers/da/layer_flip_random.cpp
+++ b/src/layers/da/layer_flip_random.cpp
@@ -47,18 +47,18 @@ void LFlipRandom::resize(int batch){
 void LFlipRandom::forward() {
     auto *A=new Tensor({1, input->shape[1], input->shape[2], input->shape[3]}, input->device);
     int idx = (int)uniform(0.0f, (float)input->shape[0]-1.0f);
-    A->ToGPU();
+    A->toGPU();
     Tensor::select(input, A, {idx}, 0, 1);
-    A->ToCPU();
+    A->toCPU();
     A->save("images/test_da_" + to_string(idx) + "_0.jpg");
 
     // Method
     Tensor::flip_random(this->input, this->output, this->axis);
 
     auto *B=new Tensor({1, output->shape[1], output->shape[2], output->shape[3]}, output->device);
-    B->ToGPU();
+    B->toGPU();
     Tensor::select(output, B, {idx}, 0, 1);
-    B->ToCPU();
+    B->toCPU();
     B->save("images/test_da_" + to_string(idx) + "_1.jpg");
 }
 
@@ -68,20 +68,16 @@ void LFlipRandom::backward() {
 
 Layer *LFlipRandom::share(int c, int bs, vector<Layer *> p) {
-    LFlipRandom *n = new LFlipRandom(p[0], this->axis, "share_" + to_string(c) + name, dev);
+    auto *n = new LFlipRandom(p[0], this->axis, "share_" + to_string(c) + name, dev);
     n->orig = this;
 
-    // TODO: Implement
-
     return n;
 }
 
 Layer *LFlipRandom::clone(int c, int bs, vector<Layer *> p, int todev) {
-    LFlipRandom *n = new LFlipRandom(p[0], this->axis, "clone_" + to_string(todev) + name, todev);
+    auto *n = new LFlipRandom(p[0], this->axis, "clone_" + to_string(todev) + name, todev);
     n->orig = this;
 
-    // TODO: Implement
-
     return n;
 }
diff --git a/src/layers/da/layer_rotate.cpp b/src/layers/da/layer_rotate.cpp
index 4c3556702..a77878377 100644
--- a/src/layers/da/layer_rotate.cpp
+++ b/src/layers/da/layer_rotate.cpp
@@ -58,20 +58,16 @@ void LRotate::backward() {
 
 Layer *LRotate::share(int c, int bs, vector<Layer *> p) {
-    LRotate *n = new LRotate(p[0], this->angle, this->offset_center, this->da_mode, this->constant, "share_" + to_string(c) + name, dev);
+    auto *n = new LRotate(p[0], this->angle, this->offset_center, this->da_mode, this->constant, "share_" + to_string(c) + name, dev);
     n->orig = this;
 
-    // TODO: Implement
-
     return n;
 }
 
 Layer *LRotate::clone(int c, int bs, vector<Layer *> p, int todev) {
-    LRotate *n = new LRotate(p[0], this->angle, this->offset_center, this->da_mode, this->constant, "clone_" + to_string(todev) + name, todev);
+    auto *n = new LRotate(p[0], this->angle, this->offset_center, this->da_mode, this->constant, "clone_" + to_string(todev) + name, todev);
     n->orig = this;
 
-    // TODO: Implement
-
     return n;
 }
diff --git a/src/layers/da/layer_rotate_random.cpp b/src/layers/da/layer_rotate_random.cpp
index d30d84996..8ed22c18e 100644
--- a/src/layers/da/layer_rotate_random.cpp
+++ b/src/layers/da/layer_rotate_random.cpp
@@ -50,18 +50,18 @@ void LRotateRandom::resize(int batch){
 void LRotateRandom::forward() {
     auto *A=new Tensor({1, input->shape[1], input->shape[2], input->shape[3]}, input->device);
     int idx = (int)uniform(0.0f, (float)input->shape[0]-1.0f);
-    A->ToGPU();
+    A->toGPU();
     Tensor::select(input, A, {idx}, 0, 1);
-    A->ToCPU();
+    A->toCPU();
     A->save("images/test_da_" + to_string(idx) + "_0.jpg");
 
     // Method
     Tensor::rotate_random(this->input, this->output, this->factor, this->offset_center, this->da_mode, this->constant);
 
     auto *B=new Tensor({1, output->shape[1], output->shape[2], output->shape[3]}, output->device);
-    B->ToGPU();
+    B->toGPU();
     Tensor::select(output, B, {idx}, 0, 1);
-    B->ToCPU();
+    B->toCPU();
     B->save("images/test_da_" + to_string(idx) + "_1.jpg");
 }
 
@@ -71,20 +71,16 @@ void LRotateRandom::backward() {
 
 Layer *LRotateRandom::share(int c, int bs, vector<Layer *> p) {
-    LRotateRandom *n = new LRotateRandom(p[0], this->factor, this->offset_center, this->da_mode, this->constant, "share_" + to_string(c) + name, dev);
+    auto *n = new LRotateRandom(p[0], this->factor, this->offset_center, this->da_mode, this->constant, "share_" + to_string(c) + name, dev);
     n->orig = this;
 
-    // TODO: Implement
-
     return n;
 }
 
 Layer *LRotateRandom::clone(int c, int bs, vector<Layer *> p, int todev) {
-    LRotateRandom *n = new LRotateRandom(p[0], this->factor, this->offset_center, this->da_mode, this->constant, "clone_" + to_string(todev) + name, todev);
+    auto *n = new LRotateRandom(p[0], this->factor, this->offset_center, this->da_mode, this->constant, "clone_" + to_string(todev) + name, todev);
     n->orig = this;
 
-    // TODO: Implement
-
     return n;
 }
diff --git a/src/layers/da/layer_scale.cpp b/src/layers/da/layer_scale.cpp
index f43f91414..ead3a0305 100644
--- a/src/layers/da/layer_scale.cpp
+++ b/src/layers/da/layer_scale.cpp
@@ -63,20 +63,16 @@ void LScale::backward() {
 
 Layer *LScale::share(int c, int bs, vector<Layer *> p) {
-    LScale *n = new LScale(p[0], this->new_shape, this->reshape, this->da_mode, this->constant, "share_" + to_string(c) + name, dev);
+    auto *n = new LScale(p[0], this->new_shape, this->reshape, this->da_mode, this->constant, "share_" + to_string(c) + name, dev);
     n->orig = this;
 
-    // TODO: Implement
-
     return n;
 }
 
 Layer *LScale::clone(int c, int bs, vector<Layer *> p, int todev) {
-    LScale *n = new LScale(p[0], this->new_shape, this->reshape, this->da_mode, this->constant, "clone_" + to_string(todev) + name, todev);
+    auto *n = new LScale(p[0], this->new_shape, this->reshape, this->da_mode, this->constant, "clone_" + to_string(todev) + name, todev);
     n->orig = this;
 
-    // TODO: Implement
-
     return n;
 }
diff --git a/src/layers/da/layer_scale_random.cpp b/src/layers/da/layer_scale_random.cpp
index 913c82a7a..2abaa29c3 100644
--- a/src/layers/da/layer_scale_random.cpp
+++ b/src/layers/da/layer_scale_random.cpp
@@ -49,18 +49,18 @@ void LScaleRandom::resize(int batch){
 void LScaleRandom::forward() {
     auto *A=new Tensor({1, input->shape[1], input->shape[2], input->shape[3]}, input->device);
     int idx = (int)uniform(0.0f, (float)input->shape[0]-1.0f);
-    A->ToGPU();
+    A->toGPU();
     Tensor::select(input, A, {idx}, 0, 1);
-    A->ToCPU();
+    A->toCPU();
     A->save("images/test_da_" + to_string(idx) + "_0.jpg");
 
     // Method
     Tensor::scale_random(this->input, this->output, this->factor, this->da_mode, this->constant);
 
     auto *B=new Tensor({1, output->shape[1], output->shape[2], output->shape[3]}, output->device);
-    B->ToGPU();
+    B->toGPU();
     Tensor::select(output, B, {idx}, 0, 1);
-    B->ToCPU();
+    B->toCPU();
     B->save("images/test_da_" + to_string(idx) + "_1.jpg");
 }
 
@@ -70,20 +70,16 @@ void LScaleRandom::backward() {
 
 Layer *LScaleRandom::share(int c, int bs, vector<Layer *> p) {
-    LScaleRandom *n = new LScaleRandom(p[0], this->factor, this->da_mode, this->constant, "share_" + to_string(c) + name, dev);
+    auto *n = new LScaleRandom(p[0], this->factor, this->da_mode, this->constant, "share_" + to_string(c) + name, dev);
     n->orig = this;
 
-    // TODO: Implement
-
     return n;
 }
 
 Layer *LScaleRandom::clone(int c, int bs, vector<Layer *> p, int todev) {
-    LScaleRandom *n = new LScaleRandom(p[0], this->factor, this->da_mode, this->constant, "clone_" + to_string(todev) + name, todev);
+    auto *n = new LScaleRandom(p[0], this->factor, this->da_mode, this->constant, "clone_" + to_string(todev) + name, todev);
     n->orig = this;
 
-    // TODO: Implement
-
     return n;
 }
diff --git a/src/layers/da/layer_shift.cpp b/src/layers/da/layer_shift.cpp
index 925d26ce2..5d97f92b4 100644
--- a/src/layers/da/layer_shift.cpp
+++ b/src/layers/da/layer_shift.cpp
@@ -57,20 +57,16 @@ void LShift::backward() {
 
 Layer *LShift::share(int c, int bs, vector<Layer *> p) {
-    LShift *n = new LShift(p[0], this->shift, this->da_mode, this->constant, "share_" + to_string(c) + name, dev);
+    auto *n = new LShift(p[0], this->shift, this->da_mode, this->constant, "share_" + to_string(c) + name, dev);
     n->orig = this;
 
-    // TODO: Implement
-
     return n;
 }
 
 Layer *LShift::clone(int c, int bs, vector<Layer *> p, int todev) {
-    LShift *n = new LShift(p[0], this->shift, this->da_mode, this->constant, "clone_" + to_string(todev) + name, todev);
+    auto *n = new LShift(p[0], this->shift, this->da_mode, this->constant, "clone_" + to_string(todev) + name, todev);
     n->orig = this;
 
-    // TODO: Implement
-
     return n;
 }
diff --git a/src/layers/da/layer_shift_random.cpp b/src/layers/da/layer_shift_random.cpp
index 4c4ff972b..f84c1a302 100644
--- a/src/layers/da/layer_shift_random.cpp
+++ b/src/layers/da/layer_shift_random.cpp
@@ -51,18 +51,18 @@ void LShiftRandom::resize(int batch){
 void LShiftRandom::forward() {
     auto *A=new Tensor({1, input->shape[1], input->shape[2], input->shape[3]}, input->device);
     int idx = (int)uniform(0.0f, (float)input->shape[0]-1.0f);
-    A->ToGPU();
+    A->toGPU();
     Tensor::select(input, A, {idx}, 0, 1);
-    A->ToCPU();
+    A->toCPU();
     A->save("images/test_da_" + to_string(idx) + "_0.jpg");
 
     // Method
     Tensor::shift_random(input, output, factor_x, factor_y);
 
     auto *B=new Tensor({1, output->shape[1], output->shape[2], output->shape[3]}, output->device);
-    B->ToGPU();
+    B->toGPU();
     Tensor::select(output, B, {idx}, 0, 1);
-    B->ToCPU();
+    B->toCPU();
     B->save("images/test_da_" + to_string(idx) + "_1.jpg");
 }
 
@@ -75,8 +75,6 @@ Layer *LShiftRandom::share(int c, int bs, vector<Layer *> p) {
     LShiftRandom *n = new LShiftRandom(p[0], this->factor_x, this->factor_y, this->da_mode, this->constant, "share_" + to_string(c) + name, dev);
     n->orig = this;
 
-    // TODO: Implement
-
     return n;
 }
 
@@ -84,8 +82,6 @@ Layer *LShiftRandom::clone(int c, int bs, vector<Layer *> p, int todev) {
     LShiftRandom *n = new LShiftRandom(p[0], this->factor_x, this->factor_y, this->da_mode, this->constant, "clone_" + to_string(todev) + name, todev);
     n->orig = this;
 
-    // TODO: Implement
-
     return n;
 }
diff --git a/src/tensor/tensor.cpp b/src/tensor/tensor.cpp
index c29ecae64..bcaefc04a 100755
--- a/src/tensor/tensor.cpp
+++ b/src/tensor/tensor.cpp
@@ -101,7 +101,7 @@ Tensor::Tensor(const vector<int> &shape, int dev):Tensor(shape, nullptr, dev){}
 
 Tensor::Tensor(const vector<int> &shape, Tensor *T):Tensor(shape,T->ptr,T->device) {}
 
-void Tensor::ToCPU(int dev){
+void Tensor::toCPU(int dev){
 
 #ifdef cGPU
     if (isGPU()) {
@@ -127,7 +127,7 @@ void Tensor::ToCPU(int dev){
 #endif
 }
 
-void Tensor::ToGPU(int dev){
+void Tensor::toGPU(int dev){
 #ifdef cGPU
     if (isCPU()) {
         this->device = dev;
diff --git a/src/tensor/tensor.h b/src/tensor/tensor.h
index f0bdb5d7f..7ec21d818 100644
--- a/src/tensor/tensor.h
+++ b/src/tensor/tensor.h
@@ -93,8 +93,8 @@ class Tensor {
     ~Tensor();
 
     // Copy data
-    void ToCPU(int dev=DEV_CPU);
-    void ToGPU(int dev=DEV_GPU);
+    void toCPU(int dev=DEV_CPU);
+    void toGPU(int dev=DEV_GPU);
     Tensor* clone();
 
     // Resize
@@ -133,7 +133,6 @@ class Tensor {
     // ***** Core (static) *****************************
     Tensor* permute(vector<int> axis);
 
-
     // ************************************************
     // ****** Tensor operations ***********************
     // ************************************************
diff --git a/src/tensor/tensor_core.cpp b/src/tensor/tensor_core.cpp
index 807023672..ff7bb54c4 100644
--- a/src/tensor/tensor_core.cpp
+++ b/src/tensor/tensor_core.cpp
@@ -236,14 +236,14 @@ void Tensor::select(Tensor *A, Tensor *B, vector<int> sind, int ini, int end) {
     }
     else if ((A->isGPU()) && (B->isCPU())) {
         Tensor *Ac=A->clone();
-        Ac->ToCPU();
+        Ac->toCPU();
 
         cpu_select(Ac, B, sind, ini, end);
 
         delete Ac;
     }else if ((A->isCPU()) && (B->isGPU())) {
         Tensor *Bc=B->clone();
-        Bc->ToCPU();
+        Bc->toCPU();
 
         cpu_select(A, Bc, sind, ini, end);
         Tensor::copy(Bc,B);
@@ -252,10 +252,10 @@ void Tensor::select(Tensor *A, Tensor *B, vector<int> sind, int ini, int end) {
     }
     else if ((A->isGPU()) && (B->isGPU())) {
         Tensor *Ac=A->clone();
-        Ac->ToCPU();
+        Ac->toCPU();
 
         Tensor *Bc=B->clone();
-        Bc->ToCPU();
+        Bc->toCPU();
 
         cpu_select(Ac, Bc, sind, ini, end);
         Tensor::copy(Bc,B);
@@ -285,14 +285,14 @@ void Tensor::deselect(Tensor *A, Tensor *B, vector<int> sind, int ini, int end)
     }
     else if ((A->isGPU()) && (B->isCPU())) {
         Tensor *Ac=A->clone();
-        Ac->ToCPU();
+        Ac->toCPU();
 
         cpu_deselect(Ac, B, sind, ini, end);
 
         delete Ac;
     }else if ((A->isCPU()) && (B->isGPU())) {
         Tensor *Bc=B->clone();
-        Bc->ToCPU();
+        Bc->toCPU();
 
         cpu_deselect(A, Bc, sind, ini, end);
         Tensor::copy(Bc,B);
@@ -301,10 +301,10 @@ void Tensor::deselect(Tensor *A, Tensor *B, vector<int> sind, int ini, int end)
     }
     else if ((A->isGPU()) && (B->isGPU())) {
         Tensor *Ac=A->clone();
-        Ac->ToCPU();
+        Ac->toCPU();
 
         Tensor *Bc=B->clone();
-        Bc->ToCPU();
+        Bc->toCPU();
 
         cpu_deselect(Ac, Bc, sind, ini, end);
         Tensor::copy(Bc,B);
diff --git a/src/tensor/tensor_serialization.cpp b/src/tensor/tensor_serialization.cpp
index 592d34687..8eeee2f32 100644
--- a/src/tensor/tensor_serialization.cpp
+++ b/src/tensor/tensor_serialization.cpp
@@ -206,7 +206,7 @@ void Tensor::save2img(const string& filename, string format){
 
     // Re-order axis
     Tensor *t = this->clone();  // Important if permute is not used
-    t->ToCPU();  // Just in case
+    t->toCPU();  // Just in case
 
     // TODO: Temp! Check permute correctness
     // Re-order components (careful with t[a]=t[b], collisions may appear if both are the same)
diff --git a/tests/dev/aux_tests.cpp b/tests/dev/aux_tests.cpp
index b85ceb19c..e927aa28f 100644
--- a/tests/dev/aux_tests.cpp
+++ b/tests/dev/aux_tests.cpp
@@ -31,8 +31,8 @@ bool check_tensors(Tensor* A, Tensor* B, float epsilon){
     B = B->clone();
 
     // Copy to CPU (equal only supported in CPU)
-    A->ToCPU();
-    B->ToCPU();
+    A->toCPU();
+    B->toCPU();
 
     return Tensor::equal(A, B, epsilon);
 }
@@ -43,7 +43,7 @@ TestResult run_mpool(Tensor* t_input, int dev, int runs){
     // Move to device
     if (dev == DEV_GPU){
-        t_input->ToGPU();
+        t_input->toGPU();
     }
 
     // Instantiate PoolDescription + Perform MaxPooling
@@ -74,8 +74,8 @@ TestResult run_conv2d(Tensor* t_input, Tensor* t_kernel, int dev, int runs){
     // Move to device
     if (dev == DEV_GPU){
-        t_input->ToGPU();
-        t_kernel->ToGPU();
+        t_input->toGPU();
+        t_kernel->toGPU();
     }
 
     // Instantiate PoolDescription + Perform MaxPooling
@@ -103,9 +103,9 @@ TestResult run_dense(Tensor* t_input, Tensor* t_weights, int dev, int runs){
     // Move to device
     if (dev == DEV_GPU){
-        t_input->ToGPU();
-        t_weights->ToGPU();
-        t_output->ToGPU();
+        t_input->toGPU();
+        t_weights->toGPU();
+        t_output->toGPU();
     }
 
     clock_t begin = clock();
@@ -129,8 +129,8 @@ TestResult run_activation(Tensor* t_input, string act, int dev, int runs){
     // Move to device
     if (dev == DEV_GPU){
-        t_input->ToGPU();
-        t_output->ToGPU();
+        t_input->toGPU();
+        t_output->toGPU();
     }
 
     clock_t begin = clock();
@@ -161,7 +161,7 @@ TestResult run_batchnorm(Tensor* t_input, int dev, int runs){
     // Move to device
     if (dev == DEV_GPU){
-        t_input->ToGPU();
+        t_input->toGPU();
     }
 
     LTensor* l_t = new LTensor(t_input->getShape(), t_input->ptr, t_input->device);
@@ -188,8 +188,8 @@ TestResult run_upsampling(Tensor* t_input, vector<int> size, int dev, int runs){
     // Move to device
     if (dev == DEV_GPU){
-        t_input->ToGPU();
-        t_output->ToGPU();
+        t_input->toGPU();
+        t_output->toGPU();
     }
 
     clock_t begin = clock();
@@ -212,7 +212,7 @@ TestResult run_tensor_op(Tensor* t_input, string op, int dev, int runs){
     // Move to device
     if (dev == DEV_GPU){
-        t_output->ToGPU();
+        t_output->toGPU();
     }
 
     clock_t begin = clock();
@@ -275,8 +275,8 @@ TestResult run_tensor_da(Tensor* t_input, Tensor* t_output, string op, int dev,
     // Move to device
     if (dev == DEV_GPU){
-        t_input->ToGPU();
-        t_output->ToGPU();
+        t_input->toGPU();
+        t_output->toGPU();
     }
 
     clock_t begin = clock();
diff --git a/tests/test_main.cpp b/tests/test_main.cpp
index a80525853..c5b8cb0d2 100644
--- a/tests/test_main.cpp
+++ b/tests/test_main.cpp
@@ -366,7 +366,7 @@ int main(int argc, char **argv) {
         t_input->reshape_({t_input->shape[2], t_input->shape[3]});
         t_input->print();
         res_small_gpu.tensor->reshape_({res_small_gpu.tensor->shape[2], res_small_gpu.tensor->shape[3]});
-        res_small_gpu.tensor->ToCPU();
+        res_small_gpu.tensor->toCPU();
         res_small_gpu.tensor->print();
     }
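Besides the CropScaleRandom rename and the toCPU/toGPU casing, the refactor above consistently switches stored string parameters to std::move and the explicit LXxx* locals to auto. Moving a pass-by-value parameter into a member avoids a second copy of the string; a generic illustration of the idiom (not EDDL code):

    #include <string>
    #include <utility>

    struct DALayer {
        std::string da_mode;
        // `m` is already the caller's copy; moving it steals its buffer
        // instead of allocating and copying the string again.
        explicit DALayer(std::string m) : da_mode(std::move(m)) {}
    };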
From 2225bdc2c76a3c1a5f6d5ec7e64466eac2955f63 Mon Sep 17 00:00:00 2001
From: salvacarrion
Date: Tue, 26 Nov 2019 12:01:39 +0100
Subject: [PATCH 07/10] Minor fix

---
 src/apis/eddlT.cpp    | 8 ++++----
 src/apis/eddlT.h      | 8 ++++----
 src/tensor/tensor.cpp | 2 +-
 tests/test_main.cpp   | 2 +-
 4 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/src/apis/eddlT.cpp b/src/apis/eddlT.cpp
index 351ae30c8..bd7cc5c1e 100644
--- a/src/apis/eddlT.cpp
+++ b/src/apis/eddlT.cpp
@@ -69,20 +69,20 @@ namespace eddlT {
     }
 
     // Copy data ********************************
-    void ToCPU_(Tensor *A)
+    void toCPU_(Tensor *A)
     {
         A->toCPU();
     }
-    void ToGPU_(Tensor *A)
+    void toGPU_(Tensor *A)
     {
         A->toGPU();
     }
-    Tensor * ToCPU(Tensor *A){
+    Tensor * toCPU(Tensor *A){
         Tensor *B=A->clone();
         B->toCPU();
         return B;
     }
-    Tensor * ToGPU(Tensor *A)
+    Tensor * toGPU(Tensor *A)
     {
         Tensor *B=A->clone();
         B->toGPU();
         return B;
     }
     Tensor* clone(Tensor *A)
diff --git a/src/apis/eddlT.h b/src/apis/eddlT.h
index 934b902da..330e2c6be 100644
--- a/src/apis/eddlT.h
+++ b/src/apis/eddlT.h
@@ -36,10 +36,10 @@ namespace eddlT{
 
     // Copy data c ********************************
-    void ToCPU_(Tensor *A);
-    void ToGPU_(Tensor *A);
-    Tensor * ToCPU(Tensor *A);
-    Tensor * ToGPU(Tensor *A);
+    void toCPU_(Tensor *A);
+    void toGPU_(Tensor *A);
+    Tensor * toCPU(Tensor *A);
+    Tensor * toGPU(Tensor *A);
     Tensor* clone(Tensor *A);
     Tensor* select(Tensor *A, int i);
     void copyTensor(Tensor *A,Tensor *B);
diff --git a/src/tensor/tensor.cpp b/src/tensor/tensor.cpp
index bcaefc04a..33efad74c 100755
--- a/src/tensor/tensor.cpp
+++ b/src/tensor/tensor.cpp
@@ -107,7 +107,7 @@ void Tensor::toCPU(int dev){
     {
         this->device = dev;
 
-        float *cpu_ptr = get_fmem(size, "Tensor::ToCPU");
+        float *cpu_ptr = get_fmem(size, "Tensor::toCPU");
         float *gpu_ptr = ptr;
 
         if (ndim == 2) {
diff --git a/tests/test_main.cpp b/tests/test_main.cpp
index c5b8cb0d2..7d26f1056 100644
--- a/tests/test_main.cpp
+++ b/tests/test_main.cpp
@@ -312,7 +312,7 @@ int main(int argc, char **argv) {
 //    t_input = Tensor::ones({10, 10, 1000,1000}, DEV_CPU);
 //    res_small_cpu = run_tensor_op(t_input, "shift", DEV_CPU, MAX_TRIES);
 //
-////    res_small_cpu.tensor->ToCPU();
+////    res_small_cpu.tensor->toCPU();
 ////    res_small_cpu.tensor->reshape_({50, 50});
 ////    res_small_cpu.tensor->print();
 //
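After patches 06 and 07 the copy helpers are lower camel case at both levels: Tensor::toCPU()/toGPU() and the eddlT wrappers. As defined above, the underscore variants move a tensor in place while the plain variants return a moved clone; for example:

    auto *t = new Tensor({8, 3, 32, 32}, DEV_CPU);
    eddlT::toGPU_(t);                 // in place: t itself now lives on the GPU
    Tensor *host = eddlT::toCPU(t);   // t stays on the GPU; host is a CPU copy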
From 5e05997d456ab023c34a9b6ed0a418ba6da06ffd Mon Sep 17 00:00:00 2001
From: salvacarrion
Date: Tue, 26 Nov 2019 12:46:11 +0100
Subject: [PATCH 08/10] Minor updates

---
 examples/NN/2_CIFAR10/2_cifar_conv_da.cpp |  6 +++---
 src/layers/da/layer_crop_random.cpp       | 14 --------------
 src/layers/da/layer_crop_scale_random.cpp | 14 --------------
 src/layers/da/layer_cutout_random.cpp     | 14 --------------
 src/layers/da/layer_flip_random.cpp       | 14 --------------
 src/layers/da/layer_rotate_random.cpp     | 14 --------------
 src/layers/da/layer_scale_random.cpp      | 14 --------------
 src/layers/da/layer_shift_random.cpp      | 14 --------------
 8 files changed, 3 insertions(+), 101 deletions(-)

diff --git a/examples/NN/2_CIFAR10/2_cifar_conv_da.cpp b/examples/NN/2_CIFAR10/2_cifar_conv_da.cpp
index 5cd2a8fba..6457863da 100644
--- a/examples/NN/2_CIFAR10/2_cifar_conv_da.cpp
+++ b/examples/NN/2_CIFAR10/2_cifar_conv_da.cpp
@@ -43,7 +43,7 @@ int main(int argc, char **argv){
 //    l = ScaleRandom(l, {0.85f, 2.0f});
 //    l = FlipRandom(l, 1);
 //    l = CropRandom(l, {28, 28});
-    l = CropScaleRandom(l, {0.5f, 1.0f});
+//    l = CropScaleRandom(l, {0.f, 1.0f});
 //    l = CutoutRandom(l, {0.0f, 0.3f}, {0.0f, 0.3f});
 
     l=MaxPool(ReLu(Conv(l,32,{3,3},{1,1})),{2,2});
@@ -66,8 +66,8 @@ int main(int argc, char **argv){
           sgd(0.01, 0.9),           // Optimizer
           {"soft_cross_entropy"},   // Losses
           {"categorical_accuracy"}, // Metrics
-          CS_CPU()                  // CPU with maximum threads availables
-          // CS_GPU({1})            // GPU with only one gpu
+          //CS_CPU()                // CPU with maximum threads availables
+          CS_GPU({1})               // GPU with only one gpu
    );
 
     // plot the model
diff --git a/src/layers/da/layer_crop_random.cpp b/src/layers/da/layer_crop_random.cpp
index 0169756b5..90e0a7e7d 100644
--- a/src/layers/da/layer_crop_random.cpp
+++ b/src/layers/da/layer_crop_random.cpp
@@ -44,21 +44,7 @@ void LCropRandom::resize(int batch){
 }
 
 void LCropRandom::forward() {
-    auto *A=new Tensor({1, input->shape[1], input->shape[2], input->shape[3]}, input->device);
-    int idx = (int)uniform(0.0f, (float)input->shape[0]-1.0f);
-    A->toGPU();
-    Tensor::select(input, A, {idx}, 0, 1);
-    A->toCPU();
-    A->save("images/test_f_" + to_string(idx) + "_0.jpg");
-
-    // Method
     Tensor::crop_random(this->input, this->output);
-
-    auto *B=new Tensor({1, output->shape[1], output->shape[2], output->shape[3]}, output->device);
-    B->toGPU();
-    Tensor::select(output, B, {idx}, 0, 1);
-    B->toCPU();
-    B->save("images/test_f_" + to_string(idx) + "_1.jpg");
 }
 
 void LCropRandom::backward(){
diff --git a/src/layers/da/layer_crop_scale_random.cpp b/src/layers/da/layer_crop_scale_random.cpp
index 62671e9ec..0fda5942a 100644
--- a/src/layers/da/layer_crop_scale_random.cpp
+++ b/src/layers/da/layer_crop_scale_random.cpp
@@ -46,21 +46,7 @@ void LCropScaleRandom::resize(int batch){
 }
 
 void LCropScaleRandom::forward() {
-    auto *A=new Tensor({1, input->shape[1], input->shape[2], input->shape[3]}, input->device);
-    int idx = (int)uniform(0.0f, (float)input->shape[0]-1.0f);
-    A->toGPU();
-    Tensor::select(input, A, {idx}, 0, 1);
-    A->toCPU();
-    A->save("images/test_da_" + to_string(idx) + "_0.jpg");
-
-    // Method
     Tensor::crop_scale_random(this->input, this->output, this->factor, this->da_mode);
-
-    auto *B=new Tensor({1, output->shape[1], output->shape[2], output->shape[3]}, output->device);
-    B->toGPU();
-    Tensor::select(output, B, {idx}, 0, 1);
-    B->toCPU();
-    B->save("images/test_da_" + to_string(idx) + "_1.jpg");
 }
 
 void LCropScaleRandom::backward() {
diff --git a/src/layers/da/layer_cutout_random.cpp b/src/layers/da/layer_cutout_random.cpp
index 5fe9187a5..d62ab67b1 100644
--- a/src/layers/da/layer_cutout_random.cpp
+++ b/src/layers/da/layer_cutout_random.cpp
@@ -45,21 +45,7 @@ void LCutoutRandom::resize(int batch){
 }
 
 void LCutoutRandom::forward() {
-    auto *A=new Tensor({1, input->shape[1], input->shape[2], input->shape[3]}, input->device);
-    int idx = (int)uniform(0.0f, (float)input->shape[0]-1.0f);
-    A->toGPU();
-    Tensor::select(input, A, {idx}, 0, 1);
-    A->toCPU();
-    A->save("images/test_da_" + to_string(idx) + "_0.jpg");
-
-    // Method
     Tensor::cutout_random(this->input, this->output, this->factor_x, this->factor_y, this->constant);
-
-    auto *B=new Tensor({1, output->shape[1], output->shape[2], output->shape[3]}, output->device);
-    B->toGPU();
-    Tensor::select(output, B, {idx}, 0, 1);
-    B->toCPU();
-    B->save("images/test_da_" + to_string(idx) + "_1.jpg");
 }
 
 void LCutoutRandom::backward() {
diff --git a/src/layers/da/layer_flip_random.cpp b/src/layers/da/layer_flip_random.cpp
index a64c03156..51c073950 100644
--- a/src/layers/da/layer_flip_random.cpp
+++ b/src/layers/da/layer_flip_random.cpp
@@ -45,21 +45,7 @@ void LFlipRandom::resize(int batch){
 }
 
 void LFlipRandom::forward() {
-    auto *A=new Tensor({1, input->shape[1], input->shape[2], input->shape[3]}, input->device);
-    int idx = (int)uniform(0.0f, (float)input->shape[0]-1.0f);
-    A->toGPU();
-    Tensor::select(input, A, {idx}, 0, 1);
-    A->toCPU();
-    A->save("images/test_da_" + to_string(idx) + "_0.jpg");
-
-    // Method
     Tensor::flip_random(this->input, this->output, this->axis);
-
-    auto *B=new Tensor({1, output->shape[1], output->shape[2], output->shape[3]}, output->device);
-    B->toGPU();
-    Tensor::select(output, B, {idx}, 0, 1);
-    B->toCPU();
-    B->save("images/test_da_" + to_string(idx) + "_1.jpg");
 }
 
 void LFlipRandom::backward() {
diff --git a/src/layers/da/layer_rotate_random.cpp b/src/layers/da/layer_rotate_random.cpp
index ef9bcc98e..c2ced2e6f 100644
--- a/src/layers/da/layer_rotate_random.cpp
+++ b/src/layers/da/layer_rotate_random.cpp
@@ -48,21 +48,7 @@ void LRotateRandom::resize(int batch){
 }
 
 void LRotateRandom::forward() {
-    auto *A=new Tensor({1, input->shape[1], input->shape[2], input->shape[3]}, input->device);
-    int idx = (int)uniform(0.0f, (float)input->shape[0]-1.0f);
-    A->toGPU();
-    Tensor::select(input, A, {idx}, 0, 1);
-    A->toCPU();
-    A->save("images/test_da_" + to_string(idx) + "_0.jpg");
-
-    // Method
     Tensor::rotate_random(this->input, this->output, this->factor, this->offset_center, this->da_mode, this->constant);
-
-    auto *B=new Tensor({1, output->shape[1], output->shape[2], output->shape[3]}, output->device);
-    B->toGPU();
-    Tensor::select(output, B, {idx}, 0, 1);
-    B->toCPU();
-    B->save("images/test_da_" + to_string(idx) + "_1.jpg");
 }
 
 void LRotateRandom::backward() {
diff --git a/src/layers/da/layer_scale_random.cpp b/src/layers/da/layer_scale_random.cpp
index 715040013..57ae45d7c 100644
--- a/src/layers/da/layer_scale_random.cpp
+++ b/src/layers/da/layer_scale_random.cpp
@@ -47,21 +47,7 @@ void LScaleRandom::resize(int batch){
 }
 
 void LScaleRandom::forward() {
-    auto *A=new Tensor({1, input->shape[1], input->shape[2], input->shape[3]}, input->device);
-    int idx = (int)uniform(0.0f, (float)input->shape[0]-1.0f);
-    A->toGPU();
-    Tensor::select(input, A, {idx}, 0, 1);
-    A->toCPU();
-    A->save("images/test_da_" + to_string(idx) + "_0.jpg");
-
-    // Method
     Tensor::scale_random(this->input, this->output, this->factor, this->da_mode, this->constant);
-
-    auto *B=new Tensor({1, output->shape[1], output->shape[2], output->shape[3]}, output->device);
-    B->toGPU();
-    Tensor::select(output, B, {idx}, 0, 1);
-    B->toCPU();
-    B->save("images/test_da_" + to_string(idx) + "_1.jpg");
 }
 
 void LScaleRandom::backward() {
diff --git a/src/layers/da/layer_shift_random.cpp b/src/layers/da/layer_shift_random.cpp
index 7846eece9..bc51a0828 100644
--- a/src/layers/da/layer_shift_random.cpp
+++ b/src/layers/da/layer_shift_random.cpp
@@ -49,21 +49,7 @@ void LShiftRandom::resize(int batch){
 }
 
 void LShiftRandom::forward() {
-    auto *A=new Tensor({1, input->shape[1], input->shape[2], input->shape[3]}, input->device);
-    int idx = (int)uniform(0.0f, (float)input->shape[0]-1.0f);
-    A->toGPU();
-    Tensor::select(input, A, {idx}, 0, 1);
-    A->toCPU();
-    A->save("images/test_da_" + to_string(idx) + "_0.jpg");
-
-    // Method
     Tensor::shift_random(input, output, factor_x, factor_y);
-
-    auto *B=new Tensor({1, output->shape[1], output->shape[2], output->shape[3]}, output->device);
-    B->toGPU();
-    Tensor::select(output, B, {idx}, 0, 1);
-    B->toCPU();
-    B->save("images/test_da_" + to_string(idx) + "_1.jpg");
 }
 
 void LShiftRandom::backward() {

From 050424b47dc870299bb2f8e62d55adbd692e4e75 Mon Sep 17 00:00:00 2001
From: Roberto Paredes Palacios
Date: Tue, 26 Nov 2019 13:07:34 +0100
Subject: [PATCH 09/10] constraints

---
 src/layers/constraints/constraint_max_norm.cpp     | 4 ++--
 src/layers/constraints/constraint_min_max_norm.cpp | 8 ++++----
 src/layers/constraints/constraint_unit_norm.cpp    | 2 +-
 3 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/src/layers/constraints/constraint_max_norm.cpp b/src/layers/constraints/constraint_max_norm.cpp
index de2311ea2..5ecc19325 100644
--- a/src/layers/constraints/constraint_max_norm.cpp
+++ b/src/layers/constraints/constraint_max_norm.cpp
@@ -15,8 +15,8 @@ using namespace std;
 
 CMaxNorm::CMaxNorm(float max_value, int axis) : Constraint("max_norm") {
     // Todo: Implement
-    this->max_value;
-    this->axis;
+    //this->max_value;
+    //this->axis;
 }
 
 float CMaxNorm::apply(Tensor* T) { return 0; }
diff --git a/src/layers/constraints/constraint_min_max_norm.cpp b/src/layers/constraints/constraint_min_max_norm.cpp
index cdbba43c4..b77dcc4b5 100644
--- a/src/layers/constraints/constraint_min_max_norm.cpp
+++ b/src/layers/constraints/constraint_min_max_norm.cpp
@@ -13,10 +13,10 @@
 
 CMinMaxNorm::CMinMaxNorm(float min_value, float max_value, float rate, int axis) : Constraint("min_max_norm") {
     // Todo: Implement
-    this->min_value;
-    this->max_value;
-    this->rate;
-    this->axis;
+    // this->min_value;
+    // this->max_value;
+    // this->rate;
+    // this->axis;
 }
 
 float CMinMaxNorm::apply(Tensor* T) { return 0; }
diff --git a/src/layers/constraints/constraint_unit_norm.cpp b/src/layers/constraints/constraint_unit_norm.cpp
index 462645058..b0aaeec48 100644
--- a/src/layers/constraints/constraint_unit_norm.cpp
+++ b/src/layers/constraints/constraint_unit_norm.cpp
@@ -12,7 +12,7 @@
 
 CUnitNorm::CUnitNorm(int axis) : Constraint("unit_norm") {
     // Todo: Implement
-    this->axis;
+    // this->axis;
 }
 
 float CUnitNorm::apply(Tensor* T) { return 0; }
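Patch 09 only silences warnings: the constraint constructors contained no-op expression statements (this->max_value; and friends, which read a member and discard the value), and every apply() still returns 0. For orientation, a max-norm constraint conventionally rescales weight columns whose L2 norm exceeds max_value; an illustrative stand-alone version follows (my sketch, not the EDDL implementation):

    #include <cmath>

    // Clip the L2 norm of each column of a rows-by-cols row-major matrix W.
    void apply_max_norm(float *W, int rows, int cols, float max_value) {
        for (int j = 0; j < cols; j++) {
            float norm = 0.0f;
            for (int i = 0; i < rows; i++) norm += W[i*cols + j] * W[i*cols + j];
            norm = std::sqrt(norm);
            if (norm > max_value) {
                float scale = max_value / norm;
                for (int i = 0; i < rows; i++) W[i*cols + j] *= scale;
            }
        }
    }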
From 17b86d227ab4a9cc1911b231432cb7b83ab6052e Mon Sep 17 00:00:00 2001
From: Roberto Paredes Palacios
Date: Tue, 26 Nov 2019 13:14:21 +0100
Subject: [PATCH 10/10] BN without momentum

---
 src/layers/normalization/layer_batchnorm.cpp | 12 ++++++++++--
 1 file changed, 10 insertions(+), 2 deletions(-)

diff --git a/src/layers/normalization/layer_batchnorm.cpp b/src/layers/normalization/layer_batchnorm.cpp
index b46217c7f..377092d9e 100644
--- a/src/layers/normalization/layer_batchnorm.cpp
+++ b/src/layers/normalization/layer_batchnorm.cpp
@@ -167,10 +167,18 @@ void LBatchNorm::forward() {
         }
     }
     else {
-        Tensor::copy(mean->output,layers[0]->output);
+        if (momentum!=0.0)
+            Tensor::copy(mean->output,layers[0]->output);
+        else
+            layers[0]->forward();
         layers[1]->forward();
-        Tensor::copy(variance->output,layers[3]->output);
+        if (momentum!=0.0)
+            Tensor::copy(variance->output,layers[3]->output);
+        else {
+            layers[2]->forward();
+            layers[3]->forward();
+        }
         layers[4]->forward();
         layers[5]->forward();
         layers[6]->forward();
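In brief, the last patch makes inference work when batchnorm keeps no history: with momentum == 0.0 there is no stored running mean/variance to copy, so the statistics sub-layers (layers[0]..layers[3]) are re-run on the current batch instead. With a nonzero momentum, the copied tensors hold the usual exponential moving average of the batch statistics, i.e. (standard batch-norm bookkeeping, not code from this patch):

    // running_mean = momentum * running_mean + (1 - momentum) * batch_mean;
    // running_var  = momentum * running_var  + (1 - momentum) * batch_var;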