Merge pull request #58 from deephealthproject/develop
Develop
salvacarrion authored Nov 26, 2019
2 parents fbaaf81 + 17b86d2 commit cbe57bb
Showing 38 changed files with 252 additions and 276 deletions.
6 changes: 3 additions & 3 deletions CMakeLists.txt
@@ -415,7 +415,7 @@ target_include_directories(eddl PUBLIC
)
target_include_directories(eddl PUBLIC
$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/third_party/eigen>
-$<INSTALL_INTERFACE:include/eddl/third_party/eigen>
+$<INSTALL_INTERFACE:include>
)
#target_compile_features(eddl PUBLIC cxx_std_11)

@@ -605,9 +605,9 @@ foreach(f ${Eigen_directory_files})
endforeach(f ${Eigen_directory_files})
install(FILES
${Eigen_directory_files_to_install}
-DESTINATION "include/eddl/third_party/eigen/Eigen"
+DESTINATION "include/Eigen"
)
-install(DIRECTORY ${ESCAPED_EIGEN_SOURCE_DIR}/src DESTINATION "include/eddl/third_party/eigen/Eigen" COMPONENT Devel FILES_MATCHING PATTERN "*.h")
+install(DIRECTORY ${ESCAPED_EIGEN_SOURCE_DIR}/src DESTINATION "include/Eigen" COMPONENT Devel FILES_MATCHING PATTERN "*.h")


configure_file(${CMAKE_CURRENT_SOURCE_DIR}/cmake/eddlConfig.cmake.in ${CMAKE_BINARY_DIR}/cmake/eddlConfig.cmake @ONLY)
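
The two hunks above simplify the Eigen install layout: the exported include path becomes a plain `include`, and the bundled Eigen headers land in `include/Eigen` instead of `include/eddl/third_party/eigen/Eigen`. A minimal sketch of what this enables for downstream code, assuming the install prefix's `include` directory is on the compiler's search path (the consumer file itself is hypothetical):

```cpp
// consumer.cpp (hypothetical): with headers under <prefix>/include/Eigen,
// the conventional Eigen include style works without an eddl-specific subpath.
#include <Eigen/Dense>

int main() {
    Eigen::Matrix2f m;
    m << 1, 2,
         3, 4;
    return m.trace() == 5.0f ? 0 : 1;  // trace = 1 + 4
}
```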
132 changes: 68 additions & 64 deletions examples/NN/2_CIFAR10/2_cifar_conv_da.cpp
@@ -25,70 +25,74 @@ using namespace eddl;

int main(int argc, char **argv){

-// download CIFAR data
-download_cifar10();
-
-// Settings
-int epochs = 25;
-int batch_size = 100;
-int num_classes = 10;
-
-// network
-layer in=Input({3,32,32});
-layer l=in;
-
-// Data augmentation
-l = FlipRandom(l, 1);
-l = ShiftRandom(l, {-0.1f, +0.1f}, {-0.1f, +0.1f});
-l = ScaleRandom(l, {0.9f, 1.1f});
-
-l=MaxPool(ReLu(Conv(l,32,{3,3},{1,1})),{2,2});
-l=MaxPool(ReLu(Conv(l,64,{3,3},{1,1})),{2,2});
-l=MaxPool(ReLu(Conv(l,128,{3,3},{1,1})),{2,2});
-l=MaxPool(ReLu(Conv(l,256,{3,3},{1,1})),{2,2});
-
-l=Reshape(l,{-1});
-
-l=Activation(Dense(l,128),"relu");
-
-layer out=Activation(Dense(l,num_classes),"softmax");
-
-// net define input and output layers list
-model net=Model({in},{out});
-
-
-// Build model
-build(net,
-sgd(0.01, 0.9), // Optimizer
-{"soft_cross_entropy"}, // Losses
-{"categorical_accuracy"}, // Metrics
-CS_CPU() // CPU with maximum threads availables
-//CS_GPU({1}) // GPU with only one gpu
-);
-
-// plot the model
-plot(net,"model.pdf");
-
-// get some info from the network
-summary(net);
-
-// Load and preprocess training data
-tensor x_train = eddlT::load("cifar_trX.bin");
-tensor y_train = eddlT::load("cifar_trY.bin");
-eddlT::div_(x_train, 255.0);
-
-// Load and preprocess test data
-tensor x_test = eddlT::load("cifar_tsX.bin");
-tensor y_test = eddlT::load("cifar_tsY.bin");
-eddlT::div_(x_test, 255.0);
-
-for(int i=0;i<epochs;i++) {
-// training, list of input and output tensors, batch, epochs
-fit(net,{x_train},{y_train},batch_size, 1);
-// Evaluate train
-std::cout << "Evaluate test:" << std::endl;
-evaluate(net,{x_test},{y_test});
-}
+// download CIFAR data
+download_cifar10();
+
+// Settings
+int epochs = 25;
+int batch_size = 100;
+int num_classes = 10;
+
+// network
+layer in=Input({3,32,32});
+layer l=in;
+
+// Data augmentation
+// l = ShiftRandom(l, {-0.2f, +0.2f}, {-0.2f, +0.2f});
+// l = RotateRandom(l, {-30.0f, +30.0f});
+// l = ScaleRandom(l, {0.85f, 2.0f});
+// l = FlipRandom(l, 1);
+// l = CropRandom(l, {28, 28});
+// l = CropScaleRandom(l, {0.f, 1.0f});
+// l = CutoutRandom(l, {0.0f, 0.3f}, {0.0f, 0.3f});
+
+l=MaxPool(ReLu(Conv(l,32,{3,3},{1,1})),{2,2});
+l=MaxPool(ReLu(Conv(l,64,{3,3},{1,1})),{2,2});
+l=MaxPool(ReLu(Conv(l,128,{3,3},{1,1})),{2,2});
+l=MaxPool(ReLu(Conv(l,256,{3,3},{1,1})),{2,2});
+
+l=Reshape(l,{-1});
+
+l=Activation(Dense(l,128),"relu");
+
+layer out=Activation(Dense(l,num_classes),"softmax");
+
+// net define input and output layers list
+model net=Model({in},{out});
+
+
+// Build model
+build(net,
+sgd(0.01, 0.9), // Optimizer
+{"soft_cross_entropy"}, // Losses
+{"categorical_accuracy"}, // Metrics
+//CS_CPU() // CPU with maximum threads availables
+CS_GPU({1}) // GPU with only one gpu
+);
+
+// plot the model
+plot(net,"model.pdf");
+
+// get some info from the network
+summary(net);
+
+// Load and preprocess training data
+tensor x_train = eddlT::load("cifar_trX.bin");
+tensor y_train = eddlT::load("cifar_trY.bin");
+eddlT::div_(x_train, 255.0);
+
+// Load and preprocess test data
+tensor x_test = eddlT::load("cifar_tsX.bin");
+tensor y_test = eddlT::load("cifar_tsY.bin");
+eddlT::div_(x_test, 255.0);
+
+for(int i=0;i<epochs;i++) {
+// training, list of input and output tensors, batch, epochs
+fit(net,{x_train},{y_train},batch_size, 1);
+// Evaluate train
+std::cout << "Evaluate test:" << std::endl;
+evaluate(net,{x_test},{y_test});
+}


}
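
Both the old and the new version of this example call fit for a single epoch inside the loop so that test metrics can be printed after every epoch; the total training is the same as one 25-epoch call, which would only be evaluated once at the end. A sketch of the two equivalent shapes, using only the calls shown above:

```cpp
// Per-epoch evaluation, as in the example above:
for (int i = 0; i < epochs; i++) {
    fit(net, {x_train}, {y_train}, batch_size, 1);  // train exactly one epoch
    evaluate(net, {x_test}, {y_test});              // then report test metrics
}

// Same total training, but only one evaluation at the end:
fit(net, {x_train}, {y_train}, batch_size, epochs);
evaluate(net, {x_test}, {y_test});
```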
4 changes: 3 additions & 1 deletion examples/NN/2_CIFAR10/3_cifar_vgg16.cpp
@@ -86,10 +86,12 @@ int main(int argc, char **argv){
tensor y_test = eddlT::load("cifar_tsY.bin");
eddlT::div_(x_test, 255.0);

+
+
for(int i=0;i<epochs;i++) {
// training, list of input and output tensors, batch, epochs
fit(net,{x_train},{y_train},batch_size, 1);
-// Evaluate train
+// Evaluate test
std::cout << "Evaluate test:" << std::endl;
evaluate(net,{x_test},{y_test});
}
3 changes: 2 additions & 1 deletion examples/NN/2_CIFAR10/4_cifar_vgg16_bn.cpp
@@ -86,10 +86,11 @@ int main(int argc, char **argv){
tensor y_test = eddlT::load("cifar_tsY.bin");
eddlT::div_(x_test, 255.0);

+
for(int i=0;i<epochs;i++) {
// training, list of input and output tensors, batch, epochs
fit(net,{x_train},{y_train},batch_size, 1);
-// Evaluate train
+// Evaluate test
std::cout << "Evaluate test:" << std::endl;
evaluate(net,{x_test},{y_test});
}
6 changes: 3 additions & 3 deletions examples/Tensor/eddl_tensor.cpp
@@ -52,7 +52,7 @@ int main(int argc, char **argv) {
//t0->reshape_({1, 1, 100, 100});

Tensor *t0 = Tensor::load("images/cow.jpg");
-t0->ToGPU();
+t0->toGPU();
t0->info();
// float* ptr = new float[3*4*2]{
// 255.0f, 255.0f, 255.0f, 255.0f, 255.0f, 255.0f, 255.0f, 255.0f,
@@ -97,7 +97,7 @@ int main(int argc, char **argv) {
// Tensor::cutout(t1, t2, {80, 80}, {100, 200});
// t1 = t2->clone();

-// t2->ToCPU();
+// t2->toCPU();
// t2->save("images/new_cow_single.jpg");
// cout << "Image saved!" << endl;

@@ -133,7 +133,7 @@ int main(int argc, char **argv) {
t1 = t2->clone();

// Save result
-t2->ToCPU();
+t2->toCPU();
t2->save("images/new_cow_" + to_string(i) + ".jpg");
cout << "Image saved! #" << i << endl;
}
4 changes: 2 additions & 2 deletions src/apis/eddl.cpp
@@ -145,8 +145,8 @@ namespace eddl {
return new LCropRandom(parent, new_shape, name, DEV_CPU);
}

-layer CropAndScaleRandom(layer parent, vector<float> factor, string da_mode, string name){
-return new LCropAndScaleRandom(parent, factor, name, da_mode, DEV_CPU);
+layer CropScaleRandom(layer parent, vector<float> factor, string da_mode, string name){
+return new LCropScaleRandom(parent, factor, da_mode, name, DEV_CPU);
}

layer CutoutRandom(layer parent, vector<float> factor_x, vector<float> factor_y, float constant, string name){
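
Two things change here: the layer is renamed from CropAndScaleRandom to CropScaleRandom, and the arguments forwarded to the constructor are reordered so that da_mode now precedes name, matching the declaration shown in the eddl.h hunk below. A hedged usage sketch in the style of the examples above (the factor range is illustrative, not taken from the commit):

```cpp
// Hypothetical snippet; {0.7f, 1.0f} is an illustrative crop/scale range.
layer in = Input({3, 32, 32});
layer a  = CropScaleRandom(in, {0.7f, 1.0f});                     // defaults: da_mode="nearest", name=""
layer b  = CropScaleRandom(in, {0.7f, 1.0f}, "nearest", "csr1");  // all arguments explicit
```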
2 changes: 1 addition & 1 deletion src/apis/eddl.h
@@ -88,7 +88,7 @@ namespace eddl {
layer ScaleRandom(layer parent, vector<float> factor, string da_mode="nearest", float constant=0.0f, string name="");
layer FlipRandom(layer parent, int axis, string name="");
layer CropRandom(layer parent, vector<int> new_shape, string name="");
-layer CropAndScaleRandom(layer parent, vector<float> factor, string da_mode="nearest", string name="");
+layer CropScaleRandom(layer parent, vector<float> factor, string da_mode= "nearest", string name= "");
layer CutoutRandom(layer parent, vector<float> factor_x, vector<float> factor_y, float constant=0.0f, string name="");

// ---- LOSSES ----
16 changes: 8 additions & 8 deletions src/apis/eddlT.cpp
@@ -69,23 +69,23 @@ namespace eddlT {
}

// Copy data ********************************
-void ToCPU_(Tensor *A)
+void toCPU_(Tensor *A)
{
-A->ToCPU();
+A->toCPU();
}
-void ToGPU_(Tensor *A)
+void toGPU_(Tensor *A)
{
-A->ToGPU();
+A->toGPU();
}
-Tensor * ToCPU(Tensor *A){
+Tensor * toCPU(Tensor *A){
Tensor *B=A->clone();
-B->ToCPU();
+B->toCPU();
return B;
}
-Tensor * ToGPU(Tensor *A)
+Tensor * toGPU(Tensor *A)
{
Tensor *B=A->clone();
-B->ToGPU();
+B->toGPU();
return B;
}
Tensor* clone(Tensor *A)
8 changes: 4 additions & 4 deletions src/apis/eddlT.h
@@ -36,10 +36,10 @@ namespace eddlT{


// Copy data c ********************************
-void ToCPU_(Tensor *A);
-void ToGPU_(Tensor *A);
-Tensor * ToCPU(Tensor *A);
-Tensor * ToGPU(Tensor *A);
+void toCPU_(Tensor *A);
+void toGPU_(Tensor *A);
+Tensor * toCPU(Tensor *A);
+Tensor * toGPU(Tensor *A);
Tensor* clone(Tensor *A);
Tensor* select(Tensor *A, int i);
void copyTensor(Tensor *A,Tensor *B);
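
The renames above settle the API on lowerCamelCase and keep the existing convention: the underscore-suffixed toCPU_/toGPU_ move a tensor's data in place and return nothing, while toCPU/toGPU clone the tensor first and return the copy on the target device, as the eddlT.cpp hunk above shows. A short sketch of the distinction, reusing the tensor-loading call from the examples:

```cpp
// In-place vs. copying device transfer, per the signatures in this diff.
Tensor* t = eddlT::load("cifar_trX.bin");

eddlT::toGPU_(t);                   // moves t's own data to the GPU, returns void
Tensor* cpu_copy = eddlT::toCPU(t); // clones t, moves the clone to the CPU; t is untouched
```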
1 change: 1 addition & 0 deletions src/hardware/cpu/cpu_core.cpp
@@ -56,6 +56,7 @@ cpu_select(Tensor * A, Tensor * B, vector<int> sind, int ini, int end)
{
int s = A->size / A->shape[0];

+
#pragma omp parallel for
for (int i = ini; i < end; i++) {
int p = sind[i] * s;
7 changes: 4 additions & 3 deletions src/hardware/cpu/cpu_da.cpp
@@ -74,6 +74,7 @@ void cpu_single_rotate(int b, Tensor *A, Tensor *B, float angle, vector<int> off
void cpu_single_scale(int b, int* offsets, Tensor *A, Tensor *B, vector<int> new_shape, int mode, float constant){

for(int c=0; c<B->shape[1]; c++) {
+
for(int Bi=0; Bi<B->shape[2];Bi++) {
for(int Bj=0; Bj<B->shape[3];Bj++) {

@@ -268,7 +269,6 @@ void cpu_scale_random(Tensor *A, Tensor *B, vector<float> factor, int mode, floa
// https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.zoom.html
// I use "new_shape" because I might want to keep the shape of B, but thinking of it as a bigger/smaller matrix
// If the factor is less than 1.0f, performs a downscale with padding
-int offsets[2] = {0, 0};

#pragma omp parallel for
for(int b=0; b<B->shape[0]; b++) {
@@ -277,6 +277,7 @@ void cpu_scale_random(Tensor *A, Tensor *B, vector<float> factor, int mode, floa
int new_shape_x = (int)(A->shape[3] * scale);

// Center crop (if the if the crop is smaller than B)
+int offsets[2] = {0, 0};
offsets[0] = (new_shape_y - A->shape[2])/2.0f;
offsets[1] = (new_shape_x - A->shape[3])/2.0f;

@@ -297,7 +298,6 @@ void cpu_flip_random(Tensor *A, Tensor *B, int axis){

void cpu_crop_random(Tensor *A, Tensor *B){
// Performs a crop with padding (Keeps the original size)
-int offsets[2] = {0, 0};

#pragma omp parallel for
for(int b=0; b<B->shape[0]; b++) {
@@ -313,6 +313,7 @@ void cpu_crop_random(Tensor *A, Tensor *B){
int coords_from_y = y;
int coords_to_y = y+h;

+int offsets[2] = {0, 0};
offsets[0] = coords_from_y;
offsets[1] = coords_from_x;

@@ -344,7 +345,6 @@ void cpu_crop_scale_random(Tensor *A, Tensor *B, vector<float> factor, int mode,

void cpu_cutout_random(Tensor *A, Tensor *B, vector<float> factor_x, vector<float> factor_y, float constant){
// Performs a crop with padding (Keeps the original size)
-int offsets[2] = {0, 0};

#pragma omp parallel for
for(int b=0; b<B->shape[0]; b++) {
@@ -360,6 +360,7 @@ void cpu_cutout_random(Tensor *A, Tensor *B, vector<float> factor_x, vector<floa
int coords_from_y = y;
int coords_to_y = y+h;

+int offsets[2] = {0, 0};
cpu_single_crop(b, offsets, A, B, {coords_from_y, coords_from_x}, {coords_to_y, coords_to_x}, constant, true);
}
}
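
Every change in this file is the same fix: the offsets array moves from function scope into the body of the #pragma omp parallel for loop. A function-scope array is shared by all OpenMP threads, so the concurrent writes to offsets[0] and offsets[1] were a data race that could pair one sample's crop coordinates with another's; an array declared inside the loop body is private to each iteration's thread. A standalone sketch of the pattern (values are illustrative, not EDDL code):

```cpp
#include <cstdio>

int main() {
    const int batch = 8;

    // Declaring `int offsets[2]` here, outside the parallel loop, would
    // reproduce the old bug: one shared array written by every thread.
    #pragma omp parallel for
    for (int b = 0; b < batch; b++) {
        int offsets[2] = {0, 0};  // declared per iteration: private to each thread
        offsets[0] = b;           // stand-in for a per-sample crop offset
        offsets[1] = 2 * b;
        std::printf("sample %d -> offsets {%d, %d}\n", b, offsets[0], offsets[1]);
    }
    return 0;
}
```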
21 changes: 12 additions & 9 deletions src/hardware/gpu/gpu_da_kernels.cu
@@ -103,7 +103,7 @@ __device__ void gpu_single_scale(long int thread_id_x, float* A, float* B, int b



-__device__ void gpu_single_flip(long int thread_id_x, float* A, float* B, int batch, int depth, int irows, int icols, int axis){
+__device__ void gpu_single_flip(long int thread_id_x, float* A, float* B, int batch, int depth, int irows, int icols, int axis, bool apply){
int A_stride[4] = {depth*irows*icols, irows*icols, icols, 1};
int *B_stride = A_stride;

@@ -115,10 +115,14 @@ __device__ void gpu_single_flip(long int thread_id_x, float* A, float* B, int ba
//--------------
//printf("{%d, %d, %d, %d}\n", b, c, Bi, Bj);

-int pos[2] = {Bi, Bj}; pos[axis] = (irows-1) - pos[axis];
-int Ai = pos[0]; int Aj = pos[1];
-int A_pos = b*A_stride[0] + c*A_stride[1] + Ai*A_stride[2] + Aj*A_stride[3];
-B[thread_id_x] = A[A_pos];
+if(apply){
+    int pos[2] = {Bi, Bj}; pos[axis] = (irows-1) - pos[axis];
+    int Ai = pos[0]; int Aj = pos[1];
+    int A_pos = b*A_stride[0] + c*A_stride[1] + Ai*A_stride[2] + Aj*A_stride[3];
+    B[thread_id_x] = A[A_pos];
+}else{
+    B[thread_id_x] = A[thread_id_x];
+}
}


@@ -208,7 +212,7 @@ __global__ void flip(float* A, float* B, int batch, int depth, int irows, int ic
long int ops = batch * depth*irows*icols;

if (thread_id_x < ops){
-gpu_single_flip(thread_id_x, A, B, batch, depth, irows, icols, axis);
+gpu_single_flip(thread_id_x, A, B, batch, depth, irows, icols, axis, true);
}
}

@@ -290,9 +294,8 @@ __global__ void flip_random(float* A, float* B, int batch, int depth, int irows,
//--------------
int b = thread_id_x / (depth*irows*icols) % batch;

-if(rnd[b] >= 0.5f){ // Apply?
-    gpu_single_flip(thread_id_x, A, B, batch, depth, irows, icols, axis);
-}
+bool apply = rnd[b] >= 0.5f;
+gpu_single_flip(thread_id_x, A, B, batch, depth, irows, icols, axis, apply);
}
}

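The flip changes close a correctness gap: in the old flip_random, a sample whose random draw fell below 0.5 was skipped entirely, leaving its slice of the output buffer B unwritten. Threading an apply flag through gpu_single_flip means every thread writes its output element exactly once, either flipped or copied straight through. A CPU-side analogue of the pattern (hypothetical helper, not the EDDL API):

```cpp
#include <cstdio>
#include <vector>

// Each output element is written exactly once: flipped when `apply` is true,
// copied through otherwise. Dropping the else branch would leave `out`
// holding stale memory for samples where the flip is not applied.
void flip_row(const std::vector<float>& in, std::vector<float>& out, bool apply) {
    const size_t n = in.size();
    for (size_t i = 0; i < n; i++) {
        out[i] = apply ? in[n - 1 - i] : in[i];
    }
}

int main() {
    std::vector<float> a = {1, 2, 3, 4}, b(4);
    flip_row(a, b, true);   // b = {4, 3, 2, 1}
    flip_row(a, b, false);  // b = {1, 2, 3, 4}, not leftover data
    std::printf("%.0f %.0f %.0f %.0f\n", b[0], b[1], b[2], b[3]);
    return 0;
}
```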
(The remaining 26 of the 38 changed files are not shown here.)
