Merge pull request #285 from deephealthproject/develop
Fixes for v1.0a
salvacarrion authored May 25, 2021
2 parents 1b4ceb9 + 94d60b7 commit 8c2bbd1
Showing 12 changed files with 73 additions and 15 deletions.
4 changes: 2 additions & 2 deletions docs/sphinx/source/layers/convolutional.rst
@@ -73,7 +73,7 @@ Example:

.. code-block:: c++

-   l = UpSampling3D(l, {32, 32});
+   l = UpSampling3D(l, {32, 32, 32});


2D Convolutional Transpose
@@ -93,4 +93,4 @@ Example:

.. code-block:: c++

-   l = ConvT3D(l, 32, {3, 3, 3}, {1, 1, 1}, "same");
+   l = ConvT3D(l, 32, {3, 3, 3}, {1, 1, 1}, "same");
16 changes: 7 additions & 9 deletions include/eddl/apis/eddl.h
@@ -948,12 +948,12 @@ namespace eddl {
*
* @param parent Parent layer
* @param filters Integer, the dimensionality of the output space (i.e. the number of output filters in the convolution)
- * @param kernel_size Vector of 2 integers, specifying the height and width of the 2D convolution window
- * @param strides Vector of 2 integers, specifying the strides of the convolution along the height and width
+ * @param kernel_size Vector of 3 integers, specifying the depth, height and width of the 3D convolution window
+ * @param strides Vector of 3 integers, specifying the strides of the convolution along the depth, height and width
* @param padding One of "none", "valid" or "same"
* @param use_bias Boolean, whether the layer uses a bias vector
* @param groups Number of blocked connections from input channels to output channels
- * @param dilation_rate Vector of 2 integers, specifying the dilation rate to use for dilated convolution
+ * @param dilation_rate Vector of 3 integers, specifying the dilation rate to use for dilated convolution
* @param name A name for the operation
* @return Convolution layer
*/
@@ -970,7 +970,6 @@ namespace eddl {
* @param parent Parent layer
* @param filters The dimensionality of the output space (i.e. the number of output filters in the convolution)
* @param kernel_size The height and width of the 2D convolution window
- * @param output_padding The amount of padding along the height and width of the output tensor. The amount of output padding along a given dimension must be lower than the stride along that same dimension
* @param padding One of "valid" or "same"
* @param dilation_rate The dilation rate to use for dilated convolution. Spacing between kernel elements
* @param strides The strides of the convolution along the height and width
@@ -991,18 +990,17 @@
*
* @param parent Parent layer
* @param filters The dimensionality of the output space (i.e. the number of output filters in the convolution)
- * @param kernel_size The height and width of the 2D convolution window
- * @param output_padding The amount of padding along the height and width of the output tensor. The amount of output padding along a given dimension must be lower than the stride along that same dimension
+ * @param kernel_size The depth, height and width of the 3D convolution window
* @param padding One of "valid" or "same"
* @param dilation_rate The dilation rate to use for dilated convolution. Spacing between kernel elements
- * @param strides The strides of the convolution along the height and width
+ * @param strides The strides of the convolution along the depth, height and width
* @param use_bias Boolean, whether the layer uses a bias vector
* @param name A name for the operation
 * @return Output layer after the 3D transposed convolution operation
*/
layer ConvT3D(layer parent, int filters, const vector<int> &kernel_size,
-                  const vector<int> &strides = {1, 1}, string padding = "same", bool use_bias = true,
-                  int groups = 1, const vector<int> &dilation_rate = {1, 1}, string name = "");
+                  const vector<int> &strides = {1, 1, 1}, string padding = "same", bool use_bias = true,
+                  int groups = 1, const vector<int> &dilation_rate = {1, 1, 1}, string name = "");

/**
* @brief Pointwise 2D convolution
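For reference, a minimal call-site sketch of the corrected ConvT3D signature, spelling out the defaults documented above (the layer variable l and the surrounding network are assumed, as in the Sphinx examples):

    // Sketch: 3D transposed convolution with every documented argument spelled out.
    // 32 output filters, 3x3x3 kernel, unit strides, "same" padding, bias enabled,
    // a single group, no dilation, and an auto-generated layer name.
    l = ConvT3D(l, 32, {3, 3, 3},
                {1, 1, 1}, "same", true,
                1, {1, 1, 1}, "");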
1 change: 1 addition & 0 deletions include/eddl/hardware/cpu/cpu_tensor.h
@@ -199,6 +199,7 @@ float cpu_norm_(float *ptr, int size, int *map, string ord);
// CPU: Logic functions: Truth value testing
std::pair<unsigned int*, int> cpu_nonzero(Tensor *A);
void cpu_where(Tensor *condition, Tensor *A, Tensor *B, Tensor *C);
void cpu_where_back(Tensor *condition, Tensor *PD_A, Tensor *PD_B, Tensor *D);

// CPU: Logic functions: Truth value testing
bool cpu_all(Tensor *A);
1 change: 1 addition & 0 deletions include/eddl/hardware/gpu/gpu_hw.h
@@ -191,6 +191,7 @@ void gpu_norm(Tensor *A, Tensor *B, ReduceDescriptor2 *rd, string ord);

// Generating index arrays *****************************
void gpu_where(Tensor *condition, Tensor *A, Tensor *B, Tensor *C);
void gpu_where_back(Tensor *condition, Tensor *PD_A, Tensor *PD_B, Tensor *D);

// GPU: Logic functions: Truth value testing
bool gpu_all(Tensor *A);
1 change: 1 addition & 0 deletions include/eddl/hardware/gpu/gpu_kernels.h
@@ -171,6 +171,7 @@ __global__ void gpu_norm_fro(float *A, float *B, int *map, int size, int size_re

// Generating index arrays *****************************
__global__ void gpu_where(float *condition, float *A, float *B, float *C, long int size);
__global__ void gpu_where_back(float *condition, float *PD_A, float *PD_B, float *D, long int size);

// GPU: Logic functions: Comparisons
__global__ void gpu_isfinite(float *A, float *B, long int size);
3 changes: 2 additions & 1 deletion include/eddl/layers/auxiliar/layer_auxiliar.h
@@ -16,6 +16,7 @@
#include <cstdio>

#include "eddl/layers/layer.h"
#include "eddl/layers/merge/layer_merge.h"

using namespace std;

@@ -41,7 +42,7 @@ class LShape : public LinLayer {


/// Where Layer
-class LWhere : public LinLayer {
+class LWhere : public MLayer {
public:
static int total_layers;
Tensor* condition;
1 change: 1 addition & 0 deletions include/eddl/tensor/tensor.h
@@ -2440,6 +2440,7 @@ class Tensor {
 * @param C A tensor with elements from ``A`` if ``condition`` holds and from ``B`` otherwise.
*/
static void where(Tensor *condition, Tensor *A, Tensor *B, Tensor *C);
static void where_back(Tensor *condition, Tensor *PD_A, Tensor *PD_B, Tensor *D);

Tensor* mask_indices(Tensor *mask, Tensor *A); // where(x > 0, x[random], y[ones])
static void mask_indices(Tensor *mask, Tensor *A, Tensor *B);
11 changes: 11 additions & 0 deletions src/hardware/cpu/cpu_indexing.cpp
@@ -47,4 +47,15 @@ void cpu_where(Tensor *condition, Tensor *A, Tensor *B, Tensor *C){
C->ptr[i] = B->ptr[i];
}
}
}

void cpu_where_back(Tensor *condition, Tensor *PD_A, Tensor *PD_B, Tensor *D){
#pragma omp parallel for
for (int i = 0; i < PD_A->size; ++i){
if((bool) condition->ptr[i]){
PD_A->ptr[i] += D->ptr[i];
}else{
PD_B->ptr[i] += D->ptr[i];
}
}
}
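The backward rule implemented here is the adjoint of cpu_where: the forward picks C[i] from A[i] where the condition holds and from B[i] otherwise, so the incoming delta D[i] belongs entirely to whichever input was picked, and nothing flows into the condition. A tiny self-contained illustration in plain C++ (hypothetical values, not the Tensor API):

    #include <cstdio>

    int main() {
        const int n = 4;
        float cond[n] = {1, 0, 1, 0};      // selection mask
        float A[n]    = {10, 10, 10, 10};  // first input
        float B[n]    = {20, 20, 20, 20};  // second input
        float C[n];                        // forward output
        float D[n]    = {1, 2, 3, 4};      // incoming delta dL/dC
        float PD_A[n] = {0, 0, 0, 0};      // gradient w.r.t. A
        float PD_B[n] = {0, 0, 0, 0};      // gradient w.r.t. B

        // Forward (what cpu_where computes): C[i] = cond[i] ? A[i] : B[i]
        for (int i = 0; i < n; ++i) C[i] = cond[i] ? A[i] : B[i];

        // Backward (what cpu_where_back computes): route D to the selected branch
        for (int i = 0; i < n; ++i) {
            if (cond[i]) PD_A[i] += D[i];
            else         PD_B[i] += D[i];
        }

        // Prints C = 10 20 10 20, PD_A = 1 0 3 0, PD_B = 0 2 0 4
        for (int i = 0; i < n; ++i)
            printf("i=%d C=%.0f PD_A=%.0f PD_B=%.0f\n", i, C[i], PD_A[i], PD_B[i]);
        return 0;
    }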
14 changes: 13 additions & 1 deletion src/hardware/gpu/gpu_indexing.cu
@@ -20,7 +20,7 @@
#include "eddl/descriptors/descriptors.h"


-// CPU: Logic functions: Comparisons
+// GPU: Logic functions: Comparisons
void gpu_where(Tensor *condition, Tensor *A, Tensor *B, Tensor *C){
int device=A->gpu_device;
cudaSetDevice(device);
@@ -30,3 +30,15 @@ void gpu_where(Tensor *condition, Tensor *A, Tensor *B, Tensor *C){
gpu_where<<<dimGrid,dimBlock>>>(condition->ptr, A->ptr, B->ptr, C->ptr, A->size);
check_cuda(cudaDeviceSynchronize(), "gpu_where");
}


// GPU: Logic functions: Comparisons
void gpu_where_back(Tensor *condition, Tensor *PD_A, Tensor *PD_B, Tensor *D){
int device=PD_A->gpu_device;
cudaSetDevice(device);

setDims(PD_A);

gpu_where_back<<<dimGrid,dimBlock>>>(condition->ptr, PD_A->ptr, PD_B->ptr, D->ptr, PD_A->size);
check_cuda(cudaDeviceSynchronize(), "gpu_where_back");
}
12 changes: 12 additions & 0 deletions src/hardware/gpu/gpu_indexing_kernels.cu
@@ -25,3 +25,15 @@ __global__ void gpu_where(float *condition, float *A, float *B, float *C, long i
}
}
}

__global__ void gpu_where_back(float *condition, float *PD_A, float *PD_B, float *D, long int size){
long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x;

if (thread_id_x < size){
if((bool) condition[thread_id_x]){
PD_A[thread_id_x] += D[thread_id_x];
}else{
PD_B[thread_id_x] += D[thread_id_x];
}
}
}
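The += accumulation in the kernel needs no atomics because each thread writes a single, distinct index. For a standalone sanity check, a minimal host-side sketch (hypothetical test code, not part of the commit; assumes the gpu_where_back kernel above is visible in the same .cu file) could look like this:

    #include <cstdio>
    #include <cuda_runtime.h>

    int main() {
        const long int n = 8;
        float h_cond[n] = {1, 0, 1, 0, 1, 0, 1, 0};
        float h_D[n]    = {1, 1, 1, 1, 2, 2, 2, 2};
        float h_PDA[n]  = {0};   // gradient buffers start at zero
        float h_PDB[n]  = {0};

        float *d_cond, *d_PDA, *d_PDB, *d_D;
        cudaMalloc(&d_cond, n * sizeof(float));
        cudaMalloc(&d_PDA,  n * sizeof(float));
        cudaMalloc(&d_PDB,  n * sizeof(float));
        cudaMalloc(&d_D,    n * sizeof(float));
        cudaMemcpy(d_cond, h_cond, n * sizeof(float), cudaMemcpyHostToDevice);
        cudaMemcpy(d_PDA,  h_PDA,  n * sizeof(float), cudaMemcpyHostToDevice);
        cudaMemcpy(d_PDB,  h_PDB,  n * sizeof(float), cudaMemcpyHostToDevice);
        cudaMemcpy(d_D,    h_D,    n * sizeof(float), cudaMemcpyHostToDevice);

        // One thread per element; equivalent to what eddl's setDims(PD_A) arranges.
        int block = 256;
        int grid  = (int)((n + block - 1) / block);
        gpu_where_back<<<grid, block>>>(d_cond, d_PDA, d_PDB, d_D, n);
        cudaDeviceSynchronize();

        cudaMemcpy(h_PDA, d_PDA, n * sizeof(float), cudaMemcpyDeviceToHost);
        cudaMemcpy(h_PDB, d_PDB, n * sizeof(float), cudaMemcpyDeviceToHost);
        // Expected: PD_A = 1 0 1 0 2 0 2 0, PD_B = 0 1 0 1 0 2 0 2
        for (int i = 0; i < n; ++i)
            printf("i=%d PD_A=%.0f PD_B=%.0f\n", i, h_PDA[i], h_PDB[i]);

        cudaFree(d_cond); cudaFree(d_PDA); cudaFree(d_PDB); cudaFree(d_D);
        return 0;
    }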
4 changes: 2 additions & 2 deletions src/layers/auxiliar/layer_where.cpp
@@ -19,7 +19,7 @@ using namespace std;

int LWhere::total_layers = 0;

-LWhere::LWhere(Layer *parent1, Layer *parent2, Layer *condition, string name, int dev, int mem) : LinLayer(name, dev, mem) {
+LWhere::LWhere(Layer *parent1, Layer *parent2, Layer *condition, string name, int dev, int mem) : MLayer(name, dev, mem) {
if(name.empty()) this->name = "where" + to_string(++total_layers);

input = parent1->output; // Useless without a backward
@@ -50,7 +50,7 @@ void LWhere::forward() {
}

void LWhere::backward() {
msg("NotImplementedError", "LWhere::backward");
Tensor::where_back(this->condition, parent[0]->delta, parent[1]->delta, this->delta);
}


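The switch from LinLayer to MLayer above is what makes this backward possible: MLayer is the base eddl uses for layers with more than one parent, so LWhere can now reach both parents' deltas. Assuming the (unchanged) forward simply calls Tensor::where on the parents' outputs, the pair reads as the sketch below; only the backward line is actually part of this commit, the forward body is an assumption for illustration:

    // Sketch of the LWhere forward/backward pair after this commit.
    void LWhere::forward() {
        // output[i] = condition[i] ? parent[0]->output[i] : parent[1]->output[i]
        Tensor::where(this->condition, parent[0]->output, parent[1]->output, this->output);
    }

    void LWhere::backward() {
        // Accumulate this layer's delta into whichever parent produced each element;
        // no gradient flows into the condition tensor.
        Tensor::where_back(this->condition, parent[0]->delta, parent[1]->delta, this->delta);
    }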
20 changes: 20 additions & 0 deletions src/tensor/tensor_indexing.cpp
@@ -91,3 +91,23 @@ void Tensor::where(Tensor *condition, Tensor *A, Tensor *B, Tensor *C){
}
#endif
}

void Tensor::where_back(Tensor *condition, Tensor *PD_A, Tensor *PD_B, Tensor *D){
checkCompatibility(PD_A, PD_B, D, "Tensor::where_back");

if (condition->isCPU() && PD_A->isCPU() && PD_B->isCPU()) {
cpu_where_back(condition, PD_A, PD_B, D);
}
#ifdef cGPU
else if (condition->isGPU() && PD_A->isGPU() && PD_B->isGPU())
{
gpu_where_back(condition, PD_A, PD_B, D);
}
#endif
#ifdef cFPGA
else if (condition->isFPGA() && PD_A->isFPGA() && PD_B->isFPGA())
{
    fpga_where_back(condition, PD_A, PD_B, D);
}
#endif
}
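Putting the two halves together at the Tensor level, a minimal CPU-only usage sketch (hypothetical values; assumes the plain shape constructor, fill_ and print helpers behave as elsewhere in the eddl Tensor API):

    #include <vector>
    #include "eddl/tensor/tensor.h"

    int main() {
        std::vector<int> shape = {4};
        Tensor *cond = new Tensor(shape);   // 1 -> take from A, 0 -> take from B
        Tensor *A    = new Tensor(shape);
        Tensor *B    = new Tensor(shape);
        Tensor *C    = new Tensor(shape);   // forward output
        Tensor *D    = new Tensor(shape);   // incoming delta dL/dC
        Tensor *PD_A = new Tensor(shape);   // gradient buffers, must start at zero
        Tensor *PD_B = new Tensor(shape);

        A->fill_(10.0f); B->fill_(20.0f); D->fill_(1.0f);
        PD_A->fill_(0.0f); PD_B->fill_(0.0f);
        for (int i = 0; i < cond->size; ++i) cond->ptr[i] = (i % 2 == 0) ? 1.0f : 0.0f;

        Tensor::where(cond, A, B, C);             // C    = 10 20 10 20
        Tensor::where_back(cond, PD_A, PD_B, D);  // PD_A = 1 0 1 0, PD_B = 0 1 0 1

        C->print();
        PD_A->print();
        PD_B->print();
        return 0;
    }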
