diff --git a/README.md b/README.md
index 4c54c1f..b4e7bc1 100644
--- a/README.md
+++ b/README.md
@@ -128,6 +128,20 @@
 std::vector<PODtype> allGatheredvTwo = mpiWorld().varyingAllGather(vecToSend,receiveCounts,displacements);
 ```
 
+Every function defined for a single [POD](http://en.cppreference.com/w/cpp/concept/PODType) type is also defined for a collection of [POD](http://en.cppreference.com/w/cpp/concept/PODType)s. The collection can be held either in a `std::vector` or in a `std::array`. For instance,
+
+```c++
+const int count = 2;
+if(mpiWorld().rank() == sourceIndex) {
+	const std::vector<PODtype> collection(count,toSend);
+	mpiWorld().sendAndBlock(collection,destinationIndex);
+}
+if(mpiWorld().rank() == destinationIndex) {
+	const std::vector<PODtype> received =
+		mpiWorld().receiveAndBlock<std::vector<PODtype>>(count,sourceIndex);
+}
+```
+
 # Communicator
 
 ## Identical vs. Congruent communicators
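A companion sketch of the `std::array` variant of the same exchange (not part of the patch; it assumes the `PODtype`, `toSend`, `sourceIndex`, and `destinationIndex` of the README example above, and a compile-time size of 2 matching the count):

```c++
// Sketch only. With std::array the element count is part of the type;
// receiveAndBlock still takes a runtime count, which the std::array
// overload of initializeWithCount simply ignores.
if(mpiWorld().rank() == sourceIndex) {
	const std::array<PODtype,2> collection = {{ toSend, toSend }};
	mpiWorld().sendAndBlock(collection,destinationIndex);
}
if(mpiWorld().rank() == destinationIndex) {
	const std::array<PODtype,2> received =
		mpiWorld().receiveAndBlock<std::array<PODtype,2>>(2,sourceIndex);
}
```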
diff --git a/include/NiceMPI/NiceMPI.h b/include/NiceMPI/NiceMPI.h
index dee0767..b56b909 100644
--- a/include/NiceMPI/NiceMPI.h
+++ b/include/NiceMPI/NiceMPI.h
@@ -23,6 +23,8 @@ SOFTWARE. */
 #ifndef NICEMPI_H
 #define NICEMPI_H
+#include <array>
+#include <cstddef> // std::size_t
 #include <memory> // unique_ptr
 #include <type_traits> // std::is_pod, std::enable_if
 #include <utility> // std::move
@@ -44,6 +46,46 @@
 Communicator &mpiSelf(); // Forward declaration
 Communicator createProxy(MPI_Comm mpiCommunicator); // Forward declaration
 
+
+/** \brief A std::array that contains PODs is both a POD and a collection, so we need to distinguish the two. */
+template<typename T>
+struct is_std_array {
+	static constexpr bool value = false;
+};
+/** \brief A std::array that contains PODs is both a POD and a collection, so we need to distinguish the two.
+*Specialization*.*/
+template<typename T, std::size_t N>
+struct is_std_array<std::array<T,N>> {
+	static constexpr bool value = true;
+};
+
+
+
+/** \brief Type trait that defines an alias for the type contained in a container. Useful in ReceiveRequest. */
+template<typename T>
+struct to_contained_type {
+	using type = T;
+};
+/** \brief Type trait that defines an alias for the type contained in a container. Useful in ReceiveRequest.
+*Specialization*.*/
+template<typename T>
+struct to_contained_type<std::vector<T>> {
+	using type = typename std::vector<T>::value_type;
+};
+/** \brief Type trait that defines an alias for the type contained in a container. Useful in ReceiveRequest.
+*Specialization*.*/
+template<typename T, std::size_t N>
+struct to_contained_type<std::array<T,N>> {
+	using type = typename std::array<T,N>::value_type;
+};
+/** \brief Type trait that defines an alias for the type contained in a container. Useful in ReceiveRequest.
+*Alias*.*/
+template<typename T>
+using to_contained_type_t = typename to_contained_type<T>::type;
+
+
+
 /** \brief Returned after an asyncSend call, this object allows one to control the status of the call. */
 class SendRequest {
 public:
@@ -73,7 +115,7 @@
 template<typename Type>
 class ReceiveRequest {
 public:
 	/** \brief The function asyncReceive initializes the members of the request directly. */
-	ReceiveRequest(): dataPtr(new Type)
+	ReceiveRequest(int count): data(count)
 	{}
 
 	/** \brief Destroys the handle \p rhs. Only there because of the [rule of 5](http://en.cppreference.com/w/cpp/language/rule_of_three). */
@@ -98,8 +140,8 @@
 		handleError(MPI_Wait(&value,MPI_STATUS_IGNORE));
 	}
 	/** Returns the data, assuming that the user made sure that the receiving operation was completed. */
-	std::unique_ptr<Type> take() {
-		return std::move(dataPtr);
+	std::vector<to_contained_type_t<Type>> take() {
+		return std::move(data);
 	}
 	/** \brief The function asyncReceive needs the address of \p data. */
@@ -109,7 +151,7 @@
 	/** \brief MPI implementation. */
 	MPI_Request value;
 	/** \brief \p data to be received. */
-	std::unique_ptr<Type> dataPtr;
+	std::vector<to_contained_type_t<Type>> data;
 };
@@ -132,34 +174,87 @@ class Communicator {
 
 	/** \brief Regroups the \p data of every process in a single vector and returns it. */
-	template<typename Type, typename std::enable_if<std::is_pod<Type>::value,bool>::type = true>
+	template<typename Type,
+		typename std::enable_if<std::is_pod<Type>::value and !is_std_array<Type>::value,bool>::type = true
+	>
 	std::vector<Type> allGather(Type data);
+	/** \brief Regroups the \p data of every process in a single vector and returns it. */
+	template<typename Collection,
+		typename std::enable_if<std::is_pod<typename Collection::value_type>::value,bool>::type = true
+	>
+	std::vector<typename Collection::value_type> allGather(const Collection& data);
+
 	/** \brief Starts to receive data of type \p Type from the \p source. A \p tag can be required to be provided
 	with the data. \p MPI_ANY_TAG can be used. Returns a ReceiveRequest object that can be used to find out if
 	the data were received, or to wait until they are received and get them.*/
-	template<typename Type, typename std::enable_if<std::is_pod<Type>::value,bool>::type = true>
+	template<typename Type,
+		typename std::enable_if<std::is_pod<Type>::value and !is_std_array<Type>::value,bool>::type = true
+	>
 	ReceiveRequest<Type> asyncReceive(int source, int tag = 0);
+	/** \brief Starts to receive \p count elements of the collection's value type from the \p source. A \p tag can
+	be required to be provided with the data. \p MPI_ANY_TAG can be used. Returns a ReceiveRequest object that can
+	be used to find out if the data were received, or to wait until they are received and get them.*/
+	template<typename Collection,
+		typename std::enable_if<std::is_pod<typename Collection::value_type>::value,bool>::type = true
+	>
+	ReceiveRequest<Collection> asyncReceive(int count, int source, int tag = 0);
+
 	/** \brief Starts to send \p data to the \p destination. A \p tag can be required to be provided with the data.
 	\p MPI_ANY_TAG can be used. Returns a SendRequest object that can be used to find out if the data were sent, or
 	to wait until they are sent.*/
-	template<typename Type, typename std::enable_if<std::is_pod<Type>::value,bool>::type = true>
+	template<typename Type,
+		typename std::enable_if<std::is_pod<Type>::value and !is_std_array<Type>::value,bool>::type = true
+	>
 	SendRequest asyncSend(Type data, int destination, int tag = 0);
+	/** \brief Starts to send \p data to the \p destination. A \p tag can be required to be provided with the data.
+	\p MPI_ANY_TAG can be used. Returns a SendRequest object that can be used to find out if the data were sent, or
+	to wait until they are sent.*/
+	template<typename Collection,
+		typename std::enable_if<std::is_pod<typename Collection::value_type>::value,bool>::type = true
+	>
+	SendRequest asyncSend(const Collection& data, int destination, int tag = 0);
+
 	/** \brief The \p source broadcasts its \p data to every process. */
-	template<typename Type, typename std::enable_if<std::is_pod<Type>::value,bool>::type = true>
+	template<typename Type,
+		typename std::enable_if<std::is_pod<Type>::value and !is_std_array<Type>::value,bool>::type = true
+	>
 	Type broadcast(int source, Type data);
+	/** \brief The \p source broadcasts its \p data to every process. */
+	template<typename Collection,
+		typename std::enable_if<std::is_pod<typename Collection::value_type>::value,bool>::type = true
+	>
+	Collection broadcast(int source, Collection data);
+
 	/** \brief The \p source gathers the \p data of every process. */
-	template<typename Type, typename std::enable_if<std::is_pod<Type>::value,bool>::type = true>
+	template<typename Type,
+		typename std::enable_if<std::is_pod<Type>::value and !is_std_array<Type>::value,bool>::type = true
+	>
 	std::vector<Type> gather(int source, Type data);
+	/** \brief The \p source gathers the \p data of every process. */
+	template<typename Collection,
+		typename std::enable_if<std::is_pod<typename Collection::value_type>::value,bool>::type = true
+	>
+	std::vector<typename Collection::value_type> gather(int source, const Collection& data);
+
 	/** \brief Wait to receive data of type \p Type from the \p source. A \p tag can be required to be provided
 	with the data. \p MPI_ANY_TAG can be used.*/
-	template<typename Type, typename std::enable_if<std::is_pod<Type>::value,bool>::type = true>
+	template<typename Type,
+		typename std::enable_if<std::is_pod<Type>::value and !is_std_array<Type>::value,bool>::type = true
+	>
 	Type receiveAndBlock(int source, int tag = 0);
+	/** \brief Wait to receive \p count elements of the collection's value type from the \p source. A \p tag can
+	be required to be provided with the data.
+	\p MPI_ANY_TAG can be used.*/
+	template<typename Collection,
+		typename std::enable_if<std::is_pod<typename Collection::value_type>::value,bool>::type = true
+	>
+	Collection receiveAndBlock(int count, int source, int tag = 0);
+
 	/** \brief The \p source scatters \p sendCount of its data \p toSend to every process. Hence, the process with
 	rank \p i receives the data from \p toSend[i] to toSend[i+\p sendCount].*/
 	template<typename Type, typename std::enable_if<std::is_pod<Type>::value,bool>::type = true>
@@ -167,9 +262,18 @@
 	/** \brief Wait to send \p data to the \p destination. A \p tag can be required to be provided with the data.
 	\p MPI_ANY_TAG can be used.*/
-	template<typename Type, typename std::enable_if<std::is_pod<Type>::value,bool>::type = true>
+	template<typename Type,
+		typename std::enable_if<std::is_pod<Type>::value and !is_std_array<Type>::value,bool>::type = true
+	>
 	void sendAndBlock(Type data, int destination, int tag = 0);
+	/** \brief Wait to send \p data to the \p destination. A \p tag can be required to be provided with
+	the data. \p MPI_ANY_TAG can be used.*/
+	template<typename Collection,
+		typename std::enable_if<std::is_pod<typename Collection::value_type>::value,bool>::type = true
+	>
+	void sendAndBlock(const Collection& data, int destination, int tag = 0);
+
 	/** \brief Regroups the \p data of every process in a single vector and returns it. \p receiveCounts[i]
 	data are received from the process with rank \p i. These data start at the index \p displacements[i] of the
 	returned vector.*/
@@ -217,6 +321,12 @@
 	/** \brief Returns the \p displacements scaled by the size of \p Type.*/
 	template<typename Type>
 	static std::vector<int> createScaledDisplacements(std::vector<int> displacements);
+	/** \brief Initializes the collection with \p count elements. */
+	template<typename Type>
+	static std::vector<Type> initializeWithCount(std::vector<Type>, int count);
+	/** \brief Initializes the collection with \p count elements. */
+	template<typename Type, std::size_t N>
+	static std::array<Type,N> initializeWithCount(std::array<Type,N> a, int /*count*/);
 	/** \brief Returns the sum of the \p data. */
 	static int sum(const std::vector<int>& data);
 	/** \brief Implements \p varyingScatter(). */
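The overload split in the header above relies on one subtle fact: a `std::array` of PODs is itself a POD, so `std::is_pod` alone cannot route it to the collection overloads. A self-contained sketch of the dispatch rule (the trait is restated so the snippet compiles on its own, and `PODtype` mirrors the struct used in the tests below):

```c++
#include <array>
#include <cstddef>
#include <type_traits>
#include <vector>

// Restatement of the trait added in NiceMPI.h, so this sketch stands alone.
template<typename T> struct is_std_array { static constexpr bool value = false; };
template<typename T, std::size_t N> struct is_std_array<std::array<T,N>> {
	static constexpr bool value = true;
};

struct PODtype { int theInt; double theDouble; char theChar; };

// The POD overloads require is_pod<T> and !is_std_array<T>; the collection
// overloads require is_pod<typename T::value_type>. Hence:
static_assert(std::is_pod<PODtype>::value, "a plain POD matches the POD overloads");
static_assert(std::is_pod<std::array<PODtype,2>>::value,
	"an array of PODs is itself POD...");
static_assert(is_std_array<std::array<PODtype,2>>::value,
	"...so this trait is what routes it to the collection overloads instead");
static_assert(!std::is_pod<std::vector<PODtype>>::value,
	"a vector is never POD, so it can only match the collection overloads");

int main() { return 0; }
```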
diff --git a/include/NiceMPI/private/NiceMPI.hpp b/include/NiceMPI/private/NiceMPI.hpp
index 83eeb06..0aa3d14 100644
--- a/include/NiceMPI/private/NiceMPI.hpp
+++ b/include/NiceMPI/private/NiceMPI.hpp
@@ -54,7 +54,7 @@
 inline Communicator Communicator::split(int color, int key) const {
 }
 
-template<typename Type, typename std::enable_if<std::is_pod<Type>::value,bool>::type>
+template<typename Type, typename std::enable_if<std::is_pod<Type>::value and !is_std_array<Type>::value,bool>::type>
 inline std::vector<Type> Communicator::allGather(Type data) {
 	std::vector<Type> result(size());
 	handleError(MPI_Allgather(&data,sizeof(Type),MPI_UNSIGNED_CHAR,result.data(),sizeof(Type),MPI_UNSIGNED_CHAR,
@@ -62,27 +62,69 @@
 	return result;
 }
 
-template<typename Type, typename std::enable_if<std::is_pod<Type>::value,bool>::type>
+template<typename Collection,
+	typename std::enable_if<std::is_pod<typename Collection::value_type>::value,bool>::type
+>
+inline std::vector<typename Collection::value_type> Communicator::allGather(const Collection& data) {
+	using Type = typename Collection::value_type;
+	std::vector<Type> result(size()*data.size());
+	handleError(MPI_Allgather(data.data(),sizeof(Type)*data.size(),MPI_UNSIGNED_CHAR,result.data(),
+		sizeof(Type)*data.size(),MPI_UNSIGNED_CHAR, handle.get() ));
+	return result;
+}
+
+template<typename Type, typename std::enable_if<std::is_pod<Type>::value and !is_std_array<Type>::value,bool>::type>
 inline ReceiveRequest<Type> Communicator::asyncReceive(int source, int tag) {
-	ReceiveRequest<Type> r;
-	handleError(MPI_Irecv(r.dataPtr.get(),sizeof(Type),MPI_UNSIGNED_CHAR,source,tag,handle.get(),&r.value));
+	ReceiveRequest<Type> r(1);
+	handleError(MPI_Irecv(r.data.data(),sizeof(Type),MPI_UNSIGNED_CHAR,source,tag,handle.get(),&r.value));
 	return r;
 }
 
-template<typename Type, typename std::enable_if<std::is_pod<Type>::value,bool>::type>
+template<typename Collection,
+	typename std::enable_if<std::is_pod<typename Collection::value_type>::value,bool>::type
+>
+inline ReceiveRequest<Collection> Communicator::asyncReceive(int count, int source, int tag) {
+	using Type = typename Collection::value_type;
+	ReceiveRequest<Collection> r(count);
+	handleError(MPI_Irecv(r.data.data(),sizeof(Type)*count,MPI_UNSIGNED_CHAR,source,tag,handle.get(),&r.value));
+	return r;
+}
+
+template<typename Type, typename std::enable_if<std::is_pod<Type>::value and !is_std_array<Type>::value,bool>::type>
 inline SendRequest Communicator::asyncSend(Type data, int destination, int tag) {
 	MPI_Request x;
 	handleError(MPI_Isend(&data,sizeof(Type),MPI_UNSIGNED_CHAR,destination,tag,handle.get(),&x));
 	return SendRequest(x);
 }
 
-template<typename Type, typename std::enable_if<std::is_pod<Type>::value,bool>::type>
+template<typename Collection,
+	typename std::enable_if<std::is_pod<typename Collection::value_type>::value,bool>::type
+>
+inline SendRequest Communicator::asyncSend(const Collection& data, int destination, int tag) {
+	using Type = typename Collection::value_type;
+	MPI_Request x;
+	handleError(MPI_Isend(data.data(),sizeof(Type)*data.size(),MPI_UNSIGNED_CHAR,destination,tag,handle.get(),&x));
+	return SendRequest(x);
+}
+
+template<typename Type, typename std::enable_if<std::is_pod<Type>::value and !is_std_array<Type>::value,bool>::type>
 inline Type Communicator::broadcast(int source, Type data) {
 	handleError(MPI_Bcast(&data,sizeof(Type),MPI_UNSIGNED_CHAR,source,handle.get() ));
 	return data;
 }
 
-template<typename Type, typename std::enable_if<std::is_pod<Type>::value,bool>::type>
+template<typename Collection,
+	typename std::enable_if<std::is_pod<typename Collection::value_type>::value,bool>::type
+>
+inline Collection Communicator::broadcast(int source, Collection data) {
+	using Type = typename Collection::value_type;
+	auto sizeToBroadcast = broadcast(source,data.size());
+	if(rank() != source) data = initializeWithCount(Collection{},sizeToBroadcast);
+	handleError(MPI_Bcast(data.data(),sizeof(Type)*sizeToBroadcast,MPI_UNSIGNED_CHAR,source,handle.get() ));
+	return data;
+}
+
+template<typename Type, typename std::enable_if<std::is_pod<Type>::value and !is_std_array<Type>::value,bool>::type>
 inline std::vector<Type> Communicator::gather(int source, Type data) {
 	std::vector<Type> result;
 	if(rank() == source) result.resize(size());
@@ -91,13 +133,35 @@
 	return result;
 }
 
-template<typename Type, typename std::enable_if<std::is_pod<Type>::value,bool>::type>
+template<typename Collection,
+	typename std::enable_if<std::is_pod<typename Collection::value_type>::value,bool>::type
+>
+std::vector<typename Collection::value_type> Communicator::gather(int source, const Collection& data) {
+	using Type = typename Collection::value_type;
+	std::vector<Type> result;
+	if(rank() == source) result.resize(size()*data.size());
+	handleError(MPI_Gather(data.data(),sizeof(Type)*data.size(),MPI_UNSIGNED_CHAR,result.data(),
+		sizeof(Type)*data.size(),MPI_UNSIGNED_CHAR,source,handle.get() ));
+	return result;
+}
+
+template<typename Type, typename std::enable_if<std::is_pod<Type>::value and !is_std_array<Type>::value,bool>::type>
 inline Type Communicator::receiveAndBlock(int source, int tag) {
 	Type data;
 	handleError(MPI_Recv(&data,sizeof(Type),MPI_UNSIGNED_CHAR,source,tag,handle.get() ,MPI_STATUS_IGNORE));
 	return data;
 }
 
+template<typename Collection,
+	typename std::enable_if<std::is_pod<typename Collection::value_type>::value,bool>::type
+>
+Collection Communicator::receiveAndBlock(int count, int source, int tag) {
+	Collection data = initializeWithCount(Collection{},count);
+	using Type = typename Collection::value_type;
+	handleError(MPI_Recv(data.data(),sizeof(Type)*count,MPI_UNSIGNED_CHAR,source,tag,handle.get() ,MPI_STATUS_IGNORE));
+	return data;
+}
+
 template<typename Type, typename std::enable_if<std::is_pod<Type>::value,bool>::type>
 inline std::vector<Type> Communicator::scatter(int source, const std::vector<Type>& toSend, int sendCount) {
 	const bool enoughDataToSend = (static_cast<int>(toSend.size()) - sendCount*size()) >= 0;
@@ -108,11 +172,19 @@
-template<typename Type, typename std::enable_if<std::is_pod<Type>::value,bool>::type>
+template<typename Type, typename std::enable_if<std::is_pod<Type>::value and !is_std_array<Type>::value,bool>::type>
 inline void Communicator::sendAndBlock(Type data, int destination, int tag) {
 	handleError(MPI_Send(&data,sizeof(Type),MPI_UNSIGNED_CHAR,destination,tag,handle.get() ));
 }
 
+template<typename Collection,
+	typename std::enable_if<std::is_pod<typename Collection::value_type>::value,bool>::type
+>
+inline void Communicator::sendAndBlock(const Collection& data, int destination, int tag) {
+	using Type = typename Collection::value_type;
+	handleError(MPI_Send(data.data(),sizeof(Type)*data.size(),MPI_UNSIGNED_CHAR,destination,tag,handle.get() ));
+}
+
 template<typename Type, typename std::enable_if<std::is_pod<Type>::value,bool>::type>
 inline std::vector<Type> Communicator::varyingAllGather(const std::vector<Type>& data,
 	const std::vector<int>& receiveCounts, const std::vector<int>& displacements)
@@ -187,6 +259,15 @@
 	return displacements;
 }
 
+template<typename Type>
+inline std::vector<Type> Communicator::initializeWithCount(std::vector<Type>, int count) {
+	return std::vector<Type>(count);
+}
+template<typename Type, std::size_t N>
+inline std::array<Type,N> Communicator::initializeWithCount(std::array<Type,N> a, int /*count*/) {
+	return a;
+}
+
 inline int Communicator::sum(const std::vector<int>& data) {
 	int theSum = 0;
 	for(auto&& x: data) theSum += x;
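One implementation detail above deserves a note: `broadcast` for collections is a two-phase operation — the element count is broadcast first, so receiving ranks can size a `std::vector` before the payload arrives (the `std::array` overload of `initializeWithCount` ignores the count, since the size is fixed by the type). A minimal sketch of the same pattern in raw MPI, with illustrative names that are not part of the library:

```c++
#include <mpi.h>
#include <vector>

// Two-phase broadcast of a variable-length buffer, mirroring what
// Communicator::broadcast(int, Collection) does above. Sketch only.
std::vector<unsigned char> broadcastBytes(std::vector<unsigned char> data,
	int source, MPI_Comm comm)
{
	int rank = 0;
	MPI_Comm_rank(comm, &rank);
	// Phase 1: every rank learns the payload size from the source.
	unsigned long long count = data.size();
	MPI_Bcast(&count, 1, MPI_UNSIGNED_LONG_LONG, source, comm);
	// Phase 2: non-source ranks allocate, then everyone receives the payload.
	if(rank != source) data.resize(count);
	MPI_Bcast(data.data(), static_cast<int>(count), MPI_UNSIGNED_CHAR, source, comm);
	return data;
}
```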
diff --git a/src/NiceMPI_tests.cpp b/src/NiceMPI_tests.cpp
index 3b72e73..db52d9c 100644
--- a/src/NiceMPI_tests.cpp
+++ b/src/NiceMPI_tests.cpp
@@ -54,12 +54,7 @@ PODtype createPODtypeForRank(int rank) {
 		result.theInt = rank*2;
 		return result;
 	}
-	void expectNear(const PODtype& expected, const PODtype& actual, double tolerance) {
-		EXPECT_EQ(expected.theInt, actual.theInt);
-		EXPECT_NEAR(expected.theDouble, actual.theDouble, tolerance);
-		EXPECT_EQ(expected.theChar, actual.theChar);
-	}
-	void testGather(const std::vector<PODtype>& gathered) {
+	void expectGathered(const std::vector<PODtype>& gathered) {
 		ASSERT_EQ(mpiWorld().size(),gathered.size());
 		for(int i=0; i<mpiWorld().size(); ++i) expectNear(createPODtypeForRank(i),gathered[i],defaultTolerance);
 	}
@@ … @@
+	void expectGatheredCollection(const std::vector<PODtype>& gathered, const int sizeByProcess) {
+		ASSERT_EQ(mpiWorld().size()*sizeByProcess,gathered.size());
+		for(int i=0; i<mpiWorld().size()*sizeByProcess; ++i)
+			expectNear(createPODtypeForRank(i/sizeByProcess),gathered[i],defaultTolerance);
+	}
+	template<typename CollectionType>
+	void testAsyncSendAndReceiveCollection() {
+		if(sourceIndex == destinationIndex) return;
+		CollectionType toSend;
+		const int count = 2;
+		if(mpiWorld().rank() == sourceIndex) {
+			toSend = {{ podTypeInstance, podTypeInstance }};
+			SendRequest r = mpiWorld().asyncSend(toSend,destinationIndex);
+			r.wait();
+		}
+		if(mpiWorld().rank() == destinationIndex) {
+			ReceiveRequest<CollectionType> r = mpiWorld().asyncReceive<CollectionType>(count,sourceIndex);
+			r.wait();
+			std::vector<PODtype> data = r.take();
+			EXPECT_EQ(count,data.size());
+			for(auto&& x: data) expectNear(podTypeInstance, x, defaultTolerance);
+		}
+	}
+	template<typename CollectionType>
+	void testBroadcastCollection() {
+		CollectionType data;
+		if(mpiWorld().rank() == sourceIndex) data = {{ podTypeInstance, podTypeInstance }};
+		const CollectionType results = mpiWorld().broadcast(sourceIndex, data);
+		EXPECT_EQ(2,results.size());
+		for(auto&& x: results) expectNear(podTypeInstance, x, defaultTolerance);
+	}
+	template<typename CollectionType>
+	void testAllGather() {
+		PODtype x = createPODtypeForRank(mpiWorld().rank());
+		const int sizeByProcess = 2;
+		const CollectionType toSend = {{ x, x }};
+		const std::vector<PODtype> gathered = mpiWorld().allGather(toSend);
+		expectGatheredCollection(gathered,sizeByProcess);
+	}
+	template<typename CollectionType>
+	void testGather() {
+		PODtype x = createPODtypeForRank(mpiWorld().rank());
+		const int sizeByProcess = 2;
+		const CollectionType toSend = {{ x, x }};
+		const std::vector<PODtype> gathered = mpiWorld().gather(sourceIndex, toSend );
+		if(mpiWorld().rank()==sourceIndex) expectGatheredCollection(gathered,sizeByProcess);
+		else EXPECT_EQ(0,gathered.size());
+	}
+	template<typename CollectionType>
+	void testSendAndReceiveCollection() {
+		if(sourceIndex == destinationIndex) return;
+		if(mpiWorld().rank() == sourceIndex) {
+			const CollectionType toSend = {{ podTypeInstance, podTypeInstance }};
+			mpiWorld().sendAndBlock(toSend,destinationIndex);
+		}
+		if(mpiWorld().rank() == destinationIndex) {
+			const int count = 2;
+			const CollectionType results = mpiWorld().receiveAndBlock<CollectionType>(count,sourceIndex);
+			EXPECT_EQ(count,results.size());
+			for(auto&& x: results) expectNear(podTypeInstance, x, defaultTolerance);
+		}
+	}
 
 	const Communicator world;
 	const int sourceIndex = 0;
@@ -194,12 +260,12 @@ TEST_F(NiceMPItests, scatterTwoWithSpare) {
 }
 TEST_F(NiceMPItests, gather) {
 	const std::vector<PODtype> gathered = mpiWorld().gather(sourceIndex, createPODtypeForRank(mpiWorld().rank()) );
-	if(mpiWorld().rank()==sourceIndex) testGather(gathered);
+	if(mpiWorld().rank()==sourceIndex) expectGathered(gathered);
 	else EXPECT_EQ(0,gathered.size());
 }
 TEST_F(NiceMPItests, allGather) {
 	const PODtype myData = createPODtypeForRank(mpiWorld().rank());
-	testGather(mpiWorld().allGather(myData));
+	expectGathered(mpiWorld().allGather(myData));
 }
 
@@ -242,7 +308,7 @@ TEST_F(NiceMPItests, varyingGatherOneFromEach) {
 	const std::vector<PODtype> data = { createPODtypeForRank(mpiWorld().rank()) };
 	const std::vector<int> receiveCounts(mpiWorld().size(),1);
 	const std::vector<PODtype> gathered = mpiWorld().varyingGather(sourceIndex,data,receiveCounts);
-	if(mpiWorld().rank()==sourceIndex) testGather(gathered);
+	if(mpiWorld().rank()==sourceIndex) expectGathered(gathered);
 	else EXPECT_EQ(0,gathered.size());
 }
 TEST_F(NiceMPItests, varyingGatherWithDisplacements) {
@@ -273,7 +339,7 @@ TEST_F(NiceMPItests, varyingAllGatherOneFromEach) {
 	const std::vector<PODtype> data = { createPODtypeForRank(mpiWorld().rank()) };
 	const std::vector<int> receiveCounts(mpiWorld().size(),1);
 	const std::vector<PODtype> gathered = mpiWorld().varyingAllGather(data,receiveCounts);
-	testGather(gathered);
+	expectGathered(gathered);
 }
 TEST_F(NiceMPItests, varyingAllGatherWithDisplacements) {
 	std::vector<PODtype> data;
@@ -318,8 +384,9 @@ TEST_F(NiceMPItests, asyncSendAndReceiveAndWait) {
 	if(mpiWorld().rank() == destinationIndex) {
 		ReceiveRequest<PODtype> r = mpiWorld().asyncReceive<PODtype>(sourceIndex);
 		r.wait();
-		std::unique_ptr<PODtype> data = r.take();
-		expectNear(podTypeInstance, *data, defaultTolerance);
+		std::vector<PODtype> data = r.take();
+		ASSERT_EQ(1,data.size());
+		expectNear(podTypeInstance, data[0], defaultTolerance);
 	}
 }
 TEST_F(NiceMPItests, asyncSendAndReceiveWithTag) {
@@ -332,8 +399,9 @@
 	if(mpiWorld().rank() == destinationIndex) {
 		ReceiveRequest<PODtype> r = mpiWorld().asyncReceive<PODtype>(sourceIndex,MPI_ANY_TAG);
 		r.wait();
-		std::unique_ptr<PODtype> data = r.take();
-		expectNear(podTypeInstance, *data, defaultTolerance);
+		std::vector<PODtype> data = r.take();
+		ASSERT_EQ(1,data.size());
+		expectNear(podTypeInstance, data[0], defaultTolerance);
 	}
 }
 TEST_F(NiceMPItests, asyncSendAndReceiveAndTest) {
@@ -345,7 +413,40 @@
 	if(mpiWorld().rank() == destinationIndex) {
 		ReceiveRequest<PODtype> r = mpiWorld().asyncReceive<PODtype>(sourceIndex);
 		while(!r.isCompleted()) std::this_thread::sleep_for(std::chrono::microseconds{});
-		std::unique_ptr<PODtype> data = r.take();
-		expectNear(podTypeInstance, *data, defaultTolerance);
+		std::vector<PODtype> data = r.take();
+		ASSERT_EQ(1,data.size());
+		expectNear(podTypeInstance, data[0], defaultTolerance);
 	}
 }
+
+
+TEST_F(NiceMPItests, sendAndReceiveAnythingVector) {
+	testSendAndReceiveCollection<std::vector<PODtype>>();
+}
+TEST_F(NiceMPItests, sendAndReceiveAnythingArray) {
+	testSendAndReceiveCollection<std::array<PODtype,2>>();
+}
+TEST_F(NiceMPItests, broadcastVector) {
+	testBroadcastCollection<std::vector<PODtype>>();
+}
+TEST_F(NiceMPItests, broadcastArray) {
+	testBroadcastCollection<std::array<PODtype,2>>();
+}
+TEST_F(NiceMPItests, gatherVector) {
+	testGather<std::vector<PODtype>>();
+}
+TEST_F(NiceMPItests, gatherArray) {
+	testGather<std::array<PODtype,2>>();
+}
+TEST_F(NiceMPItests, allGatherVector) {
+	testAllGather<std::vector<PODtype>>();
+}
+TEST_F(NiceMPItests, allGatherArray) {
+	testAllGather<std::array<PODtype,2>>();
+}
+TEST_F(NiceMPItests, asyncSendAndReceiveAndWaitVector) {
+	testAsyncSendAndReceiveCollection<std::vector<PODtype>>();
+}
+TEST_F(NiceMPItests, asyncSendAndReceiveAndWaitArray) {
+	testAsyncSendAndReceiveCollection<std::array<PODtype,2>>();
+}
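Taken together, a round-trip with the new collection overloads might look like the sketch below. It is not part of the patch: `roundTrip`, `sample`, and the rank constants are illustrative, MPI is assumed to be initialized, at least two ranks are assumed (as in the tests), and namespace qualification is omitted.

```c++
#include <array>
#include <vector>
#include <NiceMPI/NiceMPI.h>

// Illustrative only. PODtype mirrors the struct used in the tests.
struct PODtype { int theInt; double theDouble; char theChar; };

void roundTrip() {
	const int sourceIndex = 0;
	const int destinationIndex = 1;
	const int count = 3;
	const PODtype sample{42, 3.14, 'x'};

	if(mpiWorld().rank() == sourceIndex) {
		// Blocking send of a runtime-sized vector of PODs...
		mpiWorld().sendAndBlock(std::vector<PODtype>(count,sample),destinationIndex);
		// ...followed by a non-blocking send of a fixed-size array.
		const std::array<PODtype,2> pair = {{ sample, sample }};
		SendRequest s = mpiWorld().asyncSend(pair,destinationIndex);
		s.wait();
	}
	if(mpiWorld().rank() == destinationIndex) {
		// Messages between the same pair of ranks match in order: vector first.
		const std::vector<PODtype> received =
			mpiWorld().receiveAndBlock<std::vector<PODtype>>(count,sourceIndex);
		ReceiveRequest<std::array<PODtype,2>> r =
			mpiWorld().asyncReceive<std::array<PODtype,2>>(2,sourceIndex);
		r.wait();
		const std::vector<PODtype> pair = r.take(); // take() always yields a vector
	}
}
```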