diff --git a/src/OpenFOAM/db/IOstreams/Pstreams/Pstream.H b/src/OpenFOAM/db/IOstreams/Pstreams/Pstream.H index 75cd715a3b..d7da7aec0e 100644 --- a/src/OpenFOAM/db/IOstreams/Pstreams/Pstream.H +++ b/src/OpenFOAM/db/IOstreams/Pstreams/Pstream.H @@ -143,6 +143,15 @@ public: const label comm = UPstream::worldComm ); + //- Generic broadcast multiple values (contiguous or non-contiguous) + //- to all processes in communicator. + template + static void genericListBroadcast + ( + ListType& values, + const label comm = UPstream::worldComm + ); + //- Broadcast value (contiguous or non-contiguous) //- to all processes in communicator. template diff --git a/src/OpenFOAM/db/IOstreams/Pstreams/PstreamBroadcast.C b/src/OpenFOAM/db/IOstreams/Pstreams/PstreamBroadcast.C index f0a2420f94..f4a92b88bc 100644 --- a/src/OpenFOAM/db/IOstreams/Pstreams/PstreamBroadcast.C +++ b/src/OpenFOAM/db/IOstreams/Pstreams/PstreamBroadcast.C @@ -51,6 +51,40 @@ void Foam::Pstream::genericBroadcast(T& value, const label comm) } +template +void Foam::Pstream::genericListBroadcast(ListType& values, const label comm) +{ + if (!is_contiguous::value) + { + Pstream::genericBroadcast(values, comm); + } + else if (UPstream::parRun() && UPstream::nProcs(comm) > 1) + { + // Broadcast the size + label len(values.size()); + UPstream::broadcast + ( + reinterpret_cast(&len), + sizeof(label), + comm, + UPstream::masterNo() + ); + values.resize_nocopy(len); // A no-op on master + + if (len) + { + UPstream::broadcast + ( + values.data_bytes(), + values.size_bytes(), + comm, + UPstream::masterNo() + ); + } + } +} + + template void Foam::Pstream::broadcast(T& value, const label comm) { @@ -74,68 +108,14 @@ void Foam::Pstream::broadcast(T& value, const label comm) template void Foam::Pstream::broadcast(List& values, const label comm) { - if (!is_contiguous::value) - { - Pstream::genericBroadcast(values, comm); - } - else if (UPstream::parRun() && UPstream::nProcs(comm) > 1) - { - // Broadcast the size - label len(values.size()); - UPstream::broadcast - ( - reinterpret_cast(&len), - sizeof(label), - comm, - UPstream::masterNo() - ); - values.resize_nocopy(len); // A no-op on master - - if (len) - { - UPstream::broadcast - ( - values.data_bytes(), - values.size_bytes(), - comm, - UPstream::masterNo() - ); - } - } + Pstream::genericListBroadcast(values, comm); } template void Foam::Pstream::broadcast(DynamicList& values, const label comm) { - if (!is_contiguous::value) - { - Pstream::genericBroadcast(values, comm); - } - else if (UPstream::parRun() && UPstream::nProcs(comm) > 1) - { - // Broadcast the size - label len(values.size()); - UPstream::broadcast - ( - reinterpret_cast(&len), - sizeof(label), - comm, - UPstream::masterNo() - ); - values.resize_nocopy(len); // A no-op on master - - if (len) - { - UPstream::broadcast - ( - values.data_bytes(), - values.size_bytes(), - comm, - UPstream::masterNo() - ); - } - } + Pstream::genericListBroadcast(values, comm); } diff --git a/src/OpenFOAM/db/IOstreams/Pstreams/PstreamReduceOps.H b/src/OpenFOAM/db/IOstreams/Pstreams/PstreamReduceOps.H index 2ebf11e75c..f7942e42ad 100644 --- a/src/OpenFOAM/db/IOstreams/Pstreams/PstreamReduceOps.H +++ b/src/OpenFOAM/db/IOstreams/Pstreams/PstreamReduceOps.H @@ -45,7 +45,8 @@ namespace Foam // * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * // -//- Reduce operation with user specified communication schedule +//- Reduce inplace (cf. MPI Allreduce) +//- using specified communication schedule. 
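Caller-side, the consolidated list broadcast above is used as follows; a minimal sketch, assuming an initialised OpenFOAM parallel run (the function name and list contents are hypothetical):

    #include "Pstream.H"
    #include "labelList.H"

    using namespace Foam;

    void broadcastListExample()
    {
        labelList values;

        if (Pstream::master())
        {
            values = labelList({10, 20, 30});  // contents known on master only
        }

        // For contiguous types this now routes through genericListBroadcast:
        // the size is broadcast first, non-master ranks resize to match,
        // then the payload is broadcast as raw bytes.
        Pstream::broadcast(values, UPstream::worldComm);
    }
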
template void reduce ( @@ -66,7 +67,8 @@ void reduce } -//- Reduce (inplace) using either linear or tree communication schedule +//- Reduce inplace (cf. MPI Allreduce) +//- using linear/tree communication schedule template void reduce ( @@ -99,7 +101,8 @@ T returnReduce } -//- Reduce with sum of both value and count (for averaging) +//- Reduce inplace (cf. MPI Allreduce) +//- the sum of both value and count (for averaging) template void sumReduce ( @@ -117,7 +120,8 @@ void sumReduce } -//- Reduce multiple values (identical size on all processes!) +//- Reduce inplace (cf. MPI Allreduce) +//- multiple values (identical size on all processes!) template void reduce ( @@ -131,7 +135,8 @@ void reduce NotImplemented; } -//- Non-blocking reduce single value. Sets request. +//- Non-blocking reduce inplace (cf. MPI Iallreduce) +//- single value. Sets request. template void reduce ( @@ -145,8 +150,9 @@ void reduce NotImplemented; } -//- Non-blocking reduce multiple values (identical size on all processes!) -//- Sets request. +//- Non-blocking reduce inplace (cf. MPI Iallreduce) +//- of multiple values (same size on all processes!) +// Sets request. template void reduce ( @@ -166,7 +172,7 @@ void reduce // Specialisations for bool -//- Logical (and) reduction +//- Logical (and) reduction (cf. MPI AllReduce) void reduce ( bool& value, @@ -175,7 +181,7 @@ void reduce const label comm = UPstream::worldComm ); -//- Logical (or) reduction +//- Logical (or) reduction (cf. MPI AllReduce) void reduce ( bool& value, diff --git a/src/OpenFOAM/db/IOstreams/Pstreams/UPstream.H b/src/OpenFOAM/db/IOstreams/Pstreams/UPstream.H index 177365747f..895ba8e157 100644 --- a/src/OpenFOAM/db/IOstreams/Pstreams/UPstream.H +++ b/src/OpenFOAM/db/IOstreams/Pstreams/UPstream.H @@ -585,78 +585,89 @@ public: //- Shutdown (finalize) MPI as required and exit program with errNo. static void exit(int errNo = 1); - //- Exchange label with all processors (in the communicator). - // sendData[proci] is the label to send to proci. + //- Exchange integer data with all processors (in the communicator). + // \c sendData[proci] is the value to send to proci. // After return recvData contains the data from the other processors. static void allToAll ( - const labelUList& sendData, - labelUList& recvData, + const UList& sendData, + UList& recvData, const label communicator = worldComm ); - //- Exchange data with all processors (in the communicator) - // sendSizes, sendOffsets give (per processor) the slice of - // sendData to send, similarly recvSizes, recvOffsets give the slice - // of recvData to receive + //- Exchange integer data with all processors (in the communicator). + // \c sendData[proci] is the value to send to proci. + // After return recvData contains the data from the other processors. static void allToAll ( - const char* sendData, - const UList& sendSizes, - const UList& sendOffsets, - - char* recvData, - const UList& recvSizes, - const UList& recvOffsets, - + const UList& sendData, + UList& recvData, const label communicator = worldComm ); - //- Receive data from all processors on the master (low-level) - static void mpiGather - ( - const char* sendData, - int sendSize, - char* recvData, - int recvSize, - const label communicator = worldComm + // Low-level gather/scatter routines + + #undef Pstream_CommonRoutines + #define Pstream_CommonRoutines(Native) \ + \ + /*! 
\brief Receive identically-sized \c Native data from all ranks */ \ + static void mpiGather \ + ( \ + const Native* sendData, \ + int sendCount, \ + char* recvData, \ + int recvCount, \ + const label communicator = worldComm \ + ); \ + \ + /*! \brief Send identically-sized \c Native data to all ranks */ \ + static void mpiScatter \ + ( \ + const Native* sendData, \ + int sendCount, \ + char* recvData, \ + int recvCount, \ + const label communicator = worldComm \ + ); \ + + Pstream_CommonRoutines(char); + + + #undef Pstream_CommonRoutines + #define Pstream_CommonRoutines(Native) \ + \ + /*! \brief Receive variable length \c Native data from all ranks */ \ + static void gather \ + ( \ + const Native* sendData, \ + int sendCount, /*!< Ignored on master if recvCount[0] == 0 */ \ + Native* recvData, /*!< Ignored on non-root rank */ \ + const UList& recvCounts, /*!< Ignored on non-root rank */ \ + const UList& recvOffsets, /*!< Ignored on non-root rank */ \ + const label communicator = worldComm \ + ); \ + \ + /*! \brief Send variable length \c Native data to all ranks */ \ + static void scatter \ + ( \ + const Native* sendData, /*!< Ignored on non-root rank */ \ + const UList& sendCounts, /*!< Ignored on non-root rank */ \ + const UList& sendOffsets, /*!< Ignored on non-root rank */ \ + Native* recvData, \ + int recvCount, \ + const label communicator = worldComm \ ); - //- Send data to all processors from master (low-level) - static void mpiScatter - ( - const char* sendData, - int sendSize, + Pstream_CommonRoutines(char); + Pstream_CommonRoutines(int32_t); + Pstream_CommonRoutines(int64_t); + Pstream_CommonRoutines(uint32_t); + Pstream_CommonRoutines(uint64_t); + Pstream_CommonRoutines(float); + Pstream_CommonRoutines(double); - char* recvData, - int recvSize, - const label communicator = worldComm - ); - - //- Receive data from all processors on the master - static void gather - ( - const char* sendData, - int sendSize, - - char* recvData, - const UList& recvSizes, - const UList& recvOffsets, - const label communicator = worldComm - ); - - //- Send data to all processors from the root of the communicator - static void scatter - ( - const char* sendData, - const UList& sendSizes, - const UList& sendOffsets, - - char* recvData, - int recvSize, - const label communicator = worldComm - ); + #undef Pstream_CommonRoutines // Gather single, contiguous value(s) diff --git a/src/Pstream/dummy/Make/files b/src/Pstream/dummy/Make/files index 90b44827f9..7ba370e470 100644 --- a/src/Pstream/dummy/Make/files +++ b/src/Pstream/dummy/Make/files @@ -1,5 +1,7 @@ UPstream.C +UPstreamAllToAll.C UPstreamBroadcast.C +UPstreamGatherScatter.C UPstreamReduce.C UIPstreamRead.C diff --git a/src/Pstream/dummy/UPstream.C b/src/Pstream/dummy/UPstream.C index 0f73b4b89f..5ba1a5330c 100644 --- a/src/Pstream/dummy/UPstream.C +++ b/src/Pstream/dummy/UPstream.C @@ -75,75 +75,6 @@ void Foam::UPstream::abort() // * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * // -void Foam::UPstream::allToAll -( - const labelUList& sendData, - labelUList& recvData, - const label communicator -) -{ - recvData.deepCopy(sendData); -} - - -void Foam::UPstream::mpiGather -( - const char* sendData, - int sendSize, - - char* recvData, - int recvSize, - const label communicator -) -{ - std::memmove(recvData, sendData, sendSize); -} - - -void Foam::UPstream::mpiScatter -( - const char* sendData, - int sendSize, - - char* recvData, - int recvSize, - const label communicator -) -{ - std::memmove(recvData, sendData, sendSize); -} 
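For readability, this is roughly what one instantiation of the variable-length declaration macro above expands to; a hand-expansion sketch for Native = double (the real text is produced by the preprocessor, and the count/offset list type is assumed here to be UList<int>):

    //- Receive variable length double data from all ranks
    static void gather
    (
        const double* sendData,
        int sendCount,                  // Ignored on master if recvCounts[0] == 0
        double* recvData,               // Ignored on non-root rank
        const UList<int>& recvCounts,   // Ignored on non-root rank
        const UList<int>& recvOffsets,  // Ignored on non-root rank
        const label communicator = worldComm
    );

    //- Send variable length double data to all ranks
    static void scatter
    (
        const double* sendData,         // Ignored on non-root rank
        const UList<int>& sendCounts,   // Ignored on non-root rank
        const UList<int>& sendOffsets,  // Ignored on non-root rank
        double* recvData,
        int recvCount,
        const label communicator = worldComm
    );
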
- - -void Foam::UPstream::gather -( - const char* sendData, - int sendSize, - - char* recvData, - const UList& recvSizes, - const UList& recvOffsets, - const label communicator -) -{ - std::memmove(recvData, sendData, sendSize); -} - - -void Foam::UPstream::scatter -( - const char* sendData, - const UList& sendSizes, - const UList& sendOffsets, - - char* recvData, - int recvSize, - const label communicator -) -{ - std::memmove(recvData, sendData, recvSize); -} - - void Foam::UPstream::allocatePstreamCommunicator ( const label, diff --git a/src/Pstream/dummy/UPstreamAllToAll.C b/src/Pstream/dummy/UPstreamAllToAll.C new file mode 100644 index 0000000000..6983ae418d --- /dev/null +++ b/src/Pstream/dummy/UPstreamAllToAll.C @@ -0,0 +1,84 @@ +/*---------------------------------------------------------------------------*\ + ========= | + \\ / F ield | OpenFOAM: The Open Source CFD Toolbox + \\ / O peration | + \\ / A nd | www.openfoam.com + \\/ M anipulation | +------------------------------------------------------------------------------- + Copyright (C) 2022 OpenCFD Ltd. +------------------------------------------------------------------------------- +License + This file is part of OpenFOAM. + + OpenFOAM is free software: you can redistribute it and/or modify it + under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + OpenFOAM is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + for more details. + + You should have received a copy of the GNU General Public License + along with OpenFOAM. If not, see . 
+ +\*---------------------------------------------------------------------------*/ + +#include "UPstream.H" + +#include +#include // memmove + +// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * // + +#undef Pstream_CommonRoutines +#define Pstream_CommonRoutines(Native) \ +void Foam::UPstream::allToAll \ +( \ + const UList& sendData, \ + UList& recvData, \ + const label comm \ +) \ +{ \ + recvData.deepCopy(sendData); \ +} \ + + +Pstream_CommonRoutines(int32_t); +Pstream_CommonRoutines(int64_t); + +#undef Pstream_CommonRoutines + + +// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * // + +#undef Pstream_CommonRoutines +#define Pstream_CommonRoutines(Native) \ +void Foam::UPstream::allToAll \ +( \ + const Native* sendData, \ + const UList& sendCounts, \ + const UList& sendOffsets, \ + Native* recvData, \ + const UList& recvCounts, \ + const UList& recvOffsets, \ + const label comm \ +) \ +{ \ + if (recvCounts[0] != sendCounts[0]) \ + { \ + FatalErrorInFunction \ + << "Number to send " << sendCounts[0] \ + << " does not equal number to receive " << recvCounts[0] \ + << Foam::abort(FatalError); \ + } \ + std::memmove(recvData, sendData, recvCounts[0]*sizeof(Native)); \ +} + + +// Unused: Pstream_CommonRoutines(char); + +#undef Pstream_CommonRoutines + +// ************************************************************************* // diff --git a/src/Pstream/dummy/UPstreamBroadcast.C b/src/Pstream/dummy/UPstreamBroadcast.C index b4ace0a112..079512f255 100644 --- a/src/Pstream/dummy/UPstreamBroadcast.C +++ b/src/Pstream/dummy/UPstreamBroadcast.C @@ -33,7 +33,7 @@ bool Foam::UPstream::broadcast ( char* buf, const std::streamsize bufSize, - const label communicator, + const label comm, const int rootProcNo ) { diff --git a/src/Pstream/dummy/UPstreamGatherScatter.C b/src/Pstream/dummy/UPstreamGatherScatter.C new file mode 100644 index 0000000000..3a5f54e901 --- /dev/null +++ b/src/Pstream/dummy/UPstreamGatherScatter.C @@ -0,0 +1,112 @@ +/*---------------------------------------------------------------------------*\ + ========= | + \\ / F ield | OpenFOAM: The Open Source CFD Toolbox + \\ / O peration | + \\ / A nd | www.openfoam.com + \\/ M anipulation | +------------------------------------------------------------------------------- + Copyright (C) 2022 OpenCFD Ltd. +------------------------------------------------------------------------------- +License + This file is part of OpenFOAM. + + OpenFOAM is free software: you can redistribute it and/or modify it + under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + OpenFOAM is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + for more details. + + You should have received a copy of the GNU General Public License + along with OpenFOAM. If not, see . 
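Caller-side, the per-rank allToAll defined above is used as in the following minimal sketch (the function name is hypothetical); with the dummy library, or in a serial run, the exchange degenerates to a local copy:

    #include <cstdint>

    #include "UPstream.H"
    #include "List.H"

    using namespace Foam;

    void allToAllExample()
    {
        const label nProcs = UPstream::nProcs(UPstream::worldComm);

        // One value destined for every rank (including self)
        List<int32_t> sendData(nProcs, int32_t(UPstream::myProcNo()));
        List<int32_t> recvData(nProcs);

        // Dummy library / serial run: recvData.deepCopy(sendData)
        // MPI library: MPI_Alltoall with one element per rank
        UPstream::allToAll(sendData, recvData, UPstream::worldComm);
    }
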
+ +\*---------------------------------------------------------------------------*/ + +#include "UPstream.H" +#include // memmove + +// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * // + +#undef Pstream_CommonRoutines +#define Pstream_CommonRoutines(Native) \ +void Foam::UPstream::mpiGather \ +( \ + const Native* sendData, \ + int sendCount, \ + \ + Native* recvData, \ + int recvCount, \ + const label comm \ +) \ +{ \ + std::memmove(recvData, sendData, recvCount*sizeof(Native)); \ +} \ + \ + \ +void Foam::UPstream::mpiScatter \ +( \ + const Native* sendData, \ + int sendCount, \ + \ + Native* recvData, \ + int recvCount, \ + const label comm \ +) \ +{ \ + std::memmove(recvData, sendData, recvCount*sizeof(Native)); \ +} + +Pstream_CommonRoutines(char); + +#undef Pstream_CommonRoutines + + +// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * // + +#undef Pstream_CommonRoutines +#define Pstream_CommonRoutines(Native) \ +void Foam::UPstream::gather \ +( \ + const Native* sendData, \ + int sendCount, \ + \ + Native* recvData, \ + const UList& recvCounts, \ + const UList& recvOffsets, \ + const label comm \ +) \ +{ \ + /* recvCounts[0] may be invalid - use sendCount instead */ \ + std::memmove(recvData, sendData, sendCount*sizeof(Native)); \ +} \ + \ +void Foam::UPstream::scatter \ +( \ + const Native* sendData, \ + const UList& sendCounts, \ + const UList& sendOffsets, \ + \ + Native* recvData, \ + int recvCount, \ + const label comm \ +) \ +{ \ + std::memmove(recvData, sendData, recvCount*sizeof(Native)); \ +} + + +//TDB: Pstream_CommonRoutines(bool); +Pstream_CommonRoutines(char); +Pstream_CommonRoutines(int32_t); +Pstream_CommonRoutines(int64_t); +Pstream_CommonRoutines(uint32_t); +Pstream_CommonRoutines(uint64_t); +Pstream_CommonRoutines(float); +Pstream_CommonRoutines(double); + +#undef Pstream_CommonRoutines + +// ************************************************************************* // diff --git a/src/Pstream/mpi/Make/files b/src/Pstream/mpi/Make/files index b117976a38..217347cc29 100644 --- a/src/Pstream/mpi/Make/files +++ b/src/Pstream/mpi/Make/files @@ -1,6 +1,8 @@ PstreamGlobals.C UPstream.C +UPstreamAllToAll.C UPstreamBroadcast.C +UPstreamGatherScatter.C UPstreamReduce.C UIPstreamRead.C diff --git a/src/Pstream/mpi/UPstream.C b/src/Pstream/mpi/UPstream.C index a1cd9c391f..2e971a0201 100644 --- a/src/Pstream/mpi/UPstream.C +++ b/src/Pstream/mpi/UPstream.C @@ -31,7 +31,7 @@ License #include "PstreamGlobals.H" #include "profilingPstream.H" #include "SubList.H" -#include "allReduce.H" +#include "UPstreamWrapping.H" #include "int.H" #include "collatedFileOperation.H" @@ -482,428 +482,6 @@ void Foam::UPstream::abort() // * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * // -void Foam::UPstream::allToAll -( - const labelUList& sendData, - labelUList& recvData, - const label communicator -) -{ - const label np = nProcs(communicator); - - if (UPstream::warnComm != -1 && communicator != UPstream::warnComm) - { - Pout<< "** allToAll :" - << " np:" << np - << " sendData:" << sendData.size() - << " with comm:" << communicator - << " warnComm:" << UPstream::warnComm - << endl; - error::printStack(Pout); - } - - if (sendData.size() != np || recvData.size() != np) - { - FatalErrorInFunction - << "Size of sendData " << sendData.size() - << " or size of recvData " << recvData.size() - << " is not equal to the number of processors in the domain " - << np - << Foam::abort(FatalError); - } - - if (!UPstream::parRun()) - { - 
recvData.deepCopy(sendData); - } - else - { - profilingPstream::beginTiming(); - - if - ( - MPI_Alltoall - ( - // NOTE: const_cast is a temporary hack for - // backward-compatibility with versions of OpenMPI < 1.7.4 - const_cast(sendData.cdata()), - sizeof(label), - MPI_BYTE, - recvData.data(), - sizeof(label), - MPI_BYTE, - PstreamGlobals::MPICommunicators_[communicator] - ) - ) - { - FatalErrorInFunction - << "MPI_Alltoall failed for " << sendData - << " on communicator " << communicator - << Foam::abort(FatalError); - } - - profilingPstream::addAllToAllTime(); - } -} - - -void Foam::UPstream::allToAll -( - const char* sendData, - const UList& sendSizes, - const UList& sendOffsets, - - char* recvData, - const UList& recvSizes, - const UList& recvOffsets, - - const label communicator -) -{ - const label np = nProcs(communicator); - - if (UPstream::warnComm != -1 && communicator != UPstream::warnComm) - { - Pout<< "** MPI_Alltoallv :" - << " sendSizes:" << sendSizes - << " sendOffsets:" << sendOffsets - << " with comm:" << communicator - << " warnComm:" << UPstream::warnComm - << endl; - error::printStack(Pout); - } - - if - ( - sendSizes.size() != np - || sendOffsets.size() != np - || recvSizes.size() != np - || recvOffsets.size() != np - ) - { - FatalErrorInFunction - << "Size of sendSize " << sendSizes.size() - << ", sendOffsets " << sendOffsets.size() - << ", recvSizes " << recvSizes.size() - << " or recvOffsets " << recvOffsets.size() - << " is not equal to the number of processors in the domain " - << np - << Foam::abort(FatalError); - } - - if (!UPstream::parRun()) - { - if (recvSizes[0] != sendSizes[0]) - { - FatalErrorInFunction - << "Bytes to send " << sendSizes[0] - << " does not equal bytes to receive " << recvSizes[0] - << Foam::abort(FatalError); - } - std::memmove(recvData, &sendData[sendOffsets[0]], recvSizes[0]); - } - else - { - profilingPstream::beginTiming(); - - if - ( - MPI_Alltoallv - ( - const_cast(sendData), - const_cast(sendSizes.cdata()), - const_cast(sendOffsets.cdata()), - MPI_BYTE, - recvData, - const_cast(recvSizes.cdata()), - const_cast(recvOffsets.cdata()), - MPI_BYTE, - PstreamGlobals::MPICommunicators_[communicator] - ) - ) - { - FatalErrorInFunction - << "MPI_Alltoallv failed for sendSizes " << sendSizes - << " recvSizes " << recvSizes - << " communicator " << communicator - << Foam::abort(FatalError); - } - - profilingPstream::addAllToAllTime(); - } -} - - -void Foam::UPstream::mpiGather -( - const char* sendData, - int sendSize, - - char* recvData, - int recvSize, - const label communicator -) -{ - const label np = nProcs(communicator); - - if (UPstream::warnComm != -1 && communicator != UPstream::warnComm) - { - Pout<< "** MPI_Gather :" - << " np:" << np - << " recvSize:" << recvSize - << " with comm:" << communicator - << " warnComm:" << UPstream::warnComm - << endl; - error::printStack(Pout); - } - - if (!UPstream::parRun()) - { - std::memmove(recvData, sendData, recvSize); - } - else - { - profilingPstream::beginTiming(); - - if - ( - MPI_Gather - ( - const_cast(sendData), - sendSize, - MPI_BYTE, - recvData, - recvSize, - MPI_BYTE, - 0, - MPI_Comm(PstreamGlobals::MPICommunicators_[communicator]) - ) - ) - { - FatalErrorInFunction - << "MPI_Gather failed for sendSize " << sendSize - << " recvSize " << recvSize - << " communicator " << communicator - << Foam::abort(FatalError); - } - - profilingPstream::addGatherTime(); - } -} - - -void Foam::UPstream::mpiScatter -( - const char* sendData, - int sendSize, - - char* recvData, - int recvSize, - const 
label communicator -) -{ - const label np = nProcs(communicator); - - if (UPstream::warnComm != -1 && communicator != UPstream::warnComm) - { - Pout<< "** MPI_Scatter :" - << " np:" << np - << " recvSize:" << recvSize - << " with comm:" << communicator - << " warnComm:" << UPstream::warnComm - << endl; - error::printStack(Pout); - } - - if (!UPstream::parRun()) - { - std::memmove(recvData, sendData, recvSize); - } - else - { - profilingPstream::beginTiming(); - - if - ( - MPI_Scatter - ( - const_cast(sendData), - sendSize, - MPI_BYTE, - recvData, - recvSize, - MPI_BYTE, - 0, - MPI_Comm(PstreamGlobals::MPICommunicators_[communicator]) - ) - ) - { - FatalErrorInFunction - << "MPI_Scatter failed for sendSize " << sendSize - << " recvSize " << recvSize - << " communicator " << communicator - << Foam::abort(FatalError); - } - - profilingPstream::addScatterTime(); - } -} - - -void Foam::UPstream::gather -( - const char* sendData, - int sendSize, - - char* recvData, - const UList& recvSizes, - const UList& recvOffsets, - const label communicator -) -{ - const label np = nProcs(communicator); - - if (UPstream::warnComm != -1 && communicator != UPstream::warnComm) - { - Pout<< "** MPI_Gatherv :" - << " np:" << np - << " recvSizes:" << recvSizes - << " recvOffsets:" << recvOffsets - << " with comm:" << communicator - << " warnComm:" << UPstream::warnComm - << endl; - error::printStack(Pout); - } - - if - ( - UPstream::master(communicator) - && (recvSizes.size() != np || recvOffsets.size() < np) - ) - { - // Note: allow recvOffsets to be e.g. 1 larger than np so we - // can easily loop over the result - - FatalErrorInFunction - << "Size of recvSizes " << recvSizes.size() - << " or recvOffsets " << recvOffsets.size() - << " is not equal to the number of processors in the domain " - << np - << Foam::abort(FatalError); - } - - if (!UPstream::parRun()) - { - // recvSizes[0] may be invalid - use sendSize instead - std::memmove(recvData, sendData, sendSize); - } - else - { - profilingPstream::beginTiming(); - - if - ( - MPI_Gatherv - ( - const_cast(sendData), - sendSize, - MPI_BYTE, - recvData, - const_cast(recvSizes.cdata()), - const_cast(recvOffsets.cdata()), - MPI_BYTE, - 0, - MPI_Comm(PstreamGlobals::MPICommunicators_[communicator]) - ) - ) - { - FatalErrorInFunction - << "MPI_Gatherv failed for sendSize " << sendSize - << " recvSizes " << recvSizes - << " communicator " << communicator - << Foam::abort(FatalError); - } - - profilingPstream::addGatherTime(); - } -} - - -void Foam::UPstream::scatter -( - const char* sendData, - const UList& sendSizes, - const UList& sendOffsets, - - char* recvData, - int recvSize, - const label communicator -) -{ - const label np = nProcs(communicator); - - if (UPstream::warnComm != -1 && communicator != UPstream::warnComm) - { - Pout<< "** MPI_Scatterv :" - << " np:" << np - << " sendSizes:" << sendSizes - << " sendOffsets:" << sendOffsets - << " with comm:" << communicator - << " warnComm:" << UPstream::warnComm - << endl; - error::printStack(Pout); - } - - if - ( - UPstream::master(communicator) - && (sendSizes.size() != np || sendOffsets.size() != np) - ) - { - FatalErrorInFunction - << "Size of sendSizes " << sendSizes.size() - << " or sendOffsets " << sendOffsets.size() - << " is not equal to the number of processors in the domain " - << np - << Foam::abort(FatalError); - } - - if (!UPstream::parRun()) - { - std::memmove(recvData, sendData, recvSize); - } - else - { - profilingPstream::beginTiming(); - - if - ( - MPI_Scatterv - ( - const_cast(sendData), - 
const_cast(sendSizes.cdata()), - const_cast(sendOffsets.cdata()), - MPI_BYTE, - recvData, - recvSize, - MPI_BYTE, - 0, - MPI_Comm(PstreamGlobals::MPICommunicators_[communicator]) - ) - ) - { - FatalErrorInFunction - << "MPI_Scatterv failed for sendSizes " << sendSizes - << " sendOffsets " << sendOffsets - << " communicator " << communicator - << Foam::abort(FatalError); - } - - profilingPstream::addScatterTime(); - } -} - - void Foam::UPstream::allocatePstreamCommunicator ( const label parentIndex, diff --git a/src/Pstream/mpi/UPstreamAllToAll.C b/src/Pstream/mpi/UPstreamAllToAll.C new file mode 100644 index 0000000000..fd873d0b1f --- /dev/null +++ b/src/Pstream/mpi/UPstreamAllToAll.C @@ -0,0 +1,86 @@ +/*---------------------------------------------------------------------------*\ + ========= | + \\ / F ield | OpenFOAM: The Open Source CFD Toolbox + \\ / O peration | + \\ / A nd | www.openfoam.com + \\/ M anipulation | +------------------------------------------------------------------------------- + Copyright (C) 2022 OpenCFD Ltd. +------------------------------------------------------------------------------- +License + This file is part of OpenFOAM. + + OpenFOAM is free software: you can redistribute it and/or modify it + under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + OpenFOAM is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + for more details. + + You should have received a copy of the GNU General Public License + along with OpenFOAM. If not, see . + +\*---------------------------------------------------------------------------*/ + +#include "Pstream.H" +#include "UPstreamWrapping.H" + +#include +#include +#include // memmove + +// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * // + +#undef Pstream_CommonRoutines +#define Pstream_CommonRoutines(Native, TaggedType) \ +void Foam::UPstream::allToAll \ +( \ + const UList& sendData, \ + UList& recvData, \ + const label comm \ +) \ +{ \ + PstreamDetail::allToAll \ + ( \ + sendData, recvData, TaggedType, comm \ + ); \ +} \ + + +Pstream_CommonRoutines(int32_t, MPI_INT32_T); +Pstream_CommonRoutines(int64_t, MPI_INT64_T); + +#undef Pstream_CommonRoutines + + +// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * // + +#undef Pstream_CommonRoutines +#define Pstream_CommonRoutines(Native, TaggedType) \ +void Foam::UPstream::allToAll \ +( \ + const Native* sendData, \ + const UList& sendCounts, \ + const UList& sendOffsets, \ + Native* recvData, \ + const UList& recvCounts, \ + const UList& recvOffsets, \ + const label comm \ +) \ +{ \ + PstreamDetail::allToAllv \ + ( \ + sendData, sendCounts, sendOffsets, \ + recvData, recvCounts, recvOffsets, \ + TaggedType, comm \ + ); \ +} + +// Unused: Pstream_CommonRoutines(char, MPI_BYTE); + +#undef Pstream_CommonRoutines + +// ************************************************************************* // diff --git a/src/Pstream/mpi/UPstreamBroadcast.C b/src/Pstream/mpi/UPstreamBroadcast.C index 52eeedad31..cdb09bf2a4 100644 --- a/src/Pstream/mpi/UPstreamBroadcast.C +++ b/src/Pstream/mpi/UPstreamBroadcast.C @@ -37,29 +37,29 @@ bool Foam::UPstream::broadcast ( char* buf, const std::streamsize bufSize, - const label communicator, + const label comm, const int rootProcNo ) { - if 
(!UPstream::parRun() || UPstream::nProcs(communicator) < 2) + if (!UPstream::parRun() || UPstream::nProcs(comm) < 2) { // Nothing to do - ignore return true; } - //Needed? PstreamGlobals::checkCommunicator(communicator, rootProcNo); + //Needed? PstreamGlobals::checkCommunicator(comm, rootProcNo); if (debug) { Pout<< "UPstream::broadcast : root:" << rootProcNo - << " comm:" << communicator + << " comm:" << comm << " size:" << label(bufSize) << Foam::endl; } - if (UPstream::warnComm != -1 && communicator != UPstream::warnComm) + if (UPstream::warnComm != -1 && comm != UPstream::warnComm) { Pout<< "UPstream::broadcast : root:" << rootProcNo - << " comm:" << communicator + << " comm:" << comm << " size:" << label(bufSize) << " warnComm:" << UPstream::warnComm << Foam::endl; @@ -74,7 +74,7 @@ bool Foam::UPstream::broadcast bufSize, MPI_BYTE, rootProcNo, - PstreamGlobals::MPICommunicators_[communicator] + PstreamGlobals::MPICommunicators_[comm] ); profilingPstream::addBroadcastTime(); diff --git a/src/Pstream/mpi/UPstreamGatherScatter.C b/src/Pstream/mpi/UPstreamGatherScatter.C new file mode 100644 index 0000000000..ee0b6f8b3c --- /dev/null +++ b/src/Pstream/mpi/UPstreamGatherScatter.C @@ -0,0 +1,133 @@ +/*---------------------------------------------------------------------------*\ + ========= | + \\ / F ield | OpenFOAM: The Open Source CFD Toolbox + \\ / O peration | + \\ / A nd | www.openfoam.com + \\/ M anipulation | +------------------------------------------------------------------------------- + Copyright (C) 2022 OpenCFD Ltd. +------------------------------------------------------------------------------- +License + This file is part of OpenFOAM. + + OpenFOAM is free software: you can redistribute it and/or modify it + under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + OpenFOAM is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + for more details. + + You should have received a copy of the GNU General Public License + along with OpenFOAM. If not, see . 
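The typed gather/scatter routines that this new MPI-backed file goes on to define take explicit per-rank counts and offsets. A caller-side sketch (hypothetical function; for brevity it assumes every rank contributes the same number of values, so the master can fill the counts locally):

    #include "UPstream.H"
    #include "List.H"

    using namespace Foam;

    void gathervExample(const UList<double>& myValues)
    {
        const label nProcs = UPstream::nProcs(UPstream::worldComm);

        // Receive counts/offsets are only meaningful on the master
        List<int> recvCounts(nProcs, int(myValues.size()));
        List<int> recvOffsets(nProcs + 1, 0);
        for (label i = 1; i <= nProcs; ++i)
        {
            recvOffsets[i] = recvOffsets[i-1] + recvCounts[i-1];
        }

        List<double> allValues;
        if (UPstream::master())
        {
            allValues.resize(recvOffsets[nProcs]);
        }

        // MPI library: MPI_Gatherv; serial/dummy library: a plain memmove
        UPstream::gather
        (
            myValues.cdata(),
            int(myValues.size()),
            allValues.data(),   // ignored on non-root ranks
            recvCounts,
            recvOffsets,
            UPstream::worldComm
        );
    }
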
+ +\*---------------------------------------------------------------------------*/ + +#include "Pstream.H" +#include "UPstreamWrapping.H" + +#include +#include +#include // memmove + +// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * // + +#undef Pstream_CommonRoutines +#define Pstream_CommonRoutines(Native, TaggedType) \ +void Foam::UPstream::mpiGather \ +( \ + const Native* sendData, \ + int sendCount, \ + \ + Native* recvData, \ + int recvCount, \ + const label comm \ +) \ +{ \ + PstreamDetail::gather \ + ( \ + sendData, sendCount, recvData, recvCount, \ + TaggedType, comm \ + ); \ +} \ + \ + \ +void Foam::UPstream::mpiScatter \ +( \ + const Native* sendData, \ + int sendCount, \ + \ + Native* recvData, \ + int recvCount, \ + const label comm \ +) \ +{ \ + PstreamDetail::scatter \ + ( \ + sendData, sendCount, recvData, recvCount, \ + TaggedType, comm \ + ); \ +} + +Pstream_CommonRoutines(char, MPI_BYTE); + +#undef Pstream_CommonRoutines + + +// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * // + +#undef Pstream_CommonRoutines +#define Pstream_CommonRoutines(Native, TaggedType) \ +void Foam::UPstream::gather \ +( \ + const Native* sendData, \ + int sendCount, \ + \ + Native* recvData, \ + const UList& recvCounts, \ + const UList& recvOffsets, \ + const label comm \ +) \ +{ \ + PstreamDetail::gatherv \ + ( \ + sendData, sendCount, \ + recvData, recvCounts, recvOffsets, \ + TaggedType, comm \ + ); \ +} \ + \ +void Foam::UPstream::scatter \ +( \ + const Native* sendData, \ + const UList& sendCounts, \ + const UList& sendOffsets, \ + \ + Native* recvData, \ + int recvCount, \ + const label comm \ +) \ +{ \ + PstreamDetail::scatterv \ + ( \ + sendData, sendCounts, sendOffsets, \ + recvData, recvCount, \ + TaggedType, comm \ + ); \ +} + + +//TDB: Pstream_CommonRoutines(bool, MPI_C_BOOL); +Pstream_CommonRoutines(char, MPI_BYTE); +Pstream_CommonRoutines(int32_t, MPI_INT32_T); +Pstream_CommonRoutines(int64_t, MPI_INT64_T); +Pstream_CommonRoutines(uint32_t, MPI_UINT32_T); +Pstream_CommonRoutines(uint64_t, MPI_UINT64_T); +Pstream_CommonRoutines(float, MPI_FLOAT); +Pstream_CommonRoutines(double, MPI_DOUBLE); + +#undef Pstream_CommonRoutines + +// ************************************************************************* // diff --git a/src/Pstream/mpi/UPstreamReduce.C b/src/Pstream/mpi/UPstreamReduce.C index a40e15a347..970a1c59c7 100644 --- a/src/Pstream/mpi/UPstreamReduce.C +++ b/src/Pstream/mpi/UPstreamReduce.C @@ -27,7 +27,7 @@ License #include "Pstream.H" #include "PstreamReduceOps.H" -#include "allReduce.H" +#include "UPstreamWrapping.H" #include #include @@ -179,9 +179,9 @@ void Foam::reduce \ label& requestID \ ) \ { \ - PstreamDetail::iallReduce \ + PstreamDetail::allReduce \ ( \ - &value, 1, TaggedType, MPI_SUM, comm, requestID \ + &value, 1, TaggedType, MPI_SUM, comm, &requestID \ ); \ } \ \ @@ -195,9 +195,9 @@ void Foam::reduce \ label& requestID \ ) \ { \ - PstreamDetail::iallReduce \ + PstreamDetail::allReduce \ ( \ - values, size, TaggedType, MPI_SUM, comm, requestID \ + values, size, TaggedType, MPI_SUM, comm, &requestID \ ); \ } diff --git a/src/Pstream/mpi/UPstreamWrapping.H b/src/Pstream/mpi/UPstreamWrapping.H new file mode 100644 index 0000000000..f1553125e3 --- /dev/null +++ b/src/Pstream/mpi/UPstreamWrapping.H @@ -0,0 +1,202 @@ +/*---------------------------------------------------------------------------*\ + ========= | + \\ / F ield | OpenFOAM: The Open Source CFD Toolbox + \\ / O peration | + \\ / A nd | www.openfoam.com + 
\\/ M anipulation | +------------------------------------------------------------------------------- + Copyright (C) 2012-2016 OpenFOAM Foundation + Copyright (C) 2022 OpenCFD Ltd. +------------------------------------------------------------------------------- +License + This file is part of OpenFOAM. + + OpenFOAM is free software: you can redistribute it and/or modify it + under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + OpenFOAM is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + for more details. + + You should have received a copy of the GNU General Public License + along with OpenFOAM. If not, see . + +Namespace + Foam::PstreamDetail + +Description + Some implementation details for Pstream and/or MPI. + +InNamespace + Foam::PstreamDetail + +Description + Functions to wrap MPI_Bcast, MPI_Allreduce, MPI_Iallreduce etc. + +SourceFiles + UPstreamWrappingTemplates.C + +\*---------------------------------------------------------------------------*/ + +#ifndef Foam_UPstreamWrapping_H +#define Foam_UPstreamWrapping_H + +#include "UPstream.H" +#include + +// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * // + +namespace Foam +{ +namespace PstreamDetail +{ + +// MPI_Bcast, using root=0 +template +void broadcast0 +( + Type* values, + int count, + MPI_Datatype datatype, + const label comm +); + +// MPI_Reduce, using root=0 +template +void reduce0 +( + Type* values, + int count, + MPI_Datatype datatype, + MPI_Op optype, + const label comm +); + +// MPI_Allreduce or MPI_Iallreduce +template +void allReduce +( + Type* values, + int count, + MPI_Datatype datatype, + MPI_Op optype, + const label comm, // Communicator + label* requestID = nullptr // Non-null for MPI_Iallreduce +); + + +// MPI_Alltoall or MPI_Ialltoall with one element per rank +template +void allToAll +( + const UList& sendData, + UList& recvData, + MPI_Datatype datatype, + const label comm, // Communicator + label* requestID = nullptr // Non-null for MPI_Ialltoall +); + + +// MPI_Alltoallv or MPI_Ialltoallv +template +void allToAllv +( + const Type* sendData, + const UList& sendCounts, + const UList& sendOffsets, + + Type* recvData, + const UList& recvCounts, + const UList& recvOffsets, + + MPI_Datatype datatype, + const label comm, // Communicator + label* requestID = nullptr // Non-null for MPI_Ialltoallv +); + + +// MPI_Gather or MPI_Igather +template +void gather +( + const Type* sendData, + int sendCount, + + Type* recvData, // Ignored on non-root rank + int recvCount, // Ignored on non-root rank + + MPI_Datatype datatype, // The send/recv data type + const label comm, // Communicator + label* requestID = nullptr // Non-null for MPI_Igather +); + + +// MPI_Scatter or MPI_Iscatter +template +void scatter +( + const Type* sendData, // Ignored on non-root rank + int sendCount, // Ignored on non-root rank + + Type* recvData, + int recvCount, + + MPI_Datatype datatype, // The send/recv data type + const label comm, // Communicator + label* requestID = nullptr // Non-null for MPI_Iscatter +); + + +// MPI_Gatherv or MPI_Igatherv +template +void gatherv +( + const Type* sendData, + int sendCount, // Ignored on master if recvCounts[0] == 0 + + Type* recvData, // Ignored on non-root rank + const UList& recvCounts, // Ignored on 
non-root rank + const UList& recvOffsets, // Ignored on non-root rank + + MPI_Datatype datatype, // The send/recv data type + const label comm, // Communicator + label* requestID = nullptr // Non-null for MPI_Igatherv +); + + +// MPI_Scatterv or MPI_Iscatterv +template +void scatterv +( + const Type* sendData, // Ignored on non-root rank + const UList& sendCounts, // Ignored on non-root rank + const UList& sendOffsets, // Ignored on non-root rank + + Type* recvData, + int recvCount, + + MPI_Datatype datatype, // The send/recv data type + const label comm, // Communicator + label* requestID = nullptr // Non-null for MPI_Igatherv +); + + +// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * // + +} // End namespace PstreamDetail +} // End namespace Foam + +// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * // + +#ifdef NoRepository + #include "UPstreamWrappingTemplates.C" +#endif + +// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * // + +#endif + +// ************************************************************************* // diff --git a/src/Pstream/mpi/UPstreamWrappingTemplates.C b/src/Pstream/mpi/UPstreamWrappingTemplates.C new file mode 100644 index 0000000000..8af13e7e16 --- /dev/null +++ b/src/Pstream/mpi/UPstreamWrappingTemplates.C @@ -0,0 +1,1018 @@ +/*---------------------------------------------------------------------------*\ + ========= | + \\ / F ield | OpenFOAM: The Open Source CFD Toolbox + \\ / O peration | + \\ / A nd | www.openfoam.com + \\/ M anipulation | +------------------------------------------------------------------------------- + Copyright (C) 2012-2015 OpenFOAM Foundation + Copyright (C) 2019-2022 OpenCFD Ltd. +------------------------------------------------------------------------------- +License + This file is part of OpenFOAM. + + OpenFOAM is free software: you can redistribute it and/or modify it + under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + OpenFOAM is distributed in the hope that it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + for more details. + + You should have received a copy of the GNU General Public License + along with OpenFOAM. If not, see . 
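All of the PstreamDetail wrappers declared above share one convention: a null requestID selects the blocking MPI call, while a non-null pointer selects the non-blocking variant (where available) and returns the index of the outstanding request. A minimal sketch (hypothetical function name):

    #include "UPstreamWrapping.H"

    using namespace Foam;

    void allReduceSketch(double* values, int count, const label comm)
    {
        // Blocking: MPI_Allreduce
        PstreamDetail::allReduce<double>
        (
            values, count, MPI_DOUBLE, MPI_SUM, comm
        );

        // Non-blocking (MPI-3 and later): MPI_Iallreduce. The returned
        // requestID indexes the outstanding request list and is completed
        // later, e.g. via UPstream::waitRequest(requestID).
        label requestID = -1;
        PstreamDetail::allReduce<double>
        (
            values, count, MPI_DOUBLE, MPI_SUM, comm, &requestID
        );
    }
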
+ +\*---------------------------------------------------------------------------*/ + +#include "UPstreamWrapping.H" +#include "profilingPstream.H" +#include "PstreamGlobals.H" + +// * * * * * * * * * * * * * * * Global Functions * * * * * * * * * * * * * // + +template +void Foam::PstreamDetail::broadcast0 +( + Type* values, + int count, + MPI_Datatype datatype, + const label comm +) +{ + if (!UPstream::parRun()) + { + return; + } + + profilingPstream::beginTiming(); + + // const int retval = + MPI_Bcast + ( + values, + count, + datatype, + 0, // (root rank) == UPstream::masterNo() + PstreamGlobals::MPICommunicators_[comm] + ); + + profilingPstream::addBroadcastTime(); +} + + +template +void Foam::PstreamDetail::reduce0 +( + Type* values, + int count, + MPI_Datatype datatype, + MPI_Op optype, + const label comm +) +{ + if (!UPstream::parRun()) + { + return; + } + + if (UPstream::warnComm != -1 && comm != UPstream::warnComm) + { + Pout<< "** reducing:"; + if (count == 1) + { + Pout<< (*values); + } + else + { + Pout<< UList(values, count); + } + Pout<< " with comm:" << comm + << " warnComm:" << UPstream::warnComm << endl; + error::printStack(Pout); + } + + profilingPstream::beginTiming(); + + // const int retval = + MPI_Reduce + ( + MPI_IN_PLACE, // recv is also send + values, + count, + datatype, + optype, + 0, // (root rank) == UPstream::masterNo() + PstreamGlobals::MPICommunicators_[comm] + ); + + profilingPstream::addReduceTime(); +} + + +template +void Foam::PstreamDetail::allReduce +( + Type* values, + int count, + MPI_Datatype datatype, + MPI_Op optype, + const label comm, + label* requestID +) +{ + if (!UPstream::parRun()) + { + return; + } + + if (UPstream::warnComm != -1 && comm != UPstream::warnComm) + { + if (requestID != nullptr) + { + Pout<< "** MPI_Iallreduce (non-blocking):"; + } + else + { + Pout<< "** MPI_Allreduce (blocking):"; + } + if (count == 1) + { + Pout<< (*values); + } + else + { + Pout<< UList(values, count); + } + Pout<< " with comm:" << comm + << " warnComm:" << UPstream::warnComm << endl; + error::printStack(Pout); + } + + profilingPstream::beginTiming(); + + bool handled(false); + +#if defined(MPI_VERSION) && (MPI_VERSION >= 3) + if (requestID != nullptr) + { + handled = true; + MPI_Request request; + if + ( + MPI_Iallreduce + ( + MPI_IN_PLACE, // recv is also send + values, + count, + datatype, + optype, + PstreamGlobals::MPICommunicators_[comm], + &request + ) + ) + { + FatalErrorInFunction + << "MPI_Iallreduce failed for " + << UList(values, count) + << Foam::abort(FatalError); + } + + if (PstreamGlobals::freedRequests_.size()) + { + *requestID = PstreamGlobals::freedRequests_.remove(); + PstreamGlobals::outstandingRequests_[*requestID] = request; + } + else + { + *requestID = PstreamGlobals::outstandingRequests_.size(); + PstreamGlobals::outstandingRequests_.append(request); + } + } +#endif + + if (!handled) + { + if (requestID != nullptr) + { + *requestID = -1; + } + if + ( + MPI_Allreduce + ( + MPI_IN_PLACE, // recv is also send + values, + count, + datatype, + optype, + PstreamGlobals::MPICommunicators_[comm] + ) + ) + { + FatalErrorInFunction + << "MPI_Allreduce failed for " + << UList(values, count) + << Foam::abort(FatalError); + } + } + + profilingPstream::addReduceTime(); +} + + +template +void Foam::PstreamDetail::allToAll +( + const UList& sendData, + UList& recvData, + MPI_Datatype datatype, + const label comm, + label* requestID +) +{ + const label np = UPstream::nProcs(comm); + + if (UPstream::warnComm != -1 && comm != UPstream::warnComm) + { 
+ if (requestID != nullptr) + { + Pout<< "** MPI_Ialltoall (non-blocking):"; + } + else + { + Pout<< "** MPI_Alltoall (blocking):"; + } + Pout<< " np:" << np + << " sendData:" << sendData.size() + << " with comm:" << comm + << " warnComm:" << UPstream::warnComm + << endl; + error::printStack(Pout); + } + + if (sendData.size() != np || recvData.size() != np) + { + FatalErrorInFunction + << "Have " << np << " ranks, but size of sendData:" + << sendData.size() << " or recvData:" << recvData.size() + << " is different!" + << Foam::abort(FatalError); + } + + if (!UPstream::parRun()) + { + recvData.deepCopy(sendData); + return; + } + + profilingPstream::beginTiming(); + + bool handled(false); + + #if defined(MPI_VERSION) && (MPI_VERSION >= 3) + if (requestID != nullptr) + { + handled = true; + MPI_Request request; + if + ( + MPI_Ialltoall + ( + // NOTE: const_cast is a temporary hack for + // backward-compatibility with versions of OpenMPI < 1.7.4 + const_cast(sendData.cdata()), + 1, // one element per rank + datatype, + recvData.data(), + 1, // one element per rank + datatype, + PstreamGlobals::MPICommunicators_[comm], + &request + ) + ) + { + FatalErrorInFunction + << "MPI_Ialltoall [comm: " << comm << "] failed." + << " For " << sendData + << Foam::abort(FatalError); + } + + if (PstreamGlobals::freedRequests_.size()) + { + *requestID = PstreamGlobals::freedRequests_.remove(); + PstreamGlobals::outstandingRequests_[*requestID] = request; + } + else + { + *requestID = PstreamGlobals::outstandingRequests_.size(); + PstreamGlobals::outstandingRequests_.append(request); + } + } +#endif + + + if (!handled) + { + if (requestID != nullptr) + { + *requestID = -1; + } + if + ( + MPI_Alltoall + ( + // NOTE: const_cast is a temporary hack for + // backward-compatibility with versions of OpenMPI < 1.7.4 + const_cast(sendData.cdata()), + 1, // one element per rank + datatype, + recvData.data(), + 1, // one element per rank + datatype, + PstreamGlobals::MPICommunicators_[comm] + ) + ) + { + FatalErrorInFunction + << "MPI_Alltoall [comm: " << comm << "] failed." + << " For " << sendData + << Foam::abort(FatalError); + } + } + + profilingPstream::addAllToAllTime(); +} + + +template +void Foam::PstreamDetail::allToAllv +( + const Type* sendData, + const UList& sendCounts, + const UList& sendOffsets, + + Type* recvData, + const UList& recvCounts, + const UList& recvOffsets, + + MPI_Datatype datatype, + const label comm, + label* requestID +) +{ + const label np = UPstream::nProcs(comm); + + if (UPstream::warnComm != -1 && comm != UPstream::warnComm) + { + if (requestID != nullptr) + { + Pout<< "** MPI_Ialltoallv (non-blocking):"; + } + else + { + Pout<< "** MPI_Alltoallv (blocking):"; + } + Pout<< " sendCounts:" << sendCounts + << " sendOffsets:" << sendOffsets + << " with comm:" << comm + << " warnComm:" << UPstream::warnComm + << endl; + error::printStack(Pout); + } + + if + ( + (sendCounts.size() != np || sendOffsets.size() < np) + || (recvCounts.size() != np || recvOffsets.size() < np) + ) + { + FatalErrorInFunction + << "Have " << np << " ranks, but sendCounts:" << sendCounts.size() + << ", sendOffsets:" << sendOffsets.size() + << ", recvCounts:" << recvCounts.size() + << " or recvOffsets:" << recvOffsets.size() + << " is different!" 
+ << Foam::abort(FatalError); + } + + if (!UPstream::parRun()) + { + if (recvCounts[0] != sendCounts[0]) + { + FatalErrorInFunction + << "Bytes to send " << sendCounts[0] + << " does not equal bytes to receive " << recvCounts[0] + << Foam::abort(FatalError); + } + std::memmove + ( + recvData, + (sendData + sendOffsets[0]), + recvCounts[0]*sizeof(Type) + ); + return; + } + + profilingPstream::beginTiming(); + + bool handled(false); + +#if defined(MPI_VERSION) && (MPI_VERSION >= 3) + if (requestID != nullptr) + { + handled = true; + MPI_Request request; + + if + ( + MPI_Ialltoallv + ( + const_cast(sendData), + const_cast(sendCounts.cdata()), + const_cast(sendOffsets.cdata()), + datatype, + recvData, + const_cast(recvCounts.cdata()), + const_cast(recvOffsets.cdata()), + datatype, + PstreamGlobals::MPICommunicators_[comm], + &request + ) + ) + { + FatalErrorInFunction + << "MPI_Ialltoallv [comm: " << comm << "] failed." + << " For sendCounts " << sendCounts + << " recvCounts " << recvCounts + << Foam::abort(FatalError); + } + + if (PstreamGlobals::freedRequests_.size()) + { + *requestID = PstreamGlobals::freedRequests_.remove(); + PstreamGlobals::outstandingRequests_[*requestID] = request; + } + else + { + *requestID = PstreamGlobals::outstandingRequests_.size(); + PstreamGlobals::outstandingRequests_.append(request); + } + } +#endif + + if (!handled) + { + if (requestID != nullptr) + { + *requestID = -1; + } + if + ( + MPI_Alltoallv + ( + const_cast(sendData), + const_cast(sendCounts.cdata()), + const_cast(sendOffsets.cdata()), + datatype, + recvData, + const_cast(recvCounts.cdata()), + const_cast(recvOffsets.cdata()), + datatype, + PstreamGlobals::MPICommunicators_[comm] + ) + ) + { + FatalErrorInFunction + << "MPI_Alltoallv [comm: " << comm << "] failed." + << " For sendCounts " << sendCounts + << " recvCounts " << recvCounts + << Foam::abort(FatalError); + } + } + + profilingPstream::addAllToAllTime(); +} + + +template +void Foam::PstreamDetail::gather +( + const Type* sendData, + int sendCount, + + Type* recvData, + int recvCount, + + MPI_Datatype datatype, + const label comm, + label* requestID +) +{ + if (!UPstream::parRun()) + { + std::memmove(recvData, sendData, recvCount*sizeof(Type)); + return; + } + + const label np = UPstream::nProcs(comm); + + if (UPstream::warnComm != -1 && comm != UPstream::warnComm) + { + if (requestID != nullptr) + { + Pout<< "** MPI_Igather (non-blocking):"; + } + else + { + Pout<< "** MPI_Gather (blocking):"; + } + Pout<< " np:" << np + << " recvCount:" << recvCount + << " with comm:" << comm + << " warnComm:" << UPstream::warnComm + << endl; + error::printStack(Pout); + } + + profilingPstream::beginTiming(); + + bool handled(false); + +#if defined(MPI_VERSION) && (MPI_VERSION >= 3) + if (requestID != nullptr) + { + handled = true; + MPI_Request request; + if + ( + MPI_Igather + ( + const_cast(sendData), + sendCount, + datatype, + recvData, + recvCount, + datatype, + 0, // (root rank) == UPstream::masterNo() + PstreamGlobals::MPICommunicators_[comm], + &request + ) + ) + { + FatalErrorInFunction + << "MPI_Igather [comm: " << comm << "] failed." 
+ << " sendCount " << sendCount + << " recvCount " << recvCount + << Foam::abort(FatalError); + } + + if (PstreamGlobals::freedRequests_.size()) + { + *requestID = PstreamGlobals::freedRequests_.remove(); + PstreamGlobals::outstandingRequests_[*requestID] = request; + } + else + { + *requestID = PstreamGlobals::outstandingRequests_.size(); + PstreamGlobals::outstandingRequests_.append(request); + } + } +#endif + + if (!handled) + { + if (requestID != nullptr) + { + *requestID = -1; + } + if + ( + MPI_Gather + ( + const_cast(sendData), + sendCount, + datatype, + recvData, + recvCount, + datatype, + 0, // (root rank) == UPstream::masterNo() + PstreamGlobals::MPICommunicators_[comm] + ) + ) + { + FatalErrorInFunction + << "MPI_Gather [comm: " << comm << "] failed." + << " sendCount " << sendCount + << " recvCount " << recvCount + << Foam::abort(FatalError); + } + } + + profilingPstream::addGatherTime(); +} + + +template +void Foam::PstreamDetail::scatter +( + const Type* sendData, + int sendCount, + + Type* recvData, + int recvCount, + + MPI_Datatype datatype, + const label comm, + label* requestID +) +{ + if (!UPstream::parRun()) + { + std::memmove(recvData, sendData, recvCount*sizeof(Type)); + return; + } + + const label np = UPstream::nProcs(comm); + + if (UPstream::warnComm != -1 && comm != UPstream::warnComm) + { + if (requestID != nullptr) + { + Pout<< "** MPI_Iscatter (non-blocking):"; + } + else + { + Pout<< "** MPI_Scatter (blocking):"; + } + Pout<< " np:" << np + << " recvCount:" << recvCount + << " with comm:" << comm + << " warnComm:" << UPstream::warnComm + << endl; + error::printStack(Pout); + } + + profilingPstream::beginTiming(); + + bool handled(false); + +#if defined(MPI_VERSION) && (MPI_VERSION >= 3) + if (requestID != nullptr) + { + handled = true; + MPI_Request request; + if + ( + MPI_Iscatter + ( + const_cast(sendData), + sendCount, + datatype, + recvData, + recvCount, + datatype, + 0, // (root rank) == UPstream::masterNo() + PstreamGlobals::MPICommunicators_[comm], + &request + ) + ) + { + FatalErrorInFunction + << "MPI_Iscatter [comm: " << comm << "] failed." + << " sendCount " << sendCount + << " recvCount " << recvCount + << Foam::abort(FatalError); + } + + if (PstreamGlobals::freedRequests_.size()) + { + *requestID = PstreamGlobals::freedRequests_.remove(); + PstreamGlobals::outstandingRequests_[*requestID] = request; + } + else + { + *requestID = PstreamGlobals::outstandingRequests_.size(); + PstreamGlobals::outstandingRequests_.append(request); + } + } +#endif + + if (!handled) + { + if (requestID != nullptr) + { + *requestID = -1; + } + if + ( + MPI_Scatter + ( + const_cast(sendData), + sendCount, + datatype, + recvData, + recvCount, + datatype, + 0, // (root rank) == UPstream::masterNo() + PstreamGlobals::MPICommunicators_[comm] + ) + ) + { + FatalErrorInFunction + << "MPI_Iscatter [comm: " << comm << "] failed." 
+                << " sendCount " << sendCount
+                << " recvCount " << recvCount
+                << Foam::abort(FatalError);
+        }
+    }
+
+    profilingPstream::addScatterTime();
+}
+
+
+template<class Type>
+void Foam::PstreamDetail::gatherv
+(
+    const Type* sendData,
+    int sendCount,
+
+    Type* recvData,
+    const UList<int>& recvCounts,
+    const UList<int>& recvOffsets,
+
+    MPI_Datatype datatype,
+    const label comm,
+    label* requestID
+)
+{
+    if (!UPstream::parRun())
+    {
+        // recvCounts[0] may be invalid - use sendCount instead
+        std::memmove(recvData, sendData, sendCount*sizeof(Type));
+        return;
+    }
+
+    const label np = UPstream::nProcs(comm);
+
+    if (UPstream::warnComm != -1 && comm != UPstream::warnComm)
+    {
+        if (requestID != nullptr)
+        {
+            Pout<< "** MPI_Igatherv (non-blocking):";
+        }
+        else
+        {
+            Pout<< "** MPI_Gatherv (blocking):";
+        }
+        Pout<< " np:" << np
+            << " recvCounts:" << recvCounts
+            << " recvOffsets:" << recvOffsets
+            << " with comm:" << comm
+            << " warnComm:" << UPstream::warnComm
+            << endl;
+        error::printStack(Pout);
+    }
+
+    if
+    (
+        UPstream::master(comm)
+     && (recvCounts.size() != np || recvOffsets.size() < np)
+    )
+    {
+        // Note: allow offsets to be e.g. 1 larger than nProc so we
+        // can easily loop over the result
+
+        FatalErrorInFunction
+            << "Have " << np << " ranks, but recvCounts:" << recvCounts.size()
+            << " or recvOffsets:" << recvOffsets.size()
+            << " is too small!"
+            << Foam::abort(FatalError);
+    }
+
+    profilingPstream::beginTiming();
+
+    // Ensure send/recv consistency on master
+    if (UPstream::master(comm) && !recvCounts[0])
+    {
+        sendCount = 0;
+    }
+
+    bool handled(false);
+
+#if defined(MPI_VERSION) && (MPI_VERSION >= 3)
+    if (requestID != nullptr)
+    {
+        handled = true;
+        MPI_Request request;
+        if
+        (
+            MPI_Igatherv
+            (
+                const_cast<Type*>(sendData),
+                sendCount,
+                datatype,
+                recvData,
+                const_cast<int*>(recvCounts.cdata()),
+                const_cast<int*>(recvOffsets.cdata()),
+                datatype,
+                0, // (root rank) == UPstream::masterNo()
+                PstreamGlobals::MPICommunicators_[comm],
+                &request
+            )
+        )
+        {
+            FatalErrorInFunction
+                << "MPI_Igatherv failed [comm: " << comm << ']'
+                << " sendCount " << sendCount
+                << " recvCounts " << recvCounts
+                << Foam::abort(FatalError);
+        }
+
+        if (PstreamGlobals::freedRequests_.size())
+        {
+            *requestID = PstreamGlobals::freedRequests_.remove();
+            PstreamGlobals::outstandingRequests_[*requestID] = request;
+        }
+        else
+        {
+            *requestID = PstreamGlobals::outstandingRequests_.size();
+            PstreamGlobals::outstandingRequests_.append(request);
+        }
+    }
+#endif
+
+    if (!handled)
+    {
+        if (requestID != nullptr)
+        {
+            *requestID = -1;
+        }
+        if
+        (
+            MPI_Gatherv
+            (
+                const_cast<Type*>(sendData),
+                sendCount,
+                datatype,
+                recvData,
+                const_cast<int*>(recvCounts.cdata()),
+                const_cast<int*>(recvOffsets.cdata()),
+                datatype,
+                0, // (root rank) == UPstream::masterNo()
+                PstreamGlobals::MPICommunicators_[comm]
+            )
+        )
+        {
+            FatalErrorInFunction
+                << "MPI_Gatherv failed [comm: " << comm << ']'
+                << " sendCount " << sendCount
+                << " recvCounts " << recvCounts
+                << Foam::abort(FatalError);
+        }
+    }
+
+    profilingPstream::addGatherTime();
+}
+
+
+template<class Type>
+void Foam::PstreamDetail::scatterv
+(
+    const Type* sendData,
+    const UList<int>& sendCounts,
+    const UList<int>& sendOffsets,
+
+    Type* recvData,
+    int recvCount,
+
+    MPI_Datatype datatype,
+    const label comm,
+    label* requestID
+)
+{
+    if (!UPstream::parRun())
+    {
+        std::memmove(recvData, sendData, recvCount*sizeof(Type));
+        return;
+    }
+
+    const label np = UPstream::nProcs(comm);
+
+    if (UPstream::warnComm != -1 && comm != UPstream::warnComm)
+    {
+        if (requestID != nullptr)
+        {
+            Pout<< "** MPI_Iscatterv (non-blocking):";
+        }
+        else
+        {
+            Pout<< "** MPI_Scatterv (blocking):";
+        }
+        Pout<< " np:" << np
+            << " sendCounts:" << sendCounts
+            << " sendOffsets:" << sendOffsets
+            << " with comm:" << comm
+            << " warnComm:" << UPstream::warnComm
+            << endl;
+        error::printStack(Pout);
+    }
+
+    if
+    (
+        UPstream::master(comm)
+     && (sendCounts.size() != np || sendOffsets.size() < np)
+    )
+    {
+        // Note: allow offsets to be e.g. 1 larger than nProc so we
+        // can easily loop over the result
+
+        FatalErrorInFunction
+            << "Have " << np << " ranks, but sendCounts:" << sendCounts.size()
+            << " or sendOffsets:" << sendOffsets.size()
+            << " is too small!"
+            << Foam::abort(FatalError);
+    }
+
+    profilingPstream::beginTiming();
+
+    bool handled(false);
+
+#if defined(MPI_VERSION) && (MPI_VERSION >= 3)
+    if (requestID != nullptr)
+    {
+        handled = true;
+        MPI_Request request;
+        if
+        (
+            MPI_Iscatterv
+            (
+                const_cast<Type*>(sendData),
+                const_cast<int*>(sendCounts.cdata()),
+                const_cast<int*>(sendOffsets.cdata()),
+                datatype,
+                recvData,
+                recvCount,
+                datatype,
+                0, // (root rank) == UPstream::masterNo()
+                PstreamGlobals::MPICommunicators_[comm],
+                &request
+            )
+        )
+        {
+            FatalErrorInFunction
+                << "MPI_Iscatterv [comm: " << comm << "] failed."
+                << " sendCounts " << sendCounts
+                << " sendOffsets " << sendOffsets
+                << Foam::abort(FatalError);
+        }
+
+        if (PstreamGlobals::freedRequests_.size())
+        {
+            *requestID = PstreamGlobals::freedRequests_.remove();
+            PstreamGlobals::outstandingRequests_[*requestID] = request;
+        }
+        else
+        {
+            *requestID = PstreamGlobals::outstandingRequests_.size();
+            PstreamGlobals::outstandingRequests_.append(request);
+        }
+    }
+#endif
+
+    if (!handled)
+    {
+        if (requestID != nullptr)
+        {
+            *requestID = -1;
+        }
+        if
+        (
+            MPI_Scatterv
+            (
+                const_cast<Type*>(sendData),
+                const_cast<int*>(sendCounts.cdata()),
+                const_cast<int*>(sendOffsets.cdata()),
+                datatype,
+                recvData,
+                recvCount,
+                datatype,
+                0, // (root rank) == UPstream::masterNo()
+                PstreamGlobals::MPICommunicators_[comm]
+            )
+        )
+        {
+            FatalErrorInFunction
+                << "MPI_Scatterv [comm: " << comm << "] failed."
+                << " sendCounts " << sendCounts
+                << " sendOffsets " << sendOffsets
+                << Foam::abort(FatalError);
+        }
+    }
+
+    profilingPstream::addScatterTime();
+}
+
+
+// ************************************************************************* //
diff --git a/src/Pstream/mpi/allReduce.H b/src/Pstream/mpi/allReduce.H
deleted file mode 100644
index 9b4c3fc857..0000000000
--- a/src/Pstream/mpi/allReduce.H
+++ /dev/null
@@ -1,117 +0,0 @@
-/*---------------------------------------------------------------------------*\
-  =========                 |
-  \\      /  F ield         | OpenFOAM: The Open Source CFD Toolbox
-   \\    /   O peration     |
-    \\  /    A nd           | www.openfoam.com
-     \\/     M anipulation  |
--------------------------------------------------------------------------------
-    Copyright (C) 2012-2016 OpenFOAM Foundation
-    Copyright (C) 2022 OpenCFD Ltd.
--------------------------------------------------------------------------------
-License
-    This file is part of OpenFOAM.
-
-    OpenFOAM is free software: you can redistribute it and/or modify it
-    under the terms of the GNU General Public License as published by
-    the Free Software Foundation, either version 3 of the License, or
-    (at your option) any later version.
-
-    OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
-    ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-    FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
-    for more details.
-
-    You should have received a copy of the GNU General Public License
-    along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>.
-
-Namespace
-    Foam::PstreamDetail
-
-Description
-    Some implementation details for Pstream and/or MPI.
-
-InNamespace
-    Foam::PstreamDetail
-
-Description
-    Functions to wrap MPI_Bcast, MPI_Allreduce, MPI_Iallreduce
-
-SourceFiles
-    allReduceTemplates.C
-
-\*---------------------------------------------------------------------------*/
-
-#ifndef Foam_allReduce_H
-#define Foam_allReduce_H
-
-#include "UPstream.H"
-#include <mpi.h>
-
-// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
-
-namespace Foam
-{
-namespace PstreamDetail
-{
-
-// MPI_Bcast, using root=0
-template<class Type>
-void broadcast0
-(
-    Type* values,
-    int count,
-    MPI_Datatype datatype,
-    const label communicator
-);
-
-// MPI_Reduce, using root=0
-template<class Type>
-void reduce0
-(
-    Type* values,
-    int count,
-    MPI_Datatype datatype,
-    MPI_Op optype,
-    const label communicator
-);
-
-// MPI_Allreduce
-template<class Type>
-void allReduce
-(
-    Type* values,
-    int count,
-    MPI_Datatype datatype,
-    MPI_Op optype,
-    const label communicator
-);
-
-
-// MPI_Iallreduce
-template<class Type>
-void iallReduce
-(
-    Type* values,
-    int count,
-    MPI_Datatype datatype,
-    MPI_Op optype,
-    const label communicator,
-    label& requestID
-);
-
-// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
-
-} // End namespace PstreamDetail
-} // End namespace Foam
-
-// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
-
-#ifdef NoRepository
-    #include "allReduceTemplates.C"
-#endif
-
-// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
-
-#endif
-
-// ************************************************************************* //
diff --git a/src/Pstream/mpi/allReduceTemplates.C b/src/Pstream/mpi/allReduceTemplates.C
deleted file mode 100644
index c370d88135..0000000000
--- a/src/Pstream/mpi/allReduceTemplates.C
+++ /dev/null
@@ -1,261 +0,0 @@
-/*---------------------------------------------------------------------------*\
-  =========                 |
-  \\      /  F ield         | OpenFOAM: The Open Source CFD Toolbox
-   \\    /   O peration     |
-    \\  /    A nd           | www.openfoam.com
-     \\/     M anipulation  |
--------------------------------------------------------------------------------
-    Copyright (C) 2012-2015 OpenFOAM Foundation
-    Copyright (C) 2019-2022 OpenCFD Ltd.
--------------------------------------------------------------------------------
-License
-    This file is part of OpenFOAM.
-
-    OpenFOAM is free software: you can redistribute it and/or modify it
-    under the terms of the GNU General Public License as published by
-    the Free Software Foundation, either version 3 of the License, or
-    (at your option) any later version.
-
-    OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
-    ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-    FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
-    for more details.
-
-    You should have received a copy of the GNU General Public License
-    along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>.
-
-\*---------------------------------------------------------------------------*/
-
-#include "allReduce.H"
-#include "profilingPstream.H"
-#include "PstreamGlobals.H"
-
-// * * * * * * * * * * * * * * * Global Functions * * * * * * * * * * * * * //
-
-template<class Type>
-void Foam::PstreamDetail::broadcast0
-(
-    Type* values,
-    int count,
-    MPI_Datatype datatype,
-    const label communicator
-)
-{
-    if (!UPstream::parRun())
-    {
-        return;
-    }
-
-    profilingPstream::beginTiming();
-
-    // const int retval =
-    MPI_Bcast
-    (
-        values,
-        count,
-        datatype,
-        0, // (root process) is master == UPstream::masterNo()
-        PstreamGlobals::MPICommunicators_[communicator]
-    );
-
-    profilingPstream::addBroadcastTime();
-}
-
-
-template<class Type>
-void Foam::PstreamDetail::reduce0
-(
-    Type* values,
-    int count,
-    MPI_Datatype datatype,
-    MPI_Op optype,
-    const label communicator
-)
-{
-    if (!UPstream::parRun())
-    {
-        return;
-    }
-
-    if (UPstream::warnComm != -1 && communicator != UPstream::warnComm)
-    {
-        Pout<< "** reducing:";
-        if (count == 1)
-        {
-            Pout<< (*values);
-        }
-        else
-        {
-            Pout<< UList<Type>(values, count);
-        }
-        Pout<< " with comm:" << communicator
-            << " warnComm:" << UPstream::warnComm << endl;
-        error::printStack(Pout);
-    }
-
-    profilingPstream::beginTiming();
-
-    // const int retval =
-    MPI_Reduce
-    (
-        MPI_IN_PLACE,
-        values,
-        count,
-        datatype,
-        optype,
-        0, // (root process) is master == UPstream::masterNo()
-        PstreamGlobals::MPICommunicators_[communicator]
-    );
-
-    profilingPstream::addReduceTime();
-}
-
-
-template<class Type>
-void Foam::PstreamDetail::allReduce
-(
-    Type* values,
-    int count,
-    MPI_Datatype datatype,
-    MPI_Op optype,
-    const label communicator
-)
-{
-    if (!UPstream::parRun())
-    {
-        return;
-    }
-
-    if (UPstream::warnComm != -1 && communicator != UPstream::warnComm)
-    {
-        Pout<< "** reducing:";
-        if (count == 1)
-        {
-            Pout<< (*values);
-        }
-        else
-        {
-            Pout<< UList<Type>(values, count);
-        }
-        Pout<< " with comm:" << communicator
-            << " warnComm:" << UPstream::warnComm << endl;
-        error::printStack(Pout);
-    }
-
-    profilingPstream::beginTiming();
-
-    // const int retval =
-    MPI_Allreduce
-    (
-        MPI_IN_PLACE,
-        values,
-        count,
-        datatype,
-        optype,
-        PstreamGlobals::MPICommunicators_[communicator]
-    );
-
-    profilingPstream::addReduceTime();
-}
-
-
-template<class Type>
-void Foam::PstreamDetail::iallReduce
-(
-    Type* values,
-    int count,
-    MPI_Datatype datatype,
-    MPI_Op optype,
-    const label communicator,
-    label& requestID
-)
-{
-    if (!UPstream::parRun())
-    {
-        return;
-    }
-
-    if (UPstream::warnComm != -1 && communicator != UPstream::warnComm)
-    {
-        Pout<< "** non-blocking reducing:";
-        if (count == 1)
-        {
-            Pout<< (*values);
-        }
-        else
-        {
-            Pout<< UList<Type>(values, count);
-        }
-        Pout<< " with comm:" << communicator
-            << " warnComm:" << UPstream::warnComm << endl;
-        error::printStack(Pout);
-    }
-
-    profilingPstream::beginTiming();
-
-#if defined(MPI_VERSION) && (MPI_VERSION >= 3)
-    MPI_Request request;
-    if
-    (
-        MPI_Iallreduce
-        (
-            MPI_IN_PLACE,
-            values,
-            count,
-            datatype,
-            optype,
-            PstreamGlobals::MPICommunicators_[communicator],
-            &request
-        )
-    )
-    {
-        FatalErrorInFunction
-            << "MPI_Iallreduce failed for "
-            << UList<Type>(values, count)
-            << Foam::abort(FatalError);
-    }
-
-    if (PstreamGlobals::freedRequests_.size())
-    {
-        requestID = PstreamGlobals::freedRequests_.remove();
-        PstreamGlobals::outstandingRequests_[requestID] = request;
-    }
-    else
-    {
-        requestID = PstreamGlobals::outstandingRequests_.size();
-        PstreamGlobals::outstandingRequests_.append(request);
-    }
-
-    if (UPstream::debug)
-    {
-        Pout<< "UPstream::allocateRequest for non-blocking reduce"
-            << " : request:" << requestID << endl;
-    }
-#else
-    // Non-blocking not yet implemented in mpi
-    if
-    (
-        MPI_Allreduce
-        (
-            MPI_IN_PLACE,
-            values,
-            count,
-            datatype,
-            optype,
-            PstreamGlobals::MPICommunicators_[communicator]
-        )
-    )
-    {
-        FatalErrorInFunction
-            << "MPI_Allreduce failed for "
-            << UList<Type>(values, count)
-            << Foam::abort(FatalError);
-    }
-    requestID = -1;
-#endif
-
-    profilingPstream::addReduceTime();
-}
-
-
-// ************************************************************************* //