ENH: extend globalIndex mpiGather to use scalar/label components

- MPI_Gatherv requires contiguous data, but a byte-wise transfer can
  quickly exceed the 'int' limits used for MPI sizes/offsets. Thus
  gather label/scalar components when possible to increase the
  effective size limit.

  For non-contiguous types (or contiguous data that still exceeds
  these limits), it now falls back to manual handling.

ENH: handle contiguous data in GAMGAgglomeration gather values

- delegate to globalIndex::gatherValues static method (new)
Mark Olesen 2022-03-28 14:03:05 +02:00
parent 87e3b196b0
commit 6fa23bd7a6
6 changed files with 731 additions and 616 deletions
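For context on the commit message above: MPI_Gatherv takes per-rank counts and displacements as 'int', so a byte-wise transfer hits the limit at roughly 2 GiB, while a component-wise transfer only hits it at ~2G components. A minimal sketch of that overflow check, assuming OpenFOAM's label/scalar typedefs and that sizeof(Type) is a non-zero multiple of sizeof(scalar); the helper name is illustrative, not part of the commit:

// Sketch: why component counts raise the effective MPI size limit
template<class Type>
bool fitsIntLimit(const Foam::label nTotal)
{
    // byte-wise transfer would need nTotal*sizeof(Type) <= INT_MAX;
    // gathering scalar components only needs nTotal*nCmpts <= INT_MAX
    const int nCmpts = int(sizeof(Type)/sizeof(Foam::scalar));
    return nTotal <= (INT_MAX/nCmpts);
}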

View File

@ -68,6 +68,29 @@ int main(int argc, char *argv[])
}
// This now compiles (and works)
// - reverts to normal gather for non-contiguous
{
const wordList sendData({"hello", "world"});
// One-sided sizing! master only
const globalIndex allProcAddr
(
sendData.size(),
globalIndex::gatherOnly{}
);
Pout<< "listGather sizes: " << flatOutput(allProcAddr.sizes()) << nl;
// Collect all values
wordList allValues
(
allProcAddr.mpiGather(sendData)
);
Pout<< "all-data: " << allValues << endl;
}
// Gather all values
{
const auto& sendData = localValues;
@ -75,8 +98,8 @@ int main(int argc, char *argv[])
// One-sided sizing! master only
const globalIndex allProcAddr
(
UPstream::listGatherValues<label>(sendData.size()),
globalIndex::SIZES
sendData.size(),
globalIndex::gatherOnly{}
);
Pout<< "listGather sizes: " << flatOutput(allProcAddr.sizes()) << nl;

View File

@ -6,7 +6,7 @@
\\/ M anipulation |
-------------------------------------------------------------------------------
Copyright (C) 2011-2017 OpenFOAM Foundation
Copyright (C) 2019-2021 OpenCFD Ltd.
Copyright (C) 2019-2022 OpenCFD Ltd.
-------------------------------------------------------------------------------
License
This file is part of OpenFOAM.
@ -206,9 +206,6 @@ void Foam::GAMGAgglomeration::agglomerateLduAddressing
boolList& faceFlipMap = faceFlipMap_[fineLevelIndex];
label nFlipped = 0;
label nDissapear = 0;
forAll(faceRestrictAddr, fineFacei)
{
label coarseFacei = faceRestrictAddr[fineFacei];
@ -225,7 +222,6 @@ void Foam::GAMGAgglomeration::agglomerateLduAddressing
if (cOwn == rmUpperAddr && cNei == rmLowerAddr)
{
faceFlipMap[fineFacei] = true;
nFlipped++;
}
else if (cOwn == rmLowerAddr && cNei == rmUpperAddr)
{
@ -244,10 +240,6 @@ void Foam::GAMGAgglomeration::agglomerateLduAddressing
<< exit(FatalError);
}
}
else
{
nDissapear++;
}
}
@ -513,39 +505,37 @@ void Foam::GAMGAgglomeration::procAgglomerateRestrictAddressing
{
// Collect number of cells
labelList nFineCells;
gatherList
globalIndex::gatherValues
(
comm,
procIDs,
restrictAddressing_[levelIndex].size(),
nFineCells
);
nFineCells,
labelList offsets(nFineCells.size()+1);
{
offsets[0] = 0;
forAll(nFineCells, i)
{
offsets[i+1] = offsets[i] + nFineCells[i];
}
}
UPstream::msgType(),
UPstream::commsTypes::scheduled
);
labelList fineOffsets(globalIndex::calcOffsets(nFineCells));
// Combine and renumber nCoarseCells
labelList nCoarseCells;
gatherList
globalIndex::gatherValues
(
comm,
procIDs,
nCells_[levelIndex],
nCoarseCells
nCoarseCells,
UPstream::msgType(),
UPstream::commsTypes::scheduled
);
labelList coarseOffsets(globalIndex::calcOffsets(nCoarseCells));
// (cell)restrictAddressing
const globalIndex cellOffsetter(offsets);
labelList procRestrictAddressing;
cellOffsetter.gather
globalIndex::gather
(
fineOffsets,
comm,
procIDs,
restrictAddressing_[levelIndex],
@ -558,29 +548,22 @@ void Foam::GAMGAgglomeration::procAgglomerateRestrictAddressing
if (Pstream::myProcNo(comm) == procIDs[0])
{
labelList coarseCellOffsets(procIDs.size()+1);
{
coarseCellOffsets[0] = 0;
forAll(procIDs, i)
{
coarseCellOffsets[i+1] = coarseCellOffsets[i]+nCoarseCells[i];
}
}
nCells_[levelIndex] = coarseCellOffsets.last();
nCells_[levelIndex] = coarseOffsets.last(); // ie, totalSize()
// Renumber consecutively
for (label proci = 1; proci < procIDs.size(); proci++)
for (label proci = 1; proci < procIDs.size(); ++proci)
{
SubList<label> procSlot
(
procRestrictAddressing,
offsets[proci+1]-offsets[proci],
offsets[proci]
fineOffsets[proci+1]-fineOffsets[proci],
fineOffsets[proci]
);
// procSlot += coarseOffsets[proci];
forAll(procSlot, i)
{
procSlot[i] += coarseCellOffsets[proci];
procSlot[i] += coarseOffsets[proci];
}
}
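The hand-rolled offset accumulation removed above is exactly what globalIndex::calcOffsets provides; a small equivalence sketch with illustrative values:

// Sketch: sizes -> offsets, replacing the manual loop
labelList sizes({3, 1, 4});
labelList offsets(globalIndex::calcOffsets(sizes));
// offsets == (0 3 4 8): one entry per processor plus the total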
@ -688,51 +671,6 @@ void Foam::GAMGAgglomeration::combineLevels(const label curLevel)
}
//void Foam::GAMGAgglomeration::gatherList
//(
// const label comm,
// const labelList& procIDs,
//
// const label myVal,
// labelList& vals,
// const int tag
//)
//{
// vals.setSize(procIDs.size());
//
// if (Pstream::myProcNo(comm) == procIDs[0])
// {
// vals[0] = myVal;
//
// for (label i=1; i<procIDs.size(); i++)
// {
// label& slaveVal = vals[i];
// IPstream::read
// (
// Pstream::commsTypes::scheduled,
// procIDs[i],
// reinterpret_cast<char*>(&slaveVal),
// sizeof(slaveVal),
// tag,
// comm
// );
// }
// }
// else
// {
// OPstream::write
// (
// Pstream::commsTypes::scheduled,
// procIDs[0],
// reinterpret_cast<const char*>(&myVal),
// sizeof(myVal),
// tag,
// comm
// );
// }
//}
void Foam::GAMGAgglomeration::calculateRegionMaster
(
const label comm,

View File

@ -6,7 +6,7 @@
\\/ M anipulation |
-------------------------------------------------------------------------------
Copyright (C) 2011-2016 OpenFOAM Foundation
Copyright (C) 2019-2020 OpenCFD Ltd.
Copyright (C) 2019-2022 OpenCFD Ltd.
-------------------------------------------------------------------------------
License
This file is part of OpenFOAM.
@ -37,8 +37,8 @@ SourceFiles
\*---------------------------------------------------------------------------*/
#ifndef GAMGAgglomeration_H
#define GAMGAgglomeration_H
#ifndef Foam_GAMGAgglomeration_H
#define Foam_GAMGAgglomeration_H
#include "MeshObject.H"
#include "lduPrimitiveMesh.H"
@ -161,18 +161,6 @@ protected:
const label nCoarseCells
) const;
//- Gather value from all procIDs onto procIDs[0]
template<class Type>
static void gatherList
(
const label comm,
const labelList& procIDs,
const Type& myVal,
List<Type>& allVals,
const int tag = Pstream::msgType()
);
void clearLevel(const label leveli);

View File

@ -31,50 +31,6 @@ License
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
template<class Type>
void Foam::GAMGAgglomeration::gatherList
(
const label comm,
const labelList& procIDs,
const Type& myVal,
List<Type>& allVals,
const int tag
)
{
if (Pstream::myProcNo(comm) == procIDs[0])
{
allVals.setSize(procIDs.size());
allVals[0] = myVal;
for (label i=1; i<procIDs.size(); i++)
{
IPstream fromSlave
(
Pstream::commsTypes::scheduled,
procIDs[i],
0,
tag,
comm
);
fromSlave >> allVals[i];
}
}
else
{
OPstream toMaster
(
Pstream::commsTypes::scheduled,
procIDs[0],
0,
tag,
comm
);
toMaster << myVal;
}
}
template<class Type>
void Foam::GAMGAgglomeration::restrictField
(
@ -114,11 +70,12 @@ void Foam::GAMGAgglomeration::restrictField
restrictField(cf, ff, fineToCoarse);
label coarseLevelIndex = fineLevelIndex+1;
const label coarseLevelIndex = fineLevelIndex+1;
if (procAgglom && hasProcMesh(coarseLevelIndex))
{
label fineComm = UPstream::parent(procCommunicator_[coarseLevelIndex]);
const label coarseComm =
UPstream::parent(procCommunicator_[coarseLevelIndex]);
const List<label>& procIDs = agglomProcIDs(coarseLevelIndex);
const labelList& offsets = cellOffsets(coarseLevelIndex);
@ -126,7 +83,7 @@ void Foam::GAMGAgglomeration::restrictField
globalIndex::gather
(
offsets,
fineComm,
coarseComm,
procIDs,
cf,
UPstream::msgType(),
@ -180,19 +137,17 @@ void Foam::GAMGAgglomeration::prolongField
{
const labelList& fineToCoarse = restrictAddressing_[levelIndex];
label coarseLevelIndex = levelIndex+1;
const label coarseLevelIndex = levelIndex+1;
if (procAgglom && hasProcMesh(coarseLevelIndex))
{
label coarseComm = UPstream::parent
(
procCommunicator_[coarseLevelIndex]
);
const label coarseComm =
UPstream::parent(procCommunicator_[coarseLevelIndex]);
const List<label>& procIDs = agglomProcIDs(coarseLevelIndex);
const labelList& offsets = cellOffsets(coarseLevelIndex);
label localSize = nCells_[levelIndex];
const label localSize = nCells_[levelIndex];
Field<Type> allCf(localSize);
globalIndex::scatter

View File

@ -408,389 +408,432 @@ public:
const bool checkOverflow = false
);
//- Collect single values in processor order on master (== procIDs[0]).
// Handles contiguous/non-contiguous data.
template<class ProcIDsContainer, class Type>
static void gatherValues
(
const label comm, //!< communicator
const ProcIDsContainer& procIDs,
const Type& localValue,
List<Type>& allValues, //! output field (master only)
const int tag = UPstream::msgType(),
const UPstream::commsTypes = UPstream::commsTypes::scheduled
);
// Other
//- Collect data in processor order on master (== procIDs[0]).
// Handles contiguous/non-contiguous data, skips empty fields.
template<class ProcIDsContainer, class Type>
static void gather
(
const labelUList& offsets, //!< offsets (master only)
const label comm, //!< communicator
const ProcIDsContainer& procIDs,
const UList<Type>& fld,
List<Type>& allFld, //! output field (master only)
const int tag = UPstream::msgType(),
const UPstream::commsTypes = UPstream::commsTypes::nonBlocking
);
// Gather (helpers)
//- Collect indirect data in processor order on master
// Handles contiguous/non-contiguous data, skips empty fields.
template<class Type, class Addr>
static void gather
(
const labelUList& offsets, //!< offsets (master only)
const label comm, //!< communicator
const UList<int>& procIDs,
const IndirectListBase<Type, Addr>& fld,
List<Type>& allFld, //! output field (master only)
const int tag = UPstream::msgType(),
const UPstream::commsTypes = UPstream::commsTypes::scheduled
);
//- Collect data in processor order on master (== procIDs[0]).
// \note offsets needed on master only.
template<class ProcIDsContainer, class Type>
static void gather
(
const labelUList& offsets,
const label comm, //!< communicator
const ProcIDsContainer& procIDs,
const UList<Type>& fld,
List<Type>& allFld,
const int tag = UPstream::msgType(),
const Pstream::commsTypes = Pstream::commsTypes::nonBlocking
);
//- Collect indirect data in processor order on master
// \note offsets needed on master only.
template<class Type, class Addr>
static void gather
(
const labelUList& offsets,
const label comm, //!< communicator
const UList<int>& procIDs,
const IndirectListBase<Type, Addr>& fld,
List<Type>& allFld,
const int tag = UPstream::msgType(),
const Pstream::commsTypes = Pstream::commsTypes::scheduled
);
// Misc low-level gather routines
//- Inplace collect in processor order on master (== procIDs[0]).
// \note offsets needed on master only.
template<class ProcIDsContainer, class Type>
static void gather
(
const labelUList& offsets,
const label comm, //!< communicator
const ProcIDsContainer& procIDs,
List<Type>& fld,
const int tag = UPstream::msgType(),
const Pstream::commsTypes = Pstream::commsTypes::nonBlocking
);
//- Collect data in processor order on master (== procIDs[0]).
// \note the globalIndex offsets needed on master only.
template<class ProcIDsContainer, class Type>
void gather
(
const label comm, //!< communicator
const ProcIDsContainer& procIDs,
const UList<Type>& fld,
List<Type>& allFld,
const int tag = UPstream::msgType(),
const Pstream::commsTypes commsType =
Pstream::commsTypes::nonBlocking
) const
//- Inplace collect in processor order on master (== procIDs[0]).
// Note: adjust naming?
template<class ProcIDsContainer, class Type>
static void gather
(
const labelUList& offsets, //!< offsets (master only)
const label comm, //!< communicator
const ProcIDsContainer& procIDs,
List<Type>& fld, //!< in/out field
const int tag = UPstream::msgType(),
const UPstream::commsTypes ct = UPstream::commsTypes::nonBlocking
)
{
List<Type> allData;
gather(offsets, comm, procIDs, fld, allData, tag, ct);
if (Pstream::myProcNo(comm) == procIDs[0])
{
gather(offsets_, comm, procIDs, fld, allFld, tag, commsType);
fld.transfer(allData);
}
}
//- Inplace collect in processor order on master (== procIDs[0]).
// \note the globalIndex offsets needed on master only.
template<class ProcIDsContainer, class Type>
void gather
(
const label comm, //!< communicator
const ProcIDsContainer& procIDs,
List<Type>& fld,
const int tag = UPstream::msgType(),
const Pstream::commsTypes commsType =
Pstream::commsTypes::nonBlocking
) const
{
gather(offsets_, comm, procIDs, fld, tag, commsType);
}
//- Collect data in processor order on master (== procIDs[0]).
// \note the globalIndex offsets needed on master only.
template<class ProcIDsContainer, class Type>
void gather
(
const label comm, //!< communicator
const ProcIDsContainer& procIDs,
const UList<Type>& fld, //!< input field
List<Type>& allFld, //! output field (master only)
const int tag = UPstream::msgType(),
const UPstream::commsTypes ct = UPstream::commsTypes::nonBlocking
) const
{
gather(offsets_, comm, procIDs, fld, allFld, tag, ct);
}
//- Inplace collect in processor order on master (== procIDs[0]).
// \note the globalIndex offsets needed on master only.
// Note: adjust naming?
template<class ProcIDsContainer, class Type>
void gather
(
const label comm, //!< communicator
const ProcIDsContainer& procIDs,
List<Type>& fld, //!< in/out field
const int tag = UPstream::msgType(),
const UPstream::commsTypes ct = UPstream::commsTypes::nonBlocking
) const
{
gather(offsets_, comm, procIDs, fld, tag, ct);
}
// Gather
//- Collect data in processor order on master
//- (in serial: performs a simple copy).
// Communication with default/specified communicator, message tag.
template<class Type>
void gather
(
const UList<Type>& sendData,
List<Type>& allData,
const int tag = UPstream::msgType(),
const Pstream::commsTypes = Pstream::commsTypes::nonBlocking,
const label comm = UPstream::worldComm //!< communicator
) const;
//- Collect data in processor order on master
//- (in serial: performs a simple copy).
// Communication with default/specified communicator, message tag.
template<class Type>
void gather
(
const UList<Type>& sendData,
List<Type>& allData,
const int tag = UPstream::msgType(),
const UPstream::commsTypes = UPstream::commsTypes::nonBlocking,
const label comm = UPstream::worldComm //!< communicator
) const;
//- Collect data indirectly in processor order on master
//- (in serial: performs a simple copy).
// Communication with default/specified communicator, message tag.
template<class Type, class Addr>
void gather
(
const IndirectListBase<Type, Addr>& sendData,
List<Type>& allData,
const int tag = UPstream::msgType(),
const Pstream::commsTypes = Pstream::commsTypes::scheduled,
const label comm = UPstream::worldComm //!< communicator
) const;
//- Collect data indirectly in processor order on master
//- (in serial: performs a simple copy).
// Communication with default/specified communicator, message tag.
template<class Type, class Addr>
void gather
(
const IndirectListBase<Type, Addr>& sendData,
List<Type>& allData,
const int tag = UPstream::msgType(),
const UPstream::commsTypes = UPstream::commsTypes::nonBlocking,
const label comm = UPstream::worldComm //!< communicator
) const;
//- Collect data in processor order on master
//- (in serial: performs a simple copy).
// Communication with default/specified communicator, message tag.
//
// \return output (master), zero-sized on non-master
template<class Type, class OutputContainer = List<Type>>
OutputContainer gather
(
const UList<Type>& sendData,
const int tag = UPstream::msgType(),
const Pstream::commsTypes = Pstream::commsTypes::nonBlocking,
const label comm = UPstream::worldComm //!< communicator
) const;
//- Collect data in processor order on master
//- (in serial: performs a simple copy).
// Communication with default/specified communicator, message tag.
//
// \return output (master), zero-sized on non-master
template<class Type, class OutputContainer = List<Type>>
OutputContainer gather
(
const UList<Type>& sendData,
const int tag = UPstream::msgType(),
const UPstream::commsTypes = UPstream::commsTypes::nonBlocking,
const label comm = UPstream::worldComm //!< communicator
) const;
//- Collect data indirectly in processor order on master.
// Communication with default/specified communicator, message tag.
//
// \return output (master), zero-sized on non-master
template<class Type, class Addr, class OutputContainer = List<Type>>
OutputContainer gather
(
const IndirectListBase<Type, Addr>& sendData,
const int tag = UPstream::msgType(),
const Pstream::commsTypes = Pstream::commsTypes::scheduled,
const label comm = UPstream::worldComm //!< communicator
) const;
//- Collect data indirectly in processor order on master.
// Communication with default/specified communicator, message tag.
//
// \return output (master), zero-sized on non-master
template<class Type, class Addr, class OutputContainer = List<Type>>
OutputContainer gather
(
const IndirectListBase<Type, Addr>& sendData,
const int tag = UPstream::msgType(),
const UPstream::commsTypes = UPstream::commsTypes::nonBlocking,
const label comm = UPstream::worldComm //!< communicator
) const;
//- Inplace collect data in processor order on master
//- (in serial: a no-op).
// Communication with default/specified communicator, message tag.
// After the gather, the field is zero-sized on the slaves.
template<class Type>
void gatherInplace
(
List<Type>& fld,
const int tag = UPstream::msgType(),
const Pstream::commsTypes = Pstream::commsTypes::nonBlocking,
const label comm = UPstream::worldComm //!< communicator
) const;
//- Inplace collect data in processor order on master
//- (in serial: a no-op).
// Communication with default/specified communicator, message tag.
// After the gather, the field is zero-sized on the slaves.
template<class Type>
void gatherInplace
(
List<Type>& fld,
const int tag = UPstream::msgType(),
const UPstream::commsTypes = UPstream::commsTypes::nonBlocking,
const label comm = UPstream::worldComm //!< communicator
) const;
//- Collect \em contiguous data using a MPI_Gatherv call
//- (in serial: performs a simple copy).
// Communication with default/specified communicator.
// \attention The nProcs for globalIndex and communicator
// must match!!
//
// The allData is output (master), zero-sized on non-master
template<class Type, class OutputContainer = List<Type>>
void mpiGather
(
const UList<Type>& sendData,
OutputContainer& allData,
const label comm = UPstream::worldComm //!< communicator
) const;
//- Use MPI_Gatherv call for contiguous data when possible
//- (in serial: performs a simple copy).
// Communication with default/specified communicator.
// \attention The nProcs for globalIndex and communicator
// must match!!
//
// The allData is output (master), zero-sized on non-master
template<class Type, class OutputContainer = List<Type>>
void mpiGather
(
const UList<Type>& sendData,
OutputContainer& allData,
const label comm = UPstream::worldComm, //!< communicator
//- Collect \em contiguous data using a MPI_Gatherv call
//- (in serial: performs a simple copy).
// Communication with default/specified communicator.
// \attention The nProcs for globalIndex and communicator
// must match!!
//
// \return output (master), zero-sized on non-master
template<class Type, class OutputContainer = List<Type>>
OutputContainer mpiGather
(
const UList<Type>& sendData,
const label comm = UPstream::worldComm //!< communicator
) const;
// For fallback routines:
const UPstream::commsTypes = UPstream::commsTypes::nonBlocking,
const int tag = UPstream::msgType()
) const;
//- Inplace collect \em contiguous data using a MPI_Gatherv call
//- (in serial: a no-op).
// Communication with default/specified communicator.
// \attention The nProcs for globalIndex and communicator
// must match!!
//
// After the gather, the field is zero-sized on non-master.
template<class Type>
void mpiGatherInplace
(
List<Type>& fld,
const label comm = UPstream::worldComm //!< communicator
) const;
//- Use MPI_Gatherv call for contiguous data when possible
//- (in serial: performs a simple copy).
// Communication with default/specified communicator.
// \attention The nProcs for globalIndex and communicator
// must match!!
//
// \return output (master), zero-sized on non-master
template<class Type, class OutputContainer = List<Type>>
OutputContainer mpiGather
(
const UList<Type>& sendData,
const label comm = UPstream::worldComm, //!< communicator
// For fallback routines:
const UPstream::commsTypes = UPstream::commsTypes::nonBlocking,
const int tag = UPstream::msgType()
) const;
//- Use MPI_Gatherv call to inplace collect contiguous data
//- when possible.
//- (in serial: a no-op).
// Communication with default/specified communicator.
// \attention The nProcs for globalIndex and communicator
// must match!!
//
// After the gather, the field is zero-sized on non-master.
template<class Type>
void mpiGatherInplace
(
List<Type>& fld,
const label comm = UPstream::worldComm, //!< communicator
// For fallback routines:
const UPstream::commsTypes = UPstream::commsTypes::nonBlocking,
const int tag = UPstream::msgType()
) const;
// Gather Operations
//- Collect \em contiguous data using a MPI_Gatherv call
//- (in serial: performs a simple copy).
// Communication with default/specified communicator.
//
// The allData is output (master), zero-sized on non-master
template<class Type, class OutputContainer = List<Type>>
static void mpiGatherOp
(
const UList<Type>& sendData,
OutputContainer& allData,
const label comm = UPstream::worldComm //!< communicator
);
//- Use MPI_Gatherv call to collect contiguous data when possible
//- (in serial: performs a simple copy).
// Communication with default/specified communicator.
//
// The allData is output (master), zero-sized on non-master
template<class Type, class OutputContainer = List<Type>>
static void mpiGatherOp
(
const UList<Type>& sendData,
OutputContainer& allData,
const label comm = UPstream::worldComm, //!< communicator
//- Collect \em contiguous data using a MPI_Gatherv call
//- (in serial: performs a simple copy).
// Communication with default/specified communicator.
//
// \return output (master), zero-sized on non-master
template<class Type, class OutputContainer = List<Type>>
static OutputContainer mpiGatherOp
(
const UList<Type>& sendData,
const label comm = UPstream::worldComm //!< communicator
);
// For fallback routines:
const UPstream::commsTypes = UPstream::commsTypes::nonBlocking,
const int tag = UPstream::msgType()
);
//- Inplace collect \em contiguous data using a MPI_Gatherv call
//- (in serial: a no-op).
// Communication with default/specified communicator.
//
// After the gather, the field is zero-sized on non-master.
template<class Type>
static void mpiGatherInplaceOp
(
List<Type>& fld,
const label comm = UPstream::worldComm //!< communicator
);
//- Use MPI_Gatherv call to collect contiguous data when possible
//- (in serial: performs a simple copy).
// Communication with default/specified communicator.
//
// \return output (master), zero-sized on non-master
template<class Type, class OutputContainer = List<Type>>
static OutputContainer mpiGatherOp
(
const UList<Type>& sendData,
const label comm = UPstream::worldComm, //!< communicator
//- Collect data in processor order on master
//- (in serial: performs a simple copy).
// Communication with default/specified communicator, message tag.
//
// The allFld is output (master), zero-sized on non-master
template<class Type>
static void gatherOp
(
const UList<Type>& sendData,
List<Type>& allData,
const int tag = UPstream::msgType(),
const Pstream::commsTypes = Pstream::commsTypes::nonBlocking,
const label comm = UPstream::worldComm //!< communicator
);
// For fallback routines:
const UPstream::commsTypes = UPstream::commsTypes::nonBlocking,
const int tag = UPstream::msgType()
);
//- Collect data in processor order on master
//- (in serial: performs a simple copy).
// Communication with default/specified communicator, message tag.
//
// The allFld is output (master), zero-sized on non-master
template<class Type, class Addr>
static void gatherOp
(
const IndirectListBase<Type, Addr>& sendData,
List<Type>& allData,
const int tag = UPstream::msgType(),
const Pstream::commsTypes = Pstream::commsTypes::nonBlocking,
const label comm = UPstream::worldComm //!< communicator
);
//- Use MPI_Gatherv call to inplace collect contiguous data
//- when possible.
//- (in serial: a no-op).
// Communication with default/specified communicator.
//
// After the gather, the field is zero-sized on non-master.
template<class Type>
static void mpiGatherInplaceOp
(
List<Type>& fld,
const label comm = UPstream::worldComm, //!< communicator
//- Collect and return data in processor order on master
//- (in serial: performs a simple copy).
// Communication with default/specified communicator, message tag.
//
// \return output (master), zero-sized on non-master
template<class Type, class OutputContainer = List<Type>>
static OutputContainer gatherOp
(
const UList<Type>& sendData,
const int tag = UPstream::msgType(),
const Pstream::commsTypes = Pstream::commsTypes::nonBlocking,
const label comm = UPstream::worldComm //!< communicator
);
// For fallback routines:
const UPstream::commsTypes = UPstream::commsTypes::nonBlocking,
const int tag = UPstream::msgType()
);
//- Collect and return data in processor order on master
//- (in serial: performs a simple copy).
// Communication with default/specified communicator, message tag.
//
// \return output (master), zero-sized on non-master
template<class Type, class Addr, class OutputContainer = List<Type>>
static OutputContainer gatherOp
(
const IndirectListBase<Type, Addr>& sendData,
const int tag = UPstream::msgType(),
const Pstream::commsTypes = Pstream::commsTypes::nonBlocking,
const label comm = UPstream::worldComm //!< communicator
);
//- Collect data in processor order on master
//- (in serial: performs a simple copy).
// Communication with default/specified communicator, message tag.
//
// The allFld is output (master), zero-sized on non-master
template<class Type>
static void gatherOp
(
const UList<Type>& sendData,
List<Type>& allData,
const int tag = UPstream::msgType(),
const UPstream::commsTypes = UPstream::commsTypes::nonBlocking,
const label comm = UPstream::worldComm //!< communicator
);
//- Inplace collect data in processor order on master
//- (in serial: a no-op).
// Communication with default/specified communicator, message tag.
//
// After the gather, the field is zero-sized on the slaves.
template<class Type>
static void gatherInplaceOp
(
List<Type>& fld,
const int tag = UPstream::msgType(),
const Pstream::commsTypes = Pstream::commsTypes::nonBlocking,
const label comm = UPstream::worldComm //!< communicator
);
//- Collect data in processor order on master
//- (in serial: performs a simple copy).
// Communication with default/specified communicator, message tag.
//
// The allFld is output (master), zero-sized on non-master
template<class Type, class Addr>
static void gatherOp
(
const IndirectListBase<Type, Addr>& sendData,
List<Type>& allData,
const int tag = UPstream::msgType(),
const UPstream::commsTypes = UPstream::commsTypes::nonBlocking,
const label comm = UPstream::worldComm //!< communicator
);
//- Collect and return data in processor order on master
//- (in serial: performs a simple copy).
// Communication with default/specified communicator, message tag.
//
// \return output (master), zero-sized on non-master
template<class Type, class OutputContainer = List<Type>>
static OutputContainer gatherOp
(
const UList<Type>& sendData,
const int tag = UPstream::msgType(),
const UPstream::commsTypes = UPstream::commsTypes::nonBlocking,
const label comm = UPstream::worldComm //!< communicator
);
//- Collect and return data in processor order on master
//- (in serial: performs a simple copy).
// Communication with default/specified communicator, message tag.
//
// \return output (master), zero-sized on non-master
template<class Type, class Addr, class OutputContainer = List<Type>>
static OutputContainer gatherOp
(
const IndirectListBase<Type, Addr>& sendData,
const int tag = UPstream::msgType(),
const UPstream::commsTypes = UPstream::commsTypes::nonBlocking,
const label comm = UPstream::worldComm //!< communicator
);
//- Inplace collect data in processor order on master
//- (in serial: a no-op).
// Communication with default/specified communicator, message tag.
//
// After the gather, the field is zero-sized on the slaves.
template<class Type>
static void gatherInplaceOp
(
List<Type>& fld,
const int tag = UPstream::msgType(),
const UPstream::commsTypes = UPstream::commsTypes::nonBlocking,
const label comm = UPstream::worldComm //!< communicator
);
// Scatter
//- Distribute data in processor order.
// Requires fld to be correctly sized!
// Communication with default/specified communicator, message tag.
// \note offsets needed on master only.
template<class ProcIDsContainer, class Type>
static void scatter
(
const labelUList& offsets,
const label comm, //!< communicator
const ProcIDsContainer& procIDs,
const UList<Type>& allFld,
UList<Type>& fld,
const int tag = UPstream::msgType(),
const Pstream::commsTypes = Pstream::commsTypes::nonBlocking
);
//- Distribute data in processor order.
// Requires fld to be correctly sized!
// Communication with default/specified communicator, message tag.
template<class ProcIDsContainer, class Type>
static void scatter
(
const labelUList& offsets, //!< offsets (master only)
const label comm, //!< communicator
const ProcIDsContainer& procIDs,
const UList<Type>& allFld,
UList<Type>& fld,
const int tag = UPstream::msgType(),
const UPstream::commsTypes = UPstream::commsTypes::nonBlocking
);
//- Distribute data in processor order.
// Requires fld to be correctly sized!
// Communication with default/specified communicator, message tag.
// \note the globalIndex offsets needed on master only.
template<class ProcIDsContainer, class Type>
void scatter
(
const label comm, //!< communicator
const ProcIDsContainer& procIDs,
const UList<Type>& allFld,
UList<Type>& fld,
const int tag = UPstream::msgType(),
const Pstream::commsTypes commsType =
Pstream::commsTypes::nonBlocking
) const
{
scatter(offsets_, comm, procIDs, allFld, fld, tag, commsType);
}
//- Distribute data in processor order.
// Requires fld to be correctly sized!
// Communication with default/specified communicator, message tag.
// \note the globalIndex offsets needed on master only.
template<class ProcIDsContainer, class Type>
void scatter
(
const label comm, //!< communicator
const ProcIDsContainer& procIDs,
const UList<Type>& allFld,
UList<Type>& fld,
const int tag = UPstream::msgType(),
const UPstream::commsTypes ct =
UPstream::commsTypes::nonBlocking
) const
{
scatter(offsets_, comm, procIDs, allFld, fld, tag, ct);
}
//- Distribute data in processor order.
// Requires fld to be correctly sized!
// Communication with default/specified communicator, message tag.
// \note the globalIndex offsets needed on master only.
template<class Type>
void scatter
(
const UList<Type>& allData,
UList<Type>& localData,
const int tag = UPstream::msgType(),
const Pstream::commsTypes = Pstream::commsTypes::nonBlocking,
const label comm = UPstream::worldComm //!< communicator
) const;
//- Distribute data in processor order.
// Requires fld to be correctly sized!
// Communication with default/specified communicator, message tag.
// \note the globalIndex offsets needed on master only.
template<class Type>
void scatter
(
const UList<Type>& allData,
UList<Type>& localData,
const int tag = UPstream::msgType(),
const UPstream::commsTypes = UPstream::commsTypes::nonBlocking,
const label comm = UPstream::worldComm //!< communicator
) const;
//- Distribute data in processor order
//- (in serial: performs a simple copy).
// Communication with default/specified communicator, message tag.
// \note the globalIndex offsets needed on master only.
template<class Type, class OutputContainer = List<Type>>
OutputContainer scatter
(
const UList<Type>& allData,
const int tag = UPstream::msgType(),
const Pstream::commsTypes = Pstream::commsTypes::nonBlocking,
const label comm = UPstream::worldComm //!< communicator
) const;
//- Distribute data in processor order
//- (in serial: performs a simple copy).
// Communication with default/specified communicator, message tag.
// \note the globalIndex offsets needed on master only.
template<class Type, class OutputContainer = List<Type>>
OutputContainer scatter
(
const UList<Type>& allData,
const int tag = UPstream::msgType(),
const UPstream::commsTypes = UPstream::commsTypes::nonBlocking,
const label comm = UPstream::worldComm //!< communicator
) const;
// Scatter
//- Get (potentially remote) data.
//- Elements required given as global indices
// Communication with default/specified communicator, message tag.
template<class Type, class CombineOp>
void get
(
List<Type>& allFld,
const labelUList& globalIds,
const CombineOp& cop,
const label comm = UPstream::worldComm, //!< communicator
const int tag = UPstream::msgType()
) const;
// IOstream Operators

View File

@ -64,6 +64,90 @@ Foam::globalIndex::calcListOffsets
}
template<class ProcIDsContainer, class Type>
void Foam::globalIndex::gatherValues
(
const label comm,
const ProcIDsContainer& procIDs,
const Type& localValue,
List<Type>& allValues,
const int tag,
const UPstream::commsTypes preferredCommsType
)
{
// low-level: no parRun guard
// Automatically change from nonBlocking to scheduled for
// non-contiguous data.
const UPstream::commsTypes commsType =
(
(
!is_contiguous<Type>::value
&& UPstream::commsTypes::nonBlocking == preferredCommsType
)
? UPstream::commsTypes::scheduled
: preferredCommsType
);
const label startOfRequests = UPstream::nRequests();
if (UPstream::myProcNo(comm) == procIDs[0])
{
allValues.resize_nocopy(procIDs.size());
allValues[0] = localValue;
for (label i = 1; i < procIDs.size(); ++i)
{
if (is_contiguous<Type>::value)
{
IPstream::read
(
commsType,
procIDs[i],
reinterpret_cast<char*>(&allValues[i]),
sizeof(Type),
tag,
comm
);
}
else
{
IPstream fromProc(commsType, procIDs[i], 0, tag, comm);
fromProc >> allValues[i];
}
}
}
else
{
allValues.clear(); // safety: zero-size on non-master
if (is_contiguous<Type>::value)
{
OPstream::write
(
commsType,
procIDs[0],
reinterpret_cast<const char*>(&localValue),
sizeof(Type),
tag,
comm
);
}
else
{
OPstream toMaster(commsType, procIDs[0], 0, tag, comm);
toMaster << localValue;
}
}
if (commsType == UPstream::commsTypes::nonBlocking)
{
// Wait for all to finish
UPstream::waitRequests(startOfRequests);
}
}
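A hedged usage sketch for this new static method, mirroring the GAMGAgglomeration call sites above (the myValue payload is a placeholder):

// Sketch: gather one value per rank onto procIDs[0]
const label comm = UPstream::worldComm;
const List<int>& procIDs = UPstream::procID(comm);
const label myValue = UPstream::myProcNo(comm);  // placeholder payload

labelList allValues;
globalIndex::gatherValues(comm, procIDs, myValue, allValues);
// allValues: one entry per procID on the master, zero-sized elsewhere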
template<class ProcIDsContainer, class Type>
void Foam::globalIndex::gather
(
@ -73,7 +157,7 @@ void Foam::globalIndex::gather
const UList<Type>& fld,
List<Type>& allFld,
const int tag,
const Pstream::commsTypes preferredCommsType
const UPstream::commsTypes preferredCommsType
)
{
// low-level: no parRun guard
@ -156,7 +240,7 @@ void Foam::globalIndex::gather
}
}
if (commsType == Pstream::commsTypes::nonBlocking)
if (commsType == UPstream::commsTypes::nonBlocking)
{
// Wait for all to finish
UPstream::waitRequests(startOfRequests);
@ -173,7 +257,7 @@ void Foam::globalIndex::gather
const IndirectListBase<Type, Addr>& fld,
List<Type>& allFld,
const int tag,
const Pstream::commsTypes preferredCommsType
const UPstream::commsTypes preferredCommsType
)
{
// low-level: no parRun guard
@ -190,6 +274,8 @@ void Foam::globalIndex::gather
: preferredCommsType
);
const label startOfRequests = UPstream::nRequests();
if (Pstream::myProcNo(comm) == procIDs[0])
{
allFld.resize_nocopy(off.last()); // == totalSize()
@ -231,29 +317,11 @@ void Foam::globalIndex::gather
toMaster << fld;
}
}
}
template<class ProcIDsContainer, class Type>
void Foam::globalIndex::gather
(
const labelUList& off, // needed on master only
const label comm,
const ProcIDsContainer& procIDs,
List<Type>& fld,
const int tag,
const Pstream::commsTypes commsType
)
{
// low-level: no parRun guard
List<Type> allData;
gather(off, comm, procIDs, fld, allData, tag, commsType);
if (Pstream::myProcNo(comm) == procIDs[0])
if (commsType == UPstream::commsTypes::nonBlocking)
{
fld.transfer(allData);
// Wait for all to finish
UPstream::waitRequests(startOfRequests);
}
}
@ -266,13 +334,19 @@ void Foam::globalIndex::gather
const UList<Type>& sendData,
List<Type>& allData,
const int tag,
const Pstream::commsTypes commsType,
const UPstream::commsTypes commsType,
const label comm
) const
{
if (UPstream::parRun())
if (!UPstream::parRun())
{
gather
// Serial: direct copy
allData = sendData;
return;
}
{
globalIndex::gather
(
offsets_, // needed on master only
comm,
@ -287,11 +361,6 @@ void Foam::globalIndex::gather
allData.clear(); // safety: zero-size on non-master
}
}
else
{
// Serial: direct copy
allData = sendData;
}
}
@ -301,13 +370,19 @@ void Foam::globalIndex::gather
const IndirectListBase<Type, Addr>& sendData,
List<Type>& allData,
const int tag,
const Pstream::commsTypes commsType,
const UPstream::commsTypes commsType,
const label comm
) const
{
if (UPstream::parRun())
if (!UPstream::parRun())
{
gather
// Serial: direct copy
allData = sendData;
return;
}
{
globalIndex::gather
(
offsets_, // needed on master only
comm,
@ -322,11 +397,6 @@ void Foam::globalIndex::gather
allData.clear(); // safety: zero-size on non-master
}
}
else
{
// Serial: direct copy
allData = List<Type>(sendData);
}
}
@ -335,7 +405,7 @@ OutputContainer Foam::globalIndex::gather
(
const UList<Type>& sendData,
const int tag,
const Pstream::commsTypes commsType,
const UPstream::commsTypes commsType,
const label comm
) const
{
@ -350,7 +420,7 @@ OutputContainer Foam::globalIndex::gather
(
const IndirectListBase<Type, Addr>& sendData,
const int tag,
const Pstream::commsTypes commsType,
const UPstream::commsTypes commsType,
const label comm
) const
{
@ -365,23 +435,14 @@ void Foam::globalIndex::gatherInplace
(
List<Type>& fld,
const int tag,
const Pstream::commsTypes commsType,
const UPstream::commsTypes commsType,
const label comm
) const
{
if (UPstream::parRun())
{
List<Type> allData;
gather
(
comm,
UPstream::procID(comm),
fld,
allData,
tag,
commsType
);
gather(fld, allData, tag, commsType, comm);
if (UPstream::master(comm))
{
@ -401,7 +462,10 @@ void Foam::globalIndex::mpiGather
(
const UList<Type>& sendData,
OutputContainer& allData,
const label comm
const label comm,
const UPstream::commsTypes commsType,
const int tag
) const
{
if (!UPstream::parRun())
@ -410,57 +474,150 @@ void Foam::globalIndex::mpiGather
allData = sendData;
return;
}
if (!is_contiguous<Type>::value)
{
FatalErrorInFunction
<< "Cannot be called for non-contiguous data" << nl
<< abort(FatalError);
}
auto nSendBytes = sendData.size_bytes();
// MPI_Gatherv requires contiguous data, but a byte-wise transfer can
// quickly exceed the 'int' limits used for MPI sizes/offsets.
// Thus gather label/scalar components when possible to increase the
// effective size limit.
//
// Note: cannot rely on pTraits (cmptType, nComponents) since this method
// needs to compile (and work) even with things like strings etc.
List<int> recvSizes;
// Single char ad hoc "enum":
// - b(yte): gather bytes
// - f(loat): gather scalar components
// - i(nt): gather label components
// - 0: gather with Pstream read/write etc.
List<int> recvCounts;
List<int> recvOffsets;
if (UPstream::master(comm))
char dataMode(0);
int nCmpts(0);
if (is_contiguous<Type>::value)
{
const globalIndex& globalAddr = *this;
// Must be the same as Pstream::nProcs(comm), at least on master!!
const label nproc = globalAddr.nProcs();
// Allow request of 0 entries to be sent on master
if (!globalAddr.localSize(0))
if (is_contiguous_scalar<Type>::value)
{
nSendBytes = 0;
dataMode = 'f';
nCmpts = static_cast<int>(sizeof(Type)/sizeof(scalar));
}
else if (is_contiguous_label<Type>::value)
{
dataMode = 'i';
nCmpts = static_cast<int>(sizeof(Type)/sizeof(label));
}
else
{
dataMode = 'b';
nCmpts = static_cast<int>(sizeof(Type));
}
allData.resize_nocopy(globalAddr.totalSize());
recvSizes.resize(nproc);
recvOffsets.resize(nproc+1);
for (label proci = 0; proci < nproc; ++proci)
// Offsets must fit into int
if (UPstream::master(comm))
{
recvSizes[proci] = globalAddr.localSize(proci) * sizeof(Type);
recvOffsets[proci] = globalAddr.localStart(proci) * sizeof(Type);
const globalIndex& globalAddr = *this;
if (globalAddr.totalSize() > (INT_MAX/nCmpts))
{
// Offsets do not fit into int - revert to manual.
dataMode = 0;
}
else
{
// Must be same as Pstream::nProcs(comm), at least on master!
const label nproc = globalAddr.nProcs();
allData.resize_nocopy(globalAddr.totalSize());
recvCounts.resize(nproc);
recvOffsets.resize(nproc+1);
for (label proci = 0; proci < nproc; ++proci)
{
recvCounts[proci] = globalAddr.localSize(proci)*nCmpts;
recvOffsets[proci] = globalAddr.localStart(proci)*nCmpts;
}
recvOffsets[nproc] = globalAddr.totalSize()*nCmpts;
// Assign local data directly
recvCounts[0] = 0; // ie, ignore for MPI_Gatherv
SubList<Type>(allData, globalAddr.range(0)) =
SubList<Type>(sendData, globalAddr.range(0));
}
}
recvOffsets[nproc] = globalAddr.totalSize() * sizeof(Type);
// Consistent information for everyone
UPstream::broadcast(&dataMode, 1, comm);
}
else
// Dispatch
switch (dataMode)
{
case 'b': // Byte-wise
{
UPstream::gather
(
sendData.cdata_bytes(),
sendData.size_bytes(),
allData.data_bytes(),
recvCounts,
recvOffsets,
comm
);
break;
}
case 'f': // Float (scalar) components
{
typedef scalar cmptType;
UPstream::gather
(
reinterpret_cast<const cmptType*>(sendData.cdata()),
(sendData.size()*nCmpts),
reinterpret_cast<cmptType*>(allData.data()),
recvCounts,
recvOffsets,
comm
);
break;
}
case 'i': // Int (label) components
{
typedef label cmptType;
UPstream::gather
(
reinterpret_cast<const cmptType*>(sendData.cdata()),
(sendData.size()*nCmpts),
reinterpret_cast<cmptType*>(allData.data()),
recvCounts,
recvOffsets,
comm
);
break;
}
default: // Regular (manual) gathering
{
globalIndex::gather
(
offsets_, // needed on master only
comm,
UPstream::procID(comm),
sendData,
allData,
tag,
commsType
);
break;
}
}
if (!UPstream::master(comm))
{
allData.clear(); // safety: zero-size on non-master
}
UPstream::gather
(
sendData.cdata_bytes(),
nSendBytes,
allData.data_bytes(),
recvSizes,
recvOffsets,
comm
);
}
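The broadcast of dataMode is the consistency point: only the master can see that totalSize()*nCmpts would overflow 'int', yet every rank must take the same branch. A condensed sketch of the decision, simplified from the code above:

// Sketch: choose a transfer mode, then make it globally consistent
char mode = 0;  // 0: manual Pstream gather
if (is_contiguous<Type>::value)
{
    if (is_contiguous_scalar<Type>::value) mode = 'f';
    else if (is_contiguous_label<Type>::value) mode = 'i';
    else mode = 'b';
    // master demotes to 0 when counts/offsets would not fit in 'int'
}
UPstream::broadcast(&mode, 1, comm);  // everyone dispatches identically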
@ -468,11 +625,14 @@ template<class Type, class OutputContainer>
OutputContainer Foam::globalIndex::mpiGather
(
const UList<Type>& sendData,
const label comm
const label comm,
const UPstream::commsTypes commsType,
const int tag
) const
{
OutputContainer allData;
mpiGather(sendData, allData, comm);
mpiGather(sendData, allData, comm, commsType, tag);
return allData;
}
@ -481,13 +641,16 @@ template<class Type>
void Foam::globalIndex::mpiGatherInplace
(
List<Type>& fld,
const label comm
const label comm,
const UPstream::commsTypes commsType,
const int tag
) const
{
if (UPstream::parRun())
{
List<Type> allData;
mpiGather(fld, allData, comm);
mpiGather(fld, allData, comm, commsType, tag);
if (UPstream::master(comm))
{
@ -507,14 +670,17 @@ void Foam::globalIndex::mpiGatherOp
(
const UList<Type>& sendData,
OutputContainer& allData,
const label comm
const label comm,
const UPstream::commsTypes commsType,
const int tag
)
{
if (UPstream::parRun())
{
// Gather sizes - only needed on master
globalIndex(sendData.size(), globalIndex::gatherOnly{}, comm)
.mpiGather(sendData, allData, comm);
.mpiGather(sendData, allData, comm, commsType, tag);
}
else
{
@ -528,11 +694,14 @@ template<class Type, class OutputContainer>
OutputContainer Foam::globalIndex::mpiGatherOp
(
const UList<Type>& sendData,
const label comm
const label comm,
const UPstream::commsTypes commsType,
const int tag
)
{
OutputContainer allData;
mpiGatherOp(sendData, allData, comm);
mpiGatherOp(sendData, allData, comm, commsType, tag);
return allData;
}
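A usage sketch for the static Op variants, which build the (master-only) sizing on the fly instead of needing a pre-built globalIndex (the payload is a placeholder):

// Sketch: one-shot gather of contiguous data
labelList localIds(identity(3, UPstream::myProcNo()));  // placeholder
labelList allIds(globalIndex::mpiGatherOp(localIds));   // master only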
@ -541,13 +710,16 @@ template<class Type>
void Foam::globalIndex::mpiGatherInplaceOp
(
List<Type>& fld,
const label comm
const label comm,
const UPstream::commsTypes commsType,
const int tag
)
{
if (UPstream::parRun())
{
List<Type> allData;
mpiGatherOp(fld, allData, comm);
mpiGatherOp(fld, allData, comm, commsType, tag);
if (UPstream::master(comm))
{
@ -568,7 +740,7 @@ void Foam::globalIndex::gatherOp
const UList<Type>& sendData,
List<Type>& allData,
const int tag,
const Pstream::commsTypes commsType,
const UPstream::commsTypes commsType,
const label comm
)
{
@ -592,7 +764,7 @@ void Foam::globalIndex::gatherOp
const IndirectListBase<Type, Addr>& sendData,
List<Type>& allData,
const int tag,
const Pstream::commsTypes commsType,
const UPstream::commsTypes commsType,
const label comm
)
{
@ -615,7 +787,7 @@ OutputContainer Foam::globalIndex::gatherOp
(
const UList<Type>& sendData,
const int tag,
const Pstream::commsTypes commsType,
const UPstream::commsTypes commsType,
const label comm
)
{
@ -630,7 +802,7 @@ OutputContainer Foam::globalIndex::gatherOp
(
const IndirectListBase<Type, Addr>& sendData,
const int tag,
const Pstream::commsTypes commsType,
const UPstream::commsTypes commsType,
const label comm
)
{
@ -645,7 +817,7 @@ void Foam::globalIndex::gatherInplaceOp
(
List<Type>& fld,
const int tag,
const Pstream::commsTypes commsType,
const UPstream::commsTypes commsType,
const label comm
)
{
@ -668,7 +840,7 @@ void Foam::globalIndex::scatter
const UList<Type>& allFld,
UList<Type>& fld,
const int tag,
const Pstream::commsTypes preferredCommsType
const UPstream::commsTypes preferredCommsType
)
{
// low-level: no parRun guard
@ -685,10 +857,6 @@ void Foam::globalIndex::scatter
: preferredCommsType
);
// FUTURE:
// could decide which procs will receive data and use mpiScatter
// to distribute. Could then skip send/receive for those procs...
const label startOfRequests = UPstream::nRequests();
if (Pstream::myProcNo(comm) == procIDs[0])
@ -757,7 +925,7 @@ void Foam::globalIndex::scatter
}
}
if (commsType == Pstream::commsTypes::nonBlocking)
if (commsType == UPstream::commsTypes::nonBlocking)
{
// Wait for all to finish
UPstream::waitRequests(startOfRequests);
@ -771,7 +939,7 @@ void Foam::globalIndex::scatter
const UList<Type>& allData,
UList<Type>& localData,
const int tag,
const Pstream::commsTypes commsType,
const UPstream::commsTypes commsType,
const label comm
) const
{
@ -802,7 +970,7 @@ OutputContainer Foam::globalIndex::scatter
(
const UList<Type>& allData,
const int tag,
const Pstream::commsTypes commsType,
const UPstream::commsTypes commsType,
const label comm
) const
{
@ -853,7 +1021,7 @@ void Foam::globalIndex::get
);
// Send local indices to individual processors as local index
PstreamBuffers sendBufs(Pstream::commsTypes::nonBlocking, tag, comm);
PstreamBuffers sendBufs(UPstream::commsTypes::nonBlocking, tag, comm);
for (const auto proci : validBins)
{
@ -870,7 +1038,7 @@ void Foam::globalIndex::get
sendBufs.finishedSends();
PstreamBuffers returnBufs(Pstream::commsTypes::nonBlocking, tag, comm);
PstreamBuffers returnBufs(UPstream::commsTypes::nonBlocking, tag, comm);
for (const int proci : sendBufs.allProcs())
{