ENH: globalIndex with direct gather/broadcast
- less communication than gatherList/scatterList

ENH: refine send granularity in Pstream::exchange

STYLE: ensure PstreamBuffers and defaultCommsType agree
- simpler loops for lduSchedule
commit 0cf02eb384
parent b8c3dc4e49
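To illustrate the globalIndex change: construction and reset now take only an optional communicator and a parallel flag (no message tag) and obtain the per-rank sizes with a gather plus a single broadcast. A minimal sketch of old versus new call sites; the names nLocalCells, gi and gi2 are illustrative, not from this commit:

    #include "globalIndex.H"

    using namespace Foam;

    void numberCells(const label nLocalCells)
    {
        // Deprecated form: the message tag is no longer used
        // globalIndex gi(nLocalCells, Pstream::msgType(), UPstream::worldComm, UPstream::parRun());

        // New form: communicator and parallel flag are defaulted
        globalIndex gi(nLocalCells);

        // Equivalent explicit form
        globalIndex gi2(nLocalCells, UPstream::worldComm, UPstream::parRun());

        // Typical use: map local index 0 to its global index
        const label globalCell0 = gi.toGlobal(0);
    }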
@@ -210,7 +210,7 @@ void broadcast_chunks
<< "Contiguous data only." << sizeof(T) << Foam::abort(FatalError);
}

if (UPstream::maxCommsSize <= int(sizeof(T)))
if (UPstream::maxCommsSize <= 0)
{
// Do in one go
Info<< "send " << sendData.size() << " elements in one go" << endl;
@@ -230,48 +230,61 @@ void broadcast_chunks
// guaranteed that some processor's sending size is some other
// processor's receive size. Also we can ignore any local comms.

// We need to send bytes so the number of iterations:
// We need to send chunks so the number of iterations:
// maxChunkSize iterations
// ------------ ----------
// 0 0
// 1..maxChunkSize 1
// maxChunkSize+1..2*maxChunkSize 2
// etc.
// ...

const label maxChunkSize(UPstream::maxCommsSize/sizeof(T));
const label maxChunkSize
(
max
(
static_cast<label>(1),
static_cast<label>(UPstream::maxCommsSize/sizeof(T))
)
);

label nIter(0);
label nChunks(0);
{
label nSendMax = 0;
// Get max send count (elements)
// forAll(sendBufs, proci)
// {
// if (proci != Pstream::myProcNo(comm))
// {
// nSendMax = max(nSendMax, sendBufs[proci].size());
// nChunks = max(nChunks, sendBufs[proci].size());
// }
// }
nSendMax = sendSize;
nChunks = sendSize;

if (nSendMax)
// Convert from send count (elements) to number of chunks.
// Can normally calculate with (count-1), but add some safety
if (nChunks)
{
nIter = 1 + ((nSendMax-1)/maxChunkSize);
nChunks = 1 + (nChunks/maxChunkSize);
}
reduce(nIter, maxOp<label>(), tag, comm);
reduce(nChunks, maxOp<label>(), tag, comm);

Info
<< "send " << nSendMax << " elements ("
<< (nSendMax*sizeof(T)) << " bytes) in " << nIter
<< " iterations of " << maxChunkSize << " chunks ("
<< "send " << sendSize << " elements ("
<< (sendSize*sizeof(T)) << " bytes) in " << nChunks
<< " chunks of " << maxChunkSize << " elements ("
<< (maxChunkSize*sizeof(T)) << " bytes) for maxCommsSize:"
<< Pstream::maxCommsSize
<< endl;
}

// stress-test with shortened sendSize
// will produce useless loops, but no calls
// sendSize /= 2;

label nSend(0);
label startSend(0);
char* charPtrSend;

for (label iter = 0; iter < nIter; ++iter)
for (label iter = 0; iter < nChunks; ++iter)
{
nSend = min
(
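The arithmetic in this hunk converts a send count (elements) into a number of chunks bounded by UPstream::maxCommsSize (bytes). A small self-contained sketch of the same calculation, with illustrative values only:

    #include <algorithm>
    #include <cstdio>

    int main()
    {
        const int maxCommsSize = 33;       // byte limit, as set in the test below
        const int elemSize = sizeof(int);  // stand-in for sizeof(T)
        const int sendSize = 100;          // elements to send

        // At least one element per chunk, even when maxCommsSize < sizeof(T)
        const int maxChunkSize = std::max(1, maxCommsSize/elemSize);

        // 1 + (count/maxChunkSize) is slightly conservative: it can give one
        // extra (empty) pass compared with 1 + ((count - 1)/maxChunkSize)
        const int nChunks = (sendSize ? 1 + sendSize/maxChunkSize : 0);

        std::printf("%d elements -> %d chunks of at most %d elements\n",
                    sendSize, nChunks, maxChunkSize);
        return 0;
    }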
@@ -297,6 +310,8 @@ void broadcast_chunks
startSend += nSend;
}
}

Info<< "final: " << startSend << endl;
}


@@ -305,8 +320,9 @@ void broadcast_chunks
int main(int argc, char *argv[])
{
argList::noCheckProcessorDirectories();
argList::addOption("comms-size", "int", "override Pstream::maxCommsSize");

#include "setRootCase.H"
#include "createTime.H"

if (!Pstream::parRun())
{
@@ -322,6 +338,9 @@ int main(int argc, char *argv[])
broadcast_chunks<labelList, label>(input1);

Pstream::maxCommsSize = 33;

args.readIfPresent("comms-size", Pstream::maxCommsSize);

broadcast_chunks<labelList, label>(input1);

// Mostly the same with PstreamBuffers
@@ -999,16 +999,16 @@ void correctCoupledBoundaryConditions(fvMesh& mesh)
const lduSchedule& patchSchedule =
fld.mesh().globalData().patchSchedule();

forAll(patchSchedule, patchEvali)
for (const auto& schedEval : patchSchedule)
{
const label patchi = patchSchedule[patchEvali].patch;
const label patchi = schedEval.patch;
const auto& fvp = mesh.boundary()[patchi];
auto& pfld = bfld[patchi];

const auto* ppPtr = isA<CoupledPatchType>(fvp);
if (ppPtr && ppPtr->coupled())
{
if (patchSchedule[patchEvali].init)
if (schedEval.init)
{
pfld.initEvaluate(Pstream::commsTypes::scheduled);
}

@@ -95,9 +95,10 @@ void evaluateConstraintTypes(GeometricField<Type, fvPatchField, volMesh>& fld)
const lduSchedule& patchSchedule =
fld.mesh().globalData().patchSchedule();

forAll(patchSchedule, patchEvali)
for (const auto& schedEval : patchSchedule)
{
label patchi = patchSchedule[patchEvali].patch;
const label patchi = schedEval.patch;

fvPatchField<Type>& tgtField = fldBf[patchi];

if
@@ -106,7 +107,7 @@ void evaluateConstraintTypes(GeometricField<Type, fvPatchField, volMesh>& fld)
&& polyPatch::constraintType(tgtField.patch().patch().type())
)
{
if (patchSchedule[patchEvali].init)
if (schedEval.init)
{
tgtField.initEvaluate(Pstream::commsTypes::scheduled);
}
@@ -224,7 +224,7 @@ void Foam::Pstream::exchange
}
}

if (UPstream::maxCommsSize <= int(sizeof(T)))
if (UPstream::maxCommsSize <= 0)
{
// Do the exchanging in one go
exchangeContainer<Container, T>
@@ -244,38 +244,41 @@ void Foam::Pstream::exchange
// guaranteed that some processor's sending size is some other
// processor's receive size. Also we can ignore any local comms.

// We need to send bytes so the number of iterations:
// We need to send chunks so the number of iterations:
// maxChunkSize iterations
// ------------ ----------
// 0 0
// 1..maxChunkSize 1
// maxChunkSize+1..2*maxChunkSize 2
// etc.
// ...

const label maxChunkSize(UPstream::maxCommsSize/sizeof(T));
const label maxChunkSize
(
max
(
static_cast<label>(1),
static_cast<label>(UPstream::maxCommsSize/sizeof(T))
)
);

label nIter(0);
label nChunks(0);
{
label nSendMax = 0;
// Get max send count (elements)
forAll(sendBufs, proci)
{
if (proci != Pstream::myProcNo(comm))
{
nSendMax = max(nSendMax, sendBufs[proci].size());
nChunks = max(nChunks, sendBufs[proci].size());
}
}

if (nSendMax)
// Convert from send count (elements) to number of chunks.
// Can normally calculate with (count-1), but add some safety
if (nChunks)
{
nIter = 1 + ((nSendMax-1)/maxChunkSize);
nChunks = 1 + (nChunks/maxChunkSize);
}
reduce(nIter, maxOp<label>(), tag, comm);

/// Info<< "send " << nSendMax << " elements ("
/// << (nSendMax*sizeof(T)) << " bytes) in " << nIter
/// << " iterations of " << maxChunkSize << " chunks ("
/// << (maxChunkSize*sizeof(T)) << " bytes) maxCommsSize:"
/// << Pstream::maxCommsSize << endl;
reduce(nChunks, maxOp<label>(), tag, comm);
}

labelList nRecv(sendBufs.size());
@@ -286,7 +289,7 @@ void Foam::Pstream::exchange
List<const char*> charPtrSend(sendBufs.size());
List<char*> charPtrRecv(sendBufs.size());

for (label iter = 0; iter < nIter; ++iter)
for (label iter = 0; iter < nChunks; ++iter)
{
forAll(sendBufs, proci)
{
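For callers the chunking is transparent: whenever UPstream::maxCommsSize is positive, Pstream::exchange splits each send into chunk-sized pieces and loops over them. A hedged sketch of a caller exercising this; the payload and the byte limit are made up for illustration:

    // Assumes a parallel run; sendBufs[proci] holds labels destined for proci
    labelListList sendBufs(UPstream::nProcs());
    labelListList recvBufs(UPstream::nProcs());

    forAll(sendBufs, proci)
    {
        if (proci != UPstream::myProcNo())
        {
            sendBufs[proci] = identity(100);   // illustrative payload
        }
    }

    // Optional byte limit per message; <= 0 means exchange in one go
    UPstream::maxCommsSize = 1024;

    Pstream::exchange<labelList, label>
    (
        sendBufs,
        recvBufs,
        UPstream::msgType(),
        UPstream::worldComm
    );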
@@ -440,16 +440,18 @@ void Foam::GeometricField<Type, PatchField, GeoMesh>::Boundary::evaluate()
const lduSchedule& patchSchedule =
bmesh_.mesh().globalData().patchSchedule();

forAll(patchSchedule, patchEvali)
for (const auto& schedEval : patchSchedule)
{
if (patchSchedule[patchEvali].init)
const label patchi = schedEval.patch;

if (schedEval.init)
{
this->operator[](patchSchedule[patchEvali].patch)
this->operator[](patchi)
.initEvaluate(Pstream::commsTypes::scheduled);
}
else
{
this->operator[](patchSchedule[patchEvali].patch)
this->operator[](patchi)
.evaluate(Pstream::commsTypes::scheduled);
}
}
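The same lduSchedule loop change appears throughout this commit: the schedule entry is taken by reference in a range-for instead of indexing patchSchedule twice per iteration. The general pattern, sketched for an arbitrary boundary field (mesh and bfld are illustrative names):

    const lduSchedule& patchSchedule = mesh.globalData().patchSchedule();

    // Each entry names a patch and whether this pass is the 'init' half
    // or the 'evaluate' half of the scheduled communication
    for (const auto& schedEval : patchSchedule)
    {
        const label patchi = schedEval.patch;

        if (schedEval.init)
        {
            bfld[patchi].initEvaluate(Pstream::commsTypes::scheduled);
        }
        else
        {
            bfld[patchi].evaluate(Pstream::commsTypes::scheduled);
        }
    }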
@@ -6,7 +6,7 @@
\\/ M anipulation |
-------------------------------------------------------------------------------
Copyright (C) 2011-2017 OpenFOAM Foundation
Copyright (C) 2019 OpenCFD Ltd.
Copyright (C) 2019-2022 OpenCFD Ltd.
-------------------------------------------------------------------------------
License
This file is part of OpenFOAM.
@@ -148,13 +148,13 @@ void Foam::LduMatrix<Type, DType, LUType>::updateMatrixInterfaces
const lduSchedule& patchSchedule = this->patchSchedule();

// Loop over all the "normal" interfaces relating to standard patches
forAll(patchSchedule, i)
for (const auto& schedEval : patchSchedule)
{
label interfacei = patchSchedule[i].patch;
const label interfacei = schedEval.patch;

if (interfaces_.set(interfacei))
{
if (patchSchedule[i].init)
if (schedEval.init)
{
interfaces_[interfacei].initInterfaceMatrixUpdate
(
@@ -126,17 +126,16 @@ Foam::labelListList Foam::GAMGProcAgglomeration::globalCellCells
const lduAddressing& addr = mesh.lduAddr();
lduInterfacePtrsList interfaces = mesh.interfaces();

const label myProcID = Pstream::myProcNo(mesh.comm());
const label myProcID = UPstream::myProcNo(mesh.comm());

globalIndex globalNumbering
const globalIndex globalNumbering
(
addr.size(),
Pstream::msgType(),
mesh.comm(),
Pstream::parRun()
UPstream::parRun()
);

labelList globalIndices
const labelList globalIndices
(
identity
(
@@ -163,9 +162,9 @@ Foam::labelListList Foam::GAMGProcAgglomeration::globalCellCells
}
}

if (Pstream::parRun())
if (UPstream::parRun())
{
Pstream::waitRequests(nReq);
UPstream::waitRequests(nReq);
}

forAll(interfaces, inti)
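As a reminder of what globalNumbering provides here: a globalIndex built from addr.size() maps local cell indices to globally unique ones and back. A brief sketch; celli is an illustrative local index:

    const globalIndex globalNumbering(addr.size(), mesh.comm(), UPstream::parRun());

    // Local index on this rank -> unique global index
    const label globalCelli = globalNumbering.toGlobal(celli);

    // Global index -> owning rank and local index again
    const label ownerProc  = globalNumbering.whichProcID(globalCelli);
    const label localCelli = globalNumbering.toLocal(globalCelli);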
@@ -93,8 +93,8 @@ void Foam::pointBoundaryMesh::calcGeometry()

if
(
Pstream::defaultCommsType == Pstream::commsTypes::blocking
|| Pstream::defaultCommsType == Pstream::commsTypes::nonBlocking
pBufs.commsType() == Pstream::commsTypes::blocking
|| pBufs.commsType() == Pstream::commsTypes::nonBlocking
)
{
forAll(*this, patchi)
@@ -109,18 +109,18 @@ void Foam::pointBoundaryMesh::calcGeometry()
operator[](patchi).calcGeometry(pBufs);
}
}
else if (Pstream::defaultCommsType == Pstream::commsTypes::scheduled)
else if (pBufs.commsType() == Pstream::commsTypes::scheduled)
{
const lduSchedule& patchSchedule = mesh().globalData().patchSchedule();

// Dummy.
pBufs.finishedSends();

forAll(patchSchedule, patchEvali)
for (const auto& schedEval : patchSchedule)
{
label patchi = patchSchedule[patchEvali].patch;
const label patchi = schedEval.patch;

if (patchSchedule[patchEvali].init)
if (schedEval.init)
{
operator[](patchi).initGeometry(pBufs);
}
@@ -139,8 +139,8 @@ void Foam::pointBoundaryMesh::movePoints(const pointField& p)

if
(
Pstream::defaultCommsType == Pstream::commsTypes::blocking
|| Pstream::defaultCommsType == Pstream::commsTypes::nonBlocking
pBufs.commsType() == Pstream::commsTypes::blocking
|| pBufs.commsType() == Pstream::commsTypes::nonBlocking
)
{
forAll(*this, patchi)
@@ -155,18 +155,18 @@ void Foam::pointBoundaryMesh::movePoints(const pointField& p)
operator[](patchi).movePoints(pBufs, p);
}
}
else if (Pstream::defaultCommsType == Pstream::commsTypes::scheduled)
else if (pBufs.commsType() == Pstream::commsTypes::scheduled)
{
const lduSchedule& patchSchedule = mesh().globalData().patchSchedule();

// Dummy.
pBufs.finishedSends();

forAll(patchSchedule, patchEvali)
for (const auto& schedEval : patchSchedule)
{
label patchi = patchSchedule[patchEvali].patch;
const label patchi = schedEval.patch;

if (patchSchedule[patchEvali].init)
if (schedEval.init)
{
operator[](patchi).initMovePoints(pBufs, p);
}
@@ -185,8 +185,8 @@ void Foam::pointBoundaryMesh::updateMesh()

if
(
Pstream::defaultCommsType == Pstream::commsTypes::blocking
|| Pstream::defaultCommsType == Pstream::commsTypes::nonBlocking
pBufs.commsType() == Pstream::commsTypes::blocking
|| pBufs.commsType() == Pstream::commsTypes::nonBlocking
)
{
forAll(*this, patchi)
@@ -201,18 +201,18 @@ void Foam::pointBoundaryMesh::updateMesh()
operator[](patchi).updateMesh(pBufs);
}
}
else if (Pstream::defaultCommsType == Pstream::commsTypes::scheduled)
else if (pBufs.commsType() == Pstream::commsTypes::scheduled)
{
const lduSchedule& patchSchedule = mesh().globalData().patchSchedule();

// Dummy.
pBufs.finishedSends();

forAll(patchSchedule, patchEvali)
for (const auto& schedEval : patchSchedule)
{
label patchi = patchSchedule[patchEvali].patch;
const label patchi = schedEval.patch;

if (patchSchedule[patchEvali].init)
if (schedEval.init)
{
operator[](patchi).initUpdateMesh(pBufs);
}
@@ -267,8 +267,8 @@ void Foam::polyBoundaryMesh::calcGeometry()

if
(
Pstream::defaultCommsType == Pstream::commsTypes::blocking
|| Pstream::defaultCommsType == Pstream::commsTypes::nonBlocking
pBufs.commsType() == Pstream::commsTypes::blocking
|| pBufs.commsType() == Pstream::commsTypes::nonBlocking
)
{
forAll(*this, patchi)
@@ -283,18 +283,18 @@ void Foam::polyBoundaryMesh::calcGeometry()
operator[](patchi).calcGeometry(pBufs);
}
}
else if (Pstream::defaultCommsType == Pstream::commsTypes::scheduled)
else if (pBufs.commsType() == Pstream::commsTypes::scheduled)
{
const lduSchedule& patchSchedule = mesh().globalData().patchSchedule();

// Dummy.
pBufs.finishedSends();

for (const auto& patchEval : patchSchedule)
for (const auto& schedEval : patchSchedule)
{
const label patchi = patchEval.patch;
const label patchi = schedEval.patch;

if (patchEval.init)
if (schedEval.init)
{
operator[](patchi).initGeometry(pBufs);
}
@@ -1120,8 +1120,8 @@ void Foam::polyBoundaryMesh::movePoints(const pointField& p)

if
(
Pstream::defaultCommsType == Pstream::commsTypes::blocking
|| Pstream::defaultCommsType == Pstream::commsTypes::nonBlocking
pBufs.commsType() == Pstream::commsTypes::blocking
|| pBufs.commsType() == Pstream::commsTypes::nonBlocking
)
{
forAll(*this, patchi)
@@ -1136,18 +1136,18 @@ void Foam::polyBoundaryMesh::movePoints(const pointField& p)
operator[](patchi).movePoints(pBufs, p);
}
}
else if (Pstream::defaultCommsType == Pstream::commsTypes::scheduled)
else if (pBufs.commsType() == Pstream::commsTypes::scheduled)
{
const lduSchedule& patchSchedule = mesh().globalData().patchSchedule();

// Dummy.
pBufs.finishedSends();

for (const auto& patchEval : patchSchedule)
for (const auto& schedEval : patchSchedule)
{
const label patchi = patchEval.patch;
const label patchi = schedEval.patch;

if (patchEval.init)
if (schedEval.init)
{
operator[](patchi).initMovePoints(pBufs, p);
}
@@ -1170,8 +1170,8 @@ void Foam::polyBoundaryMesh::updateMesh()

if
(
Pstream::defaultCommsType == Pstream::commsTypes::blocking
|| Pstream::defaultCommsType == Pstream::commsTypes::nonBlocking
pBufs.commsType() == Pstream::commsTypes::blocking
|| pBufs.commsType() == Pstream::commsTypes::nonBlocking
)
{
forAll(*this, patchi)
@@ -1186,18 +1186,18 @@ void Foam::polyBoundaryMesh::updateMesh()
operator[](patchi).updateMesh(pBufs);
}
}
else if (Pstream::defaultCommsType == Pstream::commsTypes::scheduled)
else if (pBufs.commsType() == Pstream::commsTypes::scheduled)
{
const lduSchedule& patchSchedule = mesh().globalData().patchSchedule();

// Dummy.
pBufs.finishedSends();

for (const auto& patchEval : patchSchedule)
for (const auto& schedEval : patchSchedule)
{
const label patchi = patchEval.patch;
const label patchi = schedEval.patch;

if (patchEval.init)
if (schedEval.init)
{
operator[](patchi).initUpdateMesh(pBufs);
}
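All of the boundary-mesh changes above apply the same STYLE fix: branch on the communication type actually carried by the PstreamBuffers rather than re-reading Pstream::defaultCommsType, so the buffers and the branching can never disagree. Condensed, the pattern looks like this (the per-patch calls are placeholders):

    PstreamBuffers pBufs(Pstream::defaultCommsType);

    if
    (
        pBufs.commsType() == Pstream::commsTypes::blocking
     || pBufs.commsType() == Pstream::commsTypes::nonBlocking
    )
    {
        // init on every patch, exchange the buffers, then finish on every patch
    }
    else if (pBufs.commsType() == Pstream::commsTypes::scheduled)
    {
        // walk mesh().globalData().patchSchedule(), one init/update per entry
    }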
@@ -188,12 +188,6 @@ void Foam::globalIndex::bin
}


void Foam::globalIndex::reset(const label localSize)
{
reset(localSize, Pstream::msgType(), UPstream::worldComm, true);
}


void Foam::globalIndex::reset
(
const label localSize,
@@ -209,25 +203,27 @@ void Foam::globalIndex::reset
void Foam::globalIndex::reset
(
const label localSize,
const int tag,
const label comm,
const bool parallel
)
{
labelList localLens;

const label len = Pstream::nProcs(comm);

if (len)
{
// Seed with localSize, zero elsewhere (for non-parallel branch)
// NB: can consider UPstream::listGatherValues

labelList localLens(len, Zero);
localLens[Pstream::myProcNo(comm)] = localSize;

if (parallel)
if (parallel && UPstream::parRun())
{
Pstream::gatherList(localLens, tag, comm);
Pstream::scatterList(localLens, tag, comm);
localLens = UPstream::listGatherValues(localSize, comm);
Pstream::broadcast(localLens, comm);
}
else
{
// Non-parallel branch: use localSize on-proc, zero elsewhere

localLens.resize(len, Zero);
localLens[Pstream::myProcNo(comm)] = localSize;
}

reset(localLens, true); // checkOverflow = true
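This reset is where the "less communication than gatherList/scatterList" of the commit message comes from: one value per rank is gathered onto the master and the assembled list is then broadcast once, instead of gathering and re-scattering the whole list. The same pattern in isolation (localSize is an illustrative value):

    const label comm = UPstream::worldComm;
    const label localSize = 42;   // illustrative per-rank value

    // Gather one value per rank onto the master ...
    labelList localLens = UPstream::listGatherValues(localSize, comm);

    // ... then hand every rank a copy of the assembled list
    Pstream::broadcast(localLens, comm);

    // Previous pattern: pre-size, gather the full list, re-scatter it
    // labelList lens(UPstream::nProcs(comm), Zero);
    // lens[UPstream::myProcNo(comm)] = localSize;
    // Pstream::gatherList(lens, UPstream::msgType(), comm);
    // Pstream::scatterList(lens, UPstream::msgType(), comm);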
@@ -127,9 +127,14 @@ public:
enum accessType accType
);

//- Construct from local size.
// Communication with default communicator and message tag.
inline explicit globalIndex(const label localSize);
//- Construct from local size, using gather/broadcast
//- with default/specified communicator if parallel.
inline explicit globalIndex
(
const label localSize,
const label comm = UPstream::worldComm, //!< communicator
const bool parallel = UPstream::parRun() //!< use parallel comms
);

//- Construct by gathering local sizes without rescattering.
//- This 'one-sided' globalIndex will be empty on non-master processes.
@@ -148,18 +153,7 @@ public:
(
const label localSize,
const globalIndex::gatherNone,
const label comm = -1 //!< dummy communicator
);

//- Construct from local size.
// Communication with given communicator and message tag,
// unless parallel == false
inline globalIndex
(
const label localSize,
const int tag, //!< message tag
const label comm, //!< communicator
const bool parallel //!< use parallel comms
const label comm = -1 //!< no communicator needed
);

//- Construct from Istream.
@@ -211,9 +205,14 @@ public:
//- Write-access to the offsets, for changing after construction
inline labelList& offsets() noexcept;

//- Reset from local size.
// Does communication with default communicator and message tag.
void reset(const label localSize);
//- Reset from local size, using gather/broadcast
//- with default/specified communicator if parallel.
void reset
(
const label localSize,
const label comm = UPstream::worldComm, //!< communicator
const bool parallel = UPstream::parRun() //!< use parallel comms
);

//- Reset by gathering local sizes without rescattering.
//- This 'one-sided' globalIndex will be empty on non-master processes.
@@ -226,23 +225,12 @@ public:
const label comm = UPstream::worldComm //!< communicator
);

//- Reset from local size.
// Does communication with given communicator and message tag,
// unless parallel == false
void reset
(
const label localSize,
const int tag, //!< message tag
const label comm, //!< communicator
const bool parallel //!< use parallel comms
);

//- Reset from list of local sizes,
//- with optional check for label overflow.
//- No communication required
void reset
(
const labelUList& sizes,
const labelUList& localLens,
const bool checkOverflow = false
);

@@ -328,15 +316,6 @@ public:
inline label whichProcID(const label i) const;


// Housekeeping

//- Same as localStart
label offset(const label proci) const
{
return localStart(proci);
}


// Iteration

//- Forward input iterator with const access
@@ -818,6 +797,41 @@ public:

friend Istream& operator>>(Istream& is, globalIndex& gi);
friend Ostream& operator<<(Ostream& os, const globalIndex& gi);


// Housekeeping

//- Construct from local size, using gather/broadcast
//- with default/specified communicator if parallel.
FOAM_DEPRECATED_FOR(2022-03, "construct without message tag")
globalIndex
(
const label localSize,
const int tag, // message tag (unused)
const label comm, // communicator
const bool parallel // use parallel comms
)
{
reset(localSize, comm, parallel);
}

//- Reset from local size, using gather/broadcast
//- with default/specified communicator if parallel.
FOAM_DEPRECATED_FOR(2022-03, "reset without message tag")
void reset
(
const label localSize,
const int tag, // message tag (unused)
const label comm, // communicator
const bool parallel // use parallel comms
)
{
reset(localSize, comm, parallel);
}

//- Prefer localStart() to avoid confusing with offsets()
FOAM_DEPRECATED_FOR(2022-02, "use localStart()")
label offset(const label proci) const { return localStart(proci); }
};
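The deprecated overloads above exist so that old call sites keep compiling: the tag argument is ignored and the call forwards to the new reset. In practice a call like the first line below now behaves like the second (nCells and mesh are illustrative names):

    // Old call site, still accepted but flagged by FOAM_DEPRECATED_FOR
    globalIndex gi(nCells, Pstream::msgType(), mesh.comm(), UPstream::parRun());

    // What it forwards to
    globalIndex gi2(nCells, mesh.comm(), UPstream::parRun());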
@@ -77,9 +77,14 @@ inline Foam::globalIndex::globalIndex
}


inline Foam::globalIndex::globalIndex(const label localSize)
inline Foam::globalIndex::globalIndex
(
const label localSize,
const label comm,
const bool parallel
)
{
reset(localSize);
reset(localSize, comm, parallel);
}


@@ -109,18 +114,6 @@ inline Foam::globalIndex::globalIndex
}


inline Foam::globalIndex::globalIndex
(
const label localSize,
const int tag,
const label comm,
const bool parallel
)
{
reset(localSize, tag, comm, parallel);
}


// * * * * * * * * * * * * * * * Member Functions * * * * * * * * * * * * * //

inline bool Foam::globalIndex::empty() const
@@ -296,7 +296,7 @@ bool Foam::UPstream::init(int& argc, char**& argv, const bool needsThread)
wordList worlds(numprocs);
worlds[Pstream::myProcNo()] = world;
Pstream::gatherList(worlds);
Pstream::scatterList(worlds);
Pstream::broadcast(worlds);

// Compact
if (Pstream::master())
@@ -315,8 +315,8 @@ bool Foam::UPstream::init(int& argc, char**& argv, const bool needsThread)
worldIDs_[proci] = allWorlds_.find(world);
}
}
Pstream::scatter(allWorlds_);
Pstream::scatter(worldIDs_);
Pstream::broadcast(allWorlds_);
Pstream::broadcast(worldIDs_);

DynamicList<label> subRanks;
forAll(worlds, proci)
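The UPstream::init change is the same theme applied to the world bookkeeping: the list of world names is still gathered, but it is then published with a single broadcast instead of scatterList, and the later scatter calls become broadcasts too. A short sketch; the world name is a placeholder:

    wordList worlds(UPstream::nProcs());
    worlds[UPstream::myProcNo()] = "world0";   // placeholder world name

    // Master assembles the full list, every rank then receives one copy
    Pstream::gatherList(worlds);
    Pstream::broadcast(worlds);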
@@ -54,8 +54,9 @@ namespace Foam
namespace PstreamDetail
{

// MPI_Bcast, using root=0
template<class Type>
void allBroadcast
void broadcast0
(
Type* values,
int count,
@@ -63,6 +64,18 @@ void allBroadcast
const label communicator
);

// MPI_Reduce, using root=0
template<class Type>
void reduce0
(
Type* values,
int count,
MPI_Datatype datatype,
MPI_Op optype,
const label communicator
);

// MPI_Allreduce
template<class Type>
void allReduce
(
@@ -73,6 +86,8 @@ void allReduce
const label communicator
);


// MPI_Iallreduce
template<class Type>
void iallReduce
(
@@ -33,7 +33,7 @@ License
// * * * * * * * * * * * * * * * Global Functions * * * * * * * * * * * * * //

template<class Type>
void Foam::PstreamDetail::allBroadcast
void Foam::PstreamDetail::broadcast0
(
Type* values,
int count,
@@ -62,6 +62,55 @@ void Foam::PstreamDetail::allBroadcast
}


template<class Type>
void Foam::PstreamDetail::reduce0
(
Type* values,
int count,
MPI_Datatype datatype,
MPI_Op optype,
const label communicator
)
{
if (!UPstream::parRun())
{
return;
}

if (UPstream::warnComm != -1 && communicator != UPstream::warnComm)
{
Pout<< "** reducing:";
if (count == 1)
{
Pout<< (*values);
}
else
{
Pout<< UList<Type>(values, count);
}
Pout<< " with comm:" << communicator
<< " warnComm:" << UPstream::warnComm << endl;
error::printStack(Pout);
}

profilingPstream::beginTiming();

// const int retval =
MPI_Reduce
(
MPI_IN_PLACE,
values,
count,
datatype,
optype,
0, // (root process) is master == UPstream::masterNo()
PstreamGlobals::MPICommunicators_[communicator]
);

profilingPstream::addReduceTime();
}


template<class Type>
void Foam::PstreamDetail::allReduce
(
@@ -6,7 +6,7 @@
\\/ M anipulation |
-------------------------------------------------------------------------------
Copyright (C) 2011-2017 OpenFOAM Foundation
Copyright (C) 2018 OpenCFD Ltd.
Copyright (C) 2018-2022 OpenCFD Ltd.
-------------------------------------------------------------------------------
License
This file is part of OpenFOAM.
@@ -423,16 +423,16 @@ void Foam::motionSmootherAlgo::setDisplacementPatchFields
// fixedValue bc's first.
labelHashSet adaptPatchSet(patchIDs);

const lduSchedule& patchSchedule = displacement.mesh().globalData().
patchSchedule();
const lduSchedule& patchSchedule =
displacement.mesh().globalData().patchSchedule();

forAll(patchSchedule, patchEvalI)
for (const auto& schedEval : patchSchedule)
{
const label patchi = patchSchedule[patchEvalI].patch;
const label patchi = schedEval.patch;

if (!adaptPatchSet.found(patchi))
{
if (patchSchedule[patchEvalI].init)
if (schedEval.init)
{
displacementBf[patchi]
.initEvaluate(Pstream::commsTypes::scheduled);
@@ -575,17 +575,16 @@ void Foam::motionSmootherAlgo::correctBoundaryConditions

const lduSchedule& patchSchedule = mesh_.globalData().patchSchedule();

pointVectorField::Boundary& displacementBf =
displacement.boundaryFieldRef();
auto& displacementBf = displacement.boundaryFieldRef();

// 1. evaluate on adaptPatches
forAll(patchSchedule, patchEvalI)
for (const auto& schedEval : patchSchedule)
{
const label patchi = patchSchedule[patchEvalI].patch;
const label patchi = schedEval.patch;

if (adaptPatchSet.found(patchi))
{
if (patchSchedule[patchEvalI].init)
if (schedEval.init)
{
displacementBf[patchi]
.initEvaluate(Pstream::commsTypes::blocking);
@@ -600,13 +599,13 @@ void Foam::motionSmootherAlgo::correctBoundaryConditions

// 2. evaluate on non-AdaptPatches
forAll(patchSchedule, patchEvalI)
for (const auto& schedEval : patchSchedule)
{
const label patchi = patchSchedule[patchEvalI].patch;
const label patchi = schedEval.patch;

if (!adaptPatchSet.found(patchi))
{
if (patchSchedule[patchEvalI].init)
if (schedEval.init)
{
displacementBf[patchi]
.initEvaluate(Pstream::commsTypes::blocking);
@@ -5,7 +5,7 @@
\\ / A nd | www.openfoam.com
\\/ M anipulation |
-------------------------------------------------------------------------------
Copyright (C) 2016-2020 OpenCFD Ltd.
Copyright (C) 2016-2022 OpenCFD Ltd.
-------------------------------------------------------------------------------
License
This file is part of OpenFOAM.
@@ -91,9 +91,10 @@ void Foam::functionObjects::mapFields::evaluateConstraintTypes
const lduSchedule& patchSchedule =
fld.mesh().globalData().patchSchedule();

forAll(patchSchedule, patchEvali)
for (const auto& schedEval : patchSchedule)
{
label patchi = patchSchedule[patchEvali].patch;
const label patchi = schedEval.patch;

fvPatchField<Type>& tgtField = fldBf[patchi];

if
@@ -102,7 +103,7 @@ void Foam::functionObjects::mapFields::evaluateConstraintTypes
&& polyPatch::constraintType(tgtField.patch().patch().type())
)
{
if (patchSchedule[patchEvali].init)
if (schedEval.init)
{
tgtField.initEvaluate(Pstream::commsTypes::scheduled);
}
@@ -480,13 +480,7 @@ void Foam::decompositionMethod::calcCellCells
// Create global cell numbers
// ~~~~~~~~~~~~~~~~~~~~~~~~~~

const globalIndex globalAgglom
(
nLocalCoarse,
Pstream::msgType(),
Pstream::worldComm,
parallel
);
const globalIndex globalAgglom(nLocalCoarse, Pstream::worldComm, parallel);


// Get agglomerate owner on other side of coupled faces
@@ -679,13 +673,7 @@ void Foam::decompositionMethod::calcCellCells
// Create global cell numbers
// ~~~~~~~~~~~~~~~~~~~~~~~~~~

const globalIndex globalAgglom
(
nLocalCoarse,
Pstream::msgType(),
Pstream::worldComm,
parallel
);
const globalIndex globalAgglom(nLocalCoarse, Pstream::worldComm, parallel);


// Get agglomerate owner on other side of coupled faces