ENH: remove blocking communication for gather patterns

ENH: eliminate unnecessary duplicate communicator

- globalMeshData previously had a comm_dup hack to avoid clashes
  with the deltaCoeffs calculations. However, this was largely due to
  a manual implementation of reduce() that used point-to-point
  communication. That has since been updated to use an MPI_Allreduce
  and now an MPI_Allgather, neither of which needs the hack.
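
For orientation, a minimal before/after sketch of the master-gather
pattern that this commit rewrites. The helper name (gatherToMaster)
and the labelList payload are illustrative only; the Pstream calls
are the ones that appear in the hunks below.

    #include "labelList.H"
    #include "IPstream.H"
    #include "OPstream.H"

    using namespace Foam;

    // Before: one blocking stream object per rank, operator<< / operator>>
    void gatherToMaster_old(const labelList& localData, List<labelList>& all)
    {
        if (UPstream::master())
        {
            all.resize(UPstream::nProcs());
            all[0] = localData;
            for (const int proci : UPstream::subProcs())
            {
                // Blocking receive via an explicit stream object
                IPstream fromProc(UPstream::commsTypes::blocking, proci);
                fromProc >> all[proci];
            }
        }
        else
        {
            // Blocking (buffered) send via an explicit stream object
            OPstream toMaster(UPstream::commsTypes::blocking, UPstream::masterNo());
            toMaster << localData;
        }
    }

    // After: static helpers (scheduled, ie MPI_Send/MPI_Recv), no stream object
    void gatherToMaster_new(const labelList& localData, List<labelList>& all)
    {
        if (UPstream::master())
        {
            all.resize(UPstream::nProcs());
            all[0] = localData;
            for (const int proci : UPstream::subProcs())
            {
                IPstream::recv(all[proci], proci);
            }
        }
        else
        {
            OPstream::send(localData, UPstream::masterNo());
        }
    }

The same send/recv replacement recurs throughout the files below; in
globalMeshData the manual point-to-point reduce was what previously
motivated the duplicate communicator.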
Mark Olesen 2024-02-29 18:35:03 +01:00
parent b98f53ceca
commit 7006056eae
29 changed files with 272 additions and 595 deletions

View File

@ -6,7 +6,7 @@
\\/ M anipulation |
-------------------------------------------------------------------------------
Copyright (C) 2011 OpenFOAM Foundation
Copyright (C) 2017-2022 OpenCFD Ltd.
Copyright (C) 2017-2024 OpenCFD Ltd.
-------------------------------------------------------------------------------
License
This file is part of OpenFOAM.
@ -156,29 +156,24 @@ int main(int argc, char *argv[])
os.writeEntry("idl3", idl3);
}
if (Pstream::parRun())
if (UPstream::parRun())
{
if (Pstream::master())
if (UPstream::master())
{
Pout<< "full: " << flatOutput(idl3.values()) << nl
<< "send: " << flatOutput(idl3) << endl;
for (const int proci : Pstream::subProcs())
for (const int proci : UPstream::subProcs())
{
OPstream toSlave(Pstream::commsTypes::scheduled, proci);
toSlave << idl3;
OPstream::send(idl3, proci);
}
}
else
{
// From master
IPstream fromMaster
(
Pstream::commsTypes::scheduled,
Pstream::masterNo()
);
List<label> recv;
List<label> recv(fromMaster);
IPstream::recv(recv, UPstream::masterNo());
Pout<<"recv: " << flatOutput(recv) << endl;
}

View File

@ -139,7 +139,7 @@ int main(int argc, char *argv[])
{
recvBufs(proci).resize_nocopy(count);
// Non-blocking read
// Non-blocking read - MPI_Irecv()
UIPstream::read
(
recvRequests.emplace_back(),
@ -155,9 +155,9 @@ int main(int argc, char *argv[])
{
IPstream is
(
UPstream::commsTypes::scheduled,
probed.first,
probed.second,
UPstream::commsTypes::scheduled, // ie, MPI_Recv()
proci,
count, // bufSize
tag,
comm
);

View File

@ -131,13 +131,17 @@ void testTransfer(const T& input)
for (const int proci : UPstream::subProcs())
{
Perr<< "master sending to proc:" << proci << endl;
OPstream::bsend(data, proci);
OPstream os(UPstream::commsTypes::blocking, proci);
os << data;
}
}
else
{
Perr<< "proc sending to master" << endl;
OPstream::bsend(data, UPstream::masterNo());
{
Perr<< "proc sending to master" << endl;
OPstream os(UPstream::commsTypes::blocking, UPstream::masterNo());
os << data;
}
Perr<< "proc receiving from master" << endl;
IPstream::recv(data, UPstream::masterNo());
@ -165,13 +169,17 @@ void testTokenized(const T& data)
for (const int proci : UPstream::subProcs())
{
Perr<< "master sending to proc:" << proci << endl;
OPstream::bsend(tok, proci);
OPstream os(UPstream::commsTypes::blocking, proci);
os << tok;
}
}
else
{
Perr<< "proc sending to master" << endl;
OPstream::bsend(tok, UPstream::masterNo());
{
Perr<< "proc sending to master" << endl;
OPstream os(UPstream::commsTypes::blocking, UPstream::masterNo());
os << tok;
}
Perr<< "proc receiving from master" << endl;
IPstream::recv(tok, UPstream::masterNo());

View File

@ -30,16 +30,15 @@ Description
Gather data from all processors onto all processors.
SourceFiles
Gather.C
Gather.txx
\*---------------------------------------------------------------------------*/
#ifndef Gather_H
#define Gather_H
#ifndef Foam_TestGather_H
#define Foam_TestGather_H
#include "List.H"
#include "labelList.H"
#include "GatherBase.H"
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
@ -50,34 +49,34 @@ namespace Foam
Class Gather Declaration
\*---------------------------------------------------------------------------*/
template<class T0>
template<class Type>
class Gather
:
public GatherBase,
public List<T0>
public List<Type>
{
// Private data
// Private Data
//- Number of processors (1 for sequential)
label nProcs_;
//- Storage of type 0
//List<T0> data0_;
//- Storage
//List<Type> list_;
public:
// Constructors
//- Construct from containers across processors
Gather(const T0&, const bool redistribute=true);
Gather(const Type& localData, const bool redistribute=true);
// Member Functions
// const List<T0>& data0() const
// {
// return data0_;
// }
List<Type>& list() noexcept { return *this; }
const List<Type>& list() const noexcept { return *this; }
// List<Type>& list() noexcept { return list_; }
// const List<Type>& list() const noexcept { return list_; }
};

View File

@ -30,96 +30,71 @@ License
#include "IPstream.H"
#include "OPstream.H"
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
namespace Foam
{
// * * * * * * * * * * * * * * * * Constructors * * * * * * * * * * * * * * //
// Construct from component
template<class T0>
Gather<T0>::Gather(const T0& localData, const bool redistribute)
template<class Type>
Foam::Gather<Type>::Gather(const Type& localData, const bool redistribute)
:
List<T0>(0),
nProcs_(max(1, Pstream::nProcs()))
nProcs_(Foam::max(1, UPstream::nProcs()))
{
this->setSize(nProcs_);
this->list().resize(nProcs_);
//
// Collect sizes on all processor
//
if (Pstream::parRun())
if (UPstream::parRun())
{
if (Pstream::master())
if (UPstream::master())
{
auto outIter = this->begin();
*outIter = localData;
auto iter = this->list().begin();
*iter = localData;
// Receive data
for (const int proci : Pstream::subProcs())
for (const int proci : UPstream::subProcs())
{
IPstream fromSlave(Pstream::commsTypes::scheduled, proci);
fromSlave >> *(++outIter);
++iter;
IPstream::recv(*iter, proci);
}
// Send data
for (const int proci : Pstream::subProcs())
for (const int proci : UPstream::subProcs())
{
OPstream toSlave(Pstream::commsTypes::scheduled, proci);
if (redistribute)
{
toSlave << *this;
OPstream::send(*this, proci);
}
else
{
// Dummy send just to balance sends/receives
toSlave << 0;
// Dummy send (to balance sends/receives)
OPstream::send(label(0), proci);
}
}
}
else
{
// Slave: send my local data to master
{
OPstream toMaster
(
Pstream::commsTypes::scheduled,
Pstream::masterNo()
);
toMaster << localData;
}
// Send my local data to master
OPstream::send(localData, UPstream::masterNo());
// Receive data from master
if (redistribute)
{
IPstream fromMaster
(
Pstream::commsTypes::scheduled,
Pstream::masterNo()
);
if (redistribute)
{
fromMaster >> *this;
}
else
{
label dummy;
fromMaster >> dummy;
}
IPstream::recv(*this, UPstream::masterNo());
}
else
{
// Dummy receive
label dummy;
IPstream::recv(dummy, UPstream::masterNo());
}
}
}
else
{
this->operator[](0) = localData;
this->list().resize(1);
this->list()[0] = localData;
}
}
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
} // End namespace Foam
// ************************************************************************* //

View File

@ -1,102 +0,0 @@
/*---------------------------------------------------------------------------*\
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | www.openfoam.com
\\/ M anipulation |
-------------------------------------------------------------------------------
Copyright (C) 2011-2015 OpenFOAM Foundation
-------------------------------------------------------------------------------
License
This file is part of OpenFOAM.
OpenFOAM is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>.
Class
Foam::GatherBase
Description
SourceFiles
GatherBase.C
\*---------------------------------------------------------------------------*/
#ifndef GatherBase_H
#define GatherBase_H
#include "List.H"
#include "labelList.H"
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
namespace Foam
{
/*---------------------------------------------------------------------------*\
Class GatherBase Declaration
\*---------------------------------------------------------------------------*/
class GatherBase
{
public:
//- Flatten: appends all elements of list into one single list.
// Used to collapse 'Gathered' data.
template<class T>
static T flatten(const List<T>);
//- Flatten and offset 'Gathered' indices (into value) so they
// remain valid with respect to values (after they have been flattened)
template<class DataType, class IndexType, class AddOp>
static IndexType offset
(
const List<DataType>& values,
const List<IndexType>& indices,
AddOp aop
);
};
template<class T>
class AddOp
{
public:
T operator()
(
const T& x,
const label offset
) const
{
return x + offset;
}
};
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
} // End namespace Foam
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
#include "GatherBase.txx"
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
#endif
// ************************************************************************* //

View File

@ -1,115 +0,0 @@
/*---------------------------------------------------------------------------*\
========= |
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | www.openfoam.com
\\/ M anipulation |
-------------------------------------------------------------------------------
Copyright (C) 2011-2015 OpenFOAM Foundation
-------------------------------------------------------------------------------
License
This file is part of OpenFOAM.
OpenFOAM is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>.
\*---------------------------------------------------------------------------*/
#include "GatherBase.H"
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
namespace Foam
{
// * * * * * * * * * * * * * * * Member Functions * * * * * * * * * * * * * //
template<class Type>
Type GatherBase::flatten(const List<Type> lst)
{
label sum = 0;
forAll(lst, lstI)
{
sum += lst[lstI].size();
}
Type result(sum);
label index = 0;
forAll(lst, lstI)
{
const Type& sub = lst[lstI];
forAll(sub, subI)
{
result[index++] = sub[subI];
}
}
return result;
}
template<class DataType, class IndexType, class AddOp>
IndexType GatherBase::offset
(
const List<DataType>& values,
const List<IndexType>& indices,
AddOp aop
)
{
if (values.size() != indices.size())
{
FatalErrorInFunction
<< "Input data and indices lists not equal size." << endl
<< "data size:" << values.size()
<< " indices:" << indices.size()
<< abort(FatalError);
}
label sum = 0;
forAll(indices, lstI)
{
sum += indices[lstI].size();
}
IndexType result(sum);
label index = 0;
label offset = 0;
forAll(indices, lstI)
{
const IndexType& sub = indices[lstI];
forAll(sub, subI)
{
result[index++] = aop(sub[subI], offset);
}
offset += values[lstI].size();
}
return result;
}
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
} // End namespace Foam
// ************************************************************************* //

View File

@ -206,8 +206,8 @@ int main(int argc, char *argv[])
for (const int proci : UPstream::subProcs())
{
IPstream fromProc(UPstream::commsTypes::scheduled, proci);
labelList below(fromProc);
labelList below;
IPstream::recv(below, proci);
printConnection(os, proci, below);
}
@ -222,13 +222,7 @@ int main(int argc, char *argv[])
}
else
{
OPstream toMaster
(
Pstream::commsTypes::scheduled,
Pstream::masterNo()
);
toMaster << myComm.below();
OPstream::send(myComm.below(), UPstream::masterNo());
// Pout<< flatOutput(myComm.allBelow()) << nl;
}

View File

@ -541,7 +541,7 @@ void syncPoints
// Is there any coupled patch with transformation?
bool hasTransformation = false;
if (Pstream::parRun())
if (UPstream::parRun())
{
const labelList& procPatches = mesh.globalData().processorPatches();
@ -569,7 +569,12 @@ void syncPoints
}
// buffered send
OPstream::bsend(patchInfo, procPatch.neighbProcNo());
OPstream toNbr
(
UPstream::commsTypes::blocking,
procPatch.neighbProcNo()
);
toNbr << patchInfo;
}
}
@ -583,10 +588,10 @@ void syncPoints
if (pp.nPoints() && !procPatch.owner())
{
pointField nbrPatchInfo;
// We do not know the number of points on the other side
// so cannot use UIPstream::read
pointField nbrPatchInfo;
IPstream::recv(nbrPatchInfo, procPatch.neighbProcNo());
// Null any value which is not on neighbouring processor

View File

@ -374,9 +374,9 @@ void getInterfaceSizes
}
if (Pstream::parRun())
if (UPstream::parRun())
{
if (Pstream::master())
if (UPstream::master())
{
// Receive and add to my sizes
for (const int proci : UPstream::subProcs())
@ -420,8 +420,8 @@ void getInterfaceSizes
}
else
{
// buffered send to master
OPstream::bsend(regionsToSize, UPstream::masterNo());
// send to master
OPstream::send(regionsToSize, UPstream::masterNo());
}
}

View File

@ -115,8 +115,8 @@ void Foam::ParSortableList<Type>::checkAndSend
}
{
OPstream toSlave(Pstream::commsTypes::blocking, destProci);
toSlave << values << indices;
OPstream toProc(UPstream::commsTypes::blocking, destProci);
toProc << values << indices;
}
}
}
@ -309,9 +309,9 @@ void Foam::ParSortableList<Type>::sort()
Pout<< "Receiving from " << proci << endl;
}
IPstream fromSlave(Pstream::commsTypes::blocking, proci);
IPstream fromProc(UPstream::commsTypes::blocking, proci);
fromSlave >> recValues >> recIndices;
fromProc >> recValues >> recIndices;
if (debug & 2)
{

View File

@ -89,7 +89,7 @@ public:
{
IPstream is
(
UPstream::commsTypes::scheduled,
UPstream::commsTypes::scheduled, // ie, MPI_Recv()
fromProcNo,
0, // bufSize
tag,

View File

@ -93,29 +93,6 @@ public:
os << value;
}
//- Serialize a value and send (buffered/blocking mode).
//- Uses \c operator<< for serialization
template<class Type>
static void bsend
(
const Type& value,
const int toProcNo,
const int tag = UPstream::msgType(),
const label comm = UPstream::worldComm,
IOstreamOption::streamFormat fmt = IOstreamOption::BINARY
)
{
OPstream::send
(
value,
UPstream::commsTypes::blocking,
toProcNo,
tag,
comm,
fmt
);
}
//- Serialize a value and send (standard mode).
//- Uses \c operator<< for serialization
template<class Type>
@ -131,7 +108,7 @@ public:
OPstream::send
(
value,
UPstream::commsTypes::scheduled,
UPstream::commsTypes::scheduled, // ie, MPI_Send()
toProcNo,
tag,
comm,

View File

@ -6,7 +6,7 @@
\\/ M anipulation |
-------------------------------------------------------------------------------
Copyright (C) 2011 OpenFOAM Foundation
Copyright (C) 2022-2023 OpenCFD Ltd.
Copyright (C) 2022-2024 OpenCFD Ltd.
-------------------------------------------------------------------------------
License
This file is part of OpenFOAM.
@ -86,6 +86,15 @@ Foam::OPstream::OPstream
{}
// * * * * * * * * * * * * * * * Member Functions * * * * * * * * * * * * * //
bool Foam::UOPstream::send()
{
sendAtDestruct_ = false;
return bufferIPCsend();
}
// * * * * * * * * * * * * * * * * Destructor * * * * * * * * * * * * * * * //
Foam::UOPstream::~UOPstream()

View File

@ -202,7 +202,7 @@ void exchangeConsensus
UIPstream::read
(
UPstream::commsTypes::blocking,
UPstream::commsTypes::scheduled, // ie, MPI_Recv()
proci,
recvData.data_bytes(),
recvData.size_bytes(),
@ -369,7 +369,7 @@ void exchangeConsensus
UIPstream::read
(
UPstream::commsTypes::blocking,
UPstream::commsTypes::scheduled, // ie, MPI_Recv()
proci,
recvData.data_bytes(),
recvData.size_bytes(),

View File

@ -6,7 +6,7 @@
\\/ M anipulation |
-------------------------------------------------------------------------------
Copyright (C) 2011-2014 OpenFOAM Foundation
Copyright (C) 2017-2023 OpenCFD Ltd.
Copyright (C) 2017-2024 OpenCFD Ltd.
-------------------------------------------------------------------------------
License
This file is part of OpenFOAM.
@ -104,7 +104,7 @@ protected:
const int comm_;
//- Call bufferIPCsend on termination (in the destructor)
const bool sendAtDestruct_;
bool sendAtDestruct_;
//- Reference to the send buffer data
DynamicList<char>& sendBuf_;
@ -309,7 +309,7 @@ class UOPstream
{
// Private Member Functions
//- Final buffer send, called by destructor
//- Buffer send, usually called by destructor
bool bufferIPCsend();
@ -349,6 +349,10 @@ public:
// Member Functions
//- Send buffer contents now and not in destructor [advanced usage].
//- Returns true on success
bool send();
//- Use all write methods from base class
using UOPstreamBase::write;
@ -470,7 +474,7 @@ class UOPBstream
{
// Private Member Functions
//- Final buffer send, called by destructor
//- Buffer send, usually called by destructor
bool bufferIPCsend();

View File

@ -415,8 +415,8 @@ void Foam::globalMeshData::calcSharedEdges() const
{
if (UPstream::parRun())
{
// buffered send local edges to master
OPstream::bsend(localShared, UPstream::masterNo());
// send local edges to master
OPstream::send(localShared, UPstream::masterNo());
}
}
@ -442,7 +442,7 @@ void Foam::globalMeshData::calcSharedEdges() const
{
// My local edge is indeed a shared one. Go through all local edge
// labels with this point combination.
const labelList& edgeLabels = iter();
const labelList& edgeLabels = iter.val();
for (const label edgei : edgeLabels)
{
@ -470,11 +470,9 @@ void Foam::globalMeshData::calcSharedEdges() const
{
Pout<< "globalMeshData : nGlobalEdges_:" << nGlobalEdges_ << nl
<< "globalMeshData : sharedEdgeLabels:"
<< sharedEdgeLabelsPtr_().size()
<< nl
<< sharedEdgeLabelsPtr_().size() << nl
<< "globalMeshData : sharedEdgeAddr:"
<< sharedEdgeAddrPtr_().size()
<< endl;
<< sharedEdgeAddrPtr_().size() << endl;
}
}
@ -1856,7 +1854,7 @@ Foam::pointField Foam::globalMeshData::sharedPoints() const
const labelList& pointAddr = sharedPointAddr();
const labelList& pointLabels = sharedPointLabels();
if (Pstream::master())
if (UPstream::master())
{
// Master:
// insert my own data first
@ -1868,9 +1866,9 @@ Foam::pointField Foam::globalMeshData::sharedPoints() const
}
// Receive data and insert
for (const int proci : Pstream::subProcs())
for (const int proci : UPstream::subProcs())
{
IPstream fromProc(Pstream::commsTypes::blocking, proci);
IPstream fromProc(UPstream::commsTypes::scheduled, proci);
labelList nbrSharedPointAddr;
pointField nbrSharedPoints;
@ -1886,13 +1884,13 @@ Foam::pointField Foam::globalMeshData::sharedPoints() const
}
else
{
if (Pstream::parRun())
if (UPstream::parRun())
{
// Send address and points
OPstream toMaster
(
Pstream::commsTypes::blocking,
Pstream::masterNo()
UPstream::commsTypes::scheduled,
UPstream::masterNo()
);
toMaster
<< pointAddr
@ -2689,19 +2687,8 @@ void Foam::globalMeshData::updateMesh()
Pout<< "globalMeshData : merge dist:" << tolDim << endl;
}
// NOTE
// - revisit the dupComm hack
// - verify if it should be mesh_.comm() instead of worldComm
// *** Temporary hack to avoid problems with overlapping communication
// *** between these reductions and the calculation of deltaCoeffs
UPstream::communicator dupComm
(
UPstream::worldComm,
labelRange(UPstream::nProcs(UPstream::worldComm))
);
const label comm = dupComm.comm();
const label comm = mesh_.comm();
const label oldWarnComm = UPstream::commWarn(comm);
if (UPstream::is_parallel(comm))
@ -2752,18 +2739,6 @@ void Foam::globalMeshData::updateMesh()
// Restore communicator settings
UPstream::commWarn(oldWarnComm);
// OLD CODE:
// FixedList<label, 3> totals;
// totals[0] = mesh_.nPoints();
// totals[1] = mesh_.nFaces();
// totals[2] = mesh_.nCells();
//
// reduce(totals, sumOp<label>(), UPstream::msgType(), comm);
//
// nTotalPoints_ = totals[0];
// nTotalFaces_ = totals[1];
// nTotalCells_ = totals[2];
if (debug)
{
Info<< "globalMeshData : Total points/faces/cells : ("

View File

@ -509,7 +509,8 @@ void Foam::mapDistributeBase::distribute
);
// buffered send
OPstream::bsend(subField, proci, tag, comm);
OPstream os(commsType, proci, 0, tag, comm);
os << subField;
}
}
@ -953,7 +954,8 @@ void Foam::mapDistributeBase::distribute
);
// buffered send
OPstream::bsend(subField, proci, tag, comm);
OPstream os(commsType, proci, 0, tag, comm);
os << subField;
}
}

View File

@ -55,6 +55,7 @@ SourceFiles
#include "labelIOList.H"
#include "polyBoundaryMesh.H"
#include "boundBox.H"
#include "globalIndex.H"
#include "pointZoneMesh.H"
#include "faceZoneMesh.H"
#include "cellZoneMesh.H"

View File

@ -94,7 +94,7 @@ void Foam::syncTools::syncPointMap
const globalMeshData& pd = mesh.globalData();
// Values on shared points. Keyed on global shared index.
Map<T> sharedPointValues(0);
Map<T> sharedPointValues;
if (pd.nGlobalPoints() > 0)
{
@ -205,7 +205,7 @@ void Foam::syncTools::syncPointMap
continue;
}
Map<T> nbrPatchInfo(0);
Map<T> nbrPatchInfo;
{
UIPstream fromNbr(nbrProci, pBufs);
fromNbr >> nbrPatchInfo;
@ -472,7 +472,7 @@ void Foam::syncTools::syncEdgeMap
continue;
}
EdgeMap<T> nbrPatchInfo(0);
EdgeMap<T> nbrPatchInfo;
{
UIPstream fromNbr(nbrProci, pBufs);
fromNbr >> nbrPatchInfo;

View File

@ -90,7 +90,7 @@ void Foam::vtk::patchWriter::write
// Receive each patch field and write
for (const int subproci : Pstream::subProcs())
{
IPstream fromProc(Pstream::commsTypes::blocking, subproci);
IPstream fromProc(UPstream::commsTypes::scheduled, subproci);
for (label i=0; i < nPatches; ++i)
{
@ -105,8 +105,8 @@ void Foam::vtk::patchWriter::write
// Send each patch field
OPstream toProc
(
Pstream::commsTypes::blocking,
Pstream::masterNo()
UPstream::commsTypes::scheduled,
UPstream::masterNo()
);
for (const label patchId : patchIDs_)
@ -188,7 +188,7 @@ void Foam::vtk::patchWriter::write
// Receive each patch field and write
for (const int subproci : Pstream::subProcs())
{
IPstream fromProc(Pstream::commsTypes::blocking, subproci);
IPstream fromProc(UPstream::commsTypes::scheduled, subproci);
for (label i=0; i < nPatches; ++i)
{
@ -203,8 +203,8 @@ void Foam::vtk::patchWriter::write
// Send each patch field
OPstream toProc
(
Pstream::commsTypes::blocking,
Pstream::masterNo()
UPstream::commsTypes::scheduled,
UPstream::masterNo()
);
for (const label patchId : patchIDs_)
@ -287,14 +287,14 @@ void Foam::vtk::patchWriter::write
// Patch Ids are identical across all processes
const label nPatches = patchIDs_.size();
if (Pstream::master())
if (UPstream::master())
{
Field<Type> recv;
// Receive each patch field and write
for (const int subproci : Pstream::subProcs())
for (const int subproci : UPstream::subProcs())
{
IPstream fromProc(Pstream::commsTypes::blocking, subproci);
IPstream fromProc(UPstream::commsTypes::scheduled, subproci);
for (label i=0; i < nPatches; ++i)
{
@ -309,8 +309,8 @@ void Foam::vtk::patchWriter::write
// Send each patch field
OPstream toProc
(
Pstream::commsTypes::blocking,
Pstream::masterNo()
UPstream::commsTypes::scheduled,
UPstream::masterNo()
);
for (const label patchId : patchIDs_)

View File

@ -2171,7 +2171,6 @@ Foam::autoPtr<Foam::mapDistributePolyMesh> Foam::fvMeshDistribute::distribute
}
// Pstream for sending mesh and fields
//OPstream str(Pstream::commsTypes::blocking, recvProc);
UOPstream str(recvProc, pBufs);
// Mesh subsetting engine - subset the cells of the current domain.

View File

@ -5,7 +5,7 @@
\\ / A nd | www.openfoam.com
\\/ M anipulation |
-------------------------------------------------------------------------------
Copyright (C) 2015-2023 OpenCFD Ltd.
Copyright (C) 2015-2024 OpenCFD Ltd.
-------------------------------------------------------------------------------
License
This file is part of OpenFOAM.
@ -30,6 +30,7 @@ License
#include "addToRunTimeSelectionTable.H"
#include "OSspecific.H"
#include "Fstream.H"
#include "SpanStream.H"
#include "volFields.H"
#include "globalIndex.H"
#include "fvMesh.H"
@ -110,15 +111,15 @@ void Foam::functionObjects::externalCoupled::readColumns
// Get sizes for all processors
const globalIndex globalFaces(globalIndex::gatherOnly{}, nRows);
PstreamBuffers pBufs(Pstream::commsTypes::nonBlocking);
PstreamBuffers pBufs(UPstream::commsTypes::nonBlocking);
if (Pstream::master())
if (UPstream::master())
{
// Read data from file and send to destination processor
auto& ifile = masterFilePtr();
string line;
// Read data from file and send to destination processor
for (const int proci : Pstream::allProcs())
for (const int proci : UPstream::allProcs())
{
// Temporary storage
List<scalarField> values(nColumns);
@ -136,9 +137,9 @@ void Foam::functionObjects::externalCoupled::readColumns
// Get a line
do
{
if (!masterFilePtr().good())
if (!ifile.good())
{
FatalIOErrorInFunction(masterFilePtr())
FatalIOErrorInFunction(ifile)
<< "Trying to read data for processor " << proci
<< " row " << rowi
<< ". Does your file have as many rows as there are"
@ -146,15 +147,15 @@ void Foam::functionObjects::externalCoupled::readColumns
<< ") ?" << exit(FatalIOError);
}
masterFilePtr().getLine(line);
ifile.getLine(line);
}
while (line.empty() || line[0] == '#');
IStringStream lineStr(line);
ISpanStream isstr(line);
for (label columni = 0; columni < nColumns; ++columni)
{
lineStr >> values[columni][rowi];
isstr >> values[columni][rowi];
}
}
@ -165,7 +166,7 @@ void Foam::functionObjects::externalCoupled::readColumns
}
pBufs.finishedScatters();
// Get scattered data from PstreamBuffers
// Get scattered data
UIPstream fromMaster(UPstream::masterNo(), pBufs);
fromMaster >> data;
}
@ -175,21 +176,21 @@ void Foam::functionObjects::externalCoupled::readLines
(
const label nRows,
autoPtr<IFstream>& masterFilePtr,
OStringStream& lines
std::string& lines
) const
{
// Get sizes for all processors
const globalIndex globalFaces(globalIndex::gatherOnly{}, nRows);
PstreamBuffers pBufs(Pstream::commsTypes::nonBlocking);
PstreamBuffers pBufs(UPstream::commsTypes::nonBlocking);
if (Pstream::master())
if (UPstream::master())
{
// Read line from file and send to destination processor
auto& ifile = masterFilePtr();
string line;
// Read line from file and send to destination processor
for (const int proci : Pstream::allProcs())
for (const int proci : UPstream::allProcs())
{
// Number of rows to read for processor proci
const label procNRows = globalFaces.localSize(proci);
@ -201,9 +202,9 @@ void Foam::functionObjects::externalCoupled::readLines
// Get a line
do
{
if (!masterFilePtr().good())
if (!ifile.good())
{
FatalIOErrorInFunction(masterFilePtr())
FatalIOErrorInFunction(ifile)
<< "Trying to read data for processor " << proci
<< " row " << rowi
<< ". Does your file have as many rows as there are"
@ -211,11 +212,11 @@ void Foam::functionObjects::externalCoupled::readLines
<< ") ?" << exit(FatalIOError);
}
masterFilePtr().getLine(line);
ifile.getLine(line);
}
while (line.empty() || line[0] == '#');
// Send line to the destination processor
// Send line (without newline) to the destination processor
toProc << line;
}
}
@ -223,12 +224,16 @@ void Foam::functionObjects::externalCoupled::readLines
pBufs.finishedScatters();
// Get scattered data from PstreamBuffers
// Sizing is approximate (slightly too large)
lines.reserve(pBufs.recvDataCount(UPstream::masterNo()));
// Get scattered data
UIPstream fromMaster(UPstream::masterNo(), pBufs);
for (label rowi = 0; rowi < nRows; ++rowi)
{
string line(fromMaster);
lines << line.c_str() << nl;
lines += line;
lines += '\n';
}
}
@ -253,9 +258,9 @@ void Foam::functionObjects::externalCoupled::writeGeometry
autoPtr<OFstream> osPointsPtr;
autoPtr<OFstream> osFacesPtr;
if (Pstream::master())
if (UPstream::master())
{
mkDir(dir);
Foam::mkDir(dir);
osPointsPtr.reset(new OFstream(dir/"patchPoints"));
osFacesPtr.reset(new OFstream(dir/"patchFaces"));
@ -278,7 +283,7 @@ void Foam::functionObjects::externalCoupled::writeGeometry
(
mesh.boundaryMesh().patchSet
(
wordRes(one{}, groupName)
wordRes(Foam::one{}, groupName)
).sortedToc()
);
@ -303,13 +308,13 @@ void Foam::functionObjects::externalCoupled::writeGeometry
List<faceList> collectedFaces(Pstream::nProcs());
faceList& patchFaces = collectedFaces[proci];
patchFaces = p.localFaces();
forAll(patchFaces, facei)
for (auto& f : patchFaces)
{
inplaceRenumber(pointToGlobal, patchFaces[facei]);
inplaceRenumber(pointToGlobal, f);
}
Pstream::gatherList(collectedFaces);
if (Pstream::master())
if (UPstream::master())
{
allPoints.clear();
allFaces.clear();

View File

@ -5,7 +5,7 @@
\\ / A nd | www.openfoam.com
\\/ M anipulation |
-------------------------------------------------------------------------------
Copyright (C) 2015-2020 OpenCFD Ltd.
Copyright (C) 2015-2024 OpenCFD Ltd.
-------------------------------------------------------------------------------
License
This file is part of OpenFOAM.
@ -167,8 +167,8 @@ SourceFiles
\*---------------------------------------------------------------------------*/
#ifndef functionObjects_externalCoupled_H
#define functionObjects_externalCoupled_H
#ifndef Foam_functionObjects_externalCoupled_H
#define Foam_functionObjects_externalCoupled_H
#include "timeFunctionObject.H"
#include "externalFileCoupler.H"
@ -198,7 +198,6 @@ class externalCoupled
public functionObjects::timeFunctionObject,
public externalFileCoupler
{
// Private Member Data
//- Calculation frequency
@ -259,8 +258,9 @@ class externalCoupled
void initCoupling();
//- Read (and distribute) scalar columns from stream. Every processor
// gets nRows (= patch size) of these. Note: could make its argument
//- Read (and distribute) scalar columns from stream.
//- Every processor gets nRows (= patch size) of these.
// Note: could make its argument
// ISstream& but then would need additional logic to construct valid
// stream on all processors.
void readColumns
@ -271,21 +271,17 @@ class externalCoupled
List<scalarField>& data
) const;
//- Read (and distribute) lines from stream. Every processor
// gets nRows (= patch size) of these. Data kept as stream (instead
// of strings) for ease of interfacing to readData routines that take
// an Istream.
//- Read (and distribute) lines from stream.
//- Every processor gets nRows (= patch size) of these
//- (newline terminated).
void readLines
(
const label nRows,
autoPtr<IFstream>& masterFilePtr,
OStringStream& data
//! [out] the nRows lines read (for this rank) - newline terminated
std::string& lines
) const;
//- Helper: append data from all processors onto master
template<class Type>
static tmp<Field<Type>> gatherAndCombine(const Field<Type>& fld);
static void checkOrder(const wordList& regionNames);
//- Perform the coupling with necessary initialization etc.

View File

@ -5,7 +5,7 @@
\\ / A nd | www.openfoam.com
\\/ M anipulation |
-------------------------------------------------------------------------------
Copyright (C) 2015-2021 OpenCFD Ltd.
Copyright (C) 2015-2024 OpenCFD Ltd.
-------------------------------------------------------------------------------
License
This file is part of OpenFOAM.
@ -33,6 +33,7 @@ License
#include "mixedFvPatchFields.H"
#include "fixedGradientFvPatchFields.H"
#include "fixedValueFvPatchFields.H"
#include "SpanStream.H"
#include "StringStream.H"
#include "globalIndex.H"
@ -58,7 +59,7 @@ bool Foam::functionObjects::externalCoupled::readData
// File only opened on master; contains data for all processors, for all
// patchIDs.
autoPtr<IFstream> masterFilePtr;
if (Pstream::master())
if (UPstream::master())
{
const fileName transferFile
(
@ -80,13 +81,13 @@ bool Foam::functionObjects::externalCoupled::readData
}
const wordRes patchSelection(one{}, groupName);
const wordRes patchSelection(Foam::one{}, groupName);
label nFound = 0;
for (const fvMesh& mesh : meshes)
{
const volFieldType* vfptr = mesh.findObject<volFieldType>(fieldName);
auto* vfptr = mesh.getObjectPtr<volFieldType>(fieldName);
if (!vfptr)
{
@ -94,8 +95,7 @@ bool Foam::functionObjects::externalCoupled::readData
}
++nFound;
typename volFieldType::Boundary& bf =
const_cast<volFieldType*>(vfptr)->boundaryFieldRef();
auto& bf = vfptr->boundaryFieldRef();
// Get the patches
const labelList patchIDs
@ -117,16 +117,18 @@ bool Foam::functionObjects::externalCoupled::readData
);
// Read from master into local stream
OStringStream os;
std::string lines;
readLines
(
bf[patchi].size(), // number of lines to read
bf[patchi].size(), // number of lines to read
masterFilePtr,
os
lines
);
ISpanStream isstr(lines);
// Pass responsibility for all reading over to bc
pf.readData(IStringStream(os.str())());
pf.readData(isstr);
// Update the value from the read coefficient. Bypass any
// additional processing by derived type.
@ -265,51 +267,6 @@ bool Foam::functionObjects::externalCoupled::readData
}
template<class Type>
Foam::tmp<Foam::Field<Type>>
Foam::functionObjects::externalCoupled::gatherAndCombine
(
const Field<Type>& fld
)
{
// Collect values from all processors
List<Field<Type>> gatheredValues(Pstream::nProcs());
gatheredValues[Pstream::myProcNo()] = fld;
Pstream::gatherList(gatheredValues);
auto tresult = tmp<Field<Type>>::New();
auto& result = tresult.ref();
if (Pstream::master())
{
// Combine values into single field
label globalElemi = 0;
forAll(gatheredValues, lsti)
{
globalElemi += gatheredValues[lsti].size();
}
result.setSize(globalElemi);
globalElemi = 0;
forAll(gatheredValues, lsti)
{
const Field<Type>& sub = gatheredValues[lsti];
forAll(sub, elemi)
{
result[globalElemi++] = sub[elemi];
}
}
}
return tresult;
}
template<class Type>
bool Foam::functionObjects::externalCoupled::writeData
(
@ -352,14 +309,14 @@ bool Foam::functionObjects::externalCoupled::writeData
}
const wordRes patchSelection(one{}, groupName);
const wordRes patchSelection(Foam::one{}, groupName);
bool headerDone = false;
label nFound = 0;
for (const fvMesh& mesh : meshes)
{
const volFieldType* vfptr = mesh.findObject<volFieldType>(fieldName);
const auto* vfptr = mesh.getObjectPtr<volFieldType>(fieldName);
if (!vfptr)
{
@ -367,8 +324,7 @@ bool Foam::functionObjects::externalCoupled::writeData
}
++nFound;
const typename volFieldType::Boundary& bf =
vfptr->boundaryField();
const auto& bf = vfptr->boundaryField();
// Get the patches
const labelList patchIDs
@ -379,7 +335,7 @@ bool Foam::functionObjects::externalCoupled::writeData
// Handle column-wise writing of patch data. Supports most easy types
for (const label patchi : patchIDs)
{
const globalIndex globalFaces(bf[patchi].size());
// const globalIndex globalFaces(bf[patchi].size());
if (isA<patchFieldType>(bf[patchi]))
{
@ -397,7 +353,7 @@ bool Foam::functionObjects::externalCoupled::writeData
// Collect contributions from all processors and output them on
// master
if (Pstream::master())
if (UPstream::master())
{
// Output master data first
if (!headerDone)
@ -407,27 +363,17 @@ bool Foam::functionObjects::externalCoupled::writeData
}
masterFilePtr() << os.str().c_str();
for (const int proci : Pstream::subProcs())
for (const int proci : UPstream::subProcs())
{
IPstream fromSlave
(
Pstream::commsTypes::scheduled,
proci
);
string str;
IPstream::recv(str, proci);
string str(fromSlave);
masterFilePtr() << str.c_str();
}
}
else
{
OPstream toMaster
(
Pstream::commsTypes::scheduled,
Pstream::masterNo()
);
toMaster << os.str();
OPstream::send(os.str(), UPstream::masterNo());
}
}
else if (isA<mixedFvPatchField<Type>>(bf[patchi]))
@ -435,15 +381,21 @@ bool Foam::functionObjects::externalCoupled::writeData
const mixedFvPatchField<Type>& pf =
refCast<const mixedFvPatchField<Type>>(bf[patchi]);
Field<Type> value(gatherAndCombine(pf));
Field<Type> snGrad(gatherAndCombine(pf.snGrad()()));
Field<Type> refValue(gatherAndCombine(pf.refValue()));
Field<Type> refGrad(gatherAndCombine(pf.refGrad()));
scalarField valueFraction(gatherAndCombine(pf.valueFraction()));
const globalIndex glob
(
globalIndex::gatherOnly{},
pf.size()
);
if (Pstream::master())
Field<Type> value(glob.gather(pf));
Field<Type> snGrad(glob.gather(pf.snGrad()()));
Field<Type> refValue(glob.gather(pf.refValue()));
Field<Type> refGrad(glob.gather(pf.refGrad()));
scalarField valueFraction(glob.gather(pf.valueFraction()));
if (UPstream::master())
{
forAll(refValue, facei)
forAll(value, facei)
{
masterFilePtr()
<< value[facei] << token::SPACE
@ -457,9 +409,17 @@ bool Foam::functionObjects::externalCoupled::writeData
else
{
// Output the value and snGrad
Field<Type> value(gatherAndCombine(bf[patchi]));
Field<Type> snGrad(gatherAndCombine(bf[patchi].snGrad()()));
if (Pstream::master())
const globalIndex glob
(
globalIndex::gatherOnly{},
bf[patchi].size()
);
Field<Type> value(glob.gather(bf[patchi]));
Field<Type> snGrad(glob.gather(bf[patchi].snGrad()()));
if (UPstream::master())
{
forAll(value, facei)
{

View File

@ -5,7 +5,7 @@
\\ / A nd | www.openfoam.com
\\/ M anipulation |
-------------------------------------------------------------------------------
Copyright (C) 2018-2022 OpenCFD Ltd.
Copyright (C) 2018-2024 OpenCFD Ltd.
-------------------------------------------------------------------------------
License
This file is part of OpenFOAM.
@ -77,7 +77,7 @@ void Foam::functionObjects::dataCloud::writeListParallel
const List<Type>& field
)
{
if (Pstream::master())
if (UPstream::master())
{
writeList(os, points, field);
@ -85,9 +85,9 @@ void Foam::functionObjects::dataCloud::writeListParallel
Field<Type> recvField;
// Receive and write
for (const int proci : Pstream::subProcs())
for (const int proci : UPstream::subProcs())
{
IPstream fromProc(Pstream::commsTypes::blocking, proci);
IPstream fromProc(UPstream::commsTypes::scheduled, proci);
fromProc >> recvPoints >> recvField;
@ -99,8 +99,8 @@ void Foam::functionObjects::dataCloud::writeListParallel
// Send to master
OPstream toMaster
(
Pstream::commsTypes::blocking,
Pstream::masterNo()
UPstream::commsTypes::scheduled,
UPstream::masterNo()
);
toMaster
@ -134,7 +134,7 @@ void Foam::functionObjects::dataCloud::writeListParallel
const bitSet& selected
)
{
if (Pstream::master())
if (UPstream::master())
{
writeList(os, points, field, selected);
@ -142,9 +142,9 @@ void Foam::functionObjects::dataCloud::writeListParallel
Field<Type> recvField;
// Receive and write
for (const int proci : Pstream::subProcs())
for (const int proci : UPstream::subProcs())
{
IPstream fromProc(Pstream::commsTypes::blocking, proci);
IPstream fromProc(UPstream::commsTypes::scheduled, proci);
fromProc >> recvPoints >> recvField;
@ -156,8 +156,8 @@ void Foam::functionObjects::dataCloud::writeListParallel
// Send to master
OPstream toMaster
(
Pstream::commsTypes::blocking,
Pstream::masterNo()
UPstream::commsTypes::scheduled,
UPstream::masterNo()
);
toMaster
@ -195,7 +195,7 @@ bool Foam::functionObjects::dataCloud::writeField
autoPtr<OFstream> osPtr;
if (Pstream::master())
if (UPstream::master())
{
osPtr.reset(new OFstream(outputName));
osPtr->precision(precision_);

View File

@ -166,23 +166,8 @@ void Foam::meshRefinement::collectAndPrint
{
const globalIndex globalPoints(points.size());
pointField allPoints;
globalPoints.gather
(
points,
allPoints,
UPstream::msgType(),
Pstream::commsTypes::blocking
);
List<T> allData;
globalPoints.gather
(
data,
allData,
UPstream::msgType(),
Pstream::commsTypes::blocking
);
pointField allPoints(globalPoints.gather(points));
List<T> allData(globalPoints.gather(data));
scalarField magAllPoints(mag(allPoints-point(-0.317, 0.117, 0.501)));

View File

@ -5,7 +5,7 @@
\\ / A nd | www.openfoam.com
\\/ M anipulation |
-------------------------------------------------------------------------------
Copyright (C) 2016-2023 OpenCFD Ltd.
Copyright (C) 2016-2024 OpenCFD Ltd.
-------------------------------------------------------------------------------
License
This file is part of OpenFOAM.
@ -87,7 +87,7 @@ void Foam::vtk::patchMeshWriter::writePoints()
this->beginPoints(numberOfPoints_);
if (parallel_ ? Pstream::master() : true)
if (parallel_ ? UPstream::master() : true)
{
for (const label patchId : patchIDs_)
{
@ -103,14 +103,14 @@ void Foam::vtk::patchMeshWriter::writePoints()
// Patch Ids are identical across all processes
const label nPatches = patchIDs_.size();
if (Pstream::master())
if (UPstream::master())
{
pointField recv;
// Receive each point field and write
for (const int subproci : Pstream::subProcs())
for (const int subproci : UPstream::subProcs())
{
IPstream fromProc(Pstream::commsTypes::blocking, subproci);
IPstream fromProc(UPstream::commsTypes::scheduled, subproci);
for (label i=0; i < nPatches; ++i)
{
@ -125,8 +125,8 @@ void Foam::vtk::patchMeshWriter::writePoints()
// Send each point field
OPstream toProc
(
Pstream::commsTypes::blocking,
Pstream::masterNo()
UPstream::commsTypes::scheduled,
UPstream::masterNo()
);
for (const label patchId : patchIDs_)
@ -523,7 +523,7 @@ void Foam::vtk::patchMeshWriter::writePatchIDs()
this->beginDataArray<label>("patchID", nPolys);
if (parallel_ ? Pstream::master() : true)
if (parallel_ ? UPstream::master() : true)
{
for (const label patchId : patchIDs_)
{
@ -533,14 +533,14 @@ void Foam::vtk::patchMeshWriter::writePatchIDs()
if (parallel_)
{
if (Pstream::master())
if (UPstream::master())
{
labelList recv;
// Receive each pair
for (const int subproci : Pstream::subProcs())
for (const int subproci : UPstream::subProcs())
{
IPstream fromProc(Pstream::commsTypes::blocking, subproci);
IPstream fromProc(UPstream::commsTypes::scheduled, subproci);
fromProc >> recv;
@ -559,8 +559,8 @@ void Foam::vtk::patchMeshWriter::writePatchIDs()
// Send
OPstream toProc
(
Pstream::commsTypes::blocking,
Pstream::masterNo()
UPstream::commsTypes::scheduled,
UPstream::masterNo()
);
// Encode as [size, id] pairs
@ -595,7 +595,7 @@ bool Foam::vtk::patchMeshWriter::writeProcIDs()
bool Foam::vtk::patchMeshWriter::writeNeighIDs()
{
if (!Pstream::parRun())
if (!UPstream::parRun())
{
// Skip in non-parallel
return false;
@ -626,7 +626,7 @@ bool Foam::vtk::patchMeshWriter::writeNeighIDs()
bool good = false;
if (parallel_ ? Pstream::master() : true)
if (parallel_ ? UPstream::master() : true)
{
for (const label patchId : patchIDs_)
{
@ -642,14 +642,14 @@ bool Foam::vtk::patchMeshWriter::writeNeighIDs()
if (parallel_)
{
if (Pstream::master())
if (UPstream::master())
{
labelList recv;
// Receive each pair
for (const int subproci : Pstream::subProcs())
for (const int subproci : UPstream::subProcs())
{
IPstream fromProc(Pstream::commsTypes::blocking, subproci);
IPstream fromProc(UPstream::commsTypes::scheduled, subproci);
fromProc >> recv;
@ -668,8 +668,8 @@ bool Foam::vtk::patchMeshWriter::writeNeighIDs()
// Send
OPstream toProc
(
Pstream::commsTypes::blocking,
Pstream::masterNo()
UPstream::commsTypes::scheduled,
UPstream::masterNo()
);
// Encode as [size, id] pairs

View File

@ -165,7 +165,12 @@ void Foam::isoSurfacePoint::syncUnseparatedPoints
}
// buffered send
OPstream::bsend(patchInfo, procPatch.neighbProcNo());
OPstream toNbr
(
UPstream::commsTypes::blocking,
procPatch.neighbProcNo()
);
toNbr << patchInfo;
}
}
@ -177,10 +182,10 @@ void Foam::isoSurfacePoint::syncUnseparatedPoints
if (pp.nPoints() && collocatedPatch(pp))
{
pointField nbrPatchInfo;
// We do not know the number of points on the other side
// so cannot use UIPstream::read
pointField nbrPatchInfo;
IPstream::recv(nbrPatchInfo, procPatch.neighbProcNo());
const labelList& meshPts = procPatch.meshPoints();