openfoam/applications/utilities/parallelProcessing/redistributePar/parLagrangianDistributor.C
Mark Olesen fb69a54bc3 ENH: avoid prior communication when using mapDistribute
- in most cases can simply construct mapDistribute with the sendMap
  and have it take care of communication and addressing for the
  corresponding constructMap.

  This removes code duplication, which in some cases was also using
  much less efficient mechanisms (e.g., combineReduce on a list of
  lists, or an allGatherList on the send sizes), and also reduces
  the number of places where Pstream::exchange/exchangeSizes is
  called.

ENH: reduce communication in turbulentDFSEMInlet

- was doing an allGatherList to populate a mapDistribute.
  Now uses the PstreamBuffers mechanisms directly.
2023-02-14 23:22:55 +01:00
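
A minimal sketch of the pattern (an illustration based on the
description above, not code from this commit): the caller fills only
the sendMap and lets mapDistribute communicate to establish the
corresponding constructMap. The sendMap-only constructor is assumed
from the commit description:

    // Per-processor local indices to send (the sendMap)
    labelListList sendMap(Pstream::nProcs());
    // ... fill sendMap[proci] with local element indices ...

    // mapDistribute exchanges the addressing itself and derives the
    // constructMap, instead of a prior allGatherList/exchangeSizes
    mapDistribute map(std::move(sendMap));

    // map.distribute(values) then sends each element to its destination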


/*---------------------------------------------------------------------------*\
  =========                 |
  \\      /  F ield         | OpenFOAM: The Open Source CFD Toolbox
   \\    /   O peration     |
    \\  /    A nd           | www.openfoam.com
     \\/     M anipulation  |
-------------------------------------------------------------------------------
    Copyright (C) 2015 OpenFOAM Foundation
    Copyright (C) 2015-2022 OpenCFD Ltd.
-------------------------------------------------------------------------------
License
    This file is part of OpenFOAM.

    OpenFOAM is free software: you can redistribute it and/or modify it
    under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
    ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    for more details.

    You should have received a copy of the GNU General Public License
    along with OpenFOAM.  If not, see <http://www.gnu.org/licenses/>.

\*---------------------------------------------------------------------------*/

#include "ListOps.H"
#include "parLagrangianDistributor.H"
#include "passivePositionParticleCloud.H"

// * * * * * * * * * * * * * * Static Data Members * * * * * * * * * * * * * //

int Foam::parLagrangianDistributor::verbose_ = 1;

// * * * * * * * * * * * * * * * * Constructors * * * * * * * * * * * * * * //

Foam::parLagrangianDistributor::parLagrangianDistributor
(
const fvMesh& srcMesh,
const fvMesh& tgtMesh,
const label nSrcCells,
const mapDistributePolyMesh& distMap
)
:
srcMesh_(srcMesh),
tgtMesh_(tgtMesh),
distMap_(distMap)
{
const mapDistribute& cellMap = distMap_.cellMap();
// Get destination processors and cells
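// (reverseDistribute maps target data back to the source layout: after
//  the two calls below, each source cell of srcMesh holds the processor
//  and the cell index it will occupy on tgtMesh after redistribution)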
destinationProcID_ = labelList(tgtMesh_.nCells(), Pstream::myProcNo());
cellMap.reverseDistribute(nSrcCells, destinationProcID_);
destinationCell_ = identity(tgtMesh_.nCells());
cellMap.reverseDistribute(nSrcCells, destinationCell_);
}

// * * * * * * * * * * * * * * * Member Functions * * * * * * * * * * * * * //

// Find all clouds (on all processors) and for each cloud all the objects.
// Result will be synchronised on all processors
void Foam::parLagrangianDistributor::findClouds
(
const fvMesh& mesh,
wordList& cloudNames,
List<wordList>& objectNames
)
{
fileNameList localCloudDirs
(
readDir
(
mesh.time().timePath()
/ mesh.dbDir()
/ cloud::prefix,
fileName::DIRECTORY
)
);
cloudNames.setSize(localCloudDirs.size());
forAll(localCloudDirs, i)
{
cloudNames[i] = localCloudDirs[i];
}
// Synchronise cloud names
Pstream::combineReduce(cloudNames, ListOps::uniqueEqOp<word>());
Foam::sort(cloudNames); // Consistent order
objectNames.clear();
objectNames.resize(cloudNames.size());
for (const fileName& localCloudName : localCloudDirs)
{
// Do local scan for valid cloud objects
IOobjectList localObjs
(
mesh,
mesh.time().timeName(),
cloud::prefix/localCloudName
);
bool isCloud = false;
if (localObjs.erase("coordinates"))
{
isCloud = true;
}
if (localObjs.erase("positions"))
{
isCloud = true;
}
if (isCloud)
{
// Has coordinates/positions - so must be a valid cloud
const label cloudi = cloudNames.find(localCloudName);
objectNames[cloudi] = localObjs.sortedNames();
}
}
// Synchronise objectNames (per cloud)
for (wordList& objNames : objectNames)
{
Pstream::combineReduce(objNames, ListOps::uniqueEqOp<word>());
Foam::sort(objNames); // Consistent order
}
}
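
// Example usage (illustrative; 'srcMesh' is a placeholder here):
//
//     wordList cloudNames;
//     List<wordList> objectNames;
//     parLagrangianDistributor::findClouds(srcMesh, cloudNames, objectNames);
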
Foam::autoPtr<Foam::mapDistributeBase>
Foam::parLagrangianDistributor::distributeLagrangianPositions
(
passivePositionParticleCloud& lpi
) const
{
//Debug(lpi.size());
const label oldLpi = lpi.size();
labelListList subMap;
// Allocate transfer buffers
PstreamBuffers pBufs(Pstream::commsTypes::nonBlocking);
{
// List of lists of particles to be transferred for all of the
// neighbour processors
List<IDLList<passivePositionParticle>> particleTransferLists
(
Pstream::nProcs()
);
// Per particle the destination processor
labelList destProc(lpi.size());
label particleI = 0;
for (passivePositionParticle& ppi : lpi)
{
const label destProcI = destinationProcID_[ppi.cell()];
const label destCellI = destinationCell_[ppi.cell()];
ppi.cell() = destCellI;
destProc[particleI++] = destProcI;
particleTransferLists[destProcI].append(lpi.remove(&ppi));
}
// Per processor the indices of the particles to send
subMap = invertOneToMany(Pstream::nProcs(), destProc);
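// (i.e. subMap[proci] holds, in send order, the original local indices
//  of the particles streamed to processor proci)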
// Stream into send buffers
forAll(particleTransferLists, procI)
{
//Pout<< "To proc " << procI << " sending "
// << particleTransferLists[procI] << endl;
if (particleTransferLists[procI].size())
{
UOPstream particleStream(procI, pBufs);
particleStream << particleTransferLists[procI];
}
}
}
// Start sending
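// (for nonBlocking buffers, finishedSends() exchanges the buffer sizes
// and completes the point-to-point transfers, so the receive buffers
// can be unpacked immediately afterwards)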
pBufs.finishedSends();
{
// Temporarily rename original cloud so we can construct a new one
// (to distribute the positions) without getting a duplicate
// registration warning
const word cloudName = lpi.name();
lpi.rename(cloudName + "_old");
// New cloud on tgtMesh
passivePositionParticleCloud lagrangianPositions
(
tgtMesh_,
cloudName,
IDLList<passivePositionParticle>()
);
// Retrieve from receive buffers
for (const int proci : pBufs.allProcs())
{
//Pout<< "Receive from processor" << proci << " : "
// << pBufs.recvDataCount(proci) << endl;
if (pBufs.recvDataCount(proci))
{
UIPstream particleStream(proci, pBufs);
// Receive particles and locate them
IDLList<passivePositionParticle> newParticles
(
particleStream,
passivePositionParticle::iNew(tgtMesh_)
);
for (passivePositionParticle& newp : newParticles)
{
lagrangianPositions.addParticle(newParticles.remove(&newp));
}
}
}
if (lagrangianPositions.size())
{
// Write coordinates file
IOPosition<passivePositionParticleCloud>
(
lagrangianPositions
).write();
// Optionally write positions file in v1706 format and earlier
if (particle::writeLagrangianPositions)
{
IOPosition<passivePositionParticleCloud>
(
lagrangianPositions,
cloud::geometryType::POSITIONS
).write();
}
}
else if (oldLpi)
{
// When running with -overwrite it should also delete the old
// files. Below works but is not optimal.
// Remove any existing coordinates
const fileName oldCoords
(
IOPosition<passivePositionParticleCloud>
(
lagrangianPositions
).objectPath()
);
Foam::rm(oldCoords);
// Remove any existing positions
const fileName oldPos
(
IOPosition<passivePositionParticleCloud>
(
lagrangianPositions,
cloud::geometryType::POSITIONS
).objectPath()
);
Foam::rm(oldPos);
}
// Restore cloud name
lpi.rename(cloudName);
}
// Until now (FEB-2023) we have always used processor ordering for the
// construct map (whereas mapDistribute has local transfers first),
// so we'll stick with that for now, but can likely just use the subMap
// directly with mapDistribute and have it determine the constructMap.
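// An illustrative sketch of that alternative (assuming the sendMap-only
// constructor described in the commit message):
//
//     return autoPtr<mapDistributeBase>::New(std::move(subMap));
//
// which would communicate internally to deduce constructSize and the
// constructMap, but with local transfers ordered first.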
labelList recvSizes;
Pstream::exchangeSizes(subMap, recvSizes);
label constructSize = 0;
labelListList constructMap(Pstream::nProcs());
forAll(constructMap, proci)
{
const label len = recvSizes[proci];
constructMap[proci] = identity(len, constructSize);
constructSize += len;
}
return autoPtr<mapDistributeBase>::New
(
constructSize,
std::move(subMap),
std::move(constructMap)
);
}

Foam::autoPtr<Foam::mapDistributeBase>
Foam::parLagrangianDistributor::distributeLagrangianPositions
(
const word& cloudName
) const
{
// Load cloud and send particles
passivePositionParticleCloud lpi(srcMesh_, cloudName, false);
return distributeLagrangianPositions(lpi);
}

// ************************************************************************* //