/*---------------------------------------------------------------------------*\
  =========                 |
  \\      /  F ield         | OpenFOAM: The Open Source CFD Toolbox
   \\    /   O peration     |
    \\  /    A nd           | www.openfoam.com
     \\/     M anipulation  |
-------------------------------------------------------------------------------
    Copyright (C) 2011-2017 OpenFOAM Foundation
    Copyright (C) 2015-2021 OpenCFD Ltd.
-------------------------------------------------------------------------------
License
    This file is part of OpenFOAM.

    OpenFOAM is free software: you can redistribute it and/or modify it
    under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
    ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    for more details.

    You should have received a copy of the GNU General Public License
    along with OpenFOAM.  If not, see <http://www.gnu.org/licenses/>.

Application
    redistributePar

Group
    grpParallelUtilities

Description
    Redistributes existing decomposed mesh and fields according to the
    current settings in the decomposeParDict file.

    Must be run on the maximum number of source and destination processors.
    Balances the mesh and writes the new mesh to a new time directory.

    Can optionally run in decompose/reconstruct mode to decompose/reconstruct
    mesh and fields.

Usage
    \b redistributePar [OPTION]

    Options:
      - \par -decompose
        Remove any existing \a processor subdirectories and decompose the
        mesh. Equivalent to running without processor subdirectories.

      - \par -reconstruct
        Reconstruct mesh and fields (like reconstructParMesh+reconstructPar).

      - \par -newTimes
        (in combination with -reconstruct) reconstruct only new times.

      - \par -dry-run
        (not in combination with -reconstruct) Test without actually
        decomposing.

      - \par -cellDist
        (not in combination with -reconstruct) Write the cell distribution
        as a labelList, for use with the 'manual' decomposition method,
        and as a volScalarField for visualization.

      - \par -region \<regionName\>
        Distribute named region.

      - \par -allRegions
        Distribute all regions in regionProperties. Does not check for
        existence of processor*.

\*---------------------------------------------------------------------------*/

#include "argList.H"
#include "sigFpe.H"
#include "Time.H"
#include "fvMesh.H"
#include "fvMeshTools.H"
#include "fvMeshDistribute.H"
#include "decompositionMethod.H"
#include "decompositionModel.H"
#include "timeSelector.H"
#include "PstreamReduceOps.H"
#include "volFields.H"
#include "surfaceFields.H"
#include "IOmapDistributePolyMesh.H"
#include "IOobjectList.H"
#include "globalIndex.H"
#include "loadOrCreateMesh.H"
#include "processorFvPatchField.H"
#include "zeroGradientFvPatchFields.H"
#include "topoSet.H"
#include "regionProperties.H"
#include "basicFvGeometryScheme.H"
#include "parFvFieldReconstructor.H"
#include "parLagrangianRedistributor.H"
#include "unmappedPassivePositionParticleCloud.H"
#include "hexRef8Data.H"
#include "meshRefinement.H"
#include "pointFields.H"
#include "cyclicACMIFvPatch.H"

using namespace Foam;
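// Typical invocations (illustrative sketch only; processor counts and extra
// options depend on the case -- see the Usage section above):
//
//     mpirun -np <N> redistributePar -parallel                # redistribute
//     mpirun -np <N> redistributePar -decompose -parallel     # decompose
//     mpirun -np <N> redistributePar -reconstruct -parallel   # reconstruct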
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //

// Tolerance (as fraction of the bounding box). Needs to be fairly lax since
// usually meshes get written with limited precision (6 digits)
static const scalar defaultMergeTol = 1e-6;


// Get merging distance when matching face centres
scalar getMergeDistance
(
    const argList& args,
    const Time& runTime,
    const boundBox& bb
)
{
    const scalar mergeTol =
        args.getOrDefault<scalar>("mergeTol", defaultMergeTol);

    const scalar writeTol =
        Foam::pow(scalar(10), -scalar(IOstream::defaultPrecision()));

    Info<< "Merge tolerance : " << mergeTol << nl
        << "Write tolerance : " << writeTol << endl;

    if (runTime.writeFormat() == IOstream::ASCII && mergeTol < writeTol)
    {
        FatalErrorInFunction
            << "Your current settings specify ASCII writing with "
            << IOstream::defaultPrecision() << " digits precision." << nl
            << "Your merging tolerance (" << mergeTol
            << ") is finer than this." << nl
            << "Please change your writeFormat to binary"
            << " or increase the writePrecision" << endl
            << "or adjust the merge tolerance (-mergeTol)."
            << exit(FatalError);
    }

    const scalar mergeDist = mergeTol*bb.mag();

    Info<< "Overall meshes bounding box : " << bb << nl
        << "Relative tolerance          : " << mergeTol << nl
        << "Absolute matching distance  : " << mergeDist << nl
        << endl;

    return mergeDist;
}
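// Worked example (illustrative): with ASCII output at the default
// writePrecision of 6, writeTol = 1e-06, so the default mergeTol (1e-06)
// passes the check above. Requesting a finer tolerance, e.g. "-mergeTol 1e-8",
// without switching to binary or raising writePrecision triggers the
// FatalError. For a mesh bounding box with a diagonal of 2.5 m, the
// absolute matching distance becomes mergeDist = 1e-06*2.5 = 2.5e-06 m.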
void setBasicGeometry(fvMesh& mesh)
{
    // Set the fvGeometryScheme to basic since it does not require
    // any parallel communication to construct the geometry. During
    // redistributePar one might temporarily end up with processors
    // with zero procBoundaries. Normally procBoundaries trigger geometry
    // calculation (e.g. send over cellCentres) so on the processors without
    // procBoundaries this will not happen. The call to the geometry
    // calculation is not synchronised and this might lead to a hang for
    // geometry schemes that do require synchronisation.

    tmp<fvGeometryScheme> basicGeometry
    (
        fvGeometryScheme::New
        (
            mesh,
            dictionary(),
            basicFvGeometryScheme::typeName
        )
    );
    mesh.geometry(basicGeometry);
}


void printMeshData(const polyMesh& mesh)
{
    // Collect all data on master

    globalIndex globalCells(mesh.nCells());
    labelListList patchNeiProcNo(Pstream::nProcs());
    labelListList patchSize(Pstream::nProcs());
    const labelList& pPatches = mesh.globalData().processorPatches();
    patchNeiProcNo[Pstream::myProcNo()].setSize(pPatches.size());
    patchSize[Pstream::myProcNo()].setSize(pPatches.size());
    forAll(pPatches, i)
    {
        const processorPolyPatch& ppp = refCast<const processorPolyPatch>
        (
            mesh.boundaryMesh()[pPatches[i]]
        );
        patchNeiProcNo[Pstream::myProcNo()][i] = ppp.neighbProcNo();
        patchSize[Pstream::myProcNo()][i] = ppp.size();
    }
    Pstream::gatherList(patchNeiProcNo);
    Pstream::gatherList(patchSize);


    // Print stats

    globalIndex globalBoundaryFaces(mesh.nBoundaryFaces());

    label maxProcCells = 0;
    label totProcFaces = 0;
    label maxProcPatches = 0;
    label totProcPatches = 0;
    label maxProcFaces = 0;

    for (const int procI : Pstream::allProcs())
    {
        Info<< nl
            << "Processor " << procI << nl
            << "    Number of cells = " << globalCells.localSize(procI)
            << endl;

        label nProcFaces = 0;
        const labelList& nei = patchNeiProcNo[procI];

        forAll(patchNeiProcNo[procI], i)
        {
            Info<< "    Number of faces shared with processor "
                << patchNeiProcNo[procI][i] << " = "
                << patchSize[procI][i] << endl;

            nProcFaces += patchSize[procI][i];
        }

        Info<< "    Number of processor patches = " << nei.size() << nl
            << "    Number of processor faces = " << nProcFaces << nl
            << "    Number of boundary faces = "
            << globalBoundaryFaces.localSize(procI)-nProcFaces << endl;

        maxProcCells = max(maxProcCells, globalCells.localSize(procI));
        totProcFaces += nProcFaces;
        totProcPatches += nei.size();
        maxProcPatches = max(maxProcPatches, nei.size());
        maxProcFaces = max(maxProcFaces, nProcFaces);
    }

    // Stats

    scalar avgProcCells = scalar(globalCells.size())/Pstream::nProcs();
    scalar avgProcPatches = scalar(totProcPatches)/Pstream::nProcs();
    scalar avgProcFaces = scalar(totProcFaces)/Pstream::nProcs();

    // In case of all faces on one processor. Just to avoid division by 0.
    if (totProcPatches == 0)
    {
        avgProcPatches = 1;
    }
    if (totProcFaces == 0)
    {
        avgProcFaces = 1;
    }

    Info<< nl
        << "Number of processor faces = " << totProcFaces/2 << nl
        << "Max number of cells = " << maxProcCells
        << " (" << 100.0*(maxProcCells-avgProcCells)/avgProcCells
        << "% above average " << avgProcCells << ")" << nl
        << "Max number of processor patches = " << maxProcPatches
        << " (" << 100.0*(maxProcPatches-avgProcPatches)/avgProcPatches
        << "% above average " << avgProcPatches << ")" << nl
        << "Max number of faces between processors = " << maxProcFaces
        << " (" << 100.0*(maxProcFaces-avgProcFaces)/avgProcFaces
        << "% above average " << avgProcFaces << ")" << nl
        << endl;
}


// Debugging: write volScalarField with decomposition for post processing.
void writeDecomposition
(
    const word& name,
    const fvMesh& mesh,
    const labelUList& decomp
)
{
    // Write the decomposition as labelList for use with the 'manual'
    // decomposition method.
    labelIOList cellDecomposition
    (
        IOobject
        (
            "cellDecomposition",
            mesh.facesInstance(),  // mesh read from facesInstance
            mesh,
            IOobject::NO_READ,
            IOobject::NO_WRITE,
            false
        ),
        decomp
    );
    cellDecomposition.write();

    Info<< "Writing wanted cell distribution to volScalarField " << name
        << " for postprocessing purposes." << nl << endl;

    volScalarField procCells
    (
        IOobject
        (
            name,
            mesh.time().timeName(),
            mesh,
            IOobject::NO_READ,
            IOobject::AUTO_WRITE,
            false                   // do not register
        ),
        mesh,
        dimensionedScalar(name, dimless, -1),
        zeroGradientFvPatchScalarField::typeName
    );

    forAll(procCells, cI)
    {
        procCells[cI] = decomp[cI];
    }

    procCells.correctBoundaryConditions();
    procCells.write();
}
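// The cellDecomposition file written above can be re-used by the 'manual'
// decomposition method. A minimal decomposeParDict sketch (assumed layout;
// adjust numberOfSubdomains and the dataFile name to the actual case):
//
//     numberOfSubdomains  4;
//     method              manual;
//     coeffs
//     {
//         dataFile        "cellDecomposition";
//     }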
void determineDecomposition
(
    const Time& baseRunTime,
    const fileName& decompDictFile, // optional location for decomposeParDict
    const bool decompose,       // decompose, i.e. read from undecomposed case
    const fileName& proc0CaseName,
    const fvMesh& mesh,
    const bool writeCellDist,

    label& nDestProcs,
    labelList& decomp
)
{
    // Read decomposeParDict (on all processors)
    const decompositionModel& method = decompositionModel::New
    (
        mesh,
        decompDictFile
    );

    decompositionMethod& decomposer = method.decomposer();

    if (!decomposer.parallelAware())
    {
        WarningInFunction
            << "You have selected decomposition method "
            << decomposer.typeName
            << " which does" << nl
            << "not synchronise the decomposition across"
            << " processor patches." << nl
            << "    You might want to select a decomposition method"
            << " which is aware of this. Continuing."
            << endl;
    }

    if (Pstream::master() && decompose)
    {
        Info<< "Setting caseName to " << baseRunTime.caseName()
            << " to read decomposeParDict" << endl;
        const_cast<Time&>(mesh.time()).caseName() = baseRunTime.caseName();
    }

    scalarField cellWeights;
    if (method.found("weightField"))
    {
        word weightName = method.get<word>("weightField");

        volScalarField weights
        (
            IOobject
            (
                weightName,
                mesh.time().timeName(),
                mesh,
                IOobject::MUST_READ,
                IOobject::NO_WRITE
            ),
            mesh
        );
        cellWeights = weights.internalField();
    }

    nDestProcs = decomposer.nDomains();
    decomp = decomposer.decompose(mesh, cellWeights);

    if (Pstream::master() && decompose)
    {
        Info<< "Restoring caseName to " << proc0CaseName << endl;
        const_cast<Time&>(mesh.time()).caseName() = proc0CaseName;
    }

    // Dump decomposition to volScalarField
    if (writeCellDist)
    {
        // Note: on master make sure to write to processor0
        if (decompose)
        {
            if (Pstream::master())
            {
                Info<< "Setting caseName to " << baseRunTime.caseName()
                    << " to write undecomposed cellDist" << endl;

                Time& tm = const_cast<Time&>(mesh.time());
                tm.caseName() = baseRunTime.caseName();
                writeDecomposition("cellDist", mesh, decomp);
                Info<< "Restoring caseName to " << proc0CaseName << endl;
                tm.caseName() = proc0CaseName;
            }
        }
        else
        {
            writeDecomposition("cellDist", mesh, decomp);
        }
    }
}


// Write addressing if decomposing (1 to many) or reconstructing (many to 1)
void writeProcAddressing
(
    const fvMesh& mesh,
    const mapDistributePolyMesh& map,
    const bool decompose
)
{
    Info<< "Writing procAddressing files to " << mesh.facesInstance()
        << endl;

    labelIOList cellMap
    (
        IOobject
        (
            "cellProcAddressing",
            mesh.facesInstance(),
            polyMesh::meshSubDir,
            mesh,
            IOobject::NO_READ
        ),
        0
    );

    labelIOList faceMap
    (
        IOobject
        (
            "faceProcAddressing",
            mesh.facesInstance(),
            polyMesh::meshSubDir,
            mesh,
            IOobject::NO_READ
        ),
        0
    );

    labelIOList pointMap
    (
        IOobject
        (
            "pointProcAddressing",
            mesh.facesInstance(),
            polyMesh::meshSubDir,
            mesh,
            IOobject::NO_READ
        ),
        0
    );

    labelIOList patchMap
    (
        IOobject
        (
            "boundaryProcAddressing",
            mesh.facesInstance(),
            polyMesh::meshSubDir,
            mesh,
            IOobject::NO_READ
        ),
        0
    );

    // Decomposing: see how cells moved from undecomposed case
    if (decompose)
    {
        cellMap = identity(map.nOldCells());
        map.distributeCellData(cellMap);

        faceMap = identity(map.nOldFaces());
        {
            const mapDistribute& faceDistMap = map.faceMap();

            if (faceDistMap.subHasFlip() || faceDistMap.constructHasFlip())
            {
                // Offset by 1
                faceMap = faceMap + 1;
            }

            // Apply face flips
            mapDistributeBase::distribute
            (
                Pstream::commsTypes::nonBlocking,
                List<labelPair>(),
                faceDistMap.constructSize(),
                faceDistMap.subMap(),
                faceDistMap.subHasFlip(),
                faceDistMap.constructMap(),
                faceDistMap.constructHasFlip(),
                faceMap,
                flipLabelOp()
            );
        }

        pointMap = identity(map.nOldPoints());
        map.distributePointData(pointMap);

        patchMap = identity(map.oldPatchSizes().size());
        const mapDistribute& patchDistMap = map.patchMap();
        // Use explicit distribute since we need to provide a null value
        // (for new patches) and this is the only call that allows us to
        // provide one ...
        mapDistributeBase::distribute
        (
            Pstream::commsTypes::nonBlocking,
            List<labelPair>(),
            patchDistMap.constructSize(),
            patchDistMap.subMap(),
            patchDistMap.subHasFlip(),
            patchDistMap.constructMap(),
            patchDistMap.constructHasFlip(),
            patchMap,
            label(-1),
            eqOp