/*---------------------------------------------------------------------------*\
  =========                 |
  \\      /  F ield         | OpenFOAM: The Open Source CFD Toolbox
   \\    /   O peration     |
    \\  /    A nd           | www.openfoam.com
     \\/     M anipulation  |
-------------------------------------------------------------------------------
    Copyright (C) 2011-2017 OpenFOAM Foundation
    Copyright (C) 2015-2023 OpenCFD Ltd.
-------------------------------------------------------------------------------
License
    This file is part of OpenFOAM.

    OpenFOAM is free software: you can redistribute it and/or modify it
    under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
    ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    for more details.

    You should have received a copy of the GNU General Public License
    along with OpenFOAM.  If not, see <http://www.gnu.org/licenses/>.

Application
    redistributePar

Group
    grpParallelUtilities

Description
    Redistributes existing decomposed mesh and fields according to the current
    settings in the decomposeParDict file.

    Must be run on maximum number of source and destination processors.
    Balances mesh and writes new mesh to new time directory.

    Can optionally run in decompose/reconstruct mode to decompose/reconstruct
    mesh and fields.

Usage
    \b redistributePar [OPTION]

    Options:
      - \par -decompose
        Remove any existing \a processor subdirectories and decomposes the
        mesh. Equivalent to running without processor subdirectories.

      - \par -reconstruct
        Reconstruct mesh and fields (like reconstructParMesh+reconstructPar).

      - \par -newTimes
        (in combination with -reconstruct) reconstruct only new times.

      - \par -dry-run
        (not in combination with -reconstruct) Test without actually
        decomposing.
- \par -cellDist not in combination with -reconstruct) Write the cell distribution as a labelList, for use with 'manual' decomposition method and as a volScalarField for visualization. - \par -region \ Distribute named region. - \par -allRegions Distribute all regions in regionProperties. Does not check for existence of processor*. \*---------------------------------------------------------------------------*/ #include "argList.H" #include "sigFpe.H" #include "Time.H" #include "fvMesh.H" #include "fvMeshTools.H" #include "fvMeshDistribute.H" #include "fieldsDistributor.H" #include "decompositionMethod.H" #include "decompositionModel.H" #include "timeSelector.H" #include "PstreamReduceOps.H" #include "volFields.H" #include "surfaceFields.H" #include "IOmapDistributePolyMesh.H" #include "IOobjectList.H" #include "globalIndex.H" #include "loadOrCreateMesh.H" #include "processorFvPatchField.H" #include "topoSet.H" #include "regionProperties.H" #include "parFvFieldDistributor.H" #include "parPointFieldDistributor.H" #include "hexRef8Data.H" #include "meshRefinement.H" #include "pointFields.H" #include "faMeshSubset.H" #include "faMeshTools.H" #include "faMeshDistributor.H" #include "parFaFieldDistributorCache.H" #include "redistributeLagrangian.H" using namespace Foam; // * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * // const int debug(::Foam::debug::debugSwitch("redistributePar", 0)); #define InfoOrPout (::debug ? 
Pout : Info()) // Allocate a new file handler on valid processors only // retaining the original IO ranks if possible autoPtr getNewHandler(const boolUList& useProc, bool verbose = true) { autoPtr handler ( fileOperation::New(fileHandler(), useProc, verbose) ); if (::debug && handler) { Pout<< "Allocated " << handler().info() << " ptr:" << Foam::name(handler.get()) << endl; } return handler; } // Allocate a new file handler on valid processors only // retaining the original IO ranks if possible void newHandler(const boolUList& useProc, refPtr& handler) { if (!handler) { handler = getNewHandler(useProc); } } void createTimeDirs(const fileName& path) { // Get current set of local processor's time directories. Uses // fileHandler instantList localTimeDirs(Time::findTimes(path, "constant")); instantList masterTimeDirs; if (Pstream::master()) { //const bool oldParRun = Pstream::parRun(false); //timeDirs = Time::findTimes(path, "constant"); //Pstream::parRun(oldParRun); // Restore parallel state masterTimeDirs = localTimeDirs; } Pstream::broadcast(masterTimeDirs); // Sync any cached times (e.g. 
masterUncollatedFileOperation::times_) // since only master would have done the findTimes for (const instant& t : masterTimeDirs) { if (!localTimeDirs.contains(t)) { const fileName timePath(path/t.name()); //Pout<< "Time:" << t << nl // << " raw :" << timePath << nl // << endl; // Bypass fileHandler Foam::mkDir(timePath); } } // Just to make sure remove all state and re-scan fileHandler().flush(); (void)Time::findTimes(path, "constant"); } void copyUniform ( refPtr& readHandler, refPtr& writeHandler, const bool reconstruct, const bool decompose, const word& readTimeName, const fileName& readCaseName, const objectRegistry& readDb, const objectRegistry& writeDb ) { // 3 modes: reconstruct, decompose, redistribute // In reconstruct mode (separate reconstructed mesh): // - read using readDb + readHandler // - write using writeDb + writeHandler // In decompose mode (since re-using processor0 mesh): // - read using readDb + readCaseName + readHandler // - write using writeDb + writeHandler // In redistribute mode: // - read using readDb + readHandler // - write using writeDb + writeHandler fileName readPath; if (readHandler) { auto oldHandler = fileOperation::fileHandler(readHandler); const label oldComm = UPstream::commWorld(fileHandler().comm()); //Pout<< "** copyUniform: switching to handler:" << fileHandler().type() // << " with comm:" << fileHandler().comm() // << " with procs:" << UPstream::procID(fileHandler().comm()) // << endl; Time& readTime = const_cast(readDb.time()); bool oldProcCase = readTime.processorCase(); string oldCaseName; if (decompose) { //Pout<< "***Setting caseName to " << readCaseName // << " to read undecomposed uniform" << endl; oldCaseName = readTime.caseName(); readTime.caseName() = readCaseName; oldProcCase = readTime.processorCase(false); } // Detect uniform/ at original database + time readPath = fileHandler().dirPath ( false, // local directory IOobject("uniform", readTimeName, readDb), false // do not search in time ); readHandler = 
fileOperation::fileHandler(oldHandler); UPstream::commWorld(oldComm); //Pout<< "** copyUniform:" // << " switched back to handler:" << fileHandler().type() // << " with comm:" << fileHandler().comm() // << " with procs:" << UPstream::procID(fileHandler().comm()) // << endl; if (decompose) { // Reset caseName on master //Pout<< "***Restoring caseName to " << oldCaseName << endl; readTime.caseName() = oldCaseName; readTime.processorCase(oldProcCase); } } Pstream::broadcast(readPath, UPstream::worldComm); if (!readPath.empty()) { InfoOrPout << "Detected additional non-decomposed files in " << readPath << endl; // readPath: searching is the same for all file handlers. Typical: // /0.1/uniform (parent dir, decompose mode) // /processor1/0.1/uniform (redistribute/reconstruct mode) // /processors2/0.1/uniform ,, // writePath: // uncollated : /0.1/uniform (reconstruct mode). Should only // be done by master // uncollated : /processorXXX/0.1/uniform. Should be done by all. // collated : /processors2/0.1/uniform. Should be done by // local master only. const IOobject writeIO ( "uniform", writeDb.time().timeName(), writeDb ); // Switch to writeHandler if (writeHandler) { //const label oldWorldComm = UPstream::worldComm; auto oldHandler = fileOperation::fileHandler(writeHandler); // Check: fileHandler.comm() is size 1 for uncollated const label writeComm = fileHandler().comm(); //UPstream::worldComm = writeComm; if (reconstruct) { const bool oldParRun = UPstream::parRun(false); const fileName writePath ( fileHandler().objectPath ( writeIO, word::null ) ); fileHandler().cp(readPath, writePath); UPstream::parRun(oldParRun); } else { const fileName writePath ( fileHandler().objectPath ( writeIO, word::null ) ); if (::debug) { Pout<< " readPath :" << readPath << endl; Pout<< " writePath :" << writePath << endl; } fileHandler().broadcastCopy ( writeComm, // send to all in writeComm UPstream::master(writeComm), // to use ioranks. Check! 
readPath, writePath ); } writeHandler = fileOperation::fileHandler(oldHandler); //UPstream::worldComm = oldWorldComm; } } } void printMeshData(const polyMesh& mesh) { // Collect all data on master labelListList patchNeiProcNo(Pstream::nProcs()); labelListList patchSize(Pstream::nProcs()); const labelList& pPatches = mesh.globalData().processorPatches(); patchNeiProcNo[Pstream::myProcNo()].setSize(pPatches.size()); patchSize[Pstream::myProcNo()].setSize(pPatches.size()); forAll(pPatches, i) { const processorPolyPatch& ppp = refCast ( mesh.boundaryMesh()[pPatches[i]] ); patchNeiProcNo[Pstream::myProcNo()][i] = ppp.neighbProcNo(); patchSize[Pstream::myProcNo()][i] = ppp.size(); } Pstream::gatherList(patchNeiProcNo); Pstream::gatherList(patchSize); // Print stats const globalIndex globalCells(mesh.nCells()); const globalIndex globalBoundaryFaces(mesh.nBoundaryFaces()); label maxProcCells = 0; label maxProcFaces = 0; label totProcFaces = 0; label maxProcPatches = 0; label totProcPatches = 0; for (const int proci : Pstream::allProcs()) { const label nLocalCells = globalCells.localSize(proci); const label nBndFaces = globalBoundaryFaces.localSize(proci); InfoOrPout<< nl << "Processor " << proci; if (!nLocalCells) { InfoOrPout<< " (empty)" << endl; continue; } else { InfoOrPout<< nl << " Number of cells = " << nLocalCells << endl; } label nProcFaces = 0; const labelList& nei = patchNeiProcNo[proci]; forAll(patchNeiProcNo[proci], i) { InfoOrPout << " Number of faces shared with processor " << patchNeiProcNo[proci][i] << " = " << patchSize[proci][i] << nl; nProcFaces += patchSize[proci][i]; } { InfoOrPout << " Number of processor patches = " << nei.size() << nl << " Number of processor faces = " << nProcFaces << nl << " Number of boundary faces = " << nBndFaces-nProcFaces << endl; } maxProcCells = max(maxProcCells, nLocalCells); totProcFaces += nProcFaces; totProcPatches += nei.size(); maxProcFaces = max(maxProcFaces, nProcFaces); maxProcPatches = max(maxProcPatches, 
nei.size()); } // Summary stats InfoOrPout << nl << "Number of processor faces = " << (totProcFaces/2) << nl << "Max number of cells = " << maxProcCells; if (maxProcCells != globalCells.totalSize()) { scalar avgValue = scalar(globalCells.totalSize())/Pstream::nProcs(); InfoOrPout << " (" << 100.0*(maxProcCells-avgValue)/avgValue << "% above average " << avgValue << ')'; } InfoOrPout<< nl; InfoOrPout<< "Max number of processor patches = " << maxProcPatches; if (totProcPatches) { scalar avgValue = scalar(totProcPatches)/Pstream::nProcs(); InfoOrPout << " (" << 100.0*(maxProcPatches-avgValue)/avgValue << "% above average " << avgValue << ')'; } InfoOrPout<< nl; InfoOrPout<< "Max number of faces between processors = " << maxProcFaces; if (totProcFaces) { scalar avgValue = scalar(totProcFaces)/Pstream::nProcs(); InfoOrPout << " (" << 100.0*(maxProcFaces-avgValue)/avgValue << "% above average " << avgValue << ')'; } InfoOrPout<< nl << endl; } // Debugging: write volScalarField with decomposition for post processing. void writeDecomposition ( const word& name, const fvMesh& mesh, const labelUList& decomp ) { // Write the decomposition as labelList for use with 'manual' // decomposition method. IOListRef