ENH: additional decompose options

- decomposePar: -no-fields to suppress decomposition of fields

- makeFaMesh: -no-decompose to suppress creation of *ProcAddressing
  and fields; -no-fields to suppress decomposition of fields only
This commit is contained in:
Mark Olesen 2021-11-08 11:57:41 +01:00
parent e8aa3aad25
commit a78e79908b
7 changed files with 279 additions and 191 deletions

View File

@@ -11,11 +11,11 @@ License
This file is part of OpenFOAM, distributed under GPL-3.0-or-later.
Description
Decompose area fields, when mesh was generated in parallel
Write proc addressing and decompose area fields (parallel only).
\*---------------------------------------------------------------------------*/
if (Pstream::parRun())
if (doDecompose && Pstream::parRun())
{
faMeshReconstructor reconstructor(aMesh);
reconstructor.writeAddressing();
@@ -43,24 +43,42 @@ if (Pstream::parRun())
reconstructor.writeMesh();
const bool oldDistributed = fileHandler().distributed();
auto oldHandler = fileHandler(fileOperation::NewUncollated());
fileHandler().distributed(true);
IOobjectList objects(fullMesh.time(), runTime.timeName());
faFieldDecomposer::readFields(fullMesh, objects, areaScalarFields);
faFieldDecomposer::readFields(fullMesh, objects, areaVectorFields);
faFieldDecomposer::readFields(fullMesh, objects, areaSphTensorFields);
faFieldDecomposer::readFields(fullMesh, objects, areaSymmTensorFields);
faFieldDecomposer::readFields(fullMesh, objects, areaTensorFields);
// Restore old settings
if (oldHandler)
if (doDecompFields)
{
fileHandler(std::move(oldHandler));
const bool oldDistributed = fileHandler().distributed();
auto oldHandler = fileHandler(fileOperation::NewUncollated());
fileHandler().distributed(true);
IOobjectList objects(fullMesh.time(), runTime.timeName());
faFieldDecomposer::readFields
(
fullMesh, objects, areaScalarFields
);
faFieldDecomposer::readFields
(
fullMesh, objects, areaVectorFields
);
faFieldDecomposer::readFields
(
fullMesh, objects, areaSphTensorFields
);
faFieldDecomposer::readFields
(
fullMesh, objects, areaSymmTensorFields
);
faFieldDecomposer::readFields
(
fullMesh, objects, areaTensorFields
);
// Restore old settings
if (oldHandler)
{
fileHandler(std::move(oldHandler));
}
fileHandler().distributed(oldDistributed);
}
fileHandler().distributed(oldDistributed);
}
const label nAreaFields =
@@ -124,4 +142,5 @@ if (Pstream::parRun())
}
}
// ************************************************************************* //

View File

@@ -71,10 +71,21 @@ int main(int argc, char *argv[])
);
argList::addOption("dict", "file", "Alternative faMeshDefinition");
argList::addDryRunOption
(
"Create but do not write"
);
argList::addBoolOption
(
"dry-run",
"Create but do not write"
"no-decompose",
"Suppress procAddressing creation and field decomposition"
" (parallel)"
);
argList::addBoolOption
(
"no-fields",
"Suppress field decomposition"
" (parallel)"
);
argList::addBoolOption
(
@@ -93,7 +104,17 @@ int main(int argc, char *argv[])
#include "createTime.H"
#include "createNamedPolyMesh.H"
const bool dryrun = args.found("dry-run");
const bool doDecompose = !args.found("no-decompose");
const bool doDecompFields = !args.found("no-fields");
if (!doDecompose)
{
Info<< "Skip decompose of finiteArea mesh/fields" << nl;
}
else if (!doDecompFields)
{
Info<< "Skip decompose of finiteArea fields" << nl;
}
// Reading faMeshDefinition dictionary
#include "findMeshDefinitionDict.H"
@@ -122,7 +143,7 @@ int main(int argc, char *argv[])
#include "faMeshWriteVTK.H"
}
if (dryrun)
if (args.dryRun())
{
Info<< "\ndry-run: not writing mesh or decomposing fields\n" << nl;
}

View File

@@ -141,11 +141,15 @@ Usage
\*---------------------------------------------------------------------------*/
#include "argList.H"
#include "timeSelector.H"
#include "OSspecific.H"
#include "fvCFD.H"
#include "IOobjectList.H"
#include "decompositionModel.H"
#include "domainDecomposition.H"
#include "domainDecompositionDryRun.H"
#include "labelIOField.H"
#include "labelFieldIOField.H"
#include "scalarIOField.H"
@@ -165,9 +169,7 @@ Usage
#include "fvFieldDecomposer.H"
#include "pointFieldDecomposer.H"
#include "lagrangianFieldDecomposer.H"
#include "decompositionModel.H"
#include "faCFD.H"
#include "emptyFaPatch.H"
#include "faMeshDecomposition.H"
#include "faFieldDecomposer.H"
@@ -306,6 +308,8 @@ void decomposeUniform
} // End namespace Foam
using namespace Foam;
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
int main(int argc, char *argv[])
@@ -384,6 +388,11 @@ int main(int argc, char *argv[])
"fields",
"Use existing geometry decomposition and convert fields only"
);
argList::addBoolOption
(
"no-fields",
"Suppress conversion of fields (volume, finite-area, lagrangian)"
);
argList::addBoolOption
(
@@ -417,6 +426,7 @@ int main(int argc, char *argv[])
const bool decomposeIfRequired = args.found("ifRequired");
const bool doDecompFields = !args.found("no-fields");
const bool doFiniteArea = !args.found("no-finite-area");
const bool doLagrangian = !args.found("no-lagrangian");
@@ -436,6 +446,18 @@ int main(int argc, char *argv[])
}
else
{
if (decomposeFieldsOnly && !doDecompFields)
{
FatalErrorIn(args.executable())
<< "Options -fields and -no-fields are mutually exclusive"
<< " ... giving up" << nl
<< exit(FatalError);
}
if (!doDecompFields)
{
Info<< "Skip decompose of all fields" << nl;
}
if (!doFiniteArea)
{
Info<< "Skip decompose of finiteArea mesh/fields" << nl;
@@ -448,6 +470,7 @@ int main(int argc, char *argv[])
times = timeSelector::selectIfPresent(runTime, args);
}
// Allow override of decomposeParDict location
fileName decompDictFile(args.get<fileName>("decomposeParDict", ""));
if (!decompDictFile.empty() && !decompDictFile.isAbsolute())
@@ -507,7 +530,8 @@ int main(int argc, char *argv[])
Info<< nl << endl;
// Determine the existing processor count directly
label nProcs = fileHandler().nProcs(runTime.path(), regionDir);
const label nProcsOld =
fileHandler().nProcs(runTime.path(), regionDir);
// Get requested numberOfSubdomains directly from the dictionary.
// Note: have no mesh yet so cannot use decompositionModel::New
@@ -538,22 +562,22 @@ int main(int argc, char *argv[])
if (decomposeFieldsOnly)
{
// Sanity check on previously decomposed case
if (nProcs != nDomains)
if (nProcsOld != nDomains)
{
FatalErrorInFunction
FatalErrorIn(args.executable())
<< "Specified -fields, but the case was decomposed with "
<< nProcs << " domains"
<< nProcsOld << " domains"
<< nl
<< "instead of " << nDomains
<< " domains as specified in decomposeParDict" << nl
<< exit(FatalError);
}
}
else if (nProcs)
else if (nProcsOld)
{
bool procDirsProblem = true;
if (decomposeIfRequired && nProcs == nDomains)
if (decomposeIfRequired && nProcsOld == nDomains)
{
// We can reuse the decomposition
decomposeFieldsOnly = true;
@@ -571,7 +595,7 @@ int main(int argc, char *argv[])
if (forceOverwrite)
{
Info<< "Removing " << nProcs
Info<< "Removing " << nProcsOld
<< " existing processor directories" << endl;
// Remove existing processors directory
@@ -614,8 +638,8 @@ int main(int argc, char *argv[])
if (procDirsProblem)
{
FatalErrorInFunction
<< "Case is already decomposed with " << nProcs
FatalErrorIn(args.executable())
<< "Case is already decomposed with " << nProcsOld
<< " domains, use the -force option or manually" << nl
<< "remove processor directories before decomposing. e.g.,"
<< nl
@@ -644,7 +668,6 @@ int main(int argc, char *argv[])
if (!decomposeFieldsOnly)
{
mesh.decomposeMesh();
mesh.writeDecomposition(decomposeSets);
if (writeCellDist)
@@ -747,7 +770,7 @@ int main(int argc, char *argv[])
}
else
{
// Decompose the field files
// Decompose field files, lagrangian, finite-area
// Cached processor meshes and maps. These are only preserved if
// running with multiple times.
@@ -756,8 +779,9 @@ int main(int argc, char *argv[])
PtrList<labelIOList> faceProcAddressingList(mesh.nProcs());
PtrList<labelIOList> cellProcAddressingList(mesh.nProcs());
PtrList<labelIOList> boundaryProcAddressingList(mesh.nProcs());
PtrList<fvFieldDecomposer> fieldDecomposerList(mesh.nProcs());
PtrList<labelIOList> pointProcAddressingList(mesh.nProcs());
PtrList<fvFieldDecomposer> fieldDecomposerList(mesh.nProcs());
PtrList<pointFieldDecomposer> pointFieldDecomposerList
(
mesh.nProcs()
@@ -765,83 +789,139 @@ int main(int argc, char *argv[])
// Loop over all times
forAll(times, timeI)
forAll(times, timei)
{
runTime.setTime(times[timeI], timeI);
runTime.setTime(times[timei], timei);
Info<< "Time = " << runTime.timeName() << endl;
// Search for list of objects for this time
IOobjectList objects(mesh, runTime.timeName());
// Field objects at this time
IOobjectList objects;
if (doDecompFields)
{
objects = IOobjectList(mesh, runTime.timeName());
// Ignore generated fields: (cellDist)
objects.remove("cellDist");
}
// Finite area handling
autoPtr<faMeshDecomposition> faMeshDecompPtr;
if (doFiniteArea)
{
IOobject io
(
"faBoundary",
mesh.time().findInstance
(
mesh.dbDir()/polyMesh::meshSubDir,
"boundary"
),
faMesh::meshSubDir,
mesh,
IOobject::READ_IF_PRESENT,
IOobject::NO_WRITE,
false // not registered
);
if (io.typeHeaderOk<faBoundaryMesh>(true))
{
// Always based on the volume decomposition!
faMeshDecompPtr.reset
(
new faMeshDecomposition
(
mesh,
mesh.nProcs(),
mesh.model()
)
);
}
}
// Construct the vol fields
// ~~~~~~~~~~~~~~~~~~~~~~~~
// Vol fields
// ~~~~~~~~~~
PtrList<volScalarField> volScalarFields;
readFields(mesh, objects, volScalarFields, false);
PtrList<volVectorField> volVectorFields;
readFields(mesh, objects, volVectorFields, false);
PtrList<volSphericalTensorField> volSphericalTensorFields;
readFields(mesh, objects, volSphericalTensorFields, false);
PtrList<volSphericalTensorField> volSphTensorFields;
PtrList<volSymmTensorField> volSymmTensorFields;
readFields(mesh, objects, volSymmTensorFields, false);
PtrList<volTensorField> volTensorFields;
readFields(mesh, objects, volTensorFields, false);
if (doDecompFields)
{
readFields(mesh, objects, volScalarFields, false);
readFields(mesh, objects, volVectorFields, false);
readFields(mesh, objects, volSphTensorFields, false);
readFields(mesh, objects, volSymmTensorFields, false);
readFields(mesh, objects, volTensorFields, false);
}
// Construct the dimensioned fields
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Internal fields
// ~~~~~~~~~~~~~~~
PtrList<DimensionedField<scalar, volMesh>> dimScalarFields;
readFields(mesh, objects, dimScalarFields);
PtrList<DimensionedField<vector, volMesh>> dimVectorFields;
readFields(mesh, objects, dimVectorFields);
PtrList<DimensionedField<sphericalTensor, volMesh>>
dimSphericalTensorFields;
readFields(mesh, objects, dimSphericalTensorFields);
dimSphTensorFields;
PtrList<DimensionedField<symmTensor, volMesh>>
dimSymmTensorFields;
readFields(mesh, objects, dimSymmTensorFields);
PtrList<DimensionedField<tensor, volMesh>> dimTensorFields;
readFields(mesh, objects, dimTensorFields);
if (doDecompFields)
{
readFields(mesh, objects, dimScalarFields);
readFields(mesh, objects, dimVectorFields);
readFields(mesh, objects, dimSphTensorFields);
readFields(mesh, objects, dimSymmTensorFields);
readFields(mesh, objects, dimTensorFields);
}
// Construct the surface fields
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Surface fields
// ~~~~~~~~~~~~~~
PtrList<surfaceScalarField> surfaceScalarFields;
readFields(mesh, objects, surfaceScalarFields, false);
PtrList<surfaceVectorField> surfaceVectorFields;
readFields(mesh, objects, surfaceVectorFields, false);
PtrList<surfaceSphericalTensorField>
surfaceSphericalTensorFields;
readFields(mesh, objects, surfaceSphericalTensorFields, false);
surfaceSphTensorFields;
PtrList<surfaceSymmTensorField> surfaceSymmTensorFields;
readFields(mesh, objects, surfaceSymmTensorFields, false);
PtrList<surfaceTensorField> surfaceTensorFields;
readFields(mesh, objects, surfaceTensorFields, false);
if (doDecompFields)
{
readFields(mesh, objects, surfaceScalarFields, false);
readFields(mesh, objects, surfaceVectorFields, false);
readFields(mesh, objects, surfaceSphTensorFields, false);
readFields(mesh, objects, surfaceSymmTensorFields, false);
readFields(mesh, objects, surfaceTensorFields, false);
}
// Construct the point fields
// ~~~~~~~~~~~~~~~~~~~~~~~~~~
// Point fields
// ~~~~~~~~~~~~
const pointMesh& pMesh = pointMesh::New(mesh);
PtrList<pointScalarField> pointScalarFields;
readFields(pMesh, objects, pointScalarFields, false);
PtrList<pointVectorField> pointVectorFields;
readFields(pMesh, objects, pointVectorFields, false);
PtrList<pointSphericalTensorField> pointSphericalTensorFields;
readFields(pMesh, objects, pointSphericalTensorFields, false);
PtrList<pointSphericalTensorField> pointSphTensorFields;
PtrList<pointSymmTensorField> pointSymmTensorFields;
readFields(pMesh, objects, pointSymmTensorFields, false);
PtrList<pointTensorField> pointTensorFields;
readFields(pMesh, objects, pointTensorFields, false);
if (doDecompFields)
{
readFields(pMesh, objects, pointScalarFields, false);
readFields(pMesh, objects, pointVectorFields, false);
readFields(pMesh, objects, pointSphTensorFields, false);
readFields(pMesh, objects, pointSymmTensorFields, false);
readFields(pMesh, objects, pointTensorFields, false);
}
// Construct the Lagrangian fields
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Lagrangian fields
// ~~~~~~~~~~~~~~~~~
fileNameList cloudDirs;
if (doLagrangian)
if (doDecompFields && doLagrangian)
{
cloudDirs = fileHandler().readDir
(
@@ -889,13 +969,17 @@ int main(int argc, char *argv[])
cloudDirs.size()
);
PtrList<PtrList<sphericalTensorIOField>>
lagrangianSphericalTensorFields
lagrangianSphTensorFields
(
cloudDirs.size()
);
PtrList<PtrList<sphericalTensorFieldCompactIOField>>
lagrangianSphericalTensorFieldFields(cloudDirs.size());
PtrList<PtrList<symmTensorIOField>> lagrangianSymmTensorFields
lagrangianSphTensorFieldFields
(
cloudDirs.size()
);
PtrList<PtrList<symmTensorIOField>>
lagrangianSymmTensorFields
(
cloudDirs.size()
);
@@ -914,6 +998,7 @@ int main(int argc, char *argv[])
cloudDirs.size()
);
label cloudI = 0;
for (const fileName& cloudDir : cloudDirs)
@@ -977,7 +1062,7 @@ int main(int argc, char *argv[])
// Check
if (celli < 0 || celli >= mesh.nCells())
{
FatalErrorInFunction
FatalErrorIn(args.executable())
<< "Illegal cell number " << celli
<< " for particle with index "
<< p.index()
@@ -1059,14 +1144,14 @@ int main(int argc, char *argv[])
(
cloudI,
lagrangianObjects,
lagrangianSphericalTensorFields
lagrangianSphTensorFields
);
lagrangianFieldDecomposer::readFieldFields
(
cloudI,
lagrangianObjects,
lagrangianSphericalTensorFieldFields
lagrangianSphTensorFieldFields
);
lagrangianFieldDecomposer::readFields
@@ -1097,29 +1182,34 @@ int main(int argc, char *argv[])
lagrangianTensorFieldFields
);
cloudI++;
++cloudI;
}
}
lagrangianPositions.setSize(cloudI);
cellParticles.setSize(cloudI);
lagrangianLabelFields.setSize(cloudI);
lagrangianLabelFieldFields.setSize(cloudI);
lagrangianScalarFields.setSize(cloudI);
lagrangianScalarFieldFields.setSize(cloudI);
lagrangianVectorFields.setSize(cloudI);
lagrangianVectorFieldFields.setSize(cloudI);
lagrangianSphericalTensorFields.setSize(cloudI);
lagrangianSphericalTensorFieldFields.setSize(cloudI);
lagrangianSymmTensorFields.setSize(cloudI);
lagrangianSymmTensorFieldFields.setSize(cloudI);
lagrangianTensorFields.setSize(cloudI);
lagrangianTensorFieldFields.setSize(cloudI);
lagrangianPositions.resize(cloudI);
cellParticles.resize(cloudI);
lagrangianLabelFields.resize(cloudI);
lagrangianLabelFieldFields.resize(cloudI);
lagrangianScalarFields.resize(cloudI);
lagrangianScalarFieldFields.resize(cloudI);
lagrangianVectorFields.resize(cloudI);
lagrangianVectorFieldFields.resize(cloudI);
lagrangianSphTensorFields.resize(cloudI);
lagrangianSphTensorFieldFields.resize(cloudI);
lagrangianSymmTensorFields.resize(cloudI);
lagrangianSymmTensorFieldFields.resize(cloudI);
lagrangianTensorFields.resize(cloudI);
lagrangianTensorFieldFields.resize(cloudI);
Info<< endl;
// split the fields over processors
for (label proci = 0; proci < mesh.nProcs(); ++proci)
for
(
label proci = 0;
doDecompFields && proci < mesh.nProcs();
++proci
)
{
Info<< "Processor " << proci << ": field transfer" << endl;
@@ -1207,22 +1297,19 @@ int main(int argc, char *argv[])
const fvFieldDecomposer& fieldDecomposer =
fieldDecomposerList[proci];
// vol fields
// Vol fields
fieldDecomposer.decomposeFields(volScalarFields);
fieldDecomposer.decomposeFields(volVectorFields);
fieldDecomposer.decomposeFields
(
volSphericalTensorFields
);
fieldDecomposer.decomposeFields(volSphTensorFields);
fieldDecomposer.decomposeFields(volSymmTensorFields);
fieldDecomposer.decomposeFields(volTensorFields);
// surface fields
// Surface fields
fieldDecomposer.decomposeFields(surfaceScalarFields);
fieldDecomposer.decomposeFields(surfaceVectorFields);
fieldDecomposer.decomposeFields
(
surfaceSphericalTensorFields
surfaceSphTensorFields
);
fieldDecomposer.decomposeFields
(
@@ -1233,7 +1320,7 @@ int main(int argc, char *argv[])
// internal fields
fieldDecomposer.decomposeFields(dimScalarFields);
fieldDecomposer.decomposeFields(dimVectorFields);
fieldDecomposer.decomposeFields(dimSphericalTensorFields);
fieldDecomposer.decomposeFields(dimSphTensorFields);
fieldDecomposer.decomposeFields(dimSymmTensorFields);
fieldDecomposer.decomposeFields(dimTensorFields);
@@ -1250,7 +1337,7 @@ int main(int argc, char *argv[])
(
pointScalarFields.size()
|| pointVectorFields.size()
|| pointSphericalTensorFields.size()
|| pointSphTensorFields.size()
|| pointSymmTensorFields.size()
|| pointTensorFields.size()
)
@@ -1284,10 +1371,7 @@ int main(int argc, char *argv[])
pointDecomposer.decomposeFields(pointScalarFields);
pointDecomposer.decomposeFields(pointVectorFields);
pointDecomposer.decomposeFields
(
pointSphericalTensorFields
);
pointDecomposer.decomposeFields(pointSphTensorFields);
pointDecomposer.decomposeFields(pointSymmTensorFields);
pointDecomposer.decomposeFields(pointTensorFields);
@@ -1351,12 +1435,12 @@ int main(int argc, char *argv[])
fieldDecomposer.decomposeFields
(
cloudDirs[cloudI],
lagrangianSphericalTensorFields[cloudI]
lagrangianSphTensorFields[cloudI]
);
fieldDecomposer.decomposeFieldFields
(
cloudDirs[cloudI],
lagrangianSphericalTensorFieldFields[cloudI]
lagrangianSphTensorFieldFields[cloudI]
);
fieldDecomposer.decomposeFields
(
@@ -1382,17 +1466,24 @@ int main(int argc, char *argv[])
}
}
// Decompose the "uniform" directory in the time region
// directory
decomposeUniform(copyUniform, mesh, processorDb, regionDir);
// For a multi-region case, also decompose the "uniform"
// directory in the time directory
if (regionNames.size() > 1 && regioni == 0)
if (doDecompFields)
{
decomposeUniform(copyUniform, mesh, processorDb);
// Decompose "uniform" directory in the time region
// directory
decomposeUniform
(
copyUniform, mesh, processorDb, regionDir
);
// For a multi-region case, also decompose "uniform"
// directory in the time directory
if (regionNames.size() > 1 && regioni == 0)
{
decomposeUniform(copyUniform, mesh, processorDb);
}
}
// We have cached all the constant mesh data for the current
// processor. This is only important if running with
// multiple times, otherwise it is just extra storage.
@@ -1406,72 +1497,46 @@ int main(int argc, char *argv[])
}
}
// Finite area mesh and field decomposition
IOobject faMeshBoundaryIOobj
(
"faBoundary",
mesh.time().findInstance
(
mesh.dbDir()/polyMesh::meshSubDir,
"boundary"
),
faMesh::meshSubDir,
mesh,
IOobject::READ_IF_PRESENT,
IOobject::NO_WRITE,
false // not registered
);
if
(
doFiniteArea
&& faMeshBoundaryIOobj.typeHeaderOk<faBoundaryMesh>(true)
)
if (faMeshDecompPtr)
{
Info<< "\nFinite area mesh decomposition" << endl;
// Always based on the volume decomposition!
faMeshDecomposition aMesh
(
mesh,
mesh.nProcs(),
mesh.model()
);
faMeshDecomposition& aMesh = faMeshDecompPtr();
aMesh.decomposeMesh();
aMesh.writeDecomposition();
// Construct the area fields
// ~~~~~~~~~~~~~~~~~~~~~~~~
// Area fields
// ~~~~~~~~~~~
PtrList<areaScalarField> areaScalarFields;
readFields(aMesh, objects, areaScalarFields);
PtrList<areaVectorField> areaVectorFields;
readFields(aMesh, objects, areaVectorFields);
PtrList<areaSphericalTensorField> areaSphericalTensorFields;
readFields(aMesh, objects, areaSphericalTensorFields);
PtrList<areaSphericalTensorField> areaSphTensorFields;
PtrList<areaSymmTensorField> areaSymmTensorFields;
readFields(aMesh, objects, areaSymmTensorFields);
PtrList<areaTensorField> areaTensorFields;
readFields(aMesh, objects, areaTensorFields);
// Construct the edge fields
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Edge fields (limited number of types)
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
PtrList<edgeScalarField> edgeScalarFields;
readFields(aMesh, objects, edgeScalarFields);
if (doDecompFields)
{
readFields(aMesh, objects, areaScalarFields);
readFields(aMesh, objects, areaVectorFields);
readFields(aMesh, objects, areaSphTensorFields);
readFields(aMesh, objects, areaSymmTensorFields);
readFields(aMesh, objects, areaTensorFields);
readFields(aMesh, objects, edgeScalarFields);
}
const label nAreaFields =
(
areaScalarFields.size()
+ areaVectorFields.size()
+ areaSphericalTensorFields.size()
+ areaSphTensorFields.size()
+ areaSymmTensorFields.size()
+ areaTensorFields.size()
+ edgeScalarFields.size()
@@ -1574,10 +1639,7 @@ int main(int argc, char *argv[])
fieldDecomposer.decomposeFields(areaScalarFields);
fieldDecomposer.decomposeFields(areaVectorFields);
fieldDecomposer.decomposeFields
(
areaSphericalTensorFields
);
fieldDecomposer.decomposeFields(areaSphTensorFields);
fieldDecomposer.decomposeFields(areaSymmTensorFields);
fieldDecomposer.decomposeFields(areaTensorFields);

View File

@@ -52,7 +52,7 @@ void Foam::domainDecomposition::writeVolField
false
),
this->mesh(),
dimensionedScalar(dimless, Zero),
dimensionedScalar("cellDist", dimless, -1),
zeroGradientFvPatchScalarField::typeName
);

View File

@@ -44,13 +44,6 @@ void Foam::readFields
// Search list of objects for fields of type GeoField
IOobjectList fieldObjects(objects.lookupClass<GeoField>());
// Remove the cellDist field
auto iter = fieldObjects.find("cellDist");
if (iter.found())
{
fieldObjects.erase(iter);
}
// Use sorted set of names
// (different processors might read objects in different order)
const wordList masterNames(fieldObjects.sortedNames());

View File

@@ -46,13 +46,6 @@ void Foam::faFieldDecomposer::readFields
// Search list of objects for fields of type GeoField
IOobjectList fieldObjects(objects.lookupClass<GeoField>());
/// // Remove the cellDist field
/// auto iter = fieldObjects.find("cellDist");
/// if (iter.found())
/// {
/// fieldObjects.erase(iter);
/// }
// Use sorted set of names
// (different processors might read objects in different order)
const wordList masterNames(fieldObjects.sortedNames());

View File

@@ -155,19 +155,19 @@ public:
// Settings
//- Number of processor in decomposition
label nProcs() const
label nProcs() const noexcept
{
return nProcs_;
}
//- Is decomposition data to be distributed for each processor
bool distributed() const
bool distributed() const noexcept
{
return distributed_;
}
//- Change distributed flag
bool distributed(const bool on)
bool distributed(const bool on) noexcept
{
bool old(distributed_);
distributed_ = on;
@@ -175,13 +175,13 @@ public:
}
//- Are global face zones used
bool useGlobalFaceZones() const
bool useGlobalFaceZones() const noexcept
{
return distributed_;
}
//- Change global face zones flag
bool useGlobalFaceZones(const bool on)
bool useGlobalFaceZones(const bool on) noexcept
{
bool old(hasGlobalFaceZones_);
hasGlobalFaceZones_ = on;