STYLE: relocate distributed flag into ParRunControl

- adjust member order in TimePaths for better packing
This commit is contained in:
Mark Olesen 2018-10-11 09:12:01 +02:00
parent f7c85b034b
commit a77ab5d4eb
8 changed files with 90 additions and 65 deletions

View File

@ -33,6 +33,7 @@ Description
#include "nil.H"
#include "IOstreams.H"
#include "PstreamBuffers.H"
#include "argList.H"
#include "Time.H"
namespace Foam
@ -63,6 +64,13 @@ int main(int argc, char *argv[])
nil x;
cout<<"nil:" << sizeof(x) << nl;
}
{
argList x(argc, argv);
cout<<"argList:" << sizeof(x) << nl;
TimePaths y(x);
cout<<"TimePaths:" << sizeof(y) << nl;
}
{
zero x;
cout<<"zero:" << sizeof(x) << nl;

View File

@ -336,8 +336,7 @@ void determineDecomposition
{
Info<< "Setting caseName to " << baseRunTime.caseName()
<< " to read decomposeParDict" << endl;
const_cast<Time&>(mesh.time()).TimePaths::caseName() =
baseRunTime.caseName();
const_cast<Time&>(mesh.time()).caseName() = baseRunTime.caseName();
}
scalarField cellWeights;
@ -366,8 +365,7 @@ void determineDecomposition
if (Pstream::master() && decompose)
{
Info<< "Restoring caseName to " << proc0CaseName << endl;
const_cast<Time&>(mesh.time()).TimePaths::caseName() =
proc0CaseName;
const_cast<Time&>(mesh.time()).caseName() = proc0CaseName;
}
// Dump decomposition to volScalarField
@ -383,10 +381,10 @@ void determineDecomposition
Time& tm = const_cast<Time&>(mesh.time());
tm.TimePaths::caseName() = baseRunTime.caseName();
tm.caseName() = baseRunTime.caseName();
writeDecomposition("cellDist", mesh, decomp);
Info<< "Restoring caseName to " << proc0CaseName << endl;
tm.TimePaths::caseName() = proc0CaseName;
tm.caseName() = proc0CaseName;
}
}
else
@ -908,12 +906,12 @@ autoPtr<mapDistributePolyMesh> redistributeAndWrite
// Get original objects (before incrementing time!)
if (Pstream::master() && decompose)
{
runTime.TimePaths::caseName() = baseRunTime.caseName();
runTime.caseName() = baseRunTime.caseName();
}
IOobjectList objects(mesh, runTime.timeName());
if (Pstream::master() && decompose)
{
runTime.TimePaths::caseName() = proc0CaseName;
runTime.caseName() = proc0CaseName;
}
Info<< "From time " << runTime.timeName()
@ -932,7 +930,7 @@ autoPtr<mapDistributePolyMesh> redistributeAndWrite
if (Pstream::master() && decompose)
{
runTime.TimePaths::caseName() = baseRunTime.caseName();
runTime.caseName() = baseRunTime.caseName();
}
readFields
(
@ -1112,7 +1110,7 @@ autoPtr<mapDistributePolyMesh> redistributeAndWrite
if (Pstream::master() && decompose)
{
runTime.TimePaths::caseName() = proc0CaseName;
runTime.caseName() = proc0CaseName;
}
}
@ -1192,7 +1190,7 @@ autoPtr<mapDistributePolyMesh> redistributeAndWrite
{
Info<< "Setting caseName to " << baseRunTime.caseName()
<< " to write reconstructed mesh and fields." << endl;
runTime.TimePaths::caseName() = baseRunTime.caseName();
runTime.caseName() = baseRunTime.caseName();
mesh.write();
topoSet::removeFiles(mesh);
@ -1212,7 +1210,7 @@ autoPtr<mapDistributePolyMesh> redistributeAndWrite
// Now we've written all. Reset caseName on master
Info<< "Restoring caseName to " << proc0CaseName << endl;
runTime.TimePaths::caseName() = proc0CaseName;
runTime.caseName() = proc0CaseName;
}
}
else
@ -1258,7 +1256,7 @@ autoPtr<mapDistributePolyMesh> redistributeAndWrite
// Read refinement data
if (Pstream::master() && decompose)
{
runTime.TimePaths::caseName() = baseRunTime.caseName();
runTime.caseName() = baseRunTime.caseName();
}
IOobject io
(
@ -1274,7 +1272,7 @@ autoPtr<mapDistributePolyMesh> redistributeAndWrite
hexRef8Data refData(io);
if (Pstream::master() && decompose)
{
runTime.TimePaths::caseName() = proc0CaseName;
runTime.caseName() = proc0CaseName;
}
// Make sure all processors have valid data (since only some will
@ -1294,13 +1292,13 @@ autoPtr<mapDistributePolyMesh> redistributeAndWrite
{
Info<< "Setting caseName to " << baseRunTime.caseName()
<< " to write reconstructed refinement data." << endl;
runTime.TimePaths::caseName() = baseRunTime.caseName();
runTime.caseName() = baseRunTime.caseName();
refData.write();
// Now we've written all. Reset caseName on master
Info<< "Restoring caseName to " << proc0CaseName << endl;
runTime.TimePaths::caseName() = proc0CaseName;
runTime.caseName() = proc0CaseName;
}
}
else
@ -1314,7 +1312,7 @@ autoPtr<mapDistributePolyMesh> redistributeAndWrite
// // Read sets
// if (Pstream::master() && decompose)
// {
// runTime.TimePaths::caseName() = baseRunTime.caseName();
// runTime.caseName() = baseRunTime.caseName();
// }
// IOobjectList objects(mesh, mesh.facesInstance(), "polyMesh/sets");
//
@ -1323,7 +1321,7 @@ autoPtr<mapDistributePolyMesh> redistributeAndWrite
//
// if (Pstream::master() && decompose)
// {
// runTime.TimePaths::caseName() = proc0CaseName;
// runTime.caseName() = proc0CaseName;
// }
//
// forAll(cellSets, i)
@ -1337,7 +1335,7 @@ autoPtr<mapDistributePolyMesh> redistributeAndWrite
// {
// Info<< "Setting caseName to " << baseRunTime.caseName()
// << " to write reconstructed refinement data." << endl;
// runTime.TimePaths::caseName() = baseRunTime.caseName();
// runTime.caseName() = baseRunTime.caseName();
//
// forAll(cellSets, i)
// {
@ -1346,7 +1344,7 @@ autoPtr<mapDistributePolyMesh> redistributeAndWrite
//
// // Now we've written all. Reset caseName on master
// Info<< "Restoring caseName to " << proc0CaseName << endl;
// runTime.TimePaths::caseName() = proc0CaseName;
// runTime.caseName() = proc0CaseName;
// }
// }
// else
@ -2931,7 +2929,7 @@ int main(int argc, char *argv[])
{
Info<< "Setting caseName to " << baseRunTime.caseName()
<< " to find undecomposed mesh" << endl;
runTime.TimePaths::caseName() = baseRunTime.caseName();
runTime.caseName() = baseRunTime.caseName();
}
masterInstDir = runTime.findInstance
@ -2944,7 +2942,7 @@ int main(int argc, char *argv[])
if (decompose)
{
Info<< "Restoring caseName to " << proc0CaseName << endl;
runTime.TimePaths::caseName() = proc0CaseName;
runTime.caseName() = proc0CaseName;
}
}
Pstream::scatter(masterInstDir);
@ -2970,7 +2968,7 @@ int main(int argc, char *argv[])
{
Info<< "Setting caseName to " << baseRunTime.caseName()
<< " to read undecomposed mesh" << endl;
runTime.TimePaths::caseName() = baseRunTime.caseName();
runTime.caseName() = baseRunTime.caseName();
}
autoPtr<fvMesh> meshPtr = loadOrCreateMesh
@ -2987,7 +2985,7 @@ int main(int argc, char *argv[])
if (Pstream::master() && decompose)
{
Info<< "Restoring caseName to " << proc0CaseName << endl;
runTime.TimePaths::caseName() = proc0CaseName;
runTime.caseName() = proc0CaseName;
}
fvMesh& mesh = meshPtr();
@ -3046,7 +3044,7 @@ int main(int argc, char *argv[])
// Detect lagrangian fields
if (Pstream::master() && decompose)
{
runTime.TimePaths::caseName() = baseRunTime.caseName();
runTime.caseName() = baseRunTime.caseName();
}
parLagrangianRedistributor::findClouds
(
@ -3069,7 +3067,7 @@ int main(int argc, char *argv[])
);
if (Pstream::master() && decompose)
{
runTime.TimePaths::caseName() = proc0CaseName;
runTime.caseName() = proc0CaseName;
}

View File

@ -70,8 +70,8 @@ Foam::TimePaths::TimePaths
)
:
processorCase_(args.parRunControl().parRun()),
distributed_(args.parRunControl().distributed()),
rootPath_(args.rootPath()),
distributed_(args.distributed()),
globalCaseName_(args.globalCaseName()),
case_(args.caseName()),
system_(systemName),
@ -93,8 +93,8 @@ Foam::TimePaths::TimePaths
)
:
processorCase_(false),
rootPath_(rootPath),
distributed_(false),
rootPath_(rootPath),
globalCaseName_(caseName),
case_(caseName),
system_(systemName),
@ -117,8 +117,8 @@ Foam::TimePaths::TimePaths
)
:
processorCase_(processorCase),
rootPath_(rootPath),
distributed_(distributed),
rootPath_(rootPath),
globalCaseName_(globalCaseName),
case_(caseName),
system_(systemName),

View File

@ -56,8 +56,9 @@ class TimePaths
// Private data
bool processorCase_;
const fileName rootPath_;
bool distributed_;
const fileName rootPath_;
fileName globalCaseName_;
fileName case_;
const word system_;

View File

@ -774,8 +774,7 @@ Foam::argList::argList
)
:
args_(argc),
options_(argc),
distributed_(false)
options_(argc)
{
// Check for -fileHandler, which requires an argument.
word handlerType(getEnv("FOAM_FILEHANDLER"));
@ -1150,7 +1149,7 @@ void Foam::argList::parse
label dictNProcs = -1;
if (this->readListIfPresent("roots", roots))
{
distributed_ = true;
parRunControl_.distributed(true);
source = "-roots";
if (roots.size() != 1)
{
@ -1222,7 +1221,7 @@ void Foam::argList::parse
if (decompDict.lookupOrDefault("distributed", false))
{
distributed_ = true;
parRunControl_.distributed(true);
decompDict.readEntry("roots", roots);
}
}
@ -1350,12 +1349,16 @@ void Foam::argList::parse
else
{
// Collect the master's argument list
label nroots;
IPstream fromMaster
(
Pstream::commsTypes::scheduled,
Pstream::masterNo()
);
fromMaster >> args_ >> options_ >> distributed_;
fromMaster >> args_ >> options_ >> nroots;
parRunControl_.distributed(nroots);
// Establish rootPath_/globalCase_/case_ for slave
setCasePaths();

View File

@ -129,7 +129,7 @@ class argList
static bool checkProcessorDirectories_;
//- Switch on/off parallel mode.
// Must be first to be constructed so destructor is done last.
// Construct first so destructor is done last.
ParRunControl parRunControl_;
//- The arguments after removing known options
@ -140,7 +140,6 @@ class argList
word executable_;
fileName rootPath_;
bool distributed_;
fileName globalCase_;
fileName case_;
@ -288,19 +287,12 @@ public:
//- Return root path
inline const fileName& rootPath() const;
//- Return distributed flag
//- (i.e. are rootPaths different on different machines)
inline bool distributed() const;
//- Return case name (parallel run) or global case (serial run)
inline const fileName& caseName() const;
//- Return global case name
inline const fileName& globalCaseName() const;
//- Return parRunControl
inline const ParRunControl& parRunControl() const;
//- Return the full path to the (processor local) case
// \note This is guaranteed to be an absolute path
inline fileName path() const;
@ -309,6 +301,13 @@ public:
// \note This is guaranteed to be an absolute path
inline fileName globalPath() const;
//- Return distributed flag
//- (i.e. are rootPaths different on different machines)
inline bool distributed() const;
//- Return the ParRunControl
inline const ParRunControl& parRunControl() const;
//- Return the number of arguments
inline label size() const;

View File

@ -63,12 +63,6 @@ inline const Foam::fileName& Foam::argList::rootPath() const
}
inline bool Foam::argList::distributed() const
{
return distributed_;
}
inline const Foam::fileName& Foam::argList::caseName() const
{
return case_;
@ -81,12 +75,6 @@ inline const Foam::fileName& Foam::argList::globalCaseName() const
}
inline const Foam::ParRunControl& Foam::argList::parRunControl() const
{
return parRunControl_;
}
inline Foam::fileName Foam::argList::path() const
{
return rootPath()/caseName();
@ -99,6 +87,18 @@ inline Foam::fileName Foam::argList::globalPath() const
}
inline bool Foam::argList::distributed() const
{
return parRunControl_.distributed();
}
inline const Foam::ParRunControl& Foam::argList::parRunControl() const
{
return parRunControl_;
}
inline Foam::label Foam::argList::size() const
{
return args_.size();

View File

@ -3,7 +3,7 @@
\\ / F ield | OpenFOAM: The Open Source CFD Toolbox
\\ / O peration |
\\ / A nd | Copyright (C) 2011-2018 OpenFOAM Foundation
\\/ M anipulation |
\\/ M anipulation | Copyright (C) 2018 OpenCFD Ltd.
-------------------------------------------------------------------------------
License
This file is part of OpenFOAM.
@ -49,18 +49,22 @@ namespace Foam
class ParRunControl
{
bool RunPar;
bool parallel_;
bool distributed_;
public:
//- Construct null
ParRunControl()
:
RunPar(false)
parallel_(false),
distributed_(false)
{}
//- Destructor, triggers Pstream::exit
~ParRunControl()
{
if (RunPar)
if (parallel_)
{
Info<< "Finalising parallel run" << endl;
}
@ -69,10 +73,11 @@ public:
Pstream::exit(0);
}
//- Initialize Pstream for a parallel run
void runPar(int& argc, char**& argv, const bool needsThread)
void runPar(int& argc, char**& argv, bool needsThread)
{
RunPar = true;
parallel_ = true;
if (!Pstream::init(argc, argv, needsThread))
{
@ -81,11 +86,22 @@ public:
}
}
//- Is this a parallel run?
//- True if this is a parallel run.
bool parRun() const
{
return RunPar;
return parallel_;
}
//- True if this is a parallel run and uses distributed roots.
bool distributed() const
{
return parallel_ && distributed_;
}
//- Set use of distributed roots.
void distributed(bool on)
{
distributed_ = (parallel_ ? on : false);
}
};