Commit bbb9ec41 authored by Markus Holzer, committed by Christoph Schwarzmeier

Always create process map when creating a blockforest

parent f8d0e35d
@@ -50,7 +50,7 @@ shared_ptr< StructuredBlockForest > createUniformBlockGridFromConfig( const shar
                                            CellInterval * requestedDomainSize,
                                            const bool keepGlobalBlockInformation )
 {
-   if( !!config )
+   if( config != nullptr )
    {
       auto block = config->getGlobalBlock();
       if( block ) {
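The only functional change in this hunk replaces a double negation with an explicit nullptr comparison. Both forms are equivalent for a shared_ptr; a minimal standalone sketch (plain C++, not waLBerla code) of why:

    #include <memory>

    int main()
    {
       std::shared_ptr< int > config = std::make_shared< int >( 42 );

       // std::shared_ptr defines an explicit operator bool, so "!!config" is a
       // roundabout conversion to bool ...
       const bool viaDoubleNegation = !!config;

       // ... while "config != nullptr" performs the same test and states the
       // intent directly, which is presumably why the diff prefers it.
       const bool viaNullptrCompare = ( config != nullptr );

       return ( viaDoubleNegation == viaNullptrCompare ) ? 0 : 1;
    }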
@@ -200,17 +200,17 @@ shared_ptr< StructuredBlockForest > createUniformBlockGridFromConfig( const Conf
 //**********************************************************************************************************************
 shared_ptr< BlockForest >
 createBlockForest( const AABB& domainAABB,
                    const uint_t numberOfXBlocks,    const uint_t numberOfYBlocks,    const uint_t numberOfZBlocks,
                    const uint_t numberOfXProcesses, const uint_t numberOfYProcesses, const uint_t numberOfZProcesses,
                    const bool xPeriodic /* = false */, const bool yPeriodic /* = false */, const bool zPeriodic /* = false */,
                    const bool keepGlobalBlockInformation /* = false */ ) {

    const uint_t numberOfProcesses = numberOfXProcesses * numberOfYProcesses * numberOfZProcesses;

    if( numeric_cast< int >( numberOfProcesses ) != MPIManager::instance()->numProcesses() )
       WALBERLA_ABORT( "The number of requested processes (" << numberOfProcesses << ") doesn't match the number "
                       "of active MPI processes (" << MPIManager::instance()->numProcesses() << ")!" );

    // initialize SetupBlockForest = determine domain decomposition
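These context lines show the guard in front of the decomposition: the requested process grid must multiply out to the number of active MPI ranks. A minimal sketch of the same arithmetic, with illustrative names (checkProcessGrid is not a waLBerla function):

    #include <iostream>

    // A 2 x 2 x 2 process grid needs exactly 8 MPI ranks; anything else
    // aborts before any decomposition work starts.
    bool checkProcessGrid( unsigned px, unsigned py, unsigned pz, int activeRanks )
    {
       const unsigned requested = px * py * pz;
       if( static_cast< int >( requested ) != activeRanks )
       {
          std::cerr << "requested " << requested << " processes, but "
                    << activeRanks << " MPI processes are active\n";
          return false;
       }
       return true;
    }

    int main()
    {
       return checkProcessGrid( 2, 2, 2, 8 ) ? 0 : 1; // e.g. started via mpirun -np 8
    }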
@@ -227,10 +227,14 @@ createBlockForest( const AABB& domainAABB,
    WALBERLA_MPI_SECTION()
    {
       auto mpiManager = MPIManager::instance();
-      //create cartesian communicator only if not yet a cartesian communicator (or other communicator was created)
-      if ( ! mpiManager->rankValid() )
+      if (!mpiManager->hasWorldCommSetup())
       {
-         mpiManager->createCartesianComm( numberOfXProcesses, numberOfYProcesses, numberOfZProcesses, xPeriodic, yPeriodic, zPeriodic );
+         //create cartesian communicator only if not yet a cartesian communicator (or other communicator was created)
+         if ( ! mpiManager->rankValid() )
+         {
+            mpiManager->createCartesianComm(numberOfXProcesses, numberOfYProcesses, numberOfZProcesses, xPeriodic,
+                                            yPeriodic, zPeriodic);
+         }

          processIdMap.resize( numberOfProcesses );
@@ -244,12 +248,13 @@ createBlockForest( const AABB& domainAABB,
             }
          }
       }
+      }
    }

    // calculate process distribution
    sforest.balanceLoad( blockforest::CartesianDistribution( numberOfXProcesses, numberOfYProcesses, numberOfZProcesses, &processIdMap ),
                         numberOfXProcesses * numberOfYProcesses * numberOfZProcesses );

    // create StructuredBlockForest (encapsulates a newly created BlockForest)
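These two hunks are the heart of the commit. Before, the process ID map was filled only on the branch that freshly created the Cartesian communicator; with a communicator already in place (rankValid() returning true), balanceLoad received an empty map. Guarding with !hasWorldCommSetup() instead builds the map both when no communicator exists yet and when a Cartesian communicator is already set up; only the plain world-communicator case skips it. A condensed, self-contained sketch of the new flow (MpiState is an illustrative stand-in for MPIManager):

    #include <vector>

    // Stand-in for the MPIManager queries used in the diff above.
    struct MpiState
    {
       bool rankValid      = false; // some communicator exists
       bool cartesianSetup = false; // ... and it is Cartesian
       bool hasWorldCommSetup() const { return rankValid && !cartesianSetup; }
    };

    // The Cartesian communicator is created only if no communicator exists
    // yet, but the process ID map is now built on every pass except the
    // pure world-communicator case.
    std::vector< unsigned > buildProcessIdMap( MpiState& mpi, unsigned numProcesses )
    {
       std::vector< unsigned > processIdMap;
       if( !mpi.hasWorldCommSetup() )
       {
          if( !mpi.rankValid )
          {
             mpi.rankValid      = true; // stands in for createCartesianComm( ... )
             mpi.cartesianSetup = true;
          }
          processIdMap.resize( numProcesses );
          // ... filled with ranks queried from the Cartesian communicator ...
       }
       return processIdMap;
    }

    int main()
    {
       MpiState mpi;
       const auto first  = buildProcessIdMap( mpi, 8 ); // fresh run: map is built
       const auto second = buildProcessIdMap( mpi, 8 ); // comm already Cartesian:
                                                        // map is built again now
       return ( first.size() == 8 && second.size() == 8 ) ? 0 : 1;
    }

The companion hunks below, in the MPIManager header, add the predicate this guard relies on and tidy up member initialization.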
@@ -122,6 +122,7 @@ public:
    bool hasCartesianSetup() const { return cartesianSetup_; }
    /// Rank is valid after calling createCartesianComm() or useWorldComm()
    bool rankValid() const { return rank_ >= 0; }
+   bool hasWorldCommSetup() const { return rankValid() && !hasCartesianSetup();}

    /// Indicates whether MPI-IO can be used with the current MPI communicator; certain versions of OpenMPI produce
    /// segmentation faults when using MPI-IO with a 3D Cartesian MPI communicator (see waLBerla issue #73)
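The new one-liner classifies the manager's state using the two predicates that already existed, so its three possible outcomes follow directly from the definition. A tiny self-contained model (illustrative, not waLBerla code):

    #include <cassert>

    struct State { bool rankValid; bool cartesianSetup; };

    bool hasWorldCommSetup( State s ) { return s.rankValid && !s.cartesianSetup; }

    int main()
    {
       assert( !hasWorldCommSetup( { false, false } ) ); // no communicator yet
       assert(  hasWorldCommSetup( { true,  false } ) ); // useWorldComm() was called
       assert( !hasWorldCommSetup( { true,  true  } ) ); // Cartesian communicator
       return 0;
    }

Hence !hasWorldCommSetup() at the call site admits exactly the "no communicator yet" and "Cartesian communicator" states.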
@@ -135,13 +136,13 @@ public:
 private:

    /// Rank in MPI_COMM_WORLD
-   int worldRank_;
+   int worldRank_{0};

    /// Rank in the custom communicator
-   int rank_;
+   int rank_{-1};

    /// Total number of processes
-   int numProcesses_;
+   int numProcesses_{1};

    /// Use this communicator for all MPI calls
    /// this is in general not equal to MPI_COMM_WORLD
@@ -150,20 +151,17 @@ private:
    MPI_Comm comm_;

    /// Indicates whether initializeMPI has been called. If true, MPI_Finalize is called upon destruction
-   bool isMPIInitialized_;
+   bool isMPIInitialized_{false};

    /// Indicates whether a Cartesian communicator has been created
-   bool cartesianSetup_;
+   bool cartesianSetup_{false};

-   bool currentlyAborting_;
+   bool currentlyAborting_{false};

-   bool finalizeOnDestruction_;
+   bool finalizeOnDestruction_{false};

    // Singleton
-   MPIManager() : worldRank_(0), rank_(-1), numProcesses_(1), comm_(MPI_COMM_NULL),
-              isMPIInitialized_(false), cartesianSetup_(false), currentlyAborting_(false),
-              finalizeOnDestruction_(false)
-   { WALBERLA_NON_MPI_SECTION() { rank_ = 0; } }
+   MPIManager() : comm_(MPI_COMM_NULL) { WALBERLA_NON_MPI_SECTION() { rank_ = 0; } }

 }; // class MPIManager
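The last two hunks switch every scalar member to a C++11 in-class default initializer, shrinking the singleton constructor to the single member kept in the initializer list (comm_, whose null value MPI_COMM_NULL is a library constant). A minimal sketch of the same pattern outside waLBerla:

    // Default member initializers replace a long constructor initializer
    // list; the constructor body keeps only the genuinely special cases.
    class Manager
    {
    public:
       static Manager& instance() { static Manager m; return m; }
       int rank() const { return rank_; }

    private:
       int  worldRank_{0};
       int  rank_{-1};        // -1 means "no communicator chosen yet"
       int  numProcesses_{1};
       bool initialized_{false};

       Manager() { /* special-case setup would go here, as in MPIManager */ }
    };

    int main() { return Manager::instance().rank() == -1 ? 0 : 1; }

One advantage of this style is that a member can no longer be forgotten in one of several constructors; the compiler applies the default wherever the initializer list is silent.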