From bbb9ec41c04af5614ca72b4d241ea4ef14ac18ff Mon Sep 17 00:00:00 2001
From: Markus Holzer <markus.holzer@fau.de>
Date: Wed, 8 Mar 2023 08:19:39 +0100
Subject: [PATCH] Always create process map when creating a blockforest

---
 src/blockforest/Initialization.cpp | 27 ++++++++++++++++-----------
 src/core/mpi/MPIManager.h          | 20 +++++++++-----------
 2 files changed, 25 insertions(+), 22 deletions(-)

diff --git a/src/blockforest/Initialization.cpp b/src/blockforest/Initialization.cpp
index 288a04a49..b91923eeb 100644
--- a/src/blockforest/Initialization.cpp
+++ b/src/blockforest/Initialization.cpp
@@ -50,7 +50,7 @@ shared_ptr< StructuredBlockForest > createUniformBlockGridFromConfig( const shar
                                                                       CellInterval * requestedDomainSize,
                                                                       const bool keepGlobalBlockInformation )
 {
-   if( !!config )
+   if( config != nullptr )
    {
       auto block = config->getGlobalBlock();
       if( block ) {
@@ -200,17 +200,17 @@ shared_ptr< StructuredBlockForest > createUniformBlockGridFromConfig( const Conf
 //**********************************************************************************************************************
 
 shared_ptr< BlockForest >
-createBlockForest( const AABB& domainAABB,
-                   const uint_t numberOfXBlocks, const uint_t numberOfYBlocks, const uint_t numberOfZBlocks,
-                   const uint_t numberOfXProcesses, const uint_t numberOfYProcesses, const uint_t numberOfZProcesses,
-                   const bool xPeriodic /* = false */, const bool yPeriodic /* = false */, const bool zPeriodic /* = false */,
-                   const bool keepGlobalBlockInformation /* = false */ ) {
+   createBlockForest( const AABB& domainAABB,
+                      const uint_t numberOfXBlocks, const uint_t numberOfYBlocks, const uint_t numberOfZBlocks,
+                      const uint_t numberOfXProcesses, const uint_t numberOfYProcesses, const uint_t numberOfZProcesses,
+                      const bool xPeriodic /* = false */, const bool yPeriodic /* = false */, const bool zPeriodic /* = false */,
+                      const bool keepGlobalBlockInformation /* = false */ ) {
 
    const uint_t numberOfProcesses = numberOfXProcesses * numberOfYProcesses * numberOfZProcesses;
 
    if( numeric_cast< int >( numberOfProcesses ) != MPIManager::instance()->numProcesses() )
       WALBERLA_ABORT( "The number of requested processes (" << numberOfProcesses << ") doesn't match the number "
-                      "of active MPI processes (" << MPIManager::instance()->numProcesses() << ")!" );
+                       "of active MPI processes (" << MPIManager::instance()->numProcesses() << ")!" );
 
 
    // initialize SetupBlockForest = determine domain decomposition
@@ -227,10 +227,14 @@ createBlockForest( const AABB& domainAABB,
 
    WALBERLA_MPI_SECTION()
    {
       auto mpiManager = MPIManager::instance();
-      //create cartesian communicator only if not yet a cartesian communicator (or other communicator was created)
-      if ( ! mpiManager->rankValid() )
+      if (!mpiManager->hasWorldCommSetup())
       {
-         mpiManager->createCartesianComm( numberOfXProcesses, numberOfYProcesses, numberOfZProcesses, xPeriodic, yPeriodic, zPeriodic );
+         //create cartesian communicator only if not yet a cartesian communicator (or other communicator was created)
+         if ( ! mpiManager->rankValid() )
+         {
+            mpiManager->createCartesianComm(numberOfXProcesses, numberOfYProcesses, numberOfZProcesses, xPeriodic,
+                                            yPeriodic, zPeriodic);
+         }
 
          processIdMap.resize( numberOfProcesses );
 
@@ -244,12 +248,13 @@ createBlockForest( const AABB& domainAABB,
          }
       }
    }
+   }
 
    // calculate process distribution
 
    sforest.balanceLoad( blockforest::CartesianDistribution( numberOfXProcesses, numberOfYProcesses, numberOfZProcesses, &processIdMap ),
-                        numberOfXProcesses * numberOfYProcesses * numberOfZProcesses );
+                       numberOfXProcesses * numberOfYProcesses * numberOfZProcesses );
 
 
    // create StructuredBlockForest (encapsulates a newly created BlockForest)
diff --git a/src/core/mpi/MPIManager.h b/src/core/mpi/MPIManager.h
index cef90cd62..9ba3fb4d0 100644
--- a/src/core/mpi/MPIManager.h
+++ b/src/core/mpi/MPIManager.h
@@ -122,6 +122,7 @@ public:
    bool hasCartesianSetup() const { return cartesianSetup_; }
    /// Rank is valid after calling createCartesianComm() or useWorldComm()
    bool rankValid() const { return rank_ >= 0; }
+   bool hasWorldCommSetup() const { return rankValid() && !hasCartesianSetup();}
 
    /// Indicates whether MPI-IO can be used with the current MPI communicator; certain versions of OpenMPI produce
    /// segmentation faults when using MPI-IO with a 3D Cartesian MPI communicator (see waLBerla issue #73)
@@ -135,13 +136,13 @@ public:
 private:
 
    /// Rank in MPI_COMM_WORLD
-   int worldRank_;
+   int worldRank_{0};
 
    /// Rank in the custom communicator
-   int rank_;
+   int rank_{-1};
 
    /// Total number of processes
-   int numProcesses_;
+   int numProcesses_{1};
 
    /// Use this communicator for all MPI calls
    /// this is in general not equal to MPI_COMM_WORLD
@@ -150,20 +151,17 @@ private:
    MPI_Comm comm_;
 
    /// Indicates whether initializeMPI has been called. If true, MPI_Finalize is called upon destruction
-   bool isMPIInitialized_;
+   bool isMPIInitialized_{false};
 
    /// Indicates whether a Cartesian communicator has been created
-   bool cartesianSetup_;
+   bool cartesianSetup_{false};
 
-   bool currentlyAborting_;
+   bool currentlyAborting_{false};
 
-   bool finalizeOnDestruction_;
+   bool finalizeOnDestruction_{false};
 
    // Singleton
-   MPIManager() : worldRank_(0), rank_(-1), numProcesses_(1), comm_(MPI_COMM_NULL),
-                  isMPIInitialized_(false), cartesianSetup_(false), currentlyAborting_(false),
-                  finalizeOnDestruction_(false)
-   { WALBERLA_NON_MPI_SECTION() { rank_ = 0; } }
+   MPIManager() : comm_(MPI_COMM_NULL) { WALBERLA_NON_MPI_SECTION() { rank_ = 0; } }
 
 }; // class MPIManager
--
GitLab
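
Note (not part of the patch): the minimal sketch below illustrates what the new MPIManager::hasWorldCommSetup() query reports and why the patched createBlockForest() uses it as a guard. It assumes the usual waLBerla MPIManager interface (initializeMPI(), useWorldComm(), finalizeMPI()); with the patch applied, the process-ID map is always built unless the manager is already running on the plain world communicator, and the Cartesian communicator is additionally created when no valid communicator exists yet.

// sketch.cpp -- hypothetical usage example, not part of the patch
#include "core/mpi/MPIManager.h"

#include <iostream>

int main( int argc, char** argv )
{
   auto mpiManager = walberla::MPIManager::instance();
   mpiManager->initializeMPI( &argc, &argv );

   // No communicator has been chosen yet: rankValid() is false,
   // so hasWorldCommSetup() is false as well.
   std::cout << "world comm setup: " << mpiManager->hasWorldCommSetup() << std::endl;

   // useWorldComm() makes the rank valid without creating a Cartesian
   // communicator -- exactly the state hasWorldCommSetup() reports as true.
   mpiManager->useWorldComm();
   std::cout << "world comm setup: " << mpiManager->hasWorldCommSetup() << std::endl;

   // In this state the patched createBlockForest() skips creating a Cartesian
   // communicator and the process-ID map; in every other state the map is built
   // and the Cartesian communicator is created if no valid one exists yet.

   mpiManager->finalizeMPI();
   return 0;
}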