diff --git a/src/blockforest/Initialization.cpp b/src/blockforest/Initialization.cpp
index 288a04a499575ff5d35417a0c77ff2aebdf91f59..b91923eebd5b53156fbe3e916c8aeb84963a283b 100644
--- a/src/blockforest/Initialization.cpp
+++ b/src/blockforest/Initialization.cpp
@@ -50,7 +50,7 @@ shared_ptr< StructuredBlockForest > createUniformBlockGridFromConfig( const shar
                                                                       CellInterval * requestedDomainSize,
                                                                       const bool keepGlobalBlockInformation )
 {
-   if( !!config )
+   if( config != nullptr )
    {
       auto block = config->getGlobalBlock();
       if( block ) {
@@ -200,17 +200,17 @@ shared_ptr< StructuredBlockForest > createUniformBlockGridFromConfig( const Conf
 //**********************************************************************************************************************
 
 shared_ptr< BlockForest >
-createBlockForest(      const AABB& domainAABB,
-                        const uint_t numberOfXBlocks,         const uint_t numberOfYBlocks,         const uint_t numberOfZBlocks,
-                        const uint_t numberOfXProcesses,      const uint_t numberOfYProcesses,      const uint_t numberOfZProcesses,
-                        const bool   xPeriodic /* = false */, const bool   yPeriodic /* = false */, const bool   zPeriodic /* = false */,
-                        const bool keepGlobalBlockInformation /* = false */ ) {
+   createBlockForest(      const AABB& domainAABB,
+                     const uint_t numberOfXBlocks,         const uint_t numberOfYBlocks,         const uint_t numberOfZBlocks,
+                     const uint_t numberOfXProcesses,      const uint_t numberOfYProcesses,      const uint_t numberOfZProcesses,
+                     const bool   xPeriodic /* = false */, const bool   yPeriodic /* = false */, const bool   zPeriodic /* = false */,
+                     const bool keepGlobalBlockInformation /* = false */ ) {
 
    const uint_t numberOfProcesses = numberOfXProcesses * numberOfYProcesses * numberOfZProcesses;
 
    if( numeric_cast< int >( numberOfProcesses ) != MPIManager::instance()->numProcesses() )
       WALBERLA_ABORT( "The number of requested processes (" << numberOfProcesses << ") doesn't match the number "
-                                                                                    "of active MPI processes (" << MPIManager::instance()->numProcesses() << ")!" );
+                                                                                   "of active MPI processes (" << MPIManager::instance()->numProcesses() << ")!" );
 
    // initialize SetupBlockForest = determine domain decomposition
 
@@ -227,10 +227,14 @@ createBlockForest(      const AABB& domainAABB,
    WALBERLA_MPI_SECTION()
    {
       auto mpiManager = MPIManager::instance();
-      //create cartesian communicator only if not yet a cartesian communicator (or other communicator was created)
-      if ( ! mpiManager->rankValid() )
+      if (!mpiManager->hasWorldCommSetup())
       {
-         mpiManager->createCartesianComm( numberOfXProcesses, numberOfYProcesses, numberOfZProcesses, xPeriodic, yPeriodic, zPeriodic );
+         // create the Cartesian communicator only if no Cartesian (or other) communicator has been created yet
+         if ( ! mpiManager->rankValid() )
+         {
+            mpiManager->createCartesianComm(numberOfXProcesses, numberOfYProcesses, numberOfZProcesses, xPeriodic,
+                                            yPeriodic, zPeriodic);
+         }
 
          processIdMap.resize( numberOfProcesses );
 
@@ -244,12 +248,13 @@ createBlockForest(      const AABB& domainAABB,
             }
          }
       }
+
    }
 
    // calculate process distribution
 
    sforest.balanceLoad( blockforest::CartesianDistribution( numberOfXProcesses, numberOfYProcesses, numberOfZProcesses, &processIdMap ),
-                        numberOfXProcesses * numberOfYProcesses * numberOfZProcesses );
+                       numberOfXProcesses * numberOfYProcesses * numberOfZProcesses );
 
    // create StructuredBlockForest (encapsulates a newly created BlockForest)
 
diff --git a/src/core/mpi/MPIManager.h b/src/core/mpi/MPIManager.h
index cef90cd62eeffa5e898f007a88fbf829280992cb..9ba3fb4d04b8f6b0c7f1e2041454677b9156bf75 100644
--- a/src/core/mpi/MPIManager.h
+++ b/src/core/mpi/MPIManager.h
@@ -122,6 +122,7 @@ public:
    bool hasCartesianSetup() const { return cartesianSetup_;  }
    /// Rank is valid after calling createCartesianComm() or useWorldComm()
    bool rankValid()         const { return rank_ >= 0;       }
+   bool hasWorldCommSetup() const { return rankValid() && !hasCartesianSetup(); }
 
    /// Indicates whether MPI-IO can be used with the current MPI communicator; certain versions of OpenMPI produce
    /// segmentation faults when using MPI-IO with a 3D Cartesian MPI communicator (see waLBerla issue #73)
@@ -135,13 +136,13 @@ public:
 private:
 
    /// Rank in MPI_COMM_WORLD
-   int worldRank_;
+   int worldRank_{0};
 
    /// Rank in the custom communicator
-   int rank_;
+   int rank_{-1};
 
    /// Total number of processes
-   int numProcesses_;
+   int numProcesses_{1};
 
    /// Use this communicator for all MPI calls
    /// this is in general not equal to MPI_COMM_WORLD
@@ -150,20 +151,17 @@ private:
    MPI_Comm comm_;
 
    /// Indicates whether initializeMPI has been called. If true, MPI_Finalize is called upon destruction
-   bool isMPIInitialized_;
+   bool isMPIInitialized_{false};
 
    /// Indicates whether a Cartesian communicator has been created
-   bool cartesianSetup_;
+   bool cartesianSetup_{false};
 
-   bool currentlyAborting_;
+   bool currentlyAborting_{false};
 
-   bool finalizeOnDestruction_;
+   bool finalizeOnDestruction_{false};
 
    // Singleton
-   MPIManager() : worldRank_(0), rank_(-1), numProcesses_(1), comm_(MPI_COMM_NULL),
-                  isMPIInitialized_(false), cartesianSetup_(false), currentlyAborting_(false),
-                  finalizeOnDestruction_(false)
-   { WALBERLA_NON_MPI_SECTION() { rank_ = 0; } }
+   MPIManager() : comm_(MPI_COMM_NULL) { WALBERLA_NON_MPI_SECTION() { rank_ = 0; } }
 
 }; // class MPIManager
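
The intent of the new predicate is perhaps clearer as a small standalone state sketch. MPIManagerState below is a mock that only mirrors the members and accessors declared in this header; it is not the real class, and the rank/flag assignments stand in for the effects of useWorldComm() and createCartesianComm().

#include <cassert>

// Mock of the relevant MPIManager state (illustration only, not waLBerla code).
struct MPIManagerState
{
   int  rank_           {-1};    // -1 until createCartesianComm() or useWorldComm() assigns a rank
   bool cartesianSetup_ {false}; // set by createCartesianComm() only

   bool rankValid()         const { return rank_ >= 0; }
   bool hasCartesianSetup() const { return cartesianSetup_; }
   bool hasWorldCommSetup() const { return rankValid() && !hasCartesianSetup(); }
};

int main()
{
   MPIManagerState m;                  // freshly constructed manager: no communicator chosen
   assert( !m.hasWorldCommSetup() );

   m.rank_ = 0;                        // state after useWorldComm(): rank valid, no Cartesian layout
   assert( m.hasWorldCommSetup() );

   m.cartesianSetup_ = true;           // state after createCartesianComm(): Cartesian setup wins
   assert( !m.hasWorldCommSetup() );
   return 0;
}
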