From fc11e7c9494cdd2b1b229db190580cb41a85d290 Mon Sep 17 00:00:00 2001
From: Sebastian Eibl <sebastian.eibl@fau.de>
Date: Wed, 28 Mar 2018 14:53:16 +0200
Subject: [PATCH] [BUGFIX] cannot call MPI_Comm_rank with MPI_COMM_NULL

MPI_Comm_create hands MPI_COMM_NULL to every process that is not part of the
new sub-communicator, and calling MPI_Comm_rank on MPI_COMM_NULL is erroneous
according to the MPI standard. The removed block only performed a debug check,
so it can be dropped without changing behaviour.
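
For reference, a minimal sketch (not part of this patch) of what a safe
variant of the removed check could look like, assuming the surrounding names
subComm and the WALBERLA_CHECK_* macros from DynamicParMetis.cpp: guard on
MPI_COMM_NULL instead of calling MPI_Comm_rank unconditionally.

   // MPI_Comm_create gives MPI_COMM_NULL to ranks that are not in subGroup;
   // only query rank/size when this process actually owns the communicator.
   if (subComm != MPI_COMM_NULL)
   {
      int subRank;
      int subSize;
      MPI_Comm_rank(subComm, &subRank);
      MPI_Comm_size(subComm, &subSize);
      WALBERLA_CHECK_GREATER_EQUAL(subRank, 0);
      WALBERLA_CHECK_LESS(subRank, subSize);
   }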
---
 src/blockforest/loadbalancing/DynamicParMetis.cpp | 15 ---------------
 1 file changed, 15 deletions(-)

diff --git a/src/blockforest/loadbalancing/DynamicParMetis.cpp b/src/blockforest/loadbalancing/DynamicParMetis.cpp
index 0ec01197d..0e215b6f9 100644
--- a/src/blockforest/loadbalancing/DynamicParMetis.cpp
+++ b/src/blockforest/loadbalancing/DynamicParMetis.cpp
@@ -136,21 +136,6 @@ bool DynamicParMetis::operator()( std::vector< std::pair< const PhantomBlock *,
    MPI_Group_incl(allGroup, int_c(ranks.size()), &ranks[0], &subGroup);
    MPI_Comm_create( MPIManager::instance()->comm(), subGroup, &subComm);
 
-   if ( targetProcess.size() != 0)
-   {
-      int subRank;
-      int subSize;
-      MPI_Comm_rank(subComm, &subRank);
-      MPI_Comm_size(subComm, &subSize);
-      WALBERLA_CHECK_GREATER_EQUAL(subRank, 0);
-      WALBERLA_CHECK_LESS(subRank, subSize);
-   } else
-   {
-      int subRank;
-      MPI_Comm_rank(subComm, &subRank);
-      WALBERLA_CHECK_EQUAL(subRank, MPI_UNDEFINED);
-   }
-
    int64_t edgecut = 0;
    WALBERLA_CHECK_EQUAL( phantomForest.getNumberOfBlocks(), targetProcess.size() );
    std::vector<int64_t> part( targetProcess.size(), int64_c( MPIManager::instance()->rank() ) );
-- 
GitLab