Commit d8fe0a2e authored by Sebastian Eibl

[BUGFIX] prevented communication to non existent ranks

parent 6c514a80
DynamicParMetis.cpp
@@ -30,6 +30,7 @@
#include "core/mpi/MPIManager.h"
#include "core/mpi/Gather.h"
#include "core/mpi/Gatherv.h"
#include "core/mpi/Reduce.h"
#include "core/timing/Timer.h"
@@ -42,6 +43,12 @@ namespace blockforest {
std::pair<uint_t, uint_t> getBlockSequenceRange( const PhantomBlockForest & phantomForest, MPI_Comm comm )
{
const uint_t rank = uint_c(mpi::translateRank(mpi::MPIManager::instance()->comm(), comm, MPIManager::instance()->rank()));
+WALBERLA_DEBUG_SECTION()
+{
+   int rankRaw;
+   MPI_Comm_rank(comm, &rankRaw);
+   WALBERLA_ASSERT_EQUAL(rank, rankRaw);
+}
uint_t numLocalBlocks = phantomForest.getNumberOfBlocks();
@@ -68,7 +75,11 @@ std::map< blockforest::BlockID, uint_t > getBlockIdToSequenceMapping( const Phan
mpi::BufferSystem bs( comm );
for( auto it = neighborProcesses.begin(); it != neighborProcesses.end(); ++it )
-bs.sendBuffer( mpi::translateRank(mpi::MPIManager::instance()->comm(), comm, int_c(*it)) ) << mapping;
+{
+   auto destRank = mpi::translateRank(mpi::MPIManager::instance()->comm(), comm, int_c(*it));
+   if (destRank != -1)
+      bs.sendBuffer( destRank ) << mapping;
+}
bs.setReceiverInfoFromSendBufferState( false, true );
@@ -101,6 +112,8 @@ T * ptr( std::vector<T> & v )
typedef uint_t idx_t;
bool DynamicParMetis::operator()( std::vector< std::pair< const PhantomBlock *, uint_t > > & targetProcess,
std::set< uint_t > & processesToRecvFrom,
const PhantomBlockForest & phantomForest,
@@ -177,14 +190,15 @@ bool DynamicParMetis::operator()( std::vector< std::pair< const PhantomBlock *,
WALBERLA_ASSERT_EQUAL( vwgt.size(), phantomForest.getNumberOfBlocks() );
WALBERLA_ASSERT_EQUAL( vsize.size(), phantomForest.getNumberOfBlocks() );
WALBERLA_ASSERT_EQUAL( adjncy.size(), adjwgt.size() );
WALBERLA_ASSERT_EQUAL( adjwgt.size(), xadj.back() );
int64_t wgtflag = weightsToUse_;
int64_t numflag = 0; // C-style ordering
int64_t ncon = 1; // Number of constraints
int64_t ndims = 3; // Number of dimensions
double ubvec[] = { real_t( 1.05 ) }; // imbalance tolerance
int64_t nparts = int64_c( MPIManager::instance()->numProcesses() ); // number of subdomains
-double ipc2redist = real_t( 1000000.0 ); // compute repartitioning with low edge cut (set lower (down to 0.000001) to get minimal repartitioning )
+auto ipc2redist = ipc2redist_;
MPI_Comm comm = subComm; //MPIManager::instance()->comm();
std::vector<double> tpwgts( uint_c(nparts * ncon), 1.0 / double_c( nparts ) ); // vertex weight fraction that is stored in a subdomain
int64_t options[] = { int64_t( 1 ), int64_t( 0 ), int64_t( 23 ), int64_t( 1 ) };
......
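For background (not part of the commit): translateRank() can only return -1 when the destination communicator does not contain the process in question, i.e. when comm is a sub-communicator with fewer processes than the world communicator. A rough, self-contained sketch of how such a communicator typically arises; the function name buildEvenRanksComm is made up for illustration:

// Sketch: build a sub-communicator containing only the processes with an even
// world rank. Odd-ranked processes have no counterpart in the result, so
// translating their world rank into it must yield MPI_UNDEFINED / -1.
#include <mpi.h>

MPI_Comm buildEvenRanksComm()
{
   int worldRank = 0;
   MPI_Comm_rank( MPI_COMM_WORLD, &worldRank );

   MPI_Comm subComm = MPI_COMM_NULL;
   const int color = ( worldRank % 2 == 0 ) ? 0 : MPI_UNDEFINED; // MPI_UNDEFINED -> not part of any new communicator
   MPI_Comm_split( MPI_COMM_WORLD, color, worldRank, &subComm );
   return subComm; // MPI_COMM_NULL on odd-ranked processes
}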
DynamicParMetis.h
@@ -54,6 +54,10 @@
const PhantomBlockForest & phantomForest,
const uint_t iteration ) const;
+void setipc2redist(double val) {ipc2redist_ = val;}
+double getipc2redist() const {return ipc2redist_;}
bool edgeWeightsUsed() const { return ( weightsToUse_ == PARMETIS_EDGE_WEIGHTS ) || ( weightsToUse_ == PARMETIS_BOTH_WEIGHTS ); }
bool vertexWeightsUsed() const { return ( weightsToUse_ == PARMETIS_VERTEX_WEIGHTS ) || ( weightsToUse_ == PARMETIS_BOTH_WEIGHTS ); }
bool vertexSizeUsed() const { return algorithm_ == PARMETIS_ADAPTIVE_REPART; }
@@ -70,6 +74,8 @@ protected:
Algorithm algorithm_;
WeightsToUse weightsToUse_;
EdgeSource edgeSource_;
+double ipc2redist_ = real_t( 1000.0 ); ///< compute repartitioning with low edge cut (set lower (down to 0.000001) to get minimal repartitioning)
};
class DynamicParMetisBlockInfo
......
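A minimal usage sketch for the new ipc2redist accessors (the default construction of DynamicParMetis shown here is an assumption; the relevant part is the setter/getter pair added above):

// Sketch: tune ParMETIS' ipc2redist trade-off. Lower values (down to 0.000001)
// favor minimal data redistribution over a low edge cut.
walberla::blockforest::DynamicParMetis parmetis; // assumed default-constructible
parmetis.setipc2redist( 0.000001 );
const double ipc2redist = parmetis.getipc2redist(); // returns the value set above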
MPIHelper.cpp
@@ -20,9 +20,18 @@
#include "MPIHelper.h"
+#include <core/debug/CheckFunctions.h>
namespace walberla {
namespace mpi {
+//!
+//! \brief This function maps the rank in one communicator to the rank in another communicator.
+//! \param srcComm source communicator
+//! \param destComm destination communicator
+//! \param srcRank rank in the source communicator
+//! \return rank in the destination communicator, or -1 if not available
+//!
int translateRank(const MPI_Comm srcComm, const MPI_Comm destComm, const int srcRank)
{
int destRank = -1;
@@ -30,11 +39,25 @@ int translateRank(const MPI_Comm srcComm, const MPI_Comm destComm, const int src
MPI_Comm_group(srcComm, &srcGroup);
MPI_Comm_group(destComm, &destGroup);
MPI_Group_translate_ranks(srcGroup, 1, const_cast<int*>(&srcRank), destGroup, &destRank);
+int size;
+MPI_Comm_size(destComm, &size);
+if (destRank < 0 || destRank >= size)
+{
+   WALBERLA_CHECK_EQUAL( destRank, MPI_UNDEFINED );
+   destRank = -1;
+}
MPI_Group_free(&srcGroup);
MPI_Group_free(&destGroup);
return destRank;
}
+//!
+//! \brief This function converts an array of ranks in one communicator to an array of ranks in another communicator.
+//! \param srcComm source communicator
+//! \param destComm destination communicator
+//! \param srcRank source ranks
+//! \return converted ranks, -1 if not available in the destination communicator
+//!
std::vector<int> translateRank(const MPI_Comm srcComm, const MPI_Comm destComm, const std::vector<int>& srcRank)
{
std::vector<int> destRank(srcRank.size(), -1);
@@ -42,6 +65,16 @@ std::vector<int> translateRank(const MPI_Comm srcComm, const MPI_Comm destComm,
MPI_Comm_group(srcComm, &srcGroup);
MPI_Comm_group(destComm, &destGroup);
MPI_Group_translate_ranks(srcGroup, int_c(srcRank.size()), const_cast<int*>(&srcRank[0]), destGroup, &destRank[0]);
+int size;
+MPI_Comm_size(destComm, &size);
+for (auto& dstRnk : destRank)
+{
+   if (dstRnk < 0 || dstRnk >= size)
+   {
+      WALBERLA_CHECK_EQUAL( dstRnk, MPI_UNDEFINED );
+      dstRnk = -1;
+   }
+}
MPI_Group_free(&srcGroup);
MPI_Group_free(&destGroup);
return destRank;
......
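A hedged usage sketch of the updated translateRank() overloads; subComm and the neighbor ranks below are assumptions made for illustration:

// Sketch: translate neighbor ranks from the world communicator into a
// sub-communicator and skip every neighbor that does not exist there,
// mirroring the guard added in DynamicParMetis.cpp above.
std::vector<int> worldNeighbors = { 0, 1, 5 }; // assumed neighbor ranks in the world communicator
std::vector<int> subNeighbors = walberla::mpi::translateRank(
   walberla::mpi::MPIManager::instance()->comm(), // source communicator
   subComm,                                       // destination sub-communicator (assumed to exist)
   worldNeighbors );

for( int r : subNeighbors )
{
   if( r == -1 )
      continue; // this neighbor is not part of subComm -> do not communicate with it
   // ... safe to address rank r within subComm ...
}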