Commit baf667d3 authored by Christoph Rettinger

Merge branch 'amr_and_lb_utils' into 'master'

Utilities and functionalities for AMR and LB

See merge request walberla/walberla!116
parents 2d079319 38ecfa2c
......@@ -22,6 +22,7 @@
#pragma once
#include "SetupBlockForest.h"
#include "BlockForest.h"
#include "core/config/Config.h"
#include "core/math/AABB.h"
......@@ -39,6 +40,8 @@ class AABBRefinementSelection
{
public:
AABBRefinementSelection(){}
AABBRefinementSelection( const Config::BlockHandle & configBlock )
{
if( configBlock )
......@@ -79,25 +82,10 @@ public:
regions_.push_back( std::make_pair( region, level ) );
}
std::vector< std::pair< math::AABB, uint_t > > transformRegionsToAABBs( SetupBlockForest & forest ) const
{
std::vector< std::pair< math::AABB, uint_t > > aabbs;
math::AABB aabb = forest.getDomain();
for( auto region = regions_.begin(); region != regions_.end(); ++region )
{
aabbs.push_back( std::make_pair( math::AABB( aabb.xMin() + region->first.xMin() * aabb.xSize(),
aabb.yMin() + region->first.yMin() * aabb.ySize(),
aabb.zMin() + region->first.zMin() * aabb.zSize(),
aabb.xMin() + region->first.xMax() * aabb.xSize(),
aabb.yMin() + region->first.yMax() * aabb.ySize(),
aabb.zMin() + region->first.zMax() * aabb.zSize() ), region->second ) );
}
return aabbs;
}
// for static refinement
void operator()( SetupBlockForest & forest )
{
std::vector< std::pair< math::AABB, uint_t > > aabbs = transformRegionsToAABBs( forest );
std::vector< std::pair< math::AABB, uint_t > > aabbs = transformRegionsToAABBs( forest.getDomain() );
aabbs.insert( aabbs.end(), aabbs_.begin(), aabbs_.end() );
if( aabbs.empty() )
......@@ -113,8 +101,60 @@ public:
}
}
// for dynamic refinement
void operator()(std::vector< std::pair< const Block *, uint_t > > & minTargetLevels,
std::vector< const Block * > &, const BlockForest & forest )
{
std::vector< std::pair< math::AABB, uint_t > > aabbs = transformRegionsToAABBs( forest.getDomain() );
aabbs.insert( aabbs.end(), aabbs_.begin(), aabbs_.end() );
for( auto it = minTargetLevels.begin(); it != minTargetLevels.end(); ++it )
{
uint_t currentLevelOfBlock = it->first->getLevel();
uint_t targetLevelOfBlock = currentLevelOfBlock;
for( auto aabb = aabbs.begin(); aabb != aabbs.end(); ++aabb )
{
if( it->first->getAABB().intersects( aabb->first ) )
{
uint_t targetLevelOfAABB = aabb->second;
if( currentLevelOfBlock > targetLevelOfAABB )
{
targetLevelOfBlock = currentLevelOfBlock - uint_t(1);
}
else if( currentLevelOfBlock < targetLevelOfAABB )
{
targetLevelOfBlock = currentLevelOfBlock + uint_t(1);
}
// only the first found intersecting AABB is taken into account
break;
}
}
WALBERLA_CHECK_LESS_EQUAL(std::abs(int_c(targetLevelOfBlock) - int_c(currentLevelOfBlock)), uint_t(1), "Only level difference of maximum 1 allowed!");
it->second = targetLevelOfBlock;
}
}
private:
std::vector< std::pair< math::AABB, uint_t > > transformRegionsToAABBs( const math::AABB & simulationDomain ) const
{
std::vector< std::pair< math::AABB, uint_t > > aabbs;
for( auto region = regions_.begin(); region != regions_.end(); ++region )
{
aabbs.push_back( std::make_pair( math::AABB( simulationDomain.xMin() + region->first.xMin() * simulationDomain.xSize(),
simulationDomain.yMin() + region->first.yMin() * simulationDomain.ySize(),
simulationDomain.zMin() + region->first.zMin() * simulationDomain.zSize(),
simulationDomain.xMin() + region->first.xMax() * simulationDomain.xSize(),
simulationDomain.yMin() + region->first.yMax() * simulationDomain.ySize(),
simulationDomain.zMin() + region->first.zMax() * simulationDomain.zSize() ), region->second ) );
}
return aabbs;
}
std::vector< std::pair< math::AABB, uint_t > > aabbs_;
std::vector< std::pair< math::AABB, uint_t > > regions_;
......
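For orientation, a minimal usage sketch of the extended AABBRefinementSelection: regions are given in relative coordinates in [0,1]^3 and are scaled to the simulation domain by transformRegionsToAABBs, and the same object can now act both as a static refinement selector and as a dynamic min-target-level callback. The include paths, the function name and the shared_ptr "forest" are assumptions, not part of this commit.

// Sketch only: not part of the commit, names and paths are assumed.
#include "blockforest/AABBRefinementSelection.h" // header shown above; path assumed
#include "blockforest/SetupBlockForest.h"
#include "blockforest/BlockForest.h"

using namespace walberla;

void registerRefinement( blockforest::SetupBlockForest & setupForest,
                         const shared_ptr< blockforest::BlockForest > & forest )
{
   blockforest::AABBRefinementSelection refinementSelection;
   // region in relative coordinates [0,1]^3, refined to level 2
   refinementSelection.addRegion( math::AABB( real_t(0.4), real_t(0.4), real_t(0.4),
                                              real_t(0.6), real_t(0.6), real_t(0.6) ), uint_t(2) );

   // static refinement: evaluated once while the domain is set up
   setupForest.addRefinementSelectionFunction( refinementSelection );

   // dynamic refinement: evaluated during every refresh of the block forest
   forest->setRefreshMinTargetLevelDeterminationFunction( refinementSelection );
}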
......@@ -989,6 +989,62 @@ private:
class CombinedMinTargetLevelDeterminationFunctions
{
public:
typedef blockforest::BlockForest::RefreshMinTargetLevelDeterminationFunction MinTargetLevelDeterminationFunction;
CombinedMinTargetLevelDeterminationFunctions(const std::function<uint_t(const std::vector<uint_t> &)> & targetLevelReductionFct = [](const std::vector<uint_t> & t){ return *std::max_element(t.begin(), t.end());})
: targetLevelReductionFct_( targetLevelReductionFct )
{
}
void add( const MinTargetLevelDeterminationFunction & function )
{
functions_.push_back( function );
}
void operator()( std::vector< std::pair< const Block *, uint_t > > & minTargetLevels,
std::vector< const Block * > & blocksAlreadyMarkedForRefinement,
const blockforest::BlockForest & forest )
{
const uint_t numberOfBlocks = minTargetLevels.size();
std::vector< std::vector< std::pair< const Block *, uint_t > > > minTargetLevelsPerFunction(functions_.size(), minTargetLevels);
// evaluate the different determination functions
auto iter = minTargetLevelsPerFunction.begin();
for( auto function = functions_.begin(); function != functions_.end(); ++function, ++iter )
{
(*function)( *iter, blocksAlreadyMarkedForRefinement, forest );
WALBERLA_ASSERT_EQUAL(iter->size(), numberOfBlocks, "Number of blocks has changed during min target level determination!");
}
// combine the outcome of the different functions into a single target level
std::vector<uint_t> targetLevels(functions_.size());
for( uint_t block = 0; block < numberOfBlocks; ++block )
{
for( uint_t fct = 0; fct < functions_.size(); ++fct)
{
WALBERLA_ASSERT_EQUAL(minTargetLevelsPerFunction[fct][block].first->getId(), minTargetLevels[block].first->getId());
targetLevels[fct] = minTargetLevelsPerFunction[fct][block].second;
}
minTargetLevels[block].second = targetLevelReductionFct_(targetLevels);
}
}
private:
std::vector< MinTargetLevelDeterminationFunction > functions_;
std::function<uint_t(const std::vector<uint_t> &)> targetLevelReductionFct_;
}; // class CombinedMinTargetLevelDeterminationFunctions
} // namespace blockforest
using blockforest::BlockForest;
......
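A possible way to use the new combiner (a sketch under assumptions: the second criterion is a purely illustrative lambda, and "forest" is an assumed shared_ptr): each added determination function is evaluated independently on a copy of the per-block target levels, and the results are reduced per block, by default with std::max_element, so the finest requested level wins.

// Sketch only: combine two min-target-level criteria.
#include "blockforest/BlockForest.h"
#include "blockforest/AABBRefinementSelection.h" // path assumed

using namespace walberla;

void setupCombinedCriteria( const shared_ptr< blockforest::BlockForest > & forest )
{
   // default reduction: maximum of all requested levels per block
   blockforest::CombinedMinTargetLevelDeterminationFunctions combined;

   blockforest::AABBRefinementSelection aabbSelection;
   aabbSelection.addRegion( math::AABB( real_t(0), real_t(0), real_t(0),
                                        real_t(0.5), real_t(0.5), real_t(0.5) ), uint_t(1) );
   combined.add( aabbSelection );

   // second, purely illustrative criterion: keep every block on its current level
   combined.add( []( std::vector< std::pair< const blockforest::Block *, uint_t > > & minTargetLevels,
                     std::vector< const blockforest::Block * > &,
                     const blockforest::BlockForest & )
   {
      for( auto & p : minTargetLevels )
         p.second = p.first->getLevel();
   } );

   forest->setRefreshMinTargetLevelDeterminationFunction( combined );
}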
......@@ -101,19 +101,6 @@ std::map< blockforest::BlockID, uint_t > getBlockIdToSequenceMapping( const Phan
return mapping;
}
template< typename T >
T * ptr( std::vector<T> & v )
{
if( v.empty() )
return NULL;
else
return &( v.front() );
}
typedef uint_t idx_t;
bool DynamicParMetis::operator()( std::vector< std::pair< const PhantomBlock *, uint_t > > & targetProcess,
std::set< uint_t > & processesToRecvFrom,
const PhantomBlockForest & phantomForest,
......@@ -163,6 +150,7 @@ bool DynamicParMetis::operator()( std::vector< std::pair< const PhantomBlock *,
std::vector<double> xyz;
uint_t blockIndex = 0;
int64_t ncon = int64_c(ncon_);
for( auto it = targetProcess.begin(); it != targetProcess.end(); ++it, ++blockIndex )
{
WALBERLA_CHECK_EQUAL(blockIndex, mapping.find(it->first->getId())->second - blockSequenceRange.first);
......@@ -181,7 +169,7 @@ bool DynamicParMetis::operator()( std::vector< std::pair< const PhantomBlock *,
WALBERLA_CHECK_LESS( mapIt->second, vtxdist.back() );
adjncy.push_back( int64_c( mapIt->second ) );
auto edgeWeightIt = bi.getEdgeWeights().find( nit->getId() );
//WALBERLA_CHECK_UNEQUAL( edgeWeightIt->second, 0 ); // perhaps WARNING
WALBERLA_CHECK_GREATER_EQUAL( edgeWeightIt->second, 0 );
adjwgt.push_back( edgeWeightIt == bi.getEdgeWeights().end() ? int64_t( 1 ) : edgeWeightIt->second );
}
break;
......@@ -193,14 +181,22 @@ bool DynamicParMetis::operator()( std::vector< std::pair< const PhantomBlock *,
WALBERLA_CHECK_GREATER_EQUAL( mapIt->second, 0 );
WALBERLA_CHECK_LESS( mapIt->second, vtxdist.back() );
adjncy.push_back( int64_c( mapIt->second ) );
//WALBERLA_CHECK_UNEQUAL( edgeIt->second, 0 ); // perhaps WARNING
WALBERLA_CHECK_GREATER_EQUAL( edgeIt->second, 0 );
adjwgt.push_back( edgeIt->second );
}
break;
}
WALBERLA_CHECK_UNEQUAL( bi.getVertexWeight(), 0 );
vwgt.push_back( bi.getVertexWeight() );
WALBERLA_CHECK_EQUAL(ncon, int64_c(bi.getNcon()), "Number of constraints on block does not fit to specified number of constraints in ParMetis setup!");
for( uint_t con = uint_t(0); con < bi.getNcon(); ++con )
{
WALBERLA_CHECK_GREATER_EQUAL( bi.getVertexWeight(con), 0 );
vwgt.push_back( bi.getVertexWeight(con) );
}
vsize.push_back( bi.getVertexSize() );
xyz.push_back( bi.getVertexCoords()[0] );
xyz.push_back( bi.getVertexCoords()[1] );
xyz.push_back( bi.getVertexCoords()[2] );
......@@ -215,20 +211,19 @@ bool DynamicParMetis::operator()( std::vector< std::pair< const PhantomBlock *,
{
WALBERLA_ASSERT_LESS( xadj[i-1], xadj[i] );
}
WALBERLA_CHECK_EQUAL( vwgt.size(), targetProcess.size() );
WALBERLA_CHECK_EQUAL( int64_c(vwgt.size()), int64_c(targetProcess.size()) * ncon);
WALBERLA_CHECK_EQUAL( vsize.size(), targetProcess.size() );
WALBERLA_CHECK_EQUAL( xyz.size(), targetProcess.size() * 3 );
WALBERLA_CHECK_EQUAL( adjncy.size(), adjwgt.size() );
WALBERLA_CHECK_EQUAL( adjwgt.size(), xadj.back() );
int64_t wgtflag = weightsToUse_;
int64_t numflag = 0; // C-style ordering
int64_t ncon = 1; // Number of constraints
int64_t ndims = 3; // Number of dimensions
std::vector<double> ubvec( uint_c(ncon), double_c( 1.05 ) ); // imbalance tolerance
int64_t nparts = int64_c( MPIManager::instance()->numProcesses() ); // number of subdomains
double ipc2redist = double_c(ipc2redist_);
MPI_Comm comm = subComm; //MPIManager::instance()->comm();
int64_t wgtflag = weightsToUse_;
int64_t numflag = 0; // C-style ordering
int64_t ndims = 3; // Number of dimensions
std::vector<double> ubvec = ubvec_; // imbalance tolerance
int64_t nparts = int64_c( MPIManager::instance()->numProcesses() ); // number of subdomains
double ipc2redist = double_c(ipc2redist_);
MPI_Comm comm = subComm; //MPIManager::instance()->comm();
std::vector<double> tpwgts( uint_c(nparts * ncon), 1.0 / double_c( nparts ) ); // vertex weight fraction that is stored in a subdomain
std::vector<int64_t> options = { int64_t( 1 ), int64_t( 0 ), int64_t( 23 ), int64_t( 1 ) };
......@@ -237,34 +232,36 @@ bool DynamicParMetis::operator()( std::vector< std::pair< const PhantomBlock *,
switch( algorithm_ )
{
case PARMETIS_PART_GEOM:
WALBERLA_ASSERT_EQUAL(ncon, int64_t(1), "Chosen algorithm can only work with single constraints!");
parmetisTimer.start();
metisResult = core::ParMETIS_V3_PartGeom( ptr( vtxdist ), &ndims, ptr( xyz ), ptr( part ), &comm );
metisResult = core::ParMETIS_V3_PartGeom( vtxdist.data(), &ndims, xyz.data(), part.data(), &comm );
parmetisTimer.end();
break;
case PARMETIS_PART_GEOM_KWAY:
parmetisTimer.start();
metisResult = core::ParMETIS_V3_PartGeomKway( ptr( vtxdist ), ptr( xadj ), ptr( adjncy ), ptr( vwgt ), ptr( adjwgt ), &wgtflag, &numflag, &ndims, ptr( xyz ), &ncon, &nparts, ptr( tpwgts ), ptr( ubvec ), ptr( options ), &edgecut, ptr( part ), &comm );
metisResult = core::ParMETIS_V3_PartGeomKway( vtxdist.data(), xadj.data(), adjncy.data(), vwgt.data(), adjwgt.data(), &wgtflag, &numflag, &ndims, xyz.data(), &ncon, &nparts, tpwgts.data(), ubvec.data(), options.data(), &edgecut, part.data(), &comm );
parmetisTimer.end();
break;
case PARMETIS_PART_KWAY:
parmetisTimer.start();
metisResult = core::ParMETIS_V3_PartKway( ptr( vtxdist ), ptr( xadj ), ptr( adjncy ), ptr( vwgt ), ptr( adjwgt ), &wgtflag, &numflag, &ncon, &nparts, ptr( tpwgts ), ptr(ubvec), ptr(options), &edgecut, ptr( part ), &comm );
metisResult = core::ParMETIS_V3_PartKway( vtxdist.data(), xadj.data(), adjncy.data(), vwgt.data(), adjwgt.data(), &wgtflag, &numflag, &ncon, &nparts, tpwgts.data(), ubvec.data(), options.data(), &edgecut, part.data(), &comm );
parmetisTimer.end();
break;
case PARMETIS_ADAPTIVE_REPART:
parmetisTimer.start();
metisResult = core::ParMETIS_V3_AdaptiveRepart( ptr( vtxdist ), ptr( xadj ), ptr( adjncy ), ptr( vwgt ), ptr( vsize ), ptr( adjwgt ), &wgtflag, &numflag, &ncon, &nparts, ptr( tpwgts ), ptr(ubvec), &ipc2redist, ptr(options), &edgecut, ptr( part ), &comm );
metisResult = core::ParMETIS_V3_AdaptiveRepart( vtxdist.data(), xadj.data(), adjncy.data(), vwgt.data(), vsize.data(), adjwgt.data(), &wgtflag, &numflag, &ncon, &nparts, tpwgts.data(), ubvec.data(), &ipc2redist, options.data(), &edgecut, part.data(), &comm );
parmetisTimer.end();
break;
case PARMETIS_REFINE_KWAY:
parmetisTimer.start();
metisResult = core::ParMETIS_V3_RefineKway( ptr( vtxdist ), ptr( xadj ), ptr( adjncy ), ptr( vwgt ), ptr( adjwgt ), &wgtflag, &numflag, &ncon, &nparts, ptr( tpwgts ), ptr(ubvec), ptr(options), &edgecut, ptr( part ), &comm );
metisResult = core::ParMETIS_V3_RefineKway( vtxdist.data(), xadj.data(), adjncy.data(), vwgt.data(), adjwgt.data(), &wgtflag, &numflag, &ncon, &nparts, tpwgts.data(), ubvec.data(), options.data(), &edgecut, part.data(), &comm );
parmetisTimer.end();
break;
}
if( metisResult != core::METIS_OK )
WALBERLA_ABORT("ParMetis failed!");
}
// Determine which processes will receive a block from this process
......@@ -278,7 +275,7 @@ bool DynamicParMetis::operator()( std::vector< std::pair< const PhantomBlock *,
isSendingBlockToProcess[uint_c(MPIManager::instance()->rank())] = uint8_t( 0 );
std::vector<uint8_t> isReceivingBlockFromProcess( uint_c(MPIManager::instance()->numProcesses()), uint8_t( 0 ) );
MPI_Alltoall( ptr( isSendingBlockToProcess ), 1, MPITrait<uint8_t>::type(), ptr( isReceivingBlockFromProcess ), 1, MPITrait<uint8_t>::type(), MPIManager::instance()->comm() );
MPI_Alltoall( isSendingBlockToProcess.data(), 1, MPITrait<uint8_t>::type(), isReceivingBlockFromProcess.data(), 1, MPITrait<uint8_t>::type(), MPIManager::instance()->comm() );
for( uint_t i = 0; i < isReceivingBlockFromProcess.size(); ++i )
if( isReceivingBlockFromProcess[i] == uint8_t( 1 ) )
processesToRecvFrom.insert( i );
......@@ -317,12 +314,12 @@ DynamicParMetis::Algorithm DynamicParMetis::stringToAlgorithm( std::string s )
return PARMETIS_PART_GEOM;
else if( s == "PART_KWAY" )
return PARMETIS_PART_KWAY;
else if( s == "PART_ADAPTIVE_REPART" )
else if( s == "ADAPTIVE_REPART" )
return PARMETIS_ADAPTIVE_REPART;
else if( s == "REFINE_KWAY" )
return PARMETIS_REFINE_KWAY;
else
WALBERLA_ABORT( "Illegal ParMetis algorithm specified (" << s << ")! Valid choices are: \"PART_GEOM_KWAY\", \"PART_KWAY\", \"PART_ADAPTIVE_REPART\", or \"REFINE_KWAY\"." );
WALBERLA_ABORT( "Illegal ParMetis algorithm specified (" << s << ")! Valid choices are: \"PART_GEOM_KWAY\", \"PART_GEOM\", \"PART_KWAY\", \"ADAPTIVE_REPART\", or \"REFINE_KWAY\"." );
}
......@@ -369,7 +366,7 @@ std::string DynamicParMetis::algorithmToString( ) const
case walberla::blockforest::DynamicParMetis::PARMETIS_PART_KWAY:
return "PART_KWAY";
case walberla::blockforest::DynamicParMetis::PARMETIS_ADAPTIVE_REPART:
return "PART_ADAPTIVE_REPART";
return "ADAPTIVE_REPART";
case walberla::blockforest::DynamicParMetis::PARMETIS_REFINE_KWAY:
return "PARMETIS_REFINE_KWAY";
}
......
......@@ -30,6 +30,7 @@
#include <cmath>
#include <map>
#include <vector>
namespace walberla {
namespace blockforest {
......@@ -48,8 +49,9 @@ public:
DynamicParMetis( const Algorithm algorithm = PARMETIS_PART_GEOM_KWAY,
const WeightsToUse weightsToUse = PARMETIS_BOTH_WEIGHTS,
const EdgeSource edgeSource = PARMETIS_EDGES_FROM_EDGE_WEIGHTS )
: algorithm_( algorithm ), weightsToUse_( weightsToUse ), edgeSource_( edgeSource ) {}
const EdgeSource edgeSource = PARMETIS_EDGES_FROM_EDGE_WEIGHTS,
const uint_t ncon = uint_t(1))
: algorithm_( algorithm ), weightsToUse_( weightsToUse ), edgeSource_( edgeSource ), ncon_( ncon ), ubvec_( ncon, real_t(1.05) ) {}
bool operator()( std::vector< std::pair< const PhantomBlock *, uint_t > > & targetProcess,
std::set< uint_t > & processesToRecvFrom,
......@@ -60,6 +62,10 @@ public:
void setipc2redist(double val) {ipc2redist_ = val;}
double getipc2redist() const {return ipc2redist_;}
void setImbalanceTolerances(const std::vector<double> & tolerances){ WALBERLA_ASSERT_EQUAL(tolerances.size(), ncon_); ubvec_ = tolerances; }
void setImbalanceTolerance(double tolerance, const uint_t con = 0){ WALBERLA_ASSERT_LESS(con, ncon_); ubvec_[con] = tolerance; }
double getImbalanceTolerance(const uint_t con = 0) const { WALBERLA_ASSERT_LESS(con, ncon_); return ubvec_[con]; }
bool edgeWeightsUsed() const { return ( weightsToUse_ == PARMETIS_EDGE_WEIGHTS ) || ( weightsToUse_ == PARMETIS_BOTH_WEIGHTS ); }
bool vertexWeightsUsed() const { return ( weightsToUse_ == PARMETIS_VERTEX_WEIGHTS ) || ( weightsToUse_ == PARMETIS_BOTH_WEIGHTS ); }
bool vertexSizeUsed() const { return algorithm_ == PARMETIS_ADAPTIVE_REPART; }
......@@ -77,6 +83,8 @@ protected:
WeightsToUse weightsToUse_;
EdgeSource edgeSource_;
uint_t ncon_ = uint_t(1); ///< number of constraints
std::vector<double> ubvec_; ///< imbalance tolerance for each constraint, value between 1 (perfect balance) and number of subdomains (= number of processes, perfect imbalance)
double ipc2redist_ = real_t( 1000.0 ); ///< compute repartitioning with low edge cut (set lower (down to 0.000001) to get minimal repartitioning )
};
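A sketch of how the new multi-constraint support might be used (values are illustrative; the registration hook and the shared_ptr "forest" are assumptions): construct the balancer with ncon = 2 and provide one imbalance tolerance per constraint. Note that PARMETIS_PART_GEOM only supports a single constraint, so a multi-constraint setup should use one of the other algorithms.

// Sketch: a ParMetis balancer with two load constraints per block,
// e.g. one weight for fluid work and one for particle work.
blockforest::DynamicParMetis parMetis( blockforest::DynamicParMetis::PARMETIS_ADAPTIVE_REPART,
                                       blockforest::DynamicParMetis::PARMETIS_BOTH_WEIGHTS,
                                       blockforest::DynamicParMetis::PARMETIS_EDGES_FROM_EDGE_WEIGHTS,
                                       uint_t(2) );       // ncon = 2
parMetis.setImbalanceTolerance( 1.05, uint_t(0) );        // tolerance for constraint 0
parMetis.setImbalanceTolerance( 1.10, uint_t(1) );        // tolerance for constraint 1

// registration as the migration preparation callback of the block forest refresh
// (hook name assumed from BlockForest, "forest" is an assumed shared_ptr)
forest->setRefreshPhantomBlockMigrationPreparationFunction( parMetis );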
......@@ -87,8 +95,8 @@ public:
typedef int64_t weight_t;
typedef int64_t vsize_t;
DynamicParMetisBlockInfo( const weight_t vertexWeight )
: vertexWeight_(vertexWeight), vertexSize_(1)
DynamicParMetisBlockInfo( const weight_t vertexWeight, const uint_t ncon = 1 )
: vertexWeight_(ncon, vertexWeight), vertexSize_(1)
{ }
DynamicParMetisBlockInfo( mpi::RecvBuffer & buffer )
......@@ -96,13 +104,35 @@ public:
buffer >> vertexWeight_ >> vertexSize_ >> vertexCoords_ >> edgeWeights_;
}
weight_t getVertexWeight() const { return vertexWeight_; }
void setVertexWeight( const weight_t vertexWeight ) { vertexWeight_ = vertexWeight; }
DynamicParMetisBlockInfo( const std::vector<weight_t> & vertexWeight )
: vertexWeight_(vertexWeight), vertexSize_(1)
{ }
// get the number of constraints (= number of vertex weights); default: 1, > 1 for multi-physics problems
uint_t getNcon() const
{
return vertexWeight_.size();
}
weight_t getVertexWeight( const uint_t con = 0 ) const
{
WALBERLA_ASSERT_LESS(con, getNcon());
return vertexWeight_[con];
}
void setVertexWeight( const weight_t vertexWeight, const uint_t con = 0 )
{
WALBERLA_ASSERT_LESS(con, getNcon());
vertexWeight_[con] = vertexWeight;
}
vsize_t getVertexSize() const { return vertexSize_; }
void setVertexSize( const vsize_t size ) { vertexSize_ = size; }
vsize_t getVertexSize( ) const { return vertexSize_; }
void setVertexSize( const vsize_t size) { vertexSize_ = size; }
const Vector3<double> & getVertexCoords() const { WALBERLA_ASSERT( !std::isnan(vertexCoords_[0]) && !std::isnan(vertexCoords_[1]) && !std::isnan(vertexCoords_[2]) ); return vertexCoords_; }
const Vector3<double> & getVertexCoords() const
{
WALBERLA_ASSERT( !std::isnan(vertexCoords_[0]) && !std::isnan(vertexCoords_[1]) && !std::isnan(vertexCoords_[2]) );
return vertexCoords_;
}
void setVertexCoords( const Vector3<double> & p ) { vertexCoords_ = p; }
void setEdgeWeight( const blockforest::BlockID & blockId, const weight_t edgeWeight ) { edgeWeights_[blockId] = edgeWeight; }
......@@ -117,8 +147,8 @@ public:
private:
weight_t vertexWeight_; /// Defines the weight of a block
vsize_t vertexSize_; /// Defines the cost of rebalancing a block
std::vector<weight_t> vertexWeight_; /// Defines the ncon weights of a block
vsize_t vertexSize_; /// Defines the cost of rebalancing a block. Needed by some ParMetis algorithms
Vector3<double> vertexCoords_ = Vector3<double>(std::numeric_limits<double>::signaling_NaN()); /// Defines where the block is located in space. Needed by some ParMetis algorithms
std::map< blockforest::BlockID, weight_t > edgeWeights_; /// Defines the cost of communication with other blocks
};
......
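The per-block information could then be assembled as follows (a sketch inside namespace walberla; the weight values are illustrative and "neighborId" is an assumed BlockID of a neighboring block):

// Sketch: block info carrying two vertex weights, one per constraint.
blockforest::DynamicParMetisBlockInfo info(
   std::vector< blockforest::DynamicParMetisBlockInfo::weight_t >{ 120, 35 } );
info.setVertexSize( 1 );                                   // cost of migrating this block
info.setVertexCoords( Vector3<double>( 0.5, 0.5, 0.5 ) );  // block center, needed by the geometric algorithms
info.setEdgeWeight( neighborId, 10 );                       // communication cost towards that neighbor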
......@@ -25,5 +25,8 @@
#include "Cartesian.h"
#include "DynamicCurve.h"
#include "DynamicDiffusive.h"
#include "DynamicParMetis.h"
#include "NoPhantomData.h"
#include "PODPhantomData.h"
#include "StaticCurve.h"
#include "StaticParMetis.h"
......@@ -129,7 +129,7 @@ int main( int argc, char ** argv )
walberla::MPIManager::instance()->initializeMPI( &argc, &argv );
walberla::MPIManager::instance()->useWorldComm();
std::vector<std::string> algs = {"PART_GEOM", "PART_GEOM_KWAY", "PART_KWAY", "PART_ADAPTIVE_REPART", "REFINE_KWAY"};
std::vector<std::string> algs = {"PART_GEOM", "PART_GEOM_KWAY", "PART_KWAY", "ADAPTIVE_REPART", "REFINE_KWAY"};
std::vector<std::string> wtu = {"NO_WEIGHTS", "EDGE_WEIGHTS", "VERTEX_WEIGHTS", "BOTH_WEIGHTS"};
std::vector<std::string> es = {"EDGES_FROM_FOREST", "EDGES_FROM_EDGE_WEIGHTS"};
......
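A sketch of the string-based construction exercised by the test above: stringToAlgorithm() appears in this diff, while stringToWeightsToUse() and stringToEdgeSource() are assumed to exist as analogous static converters.

// Sketch only: iterate over all string combinations and build a balancer from them.
for( const auto & a : algs )
   for( const auto & w : wtu )
      for( const auto & e : es )
      {
         blockforest::DynamicParMetis balancer( blockforest::DynamicParMetis::stringToAlgorithm( a ),
                                                blockforest::DynamicParMetis::stringToWeightsToUse( w ),
                                                blockforest::DynamicParMetis::stringToEdgeSource( e ) );
         // ... use "balancer" as the migration preparation callback of a refresh test
      }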