Commit f20daf76 authored by Marcel Koch

use distributed Ginkgo matrix and vector

parent 2d68f4dc
#pragma once
#include <memory>
+#include <optional>
#include <utility>
#include "hyteg/composites/StrongFreeSlipWrapper.hpp"
@@ -31,10 +32,12 @@
namespace hyteg {
-template < typename mtx, typename vec >
class DirichletHandlerBase
{
public:
+using vec = gko::distributed::Vector< real_t >;
+using mtx = gko::distributed::Matrix< real_t, int32_t >;
DirichletHandlerBase( std::vector< int32_t > bcIndices, const vec* dir_vals, mtx* matrix, bool doUpdate = true )
: bcIndices_( std::move( bcIndices ) )
, dir_vals_( dir_vals )
@@ -58,25 +61,47 @@ class DirichletHandlerBase
bool doUpdate_;
};
-template < typename mtx, typename vec >
-class PeneltyDirichletHandler : public DirichletHandlerBase< mtx, vec >
+template < typename T >
+std::optional< T > to_local_idx( T global_idx, const gko::distributed::Partition< T >* part, T rank )
+{
+auto local_start = part->get_const_range_bounds()[rank];
+auto local_end = part->get_const_range_bounds()[rank + 1];
+if ( local_start <= global_idx && global_idx < local_end )
+{
+return global_idx - local_start;
+}
+else
+{
+return {};
+}
+}
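// A usage sketch for to_local_idx (illustrative; assumes the partition holds
// exactly one contiguous range per rank, as built by
// gko::distributed::Partition<>::build_from_local_range further below): with
// range bounds {0, 4, 9}, rank 1 owns the global rows [4, 9), so
//    to_local_idx< gko::int32 >( 6, part, 1 );  // -> local index 2
//    to_local_idx< gko::int32 >( 2, part, 1 );  // -> empty optional, row 2 is owned by rank 0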
+class PeneltyDirichletHandler : public DirichletHandlerBase
{
public:
-using DirichletHandlerBase< mtx, vec >::DirichletHandlerBase;
+using DirichletHandlerBase::DirichletHandlerBase;
void update_matrix() override
{
if ( this->doUpdate_ )
{
-auto row_ptrs = this->matrix_->get_const_row_ptrs();
-auto cols = this->matrix_->get_const_col_idxs();
-auto vals = this->matrix_->get_values();
+auto local_mat = this->matrix_->get_local_diag();
+auto row_ptrs = local_mat->get_const_row_ptrs();
+auto cols = local_mat->get_const_col_idxs();
+auto vals = local_mat->get_values();
+auto rank = this->matrix_->get_communicator()->rank();
+auto part = this->matrix_->get_partition();
for ( auto idx : this->bcIndices_ )
{
-for ( int i = row_ptrs[idx]; i < row_ptrs[idx + 1]; ++i )
+if ( auto lidx = to_local_idx( idx, part, rank ); lidx )
{
-if ( idx == cols[i] )
-vals[i] += tgv;
+for ( int i = row_ptrs[*lidx]; i < row_ptrs[*lidx + 1]; ++i )
+{
+if ( *lidx == cols[i] )
+vals[i] += tgv;
+}
}
}
}
@@ -87,20 +112,30 @@ class PeneltyDirichletHandler : public DirichletHandlerBase< mtx, vec >
vec* get_initial_guess( const vec* cur_initial_guess ) override
{
initial_guess_ = gko::clone( cur_initial_guess );
+auto rank = this->matrix_->get_communicator()->rank();
+auto part = this->matrix_->get_partition();
for ( auto idx : this->bcIndices_ )
{
-initial_guess_->at( idx ) = this->dir_vals_->at( idx );
+if ( auto lidx = to_local_idx( idx, part, rank ); lidx )
+{
+initial_guess_->get_local()->at( *lidx ) = this->dir_vals_->get_local()->at( *lidx );
+}
}
return initial_guess_.get();
}
vec* get_rhs( const vec* cur_rhs, const vec* ) override
{
-rhs_ = gko::clone( cur_rhs );
+rhs_ = gko::clone( cur_rhs );
+auto rank = this->matrix_->get_communicator()->rank();
+auto part = this->matrix_->get_partition();
// use something like a scattered view + view->scale
for ( auto idx : this->bcIndices_ )
{
-rhs_->at( idx ) *= tgv;
+if ( auto lidx = to_local_idx( idx, part, rank ); lidx )
+{
+rhs_->get_local()->at( *lidx ) *= tgv;
+}
}
return rhs_.get();
}
@@ -111,29 +146,43 @@ class PeneltyDirichletHandler : public DirichletHandlerBase< mtx, vec >
double tgv = 1e30;
};
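// In brief, the penalty variant enforces a Dirichlet value g_i by shifting the
// diagonal, a_ii <- a_ii + tgv, and scaling the right-hand side entry,
// b_i <- tgv * g_i. Because tgv = 1e30 dwarfs every other entry, row i of the
// system effectively reads ( a_ii + tgv ) * x_i = tgv * g_i, hence x_i ~ g_i:
// the boundary value is enforced approximately, with an O( a_ii / tgv ) error.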
-template < typename mtx, typename vec >
-class ZeroRowsDirichletHandler : public DirichletHandlerBase< mtx, vec >
+class ZeroRowsDirichletHandler : public DirichletHandlerBase
{
using valueType = typename vec::value_type;
public:
-using DirichletHandlerBase< mtx, vec >::DirichletHandlerBase;
+using DirichletHandlerBase::DirichletHandlerBase;
void update_matrix() override
{
if ( this->doUpdate_ )
{
-auto row_ptrs = this->matrix_->get_const_row_ptrs();
-auto cols = this->matrix_->get_const_col_idxs();
-auto vals = this->matrix_->get_values();
+auto rank = this->matrix_->get_communicator()->rank();
+auto part = this->matrix_->get_partition();
+auto local_mat = this->matrix_->get_local_diag();
+auto row_ptrs = local_mat->get_const_row_ptrs();
+auto cols = local_mat->get_const_col_idxs();
+auto vals = local_mat->get_values();
+auto local_offdiag_mat = this->matrix_->get_local_offdiag();
+auto od_row_ptrs = local_offdiag_mat->get_const_row_ptrs();
+auto od_vals = local_offdiag_mat->get_values();
for ( auto idx : this->bcIndices_ )
{
-for ( int i = row_ptrs[idx]; i < row_ptrs[idx + 1]; ++i )
+if ( auto lidx = to_local_idx( idx, part, rank ); lidx )
{
-if ( idx != cols[i] )
-vals[i] = gko::zero< valueType >();
-else
-vals[i] = gko::one< valueType >();
+for ( int i = row_ptrs[*lidx]; i < row_ptrs[*lidx + 1]; ++i )
+{
+if ( *lidx != cols[i] )
+vals[i] = gko::zero< valueType >();
+else
+vals[i] = gko::one< valueType >();
+}
+for ( int i = od_row_ptrs[*lidx]; i < od_row_ptrs[*lidx + 1]; ++i )
+{
+od_vals[i] = gko::zero< valueType >();
+}
}
}
}
@@ -141,16 +190,25 @@ class ZeroRowsDirichletHandler : public DirichletHandlerBase< mtx, vec >
void update_solution( vec* cur_solution ) override
{
+auto rank = this->matrix_->get_communicator()->rank();
+auto part = this->matrix_->get_partition();
auto one = gko::initialize< gko::matrix::Dense< valueType > >( { 1 }, cur_solution->get_executor() );
cur_solution->add_scaled( one.get(), orig_initial_guess_ );
for ( auto idx : this->bcIndices_ )
{
-cur_solution->at( idx ) = this->dir_vals_->at( idx );
+if ( auto lidx = to_local_idx( idx, part, rank ); lidx )
+{
+cur_solution->get_local()->at( *lidx ) = this->dir_vals_->get_local()->at( *lidx );
+}
}
}
vec* get_rhs( const vec* cur_rhs, const vec* orig_initial_guess ) override
{
+auto rank = this->matrix_->get_communicator()->rank();
+auto part = this->matrix_->get_partition();
auto one = gko::initialize< gko::matrix::Dense< valueType > >( { 1 }, cur_rhs->get_executor() );
auto neg_one = gko::initialize< gko::matrix::Dense< valueType > >( { -1 }, cur_rhs->get_executor() );
@@ -158,7 +216,10 @@ class ZeroRowsDirichletHandler : public DirichletHandlerBase< mtx, vec >
this->matrix_->apply( neg_one.get(), orig_initial_guess, one.get(), rhs_.get() );
for ( auto idx : this->bcIndices_ )
{
-rhs_->at( idx ) = gko::zero< valueType >();
+if ( auto lidx = to_local_idx( idx, part, rank ); lidx )
+{
+rhs_->get_local()->at( *lidx ) = gko::zero< valueType >();
+}
}
return rhs_.get();
@@ -166,11 +227,17 @@ class ZeroRowsDirichletHandler : public DirichletHandlerBase< mtx, vec >
vec* get_initial_guess( const vec* cur_initial_guess ) override
{
+auto rank = this->matrix_->get_communicator()->rank();
+auto part = this->matrix_->get_partition();
orig_initial_guess_ = cur_initial_guess;
z_ = gko::clone( cur_initial_guess );
for ( auto idx : this->bcIndices_ )
{
-z_->at( idx ) = gko::zero< valueType >();
+if ( auto lidx = to_local_idx( idx, part, rank ); lidx )
+{
+z_->get_local()->at( *lidx ) = gko::zero< valueType >();
+}
}
return z_.get();
}
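// Taken together, the zero-row variant implements the standard lifting trick:
// the solve runs on z with z = 0 in constrained rows (get_initial_guess), the
// right-hand side becomes b - A * x0 with constrained entries zeroed (get_rhs),
// update_matrix turns constrained rows of the diagonal block into identity rows
// and zeroes the corresponding off-diagonal (inter-process) rows, and
// update_solution forms x = z + x0 and overwrites the constrained entries with
// the prescribed values, so the Dirichlet conditions hold exactly.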
@@ -188,6 +255,16 @@ enum class constraints
zero_row
};
+std::pair< int, int > local_range( const uint_t local_size, std::shared_ptr< gko::mpi::communicator > comm )
+{
+uint_t start = 0;
+uint_t end = 0;
+MPI_Exscan( &local_size, &start, 1, MPI_UINT64_T, MPI_SUM, comm->get() );
+MPI_Scan( &local_size, &end, 1, MPI_UINT64_T, MPI_SUM, comm->get() );
+return std::make_pair( start, end );
+}
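// A worked example of the two scans, assuming three ranks with local sizes
// {3, 5, 4}: MPI_Exscan yields the exclusive prefix sums {0, 3, 8} (the range
// starts) and MPI_Scan the inclusive prefix sums {3, 8, 12} (the range ends),
// so e.g. rank 1 owns the contiguous global index range [3, 8). start is
// pre-initialised to 0 because MPI_Exscan leaves the receive buffer undefined
// on rank 0.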
template < class OperatorType >
class GinkgoCGSolver : public Solver< OperatorType >
{
@@ -207,7 +284,7 @@ class GinkgoCGSolver : public Solver< OperatorType >
, level_( level )
, constraints_type_( constraints_type )
, host_exec_( solver_exec->get_master() )
-, solver_exec_( std::move(solver_exec) )
+, solver_exec_( std::move( solver_exec ) )
, num_( "numerator", storage, level, level )
{
auto rel_mode = constraints_type == constraints::penalty ? gko::stop::mode::initial_resnorm : gko::stop::mode::rhs_norm;
@@ -230,57 +307,88 @@ class GinkgoCGSolver : public Solver< OperatorType >
void solve( const OperatorType& A, const FunctionType& x, const FunctionType& b, const walberla::uint_t level ) override
{
-const auto num_local_dofs = numberOfLocalDoFs< typename FunctionType::Tag >( *storage_, level );
+const auto num_local_dofs = numberOfLocalDoFs< typename FunctionType::Tag >( *storage_, level );
+const auto num_global_dofs = numberOfGlobalDoFs< typename FunctionType::Tag >( *storage_, level );
// may be called in parallel, thus we need to keep this outside the guard for empty processes
num_.copyBoundaryConditionFromFunction( x );
num_.enumerate( level );
-if(num_local_dofs)
-{
-using mtx = gko::matrix::Csr< valueType, int32_t >;
-using vec = gko::matrix::Dense< valueType >;
+auto rank = walberla::mpi::MPIManager::instance()->rank();
+WALBERLA_LOG_INFO( "Rank " << rank << " -> "
+<< "start solve" );
+auto comm = gko::share( gko::mpi::communicator::create( walberla::mpi::MPIManager::instance()->comm() ) );
+auto [start, end] = local_range( num_local_dofs, comm );
+auto part = gko::share( gko::distributed::Partition<>::build_from_local_range( host_exec_, start, end, comm ) );
-auto x_vec = vec::create( host_exec_, gko::dim< 2 >{ num_local_dofs, 1 } );
-auto b_vec = vec::create( host_exec_, gko::dim< 2 >{ num_local_dofs, 1 } );
+WALBERLA_LOG_INFO( "Rank " << rank << " -> " << "local range " << start << "-" << end );
+if ( num_local_dofs )
+{
+using mtx = gko::distributed::Matrix< valueType, int32_t >;
+using vec = gko::distributed::Vector< valueType >;
-auto one = gko::initialize< vec >( { 1 }, host_exec_ );
-auto neg_one = gko::initialize< vec >( { -1 }, host_exec_ );
+auto x_vec = vec::create( host_exec_, comm, gko::dim< 2 >{ num_global_dofs, 1 }, gko::dim< 2 >{ num_local_dofs, 1 } );
+auto b_vec = vec::create( host_exec_, comm, gko::dim< 2 >{ num_global_dofs, 1 }, gko::dim< 2 >{ num_local_dofs, 1 } );
x.getStorage()->getTimingTree()->start( "Ginkgo CG Solver" );
std::vector< int32_t > bcIndices;
hyteg::petsc::applyDirichletBC( num_, bcIndices, level );
-std::sort( begin( bcIndices ), end( bcIndices ) );
+std::sort( std::begin( bcIndices ), std::end( bcIndices ) );
+WALBERLA_LOG_INFO( "Rank " << rank << " -> "
+<< "start vector assembly" );
hyteg::petsc::createVectorFromFunction(
-x, num_, std::make_shared< GinkgoVectorProxy >( gko::lend( x_vec ) ), level, All );
+x,
+num_,
+std::make_shared< GinkgoVectorProxy >( gko::lend( x_vec ), gko::dim< 2 >{ num_global_dofs, 1 }, part ),
+level,
+All );
hyteg::petsc::createVectorFromFunction(
-b, num_, std::make_shared< GinkgoVectorProxy >( gko::lend( b_vec ) ), level, All );
+b,
+num_,
+std::make_shared< GinkgoVectorProxy >( gko::lend( b_vec ), gko::dim< 2 >{ num_global_dofs, 1 }, part ),
+level,
+All );
+WALBERLA_LOG_INFO( "Rank " << rank << " -> "
+<< "end vector assembly" );
// Todo: add check if assembly is necessary
const bool doAssemble = !matrix_ || reassembleMatrix_;
if ( doAssemble )
{
WALBERLA_LOG_INFO( "Rank " << rank << " -> "
<< "start matrix assembly" );
x.getStorage()->getTimingTree()->start( "Ginkgo System Matrix Assembly" );
+matrix_ = gko::share( mtx::create( host_exec_, comm ) );
auto matrix_proxy =
-std::make_shared< GinkgoSparseMatrixProxy< mtx > >( host_exec_, gko::dim< 2 >{ num_local_dofs, num_local_dofs } );
+std::make_shared< GinkgoSparseMatrixProxy< mtx > >( matrix_.get(),
+gko::dim< 2 >{ num_global_dofs, num_global_dofs },
+part );
hyteg::petsc::createMatrix< OperatorType >( A, num_, num_, matrix_proxy, level, All );
-matrix_ = matrix_proxy->finalize();
+matrix_proxy->finalize();
x.getStorage()->getTimingTree()->stop( "Ginkgo System Matrix Assembly" );
+WALBERLA_LOG_INFO( "Rank " << rank << " -> "
+<< "end matrix assembly" );
}
+WALBERLA_LOG_INFO( "Rank " << rank << " -> "
+<< "start dirichlet handling" );
-std::unique_ptr< DirichletHandlerBase< mtx, vec > > dir_handler;
+std::unique_ptr< DirichletHandlerBase > dir_handler;
if ( constraints_type_ == constraints::penalty )
{
-dir_handler =
-std::make_unique< PeneltyDirichletHandler< mtx, vec > >( bcIndices, b_vec.get(), matrix_.get(), doAssemble );
+dir_handler = std::make_unique< PeneltyDirichletHandler >( bcIndices, b_vec.get(), matrix_.get(), doAssemble );
}
else if ( constraints_type_ == constraints::zero_row )
{
-dir_handler =
-std::make_unique< ZeroRowsDirichletHandler< mtx, vec > >( bcIndices, b_vec.get(), matrix_.get(), doAssemble );
+dir_handler = std::make_unique< ZeroRowsDirichletHandler >( bcIndices, b_vec.get(), matrix_.get(), doAssemble );
}
else
{
@@ -290,27 +398,39 @@ class GinkgoCGSolver : public Solver< OperatorType >
dir_handler->update_matrix();
auto rhs = dir_handler->get_rhs( b_vec.get(), x_vec.get() );
auto x0 = dir_handler->get_initial_guess( x_vec.get() );
+WALBERLA_LOG_INFO( "Rank " << rank << " -> "
+<< "end dirichlet handling" );
if ( !solver_ || doAssemble )
{
+WALBERLA_LOG_INFO( "Rank " << rank << " -> "
+<< "start solver setup" );
x.getStorage()->getTimingTree()->start( "Ginkgo CG Solver Set-Up" );
-auto host_matrix = matrix_;
-matrix_ = mtx::create( solver_exec_ );
-host_matrix->move_to( gko::lend( matrix_ ) );
-auto par_ilu = gko::factorization::Ilu< valueType, int32_t >::build().on( solver_exec_ )->generate( matrix_ );
-auto ilu = gko::preconditioner::Ilu<>::build().on( solver_exec_ )->generate( gko::share( par_ilu ) );
+//auto par_ilu = gko::factorization::Ilu< valueType, int32_t >::build().on( solver_exec_ )->generate( matrix_ );
+//auto ilu = gko::preconditioner::Ilu<>::build().on( solver_exec_ )->generate( gko::share( par_ilu ) );
solver_ = solver_factory_->generate( matrix_ );
-solver_->set_preconditioner( gko::share( ilu ) );
+//solver_->set_preconditioner( gko::share( ilu ) );
x.getStorage()->getTimingTree()->stop( "Ginkgo CG Solver Set-Up" );
+WALBERLA_LOG_INFO( "Rank " << rank << " -> "
+<< "end solver setup" );
}
+WALBERLA_LOG_INFO( "Rank " << rank << " -> "
+<< "start solver apply" );
x.getStorage()->getTimingTree()->start( "Ginkgo CG Solver Apply" );
solver_->apply( gko::lend( rhs ), gko::lend( x0 ) );
x.getStorage()->getTimingTree()->stop( "Ginkgo CG Solver Apply" );
+WALBERLA_LOG_INFO( "Rank " << rank << " -> "
+<< "end solver apply" );
dir_handler->update_solution( x0 );
-hyteg::petsc::createFunctionFromVector( x, num_, std::make_shared< GinkgoVectorProxy >( x0 ), level, All );
+hyteg::petsc::createFunctionFromVector(
+x, num_, std::make_shared< GinkgoVectorProxy >( x0, gko::dim< 2 >{ num_global_dofs, 1 }, part ), level, All );
x.getStorage()->getTimingTree()->stop( "Ginkgo CG Solver" );
@@ -318,12 +438,16 @@ class GinkgoCGSolver : public Solver< OperatorType >
{
auto log = gko::as< gko::log::Convergence< valueType > >( solver_->get_stop_criterion_factory()->get_loggers()[0] );
WALBERLA_LOG_INFO_ON_ROOT( "[Ginkgo CG]" << ( !log->has_converged() ? " NOT " : " " ) << "converged after "
<< log->get_num_iterations() << " iterations, residual norm: "
<< solver_exec_->copy_val_to_host(
gko::as< vec >( log->get_residual_norm() )->get_const_values() ) );
WALBERLA_LOG_INFO_ON_ROOT( "[Ginkgo CG]"
<< ( !log->has_converged() ? " NOT " : " " ) << "converged after "
<< log->get_num_iterations() << " iterations, residual norm: "
<< solver_exec_->copy_val_to_host(
gko::as <
gko::matrix::Dense< valueType >>( log->get_residual_norm() )->get_const_values() ) );
}
}
+WALBERLA_LOG_INFO( "Rank " << rank << " -> "
+<< "end solve" );
}
void setPrintInfo( bool printInfo ) { printInfo_ = printInfo; }
@@ -343,10 +467,10 @@ class GinkgoCGSolver : public Solver< OperatorType >
std::unique_ptr< typename gko::solver::Cg< valueType >::Factory > solver_factory_;
std::unique_ptr< typename gko::solver::Cg< valueType > > solver_;
-std::shared_ptr< gko::matrix::Csr< valueType, int32_t > > matrix_;
+std::shared_ptr< gko::distributed::Matrix< valueType, int32_t > > matrix_;
bool printInfo_ = false;
bool reassembleMatrix_ = false;
};
-}
\ No newline at end of file
+} // namespace hyteg
\ No newline at end of file
@@ -5,37 +5,166 @@
#include "hyteg/sparseassembly/SparseMatrixProxy.hpp"
#include "ginkgo/core/base/matrix_assembly_data.hpp"
#include "ginkgo/core/base/mpi.hpp"
#include "ginkgo/core/distributed/matrix.hpp"
#include "ginkgo/core/distributed/partition.hpp"
#include "ginkgo/core/matrix/csr.hpp"
-namespace hyteg{
+namespace hyteg {
+template < typename ValueType, typename IndexType >
+auto build_send_buffer( const gko::matrix_data< ValueType, IndexType >& data,
+std::shared_ptr< gko::distributed::Partition<> > partition,
+std::shared_ptr< gko::mpi::communicator > comm )
+{
+using nonzero = gko::matrix_data_entry< ValueType, IndexType >;
+auto local_part = comm->rank();
+auto range_bounds = partition->get_const_range_bounds();
+auto range_parts = partition->get_const_part_ids();
+auto num_ranges = partition->get_num_ranges();
+auto find_part = [&]( IndexType idx ) {
+auto it = std::upper_bound( range_bounds + 1, range_bounds + num_ranges + 1, idx );
+auto range = std::distance( range_bounds + 1, it );
+return range_parts[range];
+};
+std::vector< std::pair< gko::distributed::comm_index_type, nonzero > > send_buffer_local;
+for ( size_t i = 0; i < data.nonzeros.size(); ++i )
+{
+auto entry = data.nonzeros[i];
+auto p_id = find_part( entry.row );
+if ( p_id != local_part )
+{
+send_buffer_local.emplace_back( p_id, entry );
+}
+}
+std::sort( std::begin( send_buffer_local ), std::end( send_buffer_local ), []( const auto& a, const auto& b ) {
+return a.first < b.first;
+} );
+return send_buffer_local;
+}
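// How find_part above resolves ownership, on an example: for range bounds
// {0, 4, 9} (num_ranges = 2), a nonzero in global row 5 gives
// upper_bound( {4, 9}, 5 ) -> the element 9, i.e. range index 1, so the owner
// is range_parts[1]. Only nonzeros whose row belongs to another part are
// collected for sending; entries in locally owned rows are left in place.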
+template < typename ValueType, typename IndexType >
+auto build_send_pattern(
+const std::vector< std::pair< gko::distributed::comm_index_type, gko::matrix_data_entry< ValueType, IndexType > > >&
+send_buffer,
+std::shared_ptr< gko::distributed::Partition<> > partition )
+{
+using comm_index_type = gko::distributed::comm_index_type;
+auto num_parts = partition->get_num_parts();
+std::vector< comm_index_type > send_sizes( num_parts, 0 );
+std::vector< comm_index_type > send_offsets( num_parts + 1, 0 );
+for ( auto& [p_id, entry] : send_buffer )
+{
+send_sizes[p_id]++;
+}
+std::partial_sum( std::begin( send_sizes ), std::end( send_sizes ), std::begin( send_offsets ) + 1 );
+return std::make_tuple( send_sizes, send_offsets );
+}
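// Example of the resulting send pattern, assuming 3 parts and send-buffer
// counts {2, 0, 3}: the exclusive prefix sum gives the offsets {0, 2, 2, 5},
// i.e. entries [0, 2) go to part 0 and entries [2, 5) to part 2, which is
// exactly the (counts, displacements) pair a variable-sized MPI all-to-all
// exchange expects.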
+auto build_receive_pattern( const std::vector< gko::distributed::comm_index_type >& send_sizes,
+std::shared_ptr< gko::distributed::Partition<> > partition,
+std::shared_ptr< gko::mpi::communicator > comm )
+{
+using comm_index_type = gko::distributed::comm_index_type;
+auto num_parts = partition->get_num_parts();
+std::vector< comm_index_type > recv_sizes( num_parts, 0 );
+std::vector< comm_index_type > recv_offsets( num_parts + 1, 0 );
+gko::mpi::all_to_all( send_sizes.data(), 1, recv_sizes.data(), 1, comm );
+std::partial_sum( std::begin( recv_sizes ), std::end( recv_sizes ), std::begin( recv_offsets ) + 1 );
+return std::make_tuple( recv_sizes, recv_offsets );
+}
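// The single-entry all_to_all transposes the size information: afterwards,
// recv_sizes[p] on rank r equals the send_sizes[r] that was computed on rank p,
// so every rank knows how many overlap entries to expect from each peer before
// the variable-sized exchange in communicate_overlap below.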
+template < typename ValueType, typename IndexType >
+auto split_nonzero_entries(
+const std::vector< std::pair< gko::distributed::comm_index_type, gko::matrix_data_entry< ValueType, IndexType > > >
+send_buffer )
+{
+const auto size = send_buffer.size();
+std::vector< IndexType > row_buffer( size );
+std::vector< IndexType > col_buffer( size );
+std::vector< ValueType > val_buffer( size );
+for ( size_t i = 0; i < size; ++i )
+{
+const auto& entry = send_buffer[i].second;
+row_buffer[i] = entry.row;
+col_buffer[i] = entry.column;
+val_buffer[i] = entry.value;
+}
+return std::make_tuple( row_buffer, col_buffer, val_buffer );
+}
+template < typename ValueType, typename IndexType >
+void communicate_overlap( gko::matrix_assembly_data< ValueType, IndexType >& data,
+std::shared_ptr< gko::distributed::Partition<> > partition,
+std::shared_ptr< gko::mpi::communicator > comm )
+{
+auto ordered_data = data.get_ordered_data();
+auto send_buffer = build_send_buffer( ordered_data, partition, comm );
+// build send pattern
+auto [send_sizes, send_offsets] = build_send_pattern< ValueType, IndexType >( send_buffer, partition );
+// build receive pattern
+auto [recv_sizes, recv_offsets] = build_receive_pattern( send_sizes, partition, comm );
+// split nonzero entries into buffers
+auto [send_row, send_col, send_val] = split_nonzero_entries< ValueType, IndexType >( send_buffer );
+// communicate buffers
+const auto size_recv_entries = recv_offsets.back();
+std::vector< IndexType > recv_row( size_recv_entries );
+std::vector< IndexType > recv_col( size_recv_entries );
+std::vector< ValueType > recv_val( size_recv_entries );
+#define HYTEG_GKO_MPI_SEND_ALL( sb, rb ) \
+gko::mpi::all_to_all( \
+( sb ).data(), send_sizes.data(), send_offsets.data(), ( rb ).data(), recv_sizes.data(), recv_offsets.data(), 1, comm )
+HYTEG_GKO_MPI_SEND_ALL( send_row, recv_row );
+HYTEG_GKO_MPI_SEND_ALL( send_col, recv_col );
+HYTEG_GKO_MPI_SEND_ALL( send_val, recv_val );
+#undef HYTEG_GKO_MPI_SEND_ALL
+// add new entries
+for ( size_t i = 0; i < size_recv_entries; ++i )
+{
+data.add_value( recv_row[i], recv_col[i], recv_val[i] );
+}
+}
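// A usage sketch, under the assumptions of this file: after every rank has
// inserted its locally assembled contributions (including those for rows it
// does not own) into `data`, a single collective call
//
//    communicate_overlap( data, partition, comm );
//
// ships each off-rank contribution to the owner of its row. Since
// matrix_assembly_data::add_value accumulates into existing entries, duplicate
// ( row, column ) contributions arriving from several ranks are summed, as
// required for finite-element assembly.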
template < typename mtx >
class GinkgoSparseMatrixProxy : public SparseMatrixProxy
{
public:
-explicit GinkgoSparseMatrixProxy( const std::shared_ptr< gko::Executor >& exec, const gko::dim< 2 > size )