Commit 32050e1e authored by Martin Bauer's avatar Martin Bauer

Merge branch 'flatten' into 'master'

flattened shallow copy of field

See merge request !258
parents 07d6098a 0248a0fb
Pipeline #22783 failed with stages
in 533 minutes and 15 seconds
......@@ -35,15 +35,22 @@ namespace walberla {
*/
//**********************************************************************************************************************
template< typename T >
template< typename T, class Enable = void >
struct VectorTrait
{
typedef void OutputType;
static const uint_t F_SIZE = 0u;
};
//**********************************************************************************************************************
/*! VectorTrait specialization for arithmetic types.
*
*   An arithmetic scalar is its own flattened representation: exactly one component,
*   accessed directly regardless of the component index f.
*   The previous static_assert on is_arithmetic was dead code — the enable_if condition
*   already guarantees it for this specialization — and its message referenced the
*   wrong trait name ("OutputTrait"), so it has been removed.
*/
//**********************************************************************************************************************
template< typename T >
struct VectorTrait<T, typename std::enable_if<std::is_arithmetic<T>::value>::type>
{
   typedef T OutputType;
   static const uint_t F_SIZE = 1u;
   static T get ( T value, uint_t /*f*/ ) { return value; }
   static void set( T & value, uint_t /*f*/, T val ) { value = val; }
};
......
......@@ -174,8 +174,8 @@ public:
inline const Matrix3 getCholesky() const;
template< typename Other > inline const Vector3<HIGH> solve( const Vector3<Other> &rhs ) const;
inline Type trace() const;
inline Type* data() {return v_;}
inline Type const * data() const {return v_;}
inline Type* data() {return v_.data();}
inline Type const * data() const {return v_.data();}
//@}
//*******************************************************************************************************************
......
......@@ -246,6 +246,36 @@ BlockDataID addCloneToStorage( const shared_ptr< BlockStorage_T > & blocks,
//**********************************************************************************************************************
/*! Adds a flattened shallow copy of an existing field to BlockStorage
*
* Template parameters:
* Field_T the type of the field that should be cloned and flattened
* BlockStorage_T the type of the BlockStorage ( will be deduced automatically )
*
* Parameters:
* \param blocks BlockStorage where the original field is stored and the new one is created
* \param fieldToClone BlockDataID of the Field that is cloned
* \param identifier name for the new field ( displayed in GUI and debugging functions )
*/
//**********************************************************************************************************************
template< typename Field_T, typename BlockStorage_T >
BlockDataID addFlattenedShallowCopyToStorage( const shared_ptr< BlockStorage_T > & blocks,
                                              ConstBlockDataID fieldToClone,
                                              const std::string & identifier,
                                              const Set<SUID> & requiredSelectors = Set<SUID>::emptySet(),
                                              const Set<SUID> & incompatibleSelectors = Set<SUID>::emptySet() )
{
   // Data handling that, per block, creates a flattened field sharing the
   // memory of the field registered under fieldToClone.
   auto dataHandling = make_shared< field::FlattenedShallowCopyBlockDataHandling< Field_T > >( fieldToClone );
   return blocks->addBlockData( dataHandling, identifier, requiredSelectors, incompatibleSelectors );
}
//**********************************************************************************************************************
/*! BlockDataCreator for fields
*
......
......@@ -86,6 +86,11 @@ namespace field {
typedef FieldPointer<Field<T,fSize_>, Field<T,fSize_>, T > Ptr;
typedef FieldPointer<Field<T,fSize_>, const Field<T,fSize_>, const T > ConstPtr;
typedef typename std::conditional<VectorTrait<T>::F_SIZE!=0,
Field<typename VectorTrait<T>::OutputType, VectorTrait<T>::F_SIZE*fSize_>,
Field<T, fSize_>
>::type FlattenedField;
static const uint_t F_SIZE = fSize_;
//@}
//****************************************************************************************************************
......@@ -119,6 +124,7 @@ namespace field {
Field<T,fSize_> * clone() const;
Field<T,fSize_> * cloneUninitialized() const;
Field<T,fSize_> * cloneShallowCopy() const;
FlattenedField * flattenedShallowCopy() const;
//@}
//****************************************************************************************************************
......@@ -320,10 +326,13 @@ namespace field {
/*! \name Shallow Copy */
//@{
Field(const Field & other);
template <typename T2, uint_t fSize2>
Field(const Field<T2, fSize2> & other);
virtual uint_t referenceCount() const;
virtual Field<T,fSize_> * cloneShallowCopyInternal() const;
virtual FlattenedField * flattenedShallowCopyInternal() const;
//@}
//****************************************************************************************************************
......@@ -373,6 +382,8 @@ namespace field {
friend class FieldIterator<T,fSize_>;
friend class FieldIterator<const T,fSize_>;
template <typename T2, uint_t fSize2>
friend class Field;
static_assert(fSize_ > 0, "fSize()=0 means: empty field");
......
......@@ -143,6 +143,21 @@ namespace field {
}
//*******************************************************************************************************************
/*! Returns a flattened shallow copy of the current field.
*
* Shallow copy means, that the new field internally uses the same memory as this field.
* Flattened means that any VectorTrait-compatible containers are absorbed into the fSize.
*
* \return a new field, that has to be freed by caller
*******************************************************************************************************************/
template<typename T, uint_t fSize_>
typename Field<T,fSize_>::FlattenedField * Field<T,fSize_>::flattenedShallowCopy() const
{
   // Delegate to the virtual internal variant so derived classes
   // (e.g. GhostLayerField) can construct the correct dynamic type.
   FlattenedField * copy = flattenedShallowCopyInternal();
   return copy;
}
//*******************************************************************************************************************
/*!\brief Does the same as cloneShallowCopy (but is virtual)
*
......@@ -156,6 +171,19 @@ namespace field {
return new Field<T,fSize_>(*this) ;
}
//*******************************************************************************************************************
/*!\brief Does the same as flattenedShallowCopy (but is virtual)
*
* This version has to be implemented by derived classes. The flattenedShallowCopy() itself cannot be
* virtual, since the implementation of flattenedShallowCopy() of derived classes has a different return type.
*
*******************************************************************************************************************/
template<typename T, uint_t fSize_>
typename Field<T,fSize_>::FlattenedField * Field<T,fSize_>::flattenedShallowCopyInternal() const
{
   // Invoke the private flattening copy constructor; ownership of the
   // returned field passes to the caller.
   FlattenedField * flattened = new FlattenedField( *this );
   return flattened;
}
//*******************************************************************************************************************
/*! Creates a new field that has equal size and layout as this field. The memory of the new
* field is uninitialized.
......@@ -226,6 +254,40 @@ namespace field {
}
//*******************************************************************************************************************
/*! Private copy constructor that creates a flattened shallow copy
* i.e. reuses the memory of the copied field
*******************************************************************************************************************/
template<typename T, uint_t fSize_>
template <typename T2, uint_t fSize2>
Field<T,fSize_>::Field( const Field<T2,fSize2> & other )
   // Reuse the source field's memory: T2 is a vector-like element type whose
   // data() exposes its components as a contiguous array of T.
   : values_ ( other.values_[0].data() ),
   valuesWithOffset_ ( other.valuesWithOffset_[0].data() ),
   xOff_ ( other.xOff_),
   yOff_ ( other.yOff_),
   zOff_ ( other.zOff_),
   xSize_ ( other.xSize_ ),
   ySize_ ( other.ySize_ ),
   zSize_ ( other.zSize_ ),
   xAllocSize_ ( other.xAllocSize_ ),
   yAllocSize_ ( other.yAllocSize_ ),
   zAllocSize_ ( other.zAllocSize_ ),
   // fSize_/fSize2 is the expansion factor: each source element contributes
   // that many scalar components to the flattened field.
   fAllocSize_ ( other.fAllocSize_*fSize_/fSize2 ),
   layout_ ( other.layout_ ),
   allocSize_ ( other.allocSize_*fSize_/fSize2 ),
   // ffact_ stays: layout is checked to be zyxf below, so f is the
   // fastest-varying index and the new scalar components are contiguous.
   ffact_ ( other.ffact_ ),
   // x/y/z strides grow by the expansion factor, since every cell now
   // holds fSize_/fSize2 times as many scalars.
   xfact_ ( other.xfact_*cell_idx_t(fSize_/fSize2) ),
   yfact_ ( other.yfact_*cell_idx_t(fSize_/fSize2) ),
   zfact_ ( other.zfact_*cell_idx_t(fSize_/fSize2) ),
   // Aliasing shared_ptr: shares ownership (lifetime) with other.allocator_ but
   // views it as FieldAllocator<T>.
   // NOTE(review): the reinterpret_cast assumes FieldAllocator<T> and
   // FieldAllocator<T2> are layout-compatible and share one reference-count map
   // keyed on the scalar type — confirm against FieldAllocator's BaseType scheme.
   allocator_ ( std::shared_ptr<FieldAllocator<T>>(other.allocator_, reinterpret_cast<FieldAllocator<T>*>(other.allocator_.get())) )
{
   // Flattening relies on zyxf layout (components consecutive in memory).
   WALBERLA_CHECK_EQUAL(layout_, Layout::zyxf);
   static_assert(fSize_ % fSize2 == 0, "number of field components do not match");
   static_assert(std::is_same<typename Field<T2,fSize2>::FlattenedField, Field<T,fSize_>>::value, "field types are incompatible for flattening");
   // Register this shallow copy so the shared memory is freed only after
   // every copy has been destroyed.
   allocator_->incrementReferenceCount ( values_ );
}
//*******************************************************************************************************************
/*! Initializes the field with a given size, in a given layout
*
......
......@@ -66,6 +66,11 @@ namespace field {
typedef typename Field<T,fSize_>::Ptr Ptr;
typedef typename Field<T,fSize_>::ConstPtr ConstPtr;
typedef typename std::conditional<VectorTrait<T>::F_SIZE!=0,
GhostLayerField<typename VectorTrait<T>::OutputType, VectorTrait<T>::F_SIZE*fSize_>,
GhostLayerField<T, fSize_>
>::type FlattenedField;
//@}
//****************************************************************************************************************
......@@ -105,6 +110,7 @@ namespace field {
inline GhostLayerField<T,fSize_> * clone() const;
inline GhostLayerField<T,fSize_> * cloneUninitialized() const;
inline GhostLayerField<T,fSize_> * cloneShallowCopy() const;
inline FlattenedField * flattenedShallowCopy() const;
//@}
//****************************************************************************************************************
......@@ -209,11 +215,15 @@ namespace field {
/*! \name Shallow Copy */
//@{
virtual Field<T,fSize_> * cloneShallowCopyInternal() const;
virtual typename Field<T,fSize_>::FlattenedField * flattenedShallowCopyInternal() const;
GhostLayerField(const GhostLayerField<T,fSize_> & other);
template <typename T2, uint_t fSize2>
GhostLayerField(const GhostLayerField<T2, fSize2> & other);
//@}
//****************************************************************************************************************
template <typename T2, uint_t fSize2>
friend class GhostLayerField;
};
} // namespace field
......
......@@ -696,6 +696,17 @@ namespace field {
{
}
//*******************************************************************************************************************
/*!\brief Private copy constructor, which does a flattened shallow copy
*******************************************************************************************************************/
template<typename T, uint_t fSize_>
template <typename T2, uint_t fSize2>
GhostLayerField<T,fSize_>::GhostLayerField(const GhostLayerField<T2,fSize2> & other)
   // Flattening shallow copy of the bulk data is done by Field's private
   // flattening constructor; only the ghost-layer count is carried over here.
   : Field<T,fSize_>::Field(other),
   gl_( other.gl_ )
{
}
//*******************************************************************************************************************
/*!\brief See Field::cloneShallowCopyInternal()
* Has to be re-implemented because a new GhostLayerField is created
......@@ -706,6 +717,16 @@ namespace field {
return new GhostLayerField<T,fSize_>(*this);
}
//*******************************************************************************************************************
/*!\brief See Field::flattenedShallowCopyInternal()
* Has to be re-implemented because a new GhostLayerField is created
*******************************************************************************************************************/
template<typename T, uint_t fSize_>
typename Field<T,fSize_>::FlattenedField * GhostLayerField<T,fSize_>::flattenedShallowCopyInternal() const
{
   // Unqualified FlattenedField names GhostLayerField's own flattened type,
   // which derives from Field<T,fSize_>::FlattenedField, so the implicit
   // upcast in the return is safe.
   return new FlattenedField( *this );
}
template<typename T, uint_t fSize_>
GhostLayerField<T,fSize_> * GhostLayerField<T,fSize_>::clone() const
......@@ -725,6 +746,12 @@ namespace field {
return dynamic_cast<GhostLayerField<T,fSize_>* > (Field<T,fSize_>::cloneShallowCopy() );
}
template<typename T, uint_t fSize_>
typename GhostLayerField<T,fSize_>::FlattenedField * GhostLayerField<T,fSize_>::flattenedShallowCopy() const
{
   // The base implementation returns a pointer to Field's FlattenedField, the
   // base class of our ghost-layer-aware FlattenedField; downcast to recover it.
   auto * base = Field<T,fSize_>::flattenedShallowCopy();
   return dynamic_cast< FlattenedField * >( base );
}
template<typename T, uint_t fSize_>
void GhostLayerField<T,fSize_>::slice( const CellInterval & interval )
{
......
......@@ -25,6 +25,7 @@
#include "AlignedMalloc.h"
#include "core/debug/Debug.h"
#include "field/CMakeDefs.h"
#include "core/VectorTrait.h"
#include <map>
#include <new>
......@@ -34,6 +35,13 @@ namespace walberla {
namespace field {
template <typename T>
class FieldAllocatorBase
{
protected:
static std::map<T*, uint_t> referenceCounts_;
};
//*******************************************************************************************************************
/*! Allocation Strategy base class for fields
*
......@@ -44,9 +52,10 @@ namespace field {
*/
//*******************************************************************************************************************
template<typename T>
class FieldAllocator
class FieldAllocator : FieldAllocatorBase<typename std::conditional<VectorTrait<T>::F_SIZE!=0, typename VectorTrait<T>::OutputType, T>::type>
{
public:
using BaseType = typename std::conditional<VectorTrait<T>::F_SIZE!=0, typename VectorTrait<T>::OutputType, T>::type;
virtual ~FieldAllocator() = default;
......@@ -65,14 +74,15 @@ namespace field {
uint_t & allocSize1, uint_t & allocSize2, uint_t & allocSize3)
{
T * mem = allocateMemory(size0,size1,size2,size3,allocSize1,allocSize2,allocSize3);
BaseType * bmem = reinterpret_cast<BaseType *>(mem);
#ifdef WALBERLA_THREAD_SAFE_FIELD_ALLOCATION
#ifdef _OPENMP
#pragma omp critical( walberla_field_allocator_refcount )
#endif
#endif
{
WALBERLA_ASSERT( referenceCounts_.find(mem) == referenceCounts_.end() || referenceCounts_[mem] == 0 );
referenceCounts_[mem] = 1;
WALBERLA_ASSERT( referenceCounts_.find(bmem) == referenceCounts_.end() || referenceCounts_[bmem] == 0 );
referenceCounts_[bmem] = 1;
}
return mem;
}
......@@ -89,6 +99,7 @@ namespace field {
T * allocate ( uint_t allocSize )
{
T * mem = allocateMemory( allocSize );
BaseType * bmem = reinterpret_cast<BaseType *>(mem);
#ifdef WALBERLA_THREAD_SAFE_FIELD_ALLOCATION
#ifdef _OPENMP
......@@ -96,8 +107,8 @@ namespace field {
#endif
#endif
{
WALBERLA_ASSERT( referenceCounts_.find(mem) == referenceCounts_.end() || referenceCounts_[mem] == 0 );
referenceCounts_[mem] = 1;
WALBERLA_ASSERT( referenceCounts_.find(bmem) == referenceCounts_.end() || referenceCounts_[bmem] == 0 );
referenceCounts_[bmem] = 1;
}
return mem;
}
......@@ -112,15 +123,17 @@ namespace field {
*/
void incrementReferenceCount( T * mem )
{
BaseType * bmem = reinterpret_cast<BaseType *>(mem);
#ifdef WALBERLA_THREAD_SAFE_FIELD_ALLOCATION
#ifdef _OPENMP
#pragma omp critical( walberla_field_allocator_refcount )
#endif
#endif
{
WALBERLA_ASSERT( referenceCounts_.find(mem) != referenceCounts_.end() );
WALBERLA_ASSERT_GREATER( referenceCounts_[mem], 0 );
referenceCounts_[mem]++;
WALBERLA_ASSERT( referenceCounts_.find(bmem) != referenceCounts_.end() );
WALBERLA_ASSERT_GREATER( referenceCounts_[bmem], 0 );
referenceCounts_[bmem]++;
}
}
......@@ -136,6 +149,7 @@ namespace field {
*/
bool decrementReferenceCount( T * mem )
{
BaseType * bmem = reinterpret_cast<BaseType *>(mem);
bool memoryFreed = false;
uint_t refCount = 0;
......@@ -146,10 +160,10 @@ namespace field {
#endif
#endif
{
WALBERLA_ASSERT( referenceCounts_.find(mem) != referenceCounts_.end() );
WALBERLA_ASSERT_GREATER( referenceCounts_[mem], 0 );
WALBERLA_ASSERT( referenceCounts_.find(bmem) != referenceCounts_.end() );
WALBERLA_ASSERT_GREATER( referenceCounts_[bmem], 0 );
refCount = --referenceCounts_[mem];
refCount = --referenceCounts_[bmem];
}
if( refCount == 0 ) {
......@@ -163,6 +177,7 @@ namespace field {
uint_t referenceCount ( T * mem ) const
{
BaseType * bmem = reinterpret_cast<BaseType *>(mem);
uint_t refCount = 0;
#ifdef WALBERLA_THREAD_SAFE_FIELD_ALLOCATION
#ifdef _OPENMP
......@@ -170,8 +185,8 @@ namespace field {
#endif
#endif
{
WALBERLA_ASSERT( referenceCounts_.find(mem) != referenceCounts_.end() );
refCount = referenceCounts_[mem];
WALBERLA_ASSERT( referenceCounts_.find(bmem) != referenceCounts_.end() );
refCount = referenceCounts_[bmem];
}
return refCount;
......@@ -198,12 +213,11 @@ namespace field {
virtual void deallocate( T *& values ) = 0;
private:
static std::map<T*, uint_t> referenceCounts_;
using FieldAllocatorBase<BaseType>::referenceCounts_;
};
template<typename T>
std::map<T*, uint_t> FieldAllocator<T>::referenceCounts_ = std::map<T*,uint_t>();
std::map<T*, uint_t> FieldAllocatorBase<T>::referenceCounts_ = std::map<T*,uint_t>();
......
......@@ -538,5 +538,31 @@ private:
// Block data handling that registers, for each block, a flattened shallow copy
// of an already stored field (see Field::flattenedShallowCopy()).
template< typename Field_T >
class FlattenedShallowCopyBlockDataHandling : public blockforest::AlwaysInitializeBlockDataHandling< typename Field_T::FlattenedField >
{
public:
   /// \param fieldToClone BlockDataID of the source field whose memory the flattened copy will share
   FlattenedShallowCopyBlockDataHandling( const ConstBlockDataID & fieldToClone ) :
      fieldToClone_( fieldToClone )
   {}

   /// Creates the flattened shallow copy for one block; the returned field
   /// reuses the memory of the field stored under fieldToClone_.
   typename Field_T::FlattenedField * initialize( IBlock * const block )
   {
      const Field_T * toClone = block->template getData< Field_T >( fieldToClone_ );
      return toClone->flattenedShallowCopy();
   }

private:
   ConstBlockDataID fieldToClone_;   // id of the source field in the block storage
}; // class FlattenedShallowCopyBlockDataHandling
} // namespace field
} // namespace walberla
//======================================================================================================================
//
// This file is part of waLBerla. waLBerla is free software: you can
// redistribute it and/or modify it under the terms of the GNU General Public
// License as published by the Free Software Foundation, either version 3 of
// the License, or (at your option) any later version.
//
// waLBerla is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
// for more details.
//
// You should have received a copy of the GNU General Public License along
// with waLBerla (see COPYING.txt). If not, see <http://www.gnu.org/licenses/>.
//
//! \file CollectTest.h
//! \author Martin Bauer <martin.bauer@fau.de>
//
//======================================================================================================================
#include "core/debug/TestSubsystem.h"
#include "core/Environment.h"
#include "blockforest/Initialization.h"
#include "field/AddToStorage.h"
namespace walberla {
// Test driver: verifies that a flattened shallow copy added via
// addFlattenedShallowCopyToStorage() aliases the memory of the original
// vector-valued field, while a regular clone does not.
int main( int argc, char ** argv )
{
   debug::enterTestMode();
   walberla::Environment walberlaEnv( argc, argv );

   auto blocks = blockforest::createUniformBlockGrid( 2, 2, 2, // blocks in x,y,z
                                                      4, 4, 4, // nr of cells per block
                                                      1.0, // dx
                                                      false
   );

   typedef GhostLayerField<Vector3<uint_t>,1> VectorField;
   typedef GhostLayerField<uint_t, 3> FlattenedField;
   BlockDataID fieldID = field::addToStorage<VectorField>( blocks, "Field" );
   BlockDataID flattenedID = field::addFlattenedShallowCopyToStorage<VectorField>( blocks, fieldID, "flattened Field");

   // Store in every component its own address, so memory identity between the
   // original and the flattened view can be asserted below.
   for( auto blockIt = blocks->begin(); blockIt != blocks->end(); ++blockIt )
   {
      VectorField * field = blockIt->getData<VectorField>( fieldID );
      for( auto cellIt = field->beginWithGhostLayerXYZ(); cellIt != field->end(); ++cellIt )
      {
         for( uint_t f = 0; f < 3; ++f)
         {
            // NOTE(review): assumes uint_t is wide enough to hold a pointer —
            // confirm for non-64-bit targets.
            uint_t val = uint_t(&(*cellIt)[f]);
            (*cellIt)[f] = val;
         }
      }
   }

   // A deep clone made after the writes must hold equal values in distinct memory.
   BlockDataID copyID = field::addCloneToStorage<VectorField>( blocks, fieldID, "copied Field");

   for( auto blockIt = blocks->begin(); blockIt != blocks->end(); ++blockIt )
   {
      VectorField * field = blockIt->getData<VectorField>( fieldID );
      FlattenedField * flattened = blockIt->getData<FlattenedField>( flattenedID );
      VectorField * copy = blockIt->getData<VectorField>( copyID );
      for( auto cellIt = field->beginWithGhostLayerXYZ(); cellIt != field->end(); ++cellIt )
      {
         for( uint_t f = 0; f < 3; ++f)
         {
            // Clone: same value, different address. Flattened view: same address.
            WALBERLA_CHECK_EQUAL((*cellIt)[f], copy->get(cellIt.x(), cellIt.y(), cellIt.z())[f]);
            WALBERLA_CHECK_UNEQUAL(&(*cellIt)[f], &(copy->get(cellIt.x(), cellIt.y(), cellIt.z())[f]));
            WALBERLA_CHECK_EQUAL(&(*cellIt)[f], &(flattened->get(cellIt.x(), cellIt.y(), cellIt.z(), f)));
         }
      }
   }

   return 0;
}
}
// Entry point: forwards to the namespaced test driver.
int main( int argc, char ** argv )
{
   return walberla::main(argc,argv);
}
......@@ -7,6 +7,9 @@
waLBerla_compile_test( FILES AccuracyEvaluationTest.cpp DEPENDS blockforest )
waLBerla_execute_test( NAME AccuracyEvaluationTest4 COMMAND $<TARGET_FILE:AccuracyEvaluationTest> PROCESSES 4 )
waLBerla_compile_test( FILES AddToStorageTest.cpp DEPENDS blockforest )
waLBerla_execute_test( NAME AddToStorageTest )
waLBerla_compile_test( FILES communication/FieldPackInfoTest.cpp DEPENDS blockforest )
waLBerla_execute_test( NAME FieldPackInfoTest )
......
......@@ -577,6 +577,99 @@ void fieldPointerTest()
}
// Checks that Field::flattenedShallowCopy() of a Vector3<uint_t> field has the
// same geometry/strides as a natively allocated Field<uint_t, 3*fSize> and
// aliases the original component values.
template<uint_t fSize>
void flattenTest()
{
   // Store in every scalar component its own address as the value, so aliasing
   // can be verified through the flattened view below.
   Field<Vector3<uint_t>, fSize> field ( 2,2,1 );
   for( cell_idx_t x = 0; x < cell_idx_c(field.xSize()); ++x )
      for( cell_idx_t y = 0; y < cell_idx_c(field.ySize()); ++y )
         for( cell_idx_t z = 0; z < cell_idx_c(field.zSize()); ++z )
            for( cell_idx_t f = 0; f < cell_idx_c(field.fSize()); ++f )
               for( uint_t g = 0; g < 3; ++g )
               {
                  // NOTE(review): assumes uint_t can hold a pointer value.
                  uint_t val = uint_t(&(field( x,y,z,f )[g]));
                  field( x,y,z,f )[g] = val;
               }

   shared_ptr<Field<uint_t, 3*fSize>> flattened(field.flattenedShallowCopy());

   // Reference field with the flattened element type/fSize: the shallow copy
   // must report identical sizes, allocation sizes, strides and offsets.
   Field<uint_t, 3*fSize> cmp ( 2,2,1 );
   WALBERLA_CHECK_EQUAL(cmp.xSize(), flattened->xSize());
   WALBERLA_CHECK_EQUAL(cmp.ySize(), flattened->ySize());
   WALBERLA_CHECK_EQUAL(cmp.zSize(), flattened->zSize());
   WALBERLA_CHECK_EQUAL(cmp.fSize(), flattened->fSize());
   WALBERLA_CHECK_EQUAL(cmp.xAllocSize(), flattened->xAllocSize());
   WALBERLA_CHECK_EQUAL(cmp.yAllocSize(), flattened->yAllocSize());
   WALBERLA_CHECK_EQUAL(cmp.zAllocSize(), flattened->zAllocSize());
   WALBERLA_CHECK_EQUAL(cmp.fAllocSize(), flattened->fAllocSize());
   WALBERLA_CHECK_EQUAL(cmp.allocSize(), flattened->allocSize());
   WALBERLA_CHECK_EQUAL(cmp.xStride(), flattened->xStride());
   WALBERLA_CHECK_EQUAL(cmp.yStride(), flattened->yStride());
   WALBERLA_CHECK_EQUAL(cmp.zStride(), flattened->zStride());
   WALBERLA_CHECK_EQUAL(cmp.fStride(), flattened->fStride());
   WALBERLA_CHECK_EQUAL(cmp.xOff(), flattened->xOff());
   WALBERLA_CHECK_EQUAL(cmp.yOff(), flattened->yOff());
   WALBERLA_CHECK_EQUAL(cmp.zOff(), flattened->zOff());

   // Component g of vector entry f maps to flattened f-index 3*f+g.
   for( cell_idx_t x = 0; x < cell_idx_c(field.xSize()); ++x )
      for( cell_idx_t y = 0; y < cell_idx_c(field.ySize()); ++y )
         for( cell_idx_t z = 0; z < cell_idx_c(field.zSize()); ++z )
            for( cell_idx_t f = 0; f < cell_idx_c(field.fSize()); ++f )
               for( uint_t g = 0; g < 3; ++g )
               {
                  WALBERLA_CHECK_EQUAL(field(x,y,z,f)[g], flattened->get(x,y,z,3*f+cell_idx_c(g)));
               }
}
// Same as flattenTest(), but for GhostLayerField: additionally checks that the
// ghost-layer count is preserved and that ghost cells alias as well.
template<uint_t fSize>
void ghostFlattenTest()
{
   // Fill every scalar component (ghost layers included) with its own address.
   GhostLayerField<Vector3<uint_t>, fSize> field ( 2,2,1, 1 );
   for( cell_idx_t x = -cell_idx_c(field.nrOfGhostLayers()); x < cell_idx_c(field.xSize()+field.nrOfGhostLayers()); ++x )
      for( cell_idx_t y = -cell_idx_c(field.nrOfGhostLayers()); y < cell_idx_c(field.ySize()+field.nrOfGhostLayers()); ++y )
         for( cell_idx_t z = -cell_idx_c(field.nrOfGhostLayers()); z < cell_idx_c(field.zSize()+field.nrOfGhostLayers()); ++z )
            for( cell_idx_t f = 0; f < cell_idx_c(field.fSize()); ++f )
               for( uint_t g = 0; g < 3; ++g )
               {
                  // NOTE(review): assumes uint_t can hold a pointer value.
                  uint_t val = uint_t(&(field( x,y,z,f )[g]));
                  field( x,y,z,f )[g] = val;
               }

   shared_ptr<GhostLayerField<uint_t, 3*fSize>> flattened(field.flattenedShallowCopy());

   // Geometry must match a natively allocated GhostLayerField<uint_t, 3*fSize>.
   GhostLayerField<uint_t, 3*fSize> cmp ( 2,2,1, 1 );
   WALBERLA_CHECK_EQUAL(cmp.xSize(), flattened->xSize());
   WALBERLA_CHECK_EQUAL(cmp.ySize(), flattened->ySize());
   WALBERLA_CHECK_EQUAL(cmp.zSize(), flattened->zSize());
   WALBERLA_CHECK_EQUAL(cmp.fSize(), flattened->fSize());
   WALBERLA_CHECK_EQUAL(cmp.xAllocSize(), flattened->xAllocSize());
   WALBERLA_CHECK_EQUAL(cmp.yAllocSize(), flattened->yAllocSize());
   WALBERLA_CHECK_EQUAL(cmp.zAllocSize(), flattened->zAllocSize());
   WALBERLA_CHECK_EQUAL(cmp.fAllocSize(), flattened->fAllocSize());
   WALBERLA_CHECK_EQUAL(cmp.allocSize(), flattened->allocSize());
   WALBERLA_CHECK_EQUAL(cmp.xStride(), flattened->xStride());
   WALBERLA_CHECK_EQUAL(cmp.yStride(), flattened->yStride());
   WALBERLA_CHECK_EQUAL(cmp.zStride(), flattened->zStride());
   WALBERLA_CHECK_EQUAL(cmp.fStride(), flattened->fStride());
   WALBERLA_CHECK_EQUAL(cmp.xOff(), flattened->xOff());
   WALBERLA_CHECK_EQUAL(cmp.yOff(), flattened->yOff());
   WALBERLA_CHECK_EQUAL(cmp.zOff(), flattened->zOff());
   WALBERLA_CHECK_EQUAL(cmp.nrOfGhostLayers(), flattened->nrOfGhostLayers());

   // Value equality across all cells incl. ghost layers via index mapping 3*f+g.
   for( cell_idx_t x = -cell_idx_c(field.nrOfGhostLayers()); x < cell_idx_c(field.xSize()+field.nrOfGhostLayers()); ++x )
      for( cell_idx_t y = -cell_idx_c(field.nrOfGhostLayers()); y < cell_idx_c(field.ySize()+field.nrOfGhostLayers()); ++y )
         for( cell_idx_t z = -cell_idx_c(field.nrOfGhostLayers()); z < cell_idx_c(field.zSize()+field.nrOfGhostLayers()); ++z )
            for( cell_idx_t f = 0; f < cell_idx_c(field.fSize()); ++f )
               for( uint_t g = 0; g < 3; ++g )
               {
                  WALBERLA_CHECK_EQUAL(field(x,y,z,f)[g], flattened->get(x,y,z,3*f+cell_idx_c(g)));
               }
}
int main( int argc, char**argv )
{
walberla::Environment walberlaEnv( argc, argv );
......@@ -627,6 +720,11 @@ int main( int argc, char**argv )
isIteratorConsecutiveTest( fzyx );
isIteratorConsecutiveTest( zyxf );