diff --git a/src/core/VectorTrait.h b/src/core/VectorTrait.h
index 0909f4075011098526ada7d6cfb25678ed852f61..b050d159ee2a9a39670e715f5ebdccd7a95d42aa 100644
--- a/src/core/VectorTrait.h
+++ b/src/core/VectorTrait.h
@@ -35,15 +35,22 @@ namespace walberla {
 */
 //**********************************************************************************************************************
-template< typename T >
+template< typename T, class Enable = void >
 struct VectorTrait
+{
+   typedef void OutputType;
+
+   static const uint_t F_SIZE = 0u;
+};
+
+template< typename T >
+struct VectorTrait<T, typename std::enable_if<std::is_arithmetic<T>::value>::type>
 {
    typedef T OutputType;
 
    static const uint_t F_SIZE = 1u;
 
    static T    get ( T value, uint_t /*f*/ )          { return value; }
    static void set ( T & value, uint_t /*f*/, T val ) { value = val;  }
 
-   static_assert( std::is_arithmetic<T>::value, "Specialize OutputTrait for your type!" );
 };
 
diff --git a/src/core/math/Matrix3.h b/src/core/math/Matrix3.h
index 88c7818b7926b05747cdaa09578aa8bbc32c6cb1..b206d484dfcc17ce47a11b667f75b2cfc176b1f4 100644
--- a/src/core/math/Matrix3.h
+++ b/src/core/math/Matrix3.h
@@ -174,8 +174,8 @@ public:
   inline const Matrix3        getCholesky() const;
   template< typename Other >  inline const Vector3<HIGH> solve( const Vector3<Other> &rhs ) const;
   inline Type                 trace() const;
-  inline Type*                data()       {return v_;}
-  inline Type const *         data() const {return v_;}
+  inline Type*                data()       {return v_.data();}
+  inline Type const *         data() const {return v_.data();}
   //@}
   //*******************************************************************************************************************
 
diff --git a/src/field/AddToStorage.h b/src/field/AddToStorage.h
index dbb0b947d21d02133a4125808c99141edc110151..add0eb16d82095f1134245b36b8750759d45683a 100644
--- a/src/field/AddToStorage.h
+++ b/src/field/AddToStorage.h
@@ -246,6 +246,36 @@ BlockDataID addCloneToStorage( const shared_ptr< BlockStorage_T > & blocks,
 
 
 
+//**********************************************************************************************************************
+/*! Adds a flattened shallow copy of an existing field to BlockStorage
+*
+* Template parameters:
+*    Field_T          the type of the field that should be cloned and flattened
+*    BlockStorage_T   the type of the BlockStorage ( will be deduced automatically )
+*
+* Parameters:
+* \param blocks        BlockStorage where the original field is stored and the new one is created
+* \param fieldToClone  BlockDataID of the Field that is cloned
+* \param identifier    name for the new field ( displayed in GUI and debugging functions )
+*/
+//**********************************************************************************************************************
+template< typename Field_T, typename BlockStorage_T >
+BlockDataID addFlattenedShallowCopyToStorage( const shared_ptr< BlockStorage_T > & blocks,
+                                              ConstBlockDataID fieldToClone,
+                                              const std::string & identifier,
+                                              const Set<SUID> & requiredSelectors     = Set<SUID>::emptySet(),
+                                              const Set<SUID> & incompatibleSelectors = Set<SUID>::emptySet() )
+{
+   return blocks->addBlockData( make_shared< field::FlattenedShallowCopyBlockDataHandling< Field_T > >( fieldToClone ),
+                                identifier, requiredSelectors, incompatibleSelectors );
+}
+
+
+
+
+
+
+
+
 //**********************************************************************************************************************
 /*! BlockDataCreator for fields
 *
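As an illustration of the helper added above, here is a minimal usage sketch. It mirrors the test added in tests/field/AddToStorageTest.cpp at the end of this patch; the field types and identifiers are only examples.

   typedef GhostLayerField< Vector3<uint_t>, 1 > VectorField;     // three components per cell
   typedef VectorField::FlattenedField           FlattenedField;  // resolves to GhostLayerField< uint_t, 3 >

   BlockDataID fieldID     = field::addToStorage< VectorField >( blocks, "Field" );
   BlockDataID flattenedID = field::addFlattenedShallowCopyToStorage< VectorField >( blocks, fieldID, "flattened Field" );
   // Both IDs refer to the same memory on every block: writes through the Vector3 view
   // are visible through the flattened scalar view and vice versa.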
diff --git a/src/field/Field.h b/src/field/Field.h
index 913655771e2eec0c8937b3e96ce3511093ec3f34..7b3103b802ea59f7d2350a4526d38bd0634736ea 100644
--- a/src/field/Field.h
+++ b/src/field/Field.h
@@ -86,6 +86,11 @@ namespace field {
       typedef FieldPointer<Field<T,fSize_>, Field<T,fSize_>, T >             Ptr;
       typedef FieldPointer<Field<T,fSize_>, const Field<T,fSize_>, const T > ConstPtr;
 
+      typedef typename std::conditional<VectorTrait<T>::F_SIZE!=0,
+                                        Field<typename VectorTrait<T>::OutputType, VectorTrait<T>::F_SIZE*fSize_>,
+                                        Field<T, fSize_>
+                                        >::type FlattenedField;
+
       static const uint_t F_SIZE = fSize_;
       //@}
       //****************************************************************************************************************
 
@@ -119,6 +124,7 @@ namespace field {
       Field<T,fSize_> * clone()              const;
       Field<T,fSize_> * cloneUninitialized() const;
       Field<T,fSize_> * cloneShallowCopy()   const;
+      FlattenedField  * flattenedShallowCopy() const;
       //@}
       //****************************************************************************************************************
 
@@ -320,10 +326,13 @@ namespace field {
       /*! \name Shallow Copy */
       //@{
       Field(const Field & other);
+      template <typename T2, uint_t fSize2>
+      Field(const Field<T2, fSize2> & other);
 
       virtual uint_t referenceCount() const;
 
       virtual Field<T,fSize_> * cloneShallowCopyInternal()   const;
+      virtual FlattenedField * flattenedShallowCopyInternal() const;
       //@}
       //****************************************************************************************************************
 
@@ -373,6 +382,8 @@ namespace field {
 
       friend class FieldIterator<T,fSize_>;
       friend class FieldIterator<const T,fSize_>;
+      template <typename T2, uint_t fSize2>
+      friend class Field;
 
       static_assert(fSize_ > 0, "fSize()=0 means: empty field");
 
diff --git a/src/field/Field.impl.h b/src/field/Field.impl.h
index 39b9b227d76ddb481ceed93bc32feebdef124cf4..2ae44c77fd0bf6ad639741049d0263d2bea20813 100644
--- a/src/field/Field.impl.h
+++ b/src/field/Field.impl.h
@@ -143,6 +143,21 @@ namespace field {
   }
 
 
+  //*******************************************************************************************************************
+  /*! Returns a flattened shallow copy of the current field.
+  *
+  *  Shallow copy means that the new field internally uses the same memory as this field.
+  *  Flattened means that any VectorTrait-compatible containers are absorbed into the fSize.
+  *
+  *  \return a new field that has to be freed by the caller
+  *******************************************************************************************************************/
+  template<typename T, uint_t fSize_>
+  typename Field<T,fSize_>::FlattenedField * Field<T,fSize_>::flattenedShallowCopy() const
+  {
+     return flattenedShallowCopyInternal();
+  }
+
+
  //*******************************************************************************************************************
  /*!\brief Does the same as cloneShallowCopy (but is virtual)
  *
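For reference, a short sketch of what the FlattenedField typedef introduced above resolves to. The first assertion assumes the existing VectorTrait specialization for Vector3 (not shown in this patch); both assertions are purely illustrative and not part of the change.

   // A vector-valued field flattens to a scalar field with an enlarged fSize ...
   static_assert( std::is_same< Field< Vector3<double>, 2 >::FlattenedField,
                                Field< double, 6 > >::value, "flattening absorbs the three components" );
   // ... while a field of an arithmetic element type (F_SIZE == 1) flattens to itself.
   static_assert( std::is_same< Field< double, 19 >::FlattenedField,
                                Field< double, 19 > >::value, "already flat" );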
@@ -156,6 +171,19 @@ namespace field {
      return new Field<T,fSize_>(*this) ;
   }
 
+  //*******************************************************************************************************************
+  /*!\brief Does the same as flattenedShallowCopy (but is virtual)
+  *
+  *  This version has to be implemented by derived classes. flattenedShallowCopy() itself cannot be
+  *  virtual, since flattenedShallowCopy() of derived classes has a different return type.
+  *
+  *******************************************************************************************************************/
+  template<typename T, uint_t fSize_>
+  typename Field<T,fSize_>::FlattenedField * Field<T,fSize_>::flattenedShallowCopyInternal() const
+  {
+     return new FlattenedField(*this) ;
+  }
+
  //*******************************************************************************************************************
  /*! Creates a new field that has equal size and layout as this field. The memory of the new
  *   field is uninitialized.
 
@@ -226,6 +254,40 @@ namespace field {
   }
 
 
+  //*******************************************************************************************************************
+  /*! Private copy constructor that creates a flattened shallow copy,
+  *   i.e. reuses the memory of the copied field
+  *******************************************************************************************************************/
+  template<typename T, uint_t fSize_>
+  template <typename T2, uint_t fSize2>
+  Field<T,fSize_>::Field( const Field<T2,fSize2> & other )
+     : values_           ( other.values_[0].data() ),
+       valuesWithOffset_ ( other.valuesWithOffset_[0].data() ),
+       xOff_             ( other.xOff_ ),
+       yOff_             ( other.yOff_ ),
+       zOff_             ( other.zOff_ ),
+       xSize_            ( other.xSize_ ),
+       ySize_            ( other.ySize_ ),
+       zSize_            ( other.zSize_ ),
+       xAllocSize_       ( other.xAllocSize_ ),
+       yAllocSize_       ( other.yAllocSize_ ),
+       zAllocSize_       ( other.zAllocSize_ ),
+       fAllocSize_       ( other.fAllocSize_*fSize_/fSize2 ),
+       layout_           ( other.layout_ ),
+       allocSize_        ( other.allocSize_*fSize_/fSize2 ),
+       ffact_            ( other.ffact_ ),
+       xfact_            ( other.xfact_*cell_idx_t(fSize_/fSize2) ),
+       yfact_            ( other.yfact_*cell_idx_t(fSize_/fSize2) ),
+       zfact_            ( other.zfact_*cell_idx_t(fSize_/fSize2) ),
+       allocator_        ( std::shared_ptr<FieldAllocator<T>>(other.allocator_, reinterpret_cast<FieldAllocator<T>*>(other.allocator_.get())) )
+  {
+     WALBERLA_CHECK_EQUAL(layout_, Layout::zyxf);
+     static_assert(fSize_ % fSize2 == 0, "number of field components do not match");
+     static_assert(std::is_same<typename Field<T2,fSize2>::FlattenedField, Field<T,fSize_>>::value, "field types are incompatible for flattening");
+     allocator_->incrementReferenceCount ( values_ );
+  }
+
+
  //*******************************************************************************************************************
  /*! Initializes the field with a given size, in a given layout
  *
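The constructor above relies on the zyxf layout: all components of one cell are contiguous in memory, so component g of the f-th element of the original field ends up at flattened f-index (fSize_/fSize2)*f + g. A hedged sketch of that index mapping, assuming the VectorTrait specialization for Vector3 and mirroring the new flattenTest in tests/field/FieldTest.cpp:

   Field< Vector3<uint_t>, 2 > field( 4, 4, 4 );                         // two Vector3 per cell, zyxf layout
   shared_ptr< Field<uint_t, 6> > flat( field.flattenedShallowCopy() );  // six scalars per cell, same memory

   // component g of the f-th Vector3 is the (3*f + g)-th scalar of the same cell
   WALBERLA_CHECK_EQUAL( &field( 1, 2, 3, 1 )[2], &flat->get( 1, 2, 3, 3*1 + 2 ) );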
diff --git a/src/field/GhostLayerField.h b/src/field/GhostLayerField.h
index 122e2a465bcdaf2c573f936d650fc012d3ecc42d..e6c8b454c5aeec31439c2d80fa4b91a7f40f09c4 100644
--- a/src/field/GhostLayerField.h
+++ b/src/field/GhostLayerField.h
@@ -66,6 +66,11 @@ namespace field {
       typedef typename Field<T,fSize_>::Ptr      Ptr;
       typedef typename Field<T,fSize_>::ConstPtr ConstPtr;
+
+      typedef typename std::conditional<VectorTrait<T>::F_SIZE!=0,
+                                        GhostLayerField<typename VectorTrait<T>::OutputType, VectorTrait<T>::F_SIZE*fSize_>,
+                                        GhostLayerField<T, fSize_>
+                                        >::type FlattenedField;
 
       //@}
       //****************************************************************************************************************
 
@@ -105,6 +110,7 @@ namespace field {
       inline GhostLayerField<T,fSize_> * clone()              const;
       inline GhostLayerField<T,fSize_> * cloneUninitialized() const;
       inline GhostLayerField<T,fSize_> * cloneShallowCopy()   const;
+      inline FlattenedField            * flattenedShallowCopy() const;
       //@}
       //****************************************************************************************************************
 
@@ -209,11 +215,15 @@ namespace field {
       /*! \name Shallow Copy */
       //@{
       virtual Field<T,fSize_> * cloneShallowCopyInternal() const;
+      virtual typename Field<T,fSize_>::FlattenedField * flattenedShallowCopyInternal() const;
       GhostLayerField(const GhostLayerField<T,fSize_> & other);
+      template <typename T2, uint_t fSize2>
+      GhostLayerField(const GhostLayerField<T2, fSize2> & other);
       //@}
       //****************************************************************************************************************
 
-
+      template <typename T2, uint_t fSize2>
+      friend class GhostLayerField;
    };
 
 } // namespace field
 
diff --git a/src/field/GhostLayerField.impl.h b/src/field/GhostLayerField.impl.h
index 27c87ce0e119ad0d2b6d5fdc37626451fa0bc5e8..5a8c6cc7ab2566bf02fb78866c34eeef2bc9a602 100644
--- a/src/field/GhostLayerField.impl.h
+++ b/src/field/GhostLayerField.impl.h
@@ -696,6 +696,17 @@ namespace field {
   {
   }
 
+  //*******************************************************************************************************************
+  /*!\brief Private copy constructor, which does a flattened shallow copy
+  *******************************************************************************************************************/
+  template<typename T, uint_t fSize_>
+  template <typename T2, uint_t fSize2>
+  GhostLayerField<T,fSize_>::GhostLayerField(const GhostLayerField<T2,fSize2> & other)
+     : Field<T,fSize_>::Field(other),
+       gl_( other.gl_ )
+  {
+  }
+
  //*******************************************************************************************************************
  /*!\brief See Field::cloneShallowCopyInternal()
  *        Has to be re-implemented because a new GhostLayerField is created
 
@@ -706,6 +717,16 @@ namespace field {
     return new GhostLayerField<T,fSize_>(*this);
  }
 
+  //*******************************************************************************************************************
+  /*!\brief See Field::flattenedShallowCopyInternal()
+  *        Has to be re-implemented because a new GhostLayerField is created
+  *******************************************************************************************************************/
+  template<typename T, uint_t fSize_>
+  typename Field<T,fSize_>::FlattenedField * GhostLayerField<T,fSize_>::flattenedShallowCopyInternal() const
+  {
+     return new GhostLayerField<T,fSize_>::FlattenedField(*this);
+  }
+
  template<typename T, uint_t fSize_>
  GhostLayerField<T,fSize_> * GhostLayerField<T,fSize_>::clone() const
 
@@ -725,6 +746,12 @@ namespace field {
     return dynamic_cast<GhostLayerField<T,fSize_>* > (Field<T,fSize_>::cloneShallowCopy() );
  }
 
+  template<typename T, uint_t fSize_>
+  typename GhostLayerField<T,fSize_>::FlattenedField * GhostLayerField<T,fSize_>::flattenedShallowCopy() const
+  {
+     return dynamic_cast<GhostLayerField<T,fSize_>::FlattenedField* > (Field<T,fSize_>::flattenedShallowCopy() );
+  }
+
  template<typename T, uint_t fSize_>
  void GhostLayerField<T,fSize_>::slice( const CellInterval & interval )
  {
 
diff --git a/src/field/allocation/FieldAllocator.h b/src/field/allocation/FieldAllocator.h
index 418fb6c0bd6d520f4a6a213a957000c27da23dc7..f78771e15b2d1df7e0b61a7c2d9f29d44e7a9add 100644
--- a/src/field/allocation/FieldAllocator.h
+++ b/src/field/allocation/FieldAllocator.h
@@ -25,6 +25,7 @@
 #include "AlignedMalloc.h"
 #include "core/debug/Debug.h"
 #include "field/CMakeDefs.h"
+#include "core/VectorTrait.h"
 
 #include <map>
 #include <new>
@@ -34,6 +35,13 @@
 namespace walberla {
 namespace field {
 
+   template <typename T>
+   class FieldAllocatorBase
+   {
+   protected:
+      static std::map<T*, uint_t> referenceCounts_;
+   };
+
 //*******************************************************************************************************************
 /*! Allocation Strategy base class for fields
 *
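Hoisting referenceCounts_ into a FieldAllocatorBase keyed on the flattened element type (see the allocator changes below) lets FieldAllocator<Vector3<T>> and FieldAllocator<T> share one reference-count map, which is what keeps the memory of a flattened shallow copy alive. A standalone, conceptual sketch of that shared-static-in-a-common-base idiom (illustrative only, not waLBerla code):

   #include <map>

   template< typename T >
   struct CountsBase { static std::map<T*, unsigned> counts; };   // one map per scalar type T
   template< typename T >
   std::map<T*, unsigned> CountsBase<T>::counts;

   struct VectorAllocator : CountsBase<double> {};   // stands in for FieldAllocator< Vector3<double> >
   struct ScalarAllocator : CountsBase<double> {};   // stands in for FieldAllocator< double >

   // VectorAllocator::counts and ScalarAllocator::counts name the same object, so a
   // reference added through one allocator is visible when the other one decrements it.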
@@ -44,9 +52,10 @@
 */
 //*******************************************************************************************************************
 template<typename T>
-   class FieldAllocator
+   class FieldAllocator : FieldAllocatorBase<typename std::conditional<VectorTrait<T>::F_SIZE!=0, typename VectorTrait<T>::OutputType, T>::type>
    {
    public:
+      using BaseType = typename std::conditional<VectorTrait<T>::F_SIZE!=0, typename VectorTrait<T>::OutputType, T>::type;
 
       virtual ~FieldAllocator() = default;
 
@@ -65,14 +74,15 @@
                           uint_t & allocSize1, uint_t & allocSize2, uint_t & allocSize3)
      {
         T * mem = allocateMemory(size0,size1,size2,size3,allocSize1,allocSize2,allocSize3);
+        BaseType * bmem = reinterpret_cast<BaseType *>(mem);
 
         #ifdef WALBERLA_THREAD_SAFE_FIELD_ALLOCATION
         #ifdef _OPENMP
         #pragma omp critical( walberla_field_allocator_refcount )
         #endif
         #endif
         {
-           WALBERLA_ASSERT( referenceCounts_.find(mem) == referenceCounts_.end() || referenceCounts_[mem] == 0 );
-           referenceCounts_[mem] = 1;
+           WALBERLA_ASSERT( referenceCounts_.find(bmem) == referenceCounts_.end() || referenceCounts_[bmem] == 0 );
+           referenceCounts_[bmem] = 1;
         }
         return mem;
      }
 
@@ -89,6 +99,7 @@
      T * allocate ( uint_t allocSize )
      {
         T * mem = allocateMemory( allocSize );
+        BaseType * bmem = reinterpret_cast<BaseType *>(mem);
 
         #ifdef WALBERLA_THREAD_SAFE_FIELD_ALLOCATION
         #ifdef _OPENMP
@@ -96,8 +107,8 @@
         #endif
         #endif
         {
-           WALBERLA_ASSERT( referenceCounts_.find(mem) == referenceCounts_.end() || referenceCounts_[mem] == 0 );
-           referenceCounts_[mem] = 1;
+           WALBERLA_ASSERT( referenceCounts_.find(bmem) == referenceCounts_.end() || referenceCounts_[bmem] == 0 );
+           referenceCounts_[bmem] = 1;
         }
         return mem;
      }
 
@@ -112,15 +123,17 @@
      */
      void incrementReferenceCount( T * mem )
      {
+        BaseType * bmem = reinterpret_cast<BaseType *>(mem);
+
         #ifdef WALBERLA_THREAD_SAFE_FIELD_ALLOCATION
         #ifdef _OPENMP
         #pragma omp critical( walberla_field_allocator_refcount )
         #endif
         #endif
         {
-           WALBERLA_ASSERT( referenceCounts_.find(mem) != referenceCounts_.end() );
-           WALBERLA_ASSERT_GREATER( referenceCounts_[mem], 0 );
-           referenceCounts_[mem]++;
+           WALBERLA_ASSERT( referenceCounts_.find(bmem) != referenceCounts_.end() );
+           WALBERLA_ASSERT_GREATER( referenceCounts_[bmem], 0 );
+           referenceCounts_[bmem]++;
         }
      }
 
@@ -136,6 +149,7 @@
      */
      bool decrementReferenceCount( T * mem )
      {
+        BaseType * bmem = reinterpret_cast<BaseType *>(mem);
        bool memoryFreed = false;
        uint_t refCount  = 0;
 
@@ -146,10 +160,10 @@
        #endif
        #endif
        {
-          WALBERLA_ASSERT( referenceCounts_.find(mem) != referenceCounts_.end() );
-          WALBERLA_ASSERT_GREATER( referenceCounts_[mem], 0 );
+          WALBERLA_ASSERT( referenceCounts_.find(bmem) != referenceCounts_.end() );
+          WALBERLA_ASSERT_GREATER( referenceCounts_[bmem], 0 );
 
-          refCount = --referenceCounts_[mem];
+          refCount = --referenceCounts_[bmem];
        }
        if( refCount == 0 )
        {
@@ -163,6 +177,7 @@
      uint_t referenceCount ( T * mem ) const
      {
+        BaseType * bmem = reinterpret_cast<BaseType *>(mem);
        uint_t refCount = 0;
        #ifdef WALBERLA_THREAD_SAFE_FIELD_ALLOCATION
        #ifdef _OPENMP
@@ -170,8 +185,8 @@
        #endif
        #endif
        {
-          WALBERLA_ASSERT( referenceCounts_.find(mem) != referenceCounts_.end() );
-          refCount = referenceCounts_[mem];
+          WALBERLA_ASSERT( referenceCounts_.find(bmem) != referenceCounts_.end() );
+          refCount = referenceCounts_[bmem];
        }
 
        return refCount;
 
@@ -198,12 +213,11 @@
      virtual void deallocate( T *& values ) = 0;
 
   private:
-     static std::map<T*, uint_t> referenceCounts_;
-
+     using FieldAllocatorBase<BaseType>::referenceCounts_;
   };
 
   template<typename T>
-  std::map<T*, uint_t> FieldAllocator<T>::referenceCounts_ = std::map<T*,uint_t>();
+  std::map<T*, uint_t> FieldAllocatorBase<T>::referenceCounts_ = std::map<T*,uint_t>();
 
diff --git a/src/field/blockforest/BlockDataHandling.h b/src/field/blockforest/BlockDataHandling.h
index 49968c760379b1ac99ff0ccee3b194562b41b583..dc6328a66508f23429da30c5ad5752ae4aaee9ab 100644
--- a/src/field/blockforest/BlockDataHandling.h
+++ b/src/field/blockforest/BlockDataHandling.h
@@ -538,5 +538,31 @@ private:
 
 
 
+
+
+
+
+template< typename Field_T >
+class FlattenedShallowCopyBlockDataHandling : public blockforest::AlwaysInitializeBlockDataHandling< typename Field_T::FlattenedField >
+{
+public:
+
+   FlattenedShallowCopyBlockDataHandling( const ConstBlockDataID & fieldToClone ) :
+      fieldToClone_( fieldToClone )
+   {}
+
+   typename Field_T::FlattenedField * initialize( IBlock * const block )
+   {
+      const Field_T * toClone = block->template getData< Field_T >( fieldToClone_ );
+      return toClone->flattenedShallowCopy();
+   }
+
+private:
+
+   ConstBlockDataID fieldToClone_;
+
+}; // class FlattenedShallowCopyBlockDataHandling
+
+
+
 } // namespace field
 } // namespace walberla
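FlattenedShallowCopyBlockDataHandling is the piece that addFlattenedShallowCopyToStorage (src/field/AddToStorage.h, above) wraps: on every block it fetches the original field and stores the pointer returned by flattenedShallowCopy(). Registering it by hand is equivalent; a hedged sketch with the same hypothetical identifiers as before:

   typedef GhostLayerField< Vector3<uint_t>, 1 > VectorField;

   // equivalent to field::addFlattenedShallowCopyToStorage< VectorField >( blocks, fieldID, "flattened Field" )
   BlockDataID flattenedID = blocks->addBlockData(
         make_shared< field::FlattenedShallowCopyBlockDataHandling< VectorField > >( fieldID ),
         "flattened Field" );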
diff --git a/tests/field/AddToStorageTest.cpp b/tests/field/AddToStorageTest.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..270826cea118210674c7ce377dc2e262096ad41c
--- /dev/null
+++ b/tests/field/AddToStorageTest.cpp
@@ -0,0 +1,88 @@
+//======================================================================================================================
+//
+//  This file is part of waLBerla. waLBerla is free software: you can
+//  redistribute it and/or modify it under the terms of the GNU General Public
+//  License as published by the Free Software Foundation, either version 3 of
+//  the License, or (at your option) any later version.
+//
+//  waLBerla is distributed in the hope that it will be useful, but WITHOUT
+//  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+//  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+//  for more details.
+//
+//  You should have received a copy of the GNU General Public License along
+//  with waLBerla (see COPYING.txt). If not, see <http://www.gnu.org/licenses/>.
+//
+//! \file AddToStorageTest.cpp
+//! \author Martin Bauer <martin.bauer@fau.de>
+//
+//======================================================================================================================
+
+
+#include "core/debug/TestSubsystem.h"
+#include "core/Environment.h"
+
+#include "blockforest/Initialization.h"
+
+#include "field/AddToStorage.h"
+
+
+namespace walberla {
+
+
+int main( int argc, char ** argv )
+{
+   debug::enterTestMode();
+   walberla::Environment walberlaEnv( argc, argv );
+
+   auto blocks = blockforest::createUniformBlockGrid( 2, 2, 2,  // blocks in x,y,z
+                                                      4, 4, 4,  // nr of cells per block
+                                                      1.0,      // dx
+                                                      false
+                                                      );
+   typedef GhostLayerField<Vector3<uint_t>,1> VectorField;
+   typedef GhostLayerField<uint_t, 3>         FlattenedField;
+   BlockDataID fieldID     = field::addToStorage<VectorField>( blocks, "Field" );
+   BlockDataID flattenedID = field::addFlattenedShallowCopyToStorage<VectorField>( blocks, fieldID, "flattened Field");
+
+   for( auto blockIt = blocks->begin(); blockIt != blocks->end(); ++blockIt )
+   {
+      VectorField * field = blockIt->getData<VectorField>( fieldID );
+
+      for( auto cellIt = field->beginWithGhostLayerXYZ(); cellIt != field->end(); ++cellIt )
+      {
+         for( uint_t f = 0; f < 3; ++f)
+         {
+            uint_t val = uint_t(&(*cellIt)[f]);
+            (*cellIt)[f] = val;
+         }
+      }
+   }
+
+   BlockDataID copyID = field::addCloneToStorage<VectorField>( blocks, fieldID, "copied Field");
+
+   for( auto blockIt = blocks->begin(); blockIt != blocks->end(); ++blockIt )
+   {
+      VectorField *    field     = blockIt->getData<VectorField>( fieldID );
+      FlattenedField * flattened = blockIt->getData<FlattenedField>( flattenedID );
+      VectorField *    copy      = blockIt->getData<VectorField>( copyID );
+
+      for( auto cellIt = field->beginWithGhostLayerXYZ(); cellIt != field->end(); ++cellIt )
+      {
+         for( uint_t f = 0; f < 3; ++f)
+         {
+            WALBERLA_CHECK_EQUAL((*cellIt)[f], copy->get(cellIt.x(), cellIt.y(), cellIt.z())[f]);
+            WALBERLA_CHECK_UNEQUAL(&(*cellIt)[f], &(copy->get(cellIt.x(), cellIt.y(), cellIt.z())[f]));
+            WALBERLA_CHECK_EQUAL(&(*cellIt)[f], &(flattened->get(cellIt.x(), cellIt.y(), cellIt.z(), f)));
+         }
+      }
+   }
+
+   return 0;
+}
+}
+
+int main( int argc, char ** argv )
+{
+   return walberla::main(argc,argv);
+}
diff --git a/tests/field/CMakeLists.txt b/tests/field/CMakeLists.txt
index 2fb6b91887db6aea9ef2c21d67d687a55f2b7e0f..6a6b8546d3e80d8202f5edee95933f242b7beb64 100644
--- a/tests/field/CMakeLists.txt
+++ b/tests/field/CMakeLists.txt
@@ -7,6 +7,9 @@ waLBerla_compile_test( FILES AccuracyEvaluationTest.cpp DEPENDS blockforest )
 waLBerla_execute_test( NAME AccuracyEvaluationTest4 COMMAND $<TARGET_FILE:AccuracyEvaluationTest> PROCESSES 4 )
 
+waLBerla_compile_test( FILES AddToStorageTest.cpp DEPENDS blockforest )
+waLBerla_execute_test( NAME AddToStorageTest )
+
 waLBerla_compile_test( FILES communication/FieldPackInfoTest.cpp DEPENDS blockforest )
 waLBerla_execute_test( NAME FieldPackInfoTest )
 
diff --git a/tests/field/FieldTest.cpp b/tests/field/FieldTest.cpp
index 9fcf83cfe6a626ab424adae2b1d940e93b880cdb..2021554dd992aa7ff26c71b2740f81712f4ce411 100644
--- a/tests/field/FieldTest.cpp
+++ b/tests/field/FieldTest.cpp
@@ -577,6 +577,99 @@ void fieldPointerTest()
 }
 
 
+template<uint_t fSize>
+void flattenTest()
+{
+   Field<Vector3<uint_t>, fSize> field ( 2,2,1 );
+
+   for( cell_idx_t x = 0; x < cell_idx_c(field.xSize()); ++x )
+      for( cell_idx_t y = 0; y < cell_idx_c(field.ySize()); ++y )
+         for( cell_idx_t z = 0; z < cell_idx_c(field.zSize()); ++z )
+            for( cell_idx_t f = 0; f < cell_idx_c(field.fSize()); ++f )
+               for( uint_t g = 0; g < 3; ++g )
+               {
+                  uint_t val = uint_t(&(field( x,y,z,f )[g]));
+                  field( x,y,z,f )[g] = val;
+               }
+
+   shared_ptr<Field<uint_t, 3*fSize>> flattened(field.flattenedShallowCopy());
+
+   Field<uint_t, 3*fSize> cmp ( 2,2,1 );
+   WALBERLA_CHECK_EQUAL(cmp.xSize(), flattened->xSize());
+   WALBERLA_CHECK_EQUAL(cmp.ySize(), flattened->ySize());
+   WALBERLA_CHECK_EQUAL(cmp.zSize(), flattened->zSize());
+   WALBERLA_CHECK_EQUAL(cmp.fSize(), flattened->fSize());
+   WALBERLA_CHECK_EQUAL(cmp.xAllocSize(), flattened->xAllocSize());
+   WALBERLA_CHECK_EQUAL(cmp.yAllocSize(), flattened->yAllocSize());
+   WALBERLA_CHECK_EQUAL(cmp.zAllocSize(), flattened->zAllocSize());
+   WALBERLA_CHECK_EQUAL(cmp.fAllocSize(), flattened->fAllocSize());
+   WALBERLA_CHECK_EQUAL(cmp.allocSize(), flattened->allocSize());
+   WALBERLA_CHECK_EQUAL(cmp.xStride(), flattened->xStride());
+   WALBERLA_CHECK_EQUAL(cmp.yStride(), flattened->yStride());
+   WALBERLA_CHECK_EQUAL(cmp.zStride(), flattened->zStride());
+   WALBERLA_CHECK_EQUAL(cmp.fStride(), flattened->fStride());
+   WALBERLA_CHECK_EQUAL(cmp.xOff(), flattened->xOff());
+   WALBERLA_CHECK_EQUAL(cmp.yOff(), flattened->yOff());
+   WALBERLA_CHECK_EQUAL(cmp.zOff(), flattened->zOff());
+
+   for( cell_idx_t x = 0; x < cell_idx_c(field.xSize()); ++x )
+      for( cell_idx_t y = 0; y < cell_idx_c(field.ySize()); ++y )
+         for( cell_idx_t z = 0; z < cell_idx_c(field.zSize()); ++z )
+            for( cell_idx_t f = 0; f < cell_idx_c(field.fSize()); ++f )
+               for( uint_t g = 0; g < 3; ++g )
+               {
+                  WALBERLA_CHECK_EQUAL(field(x,y,z,f)[g], flattened->get(x,y,z,3*f+cell_idx_c(g)));
+               }
+}
+
+
+template<uint_t fSize>
+void ghostFlattenTest()
+{
+   GhostLayerField<Vector3<uint_t>, fSize> field ( 2,2,1, 1 );
+
+   for( cell_idx_t x = -cell_idx_c(field.nrOfGhostLayers()); x < cell_idx_c(field.xSize()+field.nrOfGhostLayers()); ++x )
+      for( cell_idx_t y = -cell_idx_c(field.nrOfGhostLayers()); y < cell_idx_c(field.ySize()+field.nrOfGhostLayers()); ++y )
+         for( cell_idx_t z = -cell_idx_c(field.nrOfGhostLayers()); z < cell_idx_c(field.zSize()+field.nrOfGhostLayers()); ++z )
+            for( cell_idx_t f = 0; f < cell_idx_c(field.fSize()); ++f )
+               for( uint_t g = 0; g < 3; ++g )
+               {
+                  uint_t val = uint_t(&(field( x,y,z,f )[g]));
+                  field( x,y,z,f )[g] = val;
+               }
+
+   shared_ptr<GhostLayerField<uint_t, 3*fSize>> flattened(field.flattenedShallowCopy());
+
+   GhostLayerField<uint_t, 3*fSize> cmp ( 2,2,1, 1 );
+   WALBERLA_CHECK_EQUAL(cmp.xSize(), flattened->xSize());
+   WALBERLA_CHECK_EQUAL(cmp.ySize(), flattened->ySize());
+   WALBERLA_CHECK_EQUAL(cmp.zSize(), flattened->zSize());
+   WALBERLA_CHECK_EQUAL(cmp.fSize(), flattened->fSize());
+   WALBERLA_CHECK_EQUAL(cmp.xAllocSize(), flattened->xAllocSize());
+   WALBERLA_CHECK_EQUAL(cmp.yAllocSize(), flattened->yAllocSize());
+   WALBERLA_CHECK_EQUAL(cmp.zAllocSize(), flattened->zAllocSize());
+   WALBERLA_CHECK_EQUAL(cmp.fAllocSize(), flattened->fAllocSize());
+   WALBERLA_CHECK_EQUAL(cmp.allocSize(), flattened->allocSize());
+   WALBERLA_CHECK_EQUAL(cmp.xStride(), flattened->xStride());
+   WALBERLA_CHECK_EQUAL(cmp.yStride(), flattened->yStride());
+   WALBERLA_CHECK_EQUAL(cmp.zStride(), flattened->zStride());
+   WALBERLA_CHECK_EQUAL(cmp.fStride(), flattened->fStride());
+   WALBERLA_CHECK_EQUAL(cmp.xOff(), flattened->xOff());
+   WALBERLA_CHECK_EQUAL(cmp.yOff(), flattened->yOff());
+   WALBERLA_CHECK_EQUAL(cmp.zOff(), flattened->zOff());
+   WALBERLA_CHECK_EQUAL(cmp.nrOfGhostLayers(), flattened->nrOfGhostLayers());
+
+   for( cell_idx_t x = -cell_idx_c(field.nrOfGhostLayers()); x < cell_idx_c(field.xSize()+field.nrOfGhostLayers()); ++x )
+      for( cell_idx_t y = -cell_idx_c(field.nrOfGhostLayers()); y < cell_idx_c(field.ySize()+field.nrOfGhostLayers()); ++y )
+         for( cell_idx_t z = -cell_idx_c(field.nrOfGhostLayers()); z < cell_idx_c(field.zSize()+field.nrOfGhostLayers()); ++z )
+            for( cell_idx_t f = 0; f < cell_idx_c(field.fSize()); ++f )
+               for( uint_t g = 0; g < 3; ++g )
+               {
+                  WALBERLA_CHECK_EQUAL(field(x,y,z,f)[g], flattened->get(x,y,z,3*f+cell_idx_c(g)));
+               }
+}
+
+
 int main( int argc, char**argv )
 {
    walberla::Environment walberlaEnv( argc, argv );
@@ -627,6 +720,11 @@ int main( int argc, char**argv )
    isIteratorConsecutiveTest( fzyx );
    isIteratorConsecutiveTest( zyxf );
 
+   flattenTest<1>();
+   flattenTest<3>();
+   ghostFlattenTest<1>();
+   ghostFlattenTest<3>();
+
    //swapableCompareTest();
    //printerTest();