Commit 6f9accab authored by Michael Kuron

flattened shallow copy of Field

parent b2d24ca2
Pipeline #22484 canceled with stages in 208 minutes and 34 seconds
@@ -35,15 +35,22 @@ namespace walberla {
*/
//**********************************************************************************************************************
template< typename T >
template< typename T, class Enable = void >
struct VectorTrait
{
typedef void OutputType;
static const uint_t F_SIZE = 0u;
};
template< typename T >
struct VectorTrait<T, typename std::enable_if<std::is_arithmetic<T>::value>::type>
{
typedef T OutputType;
static const uint_t F_SIZE = 1u;
static T get ( T value, uint_t /*f*/ ) { return value; }
static void set( T & value, uint_t /*f*/, T val ) { value = val; }
static_assert( std::is_arithmetic<T>::value, "Specialize OutputTrait for your type!" );
};
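The trait is now split into a primary template that reports F_SIZE = 0 for types that cannot be flattened, an enable_if specialization for arithmetic types, and per-type specializations that container types such as Vector3 (and, given the data() addition below, presumably Matrix3) provide themselves. A minimal standalone sketch of that mechanism follows; Vec3 and the uint_t alias are illustrative stand-ins, not waLBerla code.

#include <cstddef>
#include <type_traits>

using uint_t = std::size_t;   // stand-in for walberla::uint_t

// Primary template: F_SIZE == 0 marks a type as "not flattenable".
template< typename T, class Enable = void >
struct VectorTrait
{
   typedef void OutputType;
   static const uint_t F_SIZE = 0u;
};

// Arithmetic types flatten to themselves (mirrors the specialization added above).
template< typename T >
struct VectorTrait< T, typename std::enable_if< std::is_arithmetic<T>::value >::type >
{
   typedef T OutputType;
   static const uint_t F_SIZE = 1u;
   static T    get( T value, uint_t /*f*/ )           { return value; }
   static void set( T & value, uint_t /*f*/, T val )  { value = val; }
};

// Hypothetical three-component vector standing in for walberla::Vector3.
template< typename T >
struct Vec3 { T v[3]; };

// A container type opts into flattening by providing its own specialization.
template< typename T >
struct VectorTrait< Vec3<T> >
{
   typedef T OutputType;
   static const uint_t F_SIZE = 3u;
   static T    get( const Vec3<T> & vec, uint_t f )   { return vec.v[f]; }
   static void set( Vec3<T> & vec, uint_t f, T val )  { vec.v[f] = val; }
};

static_assert( VectorTrait< double      >::F_SIZE == 1u, "arithmetic type" );
static_assert( VectorTrait< Vec3<float> >::F_SIZE == 3u, "vector type" );
static_assert( VectorTrait< Vec3<int>*  >::F_SIZE == 0u, "anything else is not flattenable" );

int main() { return 0; }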
......
@@ -174,8 +174,8 @@ public:
inline const Matrix3 getCholesky() const;
template< typename Other > inline const Vector3<HIGH> solve( const Vector3<Other> &rhs ) const;
inline Type trace() const;
inline Type* data() {return v_;}
inline Type const * data() const {return v_;}
inline Type* data() {return v_.data();}
inline Type const * data() const {return v_.data();}
//@}
//*******************************************************************************************************************
......
@@ -86,6 +86,11 @@ namespace field {
typedef FieldPointer<Field<T,fSize_>, Field<T,fSize_>, T > Ptr;
typedef FieldPointer<Field<T,fSize_>, const Field<T,fSize_>, const T > ConstPtr;
typedef typename std::conditional<VectorTrait<T>::F_SIZE!=0,
Field<typename VectorTrait<T>::OutputType, VectorTrait<T>::F_SIZE*fSize_>,
Field<T, fSize_>
>::type FlattenedField;
static const uint_t F_SIZE = fSize_;
//@}
//****************************************************************************************************************
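For illustration, a hedged compile-time sketch of what the new FlattenedField typedef resolves to; the header paths and the VectorTrait specialization for Vector3 are assumptions based on the usual waLBerla layout, while the 3*fSize result matches the flattenTest further down.

// Hedged sketch: header paths and namespaces are assumptions.
#include "core/math/Vector3.h"
#include "field/Field.h"

#include <type_traits>

using namespace walberla;

// A vector-valued field absorbs its components into the f dimension ...
static_assert( std::is_same< field::Field< Vector3<real_t>, 2 >::FlattenedField,
                             field::Field< real_t, 6 > >::value,
               "Vector3 components are absorbed into fSize" );

// ... while a field of an arithmetic type flattens to itself.
static_assert( std::is_same< field::Field< real_t, 2 >::FlattenedField,
                             field::Field< real_t, 2 > >::value,
               "scalar fields keep their type" );

int main() { return 0; }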
@@ -119,6 +124,7 @@ namespace field {
Field<T,fSize_> * clone() const;
Field<T,fSize_> * cloneUninitialized() const;
Field<T,fSize_> * cloneShallowCopy() const;
FlattenedField * flattenedShallowCopy() const;
//@}
//****************************************************************************************************************
@@ -320,10 +326,13 @@ namespace field {
/*! \name Shallow Copy */
//@{
Field(const Field & other);
template <typename T2, uint_t fSize2>
Field(const Field<T2, fSize2> & other);
virtual uint_t referenceCount() const;
virtual Field<T,fSize_> * cloneShallowCopyInternal() const;
virtual FlattenedField * flattenedShallowCopyInternal() const;
//@}
//****************************************************************************************************************
@@ -373,6 +382,8 @@ namespace field {
friend class FieldIterator<T,fSize_>;
friend class FieldIterator<const T,fSize_>;
template <typename T2, uint_t fSize2>
friend class Field;
static_assert(fSize_ > 0, "fSize()=0 means: empty field");
......
@@ -143,6 +143,21 @@ namespace field {
}
//*******************************************************************************************************************
/*! Returns a flattened shallow copy of the current field.
*
* Shallow copy means that the new field internally uses the same memory as this field.
* Flattened means that any VectorTrait-compatible containers are absorbed into the fSize.
*
* \return a new field that has to be freed by the caller
*******************************************************************************************************************/
template<typename T, uint_t fSize_>
typename Field<T,fSize_>::FlattenedField * Field<T,fSize_>::flattenedShallowCopy() const
{
return flattenedShallowCopyInternal();
}
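A hedged usage sketch mirroring the flattenTest at the bottom of this commit: the returned raw pointer is owned by the caller, so it is handed straight to a smart pointer. Header paths and namespaces are assumptions based on the usual waLBerla layout.

#include "core/math/Vector3.h"
#include "field/Field.h"

#include <memory>

using namespace walberla;

int main()
{
   // One Vector3 per cell and f index, default (zyxf) layout as in flattenTest.
   field::Field< Vector3<real_t>, 1 > vectorField( 4, 4, 4 );

   // The returned pointer must be freed by the caller, so hand it to a smart pointer.
   std::shared_ptr< field::Field< real_t, 3 > > flattened( vectorField.flattenedShallowCopy() );

   // Both views share the same memory: a write through one is visible through the other.
   vectorField( 0, 0, 0, 0 )[1] = real_t(42);
   return flattened->get( 0, 0, 0, 1 ) == real_t(42) ? 0 : 1;
}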
//*******************************************************************************************************************
/*!\brief Does the same as cloneShallowCopy (but is virtual)
*
@@ -156,6 +171,19 @@ namespace field {
return new Field<T,fSize_>(*this) ;
}
//*******************************************************************************************************************
/*!\brief Does the same as flattenedShallowCopy (but is virtual)
*
* This version has to be implemented by derived classes. flattenedShallowCopy() itself cannot be
* virtual, since flattenedShallowCopy() in derived classes has a different signature (return type).
*
*******************************************************************************************************************/
template<typename T, uint_t fSize_>
typename Field<T,fSize_>::FlattenedField * Field<T,fSize_>::flattenedShallowCopyInternal() const
{
return new FlattenedField(*this) ;
}
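The pattern described here, a non-virtual public function forwarding to a virtual internal hook with a covariant return type, can be shown with a standalone sketch; the class names below are placeholders, not waLBerla API.

#include <iostream>

struct FlatBase    { virtual ~FlatBase() = default; };  // stands in for Field<...>::FlattenedField
struct FlatDerived : FlatBase {};                       // stands in for a derived class' flattened type

class Base
{
public:
   virtual ~Base() = default;

   // Non-virtual: its return type is fixed by this class, so a derived class whose
   // flattened type differs cannot override it directly ...
   FlatBase * flattenedShallowCopy() const { return flattenedShallowCopyInternal(); }

protected:
   // ... which is why the virtual hook exists; a covariant return type lets derived
   // classes override it while returning their own flattened type.
   virtual FlatBase * flattenedShallowCopyInternal() const { return new FlatBase(); }
};

class Derived : public Base
{
public:
   // The derived class shadows the public function with its own return type.
   FlatDerived * flattenedShallowCopy() const
   { return static_cast< FlatDerived * >( flattenedShallowCopyInternal() ); }

protected:
   FlatDerived * flattenedShallowCopyInternal() const override { return new FlatDerived(); }
};

int main()
{
   Derived d;
   FlatDerived * flat = d.flattenedShallowCopy();       // derived flattened type, no cast at the call site

   const Base & b = d;
   FlatBase * flatViaBase = b.flattenedShallowCopy();   // still dispatches to Derived's hook
   std::cout << ( dynamic_cast< FlatDerived * >( flatViaBase ) != nullptr ) << "\n";  // prints 1

   delete flat;
   delete flatViaBase;
   return 0;
}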
//*******************************************************************************************************************
/*! Creates a new field that has equal size and layout as this field. The memory of the new
* field is uninitialized.
@@ -226,6 +254,40 @@ namespace field {
}
//*******************************************************************************************************************
/*! Private copy constructor that creates a flattened shallow copy,
* i.e. it reuses the memory of the copied field
*******************************************************************************************************************/
template<typename T, uint_t fSize_>
template <typename T2, uint_t fSize2>
Field<T,fSize_>::Field( const Field<T2,fSize2> & other )
: values_ ( other.values_[0].data() ),
valuesWithOffset_ ( other.valuesWithOffset_[0].data() ),
xOff_ ( other.xOff_),
yOff_ ( other.yOff_),
zOff_ ( other.zOff_),
xSize_ ( other.xSize_ ),
ySize_ ( other.ySize_ ),
zSize_ ( other.zSize_ ),
xAllocSize_ ( other.xAllocSize_ ),
yAllocSize_ ( other.yAllocSize_ ),
zAllocSize_ ( other.zAllocSize_ ),
fAllocSize_ ( other.fAllocSize_*fSize_/fSize2 ),
layout_ ( other.layout_ ),
allocSize_ ( other.allocSize_*fSize_/fSize2 ),
ffact_ ( other.ffact_ ),
xfact_ ( other.xfact_*cell_idx_t(fSize_/fSize2) ),
yfact_ ( other.yfact_*cell_idx_t(fSize_/fSize2) ),
zfact_ ( other.zfact_*cell_idx_t(fSize_/fSize2) ),
allocator_ ( std::shared_ptr<FieldAllocator<T>>(other.allocator_, reinterpret_cast<FieldAllocator<T>*>(other.allocator_.get())) )
{
WALBERLA_CHECK_EQUAL(layout_, Layout::zyxf);
static_assert(fSize_ % fSize2 == 0, "number of field components does not match");
static_assert(std::is_same<typename Field<T2,fSize2>::FlattenedField, Field<T,fSize_>>::value, "field types are incompatible for flattening");
allocator_->incrementReferenceCount ( values_ );
}
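As a worked example of the stride scaling, assume T2 = Vector3<double> and fSize2 = 2, so the flattened field has T = double and fSize_ = 6: fAllocSize_, allocSize_ and the x/y/z strides are multiplied by fSize_/fSize2 = 3, while ffact_ stays 1 because zyxf stores all f components of a cell contiguously. A small self-contained check of that index arithmetic (variable names are illustrative; the real strides are ffact_, xfact_ etc.):

#include <cassert>
#include <cstddef>

int main()
{
   const std::size_t F      = 3;   // components per vector (VectorTrait F_SIZE)
   const std::size_t fSize2 = 2;   // f-size of the original vector-valued field
   const std::size_t xAlloc = 4;   // allocated cells in x (any value works)

   // zyxf layout: f runs fastest, then x (y and z are omitted here for brevity).
   const std::size_t ffactOrig = 1, xfactOrig = fSize2;          // strides in units of vectors
   const std::size_t ffactFlat = 1, xfactFlat = xfactOrig * F;   // xfact_ * (fSize_ / fSize2), in scalars

   // Component g of element (x, f) of the original field and element (x, 3*f + g)
   // of the flattened view address the same scalar.
   for( std::size_t x = 0; x < xAlloc; ++x )
      for( std::size_t f = 0; f < fSize2; ++f )
         for( std::size_t g = 0; g < F; ++g )
            assert( ( x * xfactOrig + f * ffactOrig ) * F + g ==
                      x * xfactFlat + ( F * f + g ) * ffactFlat );

   return 0;
}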
//*******************************************************************************************************************
/*! Initializes the field with a given size, in a given layout
*
......
@@ -34,6 +34,13 @@ namespace walberla {
namespace field {
template <typename T>
class FieldAllocatorBase
{
protected:
static std::map<T*, uint_t> referenceCounts_;
};
//*******************************************************************************************************************
/*! Allocation Strategy base class for fields
*
@@ -44,7 +51,7 @@ namespace field {
*/
//*******************************************************************************************************************
template<typename T>
class FieldAllocator
class FieldAllocator : FieldAllocatorBase<void>
{
public:
@@ -196,14 +203,10 @@ namespace field {
* \param values Return value of allocate function()
*/
virtual void deallocate( T *& values ) = 0;
private:
static std::map<T*, uint_t> referenceCounts_;
};
template<typename T>
std::map<T*, uint_t> FieldAllocator<T>::referenceCounts_ = std::map<T*,uint_t>();
std::map<T*, uint_t> FieldAllocatorBase<T>::referenceCounts_ = std::map<T*,uint_t>();
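Moving the map into FieldAllocatorBase and deriving every FieldAllocator<T> from FieldAllocatorBase<void> gives all allocator instantiations one shared, void*-keyed reference-count map. That is what lets the reinterpret_cast'ed allocator of a flattened field manage the same allocation as the original field's allocator (see the aliasing shared_ptr in the flattening constructor above). A standalone sketch of that effect; class names are illustrative, not waLBerla API.

#include <cassert>
#include <cstdint>
#include <map>

struct AllocatorBase
{
protected:
   // One map for all element types, keyed by the raw address.
   static std::map< void *, std::uint64_t > referenceCounts_;
};
std::map< void *, std::uint64_t > AllocatorBase::referenceCounts_;

template< typename T >
struct Allocator : AllocatorBase
{
   void incrementReferenceCount( T * mem )  { ++referenceCounts_[ mem ]; }
   std::uint64_t referenceCount( T * mem )  { return referenceCounts_[ mem ]; }
};

struct Vec3 { double v[3]; };

int main()
{
   Vec3 storage[2] = {};

   Allocator< Vec3 >   vecAlloc;     // allocator of an original Field< Vec3, 2 >
   Allocator< double > scalarAlloc;  // allocator type seen by the flattened Field< double, 6 >

   vecAlloc.incrementReferenceCount( storage );

   // Both instantiations consult the same void*-keyed map, so the flattened view
   // observes the count registered by the vector-valued field.
   assert( scalarAlloc.referenceCount( reinterpret_cast< double * >( storage ) ) == 1 );
   return 0;
}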
......
@@ -577,6 +577,52 @@ void fieldPointerTest()
}
template<uint_t fSize>
void flattenTest()
{
Field<Vector3<uint_t>, fSize> field ( 2,2,1 );
for( cell_idx_t x = 0; x < cell_idx_c(field.xSize()); ++x )
for( cell_idx_t y = 0; y < cell_idx_c(field.ySize()); ++y )
for( cell_idx_t z = 0; z < cell_idx_c(field.zSize()); ++z )
for( cell_idx_t f = 0; f < cell_idx_c(field.fSize()); ++f )
for( uint_t g = 0; g < 3; ++g )
{
uint_t val = uint_t(&(field( x,y,z,f )[g]));
field( x,y,z,f )[g] = val;
}
shared_ptr<Field<uint_t, 3*fSize>> flattened(field.flattenedShallowCopy());
Field<uint_t, 3*fSize> cmp ( 2,2,1 );
WALBERLA_CHECK_EQUAL(cmp.xSize(), flattened->xSize());
WALBERLA_CHECK_EQUAL(cmp.ySize(), flattened->ySize());
WALBERLA_CHECK_EQUAL(cmp.zSize(), flattened->zSize());
WALBERLA_CHECK_EQUAL(cmp.fSize(), flattened->fSize());
WALBERLA_CHECK_EQUAL(cmp.xAllocSize(), flattened->xAllocSize());
WALBERLA_CHECK_EQUAL(cmp.yAllocSize(), flattened->yAllocSize());
WALBERLA_CHECK_EQUAL(cmp.zAllocSize(), flattened->zAllocSize());
WALBERLA_CHECK_EQUAL(cmp.fAllocSize(), flattened->fAllocSize());
WALBERLA_CHECK_EQUAL(cmp.allocSize(), flattened->allocSize());
WALBERLA_CHECK_EQUAL(cmp.xStride(), flattened->xStride());
WALBERLA_CHECK_EQUAL(cmp.yStride(), flattened->yStride());
WALBERLA_CHECK_EQUAL(cmp.zStride(), flattened->zStride());
WALBERLA_CHECK_EQUAL(cmp.fStride(), flattened->fStride());
WALBERLA_CHECK_EQUAL(cmp.xOff(), flattened->xOff());
WALBERLA_CHECK_EQUAL(cmp.yOff(), flattened->yOff());
WALBERLA_CHECK_EQUAL(cmp.zOff(), flattened->zOff());
for( cell_idx_t x = 0; x < cell_idx_c(field.xSize()); ++x )
for( cell_idx_t y = 0; y < cell_idx_c(field.ySize()); ++y )
for( cell_idx_t z = 0; z < cell_idx_c(field.zSize()); ++z )
for( cell_idx_t f = 0; f < cell_idx_c(field.fSize()); ++f )
for( uint_t g = 0; g < 3; ++g )
{
WALBERLA_CHECK_EQUAL(field(x,y,z,f)[g], flattened->get(x,y,z,3*f+cell_idx_c(g)));
}
}
int main( int argc, char**argv )
{
walberla::Environment walberlaEnv( argc, argv );
@@ -627,6 +673,9 @@ int main( int argc, char**argv )
isIteratorConsecutiveTest( fzyx );
isIteratorConsecutiveTest( zyxf );
flattenTest<1>();
flattenTest<3>();
//swapableCompareTest();
//printerTest();
......