Commit cd5bacdb authored by wagnandr

Merge branch 'master' into dechant/GMRESSolver

parents 152e294a dc32756e
@@ -7,3 +7,4 @@
.idea
cmake-build-*
.vs/
build/*
cmake_minimum_required (VERSION 3.10)
cmake_minimum_required (VERSION 3.14)
PROJECT ( hyteg )
enable_testing()
set_property(GLOBAL PROPERTY USE_FOLDERS TRUE)
option ( HYTEG_BUILD_WITH_PETSC "Build with PETSc" OFF)
option ( HYTEG_BUILD_WITH_EIGEN "Build with Eigen" OFF)
option ( HYTEG_BUILD_WITH_TRILINOS "Build with Trilinos" OFF)
option ( HYTEG_USE_GENERATED_KERNELS "Use generated pystencils kernels if available" ON)
option ( HYTEG_GIT_SUBMODULE_AUTO "Check submodules during build" ON)
@@ -14,6 +14,8 @@ set(WALBERLA_LOG_SKIPPED ON CACHE BOOL "Log skipped cmake targets"
set(WALBERLA_DIR ${hyteg_SOURCE_DIR}/walberla CACHE PATH "waLBerla path")
set(EIGEN_DIR ${hyteg_SOURCE_DIR}/eigen CACHE PATH "eigen path")
include_directories ( ${EIGEN_DIR} )
link_directories ( ${EIGEN_DIR} )
include_directories ( src )
@@ -26,27 +28,6 @@ else()
message(STATUS "Generated HyTeG kernels DISABLED! - Performance might not be optimal and some features might not be working correctly.")
endif()
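# Note (illustrative, assumed typical usage rather than part of this file): the generated
# pystencils kernels are toggled at configure time via the option defined above, e.g.
#   cmake ../hyteg -DHYTEG_USE_GENERATED_KERNELS=OFF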
if ( HYTEG_BUILD_WITH_PETSC )
find_package( PETSc )
include_directories ( ${PETSC_INCLUDES} )
link_directories ( ${PETSC_LIBRARIES} )
list ( APPEND SERVICE_LIBS ${PETSC_LIBRARIES} )
set(WALBERLA_BUILD_WITH_MPI ON CACHE BOOL "Build with MPI" FORCE)
message(STATUS "WALBERLA_BUILD_WITH_MPI was force set to ON")
endif()
if ( HYTEG_BUILD_WITH_EIGEN )
include_directories ( ${EIGEN_DIR} )
link_directories ( ${EIGEN_DIR} )
endif()
if ( HYTEG_BUILD_WITH_TRILINOS)
find_package( Trilinos )
message(STATUS "Found Trilinos! Trilinos_DIR = ${Trilinos_DIR} ")
INCLUDE_DIRECTORIES ( ${Trilinos_INCLUDE_DIRS} ${Trilinos_TPL_INCLUDE_DIRS})
list ( APPEND SERVICE_LIBS ${Trilinos_LIBRARIES} ${Trilinos_TPL_LIBRARY_DIRS})
endif()
find_package( waLBerla )
if(WALBERLA_CXX_COMPILER_IS_GNU)
@@ -74,6 +55,23 @@ if(WALBERLA_CXX_COMPILER_IS_MSVC)
add_flag( CMAKE_CXX_FLAGS "/wd4065" )
endif()
if ( HYTEG_BUILD_WITH_PETSC )
find_package( PETSc REQUIRED )
include_directories ( ${PETSC_INCLUDES} )
link_directories ( ${PETSC_LIBRARIES} )
list ( APPEND SERVICE_LIBS ${PETSC_LIBRARIES} )
set(WALBERLA_BUILD_WITH_MPI ON CACHE BOOL "Build with MPI" FORCE)
message(STATUS "WALBERLA_BUILD_WITH_MPI was force set to ON")
endif()
if ( HYTEG_BUILD_WITH_TRILINOS)
find_package( Trilinos REQUIRED )
message(STATUS "Found Trilinos! Trilinos_DIR = ${Trilinos_DIR} ")
INCLUDE_DIRECTORIES ( ${Trilinos_INCLUDE_DIRS} ${Trilinos_TPL_INCLUDE_DIRS})
list ( APPEND SERVICE_LIBS ${Trilinos_LIBRARIES} ${Trilinos_TPL_LIBRARY_DIRS})
endif()
configure_file ( ${hyteg_SOURCE_DIR}/src/hyteg/HytegDefinitions.in.hpp
src/hyteg/HytegDefinitions.hpp )
@@ -199,6 +197,7 @@ endif()
string( TOUPPER ${CMAKE_BUILD_TYPE} CMAKE_BUILD_TYPE_UPPER )
set ( HYTEG_BUILD_TYPE ${CMAKE_BUILD_TYPE} )
set ( HYTEG_COMPILER_INFO "${CMAKE_CXX_COMPILER_ID} ${CMAKE_CXX_COMPILER_VERSION}" )
set ( HYTEG_COMPILER_FLAGS "${CMAKE_CXX_FLAGS_${CMAKE_BUILD_TYPE_UPPER}} ${CMAKE_CXX_FLAGS}" )
string ( REPLACE "\"" "\\\"" HYTEG_COMPILER_FLAGS ${HYTEG_COMPILER_FLAGS} )
@@ -207,4 +206,26 @@ configure_file(
${CMAKE_BINARY_DIR}/src/hyteg/BuildInfo.hpp
)
# recursively collects all targets defined in the given directory and its subdirectories
function(get_all_targets _result _dir)
get_property(_subdirs DIRECTORY "${_dir}" PROPERTY SUBDIRECTORIES)
foreach (_subdir IN LISTS _subdirs)
get_all_targets(${_result} "${_subdir}")
endforeach ()
get_property(_sub_targets DIRECTORY "${_dir}" PROPERTY BUILDSYSTEM_TARGETS)
set(${_result} ${${_result}} ${_sub_targets} PARENT_SCOPE)
endfunction()
get_all_targets(_tests_targets "tests")
add_custom_target(hyteg_all_tests)
add_dependencies(hyteg_all_tests ${_tests_targets})
get_all_targets(_apps_targets "apps")
get_all_targets(_tutorials_targets "tutorials")
add_custom_target(hyteg_all_apps)
add_dependencies(hyteg_all_apps ${_apps_targets} ${_tutorials_targets})
add_custom_target(hyteg_all)
add_dependencies(hyteg_all ${_apps_targets} ${_tutorials_targets} ${_tests_targets})
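# Example (assumed typical usage, not part of this file): once configured, the umbrella
# targets defined above can be built selectively from the build directory, e.g.
#   cmake --build . --target hyteg_all_tests
#   cmake --build . --target hyteg_all_apps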
############################################################################################################################
@@ -81,10 +81,7 @@ Optional:
* [Eigen](http://eigen.tuxfamily.org "Eigen homepage") for some linear algebra operations
Eigen is (like waLBerla) automatically cloned as a git submodule, but disabled by default.
To activate Eigen, simply set the CMake variable `HYTEG_BUILD_WITH_EIGEN` to `ON`, e.g. via:
`$ cmake ../hyteg -DHYTEG_BUILD_WITH_EIGEN=ON`
Eigen is (like waLBerla) automatically cloned as a git submodule.
CMake will automatically find the Eigen submodule; there is no need to specify a path
or to download Eigen at all.
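If the submodules were not fetched when cloning (for instance because `HYTEG_GIT_SUBMODULE_AUTO` is set to `OFF`), they can usually be pulled in manually with the standard git command:
`$ git submodule update --init --recursive`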
......
@@ -237,7 +237,7 @@ void solve( MeshInfo& meshInfo,
FunctionType cMass( "cMass", storage, level, level );
FunctionType tmp( "tmp", storage, level, level );
FunctionType tmp2( "tmp2", storage, level, level );
typename FunctionTrait< FunctionType >::AssocVectorFunctionType uvw( "uvw", storage, level, level );
typename FunctionTrait< FunctionType >::AssocVectorFunctionType uvwLast( "uvwLast", storage, level, level );
// FunctionType u( "u", storage, level, level );
@@ -269,7 +269,8 @@ void solve( MeshInfo& meshInfo,
if ( enableDiffusion )
{
solver = std::make_shared< CGSolver< UnsteadyDiffusionOperator > >( storage, level, level, std::numeric_limits< uint_t >::max(), 1e-12 );
solver = std::make_shared< CGSolver< UnsteadyDiffusionOperator > >(
storage, level, level, std::numeric_limits< uint_t >::max(), 1e-12 );
}
UnsteadyDiffusion< FunctionType, UnsteadyDiffusionOperator, LaplaceOperator, MassOperator > diffusionSolver(
@@ -302,7 +303,7 @@ void solve( MeshInfo& meshInfo,
printCurrentMemoryUsage();
}
cError.assign( {1.0, -1.0}, {c, cSolution}, level, All );
cError.assign( { 1.0, -1.0 }, { c, cSolution }, level, All );
if ( verbose )
{
@@ -317,7 +318,9 @@ void solve( MeshInfo& meshInfo,
const auto initialMass = mass;
auto massChange = ( mass / initialMass ) - 1.0;
real_t timeTotal = 0;
real_t vMax = velocityMaxMagnitude( uvw[0], uvw[1], uvw[2], tmp, tmp2, level, All );
real_t vMax = 0;
vMax = velocityMaxMagnitude( uvw, tmp, tmp2, level, All );
if ( verbose )
{
@@ -382,15 +385,16 @@ void solve( MeshInfo& meshInfo,
// To implement re-init intervals > 1 for time dependent velocity fields, we need to
// reverse the velocity field over the period in which the solution is transported
// in the Lagrangian domain.
auto lagrangeIntervalLength = resetParticlesInterval;
auto lagrangeIntervalIdx = (i - 1) / lagrangeIntervalLength;
auto lagrangeIntervalInnerIdx = (i - 1) % lagrangeIntervalLength;
auto tsVelocityCurrent = lagrangeIntervalLength * lagrangeIntervalIdx + (lagrangeIntervalLength - lagrangeIntervalInnerIdx) - 1;
auto tsVelocityNext = lagrangeIntervalLength * lagrangeIntervalIdx + (lagrangeIntervalLength - lagrangeIntervalInnerIdx);
auto lagrangeIntervalLength = resetParticlesInterval;
auto lagrangeIntervalIdx = ( i - 1 ) / lagrangeIntervalLength;
auto lagrangeIntervalInnerIdx = ( i - 1 ) % lagrangeIntervalLength;
auto tsVelocityCurrent =
lagrangeIntervalLength * lagrangeIntervalIdx + ( lagrangeIntervalLength - lagrangeIntervalInnerIdx ) - 1;
auto tsVelocityNext = lagrangeIntervalLength * lagrangeIntervalIdx + ( lagrangeIntervalLength - lagrangeIntervalInnerIdx );
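// Worked example (for illustration only, values not from this code): with
// resetParticlesInterval = 4 and time step i = 6, lagrangeIntervalIdx = (6-1)/4 = 1 and
// lagrangeIntervalInnerIdx = (6-1)%4 = 1, so tsVelocityCurrent = 4*1 + (4-1) - 1 = 6 and
// tsVelocityNext = 7; within each Lagrangian interval the velocity time steps are thus
// traversed in reverse order as i increases.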
velocityX.setTime( startTimeX + dt * real_c(tsVelocityCurrent) );
velocityY.setTime( startTimeY + dt * real_c(tsVelocityCurrent) );
velocityZ.setTime( startTimeZ + dt * real_c(tsVelocityCurrent) );
velocityX.setTime( startTimeX + dt * real_c( tsVelocityCurrent ) );
velocityY.setTime( startTimeY + dt * real_c( tsVelocityCurrent ) );
velocityZ.setTime( startTimeZ + dt * real_c( tsVelocityCurrent ) );
uvwLast[0].interpolate( std::function< real_t( const Point3D& ) >( std::ref( velocityX ) ), level );
uvwLast[1].interpolate( std::function< real_t( const Point3D& ) >( std::ref( velocityY ) ), level );
@@ -399,9 +403,9 @@ void solve( MeshInfo& meshInfo,
uvwLast[2].interpolate( std::function< real_t( const Point3D& ) >( std::ref( velocityZ ) ), level );
}
velocityX.setTime( startTimeX + dt * real_c(tsVelocityNext) );
velocityY.setTime( startTimeY + dt * real_c(tsVelocityNext) );
velocityZ.setTime( startTimeZ + dt * real_c(tsVelocityNext) );
velocityX.setTime( startTimeX + dt * real_c( tsVelocityNext ) );
velocityY.setTime( startTimeY + dt * real_c( tsVelocityNext ) );
velocityZ.setTime( startTimeZ + dt * real_c( tsVelocityNext ) );
uvw[0].interpolate( std::function< real_t( const Point3D& ) >( std::ref( velocityX ) ), level );
uvw[1].interpolate( std::function< real_t( const Point3D& ) >( std::ref( velocityY ) ), level );
@@ -415,13 +419,13 @@ void solve( MeshInfo& meshInfo,
real_t advectionTimeStepRunTime;
vMax = velocityMaxMagnitude( uvw[0], uvw[1], uvw[2], tmp, tmp2, level, All );
vMax = velocityMaxMagnitude( uvw, tmp, tmp2, level, All );
if ( enableDiffusion && strangSplitting )
{
solution.incTime( 0.5 * dt );
cOld.assign( {1.0}, {c}, level, All );
cOld.assign( { 1.0 }, { c }, level, All );
c.interpolate( std::function< real_t( const Point3D& ) >( std::ref( solution ) ), level, DirichletBoundary );
@@ -459,7 +463,7 @@ void solve( MeshInfo& meshInfo,
Inner,
dt,
1,
i == 1 || ( resetParticles && (i-1) % resetParticlesInterval == 0 ),
i == 1 || ( resetParticles && ( i - 1 ) % resetParticlesInterval == 0 ),
globalMaxLimiter,
setParticlesOutsideDomainToZero );
localTimer.end();
@@ -475,7 +479,7 @@ void solve( MeshInfo& meshInfo,
solution.incTime( dt );
}
cOld.assign( {1.0}, {c}, level, All );
cOld.assign( { 1.0 }, { c }, level, All );
c.interpolate( std::function< real_t( const Point3D& ) >( std::ref( solution ) ), level, DirichletBoundary );
@@ -487,7 +491,7 @@ void solve( MeshInfo& meshInfo,
}
cSolution.interpolate( std::function< real_t( const Point3D& ) >( std::ref( solution ) ), level );
cError.assign( {1.0, -1.0}, {c, cSolution}, level, All );
cError.assign( { 1.0, -1.0 }, { c, cSolution }, level, All );
timeTotal += dt;
......
Parameters
{
level 4;
level 2;
minLevel 0;
rMin 0.5;
rMin 0.55;
rMax 1.0;
nTan 12;
nRad 2;
threeDim false;
// nTan 12;
// nRad 2;
// threeDim false;
//nTan 3;
//nRad 3;
//threeDim true;
nTan 3;
nRad 3;
threeDim true;
// PETSC_MUMPS = 0,
// PETSC_MINRES_JACOBI = 1,
@@ -25,8 +25,8 @@ Parameters
stokesSolverType 5;
stokesMaxNumIterations 1;
stokesAbsoluteResidualUTolerance 1e-6;
stokesRelativeResidualUTolerance 1e-6;
stokesAbsoluteResidualUTolerance 1e-8;
stokesRelativeResidualUTolerance 1e-8;
uzawaPreSmooth 6;
uzawaPostSmooth 6;
uzawaInnerIterations 10;
@@ -35,24 +35,31 @@ Parameters
// PETSC_MINRES = 0,
// HYTEG_CG = 1
// HYTEG_GMG = 2
diffusionSolverType 0;
diffusionSolverType 1;
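// (with the mapping listed above, 1 selects the HyTeG CG solver, HYTEG_CG)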
diffusionMaxNumIterations 10000;
diffusionAbsoluteResidualUTolerance 1e-12;
maxNumTimeSteps 1;
maxNumTimeSteps 100;
simulationTime 10;
cflMax 0.5;
cflMax 1.0;
fixedTimeStep false;
dtConstant 1e-4;
rayleighNumber 1e8;
vtk true;
vtkOutputVelocity false;
vtkOutputInterval 1;
vtkOutputVertexDoFs true;
outputDirectory conv_rate;
outputBaseName conv_rate_u_6_6_10;
sphericalTemperatureSlice true;
sphericalTemperatureSliceInterval 1;
sphericalTemperatureSliceNumMeridians 360;
sphericalTemperatureSliceNumParallels 360;
sphericalTemperatureSliceIcoRefinements 6;
outputDirectory review;
outputBaseName review_test;
verbose true;
}
@@ -219,7 +219,7 @@ void runSimulation( int argc, char** argv )
return std::sin( std::atan2( x[1], x[0] ) );
};
outwardNormalField.uvw.interpolate( { normalX, normalY }, maxLevel );
outwardNormalField.uvw().interpolate( { normalX, normalY }, maxLevel );
// VTK
@@ -260,10 +260,10 @@ void runSimulation( int argc, char** argv )
{
uLast.assign( { 1.0 }, { u }, maxLevel, All );
M.apply( c, f.uvw[0], maxLevel, All );
M.apply( c, f.uvw[1], maxLevel, All );
f.uvw.multElementwise( { f.uvw, outwardNormalField.uvw }, maxLevel );
f.uvw.assign( { rayleighNumber }, { f.uvw }, maxLevel, All );
M.apply( c, f.uvw()[0], maxLevel, All );
M.apply( c, f.uvw()[1], maxLevel, All );
f.uvw().multElementwise( { f.uvw(), outwardNormalField.uvw() }, maxLevel );
f.uvw().assign( { rayleighNumber }, { f.uvw() }, maxLevel, All );
uint_t numVCycles = 0;
real_t currentResidual = std::numeric_limits< real_t >::max();
@@ -280,10 +280,10 @@ void runSimulation( int argc, char** argv )
walberla::format( " %6s | %12s | %14s | %12s | %8d | %15e ", "", "", "", "", numVCycles, currentResidual ) )
}
const auto maxVelocity = velocityMaxMagnitude( u.uvw[0], u.uvw[1], tmp.uvw[0], tmp.uvw[1], maxLevel, All );
const auto maxVelocity = velocityMaxMagnitude( u.uvw(), tmp.uvw()[0], tmp.uvw()[1], maxLevel, All );
const auto dt = ( cflUpperBound / maxVelocity ) * hMin;
transport.step( c, u.uvw, uLast.uvw, maxLevel, Inner, dt, 1, true );
transport.step( c, u.uvw(), uLast.uvw(), maxLevel, Inner, dt, 1, true );
if ( writeVTK )
{
......
@@ -12,7 +12,7 @@ def create_files(args, args_dict):
# parameter file
binary_name = 'MantleConvection'
base_name = '_'.join(['mc', datestamp, f'ra_{args.ra}', f'nodes_{args.num_nodes}'])
base_name = '_'.join(['mc', datestamp, f'nodes_{args.num_nodes}'])
parameter_file_name = base_name + '.prm'
args_dict['base_name'] = base_name
args_dict['output_directory'] = args.out_dir
@@ -21,14 +21,11 @@ def create_files(args, args_dict):
# job file
if args.machine == 'hawk':
raise Exception('Hawk untested')
job_file_name = base_name + '.job'
args_dict['out_dir'] = '../hawk'
args_dict['path'] = os.getcwd().rstrip('scriptGeneration')
args_dict['out_dir'] = '.'
args_dict['binary_name'] = binary_name
args_dict['job_name'] = base_name
args_dict['paramfile_name'] = parameter_file_name
args_dict['total_num_procs'] = args.num_cores * args.num_nodes
job_file = job_file_hawk(**args_dict)
elif args.machine == 'supermuc':
job_file_name = base_name + '.job'
@@ -63,23 +60,25 @@ if __name__ == '__main__':
parser.add_argument('--ntan', default=5, help='parameter for spherical shell mesh', type=int)
parser.add_argument('--nrad', default=4, help='parameter for spherical shell mesh', type=int)
parser.add_argument('--max_level', default=5, help='max level for multigrid', type=int)
parser.add_argument('--ra', default=1e8, help='Rayleigh number')
parser.add_argument('--max_level', default=4, help='max level for multigrid', type=int)
parser.add_argument('--ra', default=4.8e8, help='Rayleigh number')
parser.add_argument('--cfl', default=1.0, help='CFL number')
parser.add_argument('--out_dir', default='.', help='output directory')
parser.add_argument('--max_num_time_steps', default=10000, type=int)
parser.add_argument('--uzawa_omega', default=0.6, type=float)
parser.add_argument('--uzawa_omega', default=0.3, type=float)
parser.add_argument('--uzawa_pre', default=10, type=int)
parser.add_argument('--uzawa_post', default=10, type=int)
parser.add_argument('--uzawa_inner', default=6, type=int)
parser.add_argument('--stokes_abs_tol', default=1e-6, type=float)
parser.add_argument('--stokes_rel_tol', default=1e-6, type=float)
parser.add_argument('--stokes_abs_tol', default=1e-8, type=float)
parser.add_argument('--stokes_rel_tol', default=1e-8, type=float)
parser.add_argument('--vtk_interval', default=10, type=int)
parser.add_argument('--vtk_vertex_dofs', default=False, type=bool)
parser.add_argument('--sph_tmp_interval', default=10, type=int)
args = parser.parse_args()
args_dict = vars(args)
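# Illustrative invocation only (the script name and values are placeholders, not taken from
# this commit); the flags correspond to the argparse options defined above, e.g.
#   python create_files.py --max_level 4 --ra 4.8e8 --uzawa_omega 0.3 --stokes_abs_tol 1e-8 --stokes_rel_tol 1e-8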
......
#!/bin/bash
module load gcc
module load mpt
module load cmake
module load petsc
module list
def create_parameter_file(max_level: int, ra: float, output_directory: str, base_name: str, max_num_time_steps: int, uzawa_omega: float, cfl: float, uzawa_pre: int, uzawa_post: int, uzawa_inner: int,
stokes_rel_tol: float, stokes_abs_tol: float, ntan: int, nrad: int, vtk_interval: int, vtk_vertex_dofs: bool, **kwargs):
def create_parameter_file(max_level: int, ra: float, output_directory: str, base_name: str, max_num_time_steps: int,
uzawa_omega: float, cfl: float, uzawa_pre: int, uzawa_post: int, uzawa_inner: int,
stokes_rel_tol: float, stokes_abs_tol: float, ntan: int, nrad: int, vtk_interval: int,
vtk_vertex_dofs: bool, sph_tmp_interval: int, **kwargs):
return f"""Parameters
{{
level {max_level};
minLevel 0;
rMin 0.5;
rMin 0.55;
rMax 1.0;
// nTan 12;
@@ -49,12 +49,19 @@ def create_parameter_file(max_level: int, ra: float, output_directory: str, base
cflMax {cfl};
fixedTimeStep false;
dtConstant 1e-4;
rayleighNumber 1e8;
rayleighNumber {ra};
vtk true;
vtkOutputVelocity false;
vtkOutputInterval {vtk_interval};
vtkOutputVertexDoFs {vtk_vertex_dofs};
sphericalTemperatureSlice true;
sphericalTemperatureSliceInterval {sph_tmp_interval};
sphericalTemperatureSliceNumMeridians 360;
sphericalTemperatureSliceNumParallels 360;
sphericalTemperatureSliceIcoRefinements 6;
outputDirectory {output_directory};
outputBaseName {base_name};
@@ -63,35 +70,34 @@ def create_parameter_file(max_level: int, ra: float, output_directory: str, base
"""
def job_file_hawk(job_name: str, binary_name: str, num_nodes: int, num_mpi_procs_per_node: int, walltime: str,
paramfile_name: str, **kwargs):
petsc_detail_string = "-ksp_view -ksp_monitor -log_view -mat_mumps_icntl_4 2"
def job_file_hawk(job_name: str, binary_name: str, num_nodes: int, num_omp_threads_per_mpi_proc: int, walltime: str, total_num_procs: int, paramfile_name: str,
path: str, **kwargs):
return f"""#!/bin/bash
#PBS -N {job_name}
#PBS -l select={num_nodes}:node_type=rome:mpiprocs={num_omp_threads_per_mpi_proc}
#PBS -l select={num_nodes}:node_type=rome:mpiprocs={num_mpi_procs_per_node}
#PBS -l walltime={walltime}
#PBS -m abe
#PBS -M dominik.thoennes@fau.de
export MPI_LAUNCH_TIMEOUT=1000
export MPI_IB_CONGESTED=1
module load gcc
module load mpt
module load cmake
module load petsc
module list
#PBS -M nils.kohl@fau.de
cd $PBS_O_WORKDIR
source load_modules_hawk.sh
cd ..
pwd
ls -lha
mpirun -np {total_num_procs} omplace -c 0-128:st={int(128 / num_omp_threads_per_mpi_proc)} {path}{binary_name} {path}hawk/{paramfile_name}
export MPI_LAUNCH_TIMEOUT=1000
export MPI_IB_CONGESTED=1
mpirun -np {num_nodes*num_mpi_procs_per_node} omplace -c 0-128:st={int(128 / num_mpi_procs_per_node)} ./{binary_name} MantleConvectionRunScripts/{paramfile_name} {petsc_detail_string}
"""
def job_file_supermuc(job_name: str, binary_name: str, num_nodes: int, num_mpi_procs_per_node: int, walltime: str, paramfile_name: str, **kwargs):
def job_file_supermuc(job_name: str, binary_name: str, num_nodes: int, num_mpi_procs_per_node: int, walltime: str,
paramfile_name: str, **kwargs):
petsc_detail_string = "-ksp_view -ksp_monitor -log_view -mat_mumps_icntl_4 2"
def partition(num_nodes):
......
@@ -197,27 +197,27 @@ void solveRHS0Implementation( const std::shared_ptr< PrimitiveStorage >&
for ( uint_t level = minLevel; level <= maxLevel; level++ )
{
u.uvw.interpolate( { initialU, initialV, initialW }, level, All );
u.uvw.interpolate( { solutionU, solutionV, solutionW }, level, DirichletBoundary );
u.p.interpolate( initialP, level, All );
u.uvw().interpolate( { initialU, initialV, initialW }, level, All );
u.uvw().interpolate( { solutionU, solutionV, solutionW }, level, DirichletBoundary );
u.p().interpolate( initialP, level, All );
if ( RHSisZero )
{
if ( level < maxLevel )
{
f.uvw.interpolate( 0, level, All );
f.p.interpolate( 0, level, All );
f.uvw().interpolate( 0, level, All );
f.p().interpolate( 0, level, All );
}
}
else
{
tmp.uvw.interpolate( { rhsU,rhsV, rhsW }, level, All );
tmp.uvw().interpolate( { rhsU,rhsV, rhsW }, level, All );
velocityMassOperator.apply( tmp.uvw[0], f.uvw[0], level, All );
velocityMassOperator.apply( tmp.uvw[1], f.uvw[1], level, All );
velocityMassOperator.apply( tmp.uvw[2], f.uvw[2], level, All );
velocityMassOperator.apply( tmp.uvw()[0], f.uvw()[0], level, All );
velocityMassOperator.apply( tmp.uvw()[1], f.uvw()[1], level, All );
velocityMassOperator.apply( tmp.uvw()[2], f.uvw()[2], level, All );
f.p.interpolate( 0, level, All );
f.p().interpolate( 0, level, All );
}
}
@@ -342,7 +342,7 @@ void solveRHS0Implementation( const std::shared_ptr< PrimitiveStorage >&
if ( projectPressure )
{
vertexdof::projectMean( u.p, currentLevel );
vertexdof::projectMean( u.p(), currentLevel );
}
errorAndResidual( A,
@@ -459,7 +459,7 @@ void solveRHS0Implementation( const std::shared_ptr< PrimitiveStorage >&
if ( projectPressure )
{
vertexdof::projectMean( u.p, maxLevel );
vertexdof::projectMean( u.p(), maxLevel );
}
errorAndResidual( A,
@@ -493,7 +493,7 @@ void solveRHS0Implementation( const std::shared_ptr< PrimitiveStorage >&
if ( projectPressure )
{
vertexdof::projectMean( u.p, maxLevel );
vertexdof::projectMean( u.p(), maxLevel );
}
errorAndResidual( A,
@@ -590,7 +590,7 @@ void solve( const std::shared_ptr< PrimitiveStorage >& storage,
else
{
solveRHS0Implementation< P1StokesFunction,
P1StokesOperator,
P1P1StokesOperator,
P1ConstantMassOperator,
P1ConstantMassOperator,
P1P1StokesToP1P1StokesRestriction,
......
@@ -155,8 +155,8 @@ inline void errorAndResidual( const StokesOperator& A,
{
tmp.interpolate( 0, level );
residualNegativeRHS0( u, A, level, flag, tmp );
residualL2Velocity = pointwiseScaledL2Norm( tmp.uvw, level );
residualL2Pressure = pointwiseScaledL2Norm( tmp.p, level );
residualL2Velocity = pointwiseScaledL2Norm( tmp.uvw(), level );