Commit d7b3e0e8 authored by Jean-Noël Grad

Fix typos in comments and docstrings

parent 85cb8eeb
......@@ -1452,7 +1452,7 @@ int main( int argc, char **argv )
WALBERLA_LOG_INFO_ON_ROOT("Refreshing blockforest...")
// check refinement criterions and refine/coarsen if necessary
// check refinement criteria and refine/coarsen if necessary
uint_t stampBefore = blocks->getBlockForest().getModificationStamp();
blocks->refresh();
uint_t stampAfter = blocks->getBlockForest().getModificationStamp();
......@@ -2090,7 +2090,7 @@ int main( int argc, char **argv )
WALBERLA_LOG_INFO_ON_ROOT("Refreshing blockforest...")
// check refinement criterions and refine/coarsen if necessary
// check refinement criteria and refine/coarsen if necessary
uint_t stampBefore = blocks->getBlockForest().getModificationStamp();
blocks->refresh();
uint_t stampAfter = blocks->getBlockForest().getModificationStamp();
......
......@@ -929,7 +929,7 @@ int main( int argc, char **argv )
if( !useStaticRefinement && refinementCheckFrequency == 0 && numberOfLevels != 1 )
{
// determine check frequency automatically based on maximum admissable velocity and block sizes
// determine check frequency automatically based on maximum admissible velocity and block sizes
real_t uMax = real_t(0.1);
real_t refinementCheckFrequencyFinestLevel = ( overlap + real_c(blockSize) - real_t(2) * real_t(FieldGhostLayers) * dx) / uMax;
refinementCheckFrequency = uint_c( refinementCheckFrequencyFinestLevel / real_t(lbmTimeStepsPerTimeLoopIteration));
......@@ -1252,7 +1252,7 @@ int main( int argc, char **argv )
(*velocityCommunicationScheme)();
}
// check refinement criterions and refine/coarsen if necessary
// check refinement criteria and refine/coarsen if necessary
uint_t stampBefore = blocks->getBlockForest().getModificationStamp();
blocks->refresh();
uint_t stampAfter = blocks->getBlockForest().getModificationStamp();
......
......@@ -6,7 +6,7 @@ import os
class Parameter:
def __init__(self, name, type, defValue="", comment=""):
"""Propery of a data strcuture
"""Property of a data structure
Parameters
----------
......
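The docstring fixed in the hunk above belongs to a small helper class in a code-generation script. Below is a minimal sketch of what such a class might look like; the constructor signature and the first docstring line follow the hunk, while the parameter descriptions, attribute assignments, and the `__str__` helper are assumptions added purely for illustration.

```python
class Parameter:
    def __init__(self, name, type, defValue="", comment=""):
        """Property of a data structure

        Parameters
        ----------
        name : str
            name of the member variable (assumed meaning)
        type : str
            C++ type of the member variable (assumed meaning)
        defValue : str
            default value used to initialize the member (assumed meaning)
        comment : str
            comment attached to the member (assumed meaning)
        """
        self.name = name
        self.type = type
        self.defValue = defValue
        self.comment = comment

    def __str__(self):
        # hypothetical helper: render the member as a C++ declaration
        init = f" = {self.defValue}" if self.defValue else ""
        doc = f"  // {self.comment}" if self.comment else ""
        return f"{self.type} {self.name}{init};{doc}"
```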
......@@ -878,7 +878,7 @@ int main( int argc, char **argv )
real_t defaultOmegaBulk = lbm_mesapd_coupling::omegaBulkFromOmega(omega, real_t(1));
shared_ptr<OmegaBulkAdapter_T> omegaBulkAdapter = make_shared<OmegaBulkAdapter_T>(blocks, omegaBulkFieldID, accessor, defaultOmegaBulk, omegaBulk, adaptionLayerSize, sphereSelector);
timeloopAfterParticles.add() << Sweep( makeSharedSweep(omegaBulkAdapter), "Omega Bulk Adapter");
// initally adapt
// initially adapt
for (auto blockIt = blocks->begin(); blockIt != blocks->end(); ++blockIt) {
(*omegaBulkAdapter)(blockIt.get());
}
......
......@@ -843,7 +843,7 @@ int main( int argc, char **argv )
auto sphereShape = ss->create<mesa_pd::data::Sphere>( diameter * real_t(0.5) );
ss->shapes[sphereShape]->updateMassAndInertia(densityRatio);
std::mt19937 randomGenerator (static_cast<unsigned int>(2610)); // fixed seed: quasi-random and reproducable
std::mt19937 randomGenerator (static_cast<unsigned int>(2610)); // fixed seed: quasi-random and reproducible
for( uint_t nSed = 0; nSed < numberOfSediments; ++nSed )
{
......@@ -962,7 +962,7 @@ int main( int argc, char **argv )
if(currentPhase == 1)
{
// damp velocites to avoid too large ones
// damp velocities to avoid too large ones
ps->forEachParticle( useOpenMP, mesa_pd::kernel::SelectLocal(), *accessor,
[](const size_t idx, ParticleAccessor_T& ac){
ac.setLinearVelocity(idx, ac.getLinearVelocity(idx) * real_t(0.5));
......
......@@ -573,7 +573,7 @@ int main( int argc, char **argv )
if(maxPenetrationDepth < overlapLimit) break;
// reset velocites to avoid too large ones
// reset velocities to avoid too large ones
ps->forEachParticle( useOpenMP, mesa_pd::kernel::SelectLocal(), *accessor,
[](const size_t idx, ParticleAccessor_T& ac){
......
......@@ -6,7 +6,7 @@ import os
class Parameter:
def __init__(self, name, type, defValue=""):
"""Propery of a data strcuture
"""Property of a data structure
Parameters
----------
......
......@@ -1064,7 +1064,7 @@ void keepInflowOutflowAtTheSameLevel( std::vector< std::pair< const Block *, uin
uint_t maxInflowLevel( uint_t(0) );
uint_t maxOutflowLevel( uint_t(0) );
// In addtion to keeping in- and outflow blocks at the same level, this callback also
// In addition to keeping in- and outflow blocks at the same level, this callback also
// prevents these blocks from coarsening.
for( auto it = minTargetLevels.begin(); it != minTargetLevels.end(); ++it )
......@@ -2569,14 +2569,14 @@ void run( const shared_ptr< Config > & config, const LatticeModel_T & latticeMod
blockforest::DynamicDiffusionBalance< blockforest::NoPhantomData >( maxIterations, flowIterations ) );
}
// add callback functions which are executed after all block data was unpakced after the dynamic load balancing
// add callback functions which are executed after all block data was unpacked after the dynamic load balancing
// for blocks that have *not* migrated: store current flag field state (required for lbm::PostProcessing)
blockforest.addRefreshCallbackFunctionAfterBlockDataIsUnpacked( lbm::MarkerFieldGenerator< LatticeModel_T, field::FlagFieldEvaluationFilter<FlagField_T> >(
pdfFieldId, markerDataId, flagFieldFilter ) );
// (re)set boundaries = (re)initialize flag field for every block with respect to the new block structure (the size of neighbor blocks might have changed)
blockforest.addRefreshCallbackFunctionAfterBlockDataIsUnpacked( blockforest::BlockForest::RefreshCallbackWrappper( boundarySetter ) );
// treat boundary-fluid cell convertions
// treat boundary-fluid cell conversions
blockforest.addRefreshCallbackFunctionAfterBlockDataIsUnpacked( lbm::PostProcessing< LatticeModel_T, field::FlagFieldEvaluationFilter<FlagField_T> >(
pdfFieldId, markerDataId, flagFieldFilter ) );
// (re)set velocity field (velocity field data is not migrated!)
......
......@@ -295,7 +295,7 @@ class DummySweep
void emptyFunction() {}
//*******************************************************************************************************************
/*!\brief Simualtion of a strongly heterogeneous sized particulate flow system using combined resolved and unresolved
/*!\brief Simulation of a strongly heterogeneous sized particulate flow system using combined resolved and unresolved
* methods.
*
* For the coupling of resolved particles the Momentum Exchange Method (MEM) is used, whereas for the
......
......@@ -599,7 +599,7 @@ int main(int argc, char** argv) {
WALBERLA_CHECK(!(useCurlCriterion && useVorticityCriterion),
"Using curl and vorticity criterion together makes no sense.");
// create base dir if it doesnt already exist
// create base dir if it doesn't already exist
filesystem::path bpath(baseFolder);
if (!filesystem::exists(bpath)) {
filesystem::create_directory(bpath);
......
......@@ -206,7 +206,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"Our numeric solver's symbolic representation is now complete! Next, we use pystencils to generate and compile a C implementation of our kernel. The code is generated as shown below, compiled into a shared libary and then bound to `kernel_func`. All unbound sympy symbols (`dx`, `dt` and `kappa`) as well as the fields `u` and `u_tmp` are arguments to the generated kernel function. "
"Our numeric solver's symbolic representation is now complete! Next, we use pystencils to generate and compile a C implementation of our kernel. The code is generated as shown below, compiled into a shared library and then bound to `kernel_func`. All unbound sympy symbols (`dx`, `dt` and `kappa`) as well as the fields `u` and `u_tmp` are arguments to the generated kernel function. "
]
},
{
......@@ -355,7 +355,7 @@
"source": [
"### Prototype Simulation\n",
"\n",
"We can set up and run a simple simulation wich the generated kernel right here. The first step is to set up the fields and simulation parameters."
"We can set up and run a simple simulation with the generated kernel right here. The first step is to set up the fields and simulation parameters."
]
},
{
......
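The two notebook cells patched above describe how pystencils turns a symbolic update rule into a compiled C kernel bound to `kernel_func`, with the unbound symbols `dx`, `dt`, `kappa` and the fields `u`, `u_tmp` becoming kernel arguments. The sketch below illustrates that workflow under the assumption of a simple explicit heat-equation update on a 2D field; the concrete update rule and grid size are assumptions, only the names mirror the notebook text.

```python
import numpy as np
import sympy as sp
import pystencils as ps

# symbolic fields and free parameters (unbound symbols become kernel arguments)
u, u_tmp = ps.fields("u, u_tmp: double[2D]")
dx, dt, kappa = sp.symbols("dx dt kappa")

# assumed update rule: explicit Euler step for the 2D heat equation
laplacian = (u[1, 0] + u[-1, 0] + u[0, 1] + u[0, -1] - 4 * u[0, 0]) / dx**2
update = ps.Assignment(u_tmp[0, 0], u[0, 0] + dt * kappa * laplacian)

# generate C code, compile it into a shared library and bind it as kernel_func
kernel_func = ps.create_kernel(update).compile()

# run one step on numpy arrays; the unbound symbols are passed by keyword
u_arr = np.zeros((32, 32))
u_tmp_arr = np.zeros_like(u_arr)
kernel_func(u=u_arr, u_tmp=u_tmp_arr, dx=0.1, dt=0.001, kappa=1.0)
```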
......@@ -130,7 +130,7 @@ CommScheme communication( blocks );
communication.addDataToCommunicate( make_shared<field::communication::UniformMPIDatatypeInfo<GPUField> > (gpuFieldSrcID) );
\endcode
This scheme also supports heterogenous simulations, i.e. using a CPU field on
This scheme also supports heterogeneous simulations, i.e. using a CPU field on
some processes and a GPU field on other processes.
*/
......
......@@ -90,7 +90,7 @@ A lattice model defines the basic ingredients needed for an LBM simulation:
- **collision model**: The first template parameter for a lattice model is a collision model.
The collision or relaxation model defines which method to use in the collide step. Here, we use the single relaxation time
model (SRT) also called BGK model: lbm::collision_model::SRT. For other options, see the file lbm/lattice_model/CollisionModel.h
- There are further template parameters specifing compressibility, force model, etc.
- There are further template parameters specifying compressibility, force model, etc.
These arguments have default parameters which we use here.
For a more detailed description of lattice models, see lbm::LatticeModelBase
......@@ -151,7 +151,7 @@ boundary::BoundaryHandling. Otherwise the `near boundary` and `domain` flags are
The boundary handling is a heavily templated part of waLBerla since it contains performance critical code
and at the same time has to be very flexible, i.e., it should be easy to write new boundary conditions.
By using template concepts (compile-time polymorphism) instead of inheritence (runtime polymorphism) the compiler is able to
By using template concepts (compile-time polymorphism) instead of inheritance (runtime polymorphism) the compiler is able to
resolve all function calls at compile time and can do optimizations like function inlining.
To make setting up a boundary handling easier, a convenience factory class lbm::DefaultBoundaryHandlingFactory exists
that creates a boundary::BoundaryHandling with six often used boundary conditions. Together with the `near boundary` and
......@@ -268,7 +268,7 @@ timeloop.addFuncAfterTimeStep( makeSharedFunctor( field::makeStabilityChecker< P
"LBM stability check" );
\endcode
Additionally, a small functor is scheduled that periodically prints the estimated remaining time of the simultion:
Additionally, a small functor is scheduled that periodically prints the estimated remaining time of the simulation:
\code
timeloop.addFuncAfterTimeStep( timing::RemainingTimeLogger( timeloop.getNrOfTimeSteps(), remainingTimeLoggerFrequency ),
......
......@@ -7,7 +7,7 @@ namespace walberla {
\section tutorial05_overview Overview
The aim of this tutorial is to show how to build and solve the backward-facing step model using lattice Boltzman method in waLBerla.
The aim of this tutorial is to show how to build and solve the backward-facing step model using lattice Boltzmann method in waLBerla.
The "01_BasicLBM" case is used as the foundation of the current work. Therefore, most of the functionalities have already been introduced and discussed in LBM 1 tutorial.
Here the main focus is on the following areas:
......@@ -54,13 +54,13 @@ Finally, viscosity consequently **omega** are calculated with the Reynolds numbe
\section tutorial05_geometry Geometry
Since the step geometry is a plain rectangular area, the simplest approach is to create it by geometry module in walberla.
This module offers capability to read boundaries of a random geometry from images, voxel files, coordinates of verticies, etc.
This module offers capability to read boundaries of a random geometry from images, voxel files, coordinates of vertices, etc.
Using this module, obstacles of basic shapes could be conveniently positioned inside the domain.
It is also easier to have the program to read the geometry specifications from the Boundaries section of the configuration file.
This is implemented by reading and storing the Boundaries block of the configuration file in 'boundariesConfig' object and passing it to a convenience function provided in the geometry class to initialize the boundaries.
\snippet 05_BackwardFacingStep.cpp geomboundary
Here a subblock 'Body' is created inside 'Boundaries' section in the configuration file in order to create a box (rectangle in 2D) using two diagonal verticies.
Here a subblock 'Body' is created inside 'Boundaries' section in the configuration file in order to create a box (rectangle in 2D) using two diagonal vertices.
\snippet 05_BackwardFacingStep.prm geometry
......@@ -77,7 +77,7 @@ This mechanism is implemented by a functor named `ReattachmentLengthFinder` and
After running the program, the locations of reattachment against timestep are written to 'ReattachmentLengthLogging_Re_[Re].txt' in the working directory.
Note that there might be more than one reattachment location before the flow fully develops along the channel, and all are given in the file.
This simply means that it is expected to have multiple occurances of seperation and reattachment at the same time along the bottom boundary of the channel following the step in the early stages.
This simply means that it is expected to have multiple occurences of seperation and reattachment at the same time along the bottom boundary of the channel following the step in the early stages.
However, most of them are smeared later as the flow starts to develop.
The logging frequency can also be adjusted by 'checkFrequency' which is passed to the `ReattachmentLengthFinder` functor.
......
......@@ -57,7 +57,7 @@ void initBC( const shared_ptr< StructuredBlockStorage > & blocks, const BlockDat
auto src = block->getData< ScalarField >( srcID );
auto dst = block->getData< ScalarField >( dstID );
// obtain a CellInterval object that holds information about the number of cells in x,y,z direction of the field inlcuding ghost layers
// obtain a CellInterval object that holds information about the number of cells in x,y,z direction of the field including ghost layers
// Since src and dst have the same size, one object is enough.
CellInterval xyz = src->xyzSizeWithGhostLayer();
......
......@@ -243,7 +243,7 @@ class QMapPrinter:
ret += gdb.lookup_type('void').pointer().sizeof
# but because of data alignment the value can be higher
# so guess it's aliged by sizeof(void*)
# so guess it's aligned by sizeof(void*)
# TODO: find a real solution for this problem
ret += ret % gdb.lookup_type('void').pointer().sizeof
......@@ -494,7 +494,7 @@ class QUrlPrinter:
return self.val['d']['encodedOriginal']
except RuntimeError as error:
print(error)
# if no debug information is avaliable for Qt, try guessing the correct address for encodedOriginal
# if no debug information is available for Qt, try guessing the correct address for encodedOriginal
# problem with this is that if QUrlPrivate members get changed, this fails
offset = gdb.lookup_type('int').sizeof
offset += offset % gdb.lookup_type('void').pointer().sizeof # alignment
......
......@@ -11,7 +11,7 @@
# The checksum is used as an index in a different array. If an item with that index already exists the suppression must be a duplicate and is discarded.
BEGIN { suppression=0; md5sum = "md5sum" }
# If the line begins with '{', it's the start of a supression; so set the var and initialise things
# If the line begins with '{', it's the start of a suppression; so set the var and initialise things
/^{/ {
suppression=1; i=0; next
}
......@@ -24,7 +24,7 @@ BEGIN { suppression=0; md5sum = "md5sum" }
delete supparray # We don't want subsequent suppressions to append to it!
}
}
# Otherwise, it's a normal line. If we're inside a supression, store it, and pipe it to md5sum. Otherwise it's cruft, so ignore it
# Otherwise, it's a normal line. If we're inside a suppression, store it, and pipe it to md5sum. Otherwise it's cruft, so ignore it
{ if (suppression)
{
supparray[++i] = $0
......
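The awk script patched above deduplicates valgrind suppression blocks by piping each block's body through md5sum and using the checksum as an index, discarding blocks whose checksum has already been seen. A rough Python equivalent of that idea is sketched below; the file handling and output formatting are assumptions, only the dedup-by-hash logic mirrors the script.

```python
import hashlib
import sys

def unique_suppressions(lines):
    """Yield each '{ ... }' suppression block only once, keyed by an MD5 of its body."""
    seen = set()
    block = None
    for line in lines:
        stripped = line.strip()
        if stripped.startswith("{"):        # start of a suppression block
            block = []
        elif stripped == "}" and block is not None:
            digest = hashlib.md5("\n".join(block).encode()).hexdigest()
            if digest not in seen:          # discard duplicates, keep the first occurrence
                seen.add(digest)
                yield "{\n" + "\n".join(block) + "\n}\n"
            block = None
        elif block is not None:             # inside a block: collect its body
            block.append(line.rstrip("\n"))
        # anything outside a block is cruft and ignored, as in the awk script

if __name__ == "__main__":
    for supp in unique_suppressions(sys.stdin):
        sys.stdout.write(supp)
```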