From d7b3e0e85b1b51ac6ca42003b1779b598b19ea1b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jean-No=C3=ABl=20Grad?= <jgrad@icp.uni-stuttgart.de>
Date: Fri, 3 Sep 2021 18:32:58 +0200
Subject: [PATCH] Fix typos in comments and docstrings

---
 .../AMRSedimentSettling.cpp                               | 4 ++--
 .../AMRSettlingSphere.cpp                                 | 4 ++--
 apps/benchmarks/CNT/FilmSpecimenGenerator.py              | 2 +-
 .../SphereMovingWithPrescribedVelocity.cpp                | 2 +-
 .../FluidParticleWorkloadDistribution.cpp                 | 4 ++--
 .../FluidParticleWorkloadEvaluation.cpp                   | 2 +-
 apps/benchmarks/GranularGas/ConfigGenerator.py            | 2 +-
 apps/benchmarks/SchaeferTurek/SchaeferTurek.cpp           | 6 +++---
 .../CombinedResolvedUnresolved.cpp                        | 2 +-
 .../LightRisingParticleInFluidAMR.cpp                     | 2 +-
 apps/tutorials/codegen/Heat Equation Kernel.ipynb         | 4 ++--
 apps/tutorials/cuda/01_GameOfLife_cuda.dox                | 2 +-
 apps/tutorials/lbm/01_BasicLBM.dox                        | 6 +++---
 apps/tutorials/lbm/05_BackwardFacingStep.dox              | 8 ++++----
 apps/tutorials/pde/01_SolvingPDE.cpp                      | 2 +-
 utilities/gdbPrettyPrinter/qt4/printers.py                | 4 ++--
 utilities/valgrind-supression/supression.sh               | 4 ++--
 17 files changed, 30 insertions(+), 30 deletions(-)

diff --git a/apps/benchmarks/AdaptiveMeshRefinementFluidParticleCoupling/AMRSedimentSettling.cpp b/apps/benchmarks/AdaptiveMeshRefinementFluidParticleCoupling/AMRSedimentSettling.cpp
index 73e19a030..2b7cdc8a7 100644
--- a/apps/benchmarks/AdaptiveMeshRefinementFluidParticleCoupling/AMRSedimentSettling.cpp
+++ b/apps/benchmarks/AdaptiveMeshRefinementFluidParticleCoupling/AMRSedimentSettling.cpp
@@ -1452,7 +1452,7 @@ int main( int argc, char **argv )
 
          WALBERLA_LOG_INFO_ON_ROOT("Refreshing blockforest...")
 
-         // check refinement criterions and refine/coarsen if necessary
+         // check refinement criteria and refine/coarsen if necessary
          uint_t stampBefore = blocks->getBlockForest().getModificationStamp();
          blocks->refresh();
          uint_t stampAfter = blocks->getBlockForest().getModificationStamp();
@@ -2090,7 +2090,7 @@ int main( int argc, char **argv )
 
             WALBERLA_LOG_INFO_ON_ROOT("Refreshing blockforest...")
 
-            // check refinement criterions and refine/coarsen if necessary
+            // check refinement criteria and refine/coarsen if necessary
             uint_t stampBefore = blocks->getBlockForest().getModificationStamp();
             blocks->refresh();
             uint_t stampAfter = blocks->getBlockForest().getModificationStamp();
diff --git a/apps/benchmarks/AdaptiveMeshRefinementFluidParticleCoupling/AMRSettlingSphere.cpp b/apps/benchmarks/AdaptiveMeshRefinementFluidParticleCoupling/AMRSettlingSphere.cpp
index c13a7c93d..d803a2f85 100644
--- a/apps/benchmarks/AdaptiveMeshRefinementFluidParticleCoupling/AMRSettlingSphere.cpp
+++ b/apps/benchmarks/AdaptiveMeshRefinementFluidParticleCoupling/AMRSettlingSphere.cpp
@@ -929,7 +929,7 @@ int main( int argc, char **argv )
 
    if( !useStaticRefinement && refinementCheckFrequency == 0 && numberOfLevels != 1 )
    {
-      // determine check frequency automatically based on maximum admissable velocity and block sizes
+      // determine check frequency automatically based on maximum admissible velocity and block sizes
       real_t uMax = real_t(0.1);
       real_t refinementCheckFrequencyFinestLevel = ( overlap + real_c(blockSize) - real_t(2) * real_t(FieldGhostLayers) * dx) / uMax;
       refinementCheckFrequency = uint_c( refinementCheckFrequencyFinestLevel / real_t(lbmTimeStepsPerTimeLoopIteration));
@@ -1252,7 +1252,7 @@ int main( int argc, char **argv )
             (*velocityCommunicationScheme)();
          }
 
-         // check refinement criterions and refine/coarsen if necessary
+         // check refinement criteria and refine/coarsen if necessary
          uint_t stampBefore = blocks->getBlockForest().getModificationStamp();
          blocks->refresh();
          uint_t stampAfter = blocks->getBlockForest().getModificationStamp();
diff --git a/apps/benchmarks/CNT/FilmSpecimenGenerator.py b/apps/benchmarks/CNT/FilmSpecimenGenerator.py
index 4b91cd1f3..9bb4c35c6 100644
--- a/apps/benchmarks/CNT/FilmSpecimenGenerator.py
+++ b/apps/benchmarks/CNT/FilmSpecimenGenerator.py
@@ -6,7 +6,7 @@ import os
 
 class Parameter:
     def __init__(self, name, type, defValue="", comment=""):
-        """Propery of a data strcuture
+        """Property of a data structure
 
         Parameters
         ----------
diff --git a/apps/benchmarks/FluidParticleCoupling/SphereMovingWithPrescribedVelocity.cpp b/apps/benchmarks/FluidParticleCoupling/SphereMovingWithPrescribedVelocity.cpp
index 0b9e6ec68..f7cb96702 100644
--- a/apps/benchmarks/FluidParticleCoupling/SphereMovingWithPrescribedVelocity.cpp
+++ b/apps/benchmarks/FluidParticleCoupling/SphereMovingWithPrescribedVelocity.cpp
@@ -878,7 +878,7 @@ int main( int argc, char **argv )
       real_t defaultOmegaBulk = lbm_mesapd_coupling::omegaBulkFromOmega(omega, real_t(1));
       shared_ptr<OmegaBulkAdapter_T> omegaBulkAdapter = make_shared<OmegaBulkAdapter_T>(blocks, omegaBulkFieldID, accessor, defaultOmegaBulk, omegaBulk, adaptionLayerSize, sphereSelector);
       timeloopAfterParticles.add() << Sweep( makeSharedSweep(omegaBulkAdapter), "Omega Bulk Adapter");
-      // initally adapt
+      // initially adapt
       for (auto blockIt = blocks->begin(); blockIt != blocks->end(); ++blockIt) {
          (*omegaBulkAdapter)(blockIt.get());
       }
diff --git a/apps/benchmarks/FluidParticleCouplingWithLoadBalancing/FluidParticleWorkloadDistribution.cpp b/apps/benchmarks/FluidParticleCouplingWithLoadBalancing/FluidParticleWorkloadDistribution.cpp
index 78a8570bc..943d6ee9d 100644
--- a/apps/benchmarks/FluidParticleCouplingWithLoadBalancing/FluidParticleWorkloadDistribution.cpp
+++ b/apps/benchmarks/FluidParticleCouplingWithLoadBalancing/FluidParticleWorkloadDistribution.cpp
@@ -843,7 +843,7 @@ int main( int argc, char **argv )
    auto sphereShape = ss->create<mesa_pd::data::Sphere>( diameter * real_t(0.5) );
    ss->shapes[sphereShape]->updateMassAndInertia(densityRatio);
 
-   std::mt19937 randomGenerator (static_cast<unsigned int>(2610)); // fixed seed: quasi-random and reproducable
+   std::mt19937 randomGenerator (static_cast<unsigned int>(2610)); // fixed seed: quasi-random and reproducible
 
    for( uint_t nSed = 0; nSed < numberOfSediments; ++nSed )
    {
@@ -962,7 +962,7 @@ int main( int argc, char **argv )
 
       if(currentPhase == 1)
       {
-         // damp velocites to avoid too large ones
+         // damp velocities to avoid too large ones
          ps->forEachParticle( useOpenMP, mesa_pd::kernel::SelectLocal(), *accessor,
                               [](const size_t idx, ParticleAccessor_T& ac){
                                  ac.setLinearVelocity(idx, ac.getLinearVelocity(idx) * real_t(0.5));
diff --git a/apps/benchmarks/FluidParticleCouplingWithLoadBalancing/FluidParticleWorkloadEvaluation.cpp b/apps/benchmarks/FluidParticleCouplingWithLoadBalancing/FluidParticleWorkloadEvaluation.cpp
index a78736fff..423af69ce 100644
--- a/apps/benchmarks/FluidParticleCouplingWithLoadBalancing/FluidParticleWorkloadEvaluation.cpp
+++ b/apps/benchmarks/FluidParticleCouplingWithLoadBalancing/FluidParticleWorkloadEvaluation.cpp
@@ -573,7 +573,7 @@ int main( int argc, char **argv )
 
       if(maxPenetrationDepth < overlapLimit) break;
 
-      // reset velocites to avoid too large ones
+      // reset velocities to avoid too large ones
 
       ps->forEachParticle( useOpenMP, mesa_pd::kernel::SelectLocal(), *accessor,
             [](const size_t idx, ParticleAccessor_T& ac){
diff --git a/apps/benchmarks/GranularGas/ConfigGenerator.py b/apps/benchmarks/GranularGas/ConfigGenerator.py
index 6d84f6434..344ef90b6 100644
--- a/apps/benchmarks/GranularGas/ConfigGenerator.py
+++ b/apps/benchmarks/GranularGas/ConfigGenerator.py
@@ -6,7 +6,7 @@ import os
 
 class Parameter:
     def __init__(self, name, type, defValue=""):
-        """Propery of a data strcuture
+        """Property of a data structure
 
         Parameters
         ----------
diff --git a/apps/benchmarks/SchaeferTurek/SchaeferTurek.cpp b/apps/benchmarks/SchaeferTurek/SchaeferTurek.cpp
index cdbd64fbe..20eacd46f 100644
--- a/apps/benchmarks/SchaeferTurek/SchaeferTurek.cpp
+++ b/apps/benchmarks/SchaeferTurek/SchaeferTurek.cpp
@@ -1064,7 +1064,7 @@ void keepInflowOutflowAtTheSameLevel( std::vector< std::pair< const Block *, uin
    uint_t maxInflowLevel( uint_t(0) );
    uint_t maxOutflowLevel( uint_t(0) );
 
-   // In addtion to keeping in- and outflow blocks at the same level, this callback also
+   // In addition to keeping in- and outflow blocks at the same level, this callback also
    // prevents these blocks from coarsening.
 
    for( auto it = minTargetLevels.begin(); it != minTargetLevels.end(); ++it )
@@ -2569,14 +2569,14 @@ void run( const shared_ptr< Config > & config, const LatticeModel_T & latticeMod
                      blockforest::DynamicDiffusionBalance< blockforest::NoPhantomData >( maxIterations, flowIterations ) );
       }
 
-      // add callback functions which are executed after all block data was unpakced after the dynamic load balancing
+      // add callback functions which are executed after all block data was unpacked after the dynamic load balancing
 
       // for blocks that have *not* migrated: store current flag field state (required for lbm::PostProcessing)
       blockforest.addRefreshCallbackFunctionAfterBlockDataIsUnpacked( lbm::MarkerFieldGenerator< LatticeModel_T, field::FlagFieldEvaluationFilter<FlagField_T> >(
                pdfFieldId, markerDataId, flagFieldFilter ) );
       // (re)set boundaries = (re)initialize flag field for every block with respect to the new block structure (the size of neighbor blocks might have changed)
       blockforest.addRefreshCallbackFunctionAfterBlockDataIsUnpacked( blockforest::BlockForest::RefreshCallbackWrappper( boundarySetter ) );
-      // treat boundary-fluid cell convertions
+      // treat boundary-fluid cell conversions
       blockforest.addRefreshCallbackFunctionAfterBlockDataIsUnpacked( lbm::PostProcessing< LatticeModel_T, field::FlagFieldEvaluationFilter<FlagField_T> >(
                pdfFieldId, markerDataId, flagFieldFilter ) );
       // (re)set velocity field (velocity field data is not migrated!)
diff --git a/apps/showcases/CombinedResolvedUnresolved/CombinedResolvedUnresolved.cpp b/apps/showcases/CombinedResolvedUnresolved/CombinedResolvedUnresolved.cpp
index 5e1074ad0..026171328 100644
--- a/apps/showcases/CombinedResolvedUnresolved/CombinedResolvedUnresolved.cpp
+++ b/apps/showcases/CombinedResolvedUnresolved/CombinedResolvedUnresolved.cpp
@@ -295,7 +295,7 @@ class DummySweep
 void emptyFunction() {}
 
 //*******************************************************************************************************************
-/*!\brief Simualtion of a strongly heterogeneous sized particulate flow system using combined resolved and unresolved
+/*!\brief Simulation of a strongly heterogeneous sized particulate flow system using combined resolved and unresolved
  * methods.
  *
  * For the coupling of resolved particles the Momentum Exchange Method (MEM) is used, whereas for the
diff --git a/apps/showcases/LightRisingParticleInFluidAMR/LightRisingParticleInFluidAMR.cpp b/apps/showcases/LightRisingParticleInFluidAMR/LightRisingParticleInFluidAMR.cpp
index 6dcafd552..f15b333c9 100644
--- a/apps/showcases/LightRisingParticleInFluidAMR/LightRisingParticleInFluidAMR.cpp
+++ b/apps/showcases/LightRisingParticleInFluidAMR/LightRisingParticleInFluidAMR.cpp
@@ -599,7 +599,7 @@ int main(int argc, char** argv) {
    WALBERLA_CHECK(!(useCurlCriterion && useVorticityCriterion),
          "Using curl and vorticity criterion together makes no sense.");
 
-   // create base dir if it doesnt already exist
+   // create base dir if it doesn't already exist
    filesystem::path bpath(baseFolder);
    if (!filesystem::exists(bpath)) {
       filesystem::create_directory(bpath);
diff --git a/apps/tutorials/codegen/Heat Equation Kernel.ipynb b/apps/tutorials/codegen/Heat Equation Kernel.ipynb
index 486e7c248..e5ccffbb7 100644
--- a/apps/tutorials/codegen/Heat Equation Kernel.ipynb	
+++ b/apps/tutorials/codegen/Heat Equation Kernel.ipynb	
@@ -206,7 +206,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "Our numeric solver's symbolic representation is now complete! Next, we use pystencils to generate and compile a C implementation of our kernel. The code is generated as shown below, compiled into a shared libary and then bound to `kernel_func`. All unbound sympy symbols (`dx`, `dt` and `kappa`) as well as the fields `u` and `u_tmp` are arguments to the generated kernel function. "
+    "Our numeric solver's symbolic representation is now complete! Next, we use pystencils to generate and compile a C implementation of our kernel. The code is generated as shown below, compiled into a shared library and then bound to `kernel_func`. All unbound sympy symbols (`dx`, `dt` and `kappa`) as well as the fields `u` and `u_tmp` are arguments to the generated kernel function. "
    ]
   },
   {
@@ -355,7 +355,7 @@
    "source": [
     "### Prototype Simulation\n",
     "\n",
-    "We can set up and run a simple simulation wich the generated kernel right here. The first step is to set up the fields and simulation parameters."
+    "We can set up and run a simple simulation with the generated kernel right here. The first step is to set up the fields and simulation parameters."
    ]
   },
   {
diff --git a/apps/tutorials/cuda/01_GameOfLife_cuda.dox b/apps/tutorials/cuda/01_GameOfLife_cuda.dox
index 4e654d4c0..7bbb50fe4 100644
--- a/apps/tutorials/cuda/01_GameOfLife_cuda.dox
+++ b/apps/tutorials/cuda/01_GameOfLife_cuda.dox
@@ -130,7 +130,7 @@ CommScheme communication( blocks );
 communication.addDataToCommunicate( make_shared<field::communication::UniformMPIDatatypeInfo<GPUField> > (gpuFieldSrcID) );
 \endcode
 
-This scheme also supports heterogenous simulations, i.e. using a CPU field on
+This scheme also supports heterogeneous simulations, i.e. using a CPU field on
 some processes and a GPU field on other processes.
 
 */
diff --git a/apps/tutorials/lbm/01_BasicLBM.dox b/apps/tutorials/lbm/01_BasicLBM.dox
index c18192170..981e26465 100644
--- a/apps/tutorials/lbm/01_BasicLBM.dox
+++ b/apps/tutorials/lbm/01_BasicLBM.dox
@@ -90,7 +90,7 @@ A lattice model defines the basic ingredients needed for an LBM simulation:
 - **collision model**: The first template parameter for a lattice model is a collision model.
   The collision or relaxation model defines which method to use in the collide step. Here, we use the single relaxation time
   model (SRT) also called BGK model: lbm::collision_model::SRT. For other options, see the file lbm/lattice_model/CollisionModel.h
-- There are further template parameters specifing compressibility, force model, etc.
+- There are further template parameters specifying compressibility, force model, etc.
   These arguments have default parameters which we use here.
 
 For a more detailed description of lattice models, see lbm::LatticeModelBase
@@ -151,7 +151,7 @@ boundary::BoundaryHandling. Otherwise the `near boundary` and `domain` flags are
 
 The boundary handling is a heavily templated part of waLBerla since it contains performance critical code
 and at the same time has to be very flexible, i.e., it should be easy to write new boundary conditions.
-By using template concepts (compile-time polymorphism) instead of inheritence (runtime polymorphism) the compiler is able to
+By using template concepts (compile-time polymorphism) instead of inheritance (runtime polymorphism) the compiler is able to
 resolve all function calls at compile time and can do optimizations like function inlining.
 To make setting up a boundary handling easier, a convenience factory class lbm::DefaultBoundaryHandlingFactory exists
 that creates a boundary::BoundaryHandling with six often used boundary conditions. Together with the `near boundary` and
@@ -268,7 +268,7 @@ timeloop.addFuncAfterTimeStep( makeSharedFunctor( field::makeStabilityChecker< P
                                "LBM stability check" );
 \endcode
 
-Additionally, a small functor is scheduled that periodically prints the estimated remaining time of the simultion:
+Additionally, a small functor is scheduled that periodically prints the estimated remaining time of the simulation:
 
 \code
 timeloop.addFuncAfterTimeStep( timing::RemainingTimeLogger( timeloop.getNrOfTimeSteps(), remainingTimeLoggerFrequency ),
diff --git a/apps/tutorials/lbm/05_BackwardFacingStep.dox b/apps/tutorials/lbm/05_BackwardFacingStep.dox
index 52f7160d5..aaafc2d48 100644
--- a/apps/tutorials/lbm/05_BackwardFacingStep.dox
+++ b/apps/tutorials/lbm/05_BackwardFacingStep.dox
@@ -7,7 +7,7 @@ namespace walberla {
 
 \section tutorial05_overview Overview
 
-The aim of this tutorial is to show how to build and solve the backward-facing step model using lattice Boltzman method in waLBerla.
+The aim of this tutorial is to show how to build and solve the backward-facing step model using the lattice Boltzmann method in waLBerla.
 The "01_BasicLBM" case is used as the foundation of the current work. Therefore, most of the functionalities have already been introduced and discussed in LBM 1 tutorial.
 Here the main focus is on the following areas:
 
@@ -54,13 +54,13 @@ Finally, viscosity consequently **omega** are calculated with the Reynolds numbe
 \section tutorial05_geometry Geometry
 
 Since the step geometry is a plain rectangular area, the simplest approach is to create it by geometry module in walberla.
-This module offers capability to read boundaries of a random geometry from images, voxel files, coordinates of verticies, etc.
+This module offers the capability to read boundaries of an arbitrary geometry from images, voxel files, coordinates of vertices, etc.
 Using this module, obstacles of basic shapes could be conveniently positioned inside the domain.
 It is also easier to have the program to read the geometry specifications from the Boundaries section of the configuration file.
 This is implemented by reading and storing the Boundaries block of the configuration file in 'boundariesConfig' object and passing it to a convenience function provided in the geometry class to initialize the boundaries.
 
 \snippet 05_BackwardFacingStep.cpp geomboundary
-Here a subblock 'Body' is created inside 'Boundaries' section in the configuration file in order to create a box (rectangle in 2D) using two diagonal verticies.
+Here a subblock 'Body' is created inside 'Boundaries' section in the configuration file in order to create a box (rectangle in 2D) using two diagonal vertices.
 
 \snippet 05_BackwardFacingStep.prm geometry
 
@@ -77,7 +77,7 @@ This mechanism is implemented by a functor named `ReattachmentLengthFinder` and
 
 After running the program, the locations of reattachment against timestep are written to 'ReattachmentLengthLogging_Re_[Re].txt' in the working directory.
 Note that there might be more than one reattachment location before the flow fully develops along the channel, and all are given in the file.
-This simply means that it is expected to have multiple occurances of seperation and reattachment at the same time along the bottom boundary of the channel following the step in the early stages.
+This simply means that it is expected to have multiple occurrences of separation and reattachment at the same time along the bottom boundary of the channel following the step in the early stages.
 However, most of them are smeared later as the flow starts to develop.
 The logging frequency can also be adjusted by 'checkFrequency' which is passed to the `ReattachmentLengthFinder` functor.
 
diff --git a/apps/tutorials/pde/01_SolvingPDE.cpp b/apps/tutorials/pde/01_SolvingPDE.cpp
index 3091c7597..9b5f0477a 100644
--- a/apps/tutorials/pde/01_SolvingPDE.cpp
+++ b/apps/tutorials/pde/01_SolvingPDE.cpp
@@ -57,7 +57,7 @@ void initBC( const shared_ptr< StructuredBlockStorage > & blocks, const BlockDat
          auto src = block->getData< ScalarField >( srcID );
          auto dst = block->getData< ScalarField >( dstID );
 
-         // obtain a CellInterval object that holds information about the number of cells in x,y,z direction of the field inlcuding ghost layers
+         // obtain a CellInterval object that holds information about the number of cells in x,y,z direction of the field including ghost layers
          // Since src and dst have the same size, one object is enough.
          CellInterval xyz = src->xyzSizeWithGhostLayer();
 
diff --git a/utilities/gdbPrettyPrinter/qt4/printers.py b/utilities/gdbPrettyPrinter/qt4/printers.py
index 169f283d1..07a930384 100644
--- a/utilities/gdbPrettyPrinter/qt4/printers.py
+++ b/utilities/gdbPrettyPrinter/qt4/printers.py
@@ -243,7 +243,7 @@ class QMapPrinter:
             ret += gdb.lookup_type('void').pointer().sizeof
 
             # but because of data alignment the value can be higher
-            # so guess it's aliged by sizeof(void*)
+            # so guess it's aligned by sizeof(void*)
             # TODO: find a real solution for this problem
             ret += ret % gdb.lookup_type('void').pointer().sizeof
 
@@ -494,7 +494,7 @@ class QUrlPrinter:
             return self.val['d']['encodedOriginal']
         except RuntimeError as error:
             print(error)
-            # if no debug information is avaliable for Qt, try guessing the correct address for encodedOriginal
+            # if no debug information is available for Qt, try guessing the correct address for encodedOriginal
             # problem with this is that if QUrlPrivate members get changed, this fails
             offset = gdb.lookup_type('int').sizeof
             offset += offset % gdb.lookup_type('void').pointer().sizeof  # alignment
diff --git a/utilities/valgrind-supression/supression.sh b/utilities/valgrind-supression/supression.sh
index 2388ec28b..3f6b11434 100644
--- a/utilities/valgrind-supression/supression.sh
+++ b/utilities/valgrind-supression/supression.sh
@@ -11,7 +11,7 @@
 # The checksum is used as an index in a different array. If an item with that index already exists the suppression must be a duplicate and is discarded.
  
 BEGIN { suppression=0; md5sum = "md5sum" }
-  # If the line begins with '{', it's the start of a supression; so set the var and initialise things
+  # If the line begins with '{', it's the start of a suppression; so set the var and initialise things
   /^{/  {
            suppression=1;  i=0; next 
         }
@@ -24,7 +24,7 @@ BEGIN { suppression=0; md5sum = "md5sum" }
              delete supparray     # We don't want subsequent suppressions to append to it!
            }
      }
-  # Otherwise, it's a normal line. If we're inside a supression, store it, and pipe it to md5sum. Otherwise it's cruft, so ignore it
+  # Otherwise, it's a normal line. If we're inside a suppression, store it, and pipe it to md5sum. Otherwise it's cruft, so ignore it
      { if (suppression)
          { 
             supparray[++i] = $0
-- 
GitLab