From 8d12e04e26a8bdcc1c4fe7562fb155eaffff4410 Mon Sep 17 00:00:00 2001
From: markus <markus.holzer@fau.de>
Date: Fri, 17 Jul 2020 20:29:31 +0200
Subject: [PATCH] Adapt the remaining waLBerla Python files to the flake8 linter

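The changes are almost entirely mechanical reformatting so that the remaining
files pass the flake8 configuration in .flake8 (max-line-length=120,
ignore = W503 C901 E741): whitespace is fixed around operators and after
commas, overlong lines are wrapped, two blank lines are enforced between
top-level definitions, unused imports are commented out or marked with
noqa:F401, and continuation lines now break before binary operators instead
of after them (W503, the break-before warning, is in the ignore list). A
representative before/after from pystencils_walberla/tests/test_walberla_gen.py:

    # before
    dst[0, 0, 0] @= (src[1, 0, 0] + src[-1, 0, 0] +
                     src[0, 1, 0] + src[0, -1, 0] +
                     src[0, 0, 1] + src[0, 0, -1]) / (6 * h ** 2)

    # after: the operator leads each continuation line
    dst[0, 0, 0] @= (src[1, 0, 0] + src[-1, 0, 0]
                     + src[0, 1, 0] + src[0, -1, 0]
                     + src[0, 0, 1] + src[0, 0, -1]) / (6 * h ** 2)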
---
 .flake8                                       |   1 +
 python/lbmpy_walberla/boundary.py             |   2 +-
 .../tests/test_walberla_codegen.py            |   2 +-
 .../lbmpy_walberla/walberla_lbm_generation.py |  34 +-
 python/pystencils_walberla/boundary.py        |   4 +-
 .../pystencils_walberla/cmake_integration.py  |   3 +-
 python/pystencils_walberla/codegen.py         |   3 +-
 python/pystencils_walberla/jinja_filters.py   |   2 +-
 .../tests/test_walberla_gen.py                |   6 +-
 python/waLBerla/__init__.py                   |  34 +-
 python/waLBerla/callbacks.py                  |  22 +-
 python/waLBerla/core_extension.py             |  50 +-
 python/waLBerla/cuda_extension.py             |  14 +-
 python/waLBerla/evaluation/__init__.py        |   4 +-
 .../waLBerla/evaluation/scenariocreators.py   |  85 +--
 python/waLBerla/evaluation/timeseries.py      |  68 ++-
 python/waLBerla/field_extension.py            |  68 +--
 python/waLBerla/geometry_setup.py             | 110 ++--
 python/waLBerla/plot.py                       | 128 ++---
 python/waLBerla/timeloop_extension.py         |  39 +-
 python/waLBerla/tools/jobscripts/__init__.py  |  13 +-
 python/waLBerla/tools/jobscripts/hornet.py    |  69 ++-
 .../tools/jobscripts/pizdaint_hybrid.py       |   2 +-
 python/waLBerla/tools/jobscripts/supermuc.py  |  95 ++--
 .../tools/jobscripts/supermuc_phase2.py       | 104 ++--
 .../waLBerla/tools/jobscripts/supermucng.py   |  18 +-
 .../tools/lbm_unitconversion/__init__.py      |   2 +
 .../lbm_unitconversion/lattice_factors.py     |  42 +-
 .../lbm_unitconversion/pint_unit_converter.py | 116 +++--
 python/waLBerla/tools/report/__init__.py      |  10 +-
 python/waLBerla/tools/report/report.py        | 273 +++++-----
 .../tools/source_checker/ParsedCodeFile.py    | 230 ++++-----
 .../tools/source_checker/SourceChecker.py     | 360 +++++++------
 python/waLBerla/tools/source_checker/Utils.py |  29 +-
 .../tools/source_checker/walberla_check.py    | 488 +++++++++---------
 python/waLBerla/tools/sqlitedb/__init__.py    |   4 +-
 python/waLBerla/tools/sqlitedb/insert.py      | 163 +++---
 python/waLBerla/tools/sqlitedb/merge.py       | 119 ++---
 python/waLBerla_docs/doxylink.py              |  78 +--
 .../material/matplotlib_setup.py              |  68 +--
 python/waLBerla_tests/test_blockforest.py     |  34 +-
 python/waLBerla_tests/test_core.py            |  48 +-
 python/waLBerla_tests/test_cuda_comm.py       |  18 +-
 python/waLBerla_tests/test_field.py           |  88 ++--
 python/waLBerla_tests/test_simpleLBM.py       | 201 ++++----
 .../tools/test_lbm_unitconversion.py          |  79 ++-
 python/waLBerla_tests/tools/test_sqlitedb.py  |  19 +-
 src/stencil/generate.py                       | 272 +++++-----
 .../blockforest/communication/timing/plot.py  | 245 +++++----
 tests/cuda/codegen/CudaJacobiKernel.py        |   7 +-
 tests/cuda/codegen/CudaPoisson.py             |   7 +-
 tests/field/PythonExportTest.py               |  51 +-
 tests/field/codegen/JacobiKernel.py           |   6 +-
 tests/field/codegen/Poisson.py                |   6 +-
 .../FieldLayoutAndVectorizationTest.py        |  17 +-
 tests/lbm/codegen/FluctuatingMRT.py           |   4 +-
 tests/lbm/codegen/LbCodeGenerationExample.py  |   6 +-
 tests/python_coupling/BasicDatatypeTest.py    |  42 +-
 tests/python_coupling/CallbackTest.py         |  17 +-
 tests/python_coupling/ConfigFromPythonTest.py |  16 +-
 tests/python_coupling/FieldExportTest.py      |   9 +-
 utilities/bashhelper/correspondingDirs.py     |  19 +-
 utilities/bashhelper/createShortcuts.py       | 107 ++--
 utilities/bashhelper/folderComplete.py        |  18 +-
 utilities/filterCompileCommands.py            |  47 +-
 utilities/findMissingIncludeGuards.py         |  16 +-
 utilities/gdbPrettyPrinter/STLv6/printers.py  | 257 ++++-----
 .../gdbPrettyPrinter/boost_1_40/printers.py   |  64 ++-
 utilities/gdbPrettyPrinter/qt4/printers.py    | 258 ++++-----
 .../gdbPrettyPrinter/walberla/printers.py     |  72 +--
 utilities/moduleDependenciesToDot.py          |  15 +-
 71 files changed, 2540 insertions(+), 2487 deletions(-)
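
Note: a quick way to verify the result (assuming flake8 is installed and is run
from the repository root, where it picks up the .flake8 configuration
automatically) is to lint the touched directories, e.g.:

    flake8 python/ utilities/ tests/ src/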

diff --git a/.flake8 b/.flake8
index 9bd296133..31de67a53 100644
--- a/.flake8
+++ b/.flake8
@@ -2,4 +2,5 @@
 max-line-length=120
 exclude=apps/showcases/Mixer/GenerateModule.py, # contains only statements
-        apps/benchmarks/FieldCommunication/config.py # just a config file 
+        apps/benchmarks/FieldCommunication/config.py, # just a config file
+        utilities/bashhelper/createShortcuts.py # contains a lot of really long strings
 ignore = W503 C901 E741
diff --git a/python/lbmpy_walberla/boundary.py b/python/lbmpy_walberla/boundary.py
index b3fbb8c31..ba1d697b9 100644
--- a/python/lbmpy_walberla/boundary.py
+++ b/python/lbmpy_walberla/boundary.py
@@ -52,7 +52,7 @@ def generate_boundary(generation_context, class_name, boundary_object, lb_method
         'StructDeclaration': struct_from_numpy_dtype(struct_name, index_struct_dtype),
         'kernel': KernelInfo(kernel),
         'stencil_info': stencil_info,
-        'inverse_directions' : inv_dirs,
+        'inverse_directions': inv_dirs,
         'dim': lb_method.dim,
         'target': target,
         'namespace': 'lbm',
diff --git a/python/lbmpy_walberla/tests/test_walberla_codegen.py b/python/lbmpy_walberla/tests/test_walberla_codegen.py
index 60b86fa57..391743468 100644
--- a/python/lbmpy_walberla/tests/test_walberla_codegen.py
+++ b/python/lbmpy_walberla/tests/test_walberla_codegen.py
@@ -84,7 +84,7 @@ class WalberlaLbmpyCodegenTest(unittest.TestCase):
             # the collision rule of the LB method where some advanced features are used
             collision_rule = create_lb_collision_rule(
                 stencil='D3Q19', compressible=True, fluctuating={'seed': 0, 'temperature': 1e-6},
-                method='mrt', relaxation_rates=[omega_shear]*19,
+                method='mrt', relaxation_rates=[omega_shear] * 19,
                 force_model='guo', force=force_field.center_vector,
                 optimization={'cse_global': False}
             )
diff --git a/python/lbmpy_walberla/walberla_lbm_generation.py b/python/lbmpy_walberla/walberla_lbm_generation.py
index a191c47e9..a7563b76c 100644
--- a/python/lbmpy_walberla/walberla_lbm_generation.py
+++ b/python/lbmpy_walberla/walberla_lbm_generation.py
@@ -1,4 +1,4 @@
-import warnings
+# import warnings
 
 import numpy as np
 import sympy as sp
@@ -6,7 +6,7 @@ from jinja2 import Environment, PackageLoader, StrictUndefined, Template
 from sympy.tensor import IndexedBase
 
 import pystencils as ps
-from lbmpy.creationfunctions import create_lb_update_rule, update_with_default_parameters
+# from lbmpy.creationfunctions import create_lb_update_rule, update_with_default_parameters
 from lbmpy.fieldaccess import CollideOnlyInplaceAccessor, StreamPullTwoFieldsAccessor
 from lbmpy.relaxationrates import relaxation_rate_scaling
 from lbmpy.stencils import get_stencil
@@ -52,7 +52,7 @@ def __lattice_model(generation_context, class_name, lb_method, stream_collide_as
     macroscopic_velocity_shift = None
     if force_model:
         if hasattr(force_model, 'macroscopic_velocity_shift'):
-            macroscopic_velocity_shift = [expression_to_code(e, "lm.", ["rho"],dtype=dtype)
+            macroscopic_velocity_shift = [expression_to_code(e, "lm.", ["rho"], dtype=dtype)
                                           for e in force_model.macroscopic_velocity_shift(rho_sym)]
 
     cqc = lb_method.conserved_quantity_computation
@@ -67,7 +67,8 @@ def __lattice_model(generation_context, class_name, lb_method, stream_collide_as
     required_headers = get_headers(stream_collide_ast)
 
     if refinement_scaling:
-        refinement_scaling_info = [ (e0,e1,expression_to_code(e2, '', dtype=dtype)) for e0,e1,e2 in refinement_scaling.scaling_info ]
+        refinement_scaling_info = [(e0, e1, expression_to_code(e2, '', dtype=dtype)) for e0, e1, e2 in
+                                   refinement_scaling.scaling_info]
         # append '_' to entries since they are used as members
         for i in range(len(refinement_scaling_info)):
             updated_entry = (refinement_scaling_info[i][0],
@@ -87,7 +88,7 @@ def __lattice_model(generation_context, class_name, lb_method, stream_collide_as
         'Q': len(lb_method.stencil),
         'compressible': lb_method.conserved_quantity_computation.compressible,
         'weights': ",".join(str(w.evalf()) + constant_suffix for w in lb_method.weights),
-        'inverse_weights': ",".join(str((1/w).evalf()) + constant_suffix for w in lb_method.weights),
+        'inverse_weights': ",".join(str((1 / w).evalf()) + constant_suffix for w in lb_method.weights),
 
         'equilibrium_from_direction': stencil_switch_statement(lb_method.stencil, equilibrium),
         'symmetric_equilibrium_from_direction': stencil_switch_statement(lb_method.stencil, symmetric_equilibrium),
@@ -109,7 +110,9 @@ def __lattice_model(generation_context, class_name, lb_method, stream_collide_as
         'target': 'cpu',
         'namespace': 'lbm',
         'headers': required_headers,
-        'need_block_offsets': ['block_offset_{}'.format(i) in [param.symbol.name for param in stream_collide_ast.get_parameters()] for i in range(3)],
+        'need_block_offsets': [
+            'block_offset_{}'.format(i) in [param.symbol.name for param in stream_collide_ast.get_parameters()] for i in
+            range(3)],
     }
 
     env = Environment(loader=PackageLoader('lbmpy_walberla'), undefined=StrictUndefined)
@@ -124,7 +127,6 @@ def __lattice_model(generation_context, class_name, lb_method, stream_collide_as
 
 def generate_lattice_model(generation_context, class_name, collision_rule, field_layout='zyxf', refinement_scaling=None,
                            **create_kernel_params):
-
     # usually a numpy layout is chosen by default i.e. xyzf - which is bad for waLBerla where at least the spatial
     # coordinates should be ordered in reverse direction i.e. zyx
     is_float = not generation_context.double_accuracy
@@ -144,7 +146,8 @@ def generate_lattice_model(generation_context, class_name, collision_rule, field
         create_kernel_params['cpu_vectorize_info']['assume_inner_stride_one'] = False
 
     src_field = ps.Field.create_generic('pdfs', dim, dtype, index_dimensions=1, layout=field_layout, index_shape=(q,))
-    dst_field = ps.Field.create_generic('pdfs_tmp', dim, dtype, index_dimensions=1, layout=field_layout, index_shape=(q,))
+    dst_field = ps.Field.create_generic('pdfs_tmp', dim, dtype, index_dimensions=1, layout=field_layout,
+                                        index_shape=(q,))
 
     stream_collide_update_rule = create_lbm_kernel(collision_rule, src_field, dst_field, StreamPullTwoFieldsAccessor())
     stream_collide_ast = create_kernel(stream_collide_update_rule, **create_kernel_params)
@@ -156,7 +159,8 @@ def generate_lattice_model(generation_context, class_name, collision_rule, field
     collide_ast.function_name = 'kernel_collide'
     collide_ast.assumed_inner_stride_one = create_kernel_params['cpu_vectorize_info']['assume_inner_stride_one']
 
-    stream_update_rule = create_stream_pull_only_kernel(lb_method.stencil, None, 'pdfs', 'pdfs_tmp', field_layout, dtype)
+    stream_update_rule = create_stream_pull_only_kernel(lb_method.stencil, None, 'pdfs', 'pdfs_tmp', field_layout,
+                                                        dtype)
     stream_ast = create_kernel(stream_update_rule, **create_kernel_params)
     stream_ast.function_name = 'kernel_stream'
     stream_ast.assumed_inner_stride_one = create_kernel_params['cpu_vectorize_info']['assume_inner_stride_one']
@@ -256,7 +260,7 @@ def field_and_symbol_substitute(expr, variable_prefix="lm.", variables_without_p
     return expr.subs(substitutions)
 
 
-def expression_to_code(expr, variable_prefix="lm.", variables_without_prefix=[],dtype="double"):
+def expression_to_code(expr, variable_prefix="lm.", variables_without_prefix=[], dtype="double"):
     """
     Takes a sympy expression and creates a C code string from it. Replaces field accesses by
     walberla field accesses i.e. field_W^1 -> field->get(-1, 0, 0, 1)
@@ -266,7 +270,8 @@ def expression_to_code(expr, variable_prefix="lm.", variables_without_prefix=[],
     :param variables_without_prefix: these variables are not prefixed
     :return: code string
     """
-    return cpp_printer.doprint(type_expr(field_and_symbol_substitute(expr, variable_prefix, variables_without_prefix),dtype=dtype))
+    return cpp_printer.doprint(
+        type_expr(field_and_symbol_substitute(expr, variable_prefix, variables_without_prefix), dtype=dtype))
 
 
 def type_expr(eq, dtype):
@@ -282,7 +287,6 @@ def type_expr(eq, dtype):
 
 
 def equations_to_code(equations, variable_prefix="lm.", variables_without_prefix=[], dtype="double"):
-
     if isinstance(equations, AssignmentCollection):
         equations = equations.all_assignments
 
@@ -291,9 +295,11 @@ def equations_to_code(equations, variable_prefix="lm.", variables_without_prefix
     result = []
     left_hand_side_names = [e.lhs.name for e in equations]
     for eq in equations:
-        assignment = SympyAssignment(type_expr(eq.lhs,dtype=dtype),
+        assignment = SympyAssignment(type_expr(eq.lhs, dtype=dtype),
                                      type_expr(field_and_symbol_substitute(eq.rhs, variable_prefix,
-                                                                 variables_without_prefix + left_hand_side_names),dtype=dtype))
+                                                                           variables_without_prefix
+                                                                           + left_hand_side_names),
+                                               dtype=dtype))
         result.append(c_backend(assignment))
     return "\n".join(result)
 
diff --git a/python/pystencils_walberla/boundary.py b/python/pystencils_walberla/boundary.py
index 6a93733e2..1984fbfc5 100644
--- a/python/pystencils_walberla/boundary.py
+++ b/python/pystencils_walberla/boundary.py
@@ -52,7 +52,7 @@ def generate_staggered_boundary(generation_context, class_name, boundary_object,
         'StructDeclaration': struct_from_numpy_dtype(struct_name, index_struct_dtype),
         'kernel': KernelInfo(kernel),
         'stencil_info': stencil_info,
-        'inverse_directions' : inv_dirs,
+        'inverse_directions': inv_dirs,
         'dim': dim,
         'target': target,
         'namespace': 'pystencils',
@@ -111,7 +111,7 @@ def generate_staggered_flux_boundary(generation_context, class_name, boundary_ob
         'StructDeclaration': struct_from_numpy_dtype(struct_name, index_struct_dtype),
         'kernel': KernelInfo(kernel),
         'stencil_info': stencil_info,
-        'inverse_directions' : inv_dirs,
+        'inverse_directions': inv_dirs,
         'dim': dim,
         'target': target,
         'namespace': 'pystencils',
diff --git a/python/pystencils_walberla/cmake_integration.py b/python/pystencils_walberla/cmake_integration.py
index 6c842838b..d8b7e615c 100644
--- a/python/pystencils_walberla/cmake_integration.py
+++ b/python/pystencils_walberla/cmake_integration.py
@@ -32,7 +32,8 @@ class CodeGeneration:
             written = set(os.path.realpath(f) for f in self.context.files_written)
             only_in_cmake = expected - written
             only_generated = written - expected
-            error_message = "Generated files (OUT_FILES) specified not correctly in cmake with 'waLBerla_generate_target_from_python'\n"
+            error_message = "Generated files (OUT_FILES) specified not correctly" \
+                            + "in cmake with 'waLBerla_generate_target_from_python'\n"
             if only_in_cmake:
                 error_message += "Files only specified in CMake {}\n".format(
                     [os.path.basename(p) for p in only_in_cmake])
diff --git a/python/pystencils_walberla/codegen.py b/python/pystencils_walberla/codegen.py
index 7b94dc955..a314baf78 100644
--- a/python/pystencils_walberla/codegen.py
+++ b/python/pystencils_walberla/codegen.py
@@ -132,7 +132,7 @@ def generate_pack_info_from_kernel(generation_context, class_name: str, assignme
         class_name: name of the generated class
         assignments: list of assignments from the compute kernel - generates PackInfo for "pull" part only
                      i.e. the kernel is expected to only write to the center
-        kind:                      
+        kind: either 'push' or 'pull'
         **create_kernel_params: remaining keyword arguments are passed to `pystencils.create_kernel`
     """
     assert kind in ('push', 'pull')
@@ -339,6 +339,7 @@ def get_vectorize_instruction_set(generation_context):
     else:
         return None
 
+
 def default_create_kernel_parameters(generation_context, params):
     default_dtype = "float64" if generation_context.double_accuracy else 'float32'
 
diff --git a/python/pystencils_walberla/jinja_filters.py b/python/pystencils_walberla/jinja_filters.py
index b05eefd22..60903be9e 100644
--- a/python/pystencils_walberla/jinja_filters.py
+++ b/python/pystencils_walberla/jinja_filters.py
@@ -1,6 +1,6 @@
 import jinja2
 import sympy as sp
-import re
+# import re
 
 from pystencils import TypedSymbol
 from pystencils.backends.cbackend import generate_c
diff --git a/python/pystencils_walberla/tests/test_walberla_gen.py b/python/pystencils_walberla/tests/test_walberla_gen.py
index 3ffe156e9..939d7af55 100644
--- a/python/pystencils_walberla/tests/test_walberla_gen.py
+++ b/python/pystencils_walberla/tests/test_walberla_gen.py
@@ -31,9 +31,9 @@ class CodegenTest(unittest.TestCase):
 
                     @ps.kernel
                     def kernel_func():
-                        dst[0, 0, 0] @= (src[1, 0, 0] + src[-1, 0, 0] +
-                                         src[0, 1, 0] + src[0, -1, 0] +
-                                         src[0, 0, 1] + src[0, 0, -1]) / (6 * h ** 2)
+                        dst[0, 0, 0] @= (src[1, 0, 0] + src[-1, 0, 0]
+                                         + src[0, 1, 0] + src[0, -1, 0]
+                                         + src[0, 0, 1] + src[0, 0, -1]) / (6 * h ** 2)
 
                     generate_sweep(ctx, 'JacobiKernel3D', kernel_func, field_swaps=[(src, dst)])
 
diff --git a/python/waLBerla/__init__.py b/python/waLBerla/__init__.py
index 3de7ea24d..e9527a266 100644
--- a/python/waLBerla/__init__.py
+++ b/python/waLBerla/__init__.py
@@ -1,17 +1,17 @@
-from .callbacks import callback, ScenarioManager, memberCallback
-from .callbacks import memberCallback as member_callback  # deprecated, was renamed to memberCallback
+from .callbacks import callback, ScenarioManager, memberCallback  # noqa:F401
+# deprecated, was renamed to memberCallback
+from .callbacks import memberCallback as member_callback  # noqa:F401
 
 import sys
 
-
-
-
 try:
-    from .walberla_cpp import *
+    from .walberla_cpp import field, cuda, geometry, lbm, postprocessing, timeloop, mpi
+
     cpp_available = True
 except ImportError:
     try:
-        from walberla_cpp import *
+        from walberla_cpp import field, cuda, geometry, lbm, postprocessing, timeloop, mpi
+
         cpp_available = True
     except ImportError:
         cpp_available = False
@@ -30,21 +30,24 @@ except ImportError:
         thismodule.log_warning = print
 
 if cpp_available:
-    from .core_extension  import extend as extend_core
+    from .core_extension import extend as extend_core
+
     thismodule = sys.modules[__name__]
     extend_core(thismodule)
 
-    if 'field' in globals(): # check if field was exported
+    if 'field' in globals():  # check if field was exported
         # Update modules dict to be able to write e.g. from waLBerla import field
         # otherwise "field" would only be a scope not a module
         sys.modules[__name__ + '.field'] = field
         # extend the C++ module with some python functions
         from .field_extension import extend as extend_field
-        extend_field( field     )
+
+        extend_field(field)
     if 'cuda' in globals():
         sys.modules[__name__ + '.cuda'] = cuda
         from .cuda_extension import extend as extend_cuda
-        extend_cuda( cuda )
+
+        extend_cuda(cuda)
     if 'geometry' in globals():
         sys.modules[__name__ + '.geometry'] = geometry
     if 'lbm' in globals():
@@ -56,7 +59,10 @@ if cpp_available:
     if 'timeloop' in globals():
         sys.modules[__name__ + '.timeloop'] = timeloop
         from .timeloop_extension import extend as extend_timeloop
-        extend_timeloop( timeloop )
+
+        extend_timeloop(timeloop)
 else:
-    class Dummy:  pass
-    callbacks= Dummy()
+    class Dummy:
+        pass
+
+    callbacks = Dummy()
diff --git a/python/waLBerla/callbacks.py b/python/waLBerla/callbacks.py
index 23fed845b..8b24c6e0e 100644
--- a/python/waLBerla/callbacks.py
+++ b/python/waLBerla/callbacks.py
@@ -7,26 +7,26 @@ This C++ class can call a Python function identified by a string using this C++
     python_coupling::PythonCallback callback ( "someMagicString" );
     // expose some data here and pass them as arguments ( see C++ documentation for details)
 
-    
+
 There are two ways to mark a python function to be called by this callback.
 The first option are normal function callbacks::
- 
+
     @waLBerla.callback("someMagicString")
     def someArbitraryName( parameter1 ):
         pass
 
-    
+
 More advanced are callback classes, which can carry state::
 
     class MyScenario:
         def __init__( self, someState ):
             self._someState = someState
-            
+
         @waLBerla.memberCallback
         def someMagicString( self ):
             # react here according to the state
             pass
-    
+
     scenarios = waLBerla.ScenarioManager()
     scenarios.add( MyScenario() )
 
@@ -38,7 +38,7 @@ Internals:
 ^^^^^^^^^
 
 The C++ waLBerla module walberla_cpp has a callbacks object.
-To register a certain python function as callback it has to be set as attribute of this object: 
+To register a certain Python function as a callback it has to be set as an attribute of this object:
 ``setattr( walberla_cpp.callbacks, "someMagicString", theCallbackFunction)``
 
 
@@ -86,14 +86,15 @@ class ScenarioManager:
     """Use this class to simulate multiple scenarios
        A scenario is an instance of a class with member callbacks.
        See docstring of this module for an example.
-       
+
        Internals:
            ScenarioManager is driven by "config" callbacks from the C++ code.
-              ``for( auto configIt = python_coupling::configBegin(argc, argv); configIt != python_coupling::configEnd(); ++configIt )``
+              ``for( auto configIt = python_coupling::configBegin(argc, argv); configIt != python_coupling::configEnd();
+                     ++configIt )``
            Activation means to register the _configLoopCallback as 'config' waLBerla callback function
            which is called when a new scenario is expected.
           When config is called again, the callbacks of the next scenario are activated.
-            
+
     """
 
     def __init__(self):
@@ -135,7 +136,8 @@ class ScenarioManager:
                 setattr(walberla_cpp.callbacks, callback_name, bound_callback)
 
             if 'config' not in callbacks:
-                walberla_cpp.log_warning_on_root("Error: Registered Scenario of class '%s' has no 'config' callback. Skipping... " % (type(sc),))
+                walberla_cpp.log_warning_on_root(
+                    "Error: Registered Scenario of class '%s' has no 'config' callback. Skipping... " % (type(sc),))
                 return None
 
             config = sc.config(*args, **kwargs)
diff --git a/python/waLBerla/core_extension.py b/python/waLBerla/core_extension.py
index e25e06d1b..f06fefa68 100644
--- a/python/waLBerla/core_extension.py
+++ b/python/waLBerla/core_extension.py
@@ -3,59 +3,62 @@ try:
 except ImportError:
     import walberla_cpp
 
+
 class SliceMaker(object):
     def __getitem__(self, item):
         return item
+
+
 makeSlice = SliceMaker()
 
 
-def normalizeSlice( slices, sizes ):
+def normalizeSlice(slices, sizes):
     """Converts slices with floating point entries to integer slices"""
-    assert( len(slices) == len(sizes) )
+    assert (len(slices) == len(sizes))
 
     result = []
 
-    for s, size in zip( slices, sizes ):
+    for s, size in zip(slices, sizes):
         if type(s) is int:
-            result.append( s )
+            result.append(s)
             continue
         if type(s) is float:
-            result.append( int( s*size ) )
+            result.append(int(s * size))
             continue
-            
-        assert( type(s) is slice )
-        
+
+        assert (type(s) is slice)
+
         if s.start is None:
             newStart = 0
         elif type(s.start) is float:
-            newStart = int(s.start * size )
+            newStart = int(s.start * size)
         else:
             newStart = s.start
-            
+
         if s.stop is None:
             newStop = size
         elif type(s.stop) is float:
-            newStop = int(s.stop * size )
+            newStop = int(s.stop * size)
         else:
             newStop = s.stop
-        
-        result.append( slice(newStart,newStop, s.step) )
-    
+
+        result.append(slice(newStart, newStop, s.step))
+
     return tuple(result)
 
 
-def sliceToCellInterval( s ):
-    newMin = [0,0,0]
-    newMax = [0,0,0]
+def sliceToCellInterval(s):
+    newMin = [0, 0, 0]
+    newMax = [0, 0, 0]
     for i in range(3):
         if type(s[i]) is int:
             newMin[i] = s[i]
             newMax[i] = s[i]
         else:
             newMin[i] = s[i].start
-            newMax[i] = s[i].stop-1
-    return walberla_cpp.CellInterval( newMin,newMax)
-    
+            newMax[i] = s[i].stop - 1
+    return walberla_cpp.CellInterval(newMin, newMax)
+
 
 def cellIntervalToSlice(cellInterval, collapseExtentOne=True):
     if not hasattr(collapseExtentOne, '__len__'):
@@ -64,15 +67,14 @@ def cellIntervalToSlice(cellInterval, collapseExtentOne=True):
     slices = []
     for i, collapseInfo in enumerate(collapseExtentOne):
         if collapseInfo and cellInterval.min[i] == cellInterval.max[i]:
-            slices.append( cellInterval.min[i] )
+            slices.append(cellInterval.min[i])
         else:
-            slices.append( slice(cellInterval.min[i], cellInterval.max[i]+1,None ) )
+            slices.append(slice(cellInterval.min[i], cellInterval.max[i] + 1, None))
     return tuple(slices)
 
 
-
 def extend(coreModule):
     coreModule.makeSlice = SliceMaker()
     coreModule.normalizeSlice = normalizeSlice
     coreModule.CellInterval.fromSlice = staticmethod(sliceToCellInterval)
-    coreModule.CellInterval.toSlice = cellIntervalToSlice
\ No newline at end of file
+    coreModule.CellInterval.toSlice = cellIntervalToSlice
diff --git a/python/waLBerla/cuda_extension.py b/python/waLBerla/cuda_extension.py
index e61612b45..0cbb96163 100644
--- a/python/waLBerla/cuda_extension.py
+++ b/python/waLBerla/cuda_extension.py
@@ -8,19 +8,19 @@ def toGpuArray(f, withGhostLayers=True):
     if not f:
         return None
     dtype = np.dtype(f.dtypeStr)
-    strides = [dtype.itemsize*a for a in f.strides]
+    strides = [dtype.itemsize * a for a in f.strides]
     res = GPUArray(f.sizeWithGhostLayers, dtype, gpudata=f.ptr, strides=strides)
     if withGhostLayers is True:
         return res
 
     ghostLayers = normalizeGhostlayerInfo(f, withGhostLayers)
-    glCutoff = [ f.nrOfGhostLayers - gl for gl in ghostLayers ]
-    res = res[ glCutoff[0]:-glCutoff[0] if glCutoff[0] > 0 else None,
-               glCutoff[1]:-glCutoff[1] if glCutoff[1] > 0 else None,
-               glCutoff[2]:-glCutoff[2] if glCutoff[2] > 0 else None,
-               : ]
+    glCutoff = [f.nrOfGhostLayers - gl for gl in ghostLayers]
+    res = res[glCutoff[0]:-glCutoff[0] if glCutoff[0] > 0 else None,
+              glCutoff[1]:-glCutoff[1] if glCutoff[1] > 0 else None,
+              glCutoff[2]:-glCutoff[2] if glCutoff[2] > 0 else None,
+              :]
     return res
 
+
 def extend(cppCudaModule):
     cppCudaModule.toGpuArray = toGpuArray
-
diff --git a/python/waLBerla/evaluation/__init__.py b/python/waLBerla/evaluation/__init__.py
index 7305e1fe1..44f49edcc 100644
--- a/python/waLBerla/evaluation/__init__.py
+++ b/python/waLBerla/evaluation/__init__.py
@@ -1,2 +1,4 @@
 from waLBerla.evaluation.scenariocreators import HashedScenarioFolderCreator
-from waLBerla.evaluation.timeseries import TimeSeries
\ No newline at end of file
+from waLBerla.evaluation.timeseries import TimeSeries
+
+__all__ = ['HashedScenarioFolderCreator', 'TimeSeries']
diff --git a/python/waLBerla/evaluation/scenariocreators.py b/python/waLBerla/evaluation/scenariocreators.py
index 0e42fbd27..2f592d7ff 100644
--- a/python/waLBerla/evaluation/scenariocreators.py
+++ b/python/waLBerla/evaluation/scenariocreators.py
@@ -11,29 +11,32 @@ except ImportError:
 
 
 def waitForFoldersToExist(folders, waitTime=3, timeout=10):
-    """For big MPI parallel runs it can happen that mkdir returns but not all processes already see the newly created folders
-       due to network filesystem issues. This function waits until the given folders exist."""
+    """For big MPI parallel runs it can happen that mkdir returns but not all processes already
+       see the newly created folders due to network filesystem issues. This function waits until
+       the given folders exist."""
 
     def allFoldersExits():
         for f in folders:
-            if not os.path.exists(f): return False
+            if not os.path.exists(f):
+                return False
         return True
 
     waitedTime = 0
     while not allFoldersExits():
         if waitedTime > timeout:
-            raise RuntimeError("Creating folder timeout:  when waiting for folder creation. Network filesystem problems?" + str(folders))
+            raise RuntimeError(
+                "Creating folder timeout:  when waiting for folder creation. Network filesystem problems?" + str(
+                    folders))
         time.sleep(waitTime)
         waitedTime += waitTime
 
 
-
 class HashedScenarioFolderCreator:
     """
     Helper class to create the following directory structure for simulation results
-    
+
     Example:
-    
+
         baseFolder / 2fc7c861eb/ subfolderNames[0]
                                / subfolderNames[1]
                                / subfolderNames[2]
@@ -42,70 +45,70 @@ class HashedScenarioFolderCreator:
                               / subfolderNames[1]
                               / subfolderNames[2]
                               params.json
-                  
+
     In the baseFolder several scenario folders are created. The name of these folders
     is a hash value of the parameters of the scenario.
-    The subfolder names (in this example meshes, logs, img ) can be configured and 
+    The subfolder names (in this example meshes, logs, img) can be configured and
     are passed as subfolderNames in the constructor. Optionally a json file is written
     to the scenario folders that contains the parameters in readable form.
-    
-    If a scenario was already simulated, i.e. the folder does already exists, a new folder is 
+
+    If a scenario was already simulated, i.e. the folder already exists, a new folder is
     created with "_1" appended to the name.
-    
+
     All filesystem operations are carried out by the root process, but the functions have to
     be called by all processes.
     """
-    def __init__(self, baseFolder, subfolderNames, hashFunc = hashlib.sha1, hashLength=10 ):
-        self._baseFolder     = baseFolder
+
+    def __init__(self, baseFolder, subfolderNames, hashFunc=hashlib.sha1, hashLength=10):
+        self._baseFolder = baseFolder
         self._subfolderNames = subfolderNames
-        self._hashLength     = hashLength
-        self._hashFunc       = hashFunc
+        self._hashLength = hashLength
+        self._hashFunc = hashFunc
         # create base folder if it does not exist yet
-        if mpi.worldRank() == 0 and not os.path.exists( self._baseFolder ):
-            os.makedirs( result_base_folder )
+        if mpi.worldRank() == 0 and not os.path.exists(self._baseFolder):
+            os.makedirs(self._baseFolder)
 
     def _scenarioBaseName(self, paramsDict, postfix=0):
         """Scenario name without numbering postfix"""
         # Hash function needs encoded string -> encode the dictionary using json
-        encodedJSON = json.dumps( paramsDict, sort_keys=True ).encode('utf-8')
-        hashStr = self._hashFunc( encodedJSON ).hexdigest()[:self._hashLength] 
+        encodedJSON = json.dumps(paramsDict, sort_keys=True).encode('utf-8')
+        hashStr = self._hashFunc(encodedJSON).hexdigest()[:self._hashLength]
         if postfix > 0:
-            return "%s_%d" % ( hashStr, postfix )
+            return "%s_%d" % (hashStr, postfix)
         else:
             return hashStr
 
-    def scenarioExists(self, paramsDict, postfix = 0):
+    def scenarioExists(self, paramsDict, postfix=0):
         postfixStr = "_%d" % (postfix,) if postfix > 0 else ""
-        path = os.path.join( self._baseFolder, self._scenarioBaseName(paramsDict) + postfixStr )
-        return os.path.exists( path )
-        
-    def create( self, paramsDict, writeParamsJSON=True ):
+        path = os.path.join(self._baseFolder, self._scenarioBaseName(paramsDict) + postfixStr)
+        return os.path.exists(path)
+
+    def create(self, paramsDict, writeParamsJSON=True):
         """Creates a new scenario based on the given parameters, waits until all
            subfolders are created on all processes.
           Returns the scenario name and the path to the scenario folder"""
         if mpi.worldRank() == 0:
             postfix = 0
-            while self.scenarioExists( paramsDict, postfix ):
+            while self.scenarioExists(paramsDict, postfix):
                 postfix += 1
-            scenarioName = self._scenarioBaseName( paramsDict, postfix ) 
-            scenarioName = mpi.broadcastString( scenarioName )
+            scenarioName = self._scenarioBaseName(paramsDict, postfix)
+            scenarioName = mpi.broadcastString(scenarioName)
         if mpi.worldRank() != 0:
-            scenarioName = mpi.broadcastString( "" )
-        
-        folders = [ os.path.join( self._baseFolder, scenarioName, subfolder ) for subfolder in ["" ] + self._subfolderNames ]
-        scenarioFolder = folders[0] 
-        
+            scenarioName = mpi.broadcastString("")
+
+        folders = [os.path.join(self._baseFolder, scenarioName, subfolder) for subfolder in [""] + self._subfolderNames]
+        scenarioFolder = folders[0]
+
         if mpi.worldRank() == 0:
             for f in folders:
-                os.mkdir( f )
+                os.mkdir(f)
             if writeParamsJSON:
                 paramsDict['scenarioName'] = scenarioName
-                with open( os.path.join(scenarioFolder,"params.json"), 'w' ) as f:
-                    json.dump( paramsDict, f, indent=4, sort_keys=True)
-            
+                with open(os.path.join(scenarioFolder, "params.json"), 'w') as f:
+                    json.dump(paramsDict, f, indent=4, sort_keys=True)
+
         mpi.worldBarrier()
-        waitForFoldersToExist( folders )
+        waitForFoldersToExist(folders)
         mpi.worldBarrier()
-        
-        return scenarioName, scenarioFolder
 
+        return scenarioName, scenarioFolder
diff --git a/python/waLBerla/evaluation/timeseries.py b/python/waLBerla/evaluation/timeseries.py
index 9ecd6e807..4c23d652d 100644
--- a/python/waLBerla/evaluation/timeseries.py
+++ b/python/waLBerla/evaluation/timeseries.py
@@ -1,74 +1,72 @@
 import numpy as np
 import time
 
+
 class TimeSeries:
     """Stores timeseries of values that are occuring during a simulation.
        Can estimate runtime until a value has reached a certain threshold.
-       Data is collected in a format that can be written to a sqlite database easily.""" 
-       
-    def __init__(self ):
+       Data is collected in a format that can be written to a sqlite database easily."""
+
+    def __init__(self):
         self._data = dict()
         self._timestamps = []
-        
-    def __getitem__(self, dataName ):
+
+    def __getitem__(self, dataName):
         return self._data[dataName]
-    
+
     @property
     def dataDict(self):
         return self._data
-    
+
     def addDatapoint(self, datapoint):
-        """Add several measure quantities. 
+        """Add several measure quantities.
             Example:
                addDatapoint( { 'maxVelocity' : 0.04, 'dropWidth' : 8, } ) """
-        if len( self._data ) == 0: # on first call
+        if len(self._data) == 0:  # on first call
             for key in datapoint:
                 self._data[key] = []
-        
-        if set( datapoint.keys() ) != set( self._data.keys() ):
+
+        if set(datapoint.keys()) != set(self._data.keys()):
             raise ValueError("Datapoints have to contain always the same quantities")
-        
-        self._timestamps.append( time.time() )
+
+        self._timestamps.append(time.time())
         for key, val in datapoint.items():
-            self._data[key].append( val )
+            self._data[key].append(val)
 
     def __len__(self):
         return len(self._timestamps)
-        
-        
-    def estimateRemainingTimeLinear(self, dataName, targetValue, historyLength=0 ):
+
+    def estimateRemainingTimeLinear(self, dataName, targetValue, historyLength=0):
         """
-        :param historyLength: a linear estimate is generated using the last added value and the value data[historyLength]
+        :param historyLength: a linear estimate is generated using the last added value and the
+                              value data[historyLength]
                              for out-of-range values (abs(historyLength) > len(data)) the first data point is chosen
-        :return: estimated number of seconds until targetValue is reached ( may be negative if distance to targetValue is increasing )
+        :return: estimated number of seconds until targetValue is reached
+                 (None if the distance to targetValue is increasing)
         """
         if abs(historyLength) > len(self):
             historyLength = 0
         if len(self) < 2:
             return None
-        
+
         data = self._data[dataName]
-        
-        dt   = self._timestamps[-1] - self._timestamps[historyLength]
-        dVal = data[-1]             -  data[historyLength]
-        
+
+        dt = self._timestamps[-1] - self._timestamps[historyLength]
+        dVal = data[-1] - data[historyLength]
+
         distance = targetValue - data[-1]
         if dVal == 0:
             return None
-        
+
         res = distance / dVal * dt
         return None if res < 0 else res
-    
-    
-    def isStationary(self, dataName, historyLength, tolerance ):
-        """Views only the last historyLength values. Returns True if none of these values 
+
+    def isStationary(self, dataName, historyLength, tolerance):
+        """Views only the last historyLength values. Returns True if none of these values
            does deviate more than tolerance from their average"""
         if len(self) < historyLength:
             return False
-        
-        data = np.array( self._data[dataName][-historyLength:] )
-        data = np.abs( data - np.average(data) )
-        return np.max(data) < tolerance
-
-
 
+        data = np.array(self._data[dataName][-historyLength:])
+        data = np.abs(data - np.average(data))
+        return np.max(data) < tolerance
diff --git a/python/waLBerla/field_extension.py b/python/waLBerla/field_extension.py
index aa1be40cb..7e08aa730 100644
--- a/python/waLBerla/field_extension.py
+++ b/python/waLBerla/field_extension.py
@@ -1,81 +1,82 @@
 import numpy
-try:
-    from . import walberla_cpp
-except ImportError:
-    import walberla_cpp
+
+# try:
+#     from . import walberla_cpp
+# except ImportError:
+#     import walberla_cpp
 
 
 # ----------------------------- Python functions to extend the C++ field module ---------------------------------
 
-def normalizeGhostlayerInfo( field, withGhostLayers):
+def normalizeGhostlayerInfo(field, withGhostLayers):
     """Takes one ghost layer parameter and returns an integer:
         True -> all ghost layers, False->no ghost layers"""
 
     def normalizeComponent(gl):
-        if gl == False:
+        if gl is False:
             return 0
-        if gl == True:
+        if gl is True:
             return field.nrOfGhostLayers
         if gl > field.nrOfGhostLayers:
-            raise ValueError("Field only has %d ghost layers (requested %d)" % ( field.nrOfGhostLayers, gl ) )
+            raise ValueError("Field only has %d ghost layers (requested %d)" % (field.nrOfGhostLayers, gl))
         return gl
 
-    if hasattr( withGhostLayers, "__len__") and len(withGhostLayers) == 3:
-        ghostLayers = [ normalizeComponent(gl) for gl in withGhostLayers ]
+    if hasattr(withGhostLayers, "__len__") and len(withGhostLayers) == 3:
+        ghostLayers = [normalizeComponent(gl) for gl in withGhostLayers]
     else:
-        ghostLayers = [ normalizeComponent(withGhostLayers) ] * 3
+        ghostLayers = [normalizeComponent(withGhostLayers)] * 3
     return ghostLayers
 
+
 def npArrayFromWaLBerlaField(field, withGhostLayers=False):
     """ Creates a numpy array view on the waLBerla field data
         @field: the waLBerla field
-        @withGhostLayers: Possible values: 
+        @withGhostLayers: Possible values:
                             1. Boolean: False: no ghost layers included
                                         True:  all ghost layers included
                             2. Integer: number of ghost layers to include
                             3. List with three booleans or integers with ghost layer info for x,y,z direction
     """
-    
+
     if not field:
         return None
 
     ghostLayers = normalizeGhostlayerInfo(field, withGhostLayers)
 
-    if not hasattr(field, 'buffer'): # Field adaptor -> create field with adapted values
+    if not hasattr(field, 'buffer'):  # Field adaptor -> create field with adapted values
         field = field.copyToField()
-    
-    if ghostLayers[0]==0 and ghostLayers[1]==0 and ghostLayers[2] == 0:
-        return numpy.asarray( field.buffer( False ) )
+
+    if ghostLayers[0] == 0 and ghostLayers[1] == 0 and ghostLayers[2] == 0:
+        return numpy.asarray(field.buffer(False))
     else:
-        result   = numpy.asarray( field.buffer( True ) )
-        glCutoff = [ field.nrOfGhostLayers - gl for gl in ghostLayers ]
-        view     = result[ glCutoff[0]:-glCutoff[0] if glCutoff[0] > 0 else None,
-                           glCutoff[1]:-glCutoff[1] if glCutoff[1] > 0 else None,
-                           glCutoff[2]:-glCutoff[2] if glCutoff[2] > 0 else None,
-                           : ]
+        result = numpy.asarray(field.buffer(True))
+        glCutoff = [field.nrOfGhostLayers - gl for gl in ghostLayers]
+        view = result[glCutoff[0]:-glCutoff[0] if glCutoff[0] > 0 else None,
+                      glCutoff[1]:-glCutoff[1] if glCutoff[1] > 0 else None,
+                      glCutoff[2]:-glCutoff[2] if glCutoff[2] > 0 else None,
+                      :]
         return view
 
 
 def arrayFromWaLBerlaAdaptor(field, withGhostLayers=False):
-    return npArrayFromWaLBerlaField( field.copyToField(), withGhostLayers )
+    return npArrayFromWaLBerlaField(field.copyToField(), withGhostLayers)
 
 
-def copyArrayToField(dstField, srcArray, slice=[slice(None,None,None) ]*3, withGhostLayers=False):
+def copyArrayToField(dstField, srcArray, slice=[slice(None, None, None)] * 3, withGhostLayers=False):
     """ Copies a numpy array into (part of) a waLBerla field
-    
+
     Usually no copying has to take place between waLBerla fields and numpy arrays, since an array view can be
     constructed on a field that uses the same memory.
     When running certain numpy operations that cannot be done in-place, however, the data has to be copied back.
-    
+
     @param dstField: waLBerla field, where the data is copied to
     @param srcArray: numpy array where to copy from
-    @param slice:    the numpy array is allowed to be smaller than the field. In this case the target region 
+    @param slice:    the numpy array is allowed to be smaller than the field. In this case the target region
                      has to be specified via this 3-dimensional slice
     @param withGhostLayers: if true the ghost layers of the field are considered as well
     """
     dstAsArray = npArrayFromWaLBerlaField(dstField, withGhostLayers)
-    numpy.copyto( dstAsArray[slice], srcArray )
-
+    numpy.copyto(dstAsArray[slice], srcArray)
 
 
 def extend(cppFieldModule):
@@ -88,8 +89,7 @@ def extend(cppFieldModule):
         else:
             return None
 
-
-    cppFieldModule.toArray          = npArrayFromWaLBerlaField
-    cppFieldModule.adaptorToArray   = arrayFromWaLBerlaAdaptor
+    cppFieldModule.toArray = npArrayFromWaLBerlaField
+    cppFieldModule.adaptorToArray = arrayFromWaLBerlaAdaptor
     cppFieldModule.copyArrayToField = copyArrayToField
-    cppFieldModule.gatherField      = gatherField
+    cppFieldModule.gatherField = gatherField
diff --git a/python/waLBerla/geometry_setup.py b/python/waLBerla/geometry_setup.py
index b77771a04..32947a35b 100644
--- a/python/waLBerla/geometry_setup.py
+++ b/python/waLBerla/geometry_setup.py
@@ -9,8 +9,9 @@ except ImportError:
 
 from .core_extension import normalizeSlice
 
-def setBoundaryFromArray( blocks, boundaryID, targetSlice, imageArr, boundaryConfig,
-                          resizeFunc=None, extrusionCoordinate=-1 ):
+
+def setBoundaryFromArray(blocks, boundaryID, targetSlice, imageArr, boundaryConfig,
+                         resizeFunc=None, extrusionCoordinate=-1):
     """Initializes Boundary Handling using an image
     :param blocks: the block storage
     :param boundaryID: block data name of boundary handling
@@ -19,7 +20,7 @@ def setBoundaryFromArray( blocks, boundaryID, targetSlice, imageArr, boundaryCon
                         has to be given.
                         Example: for targetSlice=[0.25:0.75,  0  , 0.25:0.75] the image is resized to half
                         the x-z domain size, placed in the middle of the domain and extruded in y direction.
-    :param imageArr:    a 2D array used to set up the boundaries    
+    :param imageArr:    a 2D array used to set up the boundaries
     :param boundaryConfig: dictionary mapping values of imageArr to boundary configurations,
                            used as index array in forceBoundary()
     :param resizeFunc: if the given slice does not match the shape of imageArr, the image array has to be resized.
@@ -35,103 +36,99 @@ def setBoundaryFromArray( blocks, boundaryID, targetSlice, imageArr, boundaryCon
         return
 
     nrOfGhostLayers = blocks[0][boundaryID].getFlagField().nrOfGhostLayers
-    
-    imageArr = np.rot90( imageArr,3 )
+
+    imageArr = np.rot90(imageArr, 3)
 
     sliceWithGhostLayers = 'g' in targetSlice
-    targetSlice = [ s for s in targetSlice if s != 'g' ]
-    size = [  s + 2* nrOfGhostLayers if sliceWithGhostLayers else s  for s in blocks.getDomainCellBB().size ]
-    imageCellInterval = CellInterval.fromSlice( normalizeSlice(targetSlice, size ) )
+    targetSlice = [s for s in targetSlice if s != 'g']
+    size = [s + 2 * nrOfGhostLayers if sliceWithGhostLayers else s for s in blocks.getDomainCellBB().size]
+    imageCellInterval = CellInterval.fromSlice(normalizeSlice(targetSlice, size))
     if sliceWithGhostLayers:
-        imageCellInterval.shift( -nrOfGhostLayers, -nrOfGhostLayers, -nrOfGhostLayers )
-
+        imageCellInterval.shift(-nrOfGhostLayers, -nrOfGhostLayers, -nrOfGhostLayers)
 
     # Automatic detection of extrusion coordinate
     if extrusionCoordinate < 0 or extrusionCoordinate > 2:
-        possibleExtrusionCoordinate = np.array( [ 0, 0, 0 ] )
+        possibleExtrusionCoordinate = np.array([0, 0, 0])
         for i in range(3):
             if imageCellInterval.min[i] == imageCellInterval.max[i]:
                 possibleExtrusionCoordinate[i] = 1
                 extrusionCoordinate = i
-        if sum( possibleExtrusionCoordinate ) != 1:
+        if sum(possibleExtrusionCoordinate) != 1:
             raise ValueError("No valid extrusionCoordinate given - "
                              "and extrusion coordinate could not be found automatically")
-    assert( extrusionCoordinate < 3 and extrusionCoordinate >=0 )
-    
-    
+    assert (extrusionCoordinate < 3 and extrusionCoordinate >= 0)
+
     # Resize image
-    imageBounds = list( imageCellInterval.size )
+    imageBounds = list(imageCellInterval.size)
     del imageBounds[extrusionCoordinate]
-    if imageArr.shape != tuple(imageBounds ):
+    if imageArr.shape != tuple(imageBounds):
         if resizeFunc is None:
             raise ValueError("The given image size does not match the target slice: "
                              "resizing would be necessary but no resizeFunc was given.")
-        imageArr = resizeFunc( imageArr, imageBounds )
-        
+        imageArr = resizeFunc(imageArr, imageBounds)
+
     assert imageArr.shape == tuple(imageBounds)
-    assert imageArr.dtype.kind=='i', "imageArr has to be of integer type"
+    assert imageArr.dtype.kind == 'i', "imageArr has to be of integer type"
 
     unusedIdx = 0
     while unusedIdx in boundaryConfig:
         unusedIdx += 1
-    
-    def make2Dfrom3DSlice( targetSlice, extrusionCoordinate ):
-        l = list( targetSlice )
+
+    def make2Dfrom3DSlice(targetSlice, extrusionCoordinate):
+        l = list(targetSlice)
         del l[extrusionCoordinate]
         return l
-    
+
     for block in blocks:
-        blockCellInterval = blocks.getBlockCellBB( block )
-        blockCellInterval.expand( nrOfGhostLayers )
-        intersectionGlobalCoord = blockCellInterval.getIntersection( imageCellInterval )
+        blockCellInterval = blocks.getBlockCellBB(block)
+        blockCellInterval.expand(nrOfGhostLayers)
+        intersectionGlobalCoord = blockCellInterval.getIntersection(imageCellInterval)
 
         if intersectionGlobalCoord.empty():
             continue
-        intersectionLocalCoord = blocks.transformGlobalToLocal( block, intersectionGlobalCoord )
-        
+        intersectionLocalCoord = blocks.transformGlobalToLocal(block, intersectionGlobalCoord)
+
         # Create a field with same size as block
-        blockCellBB = blocks.getBlockCellBB( block )
-        wlbIndexField = field.createField( list( blockCellBB.size ), np.int32, ghostLayers=nrOfGhostLayers )
-        indexField = field.toArray( wlbIndexField, withGhostLayers=nrOfGhostLayers )[:,:,:,:]
-        indexField[:,:,:,:] = unusedIdx
-                
+        blockCellBB = blocks.getBlockCellBB(block)
+        wlbIndexField = field.createField(list(blockCellBB.size), np.int32, ghostLayers=nrOfGhostLayers)
+        indexField = field.toArray(wlbIndexField, withGhostLayers=nrOfGhostLayers)[:, :, :, :]
+        indexField[:, :, :, :] = unusedIdx
+
         # Copy image into this field
         targetSlice = intersectionLocalCoord.getShifted(nrOfGhostLayers, nrOfGhostLayers, nrOfGhostLayers).toSlice()
 
-        minCoord = np.array( imageCellInterval.min )
-        imgTargetSlice = intersectionGlobalCoord.getShifted( *(-minCoord) ).toSlice()
-        sliceInImage = make2Dfrom3DSlice(imgTargetSlice,extrusionCoordinate)
-        indexField[ targetSlice + [0] ] = imageArr[ sliceInImage ]
-
-        block[boundaryID].forceBoundary( wlbIndexField,  boundaryConfig )
-
+        minCoord = np.array(imageCellInterval.min)
+        imgTargetSlice = intersectionGlobalCoord.getShifted(*(-minCoord)).toSlice()
+        sliceInImage = make2Dfrom3DSlice(imgTargetSlice, extrusionCoordinate)
+        indexField[targetSlice + [0]] = imageArr[sliceInImage]
 
+        block[boundaryID].forceBoundary(wlbIndexField, boundaryConfig)
 
 
-def binaryResize( img, newSize ):
+def binaryResize(img, newSize):
     """This can be used as resize function for setBoundaryFromArray for arrays with
        zero and ones. After resizing the image is again binarized"""
-    img = scipy.misc.imresize( img, size=newSize)
-    img[ img <= 254 ] = 0
-    img[ img  > 254 ] = 1
-    img = img.astype( np.int32 )
+    img = scipy.misc.imresize(img, size=newSize)
+    img[img <= 254] = 0
+    img[img > 254] = 1
+    img = img.astype(np.int32)
     return img
 
 
-def setBoundaryFromBlackAndWhiteImage( blocks, boundaryID, targetSlice, imagePath, boundaryConfig, extrusionCoordinate=-1):
+def setBoundaryFromBlackAndWhiteImage(blocks, boundaryID, targetSlice, imagePath, boundaryConfig,
+                                      extrusionCoordinate=-1):
     """Loads array from image file and calls setBoundaryFromArray.
 
      :param imagePath: path to image file.
 
      For the other parameters see documentation of setBoundaryFromArray.
     """
-    imgArr = scipy.ndimage.imread(imagePath, flatten=True ).astype(int)
-    setBoundaryFromArray( blocks, boundaryID, targetSlice, imgArr, { 0: boundaryConfig },
-                          resizeFunc=binaryResize, extrusionCoordinate=extrusionCoordinate )
+    imgArr = scipy.ndimage.imread(imagePath, flatten=True).astype(int)
+    setBoundaryFromArray(blocks, boundaryID, targetSlice, imgArr, {0: boundaryConfig},
+                         resizeFunc=binaryResize, extrusionCoordinate=extrusionCoordinate)
 
 
-
-def setFieldUsingFlagMask( blocks, targetField, targetValue, flagField, flagNames ):
+def setFieldUsingFlagMask(blocks, targetField, targetValue, flagField, flagNames):
     """
     Sets all values of a target field to a given value where a certain flag is set.
 
@@ -146,9 +143,6 @@ def setFieldUsingFlagMask( blocks, targetField, targetValue, flagField, flagName
         for flagName in flagNames:
             mask |= b[flagField].flag(flagName)
 
-        targetArr = field.toArray( b[targetField], True )
-        flagArr   = field.toArray( b[flagField]  , True )[:,:,:,0]
-        targetArr[ np.bitwise_and( flagArr, mask ) > 0, : ] = targetValue
-
-
-    
\ No newline at end of file
+        targetArr = field.toArray(b[targetField], True)
+        flagArr = field.toArray(b[flagField], True)[:, :, :, 0]
+        targetArr[np.bitwise_and(flagArr, mask) > 0, :] = targetValue
diff --git a/python/waLBerla/plot.py b/python/waLBerla/plot.py
index 085d47268..e4b007455 100644
--- a/python/waLBerla/plot.py
+++ b/python/waLBerla/plot.py
@@ -1,22 +1,24 @@
 """Small wrappers aroung matplotlib to gather and plot parts of a waLBerla domain"""
 
-
 import matplotlib.animation as animation
+import numpy as np
 
 try:
     from . import walberla_cpp
 except ImportError:
     import walberla_cpp
 
-from matplotlib.pyplot import *
+from matplotlib.pyplot import imshow, gcf, figure, plot, quiver
+
+
+def fieldShow(npField, **kwargs):
+    npField = np.rot90(npField, 3)
+    imshow(npField, origin='lower', **kwargs)
 
-def fieldShow( npField, **kwargs ):
-    npField = np.rot90( npField,3 )
-    imshow( npField, origin='lower', **kwargs )
-    
-def scalarField( blocks, name, sliceDef, fCoord=0, targetRank=0, **kwargs  ):
+
+def scalarField(blocks, name, sliceDef, fCoord=0, targetRank=0, **kwargs):
     """Plots a 2D slice through the global domain as an image
-    
+
     :param blocks:      the blockstorage
     :param name:        Name of the block data to be plotted. Has to be a scalar field
     :param sliceDef:    a two dimensional slice through the domain. Can be created with waLBerla.makeSlice
@@ -24,74 +26,74 @@ def scalarField( blocks, name, sliceDef, fCoord=0, targetRank=0, **kwargs  ):
     :param targetRank:  rank that gathers and plots the data
     :param kwargs:      further keyword arguments are passed to matplotlib.pyplot.imshow
     """
-    f = walberla_cpp.field.gather( blocks, name, sliceDef, targetRank=targetRank )
+    f = walberla_cpp.field.gather(blocks, name, sliceDef, targetRank=targetRank)
     if f:
-        npField = np.asarray( f.buffer() )[:,:,:,fCoord].squeeze()
-        npField = np.swapaxes( npField, 0, 1  )
-        imshow( npField, origin='lower', **kwargs )
+        npField = np.asarray(f.buffer())[:, :, :, fCoord].squeeze()
+        npField = np.swapaxes(npField, 0, 1)
+        imshow(npField, origin='lower', **kwargs)
 
 
-def scalarFieldAnimation( blocks, name, sliceDef, runFunction, plotSetupFunction=lambda:None,
-                          plotUpdateFunction=lambda:None, fCoord=0, targetRank=0, interval=30, frames=180, **kwargs  ):
+def scalarFieldAnimation(blocks, name, sliceDef, runFunction, plotSetupFunction=lambda: None,
+                         plotUpdateFunction=lambda: None, fCoord=0, targetRank=0, interval=30, frames=180, **kwargs):
     """Creates animation of 2D slices through the global domain
-    
+
     :param runFunction:        function without arguments which is run between frames (should move simulation forward)
     :param plotSetupFunction:  function without arguments that is called after the plot was initially created.
                                Can be used to configure plot (set title etc.)
     :param plotUpdateFunction: function without arguments that is called when figure is updated
     :param interval:           passed to matplotlib.animation.FuncAnimation: milliseconds between two frames
     :param frames:             passed to :class:`matplotlib.animation.FuncAnimation`: number of frames
-    
+
     for other params see :func:`scalarField`
     """
     fig = gcf()
-    f = walberla_cpp.field.gather( blocks, name, sliceDef, targetRank=targetRank )
+    f = walberla_cpp.field.gather(blocks, name, sliceDef, targetRank=targetRank)
     im = None
     if f:
-        npField = np.asarray( f.buffer() )[:,:,:,fCoord].squeeze()
-        npField = np.swapaxes( npField, 0, 1  )
-        im = imshow( npField, origin='lower', **kwargs )
+        npField = np.asarray(f.buffer())[:, :, :, fCoord].squeeze()
+        npField = np.swapaxes(npField, 0, 1)
+        im = imshow(npField, origin='lower', **kwargs)
         plotSetupFunction()
-        
+
     def updatefig(*args):
         runFunction()
-        f = walberla_cpp.field.gather( blocks, name, sliceDef, targetRank=targetRank )
+        f = walberla_cpp.field.gather(blocks, name, sliceDef, targetRank=targetRank)
         if f:
-            npField = np.swapaxes( np.asarray( f.buffer() ), 0, 1  )
-            npField = npField[:,:,:,fCoord ].squeeze()
-            im.set_array( npField)
+            npField = np.swapaxes(np.asarray(f.buffer()), 0, 1)
+            npField = npField[:, :, :, fCoord].squeeze()
+            im.set_array(npField)
             plotUpdateFunction()
             return im,
 
-    return animation.FuncAnimation( fig, updatefig, interval=interval, frames=frames )
-        
-        
+    return animation.FuncAnimation(fig, updatefig, interval=interval, frames=frames)
+
 
-def vectorField( blocks, name, sliceDef, xComponent=0, yComponent=1, targetRank=0, xStep=1, yStep=1, **kwargs ):
+def vectorField(blocks, name, sliceDef, xComponent=0, yComponent=1, targetRank=0, xStep=1, yStep=1, **kwargs):
     """Plots a vector field slice using matplotlib quiver
-    
+
     :param blocks:     the blockstorage
     :param name:       Name of the block data to be plotted. Has to be a vector field
     :param sliceDef:   a two dimensional slice through the domain. Can be created with waLBerla.makeSlice
-    :param xComponent: which component of the vector field (0,1 or 2) to take as the horizontal value for the quiver arrows
-    :param yComponent: which component of the vector field (0,1 or 2) to take as the vertical value for the quiver arrows
+    :param xComponent: which component of the vector field (0,1 or 2)
+                       to take as the horizontal value for the quiver arrows
+    :param yComponent: which component of the vector field (0,1 or 2)
+                       to take as the vertical value for the quiver arrows
     :param xStep:      take only every xStep-th cell/arrow in x direction
-    :param yStep:      take only every yStep's cell/arrow in y direction
+    :param yStep:      take only every yStep-th cell/arrow in y direction
     :param targetRank: rank that gathers and plots the data
     :param kwargs:     further keyword arguments are passed to matplotlib.pyplot.quiver
     """
-    f = walberla_cpp.field.gather( blocks, name, sliceDef, targetRank=targetRank )
+    f = walberla_cpp.field.gather(blocks, name, sliceDef, targetRank=targetRank)
     if f:
-        npField = np.swapaxes( np.asarray( f.buffer() ), 0, 1  )
-        xVel = npField[::xStep,::yStep,:,xComponent].squeeze()
-        yVel = npField[::xStep,::yStep,:,yComponent].squeeze()
-        quiver(xVel,yVel, **kwargs )
-
+        npField = np.swapaxes(np.asarray(f.buffer()), 0, 1)
+        xVel = npField[::xStep, ::yStep, :, xComponent].squeeze()
+        yVel = npField[::xStep, ::yStep, :, yComponent].squeeze()
+        quiver(xVel, yVel, **kwargs)
 
 
-def alongLine( blocks, name, sliceDef, fCoord=0, targetRank=0, **kwargs ):
+def alongLine(blocks, name, sliceDef, fCoord=0, targetRank=0, **kwargs):
     """Plot a field value along a one dimensional slice through the domain
-    
+
     :param blocks:      the blockstorage
     :param name:        Name of the block data to be plotted. Has to be a scalar field
     :param sliceDef:    a one dimensional slice through the domain. Can be created with :func:`waLBerla.makeSlice`
@@ -99,38 +101,38 @@ def alongLine( blocks, name, sliceDef, fCoord=0, targetRank=0, **kwargs ):
     :param targetRank:  rank that gathers and plots the data
     :param kwargs:      further keyword arguments are passed to :func:`matplotlib.pyplot.plot`
     """
-    f = walberla_cpp.field.gather( blocks, name, sliceDef, targetRank=targetRank )
+    f = walberla_cpp.field.gather(blocks, name, sliceDef, targetRank=targetRank)
     if f:
-        npField = np.asarray( f.buffer() )
-        npField = npField[:,:,:,fCoord].squeeze()
-        plot( npField, **kwargs )
-        
-        
-def alongLineAnimation( blocks, name, sliceDef, runFunction, plotSetupFunction=lambda:None, fCoord=0, targetRank=0, interval=30, frames=180, **kwargs ):
+        npField = np.asarray(f.buffer())
+        npField = npField[:, :, :, fCoord].squeeze()
+        plot(npField, **kwargs)
+
+
+def alongLineAnimation(blocks, name, sliceDef, runFunction, plotSetupFunction=lambda: None, fCoord=0, targetRank=0,
+                       interval=30, frames=180, **kwargs):
     """Animated version of :func:`alongLine`
-    
+
     For parameter documentation see :func:`scalarFieldAnimation` and :func:`alongLine`
-    
+
     """
     fig = figure()
 
-    f = walberla_cpp.field.gather( blocks, name, sliceDef, targetRank=targetRank )
+    f = walberla_cpp.field.gather(blocks, name, sliceDef, targetRank=targetRank)
     line = None
     if f:
-        npField = np.asarray( f.buffer() )
-        npField = npField[:,:,:,fCoord].squeeze()
-        line, = plot( npField, **kwargs )
+        npField = np.asarray(f.buffer())
+        npField = npField[:, :, :, fCoord].squeeze()
+        line, = plot(npField, **kwargs)
         plotSetupFunction()
-        
+
     def updatefig(*args):
         runFunction()
-        f = walberla_cpp.field.gather( blocks, name, sliceDef, targetRank=targetRank )
+        f = walberla_cpp.field.gather(blocks, name, sliceDef, targetRank=targetRank)
         if f:
-            npField = np.asarray( f.buffer() )
-            npField = npField[:,:,:,fCoord].squeeze()
-            fig.gca().set_ylim( (np.min(npField) ,np.max(npField) ) )
-            line.set_ydata( npField )
+            npField = np.asarray(f.buffer())
+            npField = npField[:, :, :, fCoord].squeeze()
+            fig.gca().set_ylim((np.min(npField), np.max(npField)))
+            line.set_ydata(npField)
             return line,
-    
-    return animation.FuncAnimation( fig, updatefig, interval=interval, frames=frames, blit=False )
-                
\ No newline at end of file
+
+    return animation.FuncAnimation(fig, updatefig, interval=interval, frames=frames, blit=False)
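A short sketch of how the patched plotting helpers might be used from a simulation
script; ``blocks`` and the field name ``'density'`` are assumptions::

    import matplotlib.pyplot as plt
    import waLBerla
    from waLBerla import plot as wplot

    # image plot of an x-y slice through the middle of the domain
    wplot.scalarField(blocks, 'density', waLBerla.makeSlice[:, :, 0.5])
    plt.title('density')
    plt.show()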
diff --git a/python/waLBerla/timeloop_extension.py b/python/waLBerla/timeloop_extension.py
index 966c6f453..31b859ad1 100644
--- a/python/waLBerla/timeloop_extension.py
+++ b/python/waLBerla/timeloop_extension.py
@@ -1,60 +1,69 @@
-
 try:
     from . import walberla_cpp
 except ImportError:
     import walberla_cpp
 
+
 def functorFromSweep(blocks, sweep):
     """Makes a functor from a sweep, by iterating over all the blocks in the provided block storage"""
+
     def functor():
         for b in blocks:
             sweep(b)
-    return functor
 
+    return functor
 
 
-class Timeloop( walberla_cpp.timeloop.ITimeloop ):
-    def __init__(self, nrOfTimesteps ):
+class Timeloop(walberla_cpp.timeloop.ITimeloop):
+    def __init__(self, nrOfTimesteps):
         super().__init__()
         self._nrOfTimesteps = nrOfTimesteps
         self._timestep = 0
         self._functors = []
         self._stopFlag = False
-    def run(self, timesteps=None ):
+
+    def run(self, timesteps=None):
         if not timesteps:
-            timesteps= self._nrOfTimesteps
+            timesteps = self._nrOfTimesteps
         for t in range(timesteps):
             self.singleStep()
-            if self._stopFlag: break
+            if self._stopFlag:
+                break
+
     def singleStep(self):
         for func in self._functors:
             func()
         self._timestep += 1
+
     def stop(self):
         self._stopFlag = True
+
     def synchronizedStop(self, stop=True):
-        syncStop = wlb.mpi.allreduceInt( int(stop), wlb.mpi.LOGICAL_OR )
+        # syncStop = wlb.mpi.allreduceInt(int(stop), wlb.mpi.LOGICAL_OR)
         self._stopFlag = True
+
     def setCurrentTimeStep(self, ts):
         self._timestep = ts
+
     def getCurrentTimeStep(self):
         return self._timestep
+
     def getNrOfTimeSteps(self):
         return self._nrOfTimesteps
 
-    def __getFunctor( self, functor, blocks ):
-        if blocks: #assume that it is a sweep if blocks were given
+    def __getFunctor(self, functor, blocks):
+        if blocks:  # assume that it is a sweep if blocks were given
             return functorFromSweep(blocks, functor)
         else:
             return functor
 
     def add(self, functor, blocks=None):
-        self._functors.append( self.__getFunctor(functor,blocks) )
-        return len(self._functors)-1
+        self._functors.append(self.__getFunctor(functor, blocks))
+        return len(self._functors) - 1
+
+    def replace(self, handle, functor, blocks=None):
+        self._functors[handle] = self.__getFunctor(functor, blocks)
 
-    def replace(self, handle, functor, blocks=None ):
-        self._functors[handle] = self.__getFunctor(functor,blocks)
 
 def extend(cppTimeloopModule):
     cppTimeloopModule.Timeloop = Timeloop
-    
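A minimal sketch of driving the Timeloop class above; ``communicate``, ``mySweep``,
``otherSweep`` and ``blocks`` are placeholders for application code::

    tl = Timeloop(nrOfTimesteps=100)
    tl.add(communicate)               # plain functor, called once per time step
    handle = tl.add(mySweep, blocks)  # sweep, wrapped to iterate over all blocks
    tl.run()                          # runs the configured 100 steps
    tl.replace(handle, otherSweep, blocks)  # exchange a functor via its handle
    tl.run(10)                        # runs ten additional steps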
diff --git a/python/waLBerla/tools/jobscripts/__init__.py b/python/waLBerla/tools/jobscripts/__init__.py
index 8fa8261d5..ff14ff781 100644
--- a/python/waLBerla/tools/jobscripts/__init__.py
+++ b/python/waLBerla/tools/jobscripts/__init__.py
@@ -13,10 +13,13 @@ from waLBerla.tools.jobscripts.pizdaint_hybrid import createJobscript as _cr_piz
 
 def createJobscript(*args, **kwargs):
     """
-        :param machine:     Currently supported target machines are  ``supermuc``, ``supermuc_phase2``, ``juqueen`` and ``hornet``
+        :param machine:     Currently supported target machines are ``supermuc``, ``supermuc_phase2``,
+                            ``juqueen`` and ``hornet``
         :param nodes:       Number of nodes to run on. You can either specify nodes or cores.
-        :param cores:       specify eiter nodes or cores. If using more than one node the nodes have to be filled completely
-        :param job_class:   optional, the jobclass is usually computed depending on number of nodes and wall_time, this parameter overrides this
+        :param cores:       specify either nodes or cores. If using more than one node, the nodes
+                            have to be filled completely
+        :param job_class:   optional, the jobclass is usually computed depending on number of nodes and wall_time,
+                            this parameter overrides this
         :param initial_dir: initial working directory of the job, optional, defaults to home directory
         :param job_name:    name of the job in the queuing system, defaults to 'waLBerla'
         :param output_file: file where stdout will be redirected to by the queueing system
@@ -32,8 +35,8 @@ def createJobscript(*args, **kwargs):
 
         Run multiple programs:
 
-        :param commands:  can be either a list of two-tuples with (executableName, configFile), which are then run in this order with mpirun
-                          or a list of string which are just appended to the jobscript file
+        :param commands:  can be either a list of two-tuples (executableName, configFile), which are then run
+                          in this order with mpirun, or a list of strings which are just appended to the jobscript file
     """
     funcs = {
         'supermuc': _cr_supermuc,
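For illustration, a hypothetical call of the dispatcher above; the executable and
parameter-file names are made up, and ``wall_time`` is passed as a
``datetime.timedelta`` (the machine backends call ``total_seconds()`` on it)::

    from datetime import timedelta
    from waLBerla.tools.jobscripts import createJobscript

    script = createJobscript(machine='supermuc', nodes=4,
                             wall_time=timedelta(hours=2),
                             exe_name='myApp', parameter_files=['sim.prm'])
    with open('job.sh', 'w') as f:
        f.write(script)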
diff --git a/python/waLBerla/tools/jobscripts/hornet.py b/python/waLBerla/tools/jobscripts/hornet.py
index af74febf0..92720d16b 100644
--- a/python/waLBerla/tools/jobscripts/hornet.py
+++ b/python/waLBerla/tools/jobscripts/hornet.py
@@ -4,64 +4,57 @@ import os
 import math
 
 
-
-def createJobscript( wall_time = None, nodes = None, cores = None, job_class = None, 
-                     initial_dir = '~', job_name="waLBerla", hyperthreading = 1, 
-                     exe_name = None, arguments = [], commands = [], **kwargs ):
-    
+def createJobscript(wall_time=None, nodes=None, cores=None, job_class=None,
+                    initial_dir='~', job_name="waLBerla", hyperthreading=1,
+                    exe_name=None, arguments=[], commands=[], **kwargs):
     if type(hyperthreading) is bool:
         hyperthreading = 2 if hyperthreading else 1
 
-    
     CORES_PER_NODE = 24 * hyperthreading
-    
-    if wall_time and wall_time.total_seconds() >  24 * 3600:
+
+    if wall_time and wall_time.total_seconds() > 24 * 3600:
         raise ValueError("No jobs longer that 24h allowed")
 
     if hyperthreading > 2:
-        raise ValueError("Hornet supports only two way hyperthreading (requested %d)" %(hyperthreading,) )
-        
+        raise ValueError("Hornet supports only two way hyperthreading (requested %d)" % (hyperthreading,))
+
     if nodes is not None and cores is not None:
         raise ValueError("You can either specify nodes or cores - not both.")
-    
+
     if nodes is None and cores is None:
         raise ValueError('Specify either cores or nodes.')
-    
+
     if cores is not None and cores > CORES_PER_NODE and cores % CORES_PER_NODE != 0:
-        raise ValueError( "When using more than one node, the number of cores has to be a multiple of %d" %(CORES_PER_NODE,) )
-    
+        raise ValueError(
+            "When using more than one node, the number of cores has to be a multiple of %d" % (CORES_PER_NODE,))
+
     if nodes is None:
-        nodes = math.ceil( cores / CORES_PER_NODE )
+        nodes = math.ceil(cores / CORES_PER_NODE)
     if cores is None:
         cores = nodes * CORES_PER_NODE
-    
-    
-    tasks_per_node = min( CORES_PER_NODE, cores )
-    
-    template_file = os.path.join(  os.path.dirname( os.path.realpath(__file__)  ), "hornet.job" )
-    
-    
-    result = open(template_file).read().format( cores = cores, 
-                                                nodes = nodes, 
-                                                initial_dir = initial_dir, 
-                                                tasks_per_node = tasks_per_node,
-                                                job_class = job_class,
-                                                job_name = job_name,
-                                                wall_time = wall_time )
-    
-    
+
+    tasks_per_node = min(CORES_PER_NODE, cores)
+
+    template_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), "hornet.job")
+
+    result = open(template_file).read().format(cores=cores,
+                                               nodes=nodes,
+                                               initial_dir=initial_dir,
+                                               tasks_per_node=tasks_per_node,
+                                               job_class=job_class,
+                                               job_name=job_name,
+                                               wall_time=wall_time)
+
     exec_line = "aprun -n %d -N %d -j %d  %s %s \n"
-    
+
     if exe_name is not None:
         for param_file in arguments:
-            result += exec_line %( cores, tasks_per_node, hyperthreading, exe_name, param_file )
-            
+            result += exec_line % (cores, tasks_per_node, hyperthreading, exe_name, param_file)
+
     for exe_paramfile_pair in commands:
         if type(exe_paramfile_pair) is not tuple:
             result += exe_paramfile_pair + "\n"
         else:
-            result += exec_line %( cores, tasks_per_node, hyperthreading, exe_paramfile_pair[0], exe_paramfile_pair[1] )
-        
-    
-    return result
+            result += exec_line % (cores, tasks_per_node, hyperthreading, exe_paramfile_pair[0], exe_paramfile_pair[1])
 
+    return result
diff --git a/python/waLBerla/tools/jobscripts/pizdaint_hybrid.py b/python/waLBerla/tools/jobscripts/pizdaint_hybrid.py
index 00363d39f..4d379bd27 100644
--- a/python/waLBerla/tools/jobscripts/pizdaint_hybrid.py
+++ b/python/waLBerla/tools/jobscripts/pizdaint_hybrid.py
@@ -10,7 +10,7 @@ def createJobscript(wall_time=None, nodes=None, cores=None, initial_dir=None, jo
     if type(hyperthreading) is bool:
         hyperthreading = 2 if hyperthreading else 1
 
-    CORES_PER_NODE = 1#12 * hyperthreading
+    CORES_PER_NODE = 1  # 12 * hyperthreading
 
     if wall_time and wall_time.total_seconds() > 24 * 3600:
         raise ValueError("No jobs longer that 24h allowed")
diff --git a/python/waLBerla/tools/jobscripts/supermuc.py b/python/waLBerla/tools/jobscripts/supermuc.py
index a20ec18ab..973048d58 100644
--- a/python/waLBerla/tools/jobscripts/supermuc.py
+++ b/python/waLBerla/tools/jobscripts/supermuc.py
@@ -4,90 +4,87 @@ import os
 import math
 
 
-
-def createJobscript( wall_time = None, nodes = None, cores = None, job_class = None, island_count = None,
-                     initial_dir = '~', job_name="waLBerla", energy_tag="",
-                     exe_name = None, parameter_files = [], commands = [], hyperthreading=1,  
-                     output_file=None, error_file=None, **kwargs ):
+def createJobscript(wall_time=None, nodes=None, cores=None, job_class=None, island_count=None,
+                    initial_dir='~', job_name="waLBerla", energy_tag="",
+                    exe_name=None, parameter_files=[], commands=[], hyperthreading=1,
+                    output_file=None, error_file=None, **kwargs):
     if type(hyperthreading) == bool:
         hyperthreading = 2 if hyperthreading else 1
-    
+
     CORES_PER_NODE = 16 * hyperthreading
     NODES_PER_ISLAND = 512
-    
-    if wall_time and wall_time.total_seconds() >  48 * 3600:
+
+    if wall_time and wall_time.total_seconds() > 48 * 3600:
         raise ValueError("No jobs longer that 48h allowed")
 
     if hyperthreading > 2:
-        raise ValueError("SuperMUC supports only two way hyperthreading (requested %d)" %(hyperthreading,) )
-        
+        raise ValueError("SuperMUC supports only two way hyperthreading (requested %d)" % (hyperthreading,))
+
     if nodes is not None and cores is not None:
         raise ValueError("You can either specify nodes or cores - not both.")
-    
+
     if nodes is None and cores is None:
         raise ValueError('Specify either cores or nodes.')
-    
+
     if nodes is None:
-        nodes = math.ceil( cores / CORES_PER_NODE )
+        nodes = math.ceil(cores / CORES_PER_NODE)
     if cores is None:
         cores = nodes * CORES_PER_NODE
 
     if island_count is None:
-        island_count = math.ceil( nodes / NODES_PER_ISLAND )
+        island_count = math.ceil(nodes / NODES_PER_ISLAND)
 
     if cores > CORES_PER_NODE and cores % CORES_PER_NODE != 0:
         raise ValueError("When using more than one node, the number of cores has to be a multiple of 16")
-        
-    if not output_file: output_file = job_name
-    if not error_file:  error_file  = job_name
+
+    if not output_file:
+        output_file = job_name
+    if not error_file:
+        error_file = job_name
 
     if not job_class:
         if nodes <= 32:
-            if wall_time.total_seconds() < 30*60:
-                job_class='test'
+            if wall_time.total_seconds() < 30 * 60:
+                job_class = 'test'
             else:
-                job_class='micro' 
+                job_class = 'micro'
         elif nodes <= 512:
-            job_class= 'general'
+            job_class = 'general'
         else:
             job_class = 'large'
 
-    tasks_per_node = min( CORES_PER_NODE, cores )
-    
-    task_affinity = "core" if hyperthreading==1 else "cpu"
-    
+    tasks_per_node = min(CORES_PER_NODE, cores)
+
+    task_affinity = "core" if hyperthreading == 1 else "cpu"
+
     energy_tag_statements = ""
-    if len( energy_tag ) > 0:
+    if len(energy_tag) > 0:
         energy_tag_statements = "\n#@ energy_policy_tag = " + energy_tag + "\n#@ minimize_time_to_solution = yes"
 
+    template_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), "supermuc.job")
+    result = open(template_file).read().format(cores=cores,
+                                               nodes=nodes,
+                                               initial_dir=initial_dir,
+                                               tasks_per_node=tasks_per_node,
+                                               job_class=job_class,
+                                               job_name=job_name,
+                                               island_count=island_count,
+                                               energy_tag_statements=energy_tag_statements,
+                                               wall_time=wall_time,
+                                               output_file=output_file,
+                                               error_file=error_file,
+                                               task_affinity=task_affinity)
+
+    exec_line = "mpiexec -n %d %s %s \n"
 
-    template_file = os.path.join(  os.path.dirname( os.path.realpath(__file__)  ), "supermuc.job" )
-    result = open(template_file).read().format( cores = cores, 
-                                                nodes = nodes, 
-                                                initial_dir = initial_dir, 
-                                                tasks_per_node = tasks_per_node,
-                                                job_class = job_class,
-                                                job_name = job_name,
-                                                island_count = island_count,
-                                                energy_tag_statements = energy_tag_statements,
-                                                wall_time = wall_time,
-                                                output_file = output_file,
-                                                error_file = error_file,
-                                                task_affinity=task_affinity )
-    
-    
-    exec_line = "mpiexec -n %d %s %s \n"    
-    
     if exe_name is not None:
         for param_file in parameter_files:
-            result += exec_line %( cores, exe_name, param_file )
-    
+            result += exec_line % (cores, exe_name, param_file)
+
     for exe_paramfile_pair in commands:
         if type(exe_paramfile_pair) is not tuple:
             result += exe_paramfile_pair + "\n"
         else:
-            result += exec_line %( cores, exe_paramfile_pair[0], exe_paramfile_pair[1] )
-    
-    
-    return result
+            result += exec_line % (cores, exe_paramfile_pair[0], exe_paramfile_pair[1])
 
+    return result
diff --git a/python/waLBerla/tools/jobscripts/supermuc_phase2.py b/python/waLBerla/tools/jobscripts/supermuc_phase2.py
index d43b5819e..b4cee231a 100644
--- a/python/waLBerla/tools/jobscripts/supermuc_phase2.py
+++ b/python/waLBerla/tools/jobscripts/supermuc_phase2.py
@@ -4,92 +4,90 @@ import os
 import math
 
 
-
-def createJobscript( wall_time = None, nodes = None, cores = None, job_class = None, island_count = None,
-                     initial_dir = '~', job_name="waLBerla", energy_tag="",
-                     exe_name = None, parameter_files = [], commands = [], hyperthreading=1, 
-                     output_file=None, error_file=None,  **kwargs ):
+def createJobscript(wall_time=None, nodes=None, cores=None, job_class=None, island_count=None,
+                    initial_dir='~', job_name="waLBerla", energy_tag="",
+                    exe_name=None, parameter_files=[], commands=[], hyperthreading=1,
+                    output_file=None, error_file=None, **kwargs):
     if type(hyperthreading) == bool:
         hyperthreading = 2 if hyperthreading else 1
-    
+
     CORES_PER_NODE = 28 * hyperthreading
     NODES_PER_ISLAND = 512
-    
-    if wall_time and wall_time.total_seconds() >  48 * 3600:
+
+    if wall_time and wall_time.total_seconds() > 48 * 3600:
         raise ValueError("No jobs longer that 48h allowed")
 
     if hyperthreading > 2:
-        raise ValueError("SuperMUC supports only two way hyperthreading (requested %d)" %(hyperthreading,) )
-        
+        raise ValueError("SuperMUC supports only two way hyperthreading (requested %d)" % (hyperthreading,))
+
     if nodes is not None and cores is not None:
         raise ValueError("You can either specify nodes or cores - not both.")
-    
+
     if nodes is None and cores is None:
         raise ValueError('Specify either cores or nodes.')
-    
+
     if nodes is None:
-        nodes = math.ceil( cores / CORES_PER_NODE )
+        nodes = math.ceil(cores / CORES_PER_NODE)
     if cores is None:
         cores = nodes * CORES_PER_NODE
 
     if cores > CORES_PER_NODE and cores % CORES_PER_NODE != 0:
-        raise ValueError("When using more than one node, the number of cores has to be a multiple of %d" , ( CORES_PER_NODE,) )
+        raise ValueError("When using more than one node, the number of cores has to be a multiple of %d",
+                         (CORES_PER_NODE,))
 
     if island_count is None:
-        island_count = math.ceil( nodes / NODES_PER_ISLAND )
-        
-    if not output_file: output_file = job_name
-    if not error_file:  error_file  = job_name
-    
+        island_count = math.ceil(nodes / NODES_PER_ISLAND)
+
+    if not output_file:
+        output_file = job_name
+    if not error_file:
+        error_file = job_name
+
     if not job_class:
         if nodes <= 20:
-            if wall_time.total_seconds() < 30*60:
-                job_class='test'
+            if wall_time.total_seconds() < 30 * 60:
+                job_class = 'test'
             else:
-                job_class='micro' 
+                job_class = 'micro'
         elif nodes <= 512:
-            job_class= 'general'
+            job_class = 'general'
         elif nodes <= 2048:
             job_class = 'big'
         else:
             job_class = 'special'
-    
-    tasks_per_node = min( CORES_PER_NODE, cores )
-    
-    task_affinity = "core" if hyperthreading==1 else "cpu"
-    
-    
+
+    tasks_per_node = min(CORES_PER_NODE, cores)
+
+    task_affinity = "core" if hyperthreading == 1 else "cpu"
+
     energy_tag_statements = ""
-    if len( energy_tag ) > 0:
+    if len(energy_tag) > 0:
         energy_tag_statements = "\n#@ energy_policy_tag = " + energy_tag + "\n#@ minimize_time_to_solution = yes"
 
-    template_file = os.path.join(  os.path.dirname( os.path.realpath(__file__)  ), "supermuc.job" )
-    result = open(template_file).read().format( cores = cores, 
-                                                nodes = nodes, 
-                                                initial_dir = initial_dir, 
-                                                tasks_per_node = tasks_per_node,
-                                                job_class = job_class,
-                                                job_name = job_name,
-                                                island_count = island_count,
-                                                energy_tag_statements = energy_tag_statements,
-                                                wall_time = wall_time,
-                                                output_file = output_file,
-                                                error_file = error_file,
-                                                task_affinity=task_affinity )
-    
-    
-    exec_line = "mpiexec -n %d %s %s \n"    
-    
+    template_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), "supermuc.job")
+    result = open(template_file).read().format(cores=cores,
+                                               nodes=nodes,
+                                               initial_dir=initial_dir,
+                                               tasks_per_node=tasks_per_node,
+                                               job_class=job_class,
+                                               job_name=job_name,
+                                               island_count=island_count,
+                                               energy_tag_statements=energy_tag_statements,
+                                               wall_time=wall_time,
+                                               output_file=output_file,
+                                               error_file=error_file,
+                                               task_affinity=task_affinity)
+
+    exec_line = "mpiexec -n %d %s %s \n"
+
     if exe_name is not None:
         for param_file in parameter_files:
-            result += exec_line %( cores, exe_name, param_file )
-    
+            result += exec_line % (cores, exe_name, param_file)
+
     for exe_paramfile_pair in commands:
         if type(exe_paramfile_pair) is not tuple:
             result += exe_paramfile_pair + "\n"
         else:
-            result += exec_line %( cores, exe_paramfile_pair[0], exe_paramfile_pair[1] )
-    
-    
-    return result
+            result += exec_line % (cores, exe_paramfile_pair[0], exe_paramfile_pair[1])
 
+    return result
diff --git a/python/waLBerla/tools/jobscripts/supermucng.py b/python/waLBerla/tools/jobscripts/supermucng.py
index 3b51c7c0f..9cf88d512 100644
--- a/python/waLBerla/tools/jobscripts/supermucng.py
+++ b/python/waLBerla/tools/jobscripts/supermucng.py
@@ -4,9 +4,11 @@ import os
 import math
 
 
-def createJobscript(wall_time=None, nodes=None, cores=None, job_class=None, initial_dir='~', job_name="waLBerla",
-                    exe_name=None, parameter_files=[], commands=[], hyperthreading=1,
-                    output_file=None, error_file=None, account=None, fixed_freq=True, omp_num_threads=1, **_):
+def createJobscript(wall_time=None, nodes=None, cores=None, job_class=None,
+                    initial_dir='~', job_name="waLBerla", exe_name=None,
+                    parameter_files=[], commands=[], hyperthreading=1,
+                    output_file=None, error_file=None, account=None,
+                    fixed_freq=True, omp_num_threads=1, **_):
     if type(hyperthreading) == bool:
         hyperthreading = 2 if hyperthreading else 1
 
@@ -30,7 +32,8 @@ def createJobscript(wall_time=None, nodes=None, cores=None, job_class=None, init
         cores = nodes * cores_per_node
 
     if cores > cores_per_node and cores % cores_per_node != 0:
-        raise ValueError("When using more than one node, the number of cores has to be a multiple of %d", (cores_per_node,))
+        raise ValueError("When using more than one node, the number of cores has to be a multiple of %d",
+                         (cores_per_node,))
 
     if not output_file:
         output_file = job_name
@@ -50,9 +53,9 @@ def createJobscript(wall_time=None, nodes=None, cores=None, job_class=None, init
         else:
             job_class = 'special'
 
-    if  cores_per_node % omp_num_threads != 0:
+    if cores_per_node % omp_num_threads != 0:
         raise ValueError("Could not divede cores_per_node %d to omp_num_threads %d", (cores_per_node, omp_num_threads))
-    tasks_per_node = min(cores_per_node//omp_num_threads, cores)
+    tasks_per_node = min(cores_per_node // omp_num_threads, cores)
 
     omp_places = "cores" if hyperthreading == 1 else "threads"
 
@@ -94,5 +97,6 @@ def createJobscript(wall_time=None, nodes=None, cores=None, job_class=None, init
 
 if __name__ == '__main__':
     from waLBerla.tools.jobscripts import createJobscript
-    print(createJobscript(wall_time=60*60, nodes=4, exe_name='grandchem', parameter_files=['a.cfg', 'b.cfg'],
+
+    print(createJobscript(wall_time=60 * 60, nodes=4, exe_name='grandchem', parameter_files=['a.cfg', 'b.cfg'],
                           machine='supermuc_ng'))
diff --git a/python/waLBerla/tools/lbm_unitconversion/__init__.py b/python/waLBerla/tools/lbm_unitconversion/__init__.py
index f3a5c478c..c0f58b73f 100644
--- a/python/waLBerla/tools/lbm_unitconversion/__init__.py
+++ b/python/waLBerla/tools/lbm_unitconversion/__init__.py
@@ -1,2 +1,4 @@
 from waLBerla.tools.lbm_unitconversion.pint_unit_converter import PintUnitConverter
 from waLBerla.tools.lbm_unitconversion.lattice_factors import extractLatticeFactors, computeLatticeFactors
+
+__all__ = ['PintUnitConverter', 'extractLatticeFactors', 'computeLatticeFactors']
diff --git a/python/waLBerla/tools/lbm_unitconversion/lattice_factors.py b/python/waLBerla/tools/lbm_unitconversion/lattice_factors.py
index 87ab2d7d0..583f1f9a2 100644
--- a/python/waLBerla/tools/lbm_unitconversion/lattice_factors.py
+++ b/python/waLBerla/tools/lbm_unitconversion/lattice_factors.py
@@ -2,7 +2,6 @@ import sympy as sp
 from waLBerla.tools.lbm_unitconversion import PintUnitConverter
 
 
-
 def computeLatticeFactors(constant=True, **kwargs):
     """
     Computes lattice factors with given physical input values
@@ -19,11 +18,11 @@ def computeLatticeFactors(constant=True, **kwargs):
         :param args: keys for all symbols to create
         """
         for arg in args:
-            if not arg in d.keys():
+            if arg not in d.keys():
                 d[arg] = sp.symbols(arg, positive=True)
 
-    def __append_equation(d,eq):
-        if not eq in d:
+    def __append_equation(d, eq):
+        if eq not in d:
             d.append(eq)
 
     val = {}
@@ -32,31 +31,31 @@ def computeLatticeFactors(constant=True, **kwargs):
     for k, v in kwargs.items():
         if k in ['time', 'l_time']:
             __create_symbols(val, 'time', 'l_time', 'l_s')
-            __append_equation(eqn, sp.Eq(val['time'], val['l_time']*val['l_s']))
+            __append_equation(eqn, sp.Eq(val['time'], val['l_time'] * val['l_s']))
         elif k in ['size', 'l_size']:
             __create_symbols(val, 'size', 'l_size', 'l_m')
-            __append_equation(eqn, sp.Eq(val['size'], val['l_size']*val['l_m']))
+            __append_equation(eqn, sp.Eq(val['size'], val['l_size'] * val['l_m']))
         elif k in ['omega', 'tau', 'nu', 'eta', 'l_nu', 'rho']:
             if constant:
                 __create_symbols(val, 'omega', 'tau', 'nu', 'eta', 'l_nu', 'rho', 'l_rho', 'l_m', 'l_kg', 'l_s')
-                __append_equation(eqn, sp.Eq(val['nu'],    val['eta']/val['rho']))
+                __append_equation(eqn, sp.Eq(val['nu'], val['eta'] / val['rho']))
             else:
                 __create_symbols(val, 'omega', 'tau', 'nu', 'l_nu', 'rho', 'l_rho', 'l_m', 'l_kg', 'l_s')
-            __append_equation(eqn, sp.Eq(val['omega'], 1/val['tau']))
-            __append_equation(eqn, sp.Eq(val['tau'],   3*val['l_nu']+0.5))
-            __append_equation(eqn, sp.Eq(val['nu'],    val['l_nu']*val['l_m']**2/val['l_s']))
+            __append_equation(eqn, sp.Eq(val['omega'], 1 / val['tau']))
+            __append_equation(eqn, sp.Eq(val['tau'], 3 * val['l_nu'] + 0.5))
+            __append_equation(eqn, sp.Eq(val['nu'], val['l_nu'] * val['l_m'] ** 2 / val['l_s']))
             __append_equation(eqn, sp.Eq(val['l_rho'], 1))
-            __append_equation(eqn, sp.Eq(val['rho'],   val['l_rho']*val['l_kg']/val['l_m']**3))
+            __append_equation(eqn, sp.Eq(val['rho'], val['l_rho'] * val['l_kg'] / val['l_m'] ** 3))
         elif k in ['omega_sol', 'tau_sol', 'a_sol', 'l_a_sol']:
             __create_symbols(val, 'omega_sol', 'tau_sol', 'a_sol', 'l_a_sol', 'l_m', 'l_s')
-            __append_equation(eqn, sp.Eq(val['omega_sol'], 1/val['tau_sol']))
-            __append_equation(eqn, sp.Eq(val['tau_sol'],   3*val['l_a_sol']+0.5))
-            __append_equation(eqn, sp.Eq(val['a_sol'],     val['l_a_sol']*val['l_m']**2/val['l_s']))
+            __append_equation(eqn, sp.Eq(val['omega_sol'], 1 / val['tau_sol']))
+            __append_equation(eqn, sp.Eq(val['tau_sol'], 3 * val['l_a_sol'] + 0.5))
+            __append_equation(eqn, sp.Eq(val['a_sol'], val['l_a_sol'] * val['l_m'] ** 2 / val['l_s']))
         elif k in ['omega_liq', 'tau_liq', 'a_liq', 'l_a_liq']:
             __create_symbols(val, 'omega_liq', 'tau_liq', 'a_liq', 'l_a_liq', 'l_m', 'l_s')
-            __append_equation(eqn, sp.Eq(val['omega_liq'], 1/val['tau_liq']))
-            __append_equation(eqn, sp.Eq(val['tau_liq'],   3*val['l_a_liq']+0.5))
-            __append_equation(eqn, sp.Eq(val['a_liq'],     val['l_a_liq']*val['l_m']**2/val['l_s']))            
+            __append_equation(eqn, sp.Eq(val['omega_liq'], 1 / val['tau_liq']))
+            __append_equation(eqn, sp.Eq(val['tau_liq'], 3 * val['l_a_liq'] + 0.5))
+            __append_equation(eqn, sp.Eq(val['a_liq'], val['l_a_liq'] * val['l_m'] ** 2 / val['l_s']))
         else:
             __create_symbols(val, k)
 
@@ -66,7 +65,7 @@ def computeLatticeFactors(constant=True, **kwargs):
     solutions = sp.solve(eqn, tuple(val.values()), dict=True, force=True)
 
     if len(solutions) == 1:
-        #print(solutions[0])
+        # print(solutions[0])
         values = dict()
         for var in val.keys():
             if val[var] in solutions[0]:
@@ -86,7 +85,8 @@ def extractLatticeFactors(config, constant=True):
 
     def __scan_dict(src, dst):
         """
-        Scans the whole src dictionary for special keys and store the magnitudes of the values after conversion to SI units
+        Scans the whole src dictionary for special keys and stores
+        the magnitudes of the values after conversion to SI units
 
         :param src: src dictionary to walk through
         :param dst: dst dictionary, where all converted quantities are stored
@@ -95,11 +95,9 @@ def extractLatticeFactors(config, constant=True):
             if type(value) is dict:
                 __scan_dict(value, dst)
             elif key in ['omega', 'tau', 'nu', 'eta', 'l_nu', 'rho', 'time', 'l_time', 'size', 'l_size',
-                    'l_m', 'l_s', 'l_kg', 'l_K', 'l_mol', 'l_A', 'l_cd']:
+                         'l_m', 'l_s', 'l_kg', 'l_K', 'l_mol', 'l_A', 'l_cd']:
                 dst[key] = puc.to_si_units(puc.ureg.Quantity(value)).magnitude
 
     si_config = {}
     __scan_dict(config, si_config)
     return computeLatticeFactors(constant, **si_config)
-
-
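A sketch of how ``computeLatticeFactors`` might be called; the input values are
examples only, and which combination of inputs yields a uniquely solvable system
depends on the equations set up above::

    from waLBerla.tools.lbm_unitconversion import computeLatticeFactors

    # physical viscosity and density plus the chosen space/time resolution
    factors = computeLatticeFactors(nu=1e-6, rho=1000,
                                    size=0.1, l_size=100,
                                    time=1.0, l_time=1000)
    # expected to contain e.g. l_m, l_s and the relaxation parameters tau/omega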
diff --git a/python/waLBerla/tools/lbm_unitconversion/pint_unit_converter.py b/python/waLBerla/tools/lbm_unitconversion/pint_unit_converter.py
index eec47971e..fa0df9491 100644
--- a/python/waLBerla/tools/lbm_unitconversion/pint_unit_converter.py
+++ b/python/waLBerla/tools/lbm_unitconversion/pint_unit_converter.py
@@ -4,7 +4,7 @@ from pint import UnitRegistry
 
 class PintUnitConverter:
 
-    def __init__( self, ureg = UnitRegistry(), **kwargs ):
+    def __init__(self, ureg=UnitRegistry(), **kwargs):
         """Possible arguments (all in SI units):
                 * l_m  (lattice meter, same as dx)
                 * l_s  (lattice second, same as dt)
@@ -17,86 +17,89 @@ class PintUnitConverter:
         """
         self.ureg = ureg
 
-        #if not 'l_m'   in kwargs: kwargs['l_m']   = 1
-        #if not 'l_s'   in kwargs: kwargs['l_s']   = 1
-        #if not 'l_kg'  in kwargs: kwargs['l_kg']  = 1
-        #if not 'l_K'   in kwargs: kwargs['l_K']   = 1
-        #if not 'l_mol' in kwargs: kwargs['l_mol'] = 1
-        #if not 'l_A'   in kwargs: kwargs['l_A']   = 1
-        #if not 'l_cd'  in kwargs: kwargs['l_cd']  = 1
-
-        self.ureg.define( 'l_radian = radian' )
-        self.define_lattice_units( **kwargs )
-                
-    def define_lattice_units( self, **kwargs ):
+        # if not 'l_m'   in kwargs: kwargs['l_m']   = 1
+        # if not 'l_s'   in kwargs: kwargs['l_s']   = 1
+        # if not 'l_kg'  in kwargs: kwargs['l_kg']  = 1
+        # if not 'l_K'   in kwargs: kwargs['l_K']   = 1
+        # if not 'l_mol' in kwargs: kwargs['l_mol'] = 1
+        # if not 'l_A'   in kwargs: kwargs['l_A']   = 1
+        # if not 'l_cd'  in kwargs: kwargs['l_cd']  = 1
+
+        self.ureg.define('l_radian = radian')
+        self.define_lattice_units(**kwargs)
+
+    def define_lattice_units(self, **kwargs):
         """Defines lattice units"""
-        for key,value in kwargs.items():
-            if   key=='l_m'  : self.ureg.define( 'l_meter    = %g * m            = l_m = l_meter   ' % value )
-            elif key=='l_s'  : self.ureg.define( 'l_second   = %g * s            = l_s = l_sec     ' % value )
-            elif key=='l_kg' : self.ureg.define( 'l_kilogram = %g * kg           = l_kg            ' % value )
-            elif key=='l_K'  : self.ureg.define( 'l_kelvin   = %g * K  ;offset:0 = l_K = l_degK    ' % value )
-            elif key=='l_mol': self.ureg.define( 'l_mole     = %g * mol          = l_mol           ' % value )
-            elif key=='l_A'  : self.ureg.define( 'l_ampere   = %g * A            = l_A = l_amp     ' % value )
-            elif key=='l_cd' : self.ureg.define( 'l_candela  = %g * cd           = l_cd = l_candle ' % value )
-      
-
-    def to_si_units( self, value ):
+        for key, value in kwargs.items():
+            if key == 'l_m':
+                self.ureg.define('l_meter    = %g * m            = l_m = l_meter   ' % value)
+            elif key == 'l_s':
+                self.ureg.define('l_second   = %g * s            = l_s = l_sec     ' % value)
+            elif key == 'l_kg':
+                self.ureg.define('l_kilogram = %g * kg           = l_kg            ' % value)
+            elif key == 'l_K':
+                self.ureg.define('l_kelvin   = %g * K  ;offset:0 = l_K = l_degK    ' % value)
+            elif key == 'l_mol':
+                self.ureg.define('l_mole     = %g * mol          = l_mol           ' % value)
+            elif key == 'l_A':
+                self.ureg.define('l_ampere   = %g * A            = l_A = l_amp     ' % value)
+            elif key == 'l_cd':
+                self.ureg.define('l_candela  = %g * cd           = l_cd = l_candle ' % value)
+
+    def to_si_units(self, value):
         """SI= Systeme international d'unites, International System of Units"""
-        if not hasattr( value, 'units'):
+        if not hasattr(value, 'units'):
             return value
-        #convert to base units (m,s,g,K,mol,A,cd)
+        # convert to base units (m,s,g,K,mol,A,cd)
         value.ito_base_units()
         return value
 
-    def to_si_units_mag(self, value ):
-        if not hasattr( value, 'units'):
+    def to_si_units_mag(self, value):
+        if not hasattr(value, 'units'):
             return value
         else:
             return self.to_si_units(value).magnitude
-        
 
-    def to_sl_units( self, value ):
+    def to_sl_units(self, value):
         """SL = Lattice System of Units"""
 
-        if not hasattr( value, 'units'):
+        if not hasattr(value, 'units'):
             return value
-        #convert to SI units (m,s,kg,K,mol,A,cd)
-        value = self.to_si_units( value )
+        # convert to SI units (m,s,kg,K,mol,A,cd)
+        value = self.to_si_units(value)
 
         latticeUnits = {
-            '[length]'      : 'l_m',
-            '[time]'        : 'l_s',
-            '[mass]'        : 'l_kg',
-            '[temperature]' : 'l_K',
-            '[substance]'   : 'l_mol',
-            '[current]'     : 'l_A',
-            '[luminosity]'  : 'l_cd',
+            '[length]': 'l_m',
+            '[time]': 'l_s',
+            '[mass]': 'l_kg',
+            '[temperature]': 'l_K',
+            '[substance]': 'l_mol',
+            '[current]': 'l_A',
+            '[luminosity]': 'l_cd',
         }
         units = dict()
         for unit, power in value.dimensionality.items():
-            units[ latticeUnits[ unit ] ] = power
-        return value.to( units )
+            units[latticeUnits[unit]] = power
+        return value.to(units)
 
     def to_sl_units_mag(self, value):
-        if not hasattr( value, 'units'):
+        if not hasattr(value, 'units'):
             return value
         else:
             return self.to_sl_units(value).magnitude
-        
-
 
     def conv_config(self, config):
         result = {}
 
-        for key,value in config.items():
+        for key, value in config.items():
             if type(value) is dict:
-                result[key] = self.conv_config( value )
+                result[key] = self.conv_config(value)
             elif type(value) is self.ureg.Quantity:
-                result[key] = self.to_sl_units_mag( value )
+                result[key] = self.to_sl_units_mag(value)
             elif type(value) is tuple:
-                result[key] = tuple( [ self.to_sl_units_mag(e) for e in value ] )
+                result[key] = tuple([self.to_sl_units_mag(e) for e in value])
             elif type(value) is list:
-                result[key] = [ self.to_sl_units_mag(e) for e in value ]
+                result[key] = [self.to_sl_units_mag(e) for e in value]
             else:
                 result[key] = value
 
@@ -105,25 +108,24 @@ class PintUnitConverter:
     def __conf_config_strings_to_units(self, config):
         result = {}
 
-        for key,value in config.items():
+        for key, value in config.items():
             if type(value) is dict:
-                result[key] = self.__conf_config_strings_to_units( value )
+                result[key] = self.__conf_config_strings_to_units(value)
             elif key == 'type' or key == 'direction':
                 result[key] = value
             elif type(value) is tuple:
                 try:
-                    result[key] = tuple( [ self.ureg.Quantity(e) for e in value ] )
+                    result[key] = tuple([self.ureg.Quantity(e) for e in value])
                 except (pint.UndefinedUnitError, ValueError, TypeError):
                     result[key] = value
             else:
                 try:
-                    result[key] = self.ureg.Quantity( value )
+                    result[key] = self.ureg.Quantity(value)
                 except (pint.UndefinedUnitError, ValueError, TypeError):
                     result[key] = value
 
         return result
 
-
     def conv_config_unit_strings(self, config):
-        result = self.__conf_config_strings_to_units( config )
-        return self.conv_config( result )
+        result = self.__conf_config_strings_to_units(config)
+        return self.conv_config(result)
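A minimal sketch of the converter above; the lattice scaling factors are example
values (dx = 0.01 m, dt = 1e-4 s)::

    from waLBerla.tools.lbm_unitconversion import PintUnitConverter

    puc = PintUnitConverter(l_m=0.01, l_s=1e-4, l_kg=1.0)
    conf = {'Physical': {'size': '0.1 m', 'time': '2 s'}}
    # unit strings are parsed by pint and converted to lattice units
    print(puc.conv_config_unit_strings(conf))
    # -> {'Physical': {'size': 10.0, 'time': 20000.0}}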
diff --git a/python/waLBerla/tools/report/__init__.py b/python/waLBerla/tools/report/__init__.py
index e13910bbc..419160efb 100644
--- a/python/waLBerla/tools/report/__init__.py
+++ b/python/waLBerla/tools/report/__init__.py
@@ -7,16 +7,16 @@ It provides custom template markup for generating matplotlib graphs with data fr
 To use this module one first has to write the simulation results to a sqlite3 database e.g. using waLBerla.tools.sqlite
 
 Then create a Jinja2 HTML template. Here is an example using base templates provided by this module::
-        
+
     {% extends "waLBerla/bootstrap_report.html" %}
     {% block content %}
     <div class="container">
       <div class="text-center">
         <h1> MySetup </h1>
-        
+
         {% matplotlib %}
             # Some SQL query that returns a two column result
-            q = "SELECT capillaryNr,shapeFactor FROM runs WHERE yCells=160 AND zCells=160 AND bubbleDiameter=60 ORDER BY capillaryNr"
+            q = "SELECT capillaryNr,shapeFactor FROM runs WHERE yCells=160 AND zCells=160 AND bubbleDiameter=60 ORDER BY capillaryNr"  # noqa: E501
             # plt is matplotlib.pyplot extended with the custom 'dbplot' function
             plt.dbplot( q, label="sim shapefactor", marker="o" )
             plt.legend( loc='center')
@@ -30,4 +30,6 @@ Either once or run a small webserver that reloads the template if it was changed
 
 from __future__ import print_function, absolute_import, division, unicode_literals
 
-from waLBerla.tools.report.report import setupFlaskApp, runWebserver, generate, cliFrontend
\ No newline at end of file
+from waLBerla.tools.report.report import setupFlaskApp, runWebserver, generate, cliFrontend
+
+__all__ = ['setupFlaskApp', 'runWebserver', 'generate', 'cliFrontend']
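A minimal sketch of serving a report; the template is looked up in the current
working directory and the database path is an assumption::

    from waLBerla.tools.report import runWebserver

    # serves the report generated from database.sqlite, reloading templates on change
    runWebserver(database_file='database.sqlite', open_browser=True)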
diff --git a/python/waLBerla/tools/report/report.py b/python/waLBerla/tools/report/report.py
index ee44f43e7..28b194965 100644
--- a/python/waLBerla/tools/report/report.py
+++ b/python/waLBerla/tools/report/report.py
@@ -8,21 +8,20 @@ from glob import glob
 from functools import partial
 import matplotlib.pyplot as plt
 from matplotlib import rc
-import matplotlib
 
-from jinja2 import Environment, PackageLoader, PrefixLoader,FileSystemLoader, ChoiceLoader
+from jinja2 import Environment, PackageLoader, PrefixLoader, FileSystemLoader, ChoiceLoader
 from jinja2 import nodes
 from jinja2.ext import Extension
 
-
 try:
     import mpld3
+
     mpld3_available = True
 except ImportError:
     mpld3_available = False
 
 rc('font', size='8.0')
-#matplotlib.style.use('ggplot')
+# matplotlib.style.use('ggplot')
 
 
 # mpl3d is a library for displaying interactive plots in html pages
@@ -34,96 +33,96 @@ write_pdfs = True
 pdf_output_dir = "pdfs"
 
 
-
-def plot_to_html( name = None ):
+def plot_to_html(name=None):
     """Converts a matplotlib figure, such that it can be displayed in an html page
         either using an embedded svg tag (default)
-        or by using mpld3 
+        or by using mpld3
     """
     result = ""
-    if mpld3_available and use_mpld3 :
-        html = mpld3.fig_to_html( plt.gcf() )
+    if mpld3_available and use_mpld3:
+        html = mpld3.fig_to_html(plt.gcf())
         result = html
     else:
         figStr = io.StringIO()
-        plt.savefig( figStr, format='svg', bbox_inches='tight' )
+        plt.savefig(figStr, format='svg', bbox_inches='tight')
         result = figStr.getvalue()
-    
+
     if write_pdfs and name:
         if not os.path.exists(pdf_output_dir):
             os.makedirs(pdf_output_dir)
 
         filename = os.path.join(pdf_output_dir, name + '.pdf')
         filepath = os.path.join(filename)
-        plt.savefig( filepath, bbox_inches='tight' )
-        result = '<a href="%s" > %s </a>' % ( filename, result )
-        
+        plt.savefig(filepath, bbox_inches='tight')
+        result = '<a href="%s" > %s </a>' % (filename, result)
+
     plt.clf()
-        
+
     return result
 
-def dbQuery( database_getter, query ):
+
+def dbQuery(database_getter, query):
     """Executes an SQL query on the given sqlite database handle
-       Returns tuple of two elements, first element is a list of column names, 
+       Returns tuple of two elements, first element is a list of column names,
        the second element contains the data (row by row)"""
     database_handle = database_getter()
     c = database_handle.cursor()
-    c.execute( query )
+    c.execute(query)
     column_names = []
     for column_description in c.description:
-        column_names += [ column_description[0] ]
+        column_names += [column_description[0]]
     return column_names, c.fetchall()
-    
 
-def numpy_arr_from_db( database_getter, query ):
+
+def numpy_arr_from_db(database_getter, query):
     """Executes sqlite query and returns result as numpy arrays
         Returns 2-tuple: xVals, [yVals0, yVals1, ...] (depending on the number of result columns);
                          yVals* are numpy arrays, xVals can also be of another type (e.g. strings)
     """
-    column_names, data = dbQuery( database_getter, query )      
-    xVals = [ data[i][0] for i in range( len( data ) ) ]
+    column_names, data = dbQuery(database_getter, query)
+    xVals = [data[i][0] for i in range(len(data))]
     yArrays = []
     if data:
-       for j in range( 1, len( data[0] ) ):
-          yVals = np.array( [ data[i][j] for i in range( len( data ) ) ] )        
-          yArrays.append( yVals )
-    
+        for j in range(1, len(data[0])):
+            yVals = np.array([data[i][j] for i in range(len(data))])
+            yArrays.append(yVals)
+
     return xVals, yArrays
-    
-    
-def dbPlot( database_getter, query, **kwargs ):
-    column_names, data = dbQuery( database_getter, query )  
-    
-    xVals, yArrays = numpy_arr_from_db( database_getter, query )
-    for j,yVals in enumerate(yArrays):
-      for value in yVals:
-          assert ( isinstance( value, (int, float) ) )
-      if 'label' in kwargs:                
-          plt.plot( xVals, yVals, **kwargs )
-      else:
-          plt.plot( xVals, yVals, label=column_names[j], **kwargs )
-
-
-def remove_indentation( input ):
-    input =  input.replace('\t', '    ' )
-    indentation = min( [ len(s) - len( s.lstrip() ) for s in input.splitlines(False) if len(s.lstrip() ) > 0 ] )
-    return "\n".join( [ s[indentation:] for s in input.splitlines(False) ] )     
-    
+
+
+def dbPlot(database_getter, query, **kwargs):
+    column_names, data = dbQuery(database_getter, query)
+
+    xVals, yArrays = numpy_arr_from_db(database_getter, query)
+    for j, yVals in enumerate(yArrays):
+        for value in yVals:
+            assert isinstance(value, (int, float))
+        if 'label' in kwargs:
+            plt.plot(xVals, yVals, **kwargs)
+        else:
+            plt.plot(xVals, yVals, label=column_names[j], **kwargs)
+
+
+def remove_indentation(input):
+    input = input.replace('\t', '    ')
+    indentation = min([len(s) - len(s.lstrip()) for s in input.splitlines(False) if len(s.lstrip()) > 0])
+    return "\n".join([s[indentation:] for s in input.splitlines(False)])
+
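``remove_indentation`` above dedents a template tag body so it can be passed to
``exec()``; a tiny behavioural sketch::

    body = "\n    x = 1\n    print(x)"
    print(remove_indentation(body))
    # prints an empty first line, then:
    # x = 1
    # print(x)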
 
 class MatplotlibExtension(Extension):
     # a set of names that trigger the extension.
     tags = set(['matplotlib', 'plot'])
 
-    def __init__(self, environment ):
+    def __init__(self, environment):
         super(MatplotlibExtension, self).__init__(environment)
-        
+
         # add the defaults to the environment
         environment.extend(
             plt_object=plt,
-            output_folder = ".", 
-            plot_funcs = {},
-            vars = {},
-            database_file = "database.sqlite"
+            output_folder=".",
+            plot_funcs={},
+            vars={},
+            database_file="database.sqlite"
         )
 
     def parse(self, parser):
@@ -131,154 +130,146 @@ class MatplotlibExtension(Extension):
         args = []
         if parser.stream.current.type != 'block_end':
             lineno = parser.stream.current.lineno
-            args.append ( parser.parse_expression() )
+            args.append(parser.parse_expression())
         else:
-            args.append( nodes.Const(None) )
-        
-        body = parser.parse_statements(['name:endmatplotlib', 'name:endplot', 'name:end_matplotlib', 'name:end_plot'], drop_needle=True )
-        args.append( nodes.ContextReference() )
-        args.append( nodes.Name('i', 'load') )
-        return nodes.CallBlock(self.call_method('_execute_matplotlib', args ),
-                               [], [], body).set_lineno(lineno)
-
+            args.append(nodes.Const(None))
+            lineno = parser.stream.current.lineno  # ensure lineno is defined for set_lineno below
 
+        body = parser.parse_statements(['name:endmatplotlib', 'name:endplot', 'name:end_matplotlib', 'name:end_plot'],
+                                       drop_needle=True)
+        args.append(nodes.ContextReference())
+        args.append(nodes.Name('i', 'load'))
+        return nodes.CallBlock(self.call_method('_execute_matplotlib', args),
+                               [], [], body).set_lineno(lineno)
 
     def _execute_matplotlib(self, name, context, i, caller):
         tag_body = caller()
-        plt.dbplot = partial ( dbPlot, self.environment.database_getter )
+        plt.dbplot = partial(dbPlot, self.environment.database_getter)
         context = context.get_all()
 
-        context.update( {'plt'  : plt, 
-                         'vars' : self.environment.vars, 
-                         'numpy_arr_from_db' : partial( numpy_arr_from_db, self.environment.database_getter ),
-                         'db_query' :          partial( dbQuery, self.environment.database_getter), 
-                         'np' : np  } ) 
-        
-        context.update( self.environment.plot_funcs )
-        context.update( {'i': i } )
+        context.update({'plt': plt,
+                        'vars': self.environment.vars,
+                        'numpy_arr_from_db': partial(numpy_arr_from_db, self.environment.database_getter),
+                        'db_query': partial(dbQuery, self.environment.database_getter),
+                        'np': np})
+
+        context.update(self.environment.plot_funcs)
+        context.update({'i': i})
         try:
-            exec ( remove_indentation( tag_body ), context )
+            exec(remove_indentation(tag_body), context)
         except Exception as e:
-            print(  remove_indentation( tag_body ) )
-            raise e  
-        
-        return plot_to_html( name )
-
+            print(remove_indentation(tag_body))
+            raise e
 
+        return plot_to_html(name)
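A hedged sketch of how the tag is used in a report template; the figure name, table and columns are assumptions. The tag body is executed as Python with plt, np, vars, db_query and numpy_arr_from_db pre-bound, and the extension loads a template variable i, so the tag usually sits inside a loop.

    template_fragment = """
    {% for i in range(1) %}
    {% matplotlib 'runtime_plot' %}
    plt.dbplot("SELECT timesteps, mlups FROM runs")  # hypothetical query
    plt.xlabel("timesteps")
    {% endmatplotlib %}
    {% endfor %}
    """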
 
 
-##############################################################################################################################################
-#############################################    User Functions   ############################################################################
-##############################################################################################################################################
+# User Functions
 
 
-def setupFlaskApp( app, context={}, plot_funcs = {}, database_file="database.sqlite"  ):
-    """Call this function to set up your custom flask app. This is an advanced function, probably you want to use runWebserver instead"""
+def setupFlaskApp(app, context={}, plot_funcs={}, database_file="database.sqlite"):
+    """Call this function to set up your custom flask app. This is an advanced function,
+       probably you want to use runWebserver instead"""
     from flask import g
-    
+
     def get_db():
         db = getattr(g, '_database', None)
         if db is None:
-            db = g._database = sqlite3.connect( database_file )
+            db = g._database = sqlite3.connect(database_file)
         return db
-    
-    app.jinja_loader = ChoiceLoader( [ FileSystemLoader('.'), 
-                                      PrefixLoader( { 'waLBerla' : PackageLoader ('waLBerla', 'tools', 'report', 'templates') } ) ] )
-    
-    app.jinja_env.add_extension( MatplotlibExtension)
 
-    app.jinja_env.database_file   = database_file 
+    app.jinja_loader = ChoiceLoader([FileSystemLoader('.'),
+                                     PrefixLoader(
+                                         {'waLBerla': PackageLoader('waLBerla', 'tools', 'report', 'templates')})])
+
+    app.jinja_env.add_extension(MatplotlibExtension)
+
+    app.jinja_env.database_file = database_file
     app.jinja_env.database_getter = get_db
-    app.jinja_env.plot_funcs      = plot_funcs
+    app.jinja_env.plot_funcs = plot_funcs
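A minimal sketch of the advanced path, assuming an existing Flask app; the database file name is illustrative.

    from flask import Flask

    app = Flask(__name__)
    setupFlaskApp(app, database_file="database.sqlite")
    # register additional routes on `app` here, then call app.run()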
 
 
-def runWebserver( context={}, plot_funcs = {}, database_file="database.sqlite", 
-                   open_browser=False, debug=True ):
-    """Runs a small local webserver using the flask module ( pip3 install flask ) serving the report. 
+def runWebserver(context={}, plot_funcs={}, database_file="database.sqlite",
+                 open_browser=False, debug=True):
+    """Runs a small local webserver using the flask module ( pip3 install flask ) serving the report.
        When refreshing in the browser the report is updated."""
     from flask import Flask
-    from flask import render_template, make_response, send_from_directory
-    
+    from flask import render_template, send_from_directory
+
     context['database_file'] = database_file
-    
+
     app = Flask(__name__)
-    app.debug=True
-    setupFlaskApp(app, context=context, plot_funcs=plot_funcs, database_file=database_file )
-    
+    app.debug = debug
+    setupFlaskApp(app, context=context, plot_funcs=plot_funcs, database_file=database_file)
+
     @app.route("/")
     def main_route():
-        reports = [ n[2:-5] for n in glob( "t_*.html") ]
-        return render_template( "waLBerla/report_overview.html", reports=reports, **context )
+        reports = [n[2:-5] for n in glob("t_*.html")]
+        return render_template("waLBerla/report_overview.html", reports=reports, **context)
 
     @app.route("/<template_name>")
     def report_route(template_name):
         template_name = "t_" + template_name + ".html"
-        return render_template( template_name, **context )
-    
-    @app.route("/pdfs/<path:filename>" )
-    def pdf_route( filename ):
-        return send_from_directory( os.path.join( os.getcwd() , pdf_output_dir), filename)
-    
+        return render_template(template_name, **context)
+
+    @app.route("/pdfs/<path:filename>")
+    def pdf_route(filename):
+        return send_from_directory(os.path.join(os.getcwd(), pdf_output_dir), filename)
+
     if open_browser:
         import webbrowser
         webbrowser.open('http://127.0.0.1:5000/')
 
-    app.run( debug=debug )
+    app.run(debug=debug)
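Typical interactive use is then a one-liner; the report templates (t_*.html) are looked up in the current working directory via the FileSystemLoader above.

    runWebserver(database_file="database.sqlite", open_browser=True)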
 
 
-def generate( context={}, plot_funcs = {}, database_file="database.sqlite", input_output_list=None, open_browser=False ):
+def generate(context={}, plot_funcs={}, database_file="database.sqlite", input_output_list=None, open_browser=False):
     """Generates a html report. Uses jinja2 templating engine with custom
        matplotlib tag, which inserts matplotlib figures as svg graphics"""
-    
+
     def get_db():
         if get_db.databaseHandle is None:
-            get_db.databaseHandle = sqlite3.connect( database_file )
+            get_db.databaseHandle = sqlite3.connect(database_file)
         return get_db.databaseHandle
+
     get_db.databaseHandle = None
 
-    
     context['database_file'] = database_file
-    
-    loaders = ChoiceLoader( [ FileSystemLoader('.'), 
-                              PrefixLoader( { 'waLBerla' : PackageLoader ('waLBerla', 'tools', 'report', 'templates') } ) ] )
-    
 
-    env = Environment( loader = loaders, extensions=[MatplotlibExtension] )
-    env.database_file   = database_file 
+    loaders = ChoiceLoader([FileSystemLoader('.'),
+                            PrefixLoader({'waLBerla': PackageLoader('waLBerla', 'tools', 'report', 'templates')})])
+
+    env = Environment(loader=loaders, extensions=[MatplotlibExtension])
+    env.database_file = database_file
     env.database_getter = get_db
-    env.plot_funcs      = plot_funcs
+    env.plot_funcs = plot_funcs
 
     if input_output_list is None:
-        input_output_list = [ ( input, input[2:]) for input in glob( "t_*.html") ] 
-        context.update( { 'reports' : [ r[1] for r in input_output_list ] } )
-        input_output_list.append( ('waLBerla/report_overview.html', 'index.html') )
-        
+        input_output_list = [(fname, fname[2:]) for fname in glob("t_*.html")]
+        context.update({'reports': [r[1] for r in input_output_list]})
+        input_output_list.append(('waLBerla/report_overview.html', 'index.html'))
+
     for e in input_output_list:
-        template = env.get_template( e[0] )
-        with open( e[1],"w") as f:
-            print( "-- Generating " + e[1] )
-            f.write( template.render( **context ) )
-            
-            
+        template = env.get_template(e[0])
+        with open(e[1], "w") as f:
+            print("-- Generating " + e[1])
+            f.write(template.render(**context))
+
     if open_browser:
         import webbrowser
-        webbrowser.open( os.path.join( os.getcwd(),'index.html' ) )
+        webbrowser.open(os.path.join(os.getcwd(), 'index.html'))
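A sketch of both calling modes; the explicit template/output pair is hypothetical.

    # default mode: render every t_*.html in the working directory,
    # writing e.g. t_scaling.html -> scaling.html, plus an index.html overview
    generate(database_file="database.sqlite")

    # explicit mode: render only the given (template, output) pairs
    generate(input_output_list=[("t_scaling.html", "scaling.html")])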
 
 
-def cliFrontend( context={} ):
+def cliFrontend(context={}):
     """Command line interface for above functions"""
     import argparse
     parser = argparse.ArgumentParser()
-    parser.add_argument( "-i", "--interactive", action="store_true",    help="Run a webserver to interactively develop your reports. (flask required)" )
-    parser.add_argument( "-d", "--database",    default="database.sqlite", help="Database file (default: database.sqlite)" )
-    parser.add_argument( "-b", "--open_browser", action="store_true", help="Open Browser (on" )
+    parser.add_argument("-i", "--interactive", action="store_true",
+                        help="Run a webserver to interactively develop your reports. (flask required)")
+    parser.add_argument("-d", "--database", default="database.sqlite", help="Database file (default: database.sqlite)")
+    parser.add_argument("-b", "--open_browser", action="store_true", help="Open Browser (on")
 
     args = parser.parse_args()
     if args.interactive:
-        runWebserver(context=context, database_file= args.database, open_browser=args.open_browser )
+        runWebserver(context=context, database_file=args.database, open_browser=args.open_browser)
     else:
-        generate(context=context, database_file= args.database, open_browser=args.open_browser )
-    
-
-
-    
+        generate(context=context, database_file=args.database, open_browser=args.open_browser)
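With this, a report entry script reduces to a short sketch like the following (the file name and context keys are assumptions, and it presumes cliFrontend is re-exported by the report package; otherwise import it from its defining module).

    # report.py -- hypothetical entry point
    from waLBerla.tools.report import cliFrontend

    if __name__ == "__main__":
        cliFrontend(context={"title": "waLBerla benchmark report"})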
diff --git a/python/waLBerla/tools/source_checker/ParsedCodeFile.py b/python/waLBerla/tools/source_checker/ParsedCodeFile.py
index 2c10b7512..35c7b3a40 100644
--- a/python/waLBerla/tools/source_checker/ParsedCodeFile.py
+++ b/python/waLBerla/tools/source_checker/ParsedCodeFile.py
@@ -1,75 +1,72 @@
 import re
-import os
 import sys
 import itertools
 
-class ParsingException( Exception ):
-    def __init__ (self, msg ):
+
+class ParsingException(Exception):
+    def __init__(self, msg):
         self.msg = msg
+
     def __str__(self):
         return self.msg
 
 
 class ParsedCodeFile:
-    separatorRe  = re.compile( r'//={80,}') # A separator line has at least 80 "=" characters
-    propertiesRe = re.compile( r'//! \\(file|brief|author|ingroup) (.*)' )
-    
-    def __init__(self, filename ):
+    separatorRe = re.compile(r'//={80,}')  # A separator line has at least 80 "=" characters
+    propertiesRe = re.compile(r'//! \\(file|brief|author|ingroup) (.*)')
+
+    def __init__(self, filename):
         self.filename = filename
-        self.file = open( filename )
-        self.isHeaderFile =  ( filename.endswith(".h") and not filename.endswith(".impl.h") )
-        
+        self.file = open(filename)
+        self.isHeaderFile = (filename.endswith(".h") and not filename.endswith(".impl.h"))
+
         # Parse File
         self._parseHeaderLine()
-        self.license  = self._parseLicense()
-        self.tags     = self._parseTags()
-        self.doc      = self._parseDocumentationAtTop()
+        self.license = self._parseLicense()
+        self.tags = self._parseTags()
+        self.doc = self._parseDocumentationAtTop()
         self.includes = self._parseIncludes()
-        self.body     = self._parseBody()
-
-    
-    ####################################################################################################################
-    ################################## Parser Functions ################################################################
-    ####################################################################################################################
-    
-    def _putBackLine(self, line ):
-        self.file = itertools.chain((line,), self.file )
-    
-    
+        self.body = self._parseBody()
+
+    # Parser Functions
+
+    def _putBackLine(self, line):
+        self.file = itertools.chain((line,), self.file)
+
     def _parseHeaderLine(self):
         firstLine = self.file.readline()
-        if not ParsedCodeFile.separatorRe.match( firstLine ):
-            raise ParsingException( "No Header block at beginning of file found. File has to start with //======== in first line" )
-        
+        if not ParsedCodeFile.separatorRe.match(firstLine):
+            raise ParsingException(
+                "No Header block at beginning of file found. File has to start with //======== in first line")
+
     def _parseLicense(self):
         parsedLicense = []
         for line in self.file:
             if line.startswith('//') and not line.startswith('//!'):
-                parsedLicense.append( line[2:-1].lstrip() )
+                parsedLicense.append(line[2:-1].lstrip())
             else:
                 self._putBackLine(line)
                 break
-        
-        while len ( parsedLicense) > 0 and parsedLicense[-1] == "":
+
+        while len(parsedLicense) > 0 and parsedLicense[-1] == "":
             del parsedLicense[-1]
-        
-        while len ( parsedLicense) > 0 and parsedLicense[0] == "":
+
+        while len(parsedLicense) > 0 and parsedLicense[0] == "":
             del parsedLicense[0]
-        
+
         return parsedLicense
-    
-    
+
     def _parseTags(self):
         tags = []
         for line in self.file:
-            r = ParsedCodeFile.propertiesRe.match( line )
+            r = ParsedCodeFile.propertiesRe.match(line)
             if r:
-                tags.append( ( r.group(1).strip(), r.group(2).strip() ) )
+                tags.append((r.group(1).strip(), r.group(2).strip()))
             else:
                 self._putBackLine(line)
                 break
         return tags
-    
+
     def _parseDocumentationAtTop(self):
         result = []
         for line in self.file:
@@ -79,35 +76,35 @@ class ParsedCodeFile:
                 else:
                     strippedLine = line[2:].rstrip()
                 if len(strippedLine) > 0:
-                    result.append( strippedLine )
+                    result.append(strippedLine)
             else:
-                if not ParsedCodeFile.separatorRe.match( line ):
-                    raise ParsingException( "Header block at beginning of file does not end with separator line" )
+                if not ParsedCodeFile.separatorRe.match(line):
+                    raise ParsingException("Header block at beginning of file does not end with separator line")
                 break
-        
+
         strippedResult = []
         if len(result) > 0:
             whiteSpacesInFront = sys.maxsize
             for line in result:
-                if line.strip() == "": continue
-                whiteSpacesInFront = min( whiteSpacesInFront, len(line) - len( line.lstrip() ) )
-            
+                if line.strip() == "":
+                    continue
+                whiteSpacesInFront = min(whiteSpacesInFront, len(line) - len(line.lstrip()))
+
             for line in result:
-                strippedResult.append(  line[whiteSpacesInFront:] )
+                strippedResult.append(line[whiteSpacesInFront:])
         else:
             strippedResult = result
-            
+
         return strippedResult
-        
-    
+
     def _parseIncludes(self):
         includes = []
         pragmaOnceFound = False
-        
+
         for line in self.file:
             cleanLine = line.strip()
             if cleanLine.startswith("#include"):
-                includes.append( cleanLine[ 8: ].strip() )
+                includes.append(cleanLine[8:].strip())
             elif cleanLine == "#pragma once":
                 if not pragmaOnceFound:
                     pragmaOnceFound = True
@@ -120,89 +117,86 @@ class ParsedCodeFile:
             else:
                 self._putBackLine(line)
                 break
-        
+
         if len(includes) > 0 and includes[-1] == "":
             del includes[-1]
-         
+
         if (self.isHeaderFile and not pragmaOnceFound):
             raise ParsingException("No 'pragma once' in Header file")
-    
+
         return includes
-    
-    
+
     def _parseBody(self):
         alreadyWarned = False
         body = []
         for line in self.file:
-            body.append( line )
+            body.append(line)
             cleanLine = line.strip()
             if cleanLine.startswith("#include"):
-                includedFile = cleanLine[ 8: ].strip() 
+                includedFile = cleanLine[8:].strip()
                 includedFile = includedFile[1:-1]
                 if not includedFile.endswith(".impl.h") and not alreadyWarned:
                     alreadyWarned = True
-                    #print( self.filename + " Warning: undetected include " + cleanLine + " because of line " + body[0] )
+                    # print(f"{self.filename} Warning: undetected include {cleanLine} because of line {body[0]}")
         return body
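A hedged usage sketch of the parser; the path is hypothetical, and a ParsingException is raised for files without the expected header block.

    try:
        pcf = ParsedCodeFile("src/core/timing/Timer.h")
        print(pcf.tags)      # e.g. [('file', 'Timer.h'), ('author', '...')]
        print(pcf.includes)  # raw include arguments such as '"core/DataTypes.h"' or '<vector>'
    except ParsingException as err:
        print(err)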
-    
 
 
 class OldStyleParsedCodeFile:
-    separatorRe  = re.compile( r'//={80,}') # A separator line has at least 80 "=" characters
-    propertiesRe = re.compile( r'\s*\\(file|brief|author|ingroup) (.*)' )
-    replacerRe   = re.compile ( r'\/\*!?|\*\/|\*|//' )
-    def __init__(self, filename ):
+    separatorRe = re.compile(r'//={80,}')  # A separator line has at least 80 "=" characters
+    propertiesRe = re.compile(r'\s*\\(file|brief|author|ingroup) (.*)')
+    replacerRe = re.compile(r'\/\*!?|\*\/|\*|//')
+
+    def __init__(self, filename):
         self.filename = filename
-        self.file = open( filename )
-        self.isHeaderFile =  ( filename.endswith(".h") and not filename.endswith(".impl.h") )
-        
+        self.file = open(filename)
+        self.isHeaderFile = (filename.endswith(".h") and not filename.endswith(".impl.h"))
+
         # Parse File
         self._parseHeaderLine()
         self._parseTopCommentBlock()
         self.includes = self._parseIncludes()
-        self.body     = self._parseBody()
-        self.license  = ""
-    
-    ####################################################################################################################
-    ################################## Parser Functions ################################################################
-    ####################################################################################################################
-    
-    def _putBackLine(self, line ):
-        self.file = itertools.chain((line,), self.file )
-    
-    
+        self.body = self._parseBody()
+        self.license = ""
+
+    # Parser Functions
+
+    def _putBackLine(self, line):
+        self.file = itertools.chain((line,), self.file)
+
     def _parseHeaderLine(self):
         firstLine = self.file.readline()
-        if not ParsedCodeFile.separatorRe.match( firstLine ):
-            raise ParsingException( "No Header block at beginning of file found. File has to start with //======== in first line" )
-        
+        if not ParsedCodeFile.separatorRe.match(firstLine):
+            raise ParsingException(
+                "No Header block at beginning of file found. File has to start with //======== in first line")
+
     def _parseTopCommentBlock(self):
         tags = []
         doc = []
         for line in self.file:
-            if OldStyleParsedCodeFile.separatorRe.match( line ):
-                break;
-    
-            replacer = re.compile ( r'\/\*!?|\*\/|\*|//' )
-            line = OldStyleParsedCodeFile.replacerRe.sub( "", line )
-            
-            r = OldStyleParsedCodeFile.propertiesRe.search( line )
+            if OldStyleParsedCodeFile.separatorRe.match(line):
+                break
+
+            # replacer = re.compile(r'\/\*!?|\*\/|\*|//')
+            line = OldStyleParsedCodeFile.replacerRe.sub("", line)
+
+            r = OldStyleParsedCodeFile.propertiesRe.search(line)
             if r:
-                tags.append( ( r.group(1).strip(), r.group(2).strip() ) )
+                tags.append((r.group(1).strip(), r.group(2).strip()))
             else:
-                if line.strip() != "": 
-                    doc.append( line[:-1] )
-        
+                if line.strip() != "":
+                    doc.append(line[:-1])
+
         self.tags = tags
         self.doc = doc
-    
+
     def _parseIncludes(self):
         includes = []
         pragmaOnceFound = False
-        
+
         for line in self.file:
             cleanLine = line.strip()
             if cleanLine.startswith("#include"):
-                includes.append( cleanLine[ 8: ].strip() )
+                includes.append(cleanLine[8:].strip())
             elif cleanLine == "#pragma once":
                 if not pragmaOnceFound:
                     pragmaOnceFound = True
@@ -213,46 +207,40 @@ class OldStyleParsedCodeFile:
                     includes.append("")
                 continue
             else:
-                if ( cleanLine.lower() != "// local includes" ) and \
-                   ( cleanLine.lower() != "// module includes") and \
-                    (cleanLine.lower() != "// extern includes") and \
-                    (cleanLine.lower() != "// boost includes" ) and \
-                    (cleanLine.lower() != "//boost includes" ) and \
-                    (cleanLine.lower() != "// core includes" ) and \
-                    (cleanLine.lower() != "// local" ) and \
-                    (cleanLine.lower() != "// extern" ) and \
-                    (cleanLine.lower() != "//core" ) and \
-                    (cleanLine.lower() != "// Modules" ) and \
-                    (cleanLine.lower() != "// modules includes" ) and \
-                    (cleanLine.lower() != "//STL" ) and \
-                    (cleanLine.lower() != "// stl includes")   :  
+                if (cleanLine.lower() != "// local includes") and \
+                        (cleanLine.lower() != "// module includes") and \
+                        (cleanLine.lower() != "// extern includes") and \
+                        (cleanLine.lower() != "// boost includes") and \
+                        (cleanLine.lower() != "//boost includes") and \
+                        (cleanLine.lower() != "// core includes") and \
+                        (cleanLine.lower() != "// local") and \
+                        (cleanLine.lower() != "// extern") and \
+                        (cleanLine.lower() != "//core") and \
+                        (cleanLine.lower() != "// Modules") and \
+                        (cleanLine.lower() != "// modules includes") and \
+                        (cleanLine.lower() != "//STL") and \
+                        (cleanLine.lower() != "// stl includes"):
                     self._putBackLine(line)
                     break
-        
+
         if len(includes) > 0 and includes[-1] == "":
             del includes[-1]
-         
+
         if (self.isHeaderFile and not pragmaOnceFound):
             raise ParsingException("No 'pragma once' in Header file")
-    
+
         return includes
-    
-    
+
     def _parseBody(self):
         alreadyWarned = False
         body = []
         for line in self.file:
-            body.append( line )
+            body.append(line)
             cleanLine = line.strip()
             if cleanLine.startswith("#include"):
-                includedFile = cleanLine[ 8: ].strip() 
+                includedFile = cleanLine[8:].strip()
                 includedFile = includedFile[1:-1]
                 if not includedFile.endswith(".impl.h") and not alreadyWarned:
                     alreadyWarned = True
-                    #print( self.filename + " Warning: undetected include " + cleanLine + " because of line " + body[0] )
+                    # print(f"{self.filename} Warning: undetected include {cleanLine} because of line {body[0]}")
         return body
-    
-    
-
-    
-
diff --git a/python/waLBerla/tools/source_checker/SourceChecker.py b/python/waLBerla/tools/source_checker/SourceChecker.py
index 217f428cf..4d1b51ea3 100644
--- a/python/waLBerla/tools/source_checker/SourceChecker.py
+++ b/python/waLBerla/tools/source_checker/SourceChecker.py
@@ -1,25 +1,23 @@
 import re
 import os
 
-class ParsingException( Exception ):
-    def __init__ (self, msg ):
+
+class ParsingException(Exception):
+    def __init__(self, msg):
         self.msg = msg
+
     def __str__(self):
         return self.msg
 
 
-
-
-
-########################################################################################################################
 # Reads the  comment block at the top of a waLBerla source file
 #
 # The top comment block has to have the following format:
-# 
-#         //=========================================================== 
+#
+#         //===========================================================
 #         /*!
 #          *  \file   MyHeader.h
-#          *  \author Foo Bar <foo.bar@fau.de> 
+#          *  \author Foo Bar <foo.bar@fau.de>
 #          *  \author Another author <another.author@fau.de>
 #          *  \brief  In this file the great xyz feature is implemented
 #          *
@@ -29,51 +27,50 @@ class ParsingException( Exception ):
 #         //============================================================
 #
 #
-########################################################################################################################
-def parseTopCommentBlock( file ):
-    separatorRe  = re.compile( r'//={20,}') # A separator line has at least 20 "=" characters
+#
+def parseTopCommentBlock(file):
+    separatorRe = re.compile(r'//={20,}')  # A separator line has at least 20 "=" characters
     # Finds pairs like the following
     # \brief some description
-    propertiesRe = re.compile( r'\s*\\(file|brief|author|ingroup) (.*)' )
-    
+    propertiesRe = re.compile(r'\s*\\(file|brief|author|ingroup) (.*)')
+
     firstLine = file.readline()
-    if not separatorRe.match( firstLine ):
-        raise ParsingException( "No Header block at beginning of file" )
-    
+    if not separatorRe.match(firstLine):
+        raise ParsingException("No Header block at beginning of file")
+
     tags = []  # tuples of key-value, for example ( 'file', 'MyHeader.h'), (author, "Foo Bar <foo.bar@fau.de>" )
     rest = []  # all additional lines
-    
-    lines = []
+
+    # lines = []
     for line in file:
-        if separatorRe.match( line ):
-            break;
+        if separatorRe.match(line):
+            break
 
-        replacer = re.compile ( r'\/\*!?|\*\/|\*|//' )
-        line = replacer.sub( "", line )
-        
-        r = propertiesRe.match( line )
+        replacer = re.compile(r'\/\*!?|\*\/|\*|//')
+        line = replacer.sub("", line)
+
+        r = propertiesRe.match(line)
         if r:
-            tags.append( ( r.group(1).strip(), r.group(2).strip() ) )
+            tags.append((r.group(1).strip(), r.group(2).strip()))
         else:
-            if line.strip() != "": 
-                rest.append( line[:-1] )
-    
-    return ( tags, rest )
-    
+            if line.strip() != "":
+                rest.append(line[:-1])
+
+    return (tags, rest)
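For a header following the format sketched in the comment above, the return value would look roughly like:

    # tags == [('file', 'MyHeader.h'),
    #          ('author', 'Foo Bar <foo.bar@fau.de>'),
    #          ('author', 'Another author <another.author@fau.de>'),
    #          ('brief', 'In this file the great xyz feature is implemented')]
    # rest == remaining comment lines with the comment markers stripped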
 
 
 ########################################################################################################################
-# Parses the file content after the top comment block 
+# Parses the file content after the top comment block
 #
 #  Allowed content:
 #    - pragma once
 #    - include directives
 #    - legacy: "local includes, module includes, extern includes" comments
-# 
+#
 # Parameter: isHeaderFile  - if true the pragma once has to be present
 #
 ########################################################################################################################
-def parseIncludes( file, isHeaderFile, filename ):
+def parseIncludes(file, isHeaderFile, filename):
     includes = []
     content = []
     pragmaOnceFound = False
@@ -83,7 +80,7 @@ def parseIncludes( file, isHeaderFile, filename ):
         if cleanLine == "":
             continue
         if cleanLine.startswith("#include"):
-            includes.append( cleanLine[ 8: ].strip() )
+            includes.append(cleanLine[8:].strip())
         elif cleanLine == "#pragma once":
             if not pragmaOnceFound:
                 pragmaOnceFound = True
@@ -91,40 +88,40 @@ def parseIncludes( file, isHeaderFile, filename ):
                 raise ParsingException("Double 'pragma once' found")
         elif cleanLine.startswith("namespace") or 'cond internal' in cleanLine or 'using namespace' in cleanLine:
             headerStopLine = line
-            content.append( line )
+            content.append(line)
             break
         else:
-            if ( cleanLine.lower() != "// local includes" ) and \
-               ( cleanLine.lower() != "// module includes") and \
-                (cleanLine.lower() != "// extern includes") and \
-                (cleanLine.lower() != "// boost includes" ) and \
-                (cleanLine.lower() != "//boost includes" ) and \
-                (cleanLine.lower() != "// core includes" ) and \
-                (cleanLine.lower() != "// local" ) and \
-                (cleanLine.lower() != "// extern" ) and \
-                (cleanLine.lower() != "//core" ) and \
-                (cleanLine.lower() != "// Modules" ) and \
-                (cleanLine.lower() != "// modules includes" ) and \
-                (cleanLine.lower() != "//STL" ) and \
-                (cleanLine.lower() != "// stl includes")   :  
+            if (cleanLine.lower() != "// local includes") and \
+                    (cleanLine.lower() != "// module includes") and \
+                    (cleanLine.lower() != "// extern includes") and \
+                    (cleanLine.lower() != "// boost includes") and \
+                    (cleanLine.lower() != "//boost includes") and \
+                    (cleanLine.lower() != "// core includes") and \
+                    (cleanLine.lower() != "// local") and \
+                    (cleanLine.lower() != "// extern") and \
+                    (cleanLine.lower() != "//core") and \
+                    (cleanLine.lower() != "// Modules") and \
+                    (cleanLine.lower() != "// modules includes") and \
+                    (cleanLine.lower() != "//STL") and \
+                    (cleanLine.lower() != "// stl includes"):
                 headerStopLine = line
-                content.append( line )
+                content.append(line)
                 break
-                #raise ParsingException( "Additional Line at top of file: \n" + cleanLine )
-    
+                # raise ParsingException( "Additional Line at top of file: \n" + cleanLine )
+
     for line in file:
-        content.append( line )
+        content.append(line)
         cleanLine = line.strip()
         if cleanLine.startswith("#include"):
-            includedFile = cleanLine[ 8: ].strip() 
+            includedFile = cleanLine[8:].strip()
             includedFile = includedFile[1:-1]
             if not includedFile.endswith(".impl.h"):
-                print( filename +  " Undetected include:" + cleanLine + " Due to line "+ headerStopLine )
-        
+                print(filename + " Undetected include:" + cleanLine + " Due to line " + headerStopLine)
+
     if isHeaderFile and not pragmaOnceFound:
         raise ParsingException("No pragma once found")
-    
-    return ( includes, content )
+
+    return (includes, content)
 
 
 ########################################################################################################################
@@ -133,158 +130,168 @@ def parseIncludes( file, isHeaderFile, filename ):
 # and external includes occur last
 #
 ########################################################################################################################
-def sortIncludes( includes, sourceFileName  ):
-    externRe = re.compile( r'<([^>]*)>')
-    internRe = re.compile( r'"([^"]*)"')
+def sortIncludes(includes, sourceFileName):
+    # externRe = re.compile(r'<([^>]*)>')
+    # internRe = re.compile(r'"([^"]*)"')
 
-    modulesRe    = re.compile( r'free_surface/([^/]*)/([^/]*)/')
-    r = modulesRe.search( sourceFileName )
+    modulesRe = re.compile(r'free_surface/([^/]*)/([^/]*)/')
+    r = modulesRe.search(sourceFileName)
     currentModule = r.group(2)
 
-
     # List of tuples with (weight, includeFile)
     # lists with lower weight come first
     weightedIncludes = []
-    
+
     for include in includes:
         if '"' in include:
-            if not "/" in include:
-                weightedIncludes.append( (1, 0, include) ) # same folder include -> weighted 1
+            if "/" not in include:
+                weightedIncludes.append((1, 0, include))  # same folder include -> weighted 1
             else:
                 fileName = include.strip()[1:-1]
                 splittedName = fileName.split("/")
                 module = splittedName[0]
-                if ( module == currentModule ):
-                    weightedIncludes.append( (2, len(splittedName), include ) ) # same module include -> weighted 2
+                if module == currentModule:
+                    weightedIncludes.append((2, len(splittedName), include))  # same module include -> weighted 2
                 else:
-                    weightedIncludes.append( (3, len(splittedName), include ) ) # other module include -> weighted 3
+                    weightedIncludes.append((3, len(splittedName), include))  # other module include -> weighted 3
         elif "<" in include:
             if "/" in include:
                 fileName = include.strip()[1:-1]
                 splittedName = fileName.split("/")
-                weightedIncludes.append( ( 4, len(splittedName), include ) )  # external include with subpath -> weighted 4
+                weightedIncludes.append((4, len(splittedName), include))  # external include with subpath -> weighted 4
             else:
-                weightedIncludes.append( ( 5, 0, include ) ) # external include without subpath -> weighted 5
-    
-    sortedList = sorted( weightedIncludes, key = lambda e: str( e[0] ) + str(e[1]) + e[2]   )
-    return [ e[2] for e in sortedList ]
+                weightedIncludes.append((5, 0, include))  # external include without subpath -> weighted 5
+
+    sortedList = sorted(weightedIncludes, key=lambda e: str(e[0]) + str(e[1]) + e[2])
+    return [e[2] for e in sortedList]
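The weights, illustrated with hypothetical includes and currentModule == 'lbm'. Note that the sort key concatenates weight and path depth as strings, so a depth of 10 would sort before a depth of 2; with typical shallow include paths this never surfaces.

    # '"Sweep.h"'           -> weight 1  (same-folder include)
    # '"lbm/field/Pdf.h"'   -> weight 2  (same-module include)
    # '"core/DataTypes.h"'  -> weight 3  (other-module include)
    # '<boost/array.hpp>'   -> weight 4  (external include with subpath)
    # '<vector>'            -> weight 5  (external include without subpath)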
 
 
+def prependBeforeEachLine(textToPrepend, text):
+    lines = [textToPrepend + line for line in text.split('\n')]
+    return "\n".join(lines)
 
-def prependBeforeEachLine( textToPrepend, text ):
-    l =  [ textToPrepend + line for line in text.split('\n') ]
-    return "\n".join( l )
-    
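For reference:

    # prependBeforeEachLine("//! ", "line1\nline2")  ->  "//! line1\n//! line2"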
 
-def writeHeader( commentBlockTags, commentBlockRest, includes, walberlaBaseDir, isHeaderFile ):
+def writeHeader(commentBlockTags, commentBlockRest, includes, walberlaBaseDir, isHeaderFile):
     sourceCheckerDir = walberlaBaseDir + "/utilities/source_checker/"
-    license        = open( sourceCheckerDir + "license.txt" ).read()
-    
-    tags = "\n".join( [ "\\" + e[0] + " " + e[1] for e in commentBlockTags ] )  
-    rest = "\n".join( commentBlockRest ) 
-    
-    separatorLine = '//======================================================================================================================'
-    
+    license = open(sourceCheckerDir + "license.txt").read()
+
+    tags = "\n".join(["\\" + e[0] + " " + e[1] for e in commentBlockTags])
+    rest = "\n".join(commentBlockRest)
+
+    separatorLine = '//================================================================================================'
+
     result = ""
     result += separatorLine + "\n//\n"
-    
-    result += prependBeforeEachLine( "//  ", license ) + "\n//\n"
-    result += prependBeforeEachLine( "//! ", tags  ) + "\n//\n"
-    if  rest.strip() != "" :
-        result += prependBeforeEachLine( "//! ", rest    )
+
+    result += prependBeforeEachLine("//  ", license) + "\n//\n"
+    result += prependBeforeEachLine("//! ", tags) + "\n//\n"
+    if rest.strip() != "":
+        result += prependBeforeEachLine("//! ", rest)
         result += "\n//\n"
     result += separatorLine + "\n"
-    
+
     result += "\n"
-    
+
     if isHeaderFile:
-        result += "#pragma once\n\n" 
-    
+        result += "#pragma once\n\n"
+
     externNewlineAdded = False
-    for i in includes: 
-        if "<" in i and externNewlineAdded == False:
+    for i in includes:
+        if "<" in i and not externNewlineAdded:
             result += "\n"
             externNewlineAdded = True
-        
+
         result += "#include " + i + '\n'
 
     return result
 
 
-def correctSeparatorLineLength( line, fillChar, targetLineLength= 120 ):
-    line = line.rstrip() # remove newline
+def correctSeparatorLineLength(line, fillChar, targetLineLength=120):
+    line = line.rstrip()  # remove newline
     maxLength = 80
     if fillChar * maxLength in line and len(line) != targetLineLength:
-        if len(line) > targetLineLength: #remove chars
+        if len(line) > targetLineLength:  # remove chars
             numberCharsToDelete = len(line) - targetLineLength
-            line = line.replace( fillChar * numberCharsToDelete, "",1 )
-        else: # add chars
+            line = line.replace(fillChar * numberCharsToDelete, "", 1)
+        else:  # add chars
             numberCharsToAdd = targetLineLength - len(line)
-            line = line.replace( fillChar * maxLength, fillChar * (numberCharsToAdd + maxLength),1 )
-            
+            line = line.replace(fillChar * maxLength, fillChar * (numberCharsToAdd + maxLength), 1)
+
     return line
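Two worked examples for the default target length of 120:

    # correctSeparatorLineLength('//' + '=' * 98, '=')   ->  '//' + '=' * 118  (20 '=' added)
    # correctSeparatorLineLength('//' + '=' * 128, '=')  ->  '//' + '=' * 118  (10 '=' removed)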
 
 
-def checkContent( content ):
-    targetLineLength = 120
+def checkContent(content):
+    # targetLineLength = 120
     result = []
     for line in content:
-        line = correctSeparatorLineLength( line, '*' )
-        line = correctSeparatorLineLength( line, '-' )
-        line = correctSeparatorLineLength( line, '=' )
-        line = correctSeparatorLineLength( line, '/' )        
+        line = correctSeparatorLineLength(line, '*')
+        line = correctSeparatorLineLength(line, '-')
+        line = correctSeparatorLineLength(line, '=')
+        line = correctSeparatorLineLength(line, '/')
         result.append(line + "\n")
-        
+
     return result
-            
 
-def cleanupHeaderTags( tags, sourceFileName ):
-    modulesRe    = re.compile( r'free_surface/([^/]*)/([^/]*)/')
-    r = modulesRe.search( sourceFileName )
+
+def cleanupHeaderTags(tags, sourceFileName):
+    modulesRe = re.compile(r'free_surface/([^/]*)/([^/]*)/')
+    r = modulesRe.search(sourceFileName)
     module = r.group(2)
-    
-    sourceFileName = os.path.basename( sourceFileName )
-    
-    
+
+    sourceFileName = os.path.basename(sourceFileName)
+
     result = []
-    result.append( ("file", sourceFileName) )
-    result.append( ("ingroup", module ) )
+    result.append(("file", sourceFileName))
+    result.append(("ingroup", module))
     for tuple in tags:
         if tuple[0] == 'author':
-            authorText = re.sub( r'<[^>]*>', "", tuple[1] )            
-            if  len( authorText.split() ) > 2 or  "," in tuple[1]:
-                raise ParsingException( "Multiple Authors in one line?" )
-             
-            if   "Bauer"         in tuple[1]: result.append( ('author', "Martin Bauer <martin.bauer@fau.de>") )
-            elif "Schornbaum"    in tuple[1]: result.append( ('author', "Florian Schornbaum <florian.schornbaum@fau.de>") )
-            elif "Godenschwager" in tuple[1]: result.append( ('author', "Christian Godenschwager <christian.godenschwager@fau.de>") )
-            elif "Markl"         in tuple[1]: result.append( ('author', "Matthias Markl <matthias.markl@fau.de>") )
-            elif "Anderl"        in tuple[1]: result.append( ('author', "Daniela Anderl <daniela.anderl@lstm.uni-erlangen.de>") )
-            elif "Staubach"      in tuple[1]: result.append( ('author', "David Staubach <david.staubach@fau.de>") )
-            elif "Fattahi"       in tuple[1]: result.append( ('author', "Ehsan Fattahi <ehsan.fattahi@fau.de>" ) )
-            elif "Bogner"        in tuple[1]: result.append( ('author', "Simon Bogner <simon.bogner@fau.de>" ) )
-            elif "Ammer"         in tuple[1]: result.append( ('author', "Regina Ammer <regina.ammer@fau.de>" ) )
-            elif "Feichtinger"   in tuple[1]: result.append( ('author', "Christian Feichtinger") )
-            elif "Iglberger"     in tuple[1]: result.append( ('author', "Klaus Iglberger") )
-            elif "Donath"        in tuple[1]: result.append( ('author', "Stefan Donath") )
-            else: raise ParsingException ("Unknown Author: " + tuple[1] )
+            authorText = re.sub(r'<[^>]*>', "", tuple[1])
+            if len(authorText.split()) > 2 or "," in tuple[1]:
+                raise ParsingException("Multiple Authors in one line?")
+
+            if "Bauer" in tuple[1]:
+                result.append(('author', "Martin Bauer <martin.bauer@fau.de>"))
+            elif "Schornbaum" in tuple[1]:
+                result.append(('author', "Florian Schornbaum <florian.schornbaum@fau.de>"))
+            elif "Godenschwager" in tuple[1]:
+                result.append(('author', "Christian Godenschwager <christian.godenschwager@fau.de>"))
+            elif "Markl" in tuple[1]:
+                result.append(('author', "Matthias Markl <matthias.markl@fau.de>"))
+            elif "Anderl" in tuple[1]:
+                result.append(('author', "Daniela Anderl <daniela.anderl@lstm.uni-erlangen.de>"))
+            elif "Staubach" in tuple[1]:
+                result.append(('author', "David Staubach <david.staubach@fau.de>"))
+            elif "Fattahi" in tuple[1]:
+                result.append(('author', "Ehsan Fattahi <ehsan.fattahi@fau.de>"))
+            elif "Bogner" in tuple[1]:
+                result.append(('author', "Simon Bogner <simon.bogner@fau.de>"))
+            elif "Ammer" in tuple[1]:
+                result.append(('author', "Regina Ammer <regina.ammer@fau.de>"))
+            elif "Feichtinger" in tuple[1]:
+                result.append(('author', "Christian Feichtinger"))
+            elif "Iglberger" in tuple[1]:
+                result.append(('author', "Klaus Iglberger"))
+            elif "Donath" in tuple[1]:
+                result.append(('author', "Stefan Donath"))
+            else:
+                raise ParsingException("Unknown Author: " + tuple[1])
 
     for tuple in tags:
         if tuple[0] != 'author' and tuple[0] != "file" and tuple[1].strip() != "":
-            result.append( tuple )
-    
+            result.append(tuple)
+
     return result
 
 
-def isDeprecated( filename ):
-    file = open( filename )
+def isDeprecated(filename):
+    file = open(filename)
     content = file.read()
     return "#pragma message" in content and "deprecated" in content
-    
+
 
 walberlaBaseDir = "/home/bauer/devel/free_surface/"
 
-for root, dirs, files in os.walk( walberlaBaseDir + "tests/"):
+for root, dirs, files in os.walk(walberlaBaseDir + "tests/"):
     if 'extern' in dirs:
         dirs.remove('extern')
     if 'stencil' in dirs:
@@ -292,48 +299,39 @@ for root, dirs, files in os.walk( walberlaBaseDir + "tests/"):
     if 'vof' in dirs:
         dirs.remove('vof')
 
-    
     for filename in files:
-        fileExtension = os.path.splitext( filename )[1]
-        isHeader     = ( fileExtension == ".h"   ) and not filename.endswith(".impl.h")
-        
-        isSourceFile =  ( ( fileExtension == ".cpp" ) or ( fileExtension == ".h" ) ) and not filename.endswith(".in.h") 
-        
-        if not ( isSourceFile ):
+        fileExtension = os.path.splitext(filename)[1]
+        isHeader = (fileExtension == ".h") and not filename.endswith(".impl.h")
+
+        isSourceFile = ((fileExtension == ".cpp") or (fileExtension == ".h")) and not filename.endswith(".in.h")
+
+        if not isSourceFile:
             continue
-        
+
         filename = root + "/" + filename
 
-        if ( isDeprecated(filename ) ):
-            continue 
-        
-        file = open( filename )
-        
+        if isDeprecated(filename):
+            continue
+
+        file = open(filename)
+
         try:
-            
-            (commentBlockTags, commentBlockRest ) = parseTopCommentBlock(file)
-            commentBlockTags = cleanupHeaderTags( commentBlockTags, filename )
-            
-            ( includes, content ) = parseIncludes( file, isHeader, filename  )
-            includes = sortIncludes( includes,  filename )
-            
-            content = checkContent( content )
 
-            file.close()
-            
-                        
-            #file = open( filename, 'w')
-            #newContent = writeHeader( commentBlockTags, commentBlockRest, includes, walberlaBaseDir, isHeader )
-            #newContent += "\n\n"
-            #newContent += "".join( content )
-            #file.write( newContent )
-            
-        except ParsingException as exception:
-            print ( filename + " " + str(exception) )
-        
-        
+            (commentBlockTags, commentBlockRest) = parseTopCommentBlock(file)
+            commentBlockTags = cleanupHeaderTags(commentBlockTags, filename)
 
-    
+            (includes, content) = parseIncludes(file, isHeader, filename)
+            includes = sortIncludes(includes, filename)
 
+            content = checkContent(content)
 
+            file.close()
+
+            # file = open( filename, 'w')
+            # newContent = writeHeader( commentBlockTags, commentBlockRest, includes, walberlaBaseDir, isHeader )
+            # newContent += "\n\n"
+            # newContent += "".join( content )
+            # file.write( newContent )
 
+        except ParsingException as exception:
+            print(filename + " " + str(exception))
diff --git a/python/waLBerla/tools/source_checker/Utils.py b/python/waLBerla/tools/source_checker/Utils.py
index d567b1edb..c723dfd4f 100644
--- a/python/waLBerla/tools/source_checker/Utils.py
+++ b/python/waLBerla/tools/source_checker/Utils.py
@@ -1,40 +1,39 @@
 import re
 import os
 
+getAppDirRegexp = re.compile(r'(src|test|apps)')
 
-getAppDirRegexp  = re.compile( r'(src|test|apps)') 
-def getAppDir(filename ):
+
+def getAppDir(filename):
     """Returns the first part of the path without (src|tests|app)/someModule/someFile.cpp
       Example: filename = ~/devel/walberla/src/core/timing/Timer.cpp
                returns    ~/devel/walberla           """
-    filename = os.path.abspath( filename )
-    matchObj = getAppDirRegexp.search( filename )
-    return filename[ :matchObj.start()-1]
-
+    filename = os.path.abspath(filename)
+    matchObj = getAppDirRegexp.search(filename)
+    return filename[:matchObj.start() - 1]
 
 
-def getAppModules( appDir ):
+def getAppModules(appDir):
     """Returns all subfolders of appDir/src  i.e. all modules given the appDir"""
     searchDir = appDir + "/src"
-    return [ d for d in os.listdir( searchDir ) if os.path.isdir(os.path.join(searchDir, d)) ]
+    return [d for d in os.listdir(searchDir) if os.path.isdir(os.path.join(searchDir, d))]
 
 
+getModuleRegexp = re.compile(r'(src|tests)/([^/]*)')
 
 
-getModuleRegexp = re.compile( r'(src|tests)/([^/]*)')
-def getModule( filename  ):
+def getModule(filename):
     """Returns the  module of a file
        Examples:   somepath/walberla/src/core/something.cpp       returns core
                    somePrefix/yourApp/tests/greatModule/file.cpp  returns greatModule"""
-    r = getModuleRegexp.search( filename )
-    if r and not '.' in r.group(2):
+    r = getModuleRegexp.search(filename)
+    if r and '.' not in r.group(2):
         return r.group(2)
     else:
         return ""
 
 
-def isDeprecated( filename ):
-    file = open( filename )
+def isDeprecated(filename):
+    file = open(filename)
     content = file.read()
     return "#pragma message" in content and "deprecated" in content
-    
\ No newline at end of file
diff --git a/python/waLBerla/tools/source_checker/walberla_check.py b/python/waLBerla/tools/source_checker/walberla_check.py
index 43e165997..f23b6a83d 100755
--- a/python/waLBerla/tools/source_checker/walberla_check.py
+++ b/python/waLBerla/tools/source_checker/walberla_check.py
@@ -2,433 +2,409 @@
 
 
 import os
-from Utils import *
-from ParsedCodeFile import *
+import re
+from Utils import getAppDir, getAppModules, getModule, isDeprecated
+from ParsedCodeFile import OldStyleParsedCodeFile, ParsedCodeFile, ParsingException
 
-####################################################################################################################
-######################################  Settings ###################################################################
-####################################################################################################################
+# Settings
 
-skipDirectories = [ 'extern', 'stencil', 'vof' ]
+
+skipDirectories = ['extern', 'stencil', 'vof']
 
 walberlaDir = os.environ['WALBERLA_SOURCE_DIR']
 
-authorfile  = walberlaDir + "/utilities/py_waLBerla/source_checker/authors.txt"
+authorfile = walberlaDir + "/utilities/py_waLBerla/source_checker/authors.txt"
 licensefile = walberlaDir + "/utilities/py_waLBerla/source_checker/license.txt"
 
 
-
-
-
-class CodeStyleException( Exception ):
-    def __init__ (self, msg ):
+class CodeStyleException(Exception):
+    def __init__(self, msg):
         self.msg = msg
+
     def __str__(self):
         return self.msg
 
 
+# Include Sorting
 
-####################################################################################################################
-################################## Separator Line Length ###########################################################
-####################################################################################################################
-
-def getSortedIncludes( parsedCodeFile ):
+def getSortedIncludes(parsedCodeFile):
     filename = parsedCodeFile.filename
     includeList = parsedCodeFile.includes
-    
+
     appDir = getAppDir(filename)
-    appName = [x for x in appDir.split('/') if len(x) > 0 ][-1]
     appName = appDir.split('/')[-1]
     if appName == "walberla":
         appModules = []
     else:
-        appModules = getAppModules( appDir )
+        appModules = getAppModules(appDir)
+
+    # 'filename' is the file containing the includes
+    currentModule = getModule(filename)
 
-    filename = filename     # name of file containing the includes
-    currentModule = getModule( filename )
-    
-    localIncludes  = []
+    localIncludes = []
     moduleIncludes = []
-    appIncludes    = []
+    appIncludes = []
     externIncludes = []
-    
+
     for include in includeList:
-        if '"' in include  and not "/" in include:
-            localIncludes.append( include )
+        if '"' in include and "/" not in include:
+            localIncludes.append(include)
         elif '"' in include:
             moduleOfInclude = include.split('/')[0][1:]
             if moduleOfInclude in appModules:
-                appIncludes.append( include )
+                appIncludes.append(include)
             elif moduleOfInclude == currentModule:
-                localIncludes.append( include )
+                localIncludes.append(include)
             else:
-                moduleIncludes.append( include )
+                moduleIncludes.append(include)
         elif '<' in include:
-            externIncludes.append( include )
-        elif len( include.strip() ) == 0:
+            externIncludes.append(include)
+        elif len(include.strip()) == 0:
             continue
         else:
-            raise Exception("Internal Error when parsing include: " + include )
+            raise Exception("Internal Error when parsing include: " + include)
 
-    def keyFunction( include ):
+    def keyFunction(include):
         """Key function for sorting module and local includes"""
         splitted = include.split('/')
-        return splitted[0] + "/" + str( len(splitted) ) + "/" + "/".join( splitted[1:] )
-    
-    def externKeyFunction( include ):
+        return splitted[0] + "/" + str(len(splitted)) + "/" + "/".join(splitted[1:])
+
+    def externKeyFunction(include):
         """Key function for sorting extern includes"""
-        order = [ 'pe', 'boost' ]
+        order = ['pe', 'boost']
         if "/" in include:
-            lib = include.split('/') [0]
-            lib = lib[1:] #remove '<' character
+            lib = include.split('/')[0]
+            lib = lib[1:]  # remove '<' character
             if lib in order:
                 weight = order.index(lib)
             else:
                 weight = 8
         else:
             weight = 9
-        
+
         return str(weight) + include
-    
-    
-    moduleIncludes = sorted( moduleIncludes, key = keyFunction       )            
-    appIncludes    = sorted( appIncludes   , key = keyFunction       )
-    externIncludes = sorted( externIncludes, key = externKeyFunction )
 
+    moduleIncludes = sorted(moduleIncludes, key=keyFunction)
+    appIncludes = sorted(appIncludes, key=keyFunction)
+    externIncludes = sorted(externIncludes, key=externKeyFunction)
 
-    isApp = len( appModules ) > 0 
-    splitModuleIncludes = ( len(localIncludes) + len(moduleIncludes) ) > 5 and not isApp
-    splitExternIncludes = ( len(externIncludes) ) > 5 and not isApp
+    isApp = len(appModules) > 0
+    splitModuleIncludes = (len(localIncludes) + len(moduleIncludes)) > 5 and not isApp
+    splitExternIncludes = (len(externIncludes)) > 5 and not isApp
 
     result = []
-    
-    if len( appIncludes) > 0:
+
+    if len(appIncludes) > 0:
         result += appIncludes
         result += [""]
 
-
-    if len (localIncludes + moduleIncludes) > 0:    
+    if len(localIncludes + moduleIncludes) > 0:
         if splitModuleIncludes:
-            result += splitUpIncludes( localIncludes + moduleIncludes )
+            result += splitUpIncludes(localIncludes + moduleIncludes)
         else:
             result += localIncludes + moduleIncludes
-        result += [""]  
-       
-    if len( externIncludes ) > 0:
+        result += [""]
+
+    if len(externIncludes) > 0:
         if splitExternIncludes:
-            result += splitUpIncludes( externIncludes )
+            result += splitUpIncludes(externIncludes)
         else:
             result += externIncludes
-    
+
     if len(result) > 0 and result[-1] == "":
         del result[-1]
-    
+
     return result
 
-def splitUpIncludes( includes ):
+
+def splitUpIncludes(includes):
     result = []
-    
-    result.append( includes[0] )
-    
-    for i in range(1, len(includes )):
-        cur  = includes[i]
-        last = includes[i-1]
+
+    result.append(includes[0])
+
+    for i in range(1, len(includes)):
+        cur = includes[i]
+        last = includes[i - 1]
         curHasSlash = '/' in cur
         lastHasSlash = '/' in last
-        if (not curHasSlash and lastHasSlash) or (curHasSlash and not lastHasSlash ):
+        if (not curHasSlash and lastHasSlash) or (curHasSlash and not lastHasSlash):
             result.append("")
         if curHasSlash and lastHasSlash:
             lastModule = last.split('/')[0]
-            curModule  = cur.split('/')[0]
+            curModule = cur.split('/')[0]
             if lastModule != curModule:
                 result.append("")
-        
+
         result.append(cur)
-    
+
     return result
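A blank entry is inserted whenever consecutive includes switch between plain and pathed form, or between top-level modules; e.g. with hypothetical names:

    # splitUpIncludes(['"a.h"', '"core/x.h"', '"core/y.h"', '"field/z.h"'])
    #   -> ['"a.h"', '', '"core/x.h"', '"core/y.h"', '', '"field/z.h"']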
-    
 
-def sortIncludes( parsedCodeFile ):
-    parsedCodeFile.includes = getSortedIncludes( parsedCodeFile )
-    
-def checkIncludeOrder( parsedCodeFile ):
-    res = getSortedIncludes( parsedCodeFile )
+
+def sortIncludes(parsedCodeFile):
+    parsedCodeFile.includes = getSortedIncludes(parsedCodeFile)
+
+
+def checkIncludeOrder(parsedCodeFile):
+    res = getSortedIncludes(parsedCodeFile)
 
     if res != parsedCodeFile.includes:
-        raise CodeStyleException( "Includes are not sorted correctly" )
+        raise CodeStyleException("Includes are not sorted correctly")
 
 
-####################################################################################################################
-################################## Separator Line Length ###########################################################
-####################################################################################################################
+# Separator Line Length
 
-def getBodyWithCorrectSeparatorLines( parsedCodeFile, targetLineLength = 120, maxLength = 80 ):
+def getBodyWithCorrectSeparatorLines(parsedCodeFile, targetLineLength=120, maxLength=80):
     """ Lines that have at least 'maxLength' times the same filling char (*,-,=,/ )
         are extended or shrunk to have exactly targetLineLength  """
-        
-    def correctSeparatorLineLength( line, fillChar ):
-        
+
+    def correctSeparatorLineLength(line, fillChar):
+
         if fillChar * maxLength in line and '"' in line:
-            #print ( parsedCodeFile.filename + " Warning: Fill line with quotation mark detected: " + line)
+            # print ( parsedCodeFile.filename + " Warning: Fill line with quotation mark detected: " + line)
             return line
 
-        if fillChar * maxLength in line and len( line.rstrip() ) != targetLineLength:
-            line = line.rstrip() # remove newline
-            if len(line) > targetLineLength: #remove chars
+        if fillChar * maxLength in line and len(line.rstrip()) != targetLineLength:
+            line = line.rstrip()  # remove newline
+            if len(line) > targetLineLength:  # remove chars
                 numberCharsToDelete = len(line) - targetLineLength
-                line = line.replace( fillChar * numberCharsToDelete, "",1 )
-            else: # add chars
+                line = line.replace(fillChar * numberCharsToDelete, "", 1)
+            else:  # add chars
                 numberCharsToAdd = targetLineLength - len(line)
-                line = line.replace( fillChar * maxLength, fillChar * (numberCharsToAdd + maxLength),1 )
-            
+                line = line.replace(fillChar * maxLength, fillChar * (numberCharsToAdd + maxLength), 1)
+
             line += "\n"
-            
+
         return line
-    
+
     result = []
-    for line in parsedCodeFile.body:
-        line = correctSeparatorLineLength( line, '*' )
-        line = correctSeparatorLineLength( line, '-' )
-        line = correctSeparatorLineLength( line, '=' )
-        line = correctSeparatorLineLength( line, '/' )        
-        result.append(line)
-            
+    for body_line in parsedCodeFile.body:
+        body_line = correctSeparatorLineLength(body_line, '*')
+        body_line = correctSeparatorLineLength(body_line, '-')
+        body_line = correctSeparatorLineLength(body_line, '=')
+        body_line = correctSeparatorLineLength(body_line, '/')
+        result.append(body_line)
+
     return result
 
 
-def correctSeparatorLines( parsedCodeFile ):
-    parsedCodeFile.body = getBodyWithCorrectSeparatorLines( parsedCodeFile )
-    
-def checkSeparatorLines( parsedCodeFile ):
-    res = getBodyWithCorrectSeparatorLines( parsedCodeFile )
-    if res != parsedCodeFile.body:
-        raise CodeStyleException( "Not all separator lines have length 120" )
-    
+def correctSeparatorLines(parsedCodeFile):
+    parsedCodeFile.body = getBodyWithCorrectSeparatorLines(parsedCodeFile)
 
 
+def checkSeparatorLines(parsedCodeFile):
+    res = getBodyWithCorrectSeparatorLines(parsedCodeFile)
+    if res != parsedCodeFile.body:
+        raise CodeStyleException("Not all separator lines have length 120")
 
-####################################################################################################################
-################################## Doxygen Tags ####################################################################
-####################################################################################################################
 
-def checkTags( parsedCodeFile ):
+# Doxygen Tags
+
+def checkTags(parsedCodeFile):
     tags = parsedCodeFile.tags
-    
-    sourceFileName = os.path.basename( parsedCodeFile.filename )
-    currentModule = getModule( parsedCodeFile.filename )
-    
+
+    sourceFileName = os.path.basename(parsedCodeFile.filename)
+    # currentModule = getModule(parsedCodeFile.filename)
+
     fileTagFound = False
-    authorTagFound = False
+    # authorTagFound = False
     ingroupTagFound = False
     briefTagFound = False
-    
-    for (tag,value) in tags:
-        if tag=="file":
-            if fileTagFound: 
+
+    for (tag, value) in tags:
+        if tag == "file":
+            if fileTagFound:
                 raise CodeStyleException("Duplicate '\file' tag ")
             fileTagFound = True
-            
+
             if not value == sourceFileName:
-                raise CodeStyleException("'file' tag in header has wrong value " + value )
-        elif tag=="author":
+                raise CodeStyleException("'file' tag in header has wrong value " + value)
+        elif tag == "author":
             if value not in checkTags.knownAuthors:
-                raise CodeStyleException("Unknown author or email. If valid extend the authors.txt. Invalid Value was: " + value )
-        elif tag=="ingroup":
-            if ingroupTagFound: 
-                raise CodeStyleException("Duplicate '\ingroup' tag")
-        elif tag =="brief":
+                raise CodeStyleException(
+                    "Unknown author or email. If valid extend the authors.txt. Invalid Value was: " + value)
+        elif tag == "ingroup":
+            if ingroupTagFound:
+                raise CodeStyleException("Duplicate ingroup tag")
+        elif tag == "brief":
             if briefTagFound:
                 raise CodeStyleException("Duplicate '\brief' tag")
             briefTagFound = True
 
-checkTags.knownAuthors = []
-for line in open( authorfile ):
-    checkTags.knownAuthors.append( line.split("|")[0].strip() )
-
 
+checkTags.knownAuthors = []
+for line in open(authorfile):
+    checkTags.knownAuthors.append(line.split("|")[0].strip())
 
 
-def correctTags( parsedCodeFile ):
+def correctTags(parsedCodeFile):
     tags = parsedCodeFile.tags
-    sourceFileName = os.path.basename( parsedCodeFile.filename )
-    currentModule = getModule( parsedCodeFile.filename )
-    
+    sourceFileName = os.path.basename(parsedCodeFile.filename)
+    currentModule = getModule(parsedCodeFile.filename)
+
     result = []
-    result.append( ("file", sourceFileName) )
+    result.append(("file", sourceFileName))
     if len(currentModule) > 0:
-        result.append( ("ingroup", currentModule ) )
-        
-    for (tag,value) in tags:
+        result.append(("ingroup", currentModule))
+
+    for (tag, value) in tags:
         if tag == 'author':
-            authorText = re.sub( r'<[^>]*>', "",value )            
-            if  len( authorText.split() ) > 2 or  "," in value:
-                raise CodeStyleException( "Multiple Authors in one line?" )
-            
+            authorText = re.sub(r'<[^>]*>', "", value)
+            if len(authorText.split()) > 2 or "," in value:
+                raise CodeStyleException("Multiple Authors in one line?")
+
             matchingAuthor = ""
-            for (author, regexp ) in correctTags.authorRegexps:
-                if regexp.search( value ):
-                     matchingAuthor = author
-                     break
-            
-            if matchingAuthor =="":
-                raise CodeStyleException ("Unknown Author: " + value )
+            for (author, regexp) in correctTags.authorRegexps:
+                if regexp.search(value):
+                    matchingAuthor = author
+                    break
+
+            if matchingAuthor == "":
+                raise CodeStyleException("Unknown Author: " + value)
             else:
-                result.append( ('author', matchingAuthor) )
+                result.append(('author', matchingAuthor))
 
-    for (tag,value) in tags:
+    for (tag, value) in tags:
-        if tag != 'author' and tag != "file" and tag != "ingroup" and value.strip() != "":
+        if tag not in ('author', 'file', 'ingroup') and value.strip() != "":
-            result.append( (tag,value) )
-            
+            result.append((tag, value))
+
     parsedCodeFile.tags = result
 
 
 # Parse authors file
 correctTags.authorRegexps = []
-for line in open( authorfile ):
+for line in open(authorfile):
     splittedLine = line.strip().split("|")
     author = splittedLine[0].strip()
     if len(splittedLine) > 1:
-        regexp = re.compile( splittedLine[1].strip(), re.IGNORECASE )
+        regexp = re.compile(splittedLine[1].strip(), re.IGNORECASE)
     else:
-        regexp = re.compile( splittedLine[0].strip(), re.IGNORECASE )
-    correctTags.authorRegexps.append( (author, regexp) )
-    
-
-####################################################################################################################
-################################## License Check ###################################################################
-####################################################################################################################
-
-referenceLicense =  [ l.replace("\n","") for l in open( licensefile ) ]
-def checkLicense( parsedCodeFile ):
-    ref  = [ l.strip() for l in referenceLicense ]
-    lic  = [ l.strip() for l in parsedCodeFile.license ]
-    
+        regexp = re.compile(splittedLine[0].strip(), re.IGNORECASE)
+    correctTags.authorRegexps.append((author, regexp))
+
+# License Check
+
+referenceLicense = [ln.replace("\n", "") for ln in open(licensefile)]
+
+
+def checkLicense(parsedCodeFile):
+    ref = [ln.strip() for ln in referenceLicense]
+    lic = [ln.strip() for ln in parsedCodeFile.license]
+
     if not lic == ref:
         raise CodeStyleException("License is not correct")
 
 
-def correctLicense( parsedCodeFile ):
+def correctLicense(parsedCodeFile):
     parsedCodeFile.license = referenceLicense
-    
 
 
+# Write Parsed Code File
+
+
+def writeParsedCodeFile(parsedCodeFile, filename):
+    def prependBeforeEachLine(textToPrepend, text):
+        lines = [textToPrepend + line for line in text.split('\n')]
+        return "\n".join(lines)
+
+    tags = "\n".join(["\\" + e[0] + " " + e[1] for e in parsedCodeFile.tags])
+    doc = "\n".join(parsedCodeFile.doc)
 
-####################################################################################################################
-################################## Write Parsed Code File ##########################################################
-####################################################################################################################
+    separatorLine = '//' + (120 - 2) * "="
 
-def writeParsedCodeFile( parsedCodeFile, filename ):
-    
-    def prependBeforeEachLine( textToPrepend, text ):
-        l =  [ textToPrepend + line for line in text.split('\n') ]
-        return "\n".join( l )
-    
-    tags = "\n".join( [ "\\" + e[0] + " " + e[1] for e in parsedCodeFile.tags ] )  
-    doc = "\n".join( parsedCodeFile.doc ) 
-    
-    separatorLine = '//' + (120-2) * "="
-    
     result = ""
     result += separatorLine + "\n//\n"
-    
-    result += prependBeforeEachLine( "//  ", "\n".join(parsedCodeFile.license) ) + "\n//\n"
-    result += prependBeforeEachLine( "//! ", tags  ) 
-    
-    if  doc.strip() != "" :
-        result +=  "\n//!\n"
-        result += prependBeforeEachLine( "//! ", doc    )
+
+    result += prependBeforeEachLine("//  ", "\n".join(parsedCodeFile.license)) + "\n//\n"
+    result += prependBeforeEachLine("//! ", tags)
+
+    if doc.strip() != "":
+        result += "\n//!\n"
+        result += prependBeforeEachLine("//! ", doc)
         result += "\n//\n"
     else:
-         result += "\n//\n"
-         
+        result += "\n//\n"
+
     result += separatorLine + "\n"
-    
+
     result += "\n"
-    
+
     if parsedCodeFile.isHeaderFile:
-        result += "#pragma once\n\n" 
-    
-    for i in parsedCodeFile.includes: 
-        if len( i.strip() ) > 0:
+        result += "#pragma once\n\n"
+
+    for i in parsedCodeFile.includes:
+        if len(i.strip()) > 0:
             result += "#include " + i + '\n'
         else:
             result += "\n"
 
     result += "\n\n"
-    result += "".join( parsedCodeFile.body )
+    result += "".join(parsedCodeFile.body)
 
-    file = open( filename, 'w')
-    file.write( result )
-    file.close()
+    with open(filename, 'w') as outfile:
+        outfile.write(result)
-    
-    
-
-####################################################################################################################
-##################################   Main    #######################################################################
-####################################################################################################################
-
-def processFile( filename, args ):
-    fileExtension = os.path.splitext( filename )[1]
-    
-    if not ( ( ( fileExtension == ".cpp" ) or ( fileExtension == ".h" ) ) and not filename.endswith(".in.h") ): 
-        return 
-    
-    if ( isDeprecated(filename ) ):
+
+
+# Main
+
+def processFile(filename, args):
+    fileExtension = os.path.splitext(filename)[1]
+
+    if fileExtension not in (".cpp", ".h") or filename.endswith(".in.h"):
+        return
+
+    if isDeprecated(filename):
         return
-    
+
     parsedCodeFile = None
     try:
         if args.oldStyle:
-            parsedCodeFile = OldStyleParsedCodeFile( filename )
+            parsedCodeFile = OldStyleParsedCodeFile(filename)
         else:
-            parsedCodeFile = ParsedCodeFile( filename )
-            
+            parsedCodeFile = ParsedCodeFile(filename)
+
     except ParsingException as exception:
-        print( filename + " " + str(exception) ) 
-        
+        print(filename + " " + str(exception))
+
     if parsedCodeFile:
         try:
-            checkTags          ( parsedCodeFile )
-            checkLicense       ( parsedCodeFile )
-            checkSeparatorLines( parsedCodeFile )
-            checkIncludeOrder  ( parsedCodeFile )
+            checkTags(parsedCodeFile)
+            checkLicense(parsedCodeFile)
+            checkSeparatorLines(parsedCodeFile)
+            checkIncludeOrder(parsedCodeFile)
         except CodeStyleException as exception:
-            print( filename + " " + str(exception) )
-       
+            print(filename + " " + str(exception))
+
-    if args.autoCorrect:
+    if parsedCodeFile and args.autoCorrect:
-        correctLicense       ( parsedCodeFile )
-        correctSeparatorLines( parsedCodeFile )
-        correctTags          ( parsedCodeFile )
-        sortIncludes         ( parsedCodeFile )
+        correctLicense(parsedCodeFile)
+        correctSeparatorLines(parsedCodeFile)
+        correctTags(parsedCodeFile)
+        sortIncludes(parsedCodeFile)
         writeParsedCodeFile(parsedCodeFile, filename)
 
 
-
 if __name__ == "__main__":
     import argparse
-    
+
     argParser = argparse.ArgumentParser()
-    argParser.add_argument( "target", nargs='?', default=os.getcwd(), help="file or foldername to check" )
-    argParser.add_argument( "-a", "--autoCorrect", action="store_true", 
-                            help="automatically correct source files if possible" )
-    argParser.add_argument( "-o", "--oldStyle", action="store_true",
-                            help="Parse old style headers" )
-    #argParser.add_argument( "-g", "--graph", help="write depencency graph in json format to specified file" )
+    argParser.add_argument("target", nargs='?', default=os.getcwd(), help="file or foldername to check")
+    argParser.add_argument("-a", "--autoCorrect", action="store_true",
+                           help="automatically correct source files if possible")
+    argParser.add_argument("-o", "--oldStyle", action="store_true",
+                           help="Parse old style headers")
+    # argParser.add_argument( "-g", "--graph", help="write depencency graph in json format to specified file" )
     args = argParser.parse_args()
-    
-    
-    if os.path.isdir( args.target):
-        for root, dirs, files in os.walk( args.target ):
+
+    if os.path.isdir(args.target):
+        for root, dirs, files in os.walk(args.target):
             # Skip specified directories
             for d in skipDirectories:
                 if d in dirs:
                     dirs.remove(d)
-        
+
             for filename in files:
                 filename = root + "/" + filename
-                processFile(filename, args )
+                processFile(filename, args)
     else:
-        processFile( os.path.abspath(args.target), args )
-
-
-
+        processFile(os.path.abspath(args.target), args)
diff --git a/python/waLBerla/tools/sqlitedb/__init__.py b/python/waLBerla/tools/sqlitedb/__init__.py
index e709f6b3e..3e35fc485 100644
--- a/python/waLBerla/tools/sqlitedb/__init__.py
+++ b/python/waLBerla/tools/sqlitedb/__init__.py
@@ -1,2 +1,4 @@
 from waLBerla.tools.sqlitedb.insert import storeSingle, storeMultiple, checkAndUpdateSchema, sequenceValuesToScalars
-from waLBerla.tools.sqlitedb.merge  import mergeSqliteFiles
+from waLBerla.tools.sqlitedb.merge import mergeSqliteFiles
+
+__all__ = ['storeSingle', 'storeMultiple', 'checkAndUpdateSchema', 'sequenceValuesToScalars', 'mergeSqliteFiles']
diff --git a/python/waLBerla/tools/sqlitedb/insert.py b/python/waLBerla/tools/sqlitedb/insert.py
index 60f757b56..29fa37c16 100644
--- a/python/waLBerla/tools/sqlitedb/insert.py
+++ b/python/waLBerla/tools/sqlitedb/insert.py
@@ -1,35 +1,33 @@
-"""Small wrapper around Pythons sqlite3 module - simplifies table creation and is mainly tailored to storage of 
+"""Small wrapper around Pythons sqlite3 module - simplifies table creation and is mainly tailored to storage of
    aggregated simulation results"""
 
-
 import sqlite3
 
 
-def sequenceValuesToScalars( data ):
-    """Transforms sequence values in tht data dictionary into multiple entries 
-    
+def sequenceValuesToScalars(data):
+    """Transforms sequence values in tht data dictionary into multiple entries
+
-       Example: { 'domainSize' : (4,5,6) } is transformed int
+       Example: { 'domainSize' : (4,5,6) } is transformed into
                 { 'domainSize_0' : 4, 'domainSize_1' : 5, 'domainSize_2' : 6 }
-    
-       This is useful when using the dictionary with storeSingle(), since each entry gets a separate column 
+
+       This is useful when using the dictionary with storeSingle(), since each entry gets a separate column
        after the sequences have been separated.
      """
     keysToDelete = []
     newValues = {}
     for key, value in data.items():
-        if type(value) in [ list, tuple ]:
-            keysToDelete.append( key )
-            for i in range( len(value) ):
-                newValues[ "%s_%d" % (key,i) ] = value[i]
+        if type(value) in [list, tuple]:
+            keysToDelete.append(key)
+            for i in range(len(value)):
+                newValues["%s_%d" % (key, i)] = value[i]
     for k in keysToDelete:
         del data[k]
-    data.update( newValues )    
-    
+    data.update(newValues)
 
 
-def storeSingle( data, tableName, dbFile = "database.sqlite", runId=None ):
+def storeSingle(data, tableName, dbFile="database.sqlite", runId=None):
     """Stores results of a single simulation run to a sqlite database.
-    
+
     Primary key column 'runId' and a timestamp are automatically added.
 
         :param data:       Dictionary with data to store in sqlite database.
@@ -37,114 +35,117 @@ def storeSingle( data, tableName, dbFile = "database.sqlite", runId=None ):
         :param tableName: name of sqlite table
         :param dbFile:    database file
         :param runId:     override the otherwise manually created runId which serves as primary key
-        :return: runId of inserted run  
+        :return: runId of inserted run
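+
+        Example (illustrative sketch; table name and values are hypothetical)::
+
+            data = {'dx': 0.01, 'domainSize': (4, 5, 6)}
+            sequenceValuesToScalars(data)
+            checkAndUpdateSchema(data, 'runs')
+            runId = storeSingle(data, 'runs')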
     """
     if runId:
         data['runId'] = runId
-    
-    keyString   = ",".join( data.keys() )
-    valueString = ",".join( [ "?" for e in data.values() ] )
-    query = "INSERT INTO %s ( %s ) VALUES ( %s )" % ( tableName, keyString, valueString )
 
-    conn = sqlite3.connect( dbFile )
+    keyString = ",".join(data.keys())
+    valueString = ",".join(["?" for e in data.values()])
+    query = "INSERT INTO %s ( %s ) VALUES ( %s )" % (tableName, keyString, valueString)
+
+    conn = sqlite3.connect(dbFile)
     c = conn.cursor()
-    c.execute( query, list(data.values()) )
+    c.execute(query, list(data.values()))
     lastrowid = c.lastrowid
     conn.commit()
     conn.close()
-    
+
     return lastrowid
 
 
-def storeMultiple( data, tableName, dbFile="database.sqlite", runId=None ):
+def storeMultiple(data, tableName, dbFile="database.sqlite", runId=None):
     """Stores data from multiple simulation runs into the database.
-    
+
-       :param data:  similar to storeSingle, but each value of the dictionary has to be a list
-                    for all keys the values have to be lists of equal length
-       :param runId: leave this to none if runId is the primary key of this table
+       :param data:  similar to storeSingle, but each value of the dictionary has to be a list;
+                    all these lists have to be of equal length
+       :param runId: leave this as None if runId is the primary key of this table
-                     if this table references a table with primary key runId, you can reference a 
+                     if this table references a table with primary key runId, you can reference a
                      specific run by passing here the id of the run to reference.
        :return: primary key of last inserted run, the other runs have runId: returnValue-1, returnValue-2, ...
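+
+       Example (illustrative sketch; table name and values are hypothetical)::
+
+           data = {'step': [1, 2, 3], 'residual': [0.1, 0.05, 0.025]}
+           checkAndUpdateSchema(data, 'convergence', referenceRuns=True)
+           storeMultiple(data, 'convergence', runId=1)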
     """
     # Check if data is a dictionary that only has lists as values, all these lists have to have the
     # same length
     list_length = -1
-    for key,value in data.items():
-        assert( type(value)  is list )
+    for key, value in data.items():
+        assert type(value) is list
         if list_length < 0:
             list_length = len(value)
-        assert( len(value) == list_length )
-    
+        assert len(value) == list_length
+
     if runId is not None:
-        data.update( {'runId': [runId] * list_length } )
-
-    
-    keyString   = ",".join( data.keys() )
-    valueString = ",".join( [ "?" for e in data.values() ] )
-    query = "INSERT INTO %s ( %s ) VALUES ( %s )" % ( tableName, keyString, valueString )
-    
-    conn = sqlite3.connect( dbFile )
+        data.update({'runId': [runId] * list_length})
+
+    keyString = ",".join(data.keys())
+    valueString = ",".join(["?" for e in data.values()])
+    query = "INSERT INTO %s ( %s ) VALUES ( %s )" % (tableName, keyString, valueString)
+
+    conn = sqlite3.connect(dbFile)
     c = conn.cursor()
-    
-    sql_data = [  tuple( [  v[i] for v in data.values() ] ) for i in range(list_length ) ]
-    
-    c.executemany( query, sql_data  )
+
+    sql_data = [tuple([v[i] for v in data.values()]) for i in range(list_length)]
+
+    c.executemany(query, sql_data)
     lastrowid = c.lastrowid
     conn.commit()
     conn.close()
-    
+
     return lastrowid
-    
 
-def checkAndUpdateSchema( data, tableName, dbFile="database.sqlite", referenceRuns=False ):
+
+def checkAndUpdateSchema(data, tableName, dbFile="database.sqlite", referenceRuns=False):
     """Alters a sqlite table in order to match the given data:
-    
+
         * if table with given name does not exist yet, it is created
-        * keys in the data dictionary correspond to columns 
+        * keys in the data dictionary correspond to columns
         * columns are added if necessary, existing data has NULL in these new columns
-    
+
     :param data:          see :func:`storeSingle` or :func:`storeMultiple`
     :param referenceRuns: if False the table gets an autoincrementing integer column 'runId'
-                          if True, a normal column runId is created that points into another table with runId as primary key 
+                          if True, a normal column runId is created that points into
+                          another table with runId as primary key
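+
+    Example (illustrative; keys and values are hypothetical): the dictionary
+    {'n': 10, 'err': 1e-3, 'tag': 'a'} yields columns n INTEGER, err DOUBLE,
+    tag TEXT (plus the automatic id and timestamp columns when referenceRuns
+    is False).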
      """
-    def pythonToSqlType( python_type ):
-        if   python_type is int:   return( "INTEGER" )
-        elif python_type is bool:  return( "INTEGER" )
-        elif python_type is float: return( "DOUBLE"  )
-        elif python_type is str:   return( "TEXT"    )
-        elif python_type is list:  return( pythonToSqlType( type(value[0] ) ))
-        elif python_type is tuple: return( pythonToSqlType( type(value[0] ) ))
-        
+
+    def pythonToSqlType(python_type):
+        if python_type is int:
+            return "INTEGER"
+        elif python_type is bool:
+            return "INTEGER"
+        elif python_type is float:
+            return "DOUBLE"
+        elif python_type is str:
+            return "TEXT"
+        elif python_type is list:
+            return pythonToSqlType(type(value[0]))
+        elif python_type is tuple:
+            return pythonToSqlType(type(value[0]))
+
     if referenceRuns:
-        names= [ "runId"   ]
-        types= [ "INTEGER" ]
+        names = ["runId"]
+        types = ["INTEGER"]
     else:
-        names = [ "id",                  "timestamp" ]
-        types = [ "INTEGER PRIMARY KEY", "DATETIME DEFAULT  (datetime('now','localtime'))" ]
-    
-    for key,value in data.items():
-        names.append( key )
-        types.append( pythonToSqlType( type(value) ) )
-        
-    columns = [ ("%s %s") % e  for e in  zip( names, types ) ]
-    
-    createQuery = "CREATE TABLE IF NOT EXISTS %s ( %s );" % ( tableName, ",".join( columns )  )
-    alterQueries = [ "ALTER TABLE %s ADD COLUMN %s %s;" % ( tableName, key, typ ) for key,typ in zip(names, types) ]
-    
-    conn = sqlite3.connect( dbFile )
+        names = ["id", "timestamp"]
+        types = ["INTEGER PRIMARY KEY", "DATETIME DEFAULT  (datetime('now','localtime'))"]
+
+    for key, value in data.items():
+        names.append(key)
+        types.append(pythonToSqlType(type(value)))
+
+    columns = [("%s %s") % e for e in zip(names, types)]
+
+    createQuery = "CREATE TABLE IF NOT EXISTS %s ( %s );" % (tableName, ",".join(columns))
+    alterQueries = ["ALTER TABLE %s ADD COLUMN %s %s;" % (tableName, key, typ) for key, typ in zip(names, types)]
+
+    conn = sqlite3.connect(dbFile)
     c = conn.cursor()
-    
-    c.execute( createQuery )
+
+    c.execute(createQuery)
     for q in alterQueries:
         try:
-            c.execute( q )
+            c.execute(q)
         except sqlite3.OperationalError as e:
-            pass
+            # expected when the column already exists
+            print(e)
-    
+
     conn.commit()
     conn.close()
-    
-    
-
-
-
diff --git a/python/waLBerla/tools/sqlitedb/merge.py b/python/waLBerla/tools/sqlitedb/merge.py
index 840c4019c..5628b1f39 100755
--- a/python/waLBerla/tools/sqlitedb/merge.py
+++ b/python/waLBerla/tools/sqlitedb/merge.py
@@ -3,107 +3,109 @@
 import sqlite3
 
 
-def getColumnNames ( db, tableName, dbName ):
+def getColumnNames(db, tableName, dbName):
     """Returns list of columns for a sqlite3 table
-    :param db       :  database connection
-    :param dbName   : name of database
+    :param db:        database connection
+    :param dbName:    name of database
     :param tableName: name of table
     :return: list of column names for this table"""
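+    # Illustrative (hypothetical schema): for a table created as
+    # CREATE TABLE t (id INTEGER, name TEXT) this returns
+    # [('id', 'INTEGER'), ('name', 'TEXT')].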
     cursor = db.cursor()
-    cursor.execute("PRAGMA %s.table_info(%s)"  % (dbName,tableName) )
+    cursor.execute("PRAGMA %s.table_info(%s)" % (dbName, tableName))
     columns = cursor.fetchall()
-    
+
     res = []
     for e in columns:
-        res.append ( (e[1], e[2].upper()) )
-    
+        res.append((e[1], e[2].upper()))
+
     return res
 
 
-def mergeSqliteFiles ( targetFile, fileToMerge ):
+def mergeSqliteFiles(targetFile, fileToMerge):
     """Merges sqlite3 database 'fileToMerge' into 'targetFile' database.
-       Works only if both tables have a 'runs' table.  Other tables are ignored!
+       Works only if both databases have a 'runs' table.  Other tables are ignored!
        If the runs table in one of the databases has more columns, these columns are created in the merged database.
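+
+       Example (illustrative; file names are hypothetical)::
+
+           mergeSqliteFiles('combined.sqlite', 'run_node42.sqlite')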
     """
-    db  = sqlite3.connect( targetFile )
-    db.execute ('ATTACH "' + fileToMerge + '" AS toMerge')
-    
-    targetColumns  = getColumnNames( db, "runs", "main" )
-    toMergeColumns = getColumnNames( db, "runs", "toMerge" )
-    
+    db = sqlite3.connect(targetFile)
+    db.execute('ATTACH "' + fileToMerge + '" AS toMerge')
+
+    targetColumns = getColumnNames(db, "runs", "main")
+    toMergeColumns = getColumnNames(db, "runs", "toMerge")
+
     columnsToCreate = [e for e in toMergeColumns if e not in targetColumns]
-    
+
     for column in columnsToCreate:
-        print( "Adding Column {} to run table of {} ".format( column[0], targetFile ) )
-        db.execute ( "ALTER TABLE main.runs ADD COLUMN %s %s" % ( column[0], column[1] ) )
-        
+        print("Adding Column {} to run table of {} ".format(column[0], targetFile))
+        db.execute("ALTER TABLE main.runs ADD COLUMN %s %s" % (column[0], column[1]))
+
     # Fetch all runs from toMerge,
     # check if an entry with same date exists, if not add the run and the timing pool entries
     # to the targetTable
     c = db.cursor()
-    assert( toMergeColumns[0][0] == "runId")
-    columns = [ e[0] for e in toMergeColumns ]
-    columnString        = ",".join( columns     )
-    columnStringNoRunId = ",".join( columns[1:] )
-    
-    query  = 'SELECT {} FROM toMerge.runs WHERE timestamp || " " || uuid NOT IN '.format(columnString,)
-    query += '( SELECT timestamp || " " || uuid FROM main.runs )' 
-
-    # associated tables are tables that reference the runs table, having a first column of 'runId' which is a 
+    assert toMergeColumns[0][0] == "runId"
+    columns = [e[0] for e in toMergeColumns]
+    columnString = ",".join(columns)
+    columnStringNoRunId = ",".join(columns[1:])
+
+    query = 'SELECT {} FROM toMerge.runs WHERE timestamp || " " || uuid NOT IN '.format(columnString)
+    query += '( SELECT timestamp || " " || uuid FROM main.runs )'
+
+    # associated tables are tables that reference the runs table, having a first column of 'runId' which is a
     # foreign key to runs
     associatedTables = []
     associatedTablesColumnNames = []
-    
+
     c.execute("SELECT name FROM sqlite_master WHERE type='table';")
-    tableNames = [ e[0] for e in c.fetchall() ]
+    tableNames = [e[0] for e in c.fetchall()]
     for tableName in tableNames:
-        if tableName == 'runs': continue
+        if tableName == 'runs':
+            continue
 
-        mainColumns    = getColumnNames(db, tableName, "main"   )
+        mainColumns = getColumnNames(db, tableName, "main")
         toMergeColumns = getColumnNames(db, tableName, "toMerge")
-        
+
         if mainColumns != toMergeColumns:
-            print ("Warning: Not merging associated table %s, since they have different columns." % (tableName,) ) 
+            print("Warning: Not merging associated table %s, since they have different columns." % (tableName,))
             continue
-        
-        columnNames = [ e[0] for e in mainColumns ]
+
+        columnNames = [e[0] for e in mainColumns]
         if columnNames[0] != "runId":
-            print ("Warning: Not merging table %s, since foreign key column 'runId' not found." % (tableName,) ) 
+            print("Warning: Not merging table %s, since foreign key column 'runId' not found." % (tableName,))
             continue
-        
-        associatedTables.append( tableName )
-        associatedTablesColumnNames.append( columnNames )
+
+        associatedTables.append(tableName)
+        associatedTablesColumnNames.append(columnNames)
 
     mergedRuns = 0
-    for run in c.execute (query):
+    for run in c.execute(query):
         # Build up insert statement for 'runs' table
-        questionMarkList   = ['?'] * (len(run)-1)
-        questionMarkString = ",".join( questionMarkList )
-        insertStatement = "INSERT INTO main.runs (%s) VALUES (%s);" % ( columnStringNoRunId, questionMarkString ) 
+        questionMarkList = ['?'] * (len(run) - 1)
+        questionMarkString = ",".join(questionMarkList)
+        insertStatement = "INSERT INTO main.runs (%s) VALUES (%s);" % (columnStringNoRunId, questionMarkString)
         # Execute the insert
         insertCursor = db.cursor()
-        insertCursor.execute( insertStatement, run[1:] )
+        insertCursor.execute(insertStatement, run[1:])
         insertedRunId = insertCursor.lastrowid
         originalRunId = run[0]
-        
+
         # Insert the corresponding entries from associated tables
-        for associatedTable, columnNames in zip( associatedTables, associatedTablesColumnNames ):
-            assocTableQuery = "SELECT %s FROM toMerge.%s WHERE runId=?" % ( ",".join( columnNames[1:] ), associatedTable )
+        for associatedTable, columnNames in zip(associatedTables, associatedTablesColumnNames):
+            assocTableQuery = "SELECT %s FROM toMerge.%s WHERE runId=?" % (",".join(columnNames[1:]), associatedTable)
             assocTableInsertCursor = db.cursor()
-            assocTableQueryCursor  = db.cursor()
-    
-            for vals in assocTableQueryCursor.execute ( assocTableQuery, ( originalRunId,) ): 
-                questionMarkList   = ['?'] * len(columnNames) 
-                questionMarkString = ",".join( questionMarkList )
-                insertQuery = "INSERT INTO main.%s (%s) VALUES (%s)" % ( associatedTable, ",".join(columnNames), questionMarkString)
-                assocTableInsertCursor.execute ( insertQuery, (insertedRunId,) + vals )
-            
-        mergedRuns = mergedRuns +1
-    
+            assocTableQueryCursor = db.cursor()
+
+            for vals in assocTableQueryCursor.execute(assocTableQuery, (originalRunId,)):
+                questionMarkList = ['?'] * len(columnNames)
+                questionMarkString = ",".join(questionMarkList)
+                insertQuery = "INSERT INTO main.%s (%s) VALUES (%s)" % (associatedTable, ",".join(columnNames),
+                                                                        questionMarkString)
+                assocTableInsertCursor.execute(insertQuery, (insertedRunId,) + vals)
+
+        mergedRuns += 1
+
     c.execute("SELECT COUNT(*) FROM toMerge.runs")
     totalRows = c.fetchall()[0][0]
-    
-    print( "Merged {}/{} runs from {} to {} ".format( mergedRuns, totalRows, fileToMerge, targetFile ) )
+
+    print("Merged {}/{} runs from {} to {} ".format(mergedRuns, totalRows, fileToMerge, targetFile))
     db.commit()
     db.close()
 
@@ -118,7 +120,8 @@ if __name__ == "__main__":
     parser.add_argument('output_file', type=Path)
     parser.add_argument('input_folder', type=Path)
     parser.add_argument('-r', '--recursive', action='store_true', help='Take all subfolders into account.')
-    parser.add_argument('-f', '--filter', default='.*', dest='regex', help='Only files matching this regex are accepted.')
+    parser.add_argument('-f', '--filter', default='.*', dest='regex',
+                        help='Only files matching this regex are accepted.')
     args = parser.parse_args()
 
     regex = re.compile(args.regex)
diff --git a/python/waLBerla_docs/doxylink.py b/python/waLBerla_docs/doxylink.py
index 5f18eaf00..4d3c8c8d6 100644
--- a/python/waLBerla_docs/doxylink.py
+++ b/python/waLBerla_docs/doxylink.py
@@ -1,64 +1,78 @@
-from docutils.parsers.rst import Directive
-from sphinx.addnodes import download_reference
-from sphinx.writers.html import HTMLTranslator
+# from docutils.parsers.rst import Directive
+# from sphinx.addnodes import download_reference
+# from sphinx.writers.html import HTMLTranslator
 from sphinx.domains import Domain
 from docutils import nodes
 
+
 class DoxyDomain(Domain):
     name = "doxylink"
 
 
-def generate_doxygen_link( type, value ):
-    assert( type=="class" or type=="struct" or type=="namespace" or type == "file")
-    value = value.replace("_", "__" )
-    value = value.replace(":", "_1" )
-    value = value.replace("/", "_2" )
-    value = value.replace(".", "_8" )
-    
-    if type=="file" : type = ""
-    
+def generate_doxygen_link(type, value):
+    assert (type == "class" or type == "struct" or type == "namespace" or type == "file")
+    value = value.replace("_", "__")
+    value = value.replace(":", "_1")
+    value = value.replace("/", "_2")
+    value = value.replace(".", "_8")
+
+    if type == "file":
+        type = ""
+
     result = type + value + ".html"
     return result
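+
+
+# Illustrative example (the identifier is hypothetical, not from the original
+# code): generate_doxygen_link("class", "walberla::Field") returns
+# "classwalberla_1_1Field.html" - each ':' is mangled to '_1'.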
 
 
-def doxylink_class    ( *args, **kwargs ): return doxygenlink_role( "class",     *args, **kwargs )
-def doxylink_struct   ( *args, **kwargs ): return doxygenlink_role( "struct",    *args, **kwargs )
-def doxylink_namespace( *args, **kwargs ): return doxygenlink_role( "namespace", *args, **kwargs )
-def doxylink_file     ( *args, **kwargs ): return doxygenlink_role( "file",      *args, **kwargs )
-    
-def doxygenlink_role( type, name, rawtext, text, lineno, inliner, options={}, content=[]):
+def doxylink_class(*args, **kwargs):
+    return doxygenlink_role("class", *args, **kwargs)
+
+
+def doxylink_struct(*args, **kwargs):
+    return doxygenlink_role("struct", *args, **kwargs)
+
+
+def doxylink_namespace(*args, **kwargs):
+    return doxygenlink_role("namespace", *args, **kwargs)
+
+
+def doxylink_file(*args, **kwargs):
+    return doxygenlink_role("file", *args, **kwargs)
+
+
+def doxygenlink_role(type, name, rawtext, text, lineno, inliner, options={}, content=[]):
     try:
         app = inliner.document.settings.env.app
         doxylink = generate_doxygen_link(type, text)
-        node = make_link_node(rawtext, app, text, doxylink , options)
+        node = make_link_node(rawtext, app, text, doxylink, options)
         return [node], []
     except ValueError:
-        msg = inliner.reporter.error( "Error parsing doxylink. It has to be of form [class,struct,namespace]:identifierName" )
+        msg = inliner.reporter.error(
+            "Error parsing doxylink. It has to be of form [class,struct,namespace]:identifierName")
         prb = inliner.problematic(rawtext, rawtext, msg)
         return [prb], [msg]
-    
-    
+
+
 def make_link_node(rawtext, app, text, doxylink, options):
     try:
         base = app.config.doxylink_baseurl
         if not base:
             raise AttributeError
-    except (AttributeError, err):
+    except AttributeError as err:
         raise ValueError('doxygenlink_base_url configuration value is not set (%s)' % str(err))
     #
     slash = '/' if base[-1] != '/' else ''
-    ref = base + slash + '/' + doxylink
+    ref = base + slash + doxylink
-    node = nodes.reference(rawtext, text, refuri=ref, reftitle='(C++ Documentation)' )
+    node = nodes.reference(rawtext, text, refuri=ref, reftitle='(C++ Documentation)')
     return node
 
-    
+
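+# Illustrative reST usage (assumes doxylink_baseurl is set in conf.py; the
+# identifier is hypothetical):
+#   :doxylink:class:`walberla::Field`
+# renders the identifier as a link into the generated C++ documentation.
+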
 def setup(app):
-    app.add_domain( DoxyDomain)
-    
-    app.add_role_to_domain('doxylink', 'class'    , doxylink_class    )
-    app.add_role_to_domain('doxylink', 'struct'   , doxylink_struct   )
+    app.add_domain(DoxyDomain)
+
+    app.add_role_to_domain('doxylink', 'class', doxylink_class)
+    app.add_role_to_domain('doxylink', 'struct', doxylink_struct)
     app.add_role_to_domain('doxylink', 'namespace', doxylink_namespace)
-    app.add_role_to_domain('doxylink', 'file'     , doxylink_file     )
-    
+    app.add_role_to_domain('doxylink', 'file', doxylink_file)
+
     app.add_config_value('doxylink_baseurl', None, 'env')
-    return    
\ No newline at end of file
+    return
diff --git a/python/waLBerla_docs/ipython/ipython-tutorials/material/matplotlib_setup.py b/python/waLBerla_docs/ipython/ipython-tutorials/material/matplotlib_setup.py
index f2eac1bcd..3120118f7 100644
--- a/python/waLBerla_docs/ipython/ipython-tutorials/material/matplotlib_setup.py
+++ b/python/waLBerla_docs/ipython/ipython-tutorials/material/matplotlib_setup.py
@@ -6,19 +6,22 @@ from tempfile import NamedTemporaryFile
 import base64
 
 from IPython import get_ipython
+
 ipython = get_ipython()
 
+ipython.magic("matplotlib inline")  # Show plots as images embedded in iPython notebook
 
-ipython.magic("matplotlib inline")                      # Show plots as images embedded in iPython notebook
 
 def setMplFigureSize():
     matplotlib.rcParams['figure.figsize'] = (15.0, 12.0)
 
+
 VIDEO_TAG = """<video controls width="80%">
  <source src="data:video/x-m4v;base64,{0}" type="video/mp4">
  Your browser does not support the video tag.
 </video>"""
 
+
 def __anim_to_html(anim, fps):
     if not hasattr(anim, '_encoded_video'):
         with NamedTemporaryFile(suffix='.mp4') as f:
@@ -30,17 +33,18 @@ def __anim_to_html(anim, fps):
     return VIDEO_TAG.format(anim._encoded_video)
 
 
-
-def makeImshowAnimation( grid, gridUpdateFunction, frames=90, **kwargs ):
+def makeImshowAnimation(grid, gridUpdateFunction, frames=90, **kwargs):
     from functools import partial
     fig = plt.figure()
-    im = plt.imshow( grid, interpolation='none' )
+    im = plt.imshow(grid, interpolation='none')
+
     def updatefig(*args, **kwargs):
         image = kwargs['image']
-        image = gridUpdateFunction( image )
-        im.set_array( image )
+        image = gridUpdateFunction(image)
+        im.set_array(image)
         return im,
-    return animation.FuncAnimation(fig, partial(updatefig,image=grid), frames=frames )
+
+    return animation.FuncAnimation(fig, partial(updatefig, image=grid), frames=frames)
 
 
 # -------   Version 1: Embed the animation as HTML5 video --------- ----------------------------------
@@ -54,7 +58,7 @@ def displayAsHtmlVideo(anim, fps=30, show=True, **kwargs):
         else:
             return HTML("")
     except KeyboardInterrupt:
-      pass
+        pass
 
 
 # -------   Version 2: Animation is shown in extra matplotlib window ----------------------------------
@@ -63,15 +67,15 @@ def displayAsHtmlVideo(anim, fps=30, show=True, **kwargs):
 def displayInExtraWindow(animation, *args, **kwargs):
     fig = plt.gcf()
     try:
-      fig.canvas.manager.window.raise_()
+        fig.canvas.manager.window.raise_()
     except Exception:
-      pass
+        pass
     plt.show()
 
 
 # -------   Version 3: Animation is shown in images that are updated directly in website --------------
 
-def displayAsHtmlImage(animation, show=True, iterations=10000,  *args, **kwargs):
+def displayAsHtmlImage(animation, show=True, iterations=10000, *args, **kwargs):
     from IPython import display
 
     try:
@@ -94,29 +98,31 @@ def displayAsHtmlImage(animation, show=True, iterations=10000,  *args, **kwargs)
 animation_display_mode = 'imageupdate'
 display_animation_func = None
 
+
 def disp(*args, **kwargs):
-  if not display_animation_func:
-    raise("Call set_display_mode first")
-  return display_animation_func(*args,**kwargs)
+    if not display_animation_func:
+        raise ("Call set_display_mode first")
+    return display_animation_func(*args, **kwargs)
 
 
 def set_display_mode(mode):
-  from IPython import get_ipython
-  ipython = get_ipython()
-  global animation_display_mode
-  global display_animation_func
-  animation_display_mode = mode
-  if animation_display_mode == 'video':
-    ipython.magic("matplotlib inline")
-    display_animation_func = displayAsHtmlVideo
-  elif animation_display_mode == 'window':
-    ipython.magic("matplotlib qt")
-    display_animation_func = displayInExtraWindow
-  elif animation_display_mode == 'imageupdate':
-    ipython.magic("matplotlib inline")
-    display_animation_func = displayAsHtmlImage
-  else:
-    raise Exception("Unknown mode. Available modes 'imageupdate', 'video' and 'window' ")
+    from IPython import get_ipython
+    ipython = get_ipython()
+    global animation_display_mode
+    global display_animation_func
+    animation_display_mode = mode
+    if animation_display_mode == 'video':
+        ipython.magic("matplotlib inline")
+        display_animation_func = displayAsHtmlVideo
+    elif animation_display_mode == 'window':
+        ipython.magic("matplotlib qt")
+        display_animation_func = displayInExtraWindow
+    elif animation_display_mode == 'imageupdate':
+        ipython.magic("matplotlib inline")
+        display_animation_func = displayAsHtmlImage
+    else:
+        raise Exception("Unknown mode. Available modes 'imageupdate', 'video' and 'window' ")
+
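+# Illustrative usage (assumes an IPython notebook session; 'anim' is a
+# hypothetical matplotlib FuncAnimation):
+#   set_display_mode('video')   # switch disp() to HTML5 video output
+#   disp(anim, fps=30)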
 
 set_display_mode('imageupdate')
-setMplFigureSize()
\ No newline at end of file
+setMplFigureSize()
diff --git a/python/waLBerla_tests/test_blockforest.py b/python/waLBerla_tests/test_blockforest.py
index 44fb90c90..59e0057b2 100644
--- a/python/waLBerla_tests/test_blockforest.py
+++ b/python/waLBerla_tests/test_blockforest.py
@@ -1,39 +1,37 @@
 import unittest
-from waLBerla import *
+from waLBerla import field, createUniformBlockGrid
 
 
+class BlockforestModuleTest(unittest.TestCase):
 
-class BlockforestModuleTest( unittest.TestCase ):
-    
     def testMemoryManagement1(self):
         """Testing correct reference counting of block data"""
-        blocks = createUniformBlockGrid( cells=(2,2,2) )
-        field.addToStorage( blocks, "TestField", float )
+        blocks = createUniformBlockGrid(cells=(2, 2, 2))
+        field.addToStorage(blocks, "TestField", float)
         f = blocks[0]['TestField']
         stridesBefore = f.strides
         del blocks
-        # create another block structure - this has triggered segfault 
+        # create another block structure - this has triggered segfault
         # when previous blockstructure was already freed
-        blocks = createUniformBlockGrid( cells=(2,2,2) )
-        
+        blocks = createUniformBlockGrid(cells=(2, 2, 2))  # noqa: F841
+
         # The first block structure must exist here, since we hold a reference to block data
         # if it would have been deleted already f.strides should lead to segfault or invalid values
         self.assertEqual(stridesBefore, f.strides)
-        
+
     def testMemoryManagement2(self):
-        """Testing correct reference counting of block data 
+        """Testing correct reference counting of block data
            Holding only a numpy array pointing to a waLBerla field should still hold the blockstructure alive"""
-        blocks = createUniformBlockGrid( cells=(2,2,2) )
-        field.addToStorage( blocks, "TestField", float )
-        npf = field.toArray( blocks[0]['TestField'] )
-        npf[:,:,:,:] = 42.0
+        blocks = createUniformBlockGrid(cells=(2, 2, 2))
+        field.addToStorage(blocks, "TestField", float)
+        npf = field.toArray(blocks[0]['TestField'])
+        npf[:, :, :, :] = 42.0
         del blocks
-        # create another block structure - this has triggered segfault 
+        # create another block structure - this has triggered segfault
         # when previous blockstructure was already freed
-        blocks = createUniformBlockGrid( cells=(2,2,2) )
-        self.assertEqual(npf[0,0,0,0], 42.0)
+        blocks = createUniformBlockGrid(cells=(2, 2, 2))  # noqa: F841
+        self.assertEqual(npf[0, 0, 0, 0], 42.0)
 
 
 if __name__ == '__main__':
     unittest.main()
-    
\ No newline at end of file
diff --git a/python/waLBerla_tests/test_core.py b/python/waLBerla_tests/test_core.py
index a37d6a99b..7d59da398 100644
--- a/python/waLBerla_tests/test_core.py
+++ b/python/waLBerla_tests/test_core.py
@@ -2,32 +2,30 @@ import unittest
 import waLBerla as wlb
 
 
-class CoreTest( unittest.TestCase ):
-    
+class CoreTest(unittest.TestCase):
+
     def test_CellInterval(self):
-        ci1 = wlb.CellInterval( 0,0,0, 5,5,5 )
-        ci2 = wlb.CellInterval( [0]*3, [5]*3 )
-        self.assertEqual   ( ci1, ci2,   "Equality comparison of CellIntervals failed." )
-        self.assertFalse   ( ci1 != ci2, "Inequality check for CellIntervals wrong " )
-        
-        self.assertEqual( ci1.min, (0,0,0) , "CellInterval min wrong")
-        self.assertEqual( ci1.max, (5,5,5) , "CellInterval max wrong")
-        
-        
-        self.assertFalse( ci1.empty() )
-        
-        ci1.intersect( ci2 )
-        self.assertTrue( ci1.contains( ci2 ) )
-        
-        ci2.expand( 1 )
-        self.assertFalse( ci1.contains( ci2 ) )
-        
+        ci1 = wlb.CellInterval(0, 0, 0, 5, 5, 5)
+        ci2 = wlb.CellInterval([0] * 3, [5] * 3)
+        self.assertEqual(ci1, ci2, "Equality comparison of CellIntervals failed.")
+        self.assertFalse(ci1 != ci2, "Inequality check for CellIntervals wrong ")
+
+        self.assertEqual(ci1.min, (0, 0, 0), "CellInterval min wrong")
+        self.assertEqual(ci1.max, (5, 5, 5), "CellInterval max wrong")
+
+        self.assertFalse(ci1.empty())
+
+        ci1.intersect(ci2)
+        self.assertTrue(ci1.contains(ci2))
+
+        ci2.expand(1)
+        self.assertFalse(ci1.contains(ci2))
+
     def test_AABB(self):
-        aabb1 = wlb.AABB( 0,0,0, 5,5,5 )
-        aabb2 = wlb.AABB( [0]*3, [5]*3 )
+        aabb1 = wlb.AABB(0, 0, 0, 5, 5, 5)
+        aabb2 = wlb.AABB([0] * 3, [5] * 3)
         self.assertEqual(aabb1, aabb2)
-        
-        
-        
+
+
 if __name__ == '__main__':
-    unittest.main()        
\ No newline at end of file
+    unittest.main()
diff --git a/python/waLBerla_tests/test_cuda_comm.py b/python/waLBerla_tests/test_cuda_comm.py
index c94f2cb6a..3ef61ee30 100644
--- a/python/waLBerla_tests/test_cuda_comm.py
+++ b/python/waLBerla_tests/test_cuda_comm.py
@@ -1,25 +1,25 @@
-from waLBerla import *
+from waLBerla import field, createUniformBlockGrid, createUniformBufferedScheme, cuda
 import numpy as np
-import pycuda.autoinit
-from pycuda.gpuarray import *
-from pycuda import *
+import pycuda.autoinit  # noqa: F401
+import pycuda.gpuarray  # noqa: F401
+# from pycuda import *
 from pystencils.field import createNumpyArrayWithLayout, getLayoutOfArray
 
-blocks = createUniformBlockGrid( cells=(1,1,1), periodic=(1,1,1) )
+blocks = createUniformBlockGrid(cells=(1, 1, 1), periodic=(1, 1, 1))
 cuda.addGpuFieldToStorage(blocks, "gpuField", float, fSize=1, ghostLayers=1, layout=field.fzyx, usePitchedMem=False)
 
-gpuArr = cuda.toGpuArray(blocks[0]['gpuField'])
+gpuArr = cuda.toGpuArray(blocks[0]['gpuField'])
 
 testField = createNumpyArrayWithLayout(gpuArr.shape, getLayoutOfArray(gpuArr))
 testField[...] = 0
-testField[1,1,1,0] = 1
+testField[1, 1, 1, 0] = 1
 gpuArr.set(testField)
 
 scheme = createUniformBufferedScheme(blocks, "D3Q27")
-scheme.addDataToCommunicate( cuda.createPackInfo(blocks, "gpuField") )
+scheme.addDataToCommunicate(cuda.createPackInfo(blocks, "gpuField"))
 
 scheme()
 
 gpuArr = cuda.toGpuArray(blocks[0]['gpuField'])
 
-assert(np.allclose(np.ones([3,3,3,1]), gpuArr.get()))
+assert np.allclose(np.ones([3, 3, 3, 1]), gpuArr.get())
diff --git a/python/waLBerla_tests/test_field.py b/python/waLBerla_tests/test_field.py
index a2b6c70ee..9f984b3e0 100644
--- a/python/waLBerla_tests/test_field.py
+++ b/python/waLBerla_tests/test_field.py
@@ -1,59 +1,59 @@
 import unittest
-from waLBerla import *
+from waLBerla import field, createUniformBlockGrid
 
-class FieldModuleTest( unittest.TestCase ):
+
+class FieldModuleTest(unittest.TestCase):
 
     def testFieldAsBlockData(self):
-        blocks = createUniformBlockGrid( cells=( 3,2, 2 ), periodic=(1,0,0) )
-        field.addToStorage( blocks, 'myField', float, fSize=3, ghostLayers=0, initValue=0.0 )
+        blocks = createUniformBlockGrid(cells=(3, 2, 2), periodic=(1, 0, 0))
+        field.addToStorage(blocks, 'myField', float, fSize=3, ghostLayers=0, initValue=0.0)
         myField = blocks[0]['myField']
-        self.assertEqual( myField[0,0,0,0] , 0 )
-        myField[0,0,0,0] = 42.0
-        self.assertEqual( myField[0,0,0,0], 42.0 )
+        self.assertEqual(myField[0, 0, 0, 0], 0)
+        myField[0, 0, 0, 0] = 42.0
+        self.assertEqual(myField[0, 0, 0, 0], 42.0)
 
-        self.assertRaises(IndexError, myField.__getitem__, (3,0,0) )
+        self.assertRaises(IndexError, myField.__getitem__, (3, 0, 0))
 
     def testNumpyConversionWithoutGhostLayers(self):
-        f1 = field.createField( [1,2,3,4],  float, 2 , field.zyxf )
-        f2 = field.createField( [1,2,3,5],  float, 4 , field.zyxf )
-        f1np = field.toArray( f1 )
-        f2np = field.toArray( f2 )
-        self.assertEqual( f1np[0,0,0,0], 0 )
-        self.assertEqual( f1np.shape, (1,2,3,4) )
-        self.assertEqual( f2np.shape, (1,2,3,5) )
-
-        f1np[0,0,0,0] = 1
-        f2np[0,0,0,0] = 2
-        self.assertEqual( f1[0,0,0,0], 1)
-        self.assertEqual( f2[0,0,0,0], 2)
+        f1 = field.createField([1, 2, 3, 4], float, 2, field.zyxf)
+        f2 = field.createField([1, 2, 3, 5], float, 4, field.zyxf)
+        f1np = field.toArray(f1)
+        f2np = field.toArray(f2)
+        self.assertEqual(f1np[0, 0, 0, 0], 0)
+        self.assertEqual(f1np.shape, (1, 2, 3, 4))
+        self.assertEqual(f2np.shape, (1, 2, 3, 5))
+
+        f1np[0, 0, 0, 0] = 1
+        f2np[0, 0, 0, 0] = 2
+        self.assertEqual(f1[0, 0, 0, 0], 1)
+        self.assertEqual(f2[0, 0, 0, 0], 2)
 
     def testNumpyConversionWithGhostLayers(self):
-        f = field.createField( [1,2,3,1],  float, 2 , field.zyxf )
-        fnp = field.toArray( f, withGhostLayers=True )
-
-        self.assertEqual( fnp[0,0,0,0], 0 )
-        self.assertEqual( fnp.shape, (1+4,2+4,3+4,1) )
-        fnp[0,0,0,0] = 42
-        self.assertEqual( f[-2,-2,-2,0], 42 )
-    
+        f = field.createField([1, 2, 3, 1], float, 2, field.zyxf)
+        fnp = field.toArray(f, withGhostLayers=True)
+
+        self.assertEqual(fnp[0, 0, 0, 0], 0)
+        self.assertEqual(fnp.shape, (1 + 4, 2 + 4, 3 + 4, 1))
+        fnp[0, 0, 0, 0] = 42
+        self.assertEqual(f[-2, -2, -2, 0], 42)
+
     def testGhostLayerExtraction(self):
         size = [10, 5, 4]
         gl = 3
-        f = field.createField( size,  float, ghostLayers = gl )
-        
-        view1 = field.toArray( f, withGhostLayers=True )
-        self.assertEqual( view1[:,:,:,0].shape, tuple( [ s+2*gl for s in size ] ) )
-
-        view2 = field.toArray( f, withGhostLayers=False )
-        self.assertEqual( view2[:,:,:,0].shape, tuple( size ) )
-        
-        view3 = field.toArray( f, withGhostLayers=2 )
-        self.assertEqual( view3[:,:,:,0].shape, tuple( [ s+2*2 for s in size ] ) )
-        
-        view4 = field.toArray( f, withGhostLayers=[2,False,True] )
-        self.assertEqual( view4[:,:,:,0].shape, tuple( [ size[0]+2*2, size[1]+2*0, size[2]+2*gl  ] ) )
-
-    
+        f = field.createField(size, float, ghostLayers=gl)
+
+        view1 = field.toArray(f, withGhostLayers=True)
+        self.assertEqual(view1[:, :, :, 0].shape, tuple([s + 2 * gl for s in size]))
+
+        view2 = field.toArray(f, withGhostLayers=False)
+        self.assertEqual(view2[:, :, :, 0].shape, tuple(size))
+
+        view3 = field.toArray(f, withGhostLayers=2)
+        self.assertEqual(view3[:, :, :, 0].shape, tuple([s + 2 * 2 for s in size]))
+
+        view4 = field.toArray(f, withGhostLayers=[2, False, True])
+        self.assertEqual(view4[:, :, :, 0].shape, tuple([size[0] + 2 * 2, size[1] + 2 * 0, size[2] + 2 * gl]))
+
+
 if __name__ == '__main__':
     unittest.main()
-	
\ No newline at end of file
diff --git a/python/waLBerla_tests/test_simpleLBM.py b/python/waLBerla_tests/test_simpleLBM.py
index 9ffe213a4..defe348eb 100644
--- a/python/waLBerla_tests/test_simpleLBM.py
+++ b/python/waLBerla_tests/test_simpleLBM.py
@@ -1,129 +1,136 @@
-from waLBerla import *
-from waLBerla.geometry_setup import *
+from waLBerla import makeSlice, field, mpi, lbm, createUniformBlockGrid, createUniformBufferedScheme
+from waLBerla.geometry_setup import setBoundaryFromBlackAndWhiteImage, setFieldUsingFlagMask
 import itertools
 
 import os
+import numpy as np
+import scipy.ndimage
 
-imageFile = os.path.join( os.path.dirname(__file__), 'wing.png' )
+imageFile = os.path.join(os.path.dirname(__file__), 'wing.png')
 
-def setBoundariesChannel( blocks, boundaryHandlingID ):
+
+def setBoundariesChannel(blocks, boundaryHandlingID):
     for block in blocks:
-        b = block[ boundaryHandlingID ]
+        b = block[boundaryHandlingID]
         if block.atDomainMinBorder[1]:
-            b.forceBoundary( 'NoSlip',   makeSlice[ :, 0, :, 'g'] )
+            b.forceBoundary('NoSlip', makeSlice[:, 0, :, 'g'])
         if block.atDomainMaxBorder[1]:
-            b.forceBoundary( 'NoSlip',   makeSlice[ :,-1, :, 'g'] )
+            b.forceBoundary('NoSlip', makeSlice[:, -1, :, 'g'])
         b.fillWithDomain()
 
 
 class ForceCalculationMasks:
     @staticmethod
-    def addToBlock( block, blockStorage ):
-        pdfFieldArr  = field.toArray( block['pdfs'] )
-        flagFieldArr = field.toArray( block['flags'] )[:,:,:,0]
-        directions   = block['pdfs'].latticeModel.directions
-        maskArr = np.zeros( pdfFieldArr.shape, dtype=bool )
-        pdfDirectionArr = np.zeros( list(pdfFieldArr.shape) + [3] )
-
-        nearBoundaryFlag =  block['flags'].flag("fluid")
-        noSlipFlag       =  block['flags'].flag("NoSlip")
-
-        innerPartOfDomain = itertools.product( range( 2, maskArr.shape[0]-2),
-                                               range( 2, maskArr.shape[1]-2),
-                                               range( maskArr.shape[2] ) )
-
-        for x,y,z in innerPartOfDomain :
-            if flagFieldArr[x,y,z] & nearBoundaryFlag:
+    def addToBlock(block, blockStorage):
+        pdfFieldArr = field.toArray(block['pdfs'])
+        flagFieldArr = field.toArray(block['flags'])[:, :, :, 0]
+        directions = block['pdfs'].latticeModel.directions
+        maskArr = np.zeros(pdfFieldArr.shape, dtype=bool)
+        pdfDirectionArr = np.zeros(list(pdfFieldArr.shape) + [3])
+
+        nearBoundaryFlag = block['flags'].flag("fluid")
+        noSlipFlag = block['flags'].flag("NoSlip")
+
+        innerPartOfDomain = itertools.product(range(2, maskArr.shape[0] - 2),
+                                              range(2, maskArr.shape[1] - 2),
+                                              range(maskArr.shape[2]))
+
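+        # For every fluid cell, mark the lattice directions that point into a
+        # NoSlip cell; these PDFs carry the momentum exchanged with the boundary.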
+        for x, y, z in innerPartOfDomain:
+            if flagFieldArr[x, y, z] & nearBoundaryFlag:
                 for dirIdx, dir in enumerate(directions):
-                    nx, ny, nz = x+dir[0], y+dir[1], z+dir[2]
-                    if flagFieldArr[nx,ny,nz] & noSlipFlag:
-                        maskArr[x,y,z,dirIdx ] = True
-                        pdfDirectionArr[x,y,z,:] = dir
-        return ForceCalculationMasks( maskArr, pdfDirectionArr )
+                    nx, ny, nz = x + dir[0], y + dir[1], z + dir[2]
+                    if flagFieldArr[nx, ny, nz] & noSlipFlag:
+                        maskArr[x, y, z, dirIdx] = True
+                        pdfDirectionArr[x, y, z, :] = dir
+        return ForceCalculationMasks(maskArr, pdfDirectionArr)
 
     def __init__(self, maskArr, pdfDirectionArr):
         self._maskArr = maskArr
         self._pdfDirectionArr = pdfDirectionArr
 
     def calculateForceOnBoundary(self, pdfField):
-        force = np.array( [ 0.0 ] * 3 )
-        pdfFieldArr     = field.toArray( pdfField )
+        force = np.array([0.0] * 3)
+        pdfFieldArr = field.toArray(pdfField)
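+        # Sum the masked PDFs, weighted by their direction vectors, to get the force.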
         for i in range(3):
-            fArr = pdfFieldArr[ self._maskArr ] * self._pdfDirectionArr[self._maskArr,i]
-            force[i] += np.sum( fArr )
+            fArr = pdfFieldArr[self._maskArr] * self._pdfDirectionArr[self._maskArr, i]
+            force[i] += np.sum(fArr)
         return force
 
-def calculateForceOnBoundary( blocks ):
-    force = np.array( [ 0.0 ] * 3 )
+
+def calculateForceOnBoundary(blocks):
+    force = np.array([0.0] * 3)
     for block in blocks:
-        force += block['ForceCalculation'].calculateForceOnBoundary( block['pdfs'] )
-    return np.array( mpi.reduceReal( force, mpi.SUM ) )
+        force += block['ForceCalculation'].calculateForceOnBoundary(block['pdfs'])
+    return np.array(mpi.reduceReal(force, mpi.SUM))
 
 
-def makeNacaAirfoilImage( domainSize, thicknessInPercent=30, angle=0 ):
-    def nacaAirfoil(x, thicknessInPercent,chordLength):
+def makeNacaAirfoilImage(domainSize, thicknessInPercent=30, angle=0):
+    def nacaAirfoil(x, thicknessInPercent, chordLength):
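+        # Half-thickness y_t(x) of a symmetric NACA 4-digit (00xx) airfoil,
+        # using the standard thickness polynomial.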
         xOverC = x / chordLength
         y_t = 0
-        coeffs = [ 0.2969, -0.1260, - 0.3516, 0.2843, -0.1015 ]
-        for coeff, exponent in zip( coeffs, [ 0.5, 1,2,3,4 ] ):
+        coeffs = [0.2969, -0.1260, - 0.3516, 0.2843, -0.1015]
+        for coeff, exponent in zip(coeffs, [0.5, 1, 2, 3, 4]):
             y_t += coeff * xOverC ** exponent
-        y_t *= 5 * thicknessInPercent/100 * chordLength
+        y_t *= 5 * thicknessInPercent / 100 * chordLength
         return y_t
 
-    domain = np.zeros( domainSize )
-    it = np.nditer( domain, flags=['multi_index'], op_flags= ['readwrite'] )
+    domain = np.zeros(domainSize)
+    it = np.nditer(domain, flags=['multi_index'], op_flags=['readwrite'])
     while not it.finished:
-        x,y = it.multi_index
-        y -= domain.shape[1]/2
-        if abs(y) < nacaAirfoil( x, thicknessInPercent, domain.shape[0] ):
+        x, y = it.multi_index
+        y -= domain.shape[1] / 2
+        if abs(y) < nacaAirfoil(x, thicknessInPercent, domain.shape[0]):
             it[0] = 1
         it.iternext()
-    domain = np.rot90( domain,1 )
-    domain = scipy.ndimage.interpolation.rotate( domain, angle=angle)
+    domain = np.rot90(domain, 1)
+    domain = scipy.ndimage.interpolation.rotate(domain, angle=angle)
 
-    domain[ domain > 0.5 ] = 1
-    domain[ domain <= 0.5 ] = 0
-    domain = domain.astype( np.int32 )
+    domain[domain > 0.5] = 1
+    domain[domain <= 0.5] = 0
+    domain = domain.astype(np.int32)
     return domain
 
 
-img = makeNacaAirfoilImage( [300, 300 ], 30, angle=-30 )
-
-
+img = makeNacaAirfoilImage([300, 300], 30, angle=-30)
 
 omega = 1.9
-blocks = createUniformBlockGrid( cells=(500,200,1), periodic=(1,0,1) )
+blocks = createUniformBlockGrid(cells=(500, 200, 1), periodic=(1, 0, 1))
 
-collisionModel =lbm.collisionModels.SRT( omega )
-forceModel = lbm.forceModels.SimpleConstant( (1e-5,0,0) )
-latticeModel = lbm.makeLatticeModel( "D2Q9", collisionModel, forceModel )
-lbm.addPdfFieldToStorage( blocks, "pdfs", latticeModel, velocityAdaptor="vel", densityAdaptor="rho", initialDensity=1.0 )
-field.addFlagFieldToStorage( blocks, 'flags' )
-lbm.addBoundaryHandlingToStorage( blocks, 'boundary', 'pdfs', 'flags' )
+collisionModel = lbm.collisionModels.SRT(omega)
+forceModel = lbm.forceModels.SimpleConstant((1e-5, 0, 0))
+latticeModel = lbm.makeLatticeModel("D2Q9", collisionModel, forceModel)
+lbm.addPdfFieldToStorage(blocks, "pdfs", latticeModel, velocityAdaptor="vel", densityAdaptor="rho", initialDensity=1.0)
+field.addFlagFieldToStorage(blocks, 'flags')
+lbm.addBoundaryHandlingToStorage(blocks, 'boundary', 'pdfs', 'flags')
 
+# setBoundaryFromArray( blocks, 'boundary', makeSlice[0.4:0.6, 0.4:0.55 ,0.5], img, { 1: 'NoSlip' } )
+setBoundaryFromBlackAndWhiteImage(blocks, "boundary", makeSlice[0.25:0.75, 0.3:0.6, 0.5], imageFile, "NoSlip")
+setBoundariesChannel(blocks, 'boundary')
 
-#setBoundaryFromArray( blocks, 'boundary', makeSlice[0.4:0.6, 0.4:0.55 ,0.5], img, { 1: 'NoSlip' } )
-setBoundaryFromBlackAndWhiteImage( blocks, "boundary", makeSlice[0.25:0.75, 0.3:0.6 ,0.5], imageFile, "NoSlip" )
-setBoundariesChannel( blocks, 'boundary' )
+blocks.addBlockData('ForceCalculation', ForceCalculationMasks.addToBlock)
 
-blocks.addBlockData( 'ForceCalculation', ForceCalculationMasks.addToBlock )
+sweep = lbm.makeCellwiseSweep(blocks, "pdfs", flagFieldID='flags', flagList=['fluid'])
 
-sweep = lbm.makeCellwiseSweep( blocks, "pdfs", flagFieldID='flags', flagList=['fluid'] )
+scheme = createUniformBufferedScheme(blocks, 'D3Q19')
+scheme.addDataToCommunicate(field.createPackInfo(blocks, 'pdfs'))
 
-scheme = createUniformBufferedScheme( blocks, 'D3Q19')
-scheme.addDataToCommunicate( field.createPackInfo( blocks, 'pdfs') )
 
 def timestep():
     scheme()
-    for block in blocks: block['boundary']()
-    for block in blocks: sweep.streamCollide( block )
+    for block in blocks:
+        block['boundary']()
+    for block in blocks:
+        sweep.streamCollide(block)
     return calculateForceOnBoundary(blocks)
 
-def run( timesteps ):
+
+def run(timesteps):
     for t in range(timesteps):
         scheme()
-        for block in blocks: block['boundary']()
-        for block in blocks: sweep.streamCollide( block )
+        for block in blocks:
+            block['boundary']()
+        for block in blocks:
+            sweep.streamCollide(block)
 
 
 def makeAnimation(blocks, interval=30, frames=180):
@@ -132,50 +139,48 @@ def makeAnimation(blocks, interval=30, frames=180):
 
     plt.style.use('ggplot')
     NR_OF_TIMESTEPS_SHOWN = 600
-    lifts  = []
+    lifts = []
 
     fig = plt.gcf()
-    f = field.gather( blocks, 'rho', makeSlice[:,:,0.5] )
+    f = field.gather(blocks, 'rho', makeSlice[:, :, 0.5])
     im = None
 
     ymax = [0.05]
     if f:
         npField = field.toArray(f).squeeze()
-        npField = np.rot90( npField,1 )
+        npField = np.rot90(npField, 1)
 
-        plt.subplot(2,1,1)
+        plt.subplot(2, 1, 1)
         plt.title("Lattice Density")
-        im = plt.imshow( npField )
+        im = plt.imshow(npField)
         plt.colorbar()
 
-        plt.subplot(2,1,2)
+        plt.subplot(2, 1, 2)
         plt.title("Lift")
-        plt.ylim( 0, ymax[0] )
-        plt.xlim( 0,NR_OF_TIMESTEPS_SHOWN )
+        plt.ylim(0, ymax[0])
+        plt.xlim(0, NR_OF_TIMESTEPS_SHOWN)
         liftPlot, = plt.plot(lifts)
 
-
     def updatefig(*args):
         force = timestep()
-        f = field.gather( blocks, 'rho', makeSlice[:,:,0.5] )
+        f = field.gather(blocks, 'rho', makeSlice[:, :, 0.5])
         if f:
             npField = field.toArray(f).squeeze()
-            npField = np.rot90( npField, 1 )
-            im.set_array( npField)
+            npField = np.rot90(npField, 1)
+            im.set_array(npField)
             im.autoscale()
-            if lifts and max( lifts ) *1.2 > ymax[0]:
-                ymax[0] = max(lifts) *1.2
-                liftPlot.axes.set_ylim( 0, ymax[0] )
+            if lifts and max(lifts) * 1.2 > ymax[0]:
+                ymax[0] = max(lifts) * 1.2
+                liftPlot.axes.set_ylim(0, ymax[0])
 
-            lifts.append( force[1] )
+            lifts.append(force[1])
             nrOfSamples = len(lifts)
-            xMin = max(0,nrOfSamples-NR_OF_TIMESTEPS_SHOWN)
-            liftPlot.axes.set_xlim( xMin, xMin+NR_OF_TIMESTEPS_SHOWN )
-            liftPlot.set_data( np.arange( nrOfSamples ), lifts )
+            xMin = max(0, nrOfSamples - NR_OF_TIMESTEPS_SHOWN)
+            liftPlot.axes.set_xlim(xMin, xMin + NR_OF_TIMESTEPS_SHOWN)
+            liftPlot.set_data(np.arange(nrOfSamples), lifts)
             return im, liftPlot
 
-    return animation.FuncAnimation( fig, updatefig, interval=interval, frames=frames, blit=False, repeat=False )
-
+    return animation.FuncAnimation(fig, updatefig, interval=interval, frames=frames, blit=False, repeat=False)
 
 
 showPlots = False
@@ -183,11 +188,11 @@ showPlots = False
 if showPlots:
     import waLBerla.plot as wplt
 
-    setFieldUsingFlagMask(blocks, 'pdfs', np.NaN, 'flags', ['NoSlip'] )
+    setFieldUsingFlagMask(blocks, 'pdfs', np.NaN, 'flags', ['NoSlip'])
     run(1)
-    setFieldUsingFlagMask(blocks, 'pdfs', np.NaN, 'flags', ['NoSlip'] )
+    setFieldUsingFlagMask(blocks, 'pdfs', np.NaN, 'flags', ['NoSlip'])
 
-    ani = makeAnimation( blocks, frames=6000, )
+    ani = makeAnimation(blocks, frames=6000)
     wplt.show()
 else:
-    run(10)
\ No newline at end of file
+    run(10)
diff --git a/python/waLBerla_tests/tools/test_lbm_unitconversion.py b/python/waLBerla_tests/tools/test_lbm_unitconversion.py
index e8fd0a5e2..72d87aa35 100644
--- a/python/waLBerla_tests/tools/test_lbm_unitconversion.py
+++ b/python/waLBerla_tests/tools/test_lbm_unitconversion.py
@@ -1,78 +1,77 @@
 import unittest
 
 
-class UnitConversionTest( unittest.TestCase ):
-    
+class UnitConversionTest(unittest.TestCase):
+
     def testExtractLatticeFactors(self):
         try:
-            import pint
+            import pint  # noqa: F401
         except ImportError:
             print("Skipping unit conversion test since pint module not available")
             return
 
         from waLBerla.tools.lbm_unitconversion import extractLatticeFactors, computeLatticeFactors
-        example = { 'rho': '4000kg/m**3',
-                    'l_m': '5um',
-                    'tau': '0.6',
-                    'nu' : '1e-6m/s**2',
-                    'l_K': '1mK' }
+        example = {'rho': '4000kg/m**3',
+                   'l_m': '5um',
+                   'tau': '0.6',
+                   'nu': '1e-6m/s**2',
+                   'l_K': '1mK'}
 
-        #from pprint import pprint
+        # from pprint import pprint
         lf1 = extractLatticeFactors(example)
         lf2 = computeLatticeFactors(rho=4000, l_m=5e-6, tau=0.6, nu=1e-6, l_K=1e-3)
-        self.assertEqual( lf1, lf2 )
+        self.assertEqual(lf1, lf2)
 
-        lf = computeLatticeFactors(rho=4000, l_m=5e-6, tau=0.6, l_s=1e-8, l_K=1e-3, l_A=1e-2)
-        lf = computeLatticeFactors(rho=4000, l_m=5e-6, l_s=1e-8, l_K=1e-3)
-        lf = computeLatticeFactors(rho=4000, l_m=5e-6, time=1e-6, l_time=50)
+        print(computeLatticeFactors(rho=4000, l_m=5e-6, tau=0.6, l_s=1e-8, l_K=1e-3, l_A=1e-2))
+        print(computeLatticeFactors(rho=4000, l_m=5e-6, l_s=1e-8, l_K=1e-3))
+        print(computeLatticeFactors(rho=4000, l_m=5e-6, time=1e-6, l_time=50))
 
     def testUnitConverter(self):
         try:
-            import pint
+            import pint  # noqa: F401
         except ImportError:
             print("Skipping unit conversion test since pint module not available")
             return
 
         from waLBerla.tools.lbm_unitconversion import PintUnitConverter
 
-        conv = PintUnitConverter( l_m = 5e-6, l_s = 1e-8, l_kg = 5e-13, l_K = 1e-3, l_A = 1 )
+        conv = PintUnitConverter(l_m=5e-6, l_s=1e-8, l_kg=5e-13, l_K=1e-3, l_A=1)
 
         ureg = conv.ureg
-    
-        rho = 4000 * ureg.kg / ureg.m**3
-        vol = 1.0 * ureg.mm**3
+
+        rho = 4000 * ureg.kg / ureg.m ** 3
+        vol = 1.0 * ureg.mm ** 3
         mass = rho * vol
-        T   = ureg.Quantity('1934 K')
-        I   = ureg.Quantity('1.9 mA')
-        c_p = ureg.Quantity( '700.0 J/(kg*K)' )
-        
-        
-        self.assertAlmostEqual(rho.to('l_kg/l_m^3').magnitude, 1.0,      places=8 )
-        self.assertAlmostEqual(mass.to('l_kg')     .magnitude, 8000000 , places=8)
-        self.assertAlmostEqual(vol.to( 'l_m**3' )  .magnitude, 8000000 , places=8)
-        self.assertAlmostEqual(vol.to( 'l_m**3' )  .magnitude, 8000000 , places=8)
-        self.assertAlmostEqual(I.to( 'l_A' )       .magnitude, 0.0019 ,  places=8)
-
-        self.assertAlmostEqual(T.to( 'l_K' )       .magnitude, 1934000,  places=8)
-        self.assertAlmostEqual(mass.to_base_units().magnitude, 4e-6,  places=8)
-        
+        T = ureg.Quantity('1934 K')
+        I = ureg.Quantity('1.9 mA')
+        print(ureg.Quantity('700.0 J/(kg*K)'))
+
+        self.assertAlmostEqual(rho.to('l_kg/l_m^3').magnitude, 1.0, places=8)
+        self.assertAlmostEqual(mass.to('l_kg').magnitude, 8000000, places=8)
+        self.assertAlmostEqual(vol.to('l_m**3').magnitude, 8000000, places=8)
+        self.assertAlmostEqual(I.to('l_A').magnitude, 0.0019, places=8)
+
+        self.assertAlmostEqual(T.to('l_K').magnitude, 1934000, places=8)
+        self.assertAlmostEqual(mass.to_base_units().magnitude, 4e-6, places=8)
+
         u = conv.ureg
         c1 = {
-            'Parameters' : {
+            'Parameters': {
                 'unitlessParam': 2,
                 'param2': 4e-6 * u.kg,
             }
         }
         c2 = {
-            'Parameters' : {
+            'Parameters': {
                 'unitlessParam': 2,
-                'param2':  '4e-6 kg',
+                'param2': '4e-6 kg',
             }
-        }    
-        c1 = conv.conv_config( c1 )
-        c2 = conv.conv_config_unit_strings( c2 )
+        }
+        c1 = conv.conv_config(c1)
+        c2 = conv.conv_config_unit_strings(c2)
         self.assertEqual(c1, c2)
 
-            
+
 if __name__ == '__main__':
-    unittest.main()        
\ No newline at end of file
+    unittest.main()
diff --git a/python/waLBerla_tests/tools/test_sqlitedb.py b/python/waLBerla_tests/tools/test_sqlitedb.py
index 854b9de4d..939ff65c5 100644
--- a/python/waLBerla_tests/tools/test_sqlitedb.py
+++ b/python/waLBerla_tests/tools/test_sqlitedb.py
@@ -3,27 +3,28 @@ import os
 import tempfile
 import shutil
 
-from waLBerla.tools.sqlitedb import *
+from waLBerla.tools.sqlitedb import checkAndUpdateSchema, storeSingle, storeMultiple
 
 
-class SqliteDBTest( unittest.TestCase ):
+class SqliteDBTest(unittest.TestCase):
 
     def testSimpleInsertion(self):
         try:
             d = tempfile.mkdtemp()
 
-            dbFile = os.path.join( d, "database.sqlite" )
-            myData = { 'integerCol': 5, 'stringCol': 'someTestString', 'realCol': 3.141 }
+            dbFile = os.path.join(d, "database.sqlite")
+            myData = {'integerCol': 5, 'stringCol': 'someTestString', 'realCol': 3.141}
 
-            checkAndUpdateSchema( myData, "sometable", dbFile=dbFile )
-            runId = storeSingle( myData, 'sometable', dbFile=dbFile )
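+            # checkAndUpdateSchema creates or extends the table so that its
+            # columns match the keys of the given dict.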
+            checkAndUpdateSchema(myData, "sometable", dbFile=dbFile)
+            runId = storeSingle(myData, 'sometable', dbFile=dbFile)
 
-            valueData = {'x': [ 1,2,3,4,5], 'y': [ 3,2,5,1,0] }
-            checkAndUpdateSchema( valueData, "data", dbFile=dbFile, referenceRuns=True )
-            storeMultiple ( valueData, "data", dbFile=dbFile, runId = runId )
+            valueData = {'x': [1, 2, 3, 4, 5], 'y': [3, 2, 5, 1, 0]}
+            checkAndUpdateSchema(valueData, "data", dbFile=dbFile, referenceRuns=True)
+            storeMultiple(valueData, "data", dbFile=dbFile, runId=runId)
 
         finally:
             shutil.rmtree(d)
 
+
 if __name__ == '__main__':
     unittest.main()
diff --git a/src/stencil/generate.py b/src/stencil/generate.py
index 3599a98f7..47291f67d 100755
--- a/src/stencil/generate.py
+++ b/src/stencil/generate.py
@@ -1,104 +1,83 @@
 #!/usr/bin/python
-
-
+import re
+import numpy as np
 
 # -------------------------------------------------------------------------------------------------
 # --------------------    Definition of all Stencils    -------------------------------------------
 # -------------------------------------------------------------------------------------------------
 
 
-
 # Template input file
 templateFile = "Stencil.in.h"
 
-
 # Directions have to be in the same order as they are defined in the Directions.h
 # they also have to have same names
 
-directions = ['C','N','S','W','E','T','B',
-              'NW','NE','SW','SE','TN','TS',
-              'TW','TE','BN','BS','BW','BE',
-              'TNE','TNW','TSE','TSW','BNE','BNW','BSE','BSW'];
+directions = ['C', 'N', 'S', 'W', 'E', 'T', 'B',
+              'NW', 'NE', 'SW', 'SE', 'TN', 'TS',
+              'TW', 'TE', 'BN', 'BS', 'BW', 'BE',
+              'TNE', 'TNW', 'TSE', 'TSW', 'BNE', 'BNW', 'BSE', 'BSW']
 
-directionCoords  = [ [ 0, 0, 0 ], # C 
-                     [ 0, 1, 0 ], # N
-                     [ 0,-1, 0 ], # S
-                     [-1, 0, 0 ], # W
-                     [ 1, 0, 0 ], # E
-                     [ 0, 0, 1 ], # T
-                     [ 0, 0,-1 ], # B
-                    ]
+directionCoords = [[0, 0, 0],  # C
+                   [0, 1, 0],  # N
+                   [0, -1, 0],  # S
+                   [-1, 0, 0],  # W
+                   [1, 0, 0],  # E
+                   [0, 0, 1],  # T
+                   [0, 0, -1],  # B
+                   ]
 
 # List of all stencils
 # Edit this to add new stencils (make sure name is unique)
 stencils = [
-            { 'name' : 'D2Q4', 'dim' : 2, 'dirs' : directions[1: 5] },
-            { 'name' : 'D2Q5', 'dim' : 2, 'dirs' : directions[ : 5] },
-            { 'name' : 'D2Q9', 'dim' : 2, 'dirs' : ['C','N','S','W','E','NW','NE','SW','SE'] },
-            { 'name' : 'D2CornerStencil', 'dim' : 2, 'dirs' : ['NW','NE','SW','SE'] },
-            { 'name' : 'D3Q6', 'dim' : 3, 'dirs' : directions[1: 7] },            
-            { 'name' : 'D3Q7', 'dim' : 3, 'dirs' : directions[ : 7] },
-            { 'name' : 'D3Q19','dim' : 3, 'dirs' : directions[ :19] },
-            { 'name' : 'D3Q15','dim' : 3, 'dirs' : ['C','N','S','W','E','T','B','TNE','TNW','TSE','TSW','BNE','BNW','BSE','BSW'] },
-            { 'name' : 'D3Q27','dim' : 3, 'dirs' : directions[ :27] },
-            #also possible: pick directions as you need them in arbitrary order:
-            { 'name' : 'EdgeStencil','dim' : 3, 'dirs' : ['NW','NE','SW','SE','TN','TS','TW','TE','BN','BS','BW','BE'] },
-            { 'name' : 'D3CornerStencil', 'dim': 3, 'dirs' : directions[19:27] },
-            { 'name' : 'D3EdgeCornerStencil', 'dim': 3, 'dirs' : directions[7:27] }
-           ]
-
-
-
-
-# ---------------------------------------------------------------------------------------------------------------------   
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    {'name': 'D2Q4', 'dim': 2, 'dirs': directions[1: 5]},
+    {'name': 'D2Q5', 'dim': 2, 'dirs': directions[: 5]},
+    {'name': 'D2Q9', 'dim': 2, 'dirs': ['C', 'N', 'S', 'W', 'E', 'NW', 'NE', 'SW', 'SE']},
+    {'name': 'D2CornerStencil', 'dim': 2, 'dirs': ['NW', 'NE', 'SW', 'SE']},
+    {'name': 'D3Q6', 'dim': 3, 'dirs': directions[1: 7]},
+    {'name': 'D3Q7', 'dim': 3, 'dirs': directions[: 7]},
+    {'name': 'D3Q19', 'dim': 3, 'dirs': directions[:19]},
+    {'name': 'D3Q15', 'dim': 3,
+     'dirs': ['C', 'N', 'S', 'W', 'E', 'T', 'B', 'TNE', 'TNW', 'TSE', 'TSW', 'BNE', 'BNW', 'BSE', 'BSW']},
+    {'name': 'D3Q27', 'dim': 3, 'dirs': directions[:27]},
+    # also possible: pick directions as you need them in arbitrary order:
+    {'name': 'EdgeStencil', 'dim': 3, 'dirs': ['NW', 'NE', 'SW', 'SE', 'TN', 'TS', 'TW', 'TE', 'BN', 'BS', 'BW', 'BE']},
+    {'name': 'D3CornerStencil', 'dim': 3, 'dirs': directions[19:27]},
+    {'name': 'D3EdgeCornerStencil', 'dim': 3, 'dirs': directions[7:27]}
+]
 
+# ---------------------------------------------------------------------------------------------------------------------
 
 
 # ---------------------------------------------------------------------------------------------------------------------
 # --------------------    Code for Stencil Generation -----------------------------------------------------------------
 # ---------------------------------------------------------------------------------------------------------------------
 
-import re
-import numpy as np
+def directionToCoordinate(dir):
+    newCoord = np.zeros(3, dtype='i')
+    for character in dir:
+        index = directions.index(character)
+        newCoord = newCoord + np.array(directionCoords[index], dtype='i')
+    return list(newCoord)
 
 
-def directionToCoordinate( dir ):
-    newCoord = np.zeros( 3, dtype='i')
-    for character in dir:
-        index = directions.index( character )
-        newCoord = newCoord + np.array( directionCoords[index], dtype='i' )
-    return  list( newCoord )
-
-def coordinateToDirection( coord ):
-    xPart = [coord[0],0,0]   
-    yPart = [0,coord[1],0]    
-    zPart = [0,0,coord[2]]
-    
+def coordinateToDirection(coord):
+    xPart = [coord[0], 0, 0]
+    yPart = [0, coord[1], 0]
+    zPart = [0, 0, coord[2]]
+
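+    # Assemble the name in z-y-x order, so that e.g. (1, 1, 1) yields "TNE".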
     directionStr = ""
-    if zPart in directionCoords[1:]: directionStr += directions[ directionCoords.index(zPart) ]
-    if yPart in directionCoords[1:]:   directionStr += directions[ directionCoords.index(yPart) ]
-    if xPart in directionCoords[1:]:   directionStr += directions[ directionCoords.index(xPart) ]
-    
-    if directionStr =="":
+    if zPart in directionCoords[1:]:
+        directionStr += directions[directionCoords.index(zPart)]
+    if yPart in directionCoords[1:]:
+        directionStr += directions[directionCoords.index(yPart)]
+    if xPart in directionCoords[1:]:
+        directionStr += directions[directionCoords.index(xPart)]
+
+    if directionStr == "":
         directionStr = "C"
-     
-    return directionStr
 
+    return directionStr
 
 
 header = """//====================================================================================================================
@@ -112,21 +91,22 @@ footer = """
 #endif // DOXY_SKIP_INTERNAL
 """
 
-def isSubDirection(general,specific):
+
+def isSubDirection(general, specific):
     """ Example: general="N", specific= "NW" -> true
                  general="W", specific= "SE" -> false
     """
     for char in general:
-        if not char in specific:
+        if char not in specific:
             return False
     return True
 
 
-def indexFromDir (dirs):
+def indexFromDir(dirs):
     """ Return list of indices that elements in dir array have in global
         directions array"""
-    res = []   
-    index = 0;
+    res = []
+    index = 0
     for d in directions:
         if (d in dirs):
             res.append(index)
@@ -134,123 +114,120 @@ def indexFromDir (dirs):
         else:
             res.append("INVALID_DIR")
 
-    return res;
+    return res
 
 
-def generate_d_per_d (dirs):
+def generate_d_per_d(dirs):
     """ Generate d_per_d array from directions"""
     d_per_d = []
     d_per_d_length = []
-    
+
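+    # For each global direction, collect all stencil directions that contain it
+    # as a sub-direction (e.g. "N" collects "N", "NW", "NE", "TN", ...).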
     for globalDir1 in directions:
         subdirs = []
         for localDir in dirs:
             if isSubDirection(globalDir1, localDir):
                 subdirs.append(localDir)
-        
-        d_per_d.append( "{" + ",".join(subdirs) + "}" )
-        d_per_d_length.append( len(subdirs))
-        
-    return (d_per_d,d_per_d_length)
-    
 
+        d_per_d.append("{" + ",".join(subdirs) + "}")
+        d_per_d_length.append(len(subdirs))
 
-def generate_dir_pos( dirs ):
-    """ Generates an array containing only half of the directions. This can be used to iterate over 
+    return (d_per_d, d_per_d_length)
+
+
+def generate_dir_pos(dirs):
+    """ Generates an array containing only half of the directions. This can be used to iterate over
         half the stencil, fetching the remaining direction using inv_dir.
         Only works for symmetrical stencils"""
     result = []
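+    # A direction is kept if its first non-zero coordinate component is +1.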
     for d in dirs:
-        c = directionToCoordinate( d )
-        if c[0]==1  or ( c[0]==0 and c[1]==1) or ( c[0]==0 and c[1]==0 and c[2]==1 ):
+        c = directionToCoordinate(d)
+        if c[0] == 1 or (c[0] == 0 and c[1] == 1) or (c[0] == 0 and c[1] == 0 and c[2] == 1):
             result.append(d)
-            
-    #assert( len(dirs) / 2 == len(result) ) # Fails for asymmetric stencils
+
+    # assert( len(dirs) / 2 == len(result) ) # Fails for asymmetric stencils
     return result
-    
-    
-def getNeighborsOfDirection ( startDir, walkingDirs, possibleResults ):
+
+
+def getNeighborsOfDirection(startDir, walkingDirs, possibleResults):
     """ This function views a direction as a cell (i.e. the cell where the direction points to)
         The whole domain consists of the 3x3x3 grid (other cells do not exist).
         The function returns all neighboring cells ( in the 3x3x3 grid ) of a given cell
-    
+
         Input: a start direction (i.e. cell where to start)
                walkingDirs: list of directions, in which the neighboring directions are searched, starting from startDir
                             in this argument, directions are interpreted as directions not as cells!
                possibleResults: if direction not in this list, then it is not included in the results
                """
     result = []
-    curDirCoord =  directionToCoordinate( startDir )
-    
+    curDirCoord = directionToCoordinate(startDir)
+
     for walkingDir in walkingDirs:
-        neighborCoord = directionToCoordinate( walkingDir )
-        sumDirCoord = [ curDirCoord[0] + neighborCoord[0],
-                        curDirCoord[1] + neighborCoord[1],
-                        curDirCoord[2] + neighborCoord[2] ]
-        
-        if sumDirCoord[0] > 1 or sumDirCoord[0] < -1: continue
-        if sumDirCoord[1] > 1 or sumDirCoord[1] < -1: continue
-        if sumDirCoord[2] > 1 or sumDirCoord[2] < -1: continue
+        neighborCoord = directionToCoordinate(walkingDir)
+        sumDirCoord = [curDirCoord[0] + neighborCoord[0],
+                       curDirCoord[1] + neighborCoord[1],
+                       curDirCoord[2] + neighborCoord[2]]
+
+        if sumDirCoord[0] > 1 or sumDirCoord[0] < -1:
+            continue
+        if sumDirCoord[1] > 1 or sumDirCoord[1] < -1:
+            continue
+        if sumDirCoord[2] > 1 or sumDirCoord[2] < -1:
+            continue
 
-        sumDir = coordinateToDirection( sumDirCoord )
+        sumDir = coordinateToDirection(sumDirCoord)
 
         if sumDir not in possibleResults:
             continue
-        
+
         if sumDir == startDir:
             continue
-        
-        result.append( sumDir )
-    
-    
-    result = sorted(result, key = lambda d: directions.index(d) )
-    
+
+        result.append(sumDir)
+
+    result = sorted(result, key=lambda d: directions.index(d))
+
     return result
 
 
-def generateNeighborsOfDirection( stencilDirs ):
+def generateNeighborsOfDirection(stencilDirs):
     neighborList = []
     lengthList = []
-    for d in directions :
-        neighbors = getNeighborsOfDirection(d, stencilDirs, directions )
-        neighborList.append( "{" + ",".join( neighbors ) + "}" )
-        lengthList.append  ( str( len(neighbors) ) )
-    
-    return ( neighborList, lengthList )
+    for d in directions:
+        neighbors = getNeighborsOfDirection(d, stencilDirs, directions)
+        neighborList.append("{" + ",".join(neighbors) + "}")
+        lengthList.append(str(len(neighbors)))
+
+    return (neighborList, lengthList)
 
-    
 
-    
-# ---------------------------------------------------------------------------------------------------------------------   
-    
-    
+# ---------------------------------------------------------------------------------------------------------------------
+
 
-tmplFile = open(templateFile,"r") 
+tmplFile = open(templateFile, "r")
 
-# Pythons string.format() function takes a string where the parts that should be substituted# are marked with "{varname}" 
+# Python's string.format() function takes a string where
+# the parts that should be substituted are marked with "{varname}"
 # Every curly bracket that should be left unaffected by the substitution has to be expressed as a double curly bracket
 # Since we want to do the substitution in a C++ header file, this syntax is inconvenient.
 
 # So we define a new substitution rule: instead of writing {varname} we write $varname
-# The first step is not, to prepare the read template file 
+# The first step is now to prepare the template file that was read in
 
-# Because of Python's substitution mechanism, all curly brackets have to be replaced by double 
+# Because of Python's substitution mechanism, all curly brackets have to be replaced by double
 # curly brackets
-tmpl = tmplFile.read().replace("{", "{{");
-tmpl = tmpl.replace("}", "}}");
-
-# Then we replace all "$var" constructs by "{var}"    
-tmpl = re.sub("\$[A-Za-z_]*", (lambda x:  "{" + x.group(0)[1:] + "}") ,tmpl)
+tmpl = tmplFile.read().replace("{", "{{")
+tmpl = tmpl.replace("}", "}}")
 
+# Then we replace all "$var" constructs by "{var}"
+tmpl = re.sub("\$[A-Za-z_]*", (lambda x: "{" + x.group(0)[1:] + "}"), tmpl)  # noqa: W605
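+# Example with a hypothetical template line: "struct $name {" becomes
+# "struct $name {{" after brace doubling, then "struct {name} {{", so that
+# tmpl.format(name="D3Q19") finally yields "struct D3Q19 {".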
 
 """Reads stencil dict and generates header files"""
 for stencil in stencils:
-    
     # Make sure that all directions are sorted as in global direction array
     # and implicitly check that all entries of dirs are in directions array
-    dirs = sorted(stencil['dirs'], key = lambda d: directions.index(d) )
+    dirs = sorted(stencil['dirs'], key=lambda d: directions.index(d))
     name = stencil['name']
-    
+
     # Build up a dictionary of replacements
     vals = dict()
     vals['name'] = name
@@ -259,27 +236,24 @@ for stencil in stencils:
     vals['indexFromDir'] = ",".join(str(i) for i in indexFromDir(dirs))
     vals['D'] = stencil['dim']
     vals['Q'] = len(dirs)
-    
-    (d_per_d,d_per_d_length) = generate_d_per_d(dirs)
-    vals['d_per_d'] = ",\n\t\t\t\t\t\t\t\t".join(d_per_d )
+
+    (d_per_d, d_per_d_length) = generate_d_per_d(dirs)
+    vals['d_per_d'] = ",\n\t\t\t\t\t\t\t\t".join(d_per_d)
     vals['d_per_d_length'] = ",".join(str(i) for i in d_per_d_length)
     vals['containsCenter'] = "true" if ('C' in dirs) else "false"
     vals['noCenterFirstIndex'] = "1" if ('C' in dirs) else '0'
 
-    ( dir_neighbors, dir_neighbors_length ) = generateNeighborsOfDirection ( dirs )
-    vals['dir_neighbors']        = ",\n\t\t\t\t\t\t\t\t".join(dir_neighbors )
+    (dir_neighbors, dir_neighbors_length) = generateNeighborsOfDirection(dirs)
+    vals['dir_neighbors'] = ",\n\t\t\t\t\t\t\t\t".join(dir_neighbors)
     vals['dir_neighbors_length'] = ",".join(str(i) for i in dir_neighbors_length)
 
-    vals['dir_pos'] = ",".join( generate_dir_pos(dirs) )
-    
+    vals['dir_pos'] = ",".join(generate_dir_pos(dirs))
+
     content = tmpl.format(**vals)
-    
+
     # Open file for writing
     out = open(name + ".h", 'w')
     out.write(header)
     out.write(content)
     out.write(footer)
-    out.close();
-    
-
-
+    out.close()
diff --git a/tests/blockforest/communication/timing/plot.py b/tests/blockforest/communication/timing/plot.py
index 1923e2aa3..f2f5b2f38 100644
--- a/tests/blockforest/communication/timing/plot.py
+++ b/tests/blockforest/communication/timing/plot.py
@@ -1,8 +1,11 @@
 import numpy as np
 import matplotlib
 import copy
-matplotlib.use('Qt4Agg')
 import matplotlib.pyplot as plt
+
+matplotlib.use('Qt4Agg')
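+# Note: matplotlib.use() normally has to be called before the first pyplot
+# import; invoking it here, after the import, may no longer switch the backend.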
+
+
 # ------------------- Timing -------------------------------------
 
 class Timing:
@@ -11,215 +14,209 @@ class Timing:
         self.max = 0
         self.avg = 0
 
-    def get (self, str):
-        if(str=="min"): return self.min
-        if(str=="max"): return self.max
-        if(str=="avg"): return self.avg
-        
+    def get(self, value):
+        if value == "min":
+            return self.min
+        if value == "max":
+            return self.max
+        if value == "avg":
+            return self.avg
 
     def __add__(self, other):
         ret = copy.deepcopy(self)
         ret.min += other.min
         ret.max += other.max
-        ret.avg += other.avg     
+        ret.avg += other.avg
         return ret
-        
+
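+    # __div__ is the Python 2 division protocol; under Python 3 the / operator
+    # dispatches to __truediv__ instead.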
     def __div__(self, other):
-        ret = copy.deepcopy(self)       
+        ret = copy.deepcopy(self)
         ret.min /= other
         ret.max /= other
         ret.avg /= other
         return ret
- 
-    def readFromArray(self,arr):
+
+    def readFromArray(self, arr):
         self.min = np.double(arr[0])
         self.avg = np.double(arr[1])
         self.max = np.double(arr[2])
-        
+
+
 # ---------------Module Timing------------------------------------
-        
-        
+
+
 class ModuleTiming:
     def __init__(self):
         self.total = Timing()
-        self.pack  = Timing()
-        self.mpi   = Timing()
-        self.unpack= Timing()
+        self.pack = Timing()
+        self.mpi = Timing()
+        self.unpack = Timing()
 
     def __add__(self, other):
         ret = copy.deepcopy(self)
         ret.total += other.total
-        ret.pack  += other.pack
-        ret.mpi   += other.mpi     
-        ret.unpack+= other.unpack     
+        ret.pack += other.pack
+        ret.mpi += other.mpi
+        ret.unpack += other.unpack
         return ret
- 
+
     def __div__(self, other):
-        ret = copy.deepcopy(self)       
+        ret = copy.deepcopy(self)
         ret.total /= other
-        ret.pack  /= other
-        ret.mpi   /= other     
-        ret.unpack/= other   
+        ret.pack /= other
+        ret.mpi /= other
+        ret.unpack /= other
         return ret
-    
+
     def readFromArray(self, arr):
         self.pack = Timing()
         self.pack.readFromArray(arr[0:3])
-        
+
         self.mpi = Timing()
         self.mpi.readFromArray(arr[3:6])
-        
+
         self.unpack = Timing()
         self.unpack.readFromArray(arr[6:9])
-        
-        
-        
+
+
 # ---------------Timing DataSet ----------------------------------
 
 
 class TimingDataSet:
-    """ Represents one line of a timing file""" 
+    """ Represents one line of a timing file"""
+
     def __init__(self):
-        self.functionality  =  ""
-        self.timesteps      = 0
-        self.cores          = 0
-        self.blocksize      = np.array([0,0,0])
-        self.blocks         = np.array([0,0,0])
-        
-        self.oldModule  = ModuleTiming()
-        self.newModule  = ModuleTiming()
-
-
-    def __add__(self, other):      
-        assert(self == other) 
-        ret = copy.deepcopy(self) 
+        self.functionality = ""
+        self.timesteps = 0
+        self.cores = 0
+        self.blocksize = np.array([0, 0, 0])
+        self.blocks = np.array([0, 0, 0])
+
+        self.oldModule = ModuleTiming()
+        self.newModule = ModuleTiming()
+
+    def __add__(self, other):
+        assert (self == other)
+        ret = copy.deepcopy(self)
         ret.oldModule += other.oldModule
         ret.newModule += other.newModule
         return ret
-        
+
     def __div__(self, other):
-        ret = copy.deepcopy(self) 
+        ret = copy.deepcopy(self)
         ret.oldModule /= other
         ret.newModule /= other
         return ret
 
-    def __ne__(self,other):
+    def __ne__(self, other):
         return not self.__eq__(other)
 
     def __eq__(self, other):
         if not isinstance(other, TimingDataSet):
             return False
-        
-        
-        return (self.functionality == other.functionality) and\
-               (self.timesteps     == other.timesteps    ) and\
-               (self.cores         == other.cores        ) and\
-               (self.blocksize     == other.blocksize    ).all and\
-               (self.blocks        == other.blocks       ).all;
 
+        return (self.functionality == other.functionality) and \
+               (self.timesteps == other.timesteps) and \
+               (self.cores == other.cores) and \
+               (self.blocksize == other.blocksize).all() and \
+               (self.blocks == other.blocks).all()
 
     def normalizeUnitTimeStep(self):
         self.oldModule /= self.timesteps
         self.newModule /= self.timesteps
         self.timesteps = 1
-        
-        
-    def readFromLine(self,line):
+
+    def readFromLine(self, line):
         splitted = line.split()
         self.functionality = splitted[0]
-        self.timesteps     = np.double(splitted[1])
-        self.cores         = int(splitted[2])
-        self.blocks        = np.array([ splitted[3], splitted[4], splitted[5] ] )
-        self.blocksize     = np.array([ splitted[6], splitted[7], splitted[8] ])
-        
+        self.timesteps = np.double(splitted[1])
+        self.cores = int(splitted[2])
+        self.blocks = np.array([splitted[3], splitted[4], splitted[5]])
+        self.blocksize = np.array([splitted[6], splitted[7], splitted[8]])
+
         self.oldModule = ModuleTiming()
         self.oldModule.readFromArray(splitted[9:21])
-        
+
         self.newModule = ModuleTiming()
         self.newModule.readFromArray(splitted[21:33])
-    
+
     def getLabel(self):
-        ret  = self.functionality + "\n"
-        ret += "blocks:  " + str(self.blocks)  + "\n"
+        ret = self.functionality + "\n"
+        ret += "blocks:  " + str(self.blocks) + "\n"
         ret += "bl size: " + str(self.blocksize) + "\n"
         ret += str(self.cores) + " cores"
-        return ret     
-        
+        return ret
+
 
 def parseTimingFile(fileObject):
     """Parses Timing file of following format
-       functionality #timesteps #cores  #blocks #blocksize   oldPack oldMpi oldUnpack oldTotal     newPack newMpi newUnpack newTotal
+       functionality #timesteps #cores  #blocks #blocksize
+       oldPack oldMpi oldUnpack oldTotal newPack newMpi newUnpack newTotal
        """
-   
+
     timingDataSetList = []
     for line in fileObject:
-        dataset = TimingDataSet();
+        dataset = TimingDataSet()
         dataset.readFromLine(line)
         timingDataSetList.append(dataset)
-    
+
     return timingDataSetList
-       
-       
-def plotListOfDataSets(l,str="avg"):
-    labels = [i.getLabel() for i in l ]
-    labelLoc = np.array( [i*3 for i in range(0,len(l))] )
-    
-    xLocations = np.empty([len(l)*2] )
-    packing    = np.empty([len(l)*2] )
-    mpi        = np.empty([len(l)*2] )
-    unpacking  = np.empty([len(l)*2] )
-    rest       = np.empty([len(l)*2] )
-   
-    for i in range(0,len(l) ):
-        xLocations[2*i]   = i*3
-        xLocations[2*i+1] = i*3 + 1
-        
-        packing[2*i]   = l[i].oldModule.pack.get(str)
-        packing[2*i+1] = l[i].newModule.pack.get(str)
-        
-        mpi[2*i]   = l[i].oldModule.mpi.get(str)
-        mpi[2*i+1] = l[i].newModule.mpi.get(str)
-        
-        unpacking[2*i]   = l[i].oldModule.unpack.get(str)
-        unpacking[2*i+1] = l[i].newModule.unpack.get(str)
-
-        rest[2*i]   = l[i].oldModule.total.get(str) - packing[2*i  ] - unpacking[2*i  ] - mpi[2*i  ]
-        rest[2*i+1] = l[i].newModule.total.get(str) - packing[2*i+1] - unpacking[2*i+1] - mpi[2*i+1]
-
-
-
-    plt.bar(xLocations,packing,  width=1.0, color='#00e9e1')
-    plt.bar(xLocations,mpi      ,width=1.0, bottom=packing,color='#5af441')
-    plt.bar(xLocations,unpacking,width=1.0, bottom=packing+mpi, color='#4d4dff')
-    plt.xticks(labelLoc+1, labels)
-    #plt.bar(xLocations,rest,     width=1.0, bottom=packing+mpi+unpacking,color='y')
-    plt.show()       
-    
+
+
+def plotListOfDataSets(l, value="avg"):
+    labels = [i.getLabel() for i in l]
+    labelLoc = np.array([i * 3 for i in range(0, len(l))])
+
+    xLocations = np.empty([len(l) * 2])
+    packing = np.empty([len(l) * 2])
+    mpi = np.empty([len(l) * 2])
+    unpacking = np.empty([len(l) * 2])
+    rest = np.empty([len(l) * 2])
+
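+    # Bars come in pairs: index 2*i holds the old module's timing and 2*i+1 the
+    # new module's; they are stacked below as packing / mpi / unpacking.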
+    for i in range(0, len(l)):
+        xLocations[2 * i] = i * 3
+        xLocations[2 * i + 1] = i * 3 + 1
+
+        packing[2 * i] = l[i].oldModule.pack.get(value)
+        packing[2 * i + 1] = l[i].newModule.pack.get(value)
+
+        mpi[2 * i] = l[i].oldModule.mpi.get(value)
+        mpi[2 * i + 1] = l[i].newModule.mpi.get(value)
+
+        unpacking[2 * i] = l[i].oldModule.unpack.get(value)
+        unpacking[2 * i + 1] = l[i].newModule.unpack.get(value)
+
+        rest[2 * i] = l[i].oldModule.total.get(value) - packing[2 * i] - unpacking[2 * i] - mpi[2 * i]
+        rest[2 * i + 1] = l[i].newModule.total.get(value) - packing[2 * i + 1] - unpacking[2 * i + 1] - mpi[2 * i + 1]
+
+    plt.bar(xLocations, packing, width=1.0, color='#00e9e1')
+    plt.bar(xLocations, mpi, width=1.0, bottom=packing, color='#5af441')
+    plt.bar(xLocations, unpacking, width=1.0, bottom=packing + mpi, color='#4d4dff')
+    plt.xticks(labelLoc + 1, labels)
+    # plt.bar(xLocations,rest,     width=1.0, bottom=packing+mpi+unpacking,color='y')
+    plt.show()
+
 
 def combineDataSetsWithSameInput(l):
-    
     l = copy.deepcopy(l)
-    res = [] 
-    
+    res = []
+
     while (len(l) > 0):
-        curDataSet = l[0]      
+        curDataSet = l[0]
         sameDataList = [x for x in l if x == curDataSet]
         l = [x for x in l if x != curDataSet]
-        
+
         # calculate average over all timings with same simulation parameters
         sum = sameDataList[0]
-        for i in range(1,len(sameDataList)):
+        for i in range(1, len(sameDataList)):
             sum += sameDataList[i]
-            
-        res.append( sum / len(sameDataList) )
-            
-    
+
+        res.append(sum / len(sameDataList))
+
     return res
-    
-    
-    
 
-#timingFile = "/home/bauer/code/walberlaGit/bin/tests/modules/communication2/timing.out"
+
+# timingFile = "/home/bauer/code/walberlaGit/bin/tests/modules/communication2/timing.out"
 timingFile = "/home/bauer/code/walberlaGit/timing.out"
 
 l = parseTimingFile(open(timingFile))
diff --git a/tests/cuda/codegen/CudaJacobiKernel.py b/tests/cuda/codegen/CudaJacobiKernel.py
index f8713d1d1..24a48238b 100644
--- a/tests/cuda/codegen/CudaJacobiKernel.py
+++ b/tests/cuda/codegen/CudaJacobiKernel.py
@@ -1,5 +1,4 @@
 import numpy as np
-import sympy as sp
 import pystencils as ps
 from pystencils_walberla import CodeGeneration, generate_sweep
 
@@ -18,8 +17,8 @@ with CodeGeneration() as ctx:
 
     @ps.kernel
     def kernel_func():
-        dst[0, 0, 0] @= (3 * src[1, 0, 0] + 4 * src[-1, 0, 0] +
-                         5 * src[0, 1, 0] + 6 * src[0, -1, 0] +
-                         7 * src[0, 0, 1] + 8 * src[0, 0, -1]) / 33
+        dst[0, 0, 0] @= (3 * src[1, 0, 0] + 4 * src[-1, 0, 0]
+                         + 5 * src[0, 1, 0] + 6 * src[0, -1, 0]
+                         + 7 * src[0, 0, 1] + 8 * src[0, 0, -1]) / 33
 
     generate_sweep(ctx, 'CudaJacobiKernel3D', kernel_func, field_swaps=[(src, dst)], target="gpu")
diff --git a/tests/cuda/codegen/CudaPoisson.py b/tests/cuda/codegen/CudaPoisson.py
index 53752ee96..e14cbdea8 100644
--- a/tests/cuda/codegen/CudaPoisson.py
+++ b/tests/cuda/codegen/CudaPoisson.py
@@ -1,4 +1,3 @@
-import numpy as np
 import sympy as sp
 import pystencils as ps
 from pystencils_walberla import CodeGeneration, generate_sweep
@@ -12,8 +11,8 @@ with CodeGeneration() as ctx:
 
     @ps.kernel
     def kernel_func():
-        src[0, 0] @= ((dy**2 * (src[1, 0] + src[-1, 0])) +
-                      (dx**2 * (src[0, 1] + src[0, -1])) -
-                      (rhs[0, 0] * dx**2 * dy**2)) / (2 * (dx**2 + dy**2))
+        src[0, 0] @= ((dy**2 * (src[1, 0] + src[-1, 0]))
+                      + (dx**2 * (src[0, 1] + src[0, -1]))
+                      - (rhs[0, 0] * dx**2 * dy**2)) / (2 * (dx**2 + dy**2))
 
     generate_sweep(ctx, 'PoissonGPU', kernel_func, target='gpu')
diff --git a/tests/field/PythonExportTest.py b/tests/field/PythonExportTest.py
index bbdad2be5..e8ca4ed65 100644
--- a/tests/field/PythonExportTest.py
+++ b/tests/field/PythonExportTest.py
@@ -1,40 +1,39 @@
 import unittest
 import waLBerla as wlb
+from waLBerla import field
 import numpy as np
 
 
-class BasicDatatypesTest( unittest.TestCase ):
-    
+class BasicDatatypesTest(unittest.TestCase):
+
     def test_numpyConversion(self):
-        f = wlb.field.create ( [3,3,3], ghostLayers=2 )
-        a = np.asarray( f.buffer() )
-        self.assertEqual( a.shape, (3,3,3,1) )
-        
-        
-        f = field.create ( [3,3,3], ghostLayers=2 )
+        f = wlb.field.create([3, 3, 3], ghostLayers=2)
+        a = np.asarray(f.buffer())
+        self.assertEqual(a.shape, (3, 3, 3, 1))
+
+        f = field.create([3, 3, 3], ghostLayers=2)
         f.bufferWithGhostLayers()
-        b = np.asarray( f.buffer( True ) )
-        self.assertEqual( b.shape, (7,7,7,1) )
-        
-    
+        b = np.asarray(f.buffer(True))
+        self.assertEqual(b.shape, (7, 7, 7, 1))
+
     def test_swapDataPointers(self):
-        f1 = wlb.field.createField([ 30]*3, float )
-        f2 = wlb.field.createField([ 30]*3, float )
-        
-        nf1 = np.asarray( f1.buffer() )
-        nf2 = np.asarray( f2.buffer() )
-        
+        f1 = wlb.field.createField([30] * 3, float)
+        f2 = wlb.field.createField([30] * 3, float)
+
+        nf1 = np.asarray(f1.buffer())
+        nf2 = np.asarray(f2.buffer())
+
         nf1[:] = 1
         nf2[:] = 2
-        
+
         del nf2
-        
-        f1.swapDataPointers( f2 )
-        
+
+        f1.swapDataPointers(f2)
+
         del f2
-        
-        #Should free part1
+
+        # Should free part1
         del nf1
-        
+
         # should free part2
-        del f1
\ No newline at end of file
+        del f1
diff --git a/tests/field/codegen/JacobiKernel.py b/tests/field/codegen/JacobiKernel.py
index 3421c8cdd..29e677a72 100644
--- a/tests/field/codegen/JacobiKernel.py
+++ b/tests/field/codegen/JacobiKernel.py
@@ -17,8 +17,8 @@ with CodeGeneration() as ctx:
 
     @ps.kernel
     def kernel_func():
-        dst[0, 0, 0] @= (3 * src[1, 0, 0] + 4 * src[-1, 0, 0] +
-                         5 * src[0, 1, 0] + 6 * src[0, -1, 0] +
-                         7 * src[0, 0, 1] + 8 * src[0, 0, -1]) / 33
+        dst[0, 0, 0] @= (3 * src[1, 0, 0] + 4 * src[-1, 0, 0]
+                         + 5 * src[0, 1, 0] + 6 * src[0, -1, 0]
+                         + 7 * src[0, 0, 1] + 8 * src[0, 0, -1]) / 33
 
     generate_sweep(ctx, 'JacobiKernel3D', kernel_func, field_swaps=[(src, dst)])
diff --git a/tests/field/codegen/Poisson.py b/tests/field/codegen/Poisson.py
index 6edec0315..a4da698dd 100644
--- a/tests/field/codegen/Poisson.py
+++ b/tests/field/codegen/Poisson.py
@@ -11,8 +11,8 @@ with CodeGeneration() as ctx:
 
     @ps.kernel
     def kernel_func():
-        src[0, 0] @= ((dy**2 * (src[1, 0] + src[-1, 0])) +
-                      (dx**2 * (src[0, 1] + src[0, -1])) -
-                      (rhs[0, 0] * dx**2 * dy**2)) / (2 * (dx**2 + dy**2))
+        src[0, 0] @= ((dy**2 * (src[1, 0] + src[-1, 0]))
+                      + (dx**2 * (src[0, 1] + src[0, -1]))
+                      - (rhs[0, 0] * dx**2 * dy**2)) / (2 * (dx**2 + dy**2))
 
     generate_sweep(ctx, 'Poisson', kernel_func)
diff --git a/tests/lbm/codegen/FieldLayoutAndVectorizationTest.py b/tests/lbm/codegen/FieldLayoutAndVectorizationTest.py
index 5e43eee55..58c88d426 100644
--- a/tests/lbm/codegen/FieldLayoutAndVectorizationTest.py
+++ b/tests/lbm/codegen/FieldLayoutAndVectorizationTest.py
@@ -8,18 +8,19 @@ from collections import namedtuple
 
 with CodeGeneration() as ctx:
     omega_shear = sp.symbols("omega")
-    collision_rule = create_lb_collision_rule( stencil='D2Q9', compressible=False, method='srt' )
+    collision_rule = create_lb_collision_rule(stencil='D2Q9', compressible=False, method='srt')
 
     SetupDefinition = namedtuple('SetupDefinition', ['name', 'field_layout', 'vectorization_dict'])
 
     default_vectorize_instruction_set = get_vectorize_instruction_set(ctx)
 
-    configurations = [SetupDefinition('FZYX_Vec',   'fzyx', {'instruction_set': default_vectorize_instruction_set} ),
-                      SetupDefinition('FZYX_NoVec', 'fzyx', {'instruction_set': None} ),
-                      SetupDefinition('ZYXF_Vec',   'zyxf', {'instruction_set': default_vectorize_instruction_set} ), # does/should not vectorize, but instead yield warning
-                      SetupDefinition('ZYXF_NoVec', 'zyxf', {'instruction_set': None} )]
+    configurations = [SetupDefinition('FZYX_Vec', 'fzyx', {'instruction_set': default_vectorize_instruction_set}),
+                      SetupDefinition('FZYX_NoVec', 'fzyx', {'instruction_set': None}),
+                      SetupDefinition('ZYXF_Vec', 'zyxf', {'instruction_set': default_vectorize_instruction_set}),
+                      # does/should not vectorize, but instead yield warning
+                      SetupDefinition('ZYXF_NoVec', 'zyxf', {'instruction_set': None})]
 
     for conf in configurations:
-        generate_lattice_model(ctx, 'FieldLayoutAndVectorizationTest_'+conf.name+'_LatticeModel', collision_rule,
-                               field_layout=conf.field_layout, refinement_scaling=None, cpu_vectorize_info=conf.vectorization_dict)
-
+        generate_lattice_model(ctx, 'FieldLayoutAndVectorizationTest_' + conf.name + '_LatticeModel', collision_rule,
+                               field_layout=conf.field_layout, refinement_scaling=None,
+                               cpu_vectorize_info=conf.vectorization_dict)
diff --git a/tests/lbm/codegen/FluctuatingMRT.py b/tests/lbm/codegen/FluctuatingMRT.py
index a5ffa9baf..98a8812ee 100644
--- a/tests/lbm/codegen/FluctuatingMRT.py
+++ b/tests/lbm/codegen/FluctuatingMRT.py
@@ -42,8 +42,8 @@ with CodeGeneration() as ctx:
     collision_rule = create_lb_collision_rule(
         method,
         fluctuating={
-            'temperature' : temperature,
-            'block_offsets' : 'walberla',
+            'temperature': temperature,
+            'block_offsets': 'walberla',
         },
         optimization={'cse_global': True}
     )
diff --git a/tests/lbm/codegen/LbCodeGenerationExample.py b/tests/lbm/codegen/LbCodeGenerationExample.py
index 259a9787e..bdf992612 100644
--- a/tests/lbm/codegen/LbCodeGenerationExample.py
+++ b/tests/lbm/codegen/LbCodeGenerationExample.py
@@ -14,11 +14,13 @@ with CodeGeneration() as ctx:
         stencil='D3Q19', compressible=True,
         method='mrt', relaxation_rates=[omega, omega, omega_free, omega_free, omega_free, omega_free],
         entropic=True,                    # entropic method where second omega is chosen s.t. entropy condition
-        omega_output_field=omega_out,     # scalar field where automatically chosen omega of entropic or Smagorinsky method is written to
+        omega_output_field=omega_out,     # scalar field where automatically chosen omega of entropic or
+                                          # Smagorinsky method is written to
         force=force_field.center_vector,  # read forces for each lattice cell from an external force field
                                           # that is initialized and changed in C++ app
         output={'velocity': vel_field},   # write macroscopic velocity to field in every time step
-                                          # useful for coupling multiple LB methods, e.g. hydrodynamic to advection/diffusion LBM
+                                          # useful for coupling multiple LB methods,
+                                          # e.g. hydrodynamic to advection/diffusion LBM
         optimization={'cse_global': True}
     )
 
diff --git a/tests/python_coupling/BasicDatatypeTest.py b/tests/python_coupling/BasicDatatypeTest.py
index f7f960872..bd5deff65 100644
--- a/tests/python_coupling/BasicDatatypeTest.py
+++ b/tests/python_coupling/BasicDatatypeTest.py
@@ -2,27 +2,25 @@ import unittest
 import waLBerla as wlb
 
 
-class BasicDatatypesTest( unittest.TestCase ):
-    
+class BasicDatatypesTest(unittest.TestCase):
+
     def test_CellInterval(self):
-        ci1 = wlb.CellInterval( 0,0,0, 5,5,5 )
-        ci2 = wlb.CellInterval( [0]*3, [5]*3 )
-        self.assertEqual   ( ci1, ci2,   "Equality comparison of CellIntervals failed." )
-        self.assertFalse   ( ci1 != ci2, "Inequality check for CellIntervals wrong " )
-        
-        self.assertEqual( ci1.min, (0,0,0) , "CellInterval min wrong")
-        self.assertEqual( ci1.max, (5,5,5) , "CellInterval max wrong")
-        
-        
-        self.assertFalse( ci1.empty() )
-        
-        ci1.intersect( ci2 )
-        self.assertTrue( ci1.contains( ci2 ) )
-        
-        ci2.expand( 1 )
-        self.assertFalse( ci1.contains( ci2 ) )
-        
+        ci1 = wlb.CellInterval(0, 0, 0, 5, 5, 5)
+        ci2 = wlb.CellInterval([0] * 3, [5] * 3)
+        self.assertEqual(ci1, ci2, "Equality comparison of CellIntervals failed.")
+        self.assertFalse(ci1 != ci2, "Inequality check for CellIntervals wrong ")
+
+        self.assertEqual(ci1.min, (0, 0, 0), "CellInterval min wrong")
+        self.assertEqual(ci1.max, (5, 5, 5), "CellInterval max wrong")
+
+        self.assertFalse(ci1.empty())
+
+        ci1.intersect(ci2)
+        self.assertTrue(ci1.contains(ci2))
+
+        ci2.expand(1)
+        self.assertFalse(ci1.contains(ci2))
+
     def test_AABB(self):
-        aabb1 = wlb.AABB( 0,0,0, 5,5,5 )
-        aabb2 = wlb.AABB( [0]*3, [5]*3 )
-        
\ No newline at end of file
+        aabb1 = wlb.AABB(0, 0, 0, 5, 5, 5)  # noqa: F841
+        aabb2 = wlb.AABB([0] * 3, [5] * 3)  # noqa: F841
diff --git a/tests/python_coupling/CallbackTest.py b/tests/python_coupling/CallbackTest.py
index 8c06b3444..e6aa0a7b3 100644
--- a/tests/python_coupling/CallbackTest.py
+++ b/tests/python_coupling/CallbackTest.py
@@ -1,18 +1,17 @@
 import waLBerla
-import numpy as np
+# import numpy as np
 
 
 @waLBerla.callback("cb1")
-def someCallback( input1, input2 ):
+def someCallback(input1, input2):
     return input1 + input2
 
 
 @waLBerla.callback("cb2")
 def fieldCallback(field):
-    npArray = waLBerla.field.toArray( field )
-    npArray[0,0,0] = 42
-    
-    npArrayGl = waLBerla.field.toArray( field, withGhostLayers=True )
-    print( npArrayGl.shape )
-    npArrayGl[0,0,0] = 5
-    
\ No newline at end of file
+    npArray = waLBerla.field.toArray(field)
+    npArray[0, 0, 0] = 42
+
+    npArrayGl = waLBerla.field.toArray(field, withGhostLayers=True)
+    print(npArrayGl.shape)
+    npArrayGl[0, 0, 0] = 5
diff --git a/tests/python_coupling/ConfigFromPythonTest.py b/tests/python_coupling/ConfigFromPythonTest.py
index ebced8d18..9c4a4f45a 100644
--- a/tests/python_coupling/ConfigFromPythonTest.py
+++ b/tests/python_coupling/ConfigFromPythonTest.py
@@ -4,12 +4,12 @@ import waLBerla
 @waLBerla.callback("config")
 def waLBerlaConfig():
     conf = {
-	'testInt': 4, 
-	'testString': "someString",
-	'testDouble': 42.42,
-	'44242': 'ohoh_IntegerKey',
-	'subBlock': {'subKey1' : 'abc',
-		    'subKey2' : 'def'
-		    }
+        'testInt': 4,
+        'testString': "someString",
+        'testDouble': 42.42,
+        '44242': 'ohoh_IntegerKey',
+        'subBlock': {'subKey1': 'abc',
+                     'subKey2': 'def'
+                     }
     }
-    return  conf
+    return conf
diff --git a/tests/python_coupling/FieldExportTest.py b/tests/python_coupling/FieldExportTest.py
index becce3070..7790e9d2c 100644
--- a/tests/python_coupling/FieldExportTest.py
+++ b/tests/python_coupling/FieldExportTest.py
@@ -2,10 +2,9 @@ import waLBerla
 from waLBerla.field import toArray
 import numpy as np
 
+
 @waLBerla.callback("theCallback")
-def theCallback( blocks ):
+def theCallback(blocks):
     for block in blocks:
-        np.copyto( toArray( block['vec2Field']), toArray( block['sca2Field']) )
-        np.copyto( toArray( block['vec3Field']), toArray( block['sca3Field']) )
-
-
+        np.copyto(toArray(block['vec2Field']), toArray(block['sca2Field']))
+        np.copyto(toArray(block['vec3Field']), toArray(block['sca3Field']))
diff --git a/utilities/bashhelper/correspondingDirs.py b/utilities/bashhelper/correspondingDirs.py
index 288a4fd0e..500c89a05 100755
--- a/utilities/bashhelper/correspondingDirs.py
+++ b/utilities/bashhelper/correspondingDirs.py
@@ -4,7 +4,6 @@ import sys
 import os
 import os.path
 
-
 DIR1 = sys.argv[1]
 DIR2 = sys.argv[2]
 
@@ -15,8 +14,8 @@ def is_in_folder(filename, folder):
     fd = os.path.normpath(folder)
 
     if fn == fd:
-      return True
-    
+        return True
+
     # get common prefix
     commonprefix = os.path.commonprefix([fn, fd])
     if commonprefix == fd:
@@ -31,11 +30,11 @@ def is_in_folder(filename, folder):
     return False
 
 
-if is_in_folder( os.getcwd(), DIR1 ):
-  rel = os.path.relpath( os.getcwd(), DIR1) 
-  print (DIR2 + "/" + rel)
-elif is_in_folder( os.getcwd(), DIR2 ):
-  rel = os.path.relpath( os.getcwd(), DIR2) 
-  print (DIR1 + "/" + rel)
+if is_in_folder(os.getcwd(), DIR1):
+    rel = os.path.relpath(os.getcwd(), DIR1)
+    print(DIR2 + "/" + rel)
+elif is_in_folder(os.getcwd(), DIR2):
+    rel = os.path.relpath(os.getcwd(), DIR2)
+    print(DIR1 + "/" + rel)
 else:
-  print ( os.getcwd() )
+    print(os.getcwd())
diff --git a/utilities/bashhelper/createShortcuts.py b/utilities/bashhelper/createShortcuts.py
index af3dd5427..ac05e7537 100755
--- a/utilities/bashhelper/createShortcuts.py
+++ b/utilities/bashhelper/createShortcuts.py
@@ -1,64 +1,60 @@
 #!/usr/bin/env python3
 from __future__ import print_function
-from sys import argv
+# from sys import argv
 from os import path
 import argparse
 
-
 usage = """
     Example usage:
     Assume you have a waLBerla app in ~/code/app and the waLBerla sources in ~/code/wlb
     Add the following lines to your .bashrc:
-        
+
         createShortcuts.py ~/code/wlb wl-     ~/code/wlb  > ~/.waLBerla_shortcuts
-        createShortcuts.py ~/code/app myapp-  ~/code/app --app --build ~/build/app  --walberla_source ~/code/wlb  >> ~/.waLBerla_shortcuts
+        createShortcuts.py ~/code/app myapp-  ~/code/app --app --build ~/build/app  --walberla_source ~/code/wlb >> ~/.waLBerla_shortcuts
         source file_to_source
-        
+
     Then the following aliases are available:
         wl-s     cd ~/code/wlb
         wl-ss    cd ~/code/wlb/src
         wl-st    cd ~/code/wlb/tests
         wl-sa    cd ~/code/wlb/apps
-        
+
         app-s    cd ~/code/app
         app-sa   cd ~/code/app/apps
         app-b    cd ~/build/app
-        app-ba   cd ~/build/app/apps 
-        
+        app-ba   cd ~/build/app/apps
+
         app-make
         app-ccmake
         app-git
 """
 
-
 parser = argparse.ArgumentParser()
-parser.add_argument('prefix',           help='Shortcut Prefix')
-parser.add_argument('src',              help='Source directory')
-parser.add_argument('--build',          help='Build directory')
-parser.add_argument('--cmake_def',      help='CMake Definitions')
-parser.add_argument('--walberla_source',help='waLBerla source directory')
-parser.add_argument('--app'            ,help='Generate shortcut for apps',     action="store_true")
-parser.add_argument('--no_ccache'      ,help='CMake macros do not use ccache', action="store_true")
-parser.add_argument('--shell'          ,help='Supported shells bash and fish', default="bash" )
+parser.add_argument('prefix', help='Shortcut Prefix')
+parser.add_argument('src', help='Source directory')
+parser.add_argument('--build', help='Build directory')
+parser.add_argument('--cmake_def', help='CMake Definitions')
+parser.add_argument('--walberla_source', help='waLBerla source directory')
+parser.add_argument('--app', help='Generate shortcut for apps', action="store_true")
+parser.add_argument('--no_ccache', help='CMake macros do not use ccache', action="store_true")
+parser.add_argument('--shell', help='Supported shells: bash and fish', default="bash")
 args = parser.parse_args()
 
 if args.app:
     if not args.prefix:
-        print( 'When creating shortcuts for a waLBerla app you have to define a prefix (--prefix)' )
+        print('When creating shortcuts for a waLBerla app you have to define a prefix (--prefix)')
         exit(1)
     if not args.walberla_source:
-        print( 'When creating shortcuts for a waLBerla app you have to define the path to waLBerla sources (--walberla_source)' )
+        print('When creating shortcuts for a waLBerla app you have to define '
+              'the path to waLBerla sources (--walberla_source)')
         exit(1)
-        
 
 # Assume that a "folderComplete.py" script is located in the same directory with this script
-dir_of_current_script = path.dirname( path.realpath(__file__) )
-complete_script       = path.join( dir_of_current_script, 'folderComplete.py' ) 
-
+dir_of_current_script = path.dirname(path.realpath(__file__))
+complete_script = path.join(dir_of_current_script, 'folderComplete.py')
 
 cd_shortcut_rule = dict()
 
-
 cd_shortcut_rule['bash'] = """
 {short_command_name}() {{
   cd {dir}/$1
@@ -79,17 +75,17 @@ complete -f --command {short_command_name} --arguments '(__fish_complete_directo
 """
 
 
+def create_cd_shortcut_rule(short_command_name, dir, shell):
+    return cd_shortcut_rule[shell].format(complete_script=complete_script,
+                                          short_command_name=short_command_name,
+                                          dir=dir)
 
-def create_cd_shortcut_rule( short_command_name, dir, shell ):
-    return cd_shortcut_rule[shell].format( complete_script=complete_script, 
-                                           short_command_name=short_command_name,
-                                           dir=dir )
 
 ########################################################################################################################
-###          CMake Aliases 
+# CMake Aliases
 ########################################################################################################################
-        
-build_aliases= dict()
+
+build_aliases = dict()
 build_aliases['bash'] = """
 alias {prefix}cmake-gcc='CC="{ccache}gcc" CXX="{ccache}g++" cmake {definitions} {source_dir}'
 alias {prefix}cmake-intel='CC="{ccache}icc" CXX="{ccache}icpc" cmake {definitions} {source_dir}'
@@ -119,7 +115,7 @@ function {prefix}cmake-clang
     cmake {definitions} {source_dir}
 end
 function {prefix}cmake-clang-nocolor
-    set CC  {ccache}clang -Qunused-arguments 
+    set CC  {ccache}clang -Qunused-arguments
     set CXX {ccache}clang++ -Qunused-arguments
     cmake {definitions} {source_dir}
 end
@@ -131,16 +127,16 @@ alias {prefix}make='make -f {build_dir}/apps/Makefile'
 """
 
 
-def create_build_aliases( prefix, source_dir, build_dir, definitions, shell ):
-    if args.no_ccache: 
-        ccache=""
-    else: 
-        ccache = "ccache " 
-    return build_aliases[shell].format( prefix = prefix,
-                                        source_dir = source_dir,
-                                        build_dir = build_dir,
-                                        definitions = definitions,
-                                        ccache = ccache )
+def create_build_aliases(prefix, source_dir, build_dir, definitions, shell):
+    if args.no_ccache:
+        ccache = ""
+    else:
+        ccache = "ccache "
+    return build_aliases[shell].format(prefix=prefix,
+                                       source_dir=source_dir,
+                                       build_dir=build_dir,
+                                       definitions=definitions,
+                                       ccache=ccache)
 
 
 source_aliases = """
@@ -148,27 +144,24 @@ alias {prefix}git='git --git-dir {source_dir}/.git --work-tree {source_dir}'
 alias {prefix}git-buildscript='git --git-dir {source_dir}/.git --work-tree {source_dir} push origin master:personal/$USER/buildscript -f'
 """
 
-def create_source_aliases( prefix, source_dir ):
-    return source_aliases.format( prefix=prefix, source_dir=source_dir )
 
+def create_source_aliases(prefix, source_dir):
+    return source_aliases.format(prefix=prefix, source_dir=source_dir)
 
 
 cmake_defs = args.cmake_def
 if args.app:
     cmake_defs += " -DWALBERLA_DIR=" + args.walberla_source
 
-
-print( create_cd_shortcut_rule( args.prefix + "s",  args.src, args.shell ) )
-print( create_cd_shortcut_rule( args.prefix + "ss", path.join(args.src,"src")  , args.shell ) )
-print( create_cd_shortcut_rule( args.prefix + "st", path.join(args.src,"tests"), args.shell ) )
-print( create_cd_shortcut_rule( args.prefix + "sa", path.join(args.src,"apps") , args.shell ) )
-print( create_source_aliases( args.prefix, args.src ) )
+print(create_cd_shortcut_rule(args.prefix + "s", args.src, args.shell))
+print(create_cd_shortcut_rule(args.prefix + "ss", path.join(args.src, "src"), args.shell))
+print(create_cd_shortcut_rule(args.prefix + "st", path.join(args.src, "tests"), args.shell))
+print(create_cd_shortcut_rule(args.prefix + "sa", path.join(args.src, "apps"), args.shell))
+print(create_source_aliases(args.prefix, args.src))
 
 if args.build:
-    print( create_build_aliases( args.prefix, args.src, args.build , cmake_defs, args.shell ) )
-    print( create_cd_shortcut_rule( args.prefix + "b",  args.build, args.shell ) ) 
-    print( create_cd_shortcut_rule( args.prefix + "bs", path.join(args.build,"src")  , args.shell ) ) 
-    print( create_cd_shortcut_rule( args.prefix + "bt", path.join(args.build,"tests"), args.shell ) ) 
-    print( create_cd_shortcut_rule( args.prefix + "ba", path.join(args.build,"apps") , args.shell ) )
-
-
+    print(create_build_aliases(args.prefix, args.src, args.build, cmake_defs, args.shell))
+    print(create_cd_shortcut_rule(args.prefix + "b", args.build, args.shell))
+    print(create_cd_shortcut_rule(args.prefix + "bs", path.join(args.build, "src"), args.shell))
+    print(create_cd_shortcut_rule(args.prefix + "bt", path.join(args.build, "tests"), args.shell))
+    print(create_cd_shortcut_rule(args.prefix + "ba", path.join(args.build, "apps"), args.shell))
diff --git a/utilities/bashhelper/folderComplete.py b/utilities/bashhelper/folderComplete.py
index c4d6bac78..3b8389831 100755
--- a/utilities/bashhelper/folderComplete.py
+++ b/utilities/bashhelper/folderComplete.py
@@ -22,31 +22,29 @@ import os.path
 # complete -o filenames  -o nospace -F _cdb cdb
 
 
-
 # Example:
 # ./folderComplete.py /home/bauer/devel/walberla tests/g
 
 
 # Input:   directory name, and beginning of word
-base_dir    = sys.argv[1] 
+base_dir = sys.argv[1]
 
-if len( sys.argv ) == 2:
-  to_complete=""
+if len(sys.argv) == 2:
+    to_complete = ""
 else:
-  to_complete = sys.argv[2]
-
+    to_complete = sys.argv[2]
 
 complete_path = base_dir + "/" + to_complete
 
 # In example valid_path = "/home/bauer/devel/walberla/tests "
 #            tail = "g"
-valid_path, tail = complete_path.rsplit("/",1)
+valid_path, tail = complete_path.rsplit("/", 1)
 
 # We look for all subdirs in valid_path, only if they start with tail
-subdirs = [o for o in os.listdir( valid_path ) if os.path.isdir( valid_path + "/" + o) and o.startswith(tail) ]
+subdirs = [o for o in os.listdir(valid_path) if os.path.isdir(valid_path + "/" + o) and o.startswith(tail)]
 
 # Make all paths relative to base_dir
-result = [ os.path.relpath( valid_path + "/" + p, base_dir) + "/" for p in subdirs ]
+result = [os.path.relpath(valid_path + "/" + p, base_dir) + "/" for p in subdirs]
 
 # Print the result
-print (" ".join( result))
+print(" ".join(result))
diff --git a/utilities/filterCompileCommands.py b/utilities/filterCompileCommands.py
index cb4813231..361ee2f35 100755
--- a/utilities/filterCompileCommands.py
+++ b/utilities/filterCompileCommands.py
@@ -3,36 +3,39 @@
 import json
 import sys
 
+
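+# Keep only first-party translation units: drop entries from extern/ and tests/.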
 def compileCommandSelector(x):
-   return not (("extern" in x["file"]) or ("tests" in x["file"]))
+    return not (("extern" in x["file"]) or ("tests" in x["file"]))
+
 
 def removePrecompiler(x):
-   pos = x.find("clang++")
-   if (pos != -1):
-      return x[pos:]
-   else:
-      return x
+    pos = x.find("clang++")
+    if pos != -1:
+        return x[pos:]
+    else:
+        return x
+
 
 if __name__ == "__main__":
-   if (len(sys.argv) != 2):
-      print("usage: ./filterCompileCommands.py compile_commands.json")
-      exit(-1)
+    if len(sys.argv) != 2:
+        print("usage: ./filterCompileCommands.py compile_commands.json")
+        exit(-1)
 
-   filename = sys.argv[1]
-   print("loading compile commands file: {}".format(filename))
+    filename = sys.argv[1]
+    print("loading compile commands file: {}".format(filename))
 
-   fin = open(filename, "r")
-   cc = json.load(fin)
-   fin.close()
+    fin = open(filename, "r")
+    cc = json.load(fin)
+    fin.close()
 
-   print("compile commands read: {}".format(len(cc)))
+    print("compile commands read: {}".format(len(cc)))
 
-   cc_filtered = list( filter(compileCommandSelector, cc) )
-   for x in cc_filtered:
-      x["command"] = removePrecompiler(x["command"])
+    cc_filtered = list(filter(compileCommandSelector, cc))
+    for x in cc_filtered:
+        x["command"] = removePrecompiler(x["command"])
 
-   print("compile commands filtered: {}".format(len(cc_filtered)))
+    print("compile commands filtered: {}".format(len(cc_filtered)))
 
-   fout = open(filename, "w")
-   json.dump(cc_filtered, fout)
-   fout.close()
+    fout = open(filename, "w")
+    json.dump(cc_filtered, fout)
+    fout.close()
diff --git a/utilities/findMissingIncludeGuards.py b/utilities/findMissingIncludeGuards.py
index f2c4561ad..ca0ed42fe 100755
--- a/utilities/findMissingIncludeGuards.py
+++ b/utilities/findMissingIncludeGuards.py
@@ -4,13 +4,13 @@ import os
 
 error = False
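+# Scan all headers outside extern/ (skipping *.impl.h) for a '#pragma once' include guard.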
 for root, dirnames, filenames in os.walk(".."):
-   for filename in filenames:
-      if filename.endswith((".h")) and not filename.endswith((".impl.h")):
-         if not "extern" in root:
-            file = os.path.join(root, filename)
-            if not "#pragma once" in open(file).read():
-               print(file)
-               error = True
+    for filename in filenames:
+        if filename.endswith(".h") and not filename.endswith(".impl.h"):
+            if "extern" not in root:
+                file = os.path.join(root, filename)
+                if "#pragma once" not in open(file).read():
+                    print(file)
+                    error = True
 
 if error:
-   exit(-1)
+    exit(-1)
diff --git a/utilities/gdbPrettyPrinter/STLv6/printers.py b/utilities/gdbPrettyPrinter/STLv6/printers.py
index 8326601cf..ef5e9aefd 100644
--- a/utilities/gdbPrettyPrinter/STLv6/printers.py
+++ b/utilities/gdbPrettyPrinter/STLv6/printers.py
@@ -26,6 +26,7 @@ try:
 except ImportError:
     _use_gdb_pp = False
 
+
 # Starting with the type ORIG, search for the member type NAME.  This
 # handles searching upward through superclasses.  This is needed to
 # work around http://sourceware.org/bugzilla/show_bug.cgi?id=13615.
@@ -42,17 +43,18 @@ def find_type(orig, name):
         # anything fancier here.
         field = typ.fields()[0]
         if not field.is_base_class:
-            raise ValueError, "Cannot find type %s::%s" % (str(orig), name)
+            raise ValueError("Cannot find type %s::%s" % (str(orig), name))
         typ = field.type
 
+
 class SharedPointerPrinter:
     "Print a shared_ptr or weak_ptr"
 
-    def __init__ (self, typename, val):
+    def __init__(self, typename, val):
         self.typename = typename
         self.val = val
 
-    def to_string (self):
+    def to_string(self):
         state = 'empty'
         refcounts = self.val['_M_refcount']['_M_pi']
         if refcounts != 0:
@@ -64,17 +66,19 @@ class SharedPointerPrinter:
                 state = 'count %d, weak %d' % (usecount, weakcount - 1)
         return '%s (%s) %s' % (self.typename, state, self.val['_M_ptr'])
 
+
 class UniquePointerPrinter:
     "Print a unique_ptr"
 
-    def __init__ (self, typename, val):
+    def __init__(self, typename, val):
         self.val = val
 
-    def to_string (self):
+    def to_string(self):
         v = self.val['_M_t']['_M_head_impl']
         return ('std::unique_ptr<%s> containing %s' % (str(v.type.target()),
                                                        str(v)))
 
+
 class StdListPrinter:
     "Print a std::list"
 
@@ -95,7 +99,7 @@ class StdListPrinter:
             self.base = elt['_M_next']
             count = self.count
             self.count = self.count + 1
-            return ('[%d]' % count, elt['_M_data'])
+            return '[%d]' % count, elt['_M_data']
 
     def __init__(self, typename, val):
         self.typename = typename
@@ -108,8 +112,9 @@ class StdListPrinter:
 
     def to_string(self):
         if self.val['_M_impl']['_M_node'].address == self.val['_M_impl']['_M_node']['_M_next']:
-            return 'empty %s' % (self.typename)
-        return '%s' % (self.typename)
+            return 'empty %s' % self.typename
+        return '%s' % self.typename
+
 
 class StdListIteratorPrinter:
     "Print std::list::iterator"
@@ -123,6 +128,7 @@ class StdListIteratorPrinter:
         nodetype = nodetype.strip_typedefs().pointer()
         return self.val['_M_node'].cast(nodetype).dereference()['_M_data']
 
+
 class StdSlistPrinter:
     "Print a __gnu_cxx::slist"
 
@@ -142,7 +148,7 @@ class StdSlistPrinter:
             self.base = elt['_M_next']
             count = self.count
             self.count = self.count + 1
-            return ('[%d]' % count, elt['_M_data'])
+            return '[%d]' % count, elt['_M_data']
 
     def __init__(self, typename, val):
         self.val = val
@@ -157,6 +163,7 @@ class StdSlistPrinter:
             return 'empty __gnu_cxx::slist'
         return '__gnu_cxx::slist'
 
+
 class StdSlistIteratorPrinter:
     "Print __gnu_cxx::slist::iterator"
 
@@ -168,17 +175,18 @@ class StdSlistIteratorPrinter:
         nodetype = nodetype.strip_typedefs().pointer()
         return self.val['_M_node'].cast(nodetype).dereference()['_M_data']
 
+
 class StdVectorPrinter:
     "Print a std::vector"
 
     class _iterator:
-        def __init__ (self, start, finish, bitvec):
+        def __init__(self, start, finish, bitvec):
             self.bitvec = bitvec
             if bitvec:
-                self.item   = start['_M_p']
-                self.so     = start['_M_offset']
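+                # vector<bool> packs bits into words: keep a word pointer and a bit offset.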
+                self.item = start['_M_p']
+                self.so = start['_M_offset']
                 self.finish = finish['_M_p']
-                self.fo     = finish['_M_offset']
+                self.fo = finish['_M_offset']
                 itype = self.item.dereference().type
                 self.isize = 8 * itype.sizeof
             else:
@@ -204,25 +212,25 @@ class StdVectorPrinter:
                 if self.so >= self.isize:
                     self.item = self.item + 1
                     self.so = 0
-                return ('[%d]' % count, obit)
+                return '[%d]' % count, obit
             else:
                 if self.item == self.finish:
                     raise StopIteration
                 elt = self.item.dereference()
                 self.item = self.item + 1
-                return ('[%d]' % count, elt)
+                return '[%d]' % count, elt
 
     def __init__(self, typename, val):
         self.typename = typename
         self.val = val
-        self.is_bool = val.type.template_argument(0).code  == gdb.TYPE_CODE_BOOL
+        self.is_bool = val.type.template_argument(0).code == gdb.TYPE_CODE_BOOL
 
     def children(self):
         return self._iterator(self.val['_M_impl']['_M_start'],
                               self.val['_M_impl']['_M_finish'],
                               self.is_bool)
 
-    #def to_string(self):
+    # def to_string(self):
     #    start = self.val['_M_impl']['_M_start']
     #    finish = self.val['_M_impl']['_M_finish']
     #    end = self.val['_M_impl']['_M_end_of_storage']
@@ -244,6 +252,7 @@ class StdVectorPrinter:
     def display_hint(self):
         return 'array'
 
+
 class StdVectorIteratorPrinter:
     "Print std::vector::iterator"
 
@@ -253,85 +262,88 @@ class StdVectorIteratorPrinter:
     def to_string(self):
         return self.val['_M_current'].dereference()
 
+
 class StdTuplePrinter:
     "Print a std::tuple"
 
     class _iterator:
-        def __init__ (self, head):
+        def __init__(self, head):
             self.head = head
 
             # Set the base class as the initial head of the
             # tuple.
-            nodes = self.head.type.fields ()
-            if len (nodes) == 1:
+            nodes = self.head.type.fields()
+            if len(nodes) == 1:
                 # Set the actual head to the first pair.
-                self.head  = self.head.cast (nodes[0].type)
-            elif len (nodes) != 0:
-                raise ValueError, "Top of tuple tree does not consist of a single node."
+                self.head = self.head.cast(nodes[0].type)
+            elif len(nodes) != 0:
+                raise ValueError("Top of tuple tree does not consist of a single node.")
             self.count = 0
 
-        def __iter__ (self):
+        def __iter__(self):
             return self
 
-        def next (self):
-            nodes = self.head.type.fields ()
+        def next(self):
+            nodes = self.head.type.fields()
             # Check for further recursions in the inheritance tree.
-            if len (nodes) == 0:
+            if len(nodes) == 0:
                 raise StopIteration
             # Check that this iteration has an expected structure.
-            if len (nodes) != 2:
-                raise ValueError, "Cannot parse more than 2 nodes in a tuple tree."
+            if len(nodes) != 2:
+                raise ValueError("Cannot parse more than 2 nodes in a tuple tree.")
 
             # - Left node is the next recursion parent.
             # - Right node is the actual class contained in the tuple.
 
             # Process right node.
-            impl = self.head.cast (nodes[1].type)
+            impl = self.head.cast(nodes[1].type)
 
             # Process left node and set it as head.
-            self.head  = self.head.cast (nodes[0].type)
+            self.head = self.head.cast(nodes[0].type)
             self.count = self.count + 1
 
             # Finally, check the implementation.  If it is
             # wrapped in _M_head_impl return that, otherwise return
             # the value "as is".
-            fields = impl.type.fields ()
-            if len (fields) < 1 or fields[0].name != "_M_head_impl":
-                return ('[%d]' % self.count, impl)
+            fields = impl.type.fields()
+            if len(fields) < 1 or fields[0].name != "_M_head_impl":
+                return '[%d]' % self.count, impl
             else:
-                return ('[%d]' % self.count, impl['_M_head_impl'])
+                return '[%d]' % self.count, impl['_M_head_impl']
 
-    def __init__ (self, typename, val):
+    def __init__(self, typename, val):
         self.typename = typename
-        self.val = val;
+        self.val = val
 
-    def children (self):
-        return self._iterator (self.val)
+    def children(self):
+        return self._iterator(self.val)
+
+    def to_string(self):
+        if len(self.val.type.fields()) == 0:
+            return 'empty %s' % self.typename
+        return '%s containing' % self.typename
 
-    def to_string (self):
-        if len (self.val.type.fields ()) == 0:
-            return 'empty %s' % (self.typename)
-        return '%s containing' % (self.typename)
 
 class StdStackOrQueuePrinter:
     "Print a std::stack or std::queue"
 
-    def __init__ (self, typename, val):
+    def __init__(self, typename, val):
         self.typename = typename
         self.visualizer = gdb.default_visualizer(val['c'])
 
-    def children (self):
+    def children(self):
         return self.visualizer.children()
 
-    def to_string (self):
+    def to_string(self):
         return '%s wrapping: %s' % (self.typename,
                                     self.visualizer.to_string())
 
-    def display_hint (self):
-        if hasattr (self.visualizer, 'display_hint'):
-            return self.visualizer.display_hint ()
+    def display_hint(self):
+        if hasattr(self.visualizer, 'display_hint'):
+            return self.visualizer.display_hint()
         return None
 
+
 class RbtreeIterator:
     def __init__(self, rbtree):
         self.size = rbtree['_M_t']['_M_impl']['_M_node_count']
@@ -342,7 +354,7 @@ class RbtreeIterator:
         return self
 
     def __len__(self):
-        return int (self.size)
+        return int(self.size)
 
     def next(self):
         if self.count == self.size:
@@ -366,32 +378,35 @@ class RbtreeIterator:
             self.node = node
         return result
 
+
 # This is a pretty printer for std::_Rb_tree_iterator (which is
 # std::map::iterator), and has nothing to do with the RbtreeIterator
 # class above.
 class StdRbtreeIteratorPrinter:
     "Print std::map::iterator"
 
-    def __init__ (self, typename, val):
+    def __init__(self, typename, val):
         self.val = val
 
-    def to_string (self):
+    def to_string(self):
         typename = str(self.val.type.strip_typedefs()) + '::_Link_type'
         nodetype = gdb.lookup_type(typename).strip_typedefs()
         return self.val.cast(nodetype).dereference()['_M_value_field']
 
+
 class StdDebugIteratorPrinter:
     "Print a debug enabled version of an iterator"
 
-    def __init__ (self, typename, val):
+    def __init__(self, typename, val):
         self.val = val
 
     # Just strip away the encapsulating __gnu_debug::_Safe_iterator
     # and return the wrapped iterator value.
-    def to_string (self):
+    def to_string(self):
         itype = self.val.type.template_argument(0)
         return self.val['_M_current'].cast(itype)
 
+
 class StdMapPrinter:
     "Print a std::map or std::multimap"
 
@@ -417,23 +432,24 @@ class StdMapPrinter:
             self.count = self.count + 1
             return result
 
-    def __init__ (self, typename, val):
+    def __init__(self, typename, val):
         self.typename = typename
         self.val = val
 
-    def to_string (self):
+    def to_string(self):
         return '%s with %d elements' % (self.typename,
-                                        len (RbtreeIterator (self.val)))
+                                        len(RbtreeIterator(self.val)))
 
-    def children (self):
+    def children(self):
         rep_type = find_type(self.val.type, '_Rep_type')
         node = find_type(rep_type, '_Link_type')
         node = node.strip_typedefs()
-        return self._iter (RbtreeIterator (self.val), node)
+        return self._iter(RbtreeIterator(self.val), node)
 
-    def display_hint (self):
+    def display_hint(self):
         return 'map'
 
+
 class StdSetPrinter:
     "Print a std::set or std::multiset"
 
@@ -456,19 +472,20 @@ class StdSetPrinter:
             self.count = self.count + 1
             return result
 
-    def __init__ (self, typename, val):
+    def __init__(self, typename, val):
         self.typename = typename
         self.val = val
 
-    def to_string (self):
+    def to_string(self):
         return '%s with %d elements' % (self.typename,
-                                        len (RbtreeIterator (self.val)))
+                                        len(RbtreeIterator(self.val)))
 
-    def children (self):
+    def children(self):
         rep_type = find_type(self.val.type, '_Rep_type')
         node = find_type(rep_type, '_Link_type')
         node = node.strip_typedefs()
-        return self._iter (RbtreeIterator (self.val), node)
+        return self._iter(RbtreeIterator(self.val), node)
+
 
 class StdBitsetPrinter:
     "Print a std::bitset"
@@ -477,12 +494,12 @@ class StdBitsetPrinter:
         self.typename = typename
         self.val = val
 
-    def to_string (self):
+    def to_string(self):
         # If template_argument handled values, we could print the
         # size.  Or we could use a regexp on the type.
-        return '%s' % (self.typename)
+        return '%s' % self.typename
 
-    def children (self):
+    def children(self):
         words = self.val['_M_w']
         wtype = words.type
 
@@ -490,10 +507,10 @@ class StdBitsetPrinter:
         # array.  This depends on the template specialization used.
         # If it is a single long, convert to a single element list.
         if wtype.code == gdb.TYPE_CODE_ARRAY:
-            tsize = wtype.target ().sizeof
+            tsize = wtype.target().sizeof
         else:
             words = [words]
-            tsize = wtype.sizeof 
+            tsize = wtype.sizeof
 
         nwords = wtype.sizeof / tsize
         result = []
@@ -510,6 +527,7 @@ class StdBitsetPrinter:
             byte = byte + 1
         return result
 
+
 class StdDequePrinter:
     "Print a std::deque"
 
@@ -549,7 +567,7 @@ class StdDequePrinter:
         self.elttype = val.type.template_argument(0)
         size = self.elttype.sizeof
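+        # Mirror libstdc++ deque node sizing: each buffer holds 512 bytes, at least one element.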
         if size < 512:
-            self.buffer_size = int (512 / size)
+            self.buffer_size = int(512 / size)
         else:
             self.buffer_size = 1
 
@@ -563,7 +581,7 @@ class StdDequePrinter:
 
         size = self.buffer_size * delta_n + delta_s + delta_e
 
-        return '%s with %d elements' % (self.typename, long (size))
+        return '%s with %d elements' % (self.typename, int(size))
 
     def children(self):
         start = self.val['_M_impl']['_M_start']
@@ -571,9 +589,10 @@ class StdDequePrinter:
         return self._iter(start['_M_node'], start['_M_cur'], start['_M_last'],
                           end['_M_cur'], self.buffer_size)
 
-    def display_hint (self):
+    def display_hint(self):
         return 'array'
 
+
 class StdDequeIteratorPrinter:
     "Print std::deque::iterator"
 
@@ -583,6 +602,7 @@ class StdDequeIteratorPrinter:
     def to_string(self):
         return self.val['_M_cur'].dereference()
 
+
 class StdStringPrinter:
     "Print a std::basic_string of some kind"
 
@@ -593,32 +613,33 @@ class StdStringPrinter:
         # Make sure &string works, too.
         type = self.val.type
         if type.code == gdb.TYPE_CODE_REF:
-            type = type.target ()
+            type = type.target()
 
         # Calculate the length of the string so that to_string returns
         # the string according to length, not according to first null
         # encountered.
-        ptr = self.val ['_M_dataplus']['_M_p']
-        realtype = type.unqualified ().strip_typedefs ()
-        reptype = gdb.lookup_type (str (realtype) + '::_Rep').pointer ()
+        ptr = self.val['_M_dataplus']['_M_p']
+        realtype = type.unqualified().strip_typedefs()
+        reptype = gdb.lookup_type(str(realtype) + '::_Rep').pointer()
         header = ptr.cast(reptype) - 1
-        len = header.dereference ()['_M_length']
+        len = header.dereference()['_M_length']
         if hasattr(ptr, "lazy_string"):
-            return ptr.lazy_string (length = len)
-        return ptr.string (length = len)
+            return ptr.lazy_string(length=len)
+        return ptr.string(length=len)
 
-    def display_hint (self):
+    def display_hint(self):
         return 'string'
 
+
 class Tr1HashtableIterator:
-    def __init__ (self, hash):
+    def __init__(self, hash):
         self.node = hash['_M_before_begin']['_M_nxt']
         self.node_type = find_type(hash.type, '__node_type').pointer()
 
-    def __iter__ (self):
+    def __iter__(self):
         return self
 
-    def next (self):
+    def next(self):
         if self.node == 0:
             raise StopIteration
         node = self.node.cast(self.node_type)
@@ -626,58 +647,61 @@ class Tr1HashtableIterator:
         self.node = node.dereference()['_M_nxt']
         return result
 
+
 class Tr1UnorderedSetPrinter:
     "Print a tr1::unordered_set"
 
-    def __init__ (self, typename, val):
+    def __init__(self, typename, val):
         self.typename = typename
         self.val = val
 
-    def to_string (self):
+    def to_string(self):
         return '%s with %d elements' % (self.typename, self.val['_M_element_count'])
 
     @staticmethod
-    def format_count (i):
+    def format_count(i):
         return '[%d]' % i
 
-    def children (self):
-        counter = itertools.imap (self.format_count, itertools.count())
-        return itertools.izip (counter, Tr1HashtableIterator (self.val))
+    def children(self):
+        counter = itertools.imap(self.format_count, itertools.count())
+        return itertools.izip(counter, Tr1HashtableIterator(self.val))
+
 
 class Tr1UnorderedMapPrinter:
     "Print a tr1::unordered_map"
 
-    def __init__ (self, typename, val):
+    def __init__(self, typename, val):
         self.typename = typename
         self.val = val
 
-    def to_string (self):
+    def to_string(self):
         return '%s with %d elements' % (self.typename, self.val['_M_element_count'])
 
     @staticmethod
-    def flatten (list):
+    def flatten(list):
         for elt in list:
             for i in elt:
                 yield i
 
     @staticmethod
-    def format_one (elt):
+    def format_one(elt):
         return (elt['first'], elt['second'])
 
     @staticmethod
-    def format_count (i):
+    def format_count(i):
         return '[%d]' % i
 
-    def children (self):
-        counter = itertools.imap (self.format_count, itertools.count())
+    def children(self):
+        counter = itertools.imap(self.format_count, itertools.count())
         # Map over the hash table and flatten the result.
-        data = self.flatten (itertools.imap (self.format_one, Tr1HashtableIterator (self.val)))
+        data = self.flatten(itertools.imap(self.format_one, Tr1HashtableIterator(self.val)))
         # Zip the two iterators together.
-        return itertools.izip (counter, data)
+        return itertools.izip(counter, data)
 
-    def display_hint (self):
+    def display_hint(self):
         return 'map'
 
+
 class StdForwardListPrinter:
     "Print a std::forward_list"
 
@@ -697,7 +721,7 @@ class StdForwardListPrinter:
             self.base = elt['_M_next']
             count = self.count
             self.count = self.count + 1
-            return ('[%d]' % count, elt['_M_value'])
+            return '[%d]' % count, elt['_M_value']
 
     def __init__(self, typename, val):
         self.val = val
@@ -710,8 +734,8 @@ class StdForwardListPrinter:
 
     def to_string(self):
         if self.val['_M_impl']['_M_head']['_M_next'] == 0:
-            return 'empty %s' % (self.typename)
-        return '%s' % (self.typename)
+            return 'empty %s' % self.typename
+        return '%s' % self.typename
 
 
 # A "regular expression" printer which conforms to the
@@ -728,6 +752,7 @@ class RxPrinter(object):
             return None
         return self.function(self.name, value)
 
+
 # A pretty-printer that conforms to the "PrettyPrinter" protocol from
 # gdb.printing.  It can also be used directly as an old-style printer.
 class Printer(object):
@@ -743,7 +768,7 @@ class Printer(object):
         # A small sanity check.
         # FIXME
         if not self.compiled_rx.match(name + '<>'):
-            raise ValueError, 'libstdc++ programming error: "%s" does not match' % name
+            raise ValueError('libstdc++ programming error: "%s" does not match' % name)
         printer = RxPrinter(name, function)
         self.subprinters.append(printer)
         self.lookup[name] = printer
@@ -762,10 +787,10 @@ class Printer(object):
     def get_basic_type(type):
         # If it points to a reference, get the reference.
         if type.code == gdb.TYPE_CODE_REF:
-            type = type.target ()
+            type = type.target()
 
         # Get the unqualified type, stripped of typedefs.
-        type = type.unqualified ().strip_typedefs ()
+        type = type.unqualified().strip_typedefs()
 
         return type.tag
 
@@ -787,9 +812,11 @@ class Printer(object):
         # Cannot find a pretty printer.  Return None.
         return None
 
+
 libstdcxx_printer = None
 
-def register_libstdcxx_printers (obj):
+
+def register_libstdcxx_printers(obj):
     "Register libstdc++ pretty-printers with objfile Obj."
 
     global _use_gdb_pp
@@ -802,19 +829,19 @@ def register_libstdcxx_printers (obj):
             obj = gdb
         obj.pretty_printers.append(libstdcxx_printer)
 
-def build_libstdcxx_dictionary ():
+
+def build_libstdcxx_dictionary():
     global libstdcxx_printer
 
     libstdcxx_printer = Printer("libstdc++-v6")
 
     # For _GLIBCXX_BEGIN_NAMESPACE_VERSION.
-    vers = '(__7::)?'
+    # vers = '(__7::)?'
     # For _GLIBCXX_BEGIN_NAMESPACE_CONTAINER.
-    container = '(__cxx1998::' + vers + ')?'
-
+    # container = '(__cxx1998::' + vers + ')?'
 
-    #libstdcxx_printer.add_container('std::', 'vector', StdVectorPrinter)
-    #return
+    # libstdcxx_printer.add_container('std::', 'vector', StdVectorPrinter)
+    # return
 
     # libstdc++ objects requiring pretty-printing.
     # In order from:
@@ -891,7 +918,6 @@ def build_libstdcxx_dictionary ():
     libstdcxx_printer.add('std::__debug::forward_list',
                           StdForwardListPrinter)
 
-
     # Extensions.
     libstdcxx_printer.add_version('__gnu_cxx::', 'slist', StdSlistPrinter)
 
@@ -931,4 +957,5 @@ def build_libstdcxx_dictionary ():
         libstdcxx_printer.add('std::__norm::_Deque_iterator',
                               StdDequeIteratorPrinter)
 
-build_libstdcxx_dictionary ()
+
+build_libstdcxx_dictionary()
diff --git a/utilities/gdbPrettyPrinter/boost_1_40/printers.py b/utilities/gdbPrettyPrinter/boost_1_40/printers.py
index 7b8949725..110a8ac3e 100644
--- a/utilities/gdbPrettyPrinter/boost_1_40/printers.py
+++ b/utilities/gdbPrettyPrinter/boost_1_40/printers.py
@@ -35,20 +35,27 @@
 import gdb
 import re
 
+
 class static:
     "Creates a 'static' method"
+
     def __init__(self, function):
         self.__call__ = function
 
-boost_pretty_printers = [ ]
+
+boost_pretty_printers = []
+
+
 def register_pretty_printer(pretty_printer):
     "Registers a Pretty Printer"
     boost_pretty_printers.append(pretty_printer)
     return pretty_printer
 
+
 @register_pretty_printer
 class BoostIteratorRange:
     "Pretty Printer for boost::iterator_range (Boost.Range)"
+
     @static
     def supports(typename):
         return re.compile('^boost::iterator_range<.*>$').search(typename)
@@ -86,6 +93,7 @@ class BoostIteratorRange:
     def display_hint(self):
         return 'array'
 
+
 @register_pretty_printer
 class BoostOptional:
     "Pretty Printer for boost::optional (Boost.Optional)"
@@ -108,14 +116,14 @@ class BoostOptional:
             return self
 
         def next(self):
-            if(self.done):
+            if (self.done):
                 raise StopIteration
             self.done = True
             return ('value', self.member.dereference())
 
     def children(self):
         initialized = self.value['m_initialized']
-        if(not initialized):
+        if (not initialized):
             return self._iterator('', True)
         else:
             match = BoostOptional.regex.search(self.typename)
@@ -124,16 +132,17 @@ class BoostOptional:
                     membertype = gdb.lookup_type(match.group(1)).pointer()
                     member = self.value['m_storage']['dummy_']['data'].address.cast(membertype)
                     return self._iterator(member, False)
-                except:
+                except Exception:
                     return self._iterator('', True)
 
     def to_string(self):
         initialized = self.value['m_initialized']
-        if(not initialized):
+        if (not initialized):
             return "%s is not initialized" % self.typename
         else:
             return "%s is initialized" % self.typename
 
+
 @register_pretty_printer
 class BoostReferenceWrapper:
     "Pretty Printer for boost::reference_wrapper (Boost.Ref)"
@@ -150,6 +159,7 @@ class BoostReferenceWrapper:
     def to_string(self):
         return '(%s) %s' % (self.typename, self.value['t_'].dereference())
 
+
 @register_pretty_printer
 class BoostTribool:
     "Pretty Printer for boost::logic::tribool (Boost.Tribool)"
@@ -166,17 +176,19 @@ class BoostTribool:
     def to_string(self):
         state = self.value['value']
         s = 'indeterminate'
-        if(state == 0):
+        if (state == 0):
             s = 'false'
-        elif(state == 1):
+        elif (state == 1):
             s = 'true'
         return '(%s) %s' % (self.typename, s)
 
+
 @register_pretty_printer
 class BoostScopedPtr:
     "Pretty Printer for boost::scoped/intrusive_ptr/array (Boost.SmartPtr)"
 
     regex = re.compile('^boost::(intrusive|scoped)_(ptr|array)<(.*)>$')
+
     @static
     def supports(typename):
         return BoostScopedPtr.regex.search(typename)
@@ -188,26 +200,28 @@ class BoostScopedPtr:
     def to_string(self):
         return '(%s) %s' % (self.typename, self.value['px'])
 
+
 @register_pretty_printer
 class BoostSharedPtr:
     "Pretty Printer for boost::shared/weak_ptr/array (Boost.SmartPtr)"
-    
+
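+    # One-shot child iterator: yields the pointee once under the key '[deref]'.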
     class _iterator:
-        def __init__(self,sharedPtr):
+        def __init__(self, sharedPtr):
             self.sharedPtr = sharedPtr
-            self.atEnd     = False
+            self.atEnd = False
+
         def __iter__(self):
             return self
-        
+
         def next(self):
             if self.atEnd:
                 raise StopIteration
             else:
                 self.atEnd = True
-                return ('[deref]', self.sharedPtr['px'].dereference() )
-        
+                return ('[deref]', self.sharedPtr['px'].dereference())
 
     regex = re.compile('^boost::(weak|shared)_(ptr|array)<(.*)>$')
+
     @static
     def supports(typename):
         return BoostSharedPtr.regex.search(typename)
@@ -225,13 +239,16 @@ class BoostSharedPtr:
         return '(%s) (count %d, weak count %d) %s' % (self.typename,
                                                       refcount, weakcount,
                                                       self.value['px'])
+
     def children(self):
         return self._iterator(self.value)
 
+
 @register_pretty_printer
 class BoostArray:
     "Pretty Printer for boost::array (Boost.Array)"
-    regex = re.compile('^boost::array<(.*)>$');
+    regex = re.compile('^boost::array<(.*)>$')
+
     @static
     def supports(typename):
         return BoostArray.regex.search(typename)
@@ -246,10 +263,12 @@ class BoostArray:
     def display_hint(self):
         return 'array'
 
+
 @register_pretty_printer
 class BoostVariant:
     "Pretty Printer for boost::variant (Boost.Variant)"
-    regex = re.compile('^boost::variant<(.*)>$');
+    regex = re.compile('^boost::variant<(.*)>$')
+
     @static
     def supports(typename):
         return BoostVariant.regex.search(typename)
@@ -262,18 +281,19 @@ class BoostVariant:
         m = BoostVariant.regex.search(self.typename)
         # TODO this breaks with boost::variant< foo<a,b>, bar >!
         types = map(lambda s: s.strip(), m.group(1).split(','))
-        which = long(self.value['which_'])
+        which = int(self.value['which_'])
         type = types[which]
         data = ''
         try:
             ptrtype = gdb.lookup_type(type).pointer()
             data = self.value['storage_']['data_']['buf'].address.cast(ptrtype)
-        except:
+        except Exception:
             data = self.value['storage_']['data_']['buf']
         return '(boost::variant<...>) which (%d) = %s value = %s' % (which,
                                                                      type,
                                                                      data.dereference())
 
+
 @register_pretty_printer
 class BoostUuid:
     "Pretty Printer for boost::uuids::uuid (Boost.Uuid)"
@@ -288,21 +308,22 @@ class BoostUuid:
         self.value = value
 
     def to_string(self):
-        u = (self.value['data'][i] for i in xrange(16))
+        u = (self.value['data'][i] for i in range(16))
         s = 'xxxx-xx-xx-xx-xxxxxx'.replace('x', '%02x') % tuple(u)
         return '(%s) %s' % (self.typename, s)
 
+
 def find_pretty_printer(value):
     "Find a pretty printer suitable for value"
     type = value.type
 
     if type.code == gdb.TYPE_CODE_REF:
-       type = type.target()
+        type = type.target()
 
     type = type.unqualified().strip_typedefs()
 
     typename = type.tag
-    if typename == None:
+    if typename is None:
         return None
 
     for pretty_printer in boost_pretty_printers:
@@ -311,8 +332,9 @@ def find_pretty_printer(value):
 
     return None
 
+
 def register_boost_printers(obj):
     "Register Boost Pretty Printers."
-    if obj == None:
+    if obj is None:
         obj = gdb
     obj.pretty_printers.append(find_pretty_printer)
diff --git a/utilities/gdbPrettyPrinter/qt4/printers.py b/utilities/gdbPrettyPrinter/qt4/printers.py
index 43e7be302..169f283d1 100644
--- a/utilities/gdbPrettyPrinter/qt4/printers.py
+++ b/utilities/gdbPrettyPrinter/qt4/printers.py
@@ -17,30 +17,31 @@
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
 import gdb
-import itertools
+# import itertools
 import re
 
+
 class QStringPrinter:
 
     def __init__(self, val):
         self.val = val
 
     def to_string(self):
-        #ret = ""
-        #i = 0
+        # ret = ""
+        # i = 0
         size = self.val['d']['size']
-        #while i < size:
+        # while i < size:
         #    char = self.val['d']['data'][i]
         #    if (char > 127):
         #        ret += "\\u%x" % int(char)
         #    else:
         #        ret += chr(char)
         #    i = i + 1
-        #return ret
+        # return ret
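+        # QString stores UTF-16 code units, so decode size * 2 bytes as UTF-16.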
         dataAsCharPointer = self.val['d']['data'].cast(gdb.lookup_type("char").pointer())
-        return dataAsCharPointer.string(encoding = 'UTF-16', length = size * 2)
+        return dataAsCharPointer.string(encoding='UTF-16', length=size * 2)
 
-    def display_hint (self):
+    def display_hint(self):
         return 'string'
 
 
@@ -63,18 +64,19 @@ class QByteArrayPrinter:
                 raise StopIteration
             count = self.count
             self.count = self.count + 1
-            return ('[%d]' % count, self.data[count])
+            return '[%d]' % count, self.data[count]
 
     def children(self):
         return self._iterator(self.val['d']['data'], self.val['d']['size'])
 
     def to_string(self):
-        #todo: handle charset correctly
+        # todo: handle charset correctly
         return self.val['d']['data'].string()
 
-    def display_hint (self):
+    def display_hint(self):
         return 'string'
 
+
 class QListPrinter:
     "Print a QList"
 
@@ -93,36 +95,38 @@ class QListPrinter:
             count = self.count
             array = self.d['array'][self.d['begin'] + count]
 
-            #from QTypeInfo::isLarge
+            # from QTypeInfo::isLarge
             isLarge = self.nodetype.sizeof > gdb.lookup_type('void').pointer().sizeof
 
-            #isStatic is not needed anymore since Qt 4.6
-            #isPointer = self.nodetype.code == gdb.TYPE_CODE_PTR
+            # isStatic is not needed anymore since Qt 4.6
+            # isPointer = self.nodetype.code == gdb.TYPE_CODE_PTR
             #
-            ##unfortunately we can't use QTypeInfo<T>::isStatic as it's all inlined, so use
-            ##this list of types that use Q_DECLARE_TYPEINFO(T, Q_MOVABLE_TYPE)
-            ##(obviously it won't work for custom types)
-            #movableTypes = ['QRect', 'QRectF', 'QString', 'QMargins', 'QLocale', 'QChar', 'QDate', 'QTime', 'QDateTime', 'QVector',
-            #    'QRegExpr', 'QPoint', 'QPointF', 'QByteArray', 'QSize', 'QSizeF', 'QBitArray', 'QLine', 'QLineF', 'QModelIndex', 'QPersitentModelIndex',
-            #    'QVariant', 'QFileInfo', 'QUrl', 'QXmlStreamAttribute', 'QXmlStreamNamespaceDeclaration', 'QXmlStreamNotationDeclaration',
-            #    'QXmlStreamEntityDeclaration']
-            #if movableTypes.count(self.nodetype.tag):
+            # unfortunately we can't use QTypeInfo<T>::isStatic as it's all inlined, so use
+            # this list of types that use Q_DECLARE_TYPEINFO(T, Q_MOVABLE_TYPE)
+            # (obviously it won't work for custom types)
+            # movableTypes = ['QRect', 'QRectF', 'QString', 'QMargins', 'QLocale', 'QChar',
+            #                'QDate', 'QTime', 'QDateTime', 'QVector', 'QRegExpr', 'QPoint',
+            #                'QPointF', 'QByteArray', 'QSize', 'QSizeF', 'QBitArray', 'QLine',
+            #                'QLineF', 'QModelIndex', 'QPersitentModelIndex', 'QVariant', 'QFileInfo',
+            #                'QUrl', 'QXmlStreamAttribute', 'QXmlStreamNamespaceDeclaration',
+            #                'QXmlStreamNotationDeclaration', 'QXmlStreamEntityDeclaration']
+            # if movableTypes.count(self.nodetype.tag):
             #    isStatic = False
-            #else:
+            # else:
             #    isStatic = not isPointer
             isStatic = False
 
-            if isLarge or isStatic: #see QList::Node::t()
+            if isLarge or isStatic:  # see QList::Node::t()
                 node = array.cast(gdb.lookup_type('QList<%s>::Node' % self.nodetype).pointer())
             else:
                 node = array.cast(gdb.lookup_type('QList<%s>::Node' % self.nodetype))
             self.count = self.count + 1
-            return ('[%d]' % count, node['v'].cast(self.nodetype))
+            return '[%d]' % count, node['v'].cast(self.nodetype)
 
     def __init__(self, val, container, itype):
         self.val = val
         self.container = container
-        if itype == None:
+        if itype is None:
             self.itype = self.val.type.template_argument(0)
         else:
             self.itype = gdb.lookup_type(itype)
@@ -136,7 +140,8 @@ class QListPrinter:
         else:
             empty = ""
 
-        return "%s%s<%s>" % ( empty, self.container, self.itype )
+        return "%s%s<%s>" % (empty, self.container, self.itype)
+
 
 class QVectorPrinter:
     "Print a QVector"
@@ -157,7 +162,7 @@ class QVectorPrinter:
             count = self.count
 
             self.count = self.count + 1
-            return ('[%d]' % count, self.p['array'][count])
+            return '[%d]' % count, self.p['array'][count]
 
     def __init__(self, val, container):
         self.val = val
@@ -173,7 +178,8 @@ class QVectorPrinter:
         else:
             empty = ""
 
-        return "%s%s<%s>" % ( empty, self.container, self.itype )
+        return "%s%s<%s>" % (empty, self.container, self.itype)
+
 
 class QLinkedListPrinter:
     "Print a QLinkedList"
@@ -196,7 +202,7 @@ class QLinkedListPrinter:
             val = self.it['t']
             self.it = self.it['n']
             self.pos = self.pos + 1
-            return ('[%d]' % pos, val)
+            return '[%d]' % pos, val
 
     def __init__(self, val):
         self.val = val
@@ -211,7 +217,8 @@ class QLinkedListPrinter:
         else:
             empty = ""
 
-        return "%sQLinkedList<%s>" % ( empty, self.itype )
+        return "%sQLinkedList<%s>" % (empty, self.itype)
+
 
 class QMapPrinter:
     "Print a QMap"
@@ -227,23 +234,23 @@ class QMapPrinter:
         def __iter__(self):
             return self
 
-        def payload (self):
+        def payload(self):
 
-            #we can't use QMapPayloadNode as it's inlined
-            #as a workaround take the sum of sizeof(members)
+            # we can't use QMapPayloadNode as it's inlined
+            # as a workaround take the sum of sizeof(members)
             ret = self.ktype.sizeof
             ret += self.vtype.sizeof
             ret += gdb.lookup_type('void').pointer().sizeof
 
-            #but because of data alignment the value can be higher
-            #so guess it's aliged by sizeof(void*)
-            #TODO: find a real solution for this problem
+            # but because of data alignment the value can be higher
+            # so guess it's aligned by sizeof(void*)
+            # TODO: find a real solution for this problem
             ret += ret % gdb.lookup_type('void').pointer().sizeof
 
             ret -= gdb.lookup_type('void').pointer().sizeof
             return ret
 
-        def concrete (self, data_node):
+        def concrete(self, data_node):
             node_type = gdb.lookup_type('QMapNode<%s, %s>' % (self.ktype, self.vtype)).pointer()
             return (data_node.cast(gdb.lookup_type('char').pointer()) - self.payload()).cast(node_type)
 
@@ -261,7 +268,6 @@ class QMapPrinter:
             self.count = self.count + 1
             return result
 
-
     def __init__(self, val, container):
         self.val = val
         self.container = container
@@ -275,11 +281,15 @@ class QMapPrinter:
         else:
             empty = ""
 
-        return "%s%s<%s, %s>" % ( empty, self.container, self.val.type.template_argument(0), self.val.type.template_argument(1) )
+        return "%s%s<%s, %s>" % (empty,
+                                 self.container,
+                                 self.val.type.template_argument(0),
+                                 self.val.type.template_argument(1))
 
-    def display_hint (self):
+    def display_hint(self):
         return 'map'
 
+
 class QHashPrinter:
     "Print a QHash"
 
@@ -296,64 +306,63 @@ class QHashPrinter:
         def __iter__(self):
             return self
 
-        def hashNode (self):
+        def hashNode(self):
             "Casts the current QHashData::Node to a QHashNode and returns the result. See also QHash::concrete()"
             return self.data_node.cast(gdb.lookup_type('QHashNode<%s, %s>' % (self.ktype, self.vtype)).pointer())
 
-        def firstNode (self):
+        def firstNode(self):
             "Get the first node, See QHashData::firstNode()."
             e = self.d.cast(gdb.lookup_type('QHashData::Node').pointer())
-            #print "QHashData::firstNode() e %s" % e
+            # print "QHashData::firstNode() e %s" % e
             bucketNum = 0
             bucket = self.d['buckets'][bucketNum]
-            #print "QHashData::firstNode() *bucket %s" % bucket
+            # print "QHashData::firstNode() *bucket %s" % bucket
             n = self.d['numBuckets']
-            #print "QHashData::firstNode() n %s" % n
+            # print "QHashData::firstNode() n %s" % n
             while n:
-                #print "QHashData::firstNode() in while, n %s" % n;
+                # print "QHashData::firstNode() in while, n %s" % n;
                 if bucket != e:
-                    #print "QHashData::firstNode() in while, return *bucket %s" % bucket
+                    # print "QHashData::firstNode() in while, return *bucket %s" % bucket
                     return bucket
                 bucketNum += 1
                 bucket = self.d['buckets'][bucketNum]
-                #print "QHashData::firstNode() in while, new bucket %s" % bucket
+                # print "QHashData::firstNode() in while, new bucket %s" % bucket
                 n -= 1
-            #print "QHashData::firstNode() return e %s" % e
+            # print "QHashData::firstNode() return e %s" % e
             return e
 
-
-        def nextNode (self, node):
+        def nextNode(self, node):
             "Get the nextNode after the current, see also QHashData::nextNode()."
-            #print "******************************** nextNode"
-            #print "nextNode: node %s" % node
+            # print "******************************** nextNode"
+            # print "nextNode: node %s" % node
             next = node['next'].cast(gdb.lookup_type('QHashData::Node').pointer())
             e = next
 
-            #print "nextNode: next %s" % next
+            # print "nextNode: next %s" % next
             if next['next']:
-                #print "nextNode: return next"
+                # print "nextNode: return next"
                 return next
 
-            #print "nextNode: node->h %s" % node['h']
-            #print "nextNode: numBuckets %s" % self.d['numBuckets']
+            # print "nextNode: node->h %s" % node['h']
+            # print "nextNode: numBuckets %s" % self.d['numBuckets']
             start = (node['h'] % self.d['numBuckets']) + 1
             bucketNum = start
-            #print "nextNode: start %s" % start
+            # print "nextNode: start %s" % start
             bucket = self.d['buckets'][start]
-            #print "nextNode: bucket %s" % bucket
+            # print "nextNode: bucket %s" % bucket
             n = self.d['numBuckets'] - start
-            #print "nextNode: n %s" % n
+            # print "nextNode: n %s" % n
             while n:
-                #print "nextNode: in while; n %s" % n
-                #print "nextNode: in while; e %s" % e
-                #print "nextNode: in while; *bucket %s" % bucket
+                # print "nextNode: in while; n %s" % n
+                # print "nextNode: in while; e %s" % e
+                # print "nextNode: in while; *bucket %s" % bucket
                 if bucket != e:
-                    #print "nextNode: in while; return bucket %s" % bucket
+                    # print "nextNode: in while; return bucket %s" % bucket
                     return bucket
                 bucketNum += 1
                 bucket = self.d['buckets'][bucketNum]
                 n -= 1
-            #print "nextNode: return e %s" % e
+            # print "nextNode: return e %s" % e
             return e
 
         def next(self):
@@ -370,7 +379,7 @@ class QHashPrinter:
                 self.data_node = self.nextNode(self.data_node)
 
             self.count = self.count + 1
-            return ('[%d]' % self.count, item)
+            return '[%d]' % self.count, item
 
     def __init__(self, val, container):
         self.val = val
@@ -385,11 +394,15 @@ class QHashPrinter:
         else:
             empty = ""
 
-        return "%s%s<%s, %s>" % ( empty, self.container, self.val.type.template_argument(0), self.val.type.template_argument(1) )
+        return "%s%s<%s, %s>" % (empty,
+                                 self.container,
+                                 self.val.type.template_argument(0),
+                                 self.val.type.template_argument(1))
 
-    def display_hint (self):
+    def display_hint(self):
         return 'map'
 
+
 class QDatePrinter:
 
     def __init__(self, val):
@@ -405,30 +418,31 @@ class QDatePrinter:
         if julianDay >= 2299161:
             # Gregorian calendar starting from October 15, 1582
             # This algorithm is from Henry F. Fliegel and Thomas C. Van Flandern
-            ell = julianDay + 68569;
-            n = (4 * ell) / 146097;
-            ell = ell - (146097 * n + 3) / 4;
-            i = (4000 * (ell + 1)) / 1461001;
-            ell = ell - (1461 * i) / 4 + 31;
-            j = (80 * ell) / 2447;
-            d = ell - (2447 * j) / 80;
-            ell = j / 11;
-            m = j + 2 - (12 * ell);
-            y = 100 * (n - 49) + i + ell;
+            ell = julianDay + 68569
+            # floor division: plain / yields floats on Python 3 and silently
+            # breaks this integer-only algorithm
+            n = (4 * ell) // 146097
+            ell = ell - (146097 * n + 3) // 4
+            i = (4000 * (ell + 1)) // 1461001
+            ell = ell - (1461 * i) // 4 + 31
+            j = (80 * ell) // 2447
+            d = ell - (2447 * j) // 80
+            ell = j // 11
+            m = j + 2 - (12 * ell)
+            y = 100 * (n - 49) + i + ell
         else:
             # Julian calendar until October 4, 1582
             # Algorithm from Frequently Asked Questions about Calendars by Claus Toendering
-            julianDay += 32082;
-            dd = (4 * julianDay + 3) / 1461;
-            ee = julianDay - (1461 * dd) / 4;
-            mm = ((5 * ee) + 2) / 153;
-            d = ee - (153 * mm + 2) / 5 + 1;
-            m = mm + 3 - 12 * (mm / 10);
-            y = dd - 4800 + (mm / 10);
+            julianDay += 32082
+            dd = (4 * julianDay + 3) // 1461
+            ee = julianDay - (1461 * dd) // 4
+            mm = ((5 * ee) + 2) // 153
+            d = ee - (153 * mm + 2) // 5 + 1
+            m = mm + 3 - 12 * (mm // 10)
+            y = dd - 4800 + (mm // 10)
             if y <= 0:
-                --y;
+                y -= 1  # C++ "--y"; a bare --y is a no-op in Python
         return "%d-%02d-%02d" % (y, m, d)
 
+
 class QTimePrinter:
 
     def __init__(self, val):
@@ -446,27 +460,30 @@ class QTimePrinter:
 
-        hour = ds / MSECS_PER_HOUR
-        minute = (ds % MSECS_PER_HOUR) / MSECS_PER_MIN
-        second = (ds / 1000)%SECS_PER_MIN
+        # floor division keeps the components integral on Python 3
+        hour = ds // MSECS_PER_HOUR
+        minute = (ds % MSECS_PER_HOUR) // MSECS_PER_MIN
+        second = (ds // 1000) % SECS_PER_MIN
         msec = ds % 1000
         return "%02d:%02d:%02d.%03d" % (hour, minute, second, msec)
 
+
 class QDateTimePrinter:
 
     def __init__(self, val):
         self.val = val
 
     def to_string(self):
-        #val['d'] is a QDateTimePrivate, but for some reason casting to that doesn't work
-        #so work around by manually adjusting the pointer
-        date = self.val['d'].cast(gdb.lookup_type('char').pointer());
-        date += gdb.lookup_type('int').sizeof #increment for QAtomicInt ref;
-        date = date.cast(gdb.lookup_type('QDate').pointer()).dereference();
-
-        time = self.val['d'].cast(gdb.lookup_type('char').pointer());
-        time += gdb.lookup_type('int').sizeof + gdb.lookup_type('QDate').sizeof #increment for QAtomicInt ref; and QDate date;
-        time = time.cast(gdb.lookup_type('QTime').pointer()).dereference();
+        # val['d'] is a QDateTimePrivate, but for some reason casting to that doesn't work
+        # so work around by manually adjusting the pointer
+        date = self.val['d'].cast(gdb.lookup_type('char').pointer())
+        date += gdb.lookup_type('int').sizeof  # skip the QAtomicInt ref
+        date = date.cast(gdb.lookup_type('QDate').pointer()).dereference()
+
+        time = self.val['d'].cast(gdb.lookup_type('char').pointer())
+        # skip the QAtomicInt ref and the QDate date member
+        time += gdb.lookup_type('int').sizeof + gdb.lookup_type('QDate').sizeof
+        time = time.cast(gdb.lookup_type('QTime').pointer()).dereference()
         return "%s %s" % (date, time)
 
+
 class QUrlPrinter:
 
     def __init__(self, val):
@@ -475,19 +492,21 @@ class QUrlPrinter:
     def to_string(self):
         try:
             return self.val['d']['encodedOriginal']
-        except RuntimeError, error:
-            #if no debug information is avaliable for Qt, try guessing the correct address for encodedOriginal
-            #problem with this is that if QUrlPrivate members get changed, this fails
+        except RuntimeError:
+            # if no debug information is available for Qt, guess the address of
+            # encodedOriginal; this fails if the QUrlPrivate members change
             offset = gdb.lookup_type('int').sizeof
-            offset += offset % gdb.lookup_type('void').pointer().sizeof #alignment
+            offset += offset % gdb.lookup_type('void').pointer().sizeof  # alignment
             offset += gdb.lookup_type('QString').sizeof * 6
             offset += gdb.lookup_type('QByteArray').sizeof
-            encodedOriginal = self.val['d'].cast(gdb.lookup_type('char').pointer());
+            encodedOriginal = self.val['d'].cast(gdb.lookup_type('char').pointer())
             encodedOriginal += offset
-            encodedOriginal = encodedOriginal.cast(gdb.lookup_type('QByteArray').pointer()).dereference();
+            encodedOriginal = encodedOriginal.cast(gdb.lookup_type('QByteArray').pointer()).dereference()
             encodedOriginal = encodedOriginal['d']['data'].string()
             return encodedOriginal
 
+
 class QSetPrinter:
     "Print a QSet"
 
@@ -512,7 +531,7 @@ class QSetPrinter:
             self.hashIterator.data_node = self.hashIterator.nextNode(self.hashIterator.data_node)
 
             self.count = self.count + 1
-            return ('[%d]' % (self.count-1), item)
+            return '[%d]' % (self.count - 1), item
 
     def children(self):
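+        # A QSet is backed by a QHash<T, QHashDummyValue>, so reuse the QHash
+        # printer's iterator and report only the keys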
         hashPrinter = QHashPrinter(self.val['q_hash'], None)
@@ -525,7 +544,7 @@ class QSetPrinter:
         else:
             empty = ""
 
-        return "%sQSet<%s>" % ( empty , self.val.type.template_argument(0) )
+        return "%sQSet<%s>" % (empty, self.val.type.template_argument(0))
 
 
 class QCharPrinter:
@@ -534,46 +553,49 @@ class QCharPrinter:
         self.val = val
 
     def to_string(self):
-        return unichr(self.val['ucs'])
+        return chr(self.val['ucs'])
 
-    def display_hint (self):
+    def display_hint(self):
         return 'string'
 
-def register_qt4_printers (obj):
-    if obj == None:
+
+def register_qt4_printers(obj):
+    if obj is None:
         obj = gdb
 
-    obj.pretty_printers.append (lookup_function)
+    obj.pretty_printers.append(lookup_function)
 
-def lookup_function (val):
+
+def lookup_function(val):
     "Look-up and return a pretty-printer that can print val."
 
     # Get the type.
-    type = val.type;
+    type = val.type
 
     # If it points to a reference, get the reference.
     if type.code == gdb.TYPE_CODE_REF:
-        type = type.target ()
+        type = type.target()
 
     # Get the unqualified type, stripped of typedefs.
-    type = type.unqualified ().strip_typedefs ()
+    type = type.unqualified().strip_typedefs()
 
     # Get the type name.
     typename = type.tag
-    if typename == None:
+    if typename is None:
         return None
 
     # Iterate over local dictionary of types to determine
     # if a printer is registered for that type.  Return an
     # instantiation of the printer if found.
     for function in pretty_printers_dict:
-        if function.search (typename):
-            return pretty_printers_dict[function] (val)
+        if function.search(typename):
+            return pretty_printers_dict[function](val)
 
     # Cannot find a pretty printer.  Return None.
     return None
 
-def build_dictionary ():
+
+def build_dictionary():
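+    # map a regex on the stripped type name to a factory for the matching
+    # printer; lookup_function instantiates the first entry whose regex matches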
     pretty_printers_dict[re.compile('^QString$')] = lambda val: QStringPrinter(val)
     pretty_printers_dict[re.compile('^QByteArray$')] = lambda val: QByteArrayPrinter(val)
     pretty_printers_dict[re.compile('^QList<.*>$')] = lambda val: QListPrinter(val, 'QList', None)
@@ -596,4 +618,4 @@ def build_dictionary ():
 
 pretty_printers_dict = {}
 
-build_dictionary ()
+build_dictionary()
diff --git a/utilities/gdbPrettyPrinter/walberla/printers.py b/utilities/gdbPrettyPrinter/walberla/printers.py
index a56fb1409..414cf8f89 100644
--- a/utilities/gdbPrettyPrinter/walberla/printers.py
+++ b/utilities/gdbPrettyPrinter/walberla/printers.py
@@ -4,25 +4,29 @@
 import gdb
 import re
 
+
 class static:
     "Creates a 'static' method"
+
-    def __init__(self, function):
-        self.__call__ = function
+    def __init__(self, function):
+        self.function = function
+
+    def __call__(self, *args, **kwargs):
+        # Python 3 looks up special methods on the type, so an instance
+        # attribute named __call__ would never be invoked
+        return self.function(*args, **kwargs)
 
-walberla_pretty_printers = [ ]
+
+walberla_pretty_printers = []
+
+
 def register_pretty_printer(pretty_printer):
     "Registers a Pretty Printer"
     walberla_pretty_printers.append(pretty_printer)
     return pretty_printer
 
 
-
 @register_pretty_printer
-class FieldPrinter:    
+class FieldPrinter:
     "Pretty Printer for walberla::Fields"
-    regexField   = re.compile('walberla::field::Field')
+    regexField = re.compile('walberla::field::Field')
     regexGlField = re.compile('walberla::field::GhostLayerField')
-    
+
     @static
     def supports(typename):
         return FieldPrinter.regexField.search(typename) or FieldPrinter.regexGlField.search(typename)
@@ -30,42 +34,50 @@ class FieldPrinter:
     def __init__(self, typename, value):
         self.typename = typename
         self.value = value
-        if FieldPrinter.regexGlField.search(typename) :
+        if FieldPrinter.regexGlField.search(typename):
             self.typeStr = "GhostLayerField"
             self.ghostLayers = value.type.template_argument(2)
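+            # GhostLayerField carries the number of ghost layers as its third
+            # template parameter; it is reported as a child below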
         else:
             self.typeStr = "Field"
 
-        
     def to_string(self):
-        str = "%s<%s> of size (x,y,z,f)=(%d,%d,%d,%d) alloc (%d,%d,%d,%d)"  %  ( self.typeStr, self.value.type.template_argument(0),
-                   self.value['xSize_'], self.value['ySize_'], self.value['zSize_'],self.value.type.template_argument(1),
-                   self.value['xAllocSize_'], self.value['yAllocSize_'], self.value['zAllocSize_'], self.value['fAllocSize_'] )
-        return str
+        ret = "%s<%s> of size (x,y,z,f)=(%d,%d,%d,%d) alloc (%d,%d,%d,%d)" % (self.typeStr,
+                                                                              self.value.type.template_argument(0),
+                                                                              self.value['xSize_'],
+                                                                              self.value['ySize_'],
+                                                                              self.value['zSize_'],
+                                                                              self.value.type.template_argument(1),
+                                                                              self.value['xAllocSize_'],
+                                                                              self.value['yAllocSize_'],
+                                                                              self.value['zAllocSize_'],
+                                                                              self.value['fAllocSize_'])
+        return ret
 
     def children(self):
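+        # fzyx is waLBerla's structure-of-arrays layout (f varies slowest),
+        # zyxf the array-of-structures layout, hence the two index orders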
-        if self.value['layout_'] == gdb.parse_and_eval( "walberla::field::fzyx"):
+        if self.value['layout_'] == gdb.parse_and_eval("walberla::field::fzyx"):
             arrStr = "field(f,z,y,x)"
-            [s1,s2,s3,s4] = [ self.value['xSize_'], self.value['ySize_'], self.value['zSize_'],self.value.type.template_argument(1) ]
+            [s1, s2, s3, s4] = [self.value['xSize_'], self.value['ySize_'], self.value['zSize_'],
+                                self.value.type.template_argument(1)]
         else:
             arrStr = "field(z,y,x,f)"
-            [s1,s2,s3,s4] = [ self.value.type.template_argument(1), self.value['xSize_'], self.value['ySize_'], self.value['zSize_'] ]
+            [s1, s2, s3, s4] = [self.value.type.template_argument(1), self.value['xSize_'], self.value['ySize_'],
+                                self.value['zSize_']]
 
-        
         fieldType = self.value.type.template_argument(0)
-        yield ( arrStr, self.value["values_"].dereference().cast( fieldType.array(s1-1).array(s2-1).array(s3-1).array(s4-1) ) )
-        
-        memberToDisplay = ['layout_', 
-                           'xSize_', 'ySize_', 'zSize_', 
-                           'xAllocSize_', 'yAllocSize_' , 'zAllocSize_', 
-                           'ffact_', 'xfact_', 'yfact_','zfact_',
-                           'xOff_','yOff_','zOff_']
-        
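+        # gdb's Type.array(n) builds an array type indexed 0..n, so this cast
+        # views the flat values_ buffer as an s1 x s2 x s3 x s4 array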
+        yield (arrStr, self.value["values_"].dereference().cast(
+            fieldType.array(s1 - 1).array(s2 - 1).array(s3 - 1).array(s4 - 1)))
+
+        memberToDisplay = ['layout_',
+                           'xSize_', 'ySize_', 'zSize_',
+                           'xAllocSize_', 'yAllocSize_', 'zAllocSize_',
+                           'ffact_', 'xfact_', 'yfact_', 'zfact_',
+                           'xOff_', 'yOff_', 'zOff_']
+
         if hasattr(self, 'ghostLayers'):
-            yield("GhostLayers", self.ghostLayers)
-       
+            yield ("GhostLayers", self.ghostLayers)
+
         for member in memberToDisplay:
-            yield(member, self.value[member] )
+            yield (member, self.value[member])
 
 
 def find_pretty_printer(value):
@@ -73,12 +85,12 @@ def find_pretty_printer(value):
     type = value.type
 
     if type.code == gdb.TYPE_CODE_REF:
-       type = type.target()
+        type = type.target()
 
     type = type.unqualified().strip_typedefs()
 
     typename = type.tag
-    if typename == None:
+    if typename is None:
         return None
 
     for pretty_printer in walberla_pretty_printers:
@@ -87,9 +99,9 @@ def find_pretty_printer(value):
 
     return None
 
+
 def register_walberla_printers(obj):
     "Register walberla Pretty Printers."
-    if obj == None:
+    if obj is None:
         obj = gdb
     obj.pretty_printers.append(find_pretty_printer)
-
diff --git a/utilities/moduleDependenciesToDot.py b/utilities/moduleDependenciesToDot.py
index 2fa4fd677..edb30aeb6 100755
--- a/utilities/moduleDependenciesToDot.py
+++ b/utilities/moduleDependenciesToDot.py
@@ -4,8 +4,10 @@ import argparse
 import re
 from pathlib import Path
 
+
 def is_walberla_root(walberla_root):
-   return (walberla_root / 'src' / 'walberla.h').exists()
+    return (walberla_root / 'src' / 'walberla.h').exists()
+
 
 def trace_dependencies(modules, base_modules):
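+    # recursively widen the dependency set until it stops growing, so the
+    # result contains all transitive dependencies of base_modules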
     deps = base_modules.copy()
@@ -16,18 +18,21 @@ def trace_dependencies(modules, base_modules):
     else:
         return trace_dependencies(modules, deps)
 
+
 def get_module_dependencies(module_dir):
     with open(module_dir / 'CMakeLists.txt', 'r') as fin:
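+        # direct dependencies are listed after the DEPENDS keyword in the
+        # module's CMakeLists.txt; BUILD_ONLY_IF_FOUND is a flag, not a module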
         m = re.search(r'DEPENDS([\w\s]*)', fin.read())
         if m is not None:
-            stripped = {x.strip() for x in m.group(1).split(' ') if x!=''}
+            stripped = {x.strip() for x in m.group(1).split(' ') if x != ''}
             return (module_dir.name, {x for x in stripped if (x not in ['', 'BUILD_ONLY_IF_FOUND'])})
     return (module_dir.name, set())
 
+
 def get_dependency_graph(walberla_root):
     modules_dir = walberla_root / 'src'
     modules = (get_module_dependencies(x) for x in modules_dir.iterdir() if x.is_dir())
-    return {i:v for i,v in modules}
+    return {i: v for i, v in modules}
+
 
 def color_dependencies(fout, dependencies, base_module):
     for dep in trace_dependencies(dependencies, {base_module}):
@@ -39,11 +44,13 @@ def color_dependencies(fout, dependencies, base_module):
             continue
         fout.write(f'  {dep}[fillcolor=brown1, style="filled"];\n')
 
+
 def write_dependency_graph(fout, dependencies):
     for module, deps in dependencies.items():
         for dep in deps:
             fout.write(f'  {dep} -> {module};\n')
 
+
 if __name__ == '__main__':
     parser = argparse.ArgumentParser(description='Generates a dot file containing the waLBerla dependency graph.')
     parser.add_argument('-d', default='..', help='waLBerla root directory', type=Path)
@@ -62,5 +69,3 @@ if __name__ == '__main__':
         if args.trace is not None:
             color_dependencies(fout, dependencies, args.trace)
         fout.write('}\n')
-
-
-- 
GitLab