From 67e53365441032070b260fc5454d648795059fd3 Mon Sep 17 00:00:00 2001
From: markus <markus.holzer@fau.de>
Date: Fri, 17 Jul 2020 11:59:03 +0200
Subject: [PATCH] Adapted flake8 formatting for apps

---
 .flake8                                       |   2 +
 apps/benchmarks/FieldCommunication/config.py  |   3 +-
 .../PhaseFieldAllenCahn/multiphase_codegen.py |   2 +-
 .../PhaseFieldAllenCahn/profiling.py          |   1 -
 apps/benchmarks/UniformGrid/ecmModel.py       |  87 +--
 .../UniformGrid/mergeSqliteFiles.py           | 114 ++--
 apps/benchmarks/UniformGrid/plot.py           | 524 +++++++++---------
 .../UniformGridGPU/UniformGridGPU.py          |  16 +-
 .../UniformGridGPU/UniformGridGPU_AA.py       |   8 +-
 .../simulation_setup/benchmark_configs.py     |  35 +-
 .../UniformGridGenerated.py                   |  28 +-
 .../benchmarks/UniformGridGenerated/params.py |  21 +-
 apps/pythonmodule/setup.py                    |  49 +-
 apps/pythonmodule/stage.py                    |   3 +-
 .../CPU/multiphase_RTI_3D.py                  |   6 +-
 .../CPU/multiphase_codegen.py                 |   6 +-
 .../CPU/multiphase_rising_bubble.py           |   1 +
 .../GPU/multiphase_RTI_3D.py                  |   6 +-
 .../GPU/multiphase_codegen.py                 |   7 +-
 19 files changed, 478 insertions(+), 441 deletions(-)

diff --git a/.flake8 b/.flake8
index 8c53c3999..9bd296133 100644
--- a/.flake8
+++ b/.flake8
@@ -1,3 +1,5 @@
 [flake8]
 max-line-length=120
+exclude=apps/showcases/Mixer/GenerateModule.py, # contains only statements
+        apps/benchmarks/FieldCommunication/config.py # just a config file 
 ignore = W503 C901 E741
diff --git a/apps/benchmarks/FieldCommunication/config.py b/apps/benchmarks/FieldCommunication/config.py
index 56b480f15..154f0b90d 100644
--- a/apps/benchmarks/FieldCommunication/config.py
+++ b/apps/benchmarks/FieldCommunication/config.py
@@ -34,7 +34,8 @@ sng_network = supermuc_network_spread()
 
 class AlreadySimulated:
 
-    def __init__(self, db_file, properties=('processes0*processes1*processes2', 'layout', 'ghostLayers', 'cartesianCommunicator', 'stencil',
+    def __init__(self, db_file, properties=('processes0*processes1*processes2', 'layout', 'ghostLayers',
+                                            'cartesianCommunicator', 'stencil',
                                             'cellsPerBlock0', 'cellsPerBlock1', 'cellsPerBlock2',
                                             'blocksPerProcess', 'localCommunicationMode', 'singleMessage',
                                             'fieldsPdf', 'fieldsPdfOpt', 'fieldsVector', 'fieldsScalar',
diff --git a/apps/benchmarks/PhaseFieldAllenCahn/multiphase_codegen.py b/apps/benchmarks/PhaseFieldAllenCahn/multiphase_codegen.py
index d46056d82..5fd50cf57 100644
--- a/apps/benchmarks/PhaseFieldAllenCahn/multiphase_codegen.py
+++ b/apps/benchmarks/PhaseFieldAllenCahn/multiphase_codegen.py
@@ -147,7 +147,7 @@ sweep_block_size = (TypedSymbol("cudaBlockSize0", np.int32),
 sweep_params = {'block_size': sweep_block_size}
 
 info_header = f"""
-#include "stencil/D3Q{q_phase}.h"\nusing Stencil_phase_T = walberla::stencil::D3Q{q_phase}; 
+#include "stencil/D3Q{q_phase}.h"\nusing Stencil_phase_T = walberla::stencil::D3Q{q_phase};
 #include "stencil/D3Q{q_hydro}.h"\nusing Stencil_hydro_T = walberla::stencil::D3Q{q_hydro};
 """
 
diff --git a/apps/benchmarks/PhaseFieldAllenCahn/profiling.py b/apps/benchmarks/PhaseFieldAllenCahn/profiling.py
index 4d8c9375e..71cdba3f3 100644
--- a/apps/benchmarks/PhaseFieldAllenCahn/profiling.py
+++ b/apps/benchmarks/PhaseFieldAllenCahn/profiling.py
@@ -1,4 +1,3 @@
-import os
 import waLBerla as wlb
 
 
diff --git a/apps/benchmarks/UniformGrid/ecmModel.py b/apps/benchmarks/UniformGrid/ecmModel.py
index 73abc6ccf..0509ed3d0 100755
--- a/apps/benchmarks/UniformGrid/ecmModel.py
+++ b/apps/benchmarks/UniformGrid/ecmModel.py
@@ -3,75 +3,78 @@
 import numpy as np
 import matplotlib.pyplot as plt
 
-
 kernels = dict()
 
+
 class Kernel:
-    
-    def __init__(self,name, cyclesFirstLoop=0, cyclesSecondLoop=0, cyclesRegPerLUP =0):
+
+    def __init__(self, name, cyclesFirstLoop=0, cyclesSecondLoop=0, cyclesRegPerLUP=0):
         self.name = name
         if cyclesRegPerLUP <= 0:
-            self.cyclesFirstLoop  = cyclesFirstLoop
+            self.cyclesFirstLoop = cyclesFirstLoop
             self.cyclesSecondLoop = cyclesSecondLoop
-            self.cyclesRegPerLUP = cyclesFirstLoop + 9* cyclesSecondLoop  
+            self.cyclesRegPerLUP = cyclesFirstLoop + 9 * cyclesSecondLoop
         else:
             self.cyclesRegPerLUP = cyclesRegPerLUP
-         
-        self.cyclesRegPerCacheLine = 8*self.cyclesRegPerLUP
 
-        self.cyclesL1L2 = 3*19*2
-        self.cyclesL2L3 = 3*19*2
-        
+        self.cyclesRegPerCacheLine = 8 * self.cyclesRegPerLUP
+
+        self.cyclesL1L2 = 3 * 19 * 2
+        self.cyclesL2L3 = 3 * 19 * 2
+
         self.freq = 2.7e9
         self.cyclesMem = 305
-        #self.cyclesMem = 191
-     
+        # self.cyclesMem = 191
+
     def mlups(self, processes):
         singleCoreCycles = self.cyclesRegPerCacheLine + self.cyclesL1L2 + self.cyclesL2L3 + self.cyclesMem
-                                     
+
         timeSingleCore = singleCoreCycles / self.freq
-        
+
         mlups = 8 / timeSingleCore * 1e-6
-        
-        #todo         
+
+        # todo
         mlupsMax = 78
-        
-        return min ( processes * mlups, mlupsMax )
-       
-    def plot( self,  divideByProcesses=False,processes=8, label="" ):
-        
-        x = np.arange( 1, processes+1, 1 )
+
+        return min(processes * mlups, mlupsMax)
+
+    def plot(self, divideByProcesses=False, processes=8, label=""):
+
+        x = np.arange(1, processes + 1, 1)
         if divideByProcesses:
-            y = np.array( [ self.mlups(i)/i for i in x ] )
+            y = np.array([self.mlups(i) / i for i in x])
         else:
-            y = np.array( [ self.mlups(i)   for i in x ] )
-        
-        if label=="":
-            label = "ecm\_" + self.name
-        plt.plot( x, y, marker='^', markersize=5, label = label)
-        
+            y = np.array([self.mlups(i) for i in x])
 
-kernels=dict()
+        if label == "":
+            label = "ecm_" + self.name
+        plt.plot(x, y, marker='^', markersize=5, label=label)
 
-#kernels['srt_split'] = Kernel("srt_split", 46, 12 )
 
-kernels['srt_pure']   = Kernel("srt_pure", 40, 8 )
-kernels['trt_split']  = Kernel("trt\_split", 41, 11 )
-kernels['srt_nonopt'] = Kernel("srt_nonopt", cyclesRegPerLUP = 1045)   #SRTStreamCollide.h -  pgo and lto (20cycles first loop, 35 second)
+kernels = dict()
+
+# kernels['srt_split'] = Kernel("srt_split", 46, 12 )
 
-#kernels['trt_pure_intelOpt'] = Kernel("trt_pure_intelOpt", 41/2, 10/2 )  # vectorized (v*pd)
+kernels['srt_pure'] = Kernel("srt_pure", 40, 8)
+kernels['trt_split'] = Kernel("trt_split", 41, 11)
+# SRTStreamCollide.h -  pgo and lto (20cycles first loop, 35 second)
+kernels['srt_nonopt'] = Kernel("srt_nonopt",
+                               cyclesRegPerLUP=1045)
 
 
-def plotAllKernels( divideByProcesses = False ):
+# kernels['trt_pure_intelOpt'] = Kernel("trt_pure_intelOpt", 41/2, 10/2 )  # vectorized (v*pd)
+
+
+def plotAllKernels(divideByProcesses=False):
     for kernel in kernels:
-        kernel.plot( divideByProcesses )
+        kernel.plot(divideByProcesses)
+
+
+def plot(kernelName, divideByProcesses=False, label=""):
+    kernels[kernelName].plot(divideByProcesses, label=label)
 
-def plot( kernelName, divideByProcesses = False, label = ""):
-    kernels[kernelName].plot( divideByProcesses, label=label )
-    
-    
 
 if __name__ == "__main__":
     plotAllKernels()
     plt.legend()
-    plt.show()
\ No newline at end of file
+    plt.show()
diff --git a/apps/benchmarks/UniformGrid/mergeSqliteFiles.py b/apps/benchmarks/UniformGrid/mergeSqliteFiles.py
index d82a6449a..9975bb09b 100755
--- a/apps/benchmarks/UniformGrid/mergeSqliteFiles.py
+++ b/apps/benchmarks/UniformGrid/mergeSqliteFiles.py
@@ -4,85 +4,87 @@ import sqlite3
 import sys
 import shutil
 
-def getColumnNames ( db, tableName, dbName ):
+
+def getColumnNames(db, tableName, dbName):
     cursor = db.cursor()
-    cursor.execute("PRAGMA %s.table_info(%s)"  % (dbName,tableName) )
+    cursor.execute("PRAGMA %s.table_info(%s)" % (dbName, tableName))
     columns = cursor.fetchall()
-    
+
     res = []
     for e in columns:
-        res.append ( (e[1], e[2].upper()) )
-    
+        res.append((e[1], e[2].upper()))
+
     return res
 
 
-def mergeSqliteFiles ( targetFile, fileToMerge ):
-    
-    db  = sqlite3.connect( targetFile )
-    db.execute ('ATTACH "' + fileToMerge + '" AS toMerge')
-    
-    targetColumns  = getColumnNames( db, "runs", "main" )
-    toMergeColumns = getColumnNames( db, "runs", "toMerge" )
-    
+def mergeSqliteFiles(targetFile, fileToMerge):
+    db = sqlite3.connect(targetFile)
+    db.execute('ATTACH "' + fileToMerge + '" AS toMerge')
+
+    targetColumns = getColumnNames(db, "runs", "main")
+    toMergeColumns = getColumnNames(db, "runs", "toMerge")
+
     columnsToCreate = [e for e in toMergeColumns if e not in targetColumns]
-    
+
     for column in columnsToCreate:
-        print "Adding Column %s to run table of %s " % ( column[0], targetFile )
-        db.execute ( "ALTER TABLE main.runs ADD COLUMN %s %s" % ( column[0], column[1] ) )
-        
+        print
+        "Adding Column %s to run table of %s " % (column[0], targetFile)
+        db.execute("ALTER TABLE main.runs ADD COLUMN %s %s" % (column[0], column[1]))
+
     # Fetch all runs from toMerge,
     # check if an entry with same date exists, if not add the run and the timing pool entries
     # to the targetTable
     c = db.cursor()
-    assert( toMergeColumns[0][0] == "runId")
-    columns = [ e[0] for e in toMergeColumns ]
-    columnString        = ",".join( columns     )
-    columnStringNoRunId = ",".join( columns[1:] )
-    
-    query  = 'SELECT %s FROM toMerge.runs WHERE timestamp || " " || random NOT IN ' % (columnString,)
-    query += '( SELECT timestamp || " " || random FROM main.runs )' 
-
-    timingPoolColumnsMain    = getColumnNames ( db, "timingPool", "main"    )
-    timingPoolColumnsToMerge = getColumnNames ( db, "timingPool", "toMerge" )
-    assert ( timingPoolColumnsMain == timingPoolColumnsToMerge )
-    timingPoolColumnNames = [ e[0] for e in timingPoolColumnsMain ]
-    assert ( timingPoolColumnNames[0] == "runId")
-    
+    assert (toMergeColumns[0][0] == "runId")
+    columns = [e[0] for e in toMergeColumns]
+    columnString = ",".join(columns)
+    columnStringNoRunId = ",".join(columns[1:])
+
+    query = 'SELECT %s FROM toMerge.runs WHERE timestamp || " " || random NOT IN ' % (columnString,)
+    query += '( SELECT timestamp || " " || random FROM main.runs )'
+
+    timingPoolColumnsMain = getColumnNames(db, "timingPool", "main")
+    timingPoolColumnsToMerge = getColumnNames(db, "timingPool", "toMerge")
+    assert (timingPoolColumnsMain == timingPoolColumnsToMerge)
+    timingPoolColumnNames = [e[0] for e in timingPoolColumnsMain]
+    assert (timingPoolColumnNames[0] == "runId")
+
     mergedRuns = 0
-    for run in c.execute (query):
+    for run in c.execute(query):
         # Build up insert statement for 'runs' table
-        questionMarkList   = ['?'] * (len(run)-1)
-        questionMarkString = ",".join( questionMarkList )
-        insertStatement = "INSERT INTO main.runs (%s) VALUES (%s);" % ( columnStringNoRunId, questionMarkString ) 
+        questionMarkList = ['?'] * (len(run) - 1)
+        questionMarkString = ",".join(questionMarkList)
+        insertStatement = "INSERT INTO main.runs (%s) VALUES (%s);" % (columnStringNoRunId, questionMarkString)
         # Execute the insert
         insertCursor = db.cursor()
-        insertCursor.execute( insertStatement, run[1:] )
+        insertCursor.execute(insertStatement, run[1:])
         # Insert the corresponding timingPool infos
         insertedRunId = insertCursor.lastrowid
         originalRunId = run[0]
-        
-        timingPoolQuery = "SELECT %s FROM toMerge.timingPool WHERE runId=?" % ( ",".join( timingPoolColumnNames[1:] ) )
+
+        timingPoolQuery = "SELECT %s FROM toMerge.timingPool WHERE runId=?" % (",".join(timingPoolColumnNames[1:]))
         timingPoolInsertCursor = db.cursor()
-        timingPoolQueryCursor  = db.cursor()
-
-        for tp in timingPoolQueryCursor.execute ( timingPoolQuery, ( originalRunId,) ): 
-            questionMarkList   = ['?'] * len(timingPoolColumnNames) 
-            questionMarkString = ",".join( questionMarkList )
-            insertQuery = "INSERT INTO main.timingPool (%s) VALUES (%s)" % (",".join(timingPoolColumnNames), questionMarkString)
-            timingPoolInsertCursor.execute ( insertQuery, (insertedRunId,) + tp )
-            
-        mergedRuns = mergedRuns +1
-        
-    print "Merged %s runs from %s to %s " % ( mergedRuns, fileToMerge, targetFile )
+        timingPoolQueryCursor = db.cursor()
+
+        for tp in timingPoolQueryCursor.execute(timingPoolQuery, (originalRunId,)):
+            questionMarkList = ['?'] * len(timingPoolColumnNames)
+            questionMarkString = ",".join(questionMarkList)
+            insertQuery = "INSERT INTO main.timingPool (%s) VALUES (%s)" % (",".join(timingPoolColumnNames),
+                                                                            questionMarkString)
+            timingPoolInsertCursor.execute(insertQuery, (insertedRunId,) + tp)
+
+        mergedRuns = mergedRuns + 1
+
+    print("Merged %s runs from %s to %s " % (mergedRuns, fileToMerge, targetFile))
     db.commit()
     db.close()
 
 
-if ( len(sys.argv) < 3 ):
-    print "Usage: mergeSqliteFiles resultFile <filesToMerge>"
+if len(sys.argv) < 3:
+    print("Usage: mergeSqliteFiles resultFile <filesToMerge>")
 else:
-    print "Copying " + sys.argv[2] + " to " + sys.argv[1]
-    shutil.copy( sys.argv[2], sys.argv[1] )
-    for i in range ( 3, len(sys.argv) ):
-     	print "Merging " + sys.argv[i] 
-    	mergeSqliteFiles ( sys.argv[1], sys.argv[i] )
+    print("Copying " + sys.argv[2] + " to " + sys.argv[1])
+    shutil.copy(sys.argv[2], sys.argv[1])
+    for i in range(3, len(sys.argv)):
+        print("Merging " + sys.argv[i])
+        mergeSqliteFiles(sys.argv[1], sys.argv[i])
diff --git a/apps/benchmarks/UniformGrid/plot.py b/apps/benchmarks/UniformGrid/plot.py
index fcdedfd0a..877dc3635 100755
--- a/apps/benchmarks/UniformGrid/plot.py
+++ b/apps/benchmarks/UniformGrid/plot.py
@@ -1,7 +1,6 @@
 #!/usr/bin/python
 
 import sqlite3
-import numpy as np
 import sys
 import re
 import math
@@ -11,270 +10,283 @@ from optparse import OptionParser
 
 import ecmModel
 
+# Defining graph types
 
-####################################################################################################################################################
-############################             Defining graph types                                         ##############################################
-####################################################################################################################################################
 
+graphs = dict(nonOpt=["cores,MAX(MLUPS)",
+                      "SPLIT IS NULL AND PURE IS NULL AND CELL_OP IS NULL AND "
+                      + "COMPRESSED IS NULL AND D3Q19_OPT IS NULL"],
+              split=["cores,MAX(MLUPS)", "SPLIT=1"],
+              pure=["cores,MAX(MLUPS)", "PURE=1"],
+              normal=["cores,MAX(MLUPS)", "CELL_OP=1", ],
+              compressed=["cores,MAX(MLUPS)", "COMPRESSED=1"],
+              opt=["cores,MAX(MLUPS)", "D3Q19_OPT=1"],
+              split_lbm=["cores,MAX(percentage)", "SPLIT=1  AND name=\"Timeloop\" AND sweep LIKE \"LBM%\""],
+              split_comm=["cores,MAX(percentage)", "SPLIT=1  AND name=\"Communication\" AND sweep LIKE \"%MPI%\""],
+              split_time=["cores,MAX(average)  ", "SPLIT=1  AND name=\"Timeloop\" AND sweep LIKE \"LBM%\""]
+              )
 
+# Defining all options
 
-graphs = dict( nonOpt     = [ "cores,MAX(MLUPS)", "SPLIT IS NULL AND PURE IS NULL AND CELL_OP IS NULL AND COMPRESSED IS NULL AND D3Q19_OPT IS NULL" ],
-               split      = [ "cores,MAX(MLUPS)", "SPLIT=1" ],
-               pure       = [ "cores,MAX(MLUPS)", "PURE=1"  ],
-               normal     = [ "cores,MAX(MLUPS)", "CELL_OP=1",  ],               
-               compressed = [ "cores,MAX(MLUPS)", "COMPRESSED=1"],
-               opt        = [ "cores,MAX(MLUPS)", "D3Q19_OPT=1"],
-               split_lbm  = [ "cores,MAX(percentage)", "SPLIT=1  AND name=\"Timeloop\" AND sweep LIKE \"LBM%\"" ],
-               split_comm = [ "cores,MAX(percentage)", "SPLIT=1  AND name=\"Communication\" AND sweep LIKE \"%MPI%\"" ],
-               split_time = [ "cores,MAX(average)  " , "SPLIT=1  AND name=\"Timeloop\" AND sweep LIKE \"LBM%\"" ]
-              )
 
+def addCommonOptions(parser):
+    parser.add_option("", "--smIntel", action="store_true", default=False, help="On SuperMUC using IntelMPI")
+    parser.add_option("", "--smIbm", action="store_true", default=False, help="On SuperMUC using IBM MPI")
+    parser.add_option("", "--zyxf", action="store_true", default=False, help="Only Layout zyxf")
+    parser.add_option("", "--fzyx", action="store_true", default=False, help="Only Layout fzyx")
+    parser.add_option("", "--trt", action="store_true", default=False, help="Only TRT")
+    parser.add_option("", "--srt", action="store_true", default=False, help="Only SRT")
+
+    parser.add_option("", "--lto", action="store_true", default=False, help="Only where link time optimization was on")
+    parser.add_option("", "--noLto", action="store_true", default=False,
+                      help="Only where link time optimization was off")
+
+    parser.add_option("", "--pgo", action="store_true", default=False,
+                      help="Only where profile guided optimization was on")
+    parser.add_option("", "--noPgo", action="store_true", default=False,
+                      help="Only where profile guided optimization was off")
+
+    parser.add_option("", "--intelOpt", action="store_true", default=False, help="Only where intel pragmas were used")
+    parser.add_option("", "--noIntelOpt", action="store_true", default=False, help="Only where intel pragmas not used")
+
+    parser.add_option("", "--pinCore", action="store_true", default=False, help="First fill up socket.")
+    parser.add_option("", "--pinMCM", action="store_true", default=False, help="Socket round robin")
+
+    parser.add_option("-t", "--totalMLUPS", action="store_true", default=False,
+                      help="Show total MLUPS instead of MLUPS/cores")
+
+    parser.add_option("-w", "--where", help="SQL Where or Group By clause", default="")
+
+    parser.add_option("-p", "--printDataset", action="store_true", default=False,
+                      help="Prints the dataset that is plotted")
 
-####################################################################################################################################################
-############################             Defining all options                                         ##############################################
-####################################################################################################################################################
-
-    
-def addCommonOptions( parser ):
-    parser.add_option("", "--smIntel", action="store_true", default=False, help="On SuperMUC using IntelMPI"  )
-    parser.add_option("", "--smIbm",   action="store_true", default=False, help="On SuperMUC using IBM MPI"  )
-    parser.add_option("", "--zyxf",    action="store_true", default=False, help="Only Layout zyxf" )
-    parser.add_option("", "--fzyx",    action="store_true", default=False, help="Only Layout fzyx" )
-    parser.add_option("", "--trt",     action="store_true", default=False, help="Only TRT"  )
-    parser.add_option("", "--srt",     action="store_true", default=False, help="Only SRT"  )
-   
-    parser.add_option("", "--lto",     action="store_true", default=False, help="Only where link time optimization was on")
-    parser.add_option("", "--noLto",   action="store_true", default=False, help="Only where link time optimization was off")
-    
-    parser.add_option("", "--pgo",     action="store_true", default=False, help="Only where profile guided optimization was on")
-    parser.add_option("", "--noPgo",   action="store_true", default=False, help="Only where profile guided optimization was off")
-  
-    parser.add_option("", "--intelOpt",  action="store_true", default=False, help="Only where intel pragmas were used")
-    parser.add_option("", "--noIntelOpt",action="store_true", default=False, help="Only where intel pragmas not used" )
-
-  
-    parser.add_option("", "--pinCore", action="store_true", default=False, help="First fill up socket."  )
-    parser.add_option("", "--pinMCM",  action="store_true", default=False, help="Socket round robin"  )
-        
-    parser.add_option("-t", "--totalMLUPS",  action="store_true", default=False, help="Show total MLUPS instead of MLUPS/cores"  )
-        
-    parser.add_option("-w", "--where",  help="SQL Where or Group By clause", default = "")
-
-    parser.add_option("-p", "--printDataset",  action="store_true", default=False,  help="Prints the dataset that is plotted")
-    
-    parser.add_option("",   "--legend", default="", help="Legend entry for the graph" )
-
-
-def addOuterOptions( parser ):
-    parser.add_option("-f", "--file",  default="timing.sqlite",  help="Sqlite3 File with timing data")
-    parser.add_option("",   "--title", default="",               help="Title of Graph" )
-    parser.add_option("-s", "--save",  default="",               help="Print graph to file")
-    parser.add_option("",   "--xlabel",default="",               help="Label of x Axis")
-    parser.add_option("",   "--ylabel",default="",               help="Label of y Axis")
-    parser.add_option("",   "--figureWidth", default="",         help="When using --save, the width has to be specified in pts")
-    parser.add_option("",   "--figureHeight", default="",        help="When using --save, the height has to be specified in pts")
-    parser.add_option("",   "--legendPos", default="",           help="Position of legend (matplotlib syntax)")
-
-def addInnerOptions( parser ):
-    parser.add_option("-g", "--graph",  default="", help="Graph from database (measured)" )
-    parser.add_option("-e", "--ecm",    default="",  help="Plot ECM model for given kernel name")
-    
-
-def stringFromOptions( opt ):
+    parser.add_option("", "--legend", default="", help="Legend entry for the graph")
+
+
+def addOuterOptions(parser):
+    parser.add_option("-f", "--file", default="timing.sqlite", help="Sqlite3 File with timing data")
+    parser.add_option("", "--title", default="", help="Title of Graph")
+    parser.add_option("-s", "--save", default="", help="Print graph to file")
+    parser.add_option("", "--xlabel", default="", help="Label of x Axis")
+    parser.add_option("", "--ylabel", default="", help="Label of y Axis")
+    parser.add_option("", "--figureWidth", default="", help="When using --save, the width has to be specified in pts")
+    parser.add_option("", "--figureHeight", default="", help="When using --save, the height has to be specified in pts")
+    parser.add_option("", "--legendPos", default="", help="Position of legend (matplotlib syntax)")
+
+
+def addInnerOptions(parser):
+    parser.add_option("-g", "--graph", default="", help="Graph from database (measured)")
+    parser.add_option("-e", "--ecm", default="", help="Plot ECM model for given kernel name")
+
+
+def stringFromOptions(opt):
     c = ""
-    if ( opt.zyxf ):
+    if opt.zyxf:
         c += "FZYX "
-    elif ( opt.fzyx ): 
+    elif opt.fzyx:
         c += "ZYXF"
     else:
-        c+="ALL"
-    
-    c+= "|"
-    
-    if ( opt.trt ): 
+        c += "ALL"
+
+    c += "|"
+
+    if opt.trt:
         c += "TRT"
-    elif ( opt.srt ): 
+    elif opt.srt:
         c += "SRT"
     else:
-        c+= "ALL"
-    
-    c+="|" 
-    
-    if ( opt.smIntel ): 
-        c += "SmIntel|"  
-    if ( opt.smIbm ): 
-        c += "SmIBM"  
-    
-    
-    if opt.where and len( opt.where ) > 0:
-       c += opt.where + "|"
-    
-    if opt.pinCore: c+="pinCore|"
-    if opt.pinMCM:  c+="pinMCM|"
-
-    if opt.lto:     c+="lto|"
-    if opt.noLto:   c+="noLto|"
-    
-    if opt.pgo:     c+="pgo|"
-    if opt.noPgo:   c+="noPgo|"
-
-    if opt.intelOpt:   c+="intelOpt|"
-    if opt.noIntelOpt: c+="noIntelOpt|"
+        c += "ALL"
+
+    c += "|"
+
+    if opt.smIntel:
+        c += "SmIntel|"
+    if opt.smIbm:
+        c += "SmIBM"
+
+    if opt.where and len(opt.where) > 0:
+        c += opt.where + "|"
+
+    if opt.pinCore:
+        c += "pinCore|"
+    if opt.pinMCM:
+        c += "pinMCM|"
+
+    if opt.lto:
+        c += "lto|"
+    if opt.noLto:
+        c += "noLto|"
+
+    if opt.pgo:
+        c += "pgo|"
+    if opt.noPgo:
+        c += "noPgo|"
+
+    if opt.intelOpt:
+        c += "intelOpt|"
+    if opt.noIntelOpt:
+        c += "noIntelOpt|"
 
     if hasattr(opt, 'graph') and opt.graph:
-        c+= opt.graph
-       
-    
+        c += opt.graph
+
     return c
 
 
-def whereClauseFromOptions ( opt ):
+def whereClauseFromOptions(opt):
     c = ""
-    if ( opt.zyxf )   : c += "FZYX IS NULL AND "
-    if ( opt.fzyx )   : c += "FZYX = 1 AND "
-     
-    if ( opt.trt )    : c += "TRT=1 AND "
-    if ( opt.srt )    : c += "TRT IS NULL AND " 
-    
-    if ( opt.smIntel ): c += "buildMachine LIKE \"supermuc_intel%\" AND machine LIKE \"i__r__a__\" AND "  
-    
-    if ( opt.smIbm )  : c += "buildMachine LIKE \"supermuc_ibm%\" AND machine LIKE \"i__r__a__\" AND "  
-    
-    if opt.pinCore: c+="MP_TASK_AFFINITY='CORE' AND "
-    if opt.pinMCM:  c+="MP_TASK_AFFINITY='MCM' AND "
-
-    if opt.lto:     c+="compilerFlags LIKE '%-ipo%' AND "
-    if opt.noLto:   c+="compilerFlags NOT LIKE '%-ipo%' AND "
-    
-    if opt.pgo:     c+="compilerFlags LIKE '%-prof-use%' AND "
-    if opt.noPgo:   c+="compilerFlags NOT LIKE '%-prof-use%' AND "
-
-    if opt.intelOpt:   c+="intelCompilerOpt=1 AND "
-    if opt.noIntelOpt: c+="intelCompilerOpt IS NULL AND "
-    
-
-    if opt.where and len( opt.where ) > 0:
-       c += opt.where + " AND "
-    
-    return c
+    if opt.zyxf:
+        c += "FZYX IS NULL AND "
+    if opt.fzyx:
+        c += "FZYX = 1 AND "
+
+    if opt.trt:
+        c += "TRT=1 AND "
+    if opt.srt:
+        c += "TRT IS NULL AND "
+
+    if opt.smIntel:
+        c += "buildMachine LIKE \"supermuc_intel%\" AND machine LIKE \"i__r__a__\" AND "
+
+    if opt.smIbm:
+        c += "buildMachine LIKE \"supermuc_ibm%\" AND machine LIKE \"i__r__a__\" AND "
 
+    if opt.pinCore:
+        c += "MP_TASK_AFFINITY='CORE' AND "
+    if opt.pinMCM:
+        c += "MP_TASK_AFFINITY='MCM' AND "
 
+    if opt.lto:
+        c += "compilerFlags LIKE '%-ipo%' AND "
+    if opt.noLto:
+        c += "compilerFlags NOT LIKE '%-ipo%' AND "
 
+    if opt.pgo:
+        c += "compilerFlags LIKE '%-prof-use%' AND "
+    if opt.noPgo:
+        c += "compilerFlags NOT LIKE '%-prof-use%' AND "
 
-####################################################################################################################################################
-############################             Database and plotting                                        ##############################################
-####################################################################################################################################################
+    if opt.intelOpt:
+        c += "intelCompilerOpt=1 AND "
+    if opt.noIntelOpt:
+        c += "intelCompilerOpt IS NULL AND "
 
+    if opt.where and len(opt.where) > 0:
+        c += opt.where + " AND "
 
+    return c
+
+# Database and plotting
 
-def getListFromDatabase( databaseFile, graphKeyword, whereClause ):
-    db = sqlite3.connect( databaseFile )
+
+def getListFromDatabase(databaseFile, graphKeyword, whereClause):
+    db = sqlite3.connect(databaseFile)
     db.row_factory = sqlite3.Row
-    query  = "SELECT " + graphs[graphKeyword][0] 
+    query = "SELECT " + graphs[graphKeyword][0]
     query += " FROM runs JOIN timingPool ON runs.runId = timingPool.runId "
     if not whereClause or len(whereClause) == 0:
         whereClause = " 1 "
-        
-    combinedWhere = " WHERE " + graphs[graphKeyword][1] + " AND " + whereClause +  "GROUP BY cores"
+
+    combinedWhere = " WHERE " + graphs[graphKeyword][1] + " AND " + whereClause + "GROUP BY cores"
     query += combinedWhere
-    print combinedWhere;
+    print(combinedWhere)
     c = db.cursor()
-    c.execute( query )
+    c.execute(query)
     return c.fetchall()
-                  
-     
-def numericPlot( dataset, legendLabel = "" , divideByProcesses=True):
-    assert ( len(dataset) > 0 )
-    assert ( isinstance(dataset[0][0], (int, long, float) ) )
-    columns = len( dataset[0] )
+
+
+def numericPlot(dataset, legendLabel="", divideByProcesses=True):
+    assert (len(dataset) > 0)
+    assert (isinstance(dataset[0][0], (int, float)))
+    columns = len(dataset[0])
     columnNames = dataset[0].keys()
-    
-    xVals = [ e[0] for e in dataset ]
-    
+
+    xVals = [e[0] for e in dataset]
+
     plots = []
-    for i in range(1,columns):
-        yVals = [ e[i] for e in dataset ]
+    for i in range(1, columns):
+        yVals = [e[i] for e in dataset]
         if divideByProcesses:
-            yVals = [ yVals[i] / xVals[i] for i in range(0,len(yVals)) ]
-            
+            yVals = [yVals[i] / xVals[i] for i in range(0, len(yVals))]
+
         if legendLabel == "":
             legendLabel = columnNames[i]
-            
-        p, = plt.plot( xVals, yVals, marker='^', markersize=5, label = legendLabel )
-        plots.append( p )
-        
-    plt.xlabel ( "Cores" )
-    plt.ylabel ( "MLUPS" )
-    #plt.gca().yaxis.grid(color='gray', linestyle='dashed')
-    #plt.gca().xaxis.grid(color='gray', linestyle='dashed')
 
+        p, = plt.plot(xVals, yVals, marker='^', markersize=5, label=legendLabel)
+        plots.append(p)
 
-def printDataset( dataset ):
+    plt.xlabel("Cores")
+    plt.ylabel("MLUPS")
+    # plt.gca().yaxis.grid(color='gray', linestyle='dashed')
+    # plt.gca().xaxis.grid(color='gray', linestyle='dashed')
+
+
+def printDataset(dataset):
     header = dataset[0].keys()
-    row_format ="{:<30}" * (len(header) )
-    print row_format.format( *header).upper()
-    
-    for  row in dataset:
-        print row_format.format( *row)
+    row_format = "{:<30}" * (len(header))
+    print(row_format.format(*header).upper())
 
+    for row in dataset:
+        print(row_format.format(*row))
 
-def matplotlibLatexSetup( figureWidth, figureHeight ):
+
+def matplotlibLatexSetup(figureWidth, figureHeight):
     ieee = False
     if ieee:
-        params ={\
+        params = {
             'backend': 'GTKAgg',
-            
+
             'font.family': 'serif',
             'font.serif': ['Times', 'Palatino', 'New Century Schoolbook', 'Bookman', 'Computer Modern Roman'],
-            'font.sans-serif' : ['Helvetica', 'Avant Garde', 'Computer Modern Sans serif'],
-        #font.cursive       : Zapf Chancery
-        #font.monospace     : Courier, Computer Modern Typewriter
+            'font.sans-serif': ['Helvetica', 'Avant Garde', 'Computer Modern Sans serif'],
+            # font.cursive       : Zapf Chancery
+            # font.monospace     : Courier, Computer Modern Typewriter
             'text.usetex': True,
-            
+
             'axes.labelsize': 9,
             'axes.linewidth': .75,
-            
+
             'figure.figsize': (3.5, 2.5),
-            'figure.subplot.left' : 0.175,
+            'figure.subplot.left': 0.175,
             'figure.subplot.right': 0.95,
             'figure.subplot.bottom': 0.15,
             'figure.subplot.top': .95,
-            
-            'figure.dpi':150,
-            
+
+            'figure.dpi': 150,
+
             'text.fontsize': 9,
             'legend.fontsize': 8,
             'xtick.labelsize': 8,
             'ytick.labelsize': 8,
-            
-            'lines.linewidth':.75,
-            'savefig.dpi':600,
-            }
+
+            'lines.linewidth': .75,
+            'savefig.dpi': 600,
+        }
     else:
         # Input comes usually directly from latex, and looks like "124.0pt"
         # first the "pt" is removed, then it is converted to a floating point number
-        maxFigHeight = float( figureWidth .replace("pt","") )
-        maxFigWidth  = float( figureHeight.replace("pt","") )
-        
+        maxFigHeight = float(figureWidth.replace("pt", ""))
+        maxFigWidth = float(figureHeight.replace("pt", ""))
+
         # Convert pt to inch
-        inches_per_pt = 1.0/72.27               
-        maxFigHeight  *= inches_per_pt             
-        maxFigWidth   *= inches_per_pt
-        
-        golden_mean = (math.sqrt(5)-1.0)/2.0    # Aesthetic ratio
-        
+        inches_per_pt = 1.0 / 72.27
+        maxFigHeight *= inches_per_pt
+        maxFigWidth *= inches_per_pt
+
+        golden_mean = (math.sqrt(5) - 1.0) / 2.0  # Aesthetic ratio
+
         if maxFigWidth * golden_mean < maxFigHeight:
             height = maxFigWidth * golden_mean
             width = maxFigWidth
         else:
             height = maxFigHeight
             width = maxFigHeight / golden_mean
-        
-        fig_size =  [width,height]
+
+        fig_size = [width, height]
         params = {'backend': 'ps',
                   'font.family': 'serif',
                   'font.serif': ['Times', 'Palatino', 'New Century Schoolbook', 'Bookman', 'Computer Modern Roman'],
-                  'font.sans-serif' : ['Helvetica', 'Avant Garde', 'Computer Modern Sans serif'],
+                  'font.sans-serif': ['Helvetica', 'Avant Garde', 'Computer Modern Sans serif'],
                   'axes.labelsize': 10,
                   'text.fontsize': 10,
                   'legend.fontsize': 8,
@@ -282,116 +294,116 @@ def matplotlibLatexSetup( figureWidth, figureHeight ):
                   'ytick.labelsize': 8,
                   'text.usetex': True,
                   'figure.figsize': fig_size,
-                  'savefig.dpi':600,
-                  'antialiased': True,}
+                  'savefig.dpi': 600,
+                  'antialiased': True, }
 
     plt.rcParams.update(params)
-    plt.rc('lines', aa = True)
-####################################################################################################################################################
-############################             Option parsing                                              ###############################################
-####################################################################################################################################################
+    plt.rc('lines', aa=True)
+
+# Option parsing
 
 
 def bracketsAsExtraElements(list):
     """ Transforms the list [ '[abc', 'def]' ] to [ '[', 'abc', 'def', ']' ] """
     processedList = []
     for e in list:
-        splitted = re.split('(\[|\])', e)
-        while '' in splitted:  splitted.remove('') 
-        processedList.extend( splitted )
-        
+        # raw string so the regex escapes are not treated as string escapes (fixes W605 properly)
+        splitted = re.split(r'(\[|\])', e)
+        while '' in splitted:
+            splitted.remove('')
+        processedList.extend(splitted)
+
     return processedList
 
 
-def parseList( list ):
-    list = bracketsAsExtraElements( list )
-    outerList = [] 
+def parseList(list):
+    list = bracketsAsExtraElements(list)
+    outerList = []
     subLists = []
-    
+
     curSubList = None
     for element in list:
-        
+
         if element == "[":
-            if not curSubList == None:
-                raise ValueError( "Two opening brackets" ) 
-            curSubList = []  
+            if curSubList is not None:
+                raise ValueError("Two opening brackets")
+            curSubList = []
         elif element == "]":
-            if curSubList == None:
+            if curSubList is None:
                 raise ValueError("Closing bracket without opening bracket")
-            subLists.append( curSubList )
+            subLists.append(curSubList)
             curSubList = None
         else:
-            if curSubList == None:
-                outerList.append( element )
+            if curSubList is None:
+                outerList.append(element)
             else:
-                curSubList.append( element )
-    
-    if not curSubList  == None:
-        raise ValueError( "Missing closing bracket")
-    
-    return ( outerList, subLists )
-    
-    
+                curSubList.append(element)
+
+    if curSubList is not None:
+        raise ValueError("Missing closing bracket")
+
+    return outerList, subLists
+
 
 outerParser = OptionParser()
-addCommonOptions( outerParser )
-addOuterOptions( outerParser )
+addCommonOptions(outerParser)
+addOuterOptions(outerParser)
 
 innerParser = OptionParser()
-addCommonOptions( innerParser )
-addInnerOptions( innerParser )
+addCommonOptions(innerParser)
+addInnerOptions(innerParser)
 
-(outerList,innerLists) = parseList( sys.argv )
+(outerList, innerLists) = parseList(sys.argv)
 
-
-(outerOptions, outerArgs) = outerParser.parse_args( outerList )
-outerWhereClause = whereClauseFromOptions( outerOptions )
+(outerOptions, outerArgs) = outerParser.parse_args(outerList)
+outerWhereClause = whereClauseFromOptions(outerOptions)
 
 # Matplotlib setup
 if outerOptions.save != "":
-    matplotlibLatexSetup( outerOptions.figureWidth, outerOptions.figureHeight )
+    matplotlibLatexSetup(outerOptions.figureWidth, outerOptions.figureHeight)
 
 legend = []
 for innerList in innerLists:
-    ( innerOptions,innerArgs ) = innerParser.parse_args( innerList )
-    
+    (innerOptions, innerArgs) = innerParser.parse_args(innerList)
+
     if innerOptions.ecm != "":
-        ecmModel.plot( innerOptions.ecm, not outerOptions.totalMLUPS, innerOptions.legend )
+        ecmModel.plot(innerOptions.ecm, not outerOptions.totalMLUPS, innerOptions.legend)
     else:
-        innerWhereClause = whereClauseFromOptions( innerOptions )
-        dataset = getListFromDatabase ( outerOptions.file, innerOptions.graph, outerWhereClause + innerWhereClause + " 1 ")
-        
+        innerWhereClause = whereClauseFromOptions(innerOptions)
+        dataset = getListFromDatabase(outerOptions.file, innerOptions.graph,
+                                      outerWhereClause + innerWhereClause + " 1 ")
+
         if len(dataset) == 0:
-            print "Empty"
+            print("Empty")
             continue
-        
-        if ( innerOptions.printDataset or outerOptions.printDataset ):
-            printDataset( dataset )
-        
-        
-        if innerOptions.legend == "" : innerOptions.legend = stringFromOptions( innerOptions )
-        numericPlot( dataset,  innerOptions.legend, not (outerOptions.totalMLUPS or innerOptions.totalMLUPS) )
+
+        if innerOptions.printDataset or outerOptions.printDataset:
+            printDataset(dataset)
+
+        if innerOptions.legend == "":
+            innerOptions.legend = stringFromOptions(innerOptions)
+        numericPlot(dataset, innerOptions.legend, not (outerOptions.totalMLUPS or innerOptions.totalMLUPS))
 
 # Title
 if outerOptions.title == "auto":
     outerOptions.title = stringFromOptions(outerOptions)
 
-if outerOptions.title  != "": 
-    plt.title( outerOptions.title ) 
-    
+if outerOptions.title != "":
+    plt.title(outerOptions.title)
+
 # Axis labels
-if outerOptions.xlabel != "": plt.xlabel( outerOptions.xlabel )
-if outerOptions.ylabel != "": plt.ylabel( outerOptions.ylabel )
+if outerOptions.xlabel != "":
+    plt.xlabel(outerOptions.xlabel)
+if outerOptions.ylabel != "":
+    plt.ylabel(outerOptions.ylabel)
 
 # Legend
 if outerOptions.legendPos != "":
-    plt.legend( loc = outerOptions.legendPos )
+    plt.legend(loc=outerOptions.legendPos)
 else:
-    plt.legend( loc = 'upper left' )
-
+    plt.legend(loc='upper left')
 
 if outerOptions.save != "":
-    plt.savefig( outerOptions.save + ".pdf" )
+    plt.savefig(outerOptions.save + ".pdf")
 else:
     plt.show()
-
diff --git a/apps/benchmarks/UniformGridGPU/UniformGridGPU.py b/apps/benchmarks/UniformGridGPU/UniformGridGPU.py
index 98eed9e8e..c1371278a 100644
--- a/apps/benchmarks/UniformGridGPU/UniformGridGPU.py
+++ b/apps/benchmarks/UniformGridGPU/UniformGridGPU.py
@@ -3,7 +3,7 @@ import numpy as np
 import pystencils as ps
 from lbmpy.creationfunctions import create_lb_method, create_lb_update_rule, create_lb_collision_rule
 from lbmpy.boundaries import NoSlip, UBB
-from lbmpy.fieldaccess import StreamPullTwoFieldsAccessor, StreamPushTwoFieldsAccessor
+from lbmpy.fieldaccess import StreamPullTwoFieldsAccessor
 from pystencils_walberla import generate_pack_info_from_kernel
 from lbmpy_walberla import generate_lattice_model, generate_boundary
 from pystencils_walberla import CodeGeneration, generate_sweep
@@ -45,7 +45,8 @@ options_dict = {
     'mrt_full': {
         'method': 'mrt',
         'stencil': 'D3Q19',
-        'relaxation_rates': [omega_fill[0], omega, omega_fill[1], omega_fill[2], omega_fill[3], omega_fill[4], omega_fill[5]],
+        'relaxation_rates': [omega_fill[0], omega, omega_fill[1], omega_fill[2],
+                             omega_fill[3], omega_fill[4], omega_fill[5]],
     },
     'entropic': {
         'method': 'mrt',
@@ -77,7 +78,7 @@ options_dict = {
 }
 
 info_header = """
-#include "stencil/D3Q{q}.h"\nusing Stencil_T = walberla::stencil::D3Q{q}; 
+#include "stencil/D3Q{q}.h"\nusing Stencil_T = walberla::stencil::D3Q{q};
 const char * infoStencil = "{stencil}";
 const char * infoConfigName = "{configName}";
 const bool infoCseGlobal = {cse_global};
@@ -87,7 +88,7 @@ const bool infoCsePdfs = {cse_pdfs};
 
 with CodeGeneration() as ctx:
     accessor = StreamPullTwoFieldsAccessor()
-    #accessor = StreamPushTwoFieldsAccessor()
+    # accessor = StreamPushTwoFieldsAccessor()
     assert not accessor.is_inplace, "This app does not work for inplace accessors"
 
     common_options = {
@@ -118,7 +119,7 @@ with CodeGeneration() as ctx:
         options['stencil'] = 'D3Q27'
 
     stencil_str = options['stencil']
-    q = int(stencil_str[stencil_str.find('Q')+1:])
+    q = int(stencil_str[stencil_str.find('Q') + 1:])
     pdfs, velocity_field = ps.fields("pdfs({q}), velocity(3) : double[3D]".format(q=q), layout='fzyx')
     options['optimization']['symbolic_field'] = pdfs
 
@@ -143,7 +144,8 @@ with CodeGeneration() as ctx:
     # CPU lattice model - required for macroscopic value computation, VTK output etc.
     options_without_opt = options.copy()
     del options_without_opt['optimization']
-    generate_lattice_model(ctx, 'UniformGridGPU_LatticeModel', create_lb_collision_rule(lb_method=lb_method, **options_without_opt))
+    generate_lattice_model(ctx, 'UniformGridGPU_LatticeModel', create_lb_collision_rule(lb_method=lb_method,
+                                                                                        **options_without_opt))
 
     # gpu LB sweep & boundaries
     generate_sweep(ctx, 'UniformGridGPU_LbKernel', update_rule,
@@ -158,7 +160,7 @@ with CodeGeneration() as ctx:
     setter_assignments = macroscopic_values_setter(lb_method, velocity=velocity_field.center_vector,
                                                    pdfs=pdfs.center_vector, density=1)
     getter_assignments = macroscopic_values_getter(lb_method, velocity=velocity_field.center_vector,
-                                                   pdfs=pdfs.center_vector,  density=None)
+                                                   pdfs=pdfs.center_vector, density=None)
     generate_sweep(ctx, 'UniformGridGPU_MacroSetter', setter_assignments)
     generate_sweep(ctx, 'UniformGridGPU_MacroGetter', getter_assignments)
 
diff --git a/apps/benchmarks/UniformGridGPU/UniformGridGPU_AA.py b/apps/benchmarks/UniformGridGPU/UniformGridGPU_AA.py
index 64ccfa6ac..1fb6261d3 100644
--- a/apps/benchmarks/UniformGridGPU/UniformGridGPU_AA.py
+++ b/apps/benchmarks/UniformGridGPU/UniformGridGPU_AA.py
@@ -56,7 +56,7 @@ options_dict = {
 
 
 info_header = """
-#include "stencil/D3Q{q}.h"\nusing Stencil_T = walberla::stencil::D3Q{q}; 
+#include "stencil/D3Q{q}.h"\nusing Stencil_T = walberla::stencil::D3Q{q};
 const char * infoStencil = "{stencil}";
 const char * infoConfigName = "{configName}";
 const bool infoCseGlobal = {cse_global};
@@ -113,8 +113,10 @@ with CodeGeneration() as ctx:
     generate_sweep(ctx, 'UniformGridGPU_AA_MacroGetter', getter_assignments)
 
     # communication
-    generate_pack_info_from_kernel(ctx, 'UniformGridGPU_AA_PackInfoPull', update_rules['Odd'], kind='pull', target='gpu')
-    generate_pack_info_from_kernel(ctx, 'UniformGridGPU_AA_PackInfoPush', update_rules['Odd'], kind='push', target='gpu')
+    generate_pack_info_from_kernel(ctx, 'UniformGridGPU_AA_PackInfoPull', update_rules['Odd'],
+                                   kind='pull', target='gpu')
+    generate_pack_info_from_kernel(ctx, 'UniformGridGPU_AA_PackInfoPush', update_rules['Odd'],
+                                   kind='push', target='gpu')
 
     infoHeaderParams = {
         'stencil': stencil_str,
diff --git a/apps/benchmarks/UniformGridGPU/simulation_setup/benchmark_configs.py b/apps/benchmarks/UniformGridGPU/simulation_setup/benchmark_configs.py
index ca66b364c..5fbf36a94 100755
--- a/apps/benchmarks/UniformGridGPU/simulation_setup/benchmark_configs.py
+++ b/apps/benchmarks/UniformGridGPU/simulation_setup/benchmark_configs.py
@@ -9,8 +9,8 @@ Look at the end of the file to select the benchmark to run
 
 import os
 import waLBerla as wlb
-from waLBerla.tools.config import block_decomposition, toPrm
-from waLBerla.tools.sqlitedb import *
+from waLBerla.tools.config import block_decomposition
+from waLBerla.tools.sqlitedb import sequenceValuesToScalars, checkAndUpdateSchema, storeSingle
 from copy import deepcopy
 import sys
 import sqlite3
@@ -53,7 +53,7 @@ class Scenario:
         from pprint import pformat
         wlb.log_info_on_root("Scenario:\n" + pformat(self.config_dict))
         # Write out the configuration as text-based prm:
-        #print(toPrm(self.config_dict))
+        # print(toPrm(self.config_dict))
         return self.config_dict
 
     @wlb.member_callback
@@ -71,16 +71,17 @@ class Scenario:
         result = data
         sequenceValuesToScalars(result)
         num_tries = 4
-        for num_try in range(num_tries):  # check multiple times e.g. may fail when multiple benchmark processes are running
+        # check multiple times e.g. may fail when multiple benchmark processes are running
+        for num_try in range(num_tries):
             try:
                 checkAndUpdateSchema(result, "runs", DB_FILE)
                 storeSingle(result, "runs", DB_FILE)
                 break
             except sqlite3.OperationalError as e:
-                wlb.log_warning("Sqlite DB writing failed: try {}/{}  {}".format(num_try+1, num_tries, str(e)))
+                wlb.log_warning("Sqlite DB writing failed: try {}/{}  {}".format(num_try + 1, num_tries, str(e)))
 
 
-# -------------------------------------- Functions trying different parameter sets -------------------------------------------------------------------
+# -------------------------------------- Functions trying different parameter sets -----------------------------------
 
 
 def overlap_benchmark():
@@ -93,9 +94,11 @@ def overlap_benchmark():
                           (4, 4, 1), (8, 8, 1), (16, 16, 1), (32, 32, 1),
                           (4, 4, 4), (8, 8, 8), (16, 16, 16), (32, 32, 32)]
 
-    for comm_strategy in ['UniformGPUScheme_Baseline', 'UniformGPUScheme_Memcpy']:  # 'GPUPackInfo_Baseline', 'GPUPackInfo_Streams'
+    # 'GPUPackInfo_Baseline', 'GPUPackInfo_Streams'
+    for comm_strategy in ['UniformGPUScheme_Baseline', 'UniformGPUScheme_Memcpy']:
         # no overlap
-        scenarios.add(Scenario(timeStepStrategy='noOverlap', communicationScheme=comm_strategy, innerOuterSplit=(1, 1, 1)))
+        scenarios.add(Scenario(timeStepStrategy='noOverlap', communicationScheme=comm_strategy,
+                               innerOuterSplit=(1, 1, 1)))
 
         # overlap
         for overlap_strategy in ['simpleOverlap', 'complexOverlap']:
@@ -123,7 +126,8 @@ def communication_compare():
                           timesteps=num_time_steps(block_size))
             scenarios.add(sc)
             for inner_outer_split in [(4, 1, 1), (8, 1, 1), (16, 1, 1), (32, 1, 1)]:
-                if 3 * inner_outer_split[0] > block_size[0]:  # ensure that the inner part of the domain is still large enough
+                # ensure that the inner part of the domain is still large enough
+                if 3 * inner_outer_split[0] > block_size[0]:
                     continue
                 sc = Scenario(cells_per_block=block_size,
                               gpuBlockSize=(128, 1, 1),
@@ -155,7 +159,7 @@ def single_gpu_benchmark():
             scenarios.add(scenario)
 
 
-# -------------------------------------- Optional job script generation for PizDaint -------------------------------------------------------------------
+# -------------------------------------- Optional job script generation for PizDaint ---------------------------------
 
 
 job_script_header = """
@@ -223,7 +227,8 @@ def generate_jobscripts(exe_names=all_executables):
 
         job_script = job_script_header.format(nodes=node_count, folder=os.path.join(os.getcwd(), folder_name))
         for exe in exe_names:
-            job_script += job_script_exe_part.format(app="../" + exe, nodes=node_count, config='../communication_compare.py')
+            job_script += job_script_exe_part.format(app="../" + exe, nodes=node_count,
+                                                     config='../communication_compare.py')
 
         with open(os.path.join(folder_name, 'job.sh'), 'w') as f:
             f.write(job_script)
@@ -235,6 +240,8 @@ if __name__ == '__main__':
 else:
     wlb.log_info_on_root("Batch run of benchmark scenarios, saving result to {}".format(DB_FILE))
     # Select the benchmark you want to run
-    single_gpu_benchmark()  # benchmarks different CUDA block sizes and domain sizes and measures single GPU performance of compute kernel (no communication)
-    #communication_compare()  # benchmarks different communication routines, with and without overlap
-    #overlap_benchmark()      # benchmarks different communication overlap options
+    single_gpu_benchmark()
+    # benchmarks different CUDA block sizes and domain sizes and measures single
+    # GPU performance of compute kernel (no communication)
+    # communication_compare(): benchmarks different communication routines, with and without overlap
+    # overlap_benchmark(): benchmarks different communication overlap options
diff --git a/apps/benchmarks/UniformGridGenerated/UniformGridGenerated.py b/apps/benchmarks/UniformGridGenerated/UniformGridGenerated.py
index 6a4d6c452..b9ca24ba1 100644
--- a/apps/benchmarks/UniformGridGenerated/UniformGridGenerated.py
+++ b/apps/benchmarks/UniformGridGenerated/UniformGridGenerated.py
@@ -1,7 +1,8 @@
 import sympy as sp
 import pystencils as ps
 from lbmpy.creationfunctions import create_lb_update_rule, create_lb_collision_rule
-from pystencils_walberla import CodeGeneration, generate_pack_info_from_kernel, generate_sweep, generate_mpidtype_info_from_kernel
+from pystencils_walberla import CodeGeneration, generate_pack_info_from_kernel, generate_sweep,\
+    generate_mpidtype_info_from_kernel
 from lbmpy.macroscopic_value_kernels import macroscopic_values_getter, macroscopic_values_setter
 from lbmpy.fieldaccess import AAEvenTimeStepAccessor, AAOddTimeStepAccessor
 
@@ -30,7 +31,8 @@ options_dict = {
     'mrt_full': {
         'method': 'mrt',
         'stencil': 'D3Q19',
-        'relaxation_rates': [omega_fill[0], omega, omega_fill[1], omega_fill[2], omega_fill[3], omega_fill[4], omega_fill[5]],
+        'relaxation_rates': [omega_fill[0], omega, omega_fill[1], omega_fill[2],
+                             omega_fill[3], omega_fill[4], omega_fill[5]],
     },
     'entropic': {
         'method': 'mrt',
@@ -62,7 +64,7 @@ options_dict = {
 }
 
 info_header = """
-#include "stencil/D3Q{q}.h"\nusing Stencil_T = walberla::stencil::D3Q{q}; 
+#include "stencil/D3Q{q}.h"\nusing Stencil_T = walberla::stencil::D3Q{q};
 const char * infoStencil = "{stencil}";
 const char * infoConfigName = "{configName}";
 const char * optimizationDict = "{optimizationDict}";
@@ -108,7 +110,7 @@ with CodeGeneration() as ctx:
         options['stencil'] = 'D3Q27'
 
     stencil_str = options['stencil']
-    q = int(stencil_str[stencil_str.find('Q')+1:])
+    q = int(stencil_str[stencil_str.find('Q') + 1:])
     pdfs, velocity_field = ps.fields("pdfs({q}), velocity(3) : double[3D]".format(q=q), layout='fzyx')
 
     update_rule_two_field = create_lb_update_rule(optimization={'symbolic_field': pdfs,
@@ -128,12 +130,16 @@ with CodeGeneration() as ctx:
             ((0, 0, 1), UBB([0.05, 0, 0])),
             ((0, 0, -1), NoSlip()),
         ))
-        cr_even = create_lb_collision_rule(stencil="D3Q19", compressible=False, optimization={'cse_global': opts['aa_even_cse_global'],
-                                                                                              'cse_pdfs': opts['aa_even_cse_pdfs']})
-        cr_odd = create_lb_collision_rule(stencil="D3Q19", compressible=False, optimization={'cse_global': opts['aa_odd_cse_global'],
-                                                                                             'cse_pdfs': opts['aa_odd_cse_pdfs']})
-        update_rule_aa_even = update_rule_with_push_boundaries(cr_even, pdfs, boundaries, AAEvenTimeStepAccessor, AAOddTimeStepAccessor.read)
-        update_rule_aa_odd = update_rule_with_push_boundaries(cr_odd, pdfs, boundaries, AAOddTimeStepAccessor, AAEvenTimeStepAccessor.read)
+        cr_even = create_lb_collision_rule(stencil="D3Q19", compressible=False,
+                                           optimization={'cse_global': opts['aa_even_cse_global'],
+                                                         'cse_pdfs': opts['aa_even_cse_pdfs']})
+        cr_odd = create_lb_collision_rule(stencil="D3Q19", compressible=False,
+                                          optimization={'cse_global': opts['aa_odd_cse_global'],
+                                                        'cse_pdfs': opts['aa_odd_cse_pdfs']})
+        update_rule_aa_even = update_rule_with_push_boundaries(cr_even, pdfs, boundaries,
+                                                               AAEvenTimeStepAccessor, AAOddTimeStepAccessor.read)
+        update_rule_aa_odd = update_rule_with_push_boundaries(cr_odd, pdfs, boundaries,
+                                                              AAOddTimeStepAccessor, AAEvenTimeStepAccessor.read)
     else:
         update_rule_aa_even = create_lb_update_rule(kernel_type=AAEvenTimeStepAccessor(),
                                                     optimization={'symbolic_field': pdfs,
@@ -146,7 +152,7 @@ with CodeGeneration() as ctx:
                                                                  'cse_global': opts['aa_odd_cse_global'],
                                                                  'cse_pdfs': opts['aa_odd_cse_pdfs']}, **options)
 
-    vec = { 'assume_aligned': True, 'assume_inner_stride_one': True}
+    vec = {'assume_aligned': True, 'assume_inner_stride_one': True}
 
     # check if openmp is enabled in cmake
     if ctx.openmp:
diff --git a/apps/benchmarks/UniformGridGenerated/params.py b/apps/benchmarks/UniformGridGenerated/params.py
index 724238f4b..598855b63 100644
--- a/apps/benchmarks/UniformGridGenerated/params.py
+++ b/apps/benchmarks/UniformGridGenerated/params.py
@@ -2,7 +2,7 @@ import math
 import os
 import operator
 import waLBerla as wlb
-from waLBerla.tools.sqlitedb import *
+from waLBerla.tools.sqlitedb import sequenceValuesToScalars, checkAndUpdateSchema, storeSingle
 from waLBerla.tools.config import block_decomposition
 from functools import reduce
 import sqlite3
@@ -137,18 +137,19 @@ def single_node_benchmark():
                             continue
                         scenarios.add(sc)
                 else:
-                        sc = BenchmarkScenario(block_size=block_size, direct_comm=direct_comm,
-                                               domain_decomposition_func=domain_decomposition_func_z,
-                                               time_step_mode=time_step_mode)
-                        if not block_size_ok(sc):
-                            continue
-                        scenarios.add(sc)
+                    sc = BenchmarkScenario(block_size=block_size, direct_comm=direct_comm,
+                                           domain_decomposition_func=domain_decomposition_func_z,
+                                           time_step_mode=time_step_mode)
+                    if not block_size_ok(sc):
+                        continue
+                    scenarios.add(sc)
 
 
 def single_node_benchmark_small():
     scenarios = wlb.ScenarioManager()
     for block_size in [(128, 128, 128), (128, 64, 64), (64, 64, 128), (64, 128, 64), (64, 64, 64),
-                       (1024, 64, 32), (2048, 64, 16), (64, 32, 32), (32, 32, 32), (16, 16, 16), (256, 128, 64), (512, 128, 32)]:
+                       (1024, 64, 32), (2048, 64, 16), (64, 32, 32), (32, 32, 32), (16, 16, 16),
+                       (256, 128, 64), (512, 128, 32)]:
         for direct_comm in (True, False):
             for time_step_mode in ['aa', 'aaKernelOnly', 'twoField']:
                 sc = BenchmarkScenario(block_size=block_size, direct_comm=direct_comm, time_step_mode=time_step_mode)
@@ -160,7 +161,8 @@ def single_node_benchmark_small():
 def weak_scaling():
     scenarios = wlb.ScenarioManager()
     for block_size in [(128, 128, 128), (128, 64, 64), (64, 64, 128), (64, 128, 64), (64, 64, 64),
-                       (1024, 64, 32), (2048, 64, 16), (64, 32, 32), (32, 32, 32), (16, 16, 16), (256, 128, 64), (512, 128, 32)]:
+                       (1024, 64, 32), (2048, 64, 16), (64, 32, 32), (32, 32, 32), (16, 16, 16),
+                       (256, 128, 64), (512, 128, 32)]:
         for direct_comm in (True, False):
             sc = BenchmarkScenario(block_size=block_size, direct_comm=direct_comm,
                                    domain_decomposition_func=domain_decomposition_func_full)
@@ -168,4 +170,5 @@ def weak_scaling():
                 continue
             scenarios.add(sc)
 
+
 single_node_benchmark()
diff --git a/apps/pythonmodule/setup.py b/apps/pythonmodule/setup.py
index 74532db4f..5929b0fb9 100644
--- a/apps/pythonmodule/setup.py
+++ b/apps/pythonmodule/setup.py
@@ -5,37 +5,36 @@ import platform
 import sys
 
 # The following variables are configure by CMake
-walberla_source_dir  = "${walberla_SOURCE_DIR}"
+walberla_source_dir = "${walberla_SOURCE_DIR}"
 walberla_binary_dir = "${CMAKE_CURRENT_BINARY_DIR}"
 
 if platform.system() == 'Windows':
-     extension = ( 'dll', 'pyd' )
-     configuration = 'Release'
+    extension = ('dll', 'pyd')
+    configuration = 'Release'
 else:
-     extension = ( 'so', 'so' )
-     configuration = ''
-     
+    extension = ('so', 'so')
+    configuration = ''
+
 
 def collectFiles():
-    src_shared_lib = join(walberla_binary_dir, configuration, 'walberla_cpp.' + extension[0] )
-    dst_shared_lib = join(walberla_binary_dir, 'waLBerla', 'walberla_cpp.' + extension[1] )
+    src_shared_lib = join(walberla_binary_dir, configuration, 'walberla_cpp.' + extension[0])
+    dst_shared_lib = join(walberla_binary_dir, 'waLBerla', 'walberla_cpp.' + extension[1])
     # copy everything inplace
-    
-    print( src_shared_lib )
-    
-    if not exists( src_shared_lib ):
+
+    print(src_shared_lib)
+
+    if not exists(src_shared_lib):
         print("Python Module was not built yet - run 'make walberla_cpp'")
         exit(1)
 
-    
+    shutil.rmtree(join(walberla_binary_dir, 'waLBerla'), ignore_errors=True)
 
-    shutil.rmtree( join(walberla_binary_dir, 'waLBerla'), ignore_errors=True )
+    shutil.copytree(join(walberla_source_dir, 'python', 'waLBerla'),
+                    join(walberla_binary_dir, 'waLBerla'))
 
-    shutil.copytree( join(walberla_source_dir, 'python', 'waLBerla'),
-                     join(walberla_binary_dir, 'waLBerla') )
+    shutil.copy(src_shared_lib,
+                dst_shared_lib)
 
-    shutil.copy( src_shared_lib,
-                 dst_shared_lib )
 
 packages = ['waLBerla',
             'waLBerla.evaluation',
@@ -48,13 +47,13 @@ packages = ['waLBerla',
 
 collectFiles()
 
-setup( name='waLBerla',
-       version='1.0',
-       author='Martin Bauer',
-       author_email='martin.bauer@fau.de',
-       url='http://www.walberla.net',
-       packages=packages,
-       package_data = {'' : ['walberla_cpp.' + extension[1]] }
+setup(name='waLBerla',
+      version='1.0',
+      author='Martin Bauer',
+      author_email='martin.bauer@fau.de',
+      url='http://www.walberla.net',
+      packages=packages,
+      package_data={'': ['walberla_cpp.' + extension[1]]}
       )
 
 if sys.argv[1] == 'build':
diff --git a/apps/pythonmodule/stage.py b/apps/pythonmodule/stage.py
index 50cfd4362..2072a7e86 100644
--- a/apps/pythonmodule/stage.py
+++ b/apps/pythonmodule/stage.py
@@ -1,3 +1,2 @@
-
 if __name__ == "__main__":
-    install()
\ No newline at end of file
+    install()  # noqa: F821
diff --git a/apps/showcases/PhaseFieldAllenCahn/CPU/multiphase_RTI_3D.py b/apps/showcases/PhaseFieldAllenCahn/CPU/multiphase_RTI_3D.py
index 1050b08e9..54f656cf9 100755
--- a/apps/showcases/PhaseFieldAllenCahn/CPU/multiphase_RTI_3D.py
+++ b/apps/showcases/PhaseFieldAllenCahn/CPU/multiphase_RTI_3D.py
@@ -89,17 +89,17 @@ class Scenario:
             spike_data = wlb.field.gather(blocks, 'phase', makeSlice[self.size[0] // 2, :, self.size[2] // 2])
             if spike_data:
                 spike_field = np.asarray(spike_data.buffer()).squeeze()
-                location_of_spike = (np.argmax(spike_field > 0.5) - ny//2)/l0
+                location_of_spike = (np.argmax(spike_field > 0.5) - ny // 2) / l0
 
             bubble_data = wlb.field.gather(blocks, 'phase', makeSlice[0, :, 0])
             if bubble_data:
                 bubble_field = np.asarray(bubble_data.buffer()).squeeze()
-                location_of_bubble = (np.argmax(bubble_field > 0.5) - ny//2)/l0
+                location_of_bubble = (np.argmax(bubble_field > 0.5) - ny // 2) / l0
 
             saddle_data = wlb.field.gather(blocks, 'phase', makeSlice[0, :, self.size[2] // 2])
             if saddle_data:
                 saddle_field = np.asarray(saddle_data.buffer()).squeeze()
-                location_of_saddle = (np.argmax(saddle_field > 0.5) - ny//2)/l0
+                location_of_saddle = (np.argmax(saddle_field > 0.5) - ny // 2) / l0
 
             phase = wlb.field.gather(blocks, 'phase', makeSlice[:, :, :])
             if phase:
diff --git a/apps/showcases/PhaseFieldAllenCahn/CPU/multiphase_codegen.py b/apps/showcases/PhaseFieldAllenCahn/CPU/multiphase_codegen.py
index 9010cdcf8..42e990dc3 100644
--- a/apps/showcases/PhaseFieldAllenCahn/CPU/multiphase_codegen.py
+++ b/apps/showcases/PhaseFieldAllenCahn/CPU/multiphase_codegen.py
@@ -1,4 +1,4 @@
-from pystencils import fields, TypedSymbol
+from pystencils import fields
 from pystencils.simp import sympy_cse
 from pystencils import AssignmentCollection
 
@@ -6,7 +6,7 @@ from lbmpy.boundaries import NoSlip
 from lbmpy.creationfunctions import create_lb_method, create_lb_update_rule
 from lbmpy.stencils import get_stencil
 
-from pystencils_walberla import CodeGeneration, generate_sweep, generate_pack_info_from_kernel
+from pystencils_walberla import CodeGeneration, generate_sweep
 from lbmpy_walberla import generate_boundary
 
 from lbmpy.phasefield_allen_cahn.kernel_equations import initializer_kernel_phase_field_lb, \
@@ -155,7 +155,7 @@ vp = [('int32_t', 'cudaBlockSize0'),
       ('int32_t', 'cudaBlockSize1')]
 
 info_header = f"""
-#include "stencil/D3Q{q_phase}.h"\nusing Stencil_phase_T = walberla::stencil::D3Q{q_phase}; 
+#include "stencil/D3Q{q_phase}.h"\nusing Stencil_phase_T = walberla::stencil::D3Q{q_phase};
 #include "stencil/D3Q{q_hydro}.h"\nusing Stencil_hydro_T = walberla::stencil::D3Q{q_hydro};
 """
 
diff --git a/apps/showcases/PhaseFieldAllenCahn/CPU/multiphase_rising_bubble.py b/apps/showcases/PhaseFieldAllenCahn/CPU/multiphase_rising_bubble.py
index e1ca7bdda..9a8948868 100755
--- a/apps/showcases/PhaseFieldAllenCahn/CPU/multiphase_rising_bubble.py
+++ b/apps/showcases/PhaseFieldAllenCahn/CPU/multiphase_rising_bubble.py
@@ -5,6 +5,7 @@ import numpy as np
 from waLBerla.core_extension import makeSlice
 from lbmpy.phasefield_allen_cahn.parameter_calculation import calculate_dimensionless_rising_bubble
 
+
 class Scenario:
     def __init__(self):
         # output frequencies
diff --git a/apps/showcases/PhaseFieldAllenCahn/GPU/multiphase_RTI_3D.py b/apps/showcases/PhaseFieldAllenCahn/GPU/multiphase_RTI_3D.py
index a7c5ed1c4..d862b1a48 100755
--- a/apps/showcases/PhaseFieldAllenCahn/GPU/multiphase_RTI_3D.py
+++ b/apps/showcases/PhaseFieldAllenCahn/GPU/multiphase_RTI_3D.py
@@ -89,17 +89,17 @@ class Scenario:
             spike_data = wlb.field.gather(blocks, 'phase', makeSlice[self.size[0] // 2, :, self.size[2] // 2])
             if spike_data:
                 spike_field = np.asarray(spike_data.buffer()).squeeze()
-                location_of_spike = (np.argmax(spike_field > 0.5) - ny//2)/l0
+                location_of_spike = (np.argmax(spike_field > 0.5) - ny // 2) / l0
 
             bubble_data = wlb.field.gather(blocks, 'phase', makeSlice[0, :, 0])
             if bubble_data:
                 bubble_field = np.asarray(bubble_data.buffer()).squeeze()
-                location_of_bubble = (np.argmax(bubble_field > 0.5) - ny//2)/l0
+                location_of_bubble = (np.argmax(bubble_field > 0.5) - ny // 2) / l0
 
             saddle_data = wlb.field.gather(blocks, 'phase', makeSlice[0, :, self.size[2] // 2])
             if saddle_data:
                 saddle_field = np.asarray(saddle_data.buffer()).squeeze()
-                location_of_saddle = (np.argmax(saddle_field > 0.5) - ny//2)/l0
+                location_of_saddle = (np.argmax(saddle_field > 0.5) - ny // 2) / l0
 
             phase = wlb.field.gather(blocks, 'phase', makeSlice[:, :, :])
             if phase:
diff --git a/apps/showcases/PhaseFieldAllenCahn/GPU/multiphase_codegen.py b/apps/showcases/PhaseFieldAllenCahn/GPU/multiphase_codegen.py
index b98348ab4..16bdcfb5a 100644
--- a/apps/showcases/PhaseFieldAllenCahn/GPU/multiphase_codegen.py
+++ b/apps/showcases/PhaseFieldAllenCahn/GPU/multiphase_codegen.py
@@ -9,9 +9,8 @@ from lbmpy.stencils import get_stencil
 from pystencils_walberla import CodeGeneration, generate_sweep, generate_pack_info_from_kernel
 from lbmpy_walberla import generate_boundary
 
-from lbmpy.phasefield_allen_cahn.kernel_equations import initializer_kernel_phase_field_lb, \
-     initializer_kernel_hydro_lb, interface_tracking_force, \
-     hydrodynamic_force, get_collision_assignments_hydro
+from lbmpy.phasefield_allen_cahn.kernel_equations import initializer_kernel_phase_field_lb,\
+    initializer_kernel_hydro_lb, interface_tracking_force, hydrodynamic_force, get_collision_assignments_hydro
 
 from lbmpy.phasefield_allen_cahn.force_model import MultiphaseForceModel
 
@@ -159,7 +158,7 @@ sweep_block_size = (TypedSymbol("cudaBlockSize0", np.int32),
 sweep_params = {'block_size': sweep_block_size}
 
 info_header = f"""
-#include "stencil/D3Q{q_phase}.h"\nusing Stencil_phase_T = walberla::stencil::D3Q{q_phase}; 
+#include "stencil/D3Q{q_phase}.h"\nusing Stencil_phase_T = walberla::stencil::D3Q{q_phase};
 #include "stencil/D3Q{q_hydro}.h"\nusing Stencil_hydro_T = walberla::stencil::D3Q{q_hydro};
 """
 
-- 
GitLab