diff --git a/__init__.py b/__init__.py
index 4d4dacb55c0f236e360656afbd7b6b3abcd16733..b81bc4fe4cad9f5b7dc039ba79c6df0cfd05b00b 100644
--- a/__init__.py
+++ b/__init__.py
@@ -1,16 +1,16 @@
-from pystencils.field import Field, FieldType, extractCommonSubexpressions
+from pystencils.field import Field, FieldType
 from pystencils.data_types import TypedSymbol
 from pystencils.slicing import makeSlice
-from pystencils.kernelcreation import createKernel, createIndexedKernel
+from pystencils.kernelcreation import create_kernel, create_indexed_kernel
 from pystencils.display_utils import show_code, to_dot
 from pystencils.assignment_collection import AssignmentCollection
 from pystencils.assignment import Assignment
 from pystencils.sympyextensions import SymbolCreator
 
-__all__ = ['Field', 'FieldType', 'extractCommonSubexpressions',
+__all__ = ['Field', 'FieldType',
            'TypedSymbol',
            'makeSlice',
-           'createKernel', 'createIndexedKernel',
+           'create_kernel', 'create_indexed_kernel',
            'show_code', 'to_dot',
            'AssignmentCollection',
            'Assignment',
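
For orientation, a minimal sketch of how the renamed top-level API reads after this change; the fields and the update rule are illustrative, not part of the patch:

```python
from pystencils import Field, Assignment, create_kernel

# Two symbolic 2D fields; Field.create_generic is used the same way later in this patch.
src = Field.create_generic('src', spatial_dimensions=2)
dst = Field.create_generic('dst', spatial_dimensions=2)

# Four-point neighbor average, built from the renamed entry points.
update = Assignment(dst.center, (src[1, 0] + src[-1, 0] + src[0, 1] + src[0, -1]) / 4)
ast = create_kernel([update])  # was createKernel; returns a KernelFunction AST node
kernel = ast.compile()         # compiled callable, invoked with numpy arrays by field name
```
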
diff --git a/alignedarray.py b/alignedarray.py
index 687ebf9f737f8d2b5921e000da1ca07e85564bc6..46d4dbacfa7865e7f5af8f971b6b645ce81a3186 100644
--- a/alignedarray.py
+++ b/alignedarray.py
@@ -1,45 +1,45 @@
 import numpy as np
 
 
-def aligned_empty(shape, byteAlignment=32, dtype=np.float64, byteOffset=0, order='C', alignInnerCoordinate=True):
+def aligned_empty(shape, byte_alignment=32, dtype=np.float64, byte_offset=0, order='C', align_inner_coordinate=True):
     """
     Creates an aligned empty numpy array
     :param shape: size of the array
-    :param byteAlignment: alignment in bytes, for the start address of the array holds (a % byteAlignment) == 0
+    :param byte_alignment: alignment in bytes; for the start address a of the array, (a % byte_alignment) == 0 holds
     :param dtype: numpy data type
-    :param byteOffset: offset in bytes for position that should be aligned i.e. (a+byteOffset) % byteAlignment == 0
+    :param byte_offset: offset in bytes for the position that should be aligned, i.e. (a + byte_offset) % byte_alignment == 0
                        typically used to align first inner cell instead of ghost layer
     :param order: storage linearization order
-    :param alignInnerCoordinate: if True, the start of the innermost coordinate lines are aligned as well
+    :param align_inner_coordinate: if True, the start of the innermost coordinate lines are aligned as well
     :return:
     """
-    if (not alignInnerCoordinate) or (not hasattr(shape, '__len__')):
-        N = np.prod(shape)
+    if (not align_inner_coordinate) or (not hasattr(shape, '__len__')):
+        size = np.prod(shape)
         d = np.dtype(dtype)
-        tmp = np.empty(N * d.itemsize + byteAlignment, dtype=np.uint8)
+        tmp = np.empty(size * d.itemsize + byte_alignment, dtype=np.uint8)
         address = tmp.__array_interface__['data'][0]
-        offset = (byteAlignment - (address + byteOffset) % byteAlignment) % byteAlignment
-        t1 = tmp[offset:offset + N * d.itemsize]
-        return tmp[offset:offset + N * d.itemsize].view(dtype=d).reshape(shape, order=order)
+        offset = (byte_alignment - (address + byte_offset) % byte_alignment) % byte_alignment
+        return tmp[offset:offset + size * d.itemsize].view(dtype=d).reshape(shape, order=order)
     else:
         if order == 'C':
-            ndim0 = shape[-1]
+            dim0_size = shape[-1]
             dim0 = -1
-            ndim1 = np.prod(shape[:-1])
+            dim1_size = np.prod(shape[:-1])
         else:
-            ndim0 = shape[0]
+            dim0_size = shape[0]
             dim0 = 0
-            ndim1 = np.prod(shape[1:])
+            dim1_size = np.prod(shape[1:])
         d = np.dtype(dtype)
 
-        assert byteAlignment >= d.itemsize and byteAlignment % d.itemsize == 0
-        padding = (byteAlignment - ((ndim0 * d.itemsize) % byteAlignment)) % byteAlignment
+        assert byte_alignment >= d.itemsize and byte_alignment % d.itemsize == 0
+        padding = (byte_alignment - ((dim0_size * d.itemsize) % byte_alignment)) % byte_alignment
 
-        N = ndim1 * padding + np.prod(shape) * d.itemsize
-        tmp = aligned_empty(N, byteAlignment=byteAlignment, dtype=np.uint8, byteOffset=byteOffset).view(dtype=dtype)
-        bshape = [i for i in shape]
-        bshape[dim0] = ndim0 + padding // d.itemsize
-        tmp = tmp.reshape(bshape, order=order)
+        size = dim1_size * padding + np.prod(shape) * d.itemsize
+        tmp = aligned_empty(size, byte_alignment=byte_alignment, dtype=np.uint8, byte_offset=byte_offset)
+        tmp = tmp.view(dtype=dtype)
+        padded_shape = list(shape)
+        padded_shape[dim0] = dim0_size + padding // d.itemsize
+        tmp = tmp.reshape(padded_shape, order=order)
         if tmp.flags['C_CONTIGUOUS']:
             tmp = tmp[..., :shape[-1]]
         else:
@@ -48,17 +48,17 @@ def aligned_empty(shape, byteAlignment=32, dtype=np.float64, byteOffset=0, order
         return tmp
 
 
-def aligned_zeros(shape, byteAlignment=16, dtype=float, byteOffset=0, order='C', alignInnerCoordinate=True):
-    arr = aligned_empty(shape, dtype=dtype, byteOffset=byteOffset,
-                        order=order, byteAlignment=byteAlignment, alignInnerCoordinate=alignInnerCoordinate)
+def aligned_zeros(shape, byte_alignment=16, dtype=float, byte_offset=0, order='C', align_inner_coordinate=True):
+    arr = aligned_empty(shape, dtype=dtype, byte_offset=byte_offset,
+                        order=order, byte_alignment=byte_alignment, align_inner_coordinate=align_inner_coordinate)
     x = np.zeros((), arr.dtype)
     arr[...] = x
     return arr
 
 
-def aligned_ones(shape, byteAlignment=16, dtype=float, byteOffset=0, order='C', alignInnerCoordinate=True):
-    arr = aligned_empty(shape, dtype=dtype, byteOffset=byteOffset,
-                        order=order, byteAlignment=byteAlignment, alignInnerCoordinate=alignInnerCoordinate)
+def aligned_ones(shape, byte_alignment=16, dtype=float, byte_offset=0, order='C', align_inner_coordinate=True):
+    arr = aligned_empty(shape, dtype=dtype, byte_offset=byte_offset,
+                        order=order, byte_alignment=byte_alignment, align_inner_coordinate=align_inner_coordinate)
     x = np.ones((), arr.dtype)
     arr[...] = x
     return arr
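
A quick sanity-check sketch of the renamed allocation helpers (shape and alignment values are arbitrary):

```python
import numpy as np
from pystencils.alignedarray import aligned_zeros

arr = aligned_zeros((16, 17), byte_alignment=32, dtype=np.float64)
# The start address satisfies the requested alignment ...
assert arr.__array_interface__['data'][0] % 32 == 0
# ... and with align_inner_coordinate=True (the default) each innermost line is
# aligned too, because rows are padded to a multiple of byte_alignment bytes.
assert arr.strides[0] % 32 == 0
```
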
diff --git a/astnodes.py b/astnodes.py
index 0a10b8645ba3699fded7e424496e3db61a1e2fd2..bbc12505e2a04856d2b6de8911a6e0c0ea5d43df 100644
--- a/astnodes.py
+++ b/astnodes.py
@@ -1,7 +1,7 @@
 import sympy as sp
 from sympy.tensor import IndexedBase
 from pystencils.field import Field
-from pystencils.data_types import TypedSymbol, create_type, castFunc
+from pystencils.data_types import TypedSymbol, create_type, cast_func
 from pystencils.sympyextensions import fast_subs
 from typing import List, Set, Optional, Union, Any
 
@@ -113,34 +113,34 @@ class KernelFunction(Node):
 
     class Argument:
         def __init__(self, name, dtype, symbol, kernel_function_node):
-            from pystencils.transformations import symbolNameToVariableName
+            from pystencils.transformations import symbol_name_to_variable_name
             self.name = name
             self.dtype = dtype
             self.isFieldPtrArgument = False
             self.isFieldShapeArgument = False
             self.isFieldStrideArgument = False
             self.isFieldArgument = False
-            self.fieldName = ""
+            self.field_name = ""
             self.coordinate = None
             self.symbol = symbol
 
             if name.startswith(Field.DATA_PREFIX):
                 self.isFieldPtrArgument = True
                 self.isFieldArgument = True
-                self.fieldName = name[len(Field.DATA_PREFIX):]
+                self.field_name = name[len(Field.DATA_PREFIX):]
             elif name.startswith(Field.SHAPE_PREFIX):
                 self.isFieldShapeArgument = True
                 self.isFieldArgument = True
-                self.fieldName = name[len(Field.SHAPE_PREFIX):]
+                self.field_name = name[len(Field.SHAPE_PREFIX):]
             elif name.startswith(Field.STRIDE_PREFIX):
                 self.isFieldStrideArgument = True
                 self.isFieldArgument = True
-                self.fieldName = name[len(Field.STRIDE_PREFIX):]
+                self.field_name = name[len(Field.STRIDE_PREFIX):]
 
             self.field = None
             if self.isFieldArgument:
-                field_map = {symbolNameToVariableName(f.name): f for f in kernel_function_node.fields_accessed}
-                self.field = field_map[self.fieldName]
+                field_map = {symbol_name_to_variable_name(f.name): f for f in kernel_function_node.fields_accessed}
+                self.field = field_map[self.field_name]
 
         def __lt__(self, other):
             def score(l):
@@ -167,12 +167,12 @@ class KernelFunction(Node):
         self._body = body
         body.parent = self
         self._parameters = None
-        self.functionName = function_name
+        self.function_name = function_name
         self._body.parent = self
         self.compile = None
-        self.ghostLayers = ghost_layers
+        self.ghost_layers = ghost_layers
         # these variables are assumed to be global, so no automatic parameter is generated for them
-        self.globalVariables = set()
+        self.global_variables = set()
         self.backend = backend
 
     @property
@@ -202,19 +202,19 @@ class KernelFunction(Node):
         return set(o.field for o in self.atoms(ResolvedFieldAccess))
 
     def _update_parameters(self):
-        undefined_symbols = self._body.undefined_symbols - self.globalVariables
+        undefined_symbols = self._body.undefined_symbols - self.global_variables
         self._parameters = [KernelFunction.Argument(s.name, s.dtype, s, self) for s in undefined_symbols]
 
         self._parameters.sort()
 
     def __str__(self):
         self._update_parameters()
-        return '{0} {1}({2})\n{3}'.format(type(self).__name__, self.functionName, self.parameters,
+        return '{0} {1}({2})\n{3}'.format(type(self).__name__, self.function_name, self.parameters,
                                           ("\t" + "\t".join(str(self.body).splitlines(True))))
 
     def __repr__(self):
         self._update_parameters()
-        return '{0} {1}({2})'.format(type(self).__name__, self.functionName, self.parameters)
+        return '{0} {1}({2})'.format(type(self).__name__, self.function_name, self.parameters)
 
 
 class Block(Node):
@@ -392,8 +392,8 @@ class LoopOverCoordinate(Node):
 
     @property
     def is_outermost_loop(self):
-        from pystencils.transformations import getNextParentOfType
-        return getNextParentOfType(self, LoopOverCoordinate) is None
+        from pystencils.transformations import get_next_parent_of_type
+        return get_next_parent_of_type(self, LoopOverCoordinate) is None
 
     @property
     def is_innermost_loop(self):
@@ -417,7 +417,7 @@ class SympyAssignment(Node):
         self._lhsSymbol = lhs_symbol
         self.rhs = rhs_expr
         self._isDeclaration = True
-        is_cast = self._lhsSymbol.func == castFunc
+        is_cast = self._lhsSymbol.func == cast_func
         if isinstance(self._lhsSymbol, Field.Access) or isinstance(self._lhsSymbol, ResolvedFieldAccess) or is_cast:
             self._isDeclaration = False
         self._isConst = is_const
@@ -430,7 +430,7 @@ class SympyAssignment(Node):
     def lhs(self, new_value):
         self._lhsSymbol = new_value
         self._isDeclaration = True
-        is_cast = self._lhsSymbol.func == castFunc
+        is_cast = self._lhsSymbol.func == cast_func
         if isinstance(self._lhsSymbol, Field.Access) or isinstance(self._lhsSymbol, sp.Indexed) or is_cast:
             self._isDeclaration = False
 
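
The renamed `KernelFunction` attributes can be inspected on any generated AST; a hedged sketch (the field and update rule are made up, and note the `isField*Argument` flags keep their old names in this patch):

```python
from pystencils import Field, Assignment, create_kernel

f = Field.create_generic('f', spatial_dimensions=2)
ast = create_kernel([Assignment(f.center, 2 * f[1, 0])])

ast.function_name = 'double_east_neighbor'  # was ast.functionName
for arg in ast.parameters:                  # KernelFunction.Argument instances, sorted via __lt__
    if arg.isFieldPtrArgument:              # the isField* flags are unchanged by this patch
        print(arg.field_name, arg.dtype)    # field_name was fieldName
```
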
diff --git a/backends/__init__.py b/backends/__init__.py
index e66688a98b80c4dbad5492411a62de513920531c..981703e589d8187bd3280c037863a056fe0c7d00 100644
--- a/backends/__init__.py
+++ b/backends/__init__.py
@@ -1,4 +1,4 @@
-from .cbackend import print_c
+from .cbackend import generate_c
 
 try:
     from .dot import print_dot
diff --git a/backends/cbackend.py b/backends/cbackend.py
index 43d585cae4a98b3419c83b22548c248b7f0a8541..c2806f9e701f706ebdb0bf785c2b9b82e79b171a 100644
--- a/backends/cbackend.py
+++ b/backends/cbackend.py
@@ -1,22 +1,22 @@
 import sympy as sp
 from collections import namedtuple
 from sympy.core import S
-from typing import Optional
+from typing import Optional, Set
 
 try:
     from sympy.printing.ccode import C99CodePrinter as CCodePrinter
 except ImportError:
     from sympy.printing.ccode import CCodePrinter  # for sympy versions < 1.1
 
-from pystencils.bitoperations import xor, rightShift, leftShift, bitwiseAnd, bitwiseOr
+from pystencils.bitoperations import bitwise_xor, bit_shift_right, bit_shift_left, bitwise_and, bitwise_or
 from pystencils.astnodes import Node, ResolvedFieldAccess, SympyAssignment
-from pystencils.data_types import create_type, PointerType, get_type_of_expression, VectorType, castFunc
+from pystencils.data_types import create_type, PointerType, get_type_of_expression, VectorType, cast_func
 from pystencils.backends.simd_instruction_sets import selectedInstructionSet
 
-__all__ = ['print_c']
+__all__ = ['generate_c', 'CustomCppCode', 'get_headers']
 
 
-def print_c(ast_node: Node, signature_only: bool = False, use_float_constants: Optional[bool] = None) -> str:
+def generate_c(ast_node: Node, signature_only: bool = False, use_float_constants: Optional[bool] = None) -> str:
     """Prints an abstract syntax tree node as C or CUDA code.
 
     This function does not need to distinguish between C, C++ or CUDA code, it just prints 'C-like' code as encoded
@@ -42,7 +42,8 @@ def print_c(ast_node: Node, signature_only: bool = False, use_float_constants: O
     return printer(ast_node)
 
 
-def get_headers(ast_node):
+def get_headers(ast_node: Node) -> Set[str]:
+    """Return a set of header files, necessary to compile the printed C-like code."""
     headers = set()
 
     if hasattr(ast_node, 'headers'):
@@ -131,7 +132,7 @@ class CBackend:
 
     def _print_KernelFunction(self, node):
         function_arguments = ["%s %s" % (str(s.dtype), s.name) for s in node.parameters]
-        func_declaration = "FUNC_PREFIX void %s(%s)" % (node.functionName, ", ".join(function_arguments))
+        func_declaration = "FUNC_PREFIX void %s(%s)" % (node.function_name, ", ".join(function_arguments))
         if self._signatureOnly:
             return func_declaration
 
@@ -163,7 +164,7 @@ class CBackend:
             return "%s %s = %s;" % (data_type, self.sympyPrinter.doprint(node.lhs), self.sympyPrinter.doprint(node.rhs))
         else:
             lhs_type = get_type_of_expression(node.lhs)
-            if type(lhs_type) is VectorType and node.lhs.func == castFunc:
+            if type(lhs_type) is VectorType and node.lhs.func == cast_func:
                 return self._vectorInstructionSet['storeU'].format("&" + self.sympyPrinter.doprint(node.lhs.args[0]),
                                                                    self.sympyPrinter.doprint(node.rhs)) + ';'
             else:
@@ -231,13 +232,13 @@ class CustomSympyPrinter(CCodePrinter):
 
     def _print_Function(self, expr):
         function_map = {
-            xor: '^',
-            rightShift: '>>',
-            leftShift: '<<',
-            bitwiseOr: '|',
-            bitwiseAnd: '&',
+            bitwise_xor: '^',
+            bit_shift_right: '>>',
+            bit_shift_left: '<<',
+            bitwise_or: '|',
+            bitwise_and: '&',
         }
-        if expr.func == castFunc:
+        if expr.func == cast_func:
             arg, data_type = expr.args
             return "*((%s)(& %s))" % (PointerType(data_type), self._print(arg))
         elif expr.func in function_map:
@@ -263,7 +264,7 @@ class VectorizedCustomSympyPrinter(CustomSympyPrinter):
             return None
 
     def _print_Function(self, expr):
-        if expr.func == castFunc:
+        if expr.func == cast_func:
             arg, data_type = expr.args
             if type(data_type) is VectorType:
                 if type(arg) is ResolvedFieldAccess:
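
Sketch of the renamed backend entry points; `generate_c` replaces `print_c`, and `get_headers` is now exported alongside it (the kernel here is illustrative):

```python
from pystencils import Field, Assignment, create_kernel
from pystencils.backends.cbackend import generate_c, get_headers

f = Field.create_generic('f', spatial_dimensions=2)
ast = create_kernel([Assignment(f.center, 2 * f[1, 0])])

code = generate_c(ast)                            # full C-like kernel code (was print_c)
signature = generate_c(ast, signature_only=True)  # declaration only
includes = get_headers(ast)                       # headers needed to compile 'code'
```
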
diff --git a/backends/dot.py b/backends/dot.py
index fa1c7067d18656584208541d5de343725967e147..d74b94f2603ddfaf974d5705cdaf1f6da449b1e3 100644
--- a/backends/dot.py
+++ b/backends/dot.py
@@ -72,7 +72,7 @@ def __shortened(node):
     elif isinstance(node, KernelFunction):
         params = [f.name for f in node.fields_accessed]
         params += [p.name for p in node.parameters if not p.isFieldArgument]
-        return "Func: %s (%s)" % (node.functionName, ",".join(params))
+        return "Func: %s (%s)" % (node.function_name, ",".join(params))
     elif isinstance(node, SympyAssignment):
         return repr(node.lhs)
     elif isinstance(node, Block):
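
The dot backend is reached most easily through the re-exported `to_dot`; a brief sketch, assuming the optional graphviz dependency is installed:

```python
from pystencils import Field, Assignment, create_kernel, to_dot

f = Field.create_generic('f', spatial_dimensions=2)
ast = create_kernel([Assignment(f.center, 2 * f[1, 0])])
graph = to_dot(ast)  # graphviz graph of the AST; KernelFunction nodes now show function_name
```
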
diff --git a/bitoperations.py b/bitoperations.py
index a5fdd7612b562c77fb25748142a6412b4d397351..841505e0838cc51b6922b6d7c879e92c6d5a16fa 100644
--- a/bitoperations.py
+++ b/bitoperations.py
@@ -1,6 +1,6 @@
 import sympy as sp
-xor = sp.Function("⊻")
-rightShift = sp.Function("rshift")
-leftShift = sp.Function("lshift")
-bitwiseAnd = sp.Function("Bit&")
-bitwiseOr = sp.Function("Bit|")
+bitwise_xor = sp.Function("⊻")
+bit_shift_right = sp.Function("rshift")
+bit_shift_left = sp.Function("lshift")
+bitwise_and = sp.Function("Bit&")
+bitwise_or = sp.Function("Bit|")
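
These are plain sympy `Function` objects; the C backend's `CustomSympyPrinter._print_Function` maps them to the corresponding operators. A small sketch of building an expression with the renamed functions:

```python
import sympy as sp
from pystencils.bitoperations import bitwise_and, bit_shift_left

flags = sp.Symbol('flags')
# Stays unevaluated in sympy; the C backend renders these as '&' and '<<'.
expr = bitwise_and(flags, bit_shift_left(1, 3))
```
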
diff --git a/boundaries/boundaryconditions.py b/boundaries/boundaryconditions.py
index 1cc67d17ca8df82b8c3b601533528c03a03368f6..87d953aa51e03d7f390ccd12056b6c20c0f4d623 100644
--- a/boundaries/boundaryconditions.py
+++ b/boundaries/boundaryconditions.py
@@ -1,5 +1,6 @@
 from pystencils import Assignment
 from pystencils.boundaries.boundaryhandling import BoundaryOffsetInfo
+from typing import List, Tuple, Any
 
 
 class Boundary(object):
@@ -8,29 +9,30 @@ class Boundary(object):
     def __init__(self, name=None):
         self._name = name
 
-    def __call__(self, field, directionSymbol, indexField):
-        """
-        This function defines the boundary behavior and must therefore be implemented by all boundaries.
-        Here the boundary is defined as a list of sympy equations, from which a boundary kernel is generated.
-        :param field: pystencils field where boundary condition should be applied.
-                     The current cell is cell next to the boundary, which is influenced by the boundary
-                     cell i.e. has a link from the boundary cell to itself.
-        :param directionSymbol: a sympy symbol that can be used as index to the pdfField. It describes
-                                the direction pointing from the fluid to the boundary cell
-        :param indexField: the boundary index field that can be used to retrieve and update boundary data
-        :return: list of sympy equations
+    def __call__(self, field, direction_symbol, index_field) -> List[Assignment]:
+        """Defines the boundary behavior and must therefore be implemented by all boundaries.
+
+        Here the boundary is defined as a list of sympy assignments, from which a boundary kernel is generated.
+
+        Args:
+            field: pystencils field where the boundary condition should be applied.
+                   The current cell is the cell next to the boundary, which is influenced by the boundary
+                   cell, i.e. it has a link from the boundary cell to itself.
+            direction_symbol: a sympy symbol that can be used as an index into the field. It describes
+                              the direction pointing from the fluid cell to the boundary cell.
+            index_field: the boundary index field that can be used to retrieve and update boundary data
         """
         raise NotImplementedError("Boundary class has to overwrite __call__")
 
     @property
-    def additionalData(self):
+    def additional_data(self) -> List[Tuple[str, Any]]:
-        """Return a list of (name, type) tuples for additional data items required in this boundary
-        These data items can either be initialized in separate kernel see additionalDataKernelInit or by
-        Python callbacks - see additionalDataCallback """
+        """Return a list of (name, type) tuples for additional data items required in this boundary.
+        These data items can either be initialized in a separate kernel or by Python callbacks -
+        see additional_data_init_callback."""
         return []
 
     @property
-    def additionalDataInitCallback(self):
+    def additional_data_init_callback(self):
         """Return a callback function called with a boundary data setter object and returning a dict of
         data-name to data for each element that should be initialized"""
         return None
@@ -43,22 +45,22 @@ class Boundary(object):
             return type(self).__name__
 
     @name.setter
-    def name(self, newValue):
-        self._name = newValue
+    def name(self, new_value):
+        self._name = new_value
 
 
 class Neumann(Boundary):
-    def __call__(self, field, directionSymbol, **kwargs):
+    def __call__(self, field, direction_symbol, **kwargs):
 
-        neighbor = BoundaryOffsetInfo.offsetFromDir(directionSymbol, field.spatialDimensions)
-        if field.indexDimensions == 0:
+        neighbor = BoundaryOffsetInfo.offset_from_dir(direction_symbol, field.spatial_dimensions)
+        if field.index_dimensions == 0:
             return [Assignment(field[neighbor], field.center)]
         else:
             from itertools import product
-            if not field.hasFixedIndexShape:
+            if not field.has_fixed_index_shape:
                 raise NotImplementedError("Neumann boundary works only for fields with fixed index shape")
-            indexIter = product(*(range(i) for i in field.indexShape))
-            return [Assignment(field[neighbor](*idx), field(*idx)) for idx in indexIter]
+            index_iter = product(*(range(i) for i in field.index_shape))
+            return [Assignment(field[neighbor](*idx), field(*idx)) for idx in index_iter]
 
     def __hash__(self):
         # All boundaries of these class behave equal -> should also be equal
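
As an illustration of the renamed boundary interface, a hypothetical fixed-value boundary can follow the same pattern as `Neumann` above (this class is not part of the patch):

```python
from pystencils import Assignment
from pystencils.boundaries.boundaryconditions import Boundary
from pystencils.boundaries.boundaryhandling import BoundaryOffsetInfo

class FixedValue(Boundary):
    """Hypothetical boundary that writes a constant into the boundary cell."""

    def __init__(self, value, name=None):
        super(FixedValue, self).__init__(name)
        self.value = value

    def __call__(self, field, direction_symbol, **kwargs):
        neighbor = BoundaryOffsetInfo.offset_from_dir(direction_symbol, field.spatial_dimensions)
        return [Assignment(field[neighbor], self.value)]

    def __hash__(self):
        return hash((type(self).__name__, self.value))

    def __eq__(self, other):
        return type(other) == type(self) and other.value == self.value
```
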
diff --git a/boundaries/boundaryhandling.py b/boundaries/boundaryhandling.py
index aa39d35a4e4e57af0f778aa49068216c369a8085..cdc8620ad38e5d9773201fe9859462124fe2e34b 100644
--- a/boundaries/boundaryhandling.py
+++ b/boundaries/boundaryhandling.py
@@ -1,7 +1,7 @@
 import numpy as np
 import sympy as sp
 from pystencils.assignment import Assignment
-from pystencils import Field, TypedSymbol, createIndexedKernel
+from pystencils import Field, TypedSymbol, create_indexed_kernel
 from pystencils.backends.cbackend import CustomCppCode
 from pystencils.boundaries.createindexlist import numpyDataTypeForBoundaryObject, createBoundaryIndexArray
 from pystencils.cache import memorycache
@@ -11,23 +11,23 @@ from pystencils.data_types import create_type
 class FlagInterface:
     FLAG_DTYPE = np.uint32
 
-    def __init__(self, dataHandling, flagFieldName):
-        self.flagFieldName = flagFieldName
-        self.domainFlag = self.FLAG_DTYPE(1 << 0)
+    def __init__(self, data_handling, flag_field_name):
+        self.flag_field_name = flag_field_name
+        self.domain_flag = self.FLAG_DTYPE(1 << 0)
         self._nextFreeFlag = 1
-        self.dataHandling = dataHandling
+        self.data_handling = data_handling
 
         # Add flag field to data handling if it does not yet exist
-        if dataHandling.hasData(self.flagFieldName):
+        if data_handling.has_data(self.flag_field_name):
             raise ValueError("There is already a boundary handling registered at the data handling."
                              "If you want to add multiple handlings, choose a different name.")
 
-        dataHandling.addArray(self.flagFieldName, dtype=self.FLAG_DTYPE, cpu=True, gpu=False)
-        ffGhostLayers = dataHandling.ghostLayersOfField(self.flagFieldName)
-        for b in dataHandling.iterate(ghostLayers=ffGhostLayers):
-            b[self.flagFieldName].fill(self.domainFlag)
+        data_handling.add_array(self.flag_field_name, dtype=self.FLAG_DTYPE, cpu=True, gpu=False)
+        ff_ghost_layers = data_handling.ghost_layers_of_field(self.flag_field_name)
+        for b in data_handling.iterate(ghost_layers=ff_ghost_layers):
+            b[self.flag_field_name].fill(self.domain_flag)
 
-    def allocateNextFlag(self):
+    def allocate_next_flag(self):
         result = self.FLAG_DTYPE(1 << self._nextFreeFlag)
         self._nextFreeFlag += 1
         return result
@@ -35,58 +35,58 @@ class FlagInterface:
 
 class BoundaryHandling:
 
-    def __init__(self, dataHandling, fieldName, stencil, name="boundaryHandling", flagInterface=None,
-                 target='cpu', openMP=True):
-        assert dataHandling.hasData(fieldName)
+    def __init__(self, data_handling, field_name, stencil, name="boundary_handling", flag_interface=None,
+                 target='cpu', openmp=True):
+        assert data_handling.has_data(field_name)
 
-        self._dataHandling = dataHandling
-        self._fieldName = fieldName
-        self._indexArrayName = name + "IndexArrays"
+        self._data_handling = data_handling
+        self._field_name = field_name
+        self._index_array_name = name + "IndexArrays"
         self._target = target
-        self._openMP = openMP
-        self._boundaryObjectToBoundaryInfo = {}
+        self._openmp = openmp
+        self._boundary_object_to_boundary_info = {}
         self.stencil = stencil
         self._dirty = True
-        self.flagInterface = flagInterface if flagInterface is not None else FlagInterface(dataHandling, name + "Flags")
+        self.flag_interface = flag_interface if flag_interface is not None else FlagInterface(data_handling, name + "Flags")
 
         gpu = self._target == 'gpu'
-        dataHandling.addCustomClass(self._indexArrayName, self.IndexFieldBlockData, cpu=True, gpu=gpu)
+        data_handling.add_custom_class(self._index_array_name, self.IndexFieldBlockData, cpu=True, gpu=gpu)
 
     @property
-    def dataHandling(self):
-        return self._dataHandling
+    def data_handling(self):
+        return self._data_handling
 
-    def getFlag(self, boundaryObj):
-        return self._boundaryObjectToBoundaryInfo[boundaryObj].flag
+    def get_flag(self, boundary_obj):
+        return self._boundary_object_to_boundary_info[boundary_obj].flag
 
     @property
     def shape(self):
-        return self._dataHandling.shape
+        return self._data_handling.shape
 
     @property
     def dim(self):
-        return self._dataHandling.dim
+        return self._data_handling.dim
 
     @property
-    def boundaryObjects(self):
+    def boundary_objects(self):
-        return tuple(self._boundaryObjectToName.keys())
+        return tuple(self._boundary_object_to_boundary_info.keys())
 
     @property
-    def flagArrayName(self):
-        return self.flagInterface.flagFieldName
+    def flag_array_name(self):
+        return self.flag_interface.flag_field_name
 
-    def getBoundaryNameToFlagDict(self):
-        result = {bObj.name: bInfo.flag for bObj, bInfo in self._boundaryObjectToBoundaryInfo.items()}
-        result['domain'] = self.flagInterface.domainFlag
+    def get_boundary_name_to_flag_dict(self):
+        result = {bObj.name: bInfo.flag for bObj, bInfo in self._boundary_object_to_boundary_info.items()}
+        result['domain'] = self.flag_interface.domain_flag
         return result
 
-    def getMask(self, sliceObj, boundaryObj, inverse=False):
-        if isinstance(boundaryObj, str) and boundaryObj.lower() == 'domain':
-            flag = self.flagInterface.domainFlag
+    def get_mask(self, slice_obj, boundary_obj, inverse=False):
+        if isinstance(boundary_obj, str) and boundary_obj.lower() == 'domain':
+            flag = self.flag_interface.domain_flag
         else:
-            flag = self._boundaryObjectToBoundaryInfo[boundaryObj].flag
+            flag = self._boundary_object_to_boundary_info[boundary_obj].flag
 
-        arr = self.dataHandling.gatherArray(self.flagArrayName, sliceObj)
+        arr = self.data_handling.gather_array(self.flag_array_name, slice_obj)
         if arr is None:
             return None
         else:
@@ -95,227 +95,227 @@ class BoundaryHandling:
                 result = np.logical_not(result)
             return result
 
-    def setBoundary(self, boundaryObject, sliceObj=None, maskCallback=None, ghostLayers=True, innerGhostLayers=True,
-                    replace=True):
+    def set_boundary(self, boundary_obj, slice_obj=None, mask_callback=None,
+                     ghost_layers=True, inner_ghost_layers=True, replace=True):
         """
         Sets boundary using either a rectangular slice, a boolean mask or a combination of both
 
-        :param boundaryObject: instance of a boundary object that should be set
-        :param sliceObj: a slice object (can be created with makeSlice[]) that selects a part of the domain where
+        :param boundary_obj: instance of a boundary object that should be set
+        :param slice_obj: a slice object (can be created with makeSlice[]) that selects a part of the domain where
-                          the boundary should be set. If none, the complete domain is selected which makes only sense
-                          if a maskCallback is passed. The slice can have ':' placeholders, which are interpreted
-                          depending on the 'includeGhostLayers' parameter i.e. if it is True, the slice extends
+                          the boundary should be set. If None, the complete domain is selected, which only makes sense
+                          if a mask_callback is passed. The slice can have ':' placeholders, which are interpreted
+                          depending on the 'ghost_layers' parameter, i.e. if it is True, the slice extends
                           into the ghost layers
-        :param maskCallback: callback function getting x,y (z) parameters of the cell midpoints and returning a
+        :param mask_callback: callback function getting x, y (and z) arrays of the cell midpoints and returning a
                              boolean mask with True entries where boundary cells should be set.
                              The x, y, z arrays have 2D/3D shape such that they can be used directly
                              to create the boolean return array. i.e return x < 10 sets boundaries in cells with
                              midpoint x coordinate smaller than 10.
-        :param ghostLayers see DataHandling.iterate()
+        :param ghost_layers: see DataHandling.iterate()
         """
-        if isinstance(boundaryObject, str) and boundaryObject.lower() == 'domain':
-            flag = self.flagInterface.domainFlag
+        if isinstance(boundary_obj, str) and boundary_obj.lower() == 'domain':
+            flag = self.flag_interface.domain_flag
         else:
-            flag = self._addBoundary(boundaryObject)
+            flag = self._add_boundary(boundary_obj)
 
-        for b in self._dataHandling.iterate(sliceObj, ghostLayers=ghostLayers, innerGhostLayers=innerGhostLayers):
-            flagArr = b[self.flagInterface.flagFieldName]
-            if maskCallback is not None:
-                mask = maskCallback(*b.midpointArrays)
+        for b in self._data_handling.iterate(slice_obj, ghost_layers=ghost_layers, inner_ghost_layers=inner_ghost_layers):
+            flag_arr = b[self.flag_interface.flag_field_name]
+            if mask_callback is not None:
+                mask = mask_callback(*b.midpoint_arrays)
                 if replace:
-                    flagArr[mask] = flag
+                    flag_arr[mask] = flag
                 else:
-                    np.bitwise_or(flagArr, flag, where=mask, out=flagArr)
-                    np.bitwise_and(flagArr, ~self.flagInterface.domainFlag, where=mask, out=flagArr)
+                    np.bitwise_or(flag_arr, flag, where=mask, out=flag_arr)
+                    np.bitwise_and(flag_arr, ~self.flag_interface.domain_flag, where=mask, out=flag_arr)
             else:
                 if replace:
-                    flagArr.fill(flag)
+                    flag_arr.fill(flag)
                 else:
-                    np.bitwise_or(flagArr, flag, out=flagArr)
-                    np.bitwise_and(flagArr, ~self.flagInterface.domainFlag, out=flagArr)
+                    np.bitwise_or(flag_arr, flag, out=flag_arr)
+                    np.bitwise_and(flag_arr, ~self.flag_interface.domain_flag, out=flag_arr)
 
         self._dirty = True
 
         return flag
 
-    def setBoundaryWhereFlagIsSet(self, boundaryObject, flag):
-        self._addBoundary(boundaryObject, flag)
+    def set_boundary_where_flag_is_set(self, boundary_obj, flag):
+        self._add_boundary(boundary_obj, flag)
         self._dirty = True
         return flag
 
     def prepare(self):
         if not self._dirty:
             return
-        self._createIndexFields()
+        self._create_index_fields()
         self._dirty = False
 
-    def triggerReinitializationOfBoundaryData(self, **kwargs):
+    def trigger_reinitialization_of_boundary_data(self, **kwargs):
         if self._dirty:
             self.prepare()
         else:
-            ffGhostLayers = self._dataHandling.ghostLayersOfField(self.flagInterface.flagFieldName)
-            for b in self._dataHandling.iterate(ghostLayers=ffGhostLayers):
-                for bObj, setter in b[self._indexArrayName].boundaryObjectToDataSetter.items():
-                    self._boundaryDataInitialization(bObj, setter, **kwargs)
+            ff_ghost_layers = self._data_handling.ghost_layers_of_field(self.flag_interface.flag_field_name)
+            for b in self._data_handling.iterate(ghost_layers=ff_ghost_layers):
+                for bObj, setter in b[self._index_array_name].boundaryObjectToDataSetter.items():
+                    self._boundary_data_initialization(bObj, setter, **kwargs)
 
     def __call__(self, **kwargs):
         if self._dirty:
             self.prepare()
 
-        for b in self._dataHandling.iterate(gpu=self._target == 'gpu'):
-            for bObj, idxArr in b[self._indexArrayName].boundaryObjectToIndexList.items():
-                kwargs[self._fieldName] = b[self._fieldName]
+        for b in self._data_handling.iterate(gpu=self._target == 'gpu'):
+            for bObj, idxArr in b[self._index_array_name].boundary_object_to_index_list.items():
+                kwargs[self._field_name] = b[self._field_name]
                 kwargs['indexField'] = idxArr
-                dataUsedInKernel = (p.fieldName
-                                    for p in self._boundaryObjectToBoundaryInfo[bObj].kernel.parameters
-                                    if p.isFieldPtrArgument and p.fieldName not in kwargs)
-                kwargs.update({name: b[name] for name in dataUsedInKernel})
+                data_used_in_kernel = (p.field_name
+                                       for p in self._boundary_object_to_boundary_info[bObj].kernel.parameters
+                                       if p.isFieldPtrArgument and p.field_name not in kwargs)
+                kwargs.update({name: b[name] for name in data_used_in_kernel})
 
-                self._boundaryObjectToBoundaryInfo[bObj].kernel(**kwargs)
+                self._boundary_object_to_boundary_info[bObj].kernel(**kwargs)
 
-    def geometryToVTK(self, fileName='geometry', boundaries='all', ghostLayers=False):
+    def geometry_to_vtk(self, file_name='geometry', boundaries='all', ghost_layers=False):
         """
         Writes a VTK field where each cell with the given boundary is marked with 1, other cells are 0
         This can be used to display the simulation geometry in Paraview
-        :param fileName: vtk filename
+        :param file_name: vtk filename
         :param boundaries: boundary object, or special string 'domain' for domain cells or special string 'all' for all
                          boundary conditions.
                          can also  be a sequence, to write multiple boundaries to VTK file
-        :param ghostLayers: number of ghost layers to write, or True for all, False for none
+        :param ghost_layers: number of ghost layers to write, or True for all, False for none
         """
         if boundaries == 'all':
-            boundaries = list(self._boundaryObjectToBoundaryInfo.keys()) + ['domain']
+            boundaries = list(self._boundary_object_to_boundary_info.keys()) + ['domain']
         elif not hasattr(boundaries, "__len__"):
             boundaries = [boundaries]
 
-        masksToName = {}
+        masks_to_name = {}
         for b in boundaries:
             if b == 'domain':
-                masksToName[self.flagInterface.domainFlag] = 'domain'
+                masks_to_name[self.flag_interface.domain_flag] = 'domain'
             else:
-                masksToName[self._boundaryObjectToBoundaryInfo[b].flag] = b.name
+                masks_to_name[self._boundary_object_to_boundary_info[b].flag] = b.name
 
-        writer = self.dataHandling.vtkWriterFlags(fileName, self.flagInterface.flagFieldName,
-                                                  masksToName, ghostLayers=ghostLayers)
+        writer = self.data_handling.create_vtk_writer_for_flag_array(file_name, self.flag_interface.flag_field_name,
+                                                                     masks_to_name, ghost_layers=ghost_layers)
         writer(1)
 
     # ------------------------------ Implementation Details ------------------------------------------------------------
 
-    def _addBoundary(self, boundaryObject, flag=None):
-        if boundaryObject not in self._boundaryObjectToBoundaryInfo:
-            symbolicIndexField = Field.createGeneric('indexField', spatialDimensions=1,
-                                                     dtype=numpyDataTypeForBoundaryObject(boundaryObject, self.dim))
-            ast = self._createBoundaryKernel(self._dataHandling.fields[self._fieldName],
-                                             symbolicIndexField, boundaryObject)
+    def _add_boundary(self, boundary_obj, flag=None):
+        if boundary_obj not in self._boundary_object_to_boundary_info:
+            symbolic_index_field = Field.create_generic('indexField', spatial_dimensions=1,
+                                                        dtype=numpyDataTypeForBoundaryObject(boundary_obj, self.dim))
+            ast = self._create_boundary_kernel(self._data_handling.fields[self._field_name],
+                                               symbolic_index_field, boundary_obj)
             if flag is None:
-                flag = self.flagInterface.allocateNextFlag()
-            boundaryInfo = self.BoundaryInfo(boundaryObject, flag=flag, kernel=ast.compile())
-            self._boundaryObjectToBoundaryInfo[boundaryObject] = boundaryInfo
-        return self._boundaryObjectToBoundaryInfo[boundaryObject].flag
-
-    def _createBoundaryKernel(self, symbolicField, symbolicIndexField, boundaryObject):
-        return createBoundaryKernel(symbolicField, symbolicIndexField, self.stencil, boundaryObject,
-                                    target=self._target, openMP=self._openMP)
-
-    def _createIndexFields(self):
-        dh = self._dataHandling
-        ffGhostLayers = dh.ghostLayersOfField(self.flagInterface.flagFieldName)
-        for b in dh.iterate(ghostLayers=ffGhostLayers):
-            flagArr = b[self.flagInterface.flagFieldName]
-            pdfArr = b[self._fieldName]
-            indexArrayBD = b[self._indexArrayName]
-            indexArrayBD.clear()
-            for bInfo in self._boundaryObjectToBoundaryInfo.values():
-                idxArr = createBoundaryIndexArray(flagArr, self.stencil, bInfo.flag, self.flagInterface.domainFlag,
-                                                  bInfo.boundaryObject, ffGhostLayers)
+                flag = self.flag_interface.allocate_next_flag()
+            boundary_info = self.BoundaryInfo(boundary_obj, flag=flag, kernel=ast.compile())
+            self._boundary_object_to_boundary_info[boundary_obj] = boundary_info
+        return self._boundary_object_to_boundary_info[boundary_obj].flag
+
+    def _create_boundary_kernel(self, symbolic_field, symbolic_index_field, boundary_obj):
+        return create_boundary_kernel(symbolic_field, symbolic_index_field, self.stencil, boundary_obj,
+                                      target=self._target, openmp=self._openmp)
+
+    def _create_index_fields(self):
+        dh = self._data_handling
+        ff_ghost_layers = dh.ghost_layers_of_field(self.flag_interface.flag_field_name)
+        for b in dh.iterate(ghost_layers=ff_ghost_layers):
+            flag_arr = b[self.flag_interface.flag_field_name]
+            pdf_arr = b[self._field_name]
+            index_array_bd = b[self._index_array_name]
+            index_array_bd.clear()
+            for bInfo in self._boundary_object_to_boundary_info.values():
+                idxArr = createBoundaryIndexArray(flag_arr, self.stencil, bInfo.flag, self.flag_interface.domain_flag,
+                                                  bInfo.boundaryObject, ff_ghost_layers)
                 if idxArr.size == 0:
                     continue
 
-                boundaryDataSetter = BoundaryDataSetter(idxArr, b.offset, self.stencil, ffGhostLayers, pdfArr)
-                indexArrayBD.boundaryObjectToIndexList[bInfo.boundaryObject] = idxArr
-                indexArrayBD.boundaryObjectToDataSetter[bInfo.boundaryObject] = boundaryDataSetter
-                self._boundaryDataInitialization(bInfo.boundaryObject, boundaryDataSetter)
+                boundary_data_setter = BoundaryDataSetter(idxArr, b.offset, self.stencil, ff_ghost_layers, pdf_arr)
+                index_array_bd.boundary_object_to_index_list[bInfo.boundaryObject] = idxArr
+                index_array_bd.boundaryObjectToDataSetter[bInfo.boundaryObject] = boundary_data_setter
+                self._boundary_data_initialization(bInfo.boundaryObject, boundary_data_setter)
 
-    def _boundaryDataInitialization(self, boundaryObject, boundaryDataSetter, **kwargs):
-        if boundaryObject.additionalDataInitCallback:
-            boundaryObject.additionalDataInitCallback(boundaryDataSetter, **kwargs)
+    def _boundary_data_initialization(self, boundary_obj, boundary_data_setter, **kwargs):
+        if boundary_obj.additional_data_init_callback:
+            boundary_obj.additional_data_init_callback(boundary_data_setter, **kwargs)
         if self._target == 'gpu':
-            self._dataHandling.toGpu(self._indexArrayName)
+            self._data_handling.to_gpu(self._index_array_name)
 
     class BoundaryInfo(object):
-        def __init__(self, boundaryObject, flag, kernel):
-            self.boundaryObject = boundaryObject
+        def __init__(self, boundary_obj, flag, kernel):
+            self.boundaryObject = boundary_obj
             self.flag = flag
             self.kernel = kernel
 
     class IndexFieldBlockData:
         def __init__(self, *args, **kwargs):
-            self.boundaryObjectToIndexList = {}
+            self.boundary_object_to_index_list = {}
             self.boundaryObjectToDataSetter = {}
 
         def clear(self):
-            self.boundaryObjectToIndexList.clear()
+            self.boundary_object_to_index_list.clear()
             self.boundaryObjectToDataSetter.clear()
 
         @staticmethod
-        def toCpu(gpuVersion, cpuVersion):
-            gpuVersion = gpuVersion.boundaryObjectToIndexList
-            cpuVersion = cpuVersion.boundaryObjectToIndexList
-            for obj, cpuArr in cpuVersion.values():
-                gpuVersion[obj].get(cpuArr)
+        def to_cpu(gpu_version, cpu_version):
+            gpu_version = gpu_version.boundary_object_to_index_list
+            cpu_version = cpu_version.boundary_object_to_index_list
+            for obj, cpuArr in cpu_version.items():
+                gpu_version[obj].get(cpuArr)
 
         @staticmethod
-        def toGpu(gpuVersion, cpuVersion):
+        def to_gpu(gpu_version, cpu_version):
             from pycuda import gpuarray
-            gpuVersion = gpuVersion.boundaryObjectToIndexList
-            cpuVersion = cpuVersion.boundaryObjectToIndexList
-            for obj, cpuArr in cpuVersion.items():
-                if obj not in gpuVersion:
-                    gpuVersion[obj] = gpuarray.to_gpu(cpuArr)
+            gpu_version = gpu_version.boundary_object_to_index_list
+            cpu_version = cpu_version.boundary_object_to_index_list
+            for obj, cpuArr in cpu_version.items():
+                if obj not in gpu_version:
+                    gpu_version[obj] = gpuarray.to_gpu(cpuArr)
                 else:
-                    gpuVersion[obj].set(cpuArr)
+                    gpu_version[obj].set(cpuArr)
 
 
 class BoundaryDataSetter:
 
-    def __init__(self, indexArray, offset, stencil, ghostLayers, pdfArray):
-        self.indexArray = indexArray
+    def __init__(self, index_array, offset, stencil, ghost_layers, pdf_array):
+        self.indexArray = index_array
         self.offset = offset
         self.stencil = np.array(stencil)
-        self.pdfArray = pdfArray.view()
-        self.pdfArray.flags.writeable = False
+        self.pdf_array = pdf_array.view()
+        self.pdf_array.flags.writeable = False
 
-        arrFieldNames = indexArray.dtype.names
-        self.dim = 3 if 'z' in arrFieldNames else 2
-        assert 'x' in arrFieldNames and 'y' in arrFieldNames and 'dir' in arrFieldNames, str(arrFieldNames)
-        self.boundaryDataNames = set(self.indexArray.dtype.names) - set(['x', 'y', 'z', 'dir'])
-        self.coordMap = {0: 'x', 1: 'y', 2: 'z'}
-        self.ghostLayers = ghostLayers
+        arr_field_names = index_array.dtype.names
+        self.dim = 3 if 'z' in arr_field_names else 2
+        assert 'x' in arr_field_names and 'y' in arr_field_names and 'dir' in arr_field_names, str(arr_field_names)
+        self.boundary_data_names = set(self.indexArray.dtype.names) - set(['x', 'y', 'z', 'dir'])
+        self.coord_map = {0: 'x', 1: 'y', 2: 'z'}
+        self.ghost_layers = ghost_layers
 
-    def nonBoundaryCellPositions(self, coord):
+    def non_boundary_cell_positions(self, coord):
         assert coord < self.dim
-        return self.indexArray[self.coordMap[coord]] + self.offset[coord] - self.ghostLayers + 0.5
+        return self.indexArray[self.coord_map[coord]] + self.offset[coord] - self.ghost_layers + 0.5
 
     @memorycache()
-    def linkOffsets(self):
+    def link_offsets(self):
         return self.stencil[self.indexArray['dir']]
 
     @memorycache()
-    def linkPositions(self, coord):
-        return self.nonBoundaryCellPositions(coord) + 0.5 * self.linkOffsets()[:, coord]
+    def link_positions(self, coord):
+        return self.non_boundary_cell_positions(coord) + 0.5 * self.link_offsets()[:, coord]
 
     @memorycache()
-    def boundaryCellPositions(self, coord):
-        return self.nonBoundaryCellPositions(coord) + self.linkOffsets()[:, coord]
+    def boundary_cell_positions(self, coord):
+        return self.non_boundary_cell_positions(coord) + self.link_offsets()[:, coord]
 
     def __setitem__(self, key, value):
-        if key not in self.boundaryDataNames:
-            raise KeyError("Invalid boundary data name %s. Allowed are %s" % (key, self.boundaryDataNames))
+        if key not in self.boundary_data_names:
+            raise KeyError("Invalid boundary data name %s. Allowed are %s" % (key, self.boundary_data_names))
         self.indexArray[key] = value
 
     def __getitem__(self, item):
-        if item not in self.boundaryDataNames:
-            raise KeyError("Invalid boundary data name %s. Allowed are %s" % (item, self.boundaryDataNames))
+        if item not in self.boundary_data_names:
+            raise KeyError("Invalid boundary data name %s. Allowed are %s" % (item, self.boundary_data_names))
         return self.indexArray[item]
 
 
@@ -324,46 +324,46 @@ class BoundaryOffsetInfo(CustomCppCode):
     # --------------------------- Functions to be used by boundaries --------------------------
 
     @staticmethod
-    def offsetFromDir(dirIdx, dim):
-        return tuple([sp.IndexedBase(symbol, shape=(1,))[dirIdx]
-                      for symbol in BoundaryOffsetInfo._offsetSymbols(dim)])
+    def offset_from_dir(dir_idx, dim):
+        return tuple([sp.IndexedBase(symbol, shape=(1,))[dir_idx]
+                      for symbol in BoundaryOffsetInfo._offset_symbols(dim)])
 
     @staticmethod
-    def invDir(dirIdx):
-        return sp.IndexedBase(BoundaryOffsetInfo.INV_DIR_SYMBOL, shape=(1,))[dirIdx]
+    def inv_dir(dir_idx):
+        return sp.IndexedBase(BoundaryOffsetInfo.INV_DIR_SYMBOL, shape=(1,))[dir_idx]
 
     # ---------------------------------- Internal ---------------------------------------------
 
     def __init__(self, stencil):
         dim = len(stencil[0])
 
-        offsetSym = BoundaryOffsetInfo._offsetSymbols(dim)
+        offset_sym = BoundaryOffsetInfo._offset_symbols(dim)
         code = "\n"
         for i in range(dim):
-            offsetStr = ", ".join([str(d[i]) for d in stencil])
-            code += "const int64_t %s [] = { %s };\n" % (offsetSym[i].name, offsetStr)
+            offset_str = ", ".join([str(d[i]) for d in stencil])
+            code += "const int64_t %s [] = { %s };\n" % (offset_sym[i].name, offset_str)
 
-        invDirs = []
+        inv_dirs = []
         for direction in stencil:
-            inverseDir = tuple([-i for i in direction])
-            invDirs.append(str(stencil.index(inverseDir)))
+            inverse_dir = tuple([-i for i in direction])
+            inv_dirs.append(str(stencil.index(inverse_dir)))
 
-        code += "const int %s [] = { %s };\n" % (self.INV_DIR_SYMBOL.name, ", ".join(invDirs))
-        offsetSymbols = BoundaryOffsetInfo._offsetSymbols(dim)
+        code += "const int %s [] = { %s };\n" % (self.INV_DIR_SYMBOL.name, ", ".join(inv_dirs))
+        offset_symbols = BoundaryOffsetInfo._offset_symbols(dim)
         super(BoundaryOffsetInfo, self).__init__(code, symbols_read=set(),
-                                                 symbols_defined=set(offsetSymbols + [self.INV_DIR_SYMBOL]))
+                                                 symbols_defined=set(offset_symbols + [self.INV_DIR_SYMBOL]))
 
     @staticmethod
-    def _offsetSymbols(dim):
+    def _offset_symbols(dim):
         return [TypedSymbol("c_%d" % (d,), create_type(np.int64)) for d in range(dim)]
 
-    INV_DIR_SYMBOL = TypedSymbol("invDir", "int")
+    INV_DIR_SYMBOL = TypedSymbol("inv_dir", "int")
 
 
-def createBoundaryKernel(field, indexField, stencil, boundaryFunctor, target='cpu', openMP=True):
+def create_boundary_kernel(field, index_field, stencil, boundary_functor, target='cpu', openmp=True):
     elements = [BoundaryOffsetInfo(stencil)]
-    indexArrDtype = indexField.dtype.numpy_dtype
-    dirSymbol = TypedSymbol("dir", indexArrDtype.fields['dir'][0])
-    elements += [Assignment(dirSymbol, indexField[0]('dir'))]
-    elements += boundaryFunctor(field, directionSymbol=dirSymbol, indexField=indexField)
-    return createIndexedKernel(elements, [indexField], target=target, cpuOpenMP=openMP)
+    index_arr_dtype = index_field.dtype.numpy_dtype
+    dir_symbol = TypedSymbol("dir", index_arr_dtype.fields['dir'][0])
+    elements += [Assignment(dir_symbol, index_field[0]('dir'))]
+    elements += boundary_functor(field, direction_symbol=dir_symbol, index_field=index_field)
+    return create_indexed_kernel(elements, [index_field], target=target, cpu_openmp=openmp)
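
Putting the renamed `BoundaryHandling` API together, a hedged usage sketch; `dh` (a `DataHandling` with a 'pdfs' array already registered) and `stencil` (e.g. a D2Q9 direction list) are assumed to exist:

```python
from pystencils import makeSlice
from pystencils.boundaries.boundaryconditions import Neumann
from pystencils.boundaries.boundaryhandling import BoundaryHandling

bh = BoundaryHandling(dh, 'pdfs', stencil, target='cpu', openmp=True)
bh.set_boundary(Neumann(), slice_obj=makeSlice[0, :])                        # west border
bh.set_boundary(Neumann(), mask_callback=lambda x, y: x ** 2 + y ** 2 < 25)  # circular region
bh.prepare()                    # builds the per-boundary index lists
bh()                            # runs all registered boundary kernels
bh.geometry_to_vtk('geometry')  # writes the flag field for inspection in Paraview
```
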
diff --git a/boundaries/createindexlist.py b/boundaries/createindexlist.py
index 69ddb4c1b14f91628fe23d3fa8a4f8ef68d67ac2..d1419786268fb968de773abd00f03219e4d52e30 100644
--- a/boundaries/createindexlist.py
+++ b/boundaries/createindexlist.py
@@ -6,7 +6,7 @@ try:
     import pyximport;
 
     pyximport.install()
-    from pystencils.boundaries.createindexlistcython import createBoundaryIndexList2D, createBoundaryIndexList3D
+    from pystencils.boundaries.createindexlistcython import create_boundary_index_list_2d, create_boundary_index_list_3d
 
     cythonFuncsAvailable = True
 except Exception:
@@ -22,7 +22,7 @@ def numpyDataTypeForBoundaryObject(boundaryObject, dim):
     coordinateNames = boundaryIndexArrayCoordinateNames[:dim]
     return np.dtype([(name, np.int32) for name in coordinateNames] +
                     [(directionMemberName, np.int32)] +
-                    [(i[0], i[1].numpy_dtype) for i in boundaryObject.additionalData], align=True)
+                    [(i[0], i[1].numpy_dtype) for i in boundaryObject.additional_data], align=True)
 
 
 def _createBoundaryIndexListPython(flagFieldArr, nrOfGhostLayers, boundaryMask, fluidMask, stencil):
@@ -51,9 +51,9 @@ def createBoundaryIndexList(flagField, stencil, boundaryMask, fluidMask, nrOfGho
     if cythonFuncsAvailable:
         stencil = np.array(stencil, dtype=np.int32)
         if dim == 2:
-            idxList = createBoundaryIndexList2D(flagField, nrOfGhostLayers, boundaryMask, fluidMask, stencil)
+            idxList = create_boundary_index_list_2d(flagField, nrOfGhostLayers, boundaryMask, fluidMask, stencil)
         elif dim == 3:
-            idxList = createBoundaryIndexList3D(flagField, nrOfGhostLayers, boundaryMask, fluidMask, stencil)
+            idxList = create_boundary_index_list_3d(flagField, nrOfGhostLayers, boundaryMask, fluidMask, stencil)
         else:
             raise ValueError("Flag field has to be a 2 or 3 dimensional numpy array")
         return np.array(idxList, dtype=indexArrDtype)
@@ -67,7 +67,7 @@ def createBoundaryIndexArray(flagField, stencil, boundaryMask, fluidMask, bounda
     idxArray = createBoundaryIndexList(flagField, stencil, boundaryMask, fluidMask, nrOfGhostLayers)
     dim = len(flagField.shape)
 
-    if boundaryObject.additionalData:
+    if boundaryObject.additional_data:
         coordinateNames = boundaryIndexArrayCoordinateNames[:dim]
         indexArrDtype = numpyDataTypeForBoundaryObject(boundaryObject, dim)
         extendedIdxField = np.empty(len(idxArray), dtype=indexArrDtype)
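
A toy sketch of the (still camelCase) Python wrapper that dispatches to the renamed Cython functions; masks and shape are arbitrary:

```python
import numpy as np
from pystencils.boundaries.createindexlist import createBoundaryIndexList

stencil = [(0, 0), (1, 0), (-1, 0), (0, 1), (0, -1)]
flag_field = np.full((8, 8), 1, dtype=np.uint32)  # fluid mask = 1 everywhere ...
flag_field[0, :] = 2                              # ... boundary mask = 2 on the left column
# Each entry is (x, y, dir): a fluid cell plus the direction of its link to a boundary cell.
idx_list = createBoundaryIndexList(flag_field, stencil, boundaryMask=2, fluidMask=1)
```
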
diff --git a/boundaries/createindexlistcython.pyx b/boundaries/createindexlistcython.pyx
index dfa84069d67442f4866fcec1924f6e5cd77fdd61..fc3b2dc928437457d6efc22c4eff35cbb6eb1bb7 100644
--- a/boundaries/createindexlistcython.pyx
+++ b/boundaries/createindexlistcython.pyx
@@ -15,49 +15,49 @@ ctypedef fused IntegerType:
 
 @cython.boundscheck(False) # turn off bounds-checking for entire function
 @cython.wraparound(False)  # turn off negative index wrapping for entire function
-def createBoundaryIndexList2D(object[IntegerType, ndim=2] flagField,
-                              int nrOfGhostLayers, IntegerType boundaryMask, IntegerType fluidMask,
-                              object[int, ndim=2] stencil):
+def create_boundary_index_list_2d(object[IntegerType, ndim=2] flag_field,
+                                  int nr_of_ghost_layers, IntegerType boundary_mask, IntegerType fluid_mask,
+                                  object[int, ndim=2] stencil):
     cdef int xs, ys, x, y
-    cdef int dirIdx, numDirections, dx, dy
+    cdef int dirIdx, num_directions, dx, dy
 
-    xs, ys = flagField.shape
-    boundaryIndexList = []
-    numDirections = stencil.shape[0]
+    xs, ys = flag_field.shape
+    boundary_index_list = []
+    num_directions = stencil.shape[0]
 
-    for y in range(nrOfGhostLayers,ys-nrOfGhostLayers):
-        for x in range(nrOfGhostLayers,xs-nrOfGhostLayers):
-            if flagField[x,y] & fluidMask:
-                for dirIdx in range(1, numDirections):
+    for y in range(nr_of_ghost_layers, ys - nr_of_ghost_layers):
+        for x in range(nr_of_ghost_layers, xs - nr_of_ghost_layers):
+            if flag_field[x, y] & fluid_mask:
+                for dirIdx in range(1, num_directions):
                     dx = stencil[dirIdx,0]
                     dy = stencil[dirIdx,1]
-                    if flagField[x+dx, y+dy] & boundaryMask:
-                        boundaryIndexList.append((x,y, dirIdx))
-    return boundaryIndexList
+                    if flag_field[x + dx, y + dy] & boundary_mask:
+                        boundary_index_list.append((x,y, dirIdx))
+    return boundary_index_list
 
 
 @cython.boundscheck(False) # turn off bounds-checking for entire function
 @cython.wraparound(False)  # turn off negative index wrapping for entire function
-def createBoundaryIndexList3D(object[IntegerType, ndim=3] flagField,
-                              int nrOfGhostLayers, IntegerType boundaryMask, IntegerType fluidMask,
-                              object[int, ndim=2] stencil):
+def create_boundary_index_list_3d(object[IntegerType, ndim=3] flag_field,
+                                  int nr_of_ghost_layers, IntegerType boundary_mask, IntegerType fluid_mask,
+                                  object[int, ndim=2] stencil):
     cdef int xs, ys, zs, x, y, z
-    cdef int dirIdx, numDirections, dx, dy, dz
+    cdef int dirIdx, num_directions, dx, dy, dz
 
-    xs, ys, zs = flagField.shape
-    boundaryIndexList = []
-    numDirections = stencil.shape[0]
+    xs, ys, zs = flag_field.shape
+    boundary_index_list = []
+    num_directions = stencil.shape[0]
 
-    for z in range(nrOfGhostLayers, zs-nrOfGhostLayers):
-        for y in range(nrOfGhostLayers,ys-nrOfGhostLayers):
-            for x in range(nrOfGhostLayers,xs-nrOfGhostLayers):
-                if flagField[x, y, z] & fluidMask:
-                    for dirIdx in range(1, numDirections):
+    for z in range(nr_of_ghost_layers, zs - nr_of_ghost_layers):
+        for y in range(nr_of_ghost_layers, ys - nr_of_ghost_layers):
+            for x in range(nr_of_ghost_layers, xs - nr_of_ghost_layers):
+                if flag_field[x, y, z] & fluid_mask:
+                    for dirIdx in range(1, num_directions):
                         dx = stencil[dirIdx,0]
                         dy = stencil[dirIdx,1]
                         dz = stencil[dirIdx,2]
-                        if flagField[x + dx, y + dy, z + dz] & boundaryMask:
-                            boundaryIndexList.append((x,y,z, dirIdx))
-    return boundaryIndexList
+                        if flag_field[x + dx, y + dy, z + dz] & boundary_mask:
+                            boundary_index_list.append((x,y,z, dirIdx))
+    return boundary_index_list
 
 
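For orientation, a sketch of how these renamed index-list builders are called from Python (the module path, the mask values, and the uint32 flag dtype are illustrative assumptions, not part of this patch):

    import numpy as np
    # assumes the compiled Cython module is importable under this name
    from pystencils.boundaries.createindexlistcython import create_boundary_index_list_2d

    FLUID, BOUNDARY = 1, 2                      # illustrative flag bits
    flag_field = np.full((6, 6), FLUID, dtype=np.uint32)
    flag_field[0, :] = BOUNDARY                 # mark one ghost-layer row as boundary
    stencil = np.array([[0, 0], [1, 0], [-1, 0], [0, 1], [0, -1]], dtype=np.int32)

    # returns a list of (x, y, dir_idx) tuples: fluid cells with a boundary neighbor
    idx_list = create_boundary_index_list_2d(flag_field, 1, BOUNDARY, FLUID, stencil)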
diff --git a/boundaries/inkernel.py b/boundaries/inkernel.py
index 22f8d3e3771bb50a2dbee61c0f6e9d493acf1262..90ba36b208972dd934551bd4a02d4f48d8828f24 100644
--- a/boundaries/inkernel.py
+++ b/boundaries/inkernel.py
@@ -1,27 +1,28 @@
 import sympy as sp
 from pystencils import Field, TypedSymbol
-from pystencils.bitoperations import bitwiseAnd
+from pystencils.bitoperations import bitwise_and
 from pystencils.boundaries.boundaryhandling import FlagInterface
 from pystencils.data_types import create_type
 
 
-def addNeumannBoundary(eqs, fields, flagField, boundaryFlag="neumannFlag", inverseFlag=False):
+def add_neumann_boundary(eqs, fields, flag_field, boundary_flag="neumannFlag", inverse_flag=False):
     """
     Replaces all neighbor accesses by flag field guarded accesses.
     If flag in neighboring cell is set, the center value is used instead
     :param eqs: list of equations containing field accesses to direct neighbors
     :param fields: fields for which the Neumann boundary should be applied
-    :param flagField: integer field marking boundary cells
-    :param boundaryFlag: if flag field has value 'boundaryFlag' (no bitoperations yet) the cell is assumed to be boundary
-    :param inverseFlag: if true, boundary cells are where flagfield has not the value of boundaryFlag
+    :param flag_field: integer field marking boundary cells
+    :param boundary_flag: if the flag field has the value 'boundary_flag' (no bit operations yet),
+                          the cell is assumed to be a boundary cell
+    :param inverse_flag: if true, boundary cells are where the flag field does not have the value of 'boundary_flag'
     :return: list of equations with guarded field accesses
     """
     if not hasattr(fields, "__len__"):
         fields = [fields]
     fields = set(fields)
 
-    if type(boundaryFlag) is str:
-        boundaryFlag = TypedSymbol(boundaryFlag, dtype=create_type(FlagInterface.FLAG_DTYPE))
+    if type(boundary_flag) is str:
+        boundary_flag = TypedSymbol(boundary_flag, dtype=create_type(FlagInterface.FLAG_DTYPE))
 
     substitutions = {}
     for eq in eqs:
@@ -33,10 +34,10 @@ def addNeumannBoundary(eqs, fields, flagField, boundaryFlag="neumannFlag", inver
             if all(offset == 0 for offset in fa.offsets):
                 continue
 
-            if inverseFlag:
-                condition = sp.Eq(bitwiseAnd(flagField[tuple(fa.offsets)], boundaryFlag), 0)
+            if inverse_flag:
+                condition = sp.Eq(bitwise_and(flag_field[tuple(fa.offsets)], boundary_flag), 0)
             else:
-                condition = sp.Ne(bitwiseAnd(flagField[tuple(fa.offsets)], boundaryFlag), 0)
+                condition = sp.Ne(bitwise_and(flag_field[tuple(fa.offsets)], boundary_flag), 0)
 
             center = fa.field(*fa.index)
             substitutions[fa] = sp.Piecewise((center, condition), (fa, True))
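A minimal usage sketch for the renamed function (the field construction via Field.create_generic and the uint32 flag dtype are assumptions about this revision's Field API):

    import numpy as np
    import sympy as sp
    from pystencils import Field
    from pystencils.boundaries.inkernel import add_neumann_boundary

    src = Field.create_generic('src', spatial_dimensions=2)
    dst = Field.create_generic('dst', spatial_dimensions=2)
    flags = Field.create_generic('flags', spatial_dimensions=2, dtype=np.uint32)

    update = [sp.Eq(dst[0, 0], (src[1, 0] + src[-1, 0] + src[0, 1] + src[0, -1]) / 4)]
    # neighbor reads of 'src' become Piecewise terms guarded by the flag field
    guarded = add_neumann_boundary(update, fields=[src], flag_field=flags)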
diff --git a/cache.py b/cache.py
index 40a0120226827eddbc56eb882f84159d5ba10cfc..e10a211efa5556f0aaffd7f1740ea9cb0bbac45a 100644
--- a/cache.py
+++ b/cache.py
@@ -1,5 +1,3 @@
-import sympy as sp
-import json
 import os
 
 try:
@@ -14,14 +12,16 @@ try:
-        cacheDir = os.environ['PYSTENCILS_CACHE_DIR']
+        cache_dir = os.environ['PYSTENCILS_CACHE_DIR']
     else:
-        cacheDir = user_cache_dir('pystencils')
-    diskcache = Memory(cachedir=cacheDir, verbose=False).cache
-    diskcacheNoFallback = diskcache
+        cache_dir = user_cache_dir('pystencils')
+    disk_cache = Memory(cachedir=cache_dir, verbose=False).cache
+    disk_cache_no_fallback = disk_cache
 except ImportError:
     # fallback to in-memory caching if joblib is not available
-    diskcache = memorycache(maxsize=64)
-    diskcacheNoFallback = lambda o: o
+    disk_cache = memorycache(maxsize=64)
+
+    def disk_cache_no_fallback(o):
+        return o
 
 
 # Disable memory cache:
-# diskcache = lambda o: o
-# diskcacheNoFallback = lambda o: o
+# disk_cache = lambda o: o
+# disk_cache_no_fallback = lambda o: o
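Usage of the renamed cache decorators, for reference (`expensive_transform` is a made-up example function):

    from pystencils.cache import disk_cache, disk_cache_no_fallback

    @disk_cache                      # memoized on disk if joblib is available, in memory otherwise
    def expensive_transform(n):
        return sum(i * i for i in range(n))

    @disk_cache_no_fallback          # memoized only if joblib is available, plain call otherwise
    def another_transform(n):
        return n ** 2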
diff --git a/cpu/__init__.py b/cpu/__init__.py
index 00f0f9e7235061d21282396f96c32f7db26d718d..0e592f13bcf84699b69f3867da836af7cc41664d 100644
--- a/cpu/__init__.py
+++ b/cpu/__init__.py
@@ -1,3 +1,3 @@
-from pystencils.cpu.kernelcreation import createKernel, createIndexedKernel, addOpenMP
-from pystencils.cpu.cpujit import makePythonFunction
-from pystencils.backends.cbackend import print_c
+from pystencils.cpu.kernelcreation import create_kernel, create_indexed_kernel, add_openmp
+from pystencils.cpu.cpujit import make_python_function
+from pystencils.backends.cbackend import generate_c
diff --git a/cpu/cpujit.py b/cpu/cpujit.py
index 894621c0a4a9842503413311ec2ee5cf7b5782e7..36edade180a63153577376cf40e33f1ee2c2f610 100644
--- a/cpu/cpujit.py
+++ b/cpu/cpujit.py
@@ -17,7 +17,7 @@ Compiler Config (Linux)
 - **'os'**: should be detected automatically as 'linux'
 - **'command'**: path to C++ compiler (defaults to 'g++')
 - **'flags'**: space separated list of compiler flags. Make sure to activate OpenMP in your compiler
-- **'restrictQualifier'**: the restrict qualifier is not standardized accross compilers.
+- **'restrict_qualifier'**: the restrict qualifier is not standardized across compilers.
   For most Linux compilers the qualifier is ``__restrict__``
 
 
@@ -28,12 +28,12 @@ Compiler Config (Windows)
 Then 'cl.exe' is used to compile.
 
 - **'os'**: should be detected automatically as 'windows'
-- **'msvcVersion'**:  either a version number, year number, 'auto' or 'latest' for automatic detection of latest
+- **'msvc_version'**:  either a version number, year number, 'auto' or 'latest' for automatic detection of latest
                       installed version or 'setuptools' for setuptools-based detection. Alternatively path to folder
                       where Visual Studio is installed. This path has to contain a file called 'vcvarsall.bat'
 - **'arch'**: 'x86' or 'x64'
 - **'flags'**: flags passed to 'cl.exe', make sure OpenMP is activated
-- **'restrictQualifier'**: the restrict qualifier is not standardized across compilers.
+- **'restrict_qualifier'**: the restrict qualifier is not standardized across compilers.
   For Windows compilers the qualifier should be ``__restrict``
 
 
@@ -44,20 +44,20 @@ Cache Config
 the shared libraries which are then loaded from Python using ctypes. The file names are SHA hashes of the
 generated code. If the same kernel was already compiled, the existing object file is used - no recompilation is done.
 
-If 'sharedLibrary' is specified, all kernels that are currently in the cache are compiled into a single shared library.
+If 'shared_library' is specified, all kernels that are currently in the cache are compiled into a single shared library.
 This mechanism can be used to run *pystencils* on systems where compilation is not possible, e.g. on clusters where
 compilation on the compute nodes is not possible.
 First the script is run on a system where compilation is possible (e.g. the login node) with
-'readFromSharedLibrary=False' and with 'sharedLibrary' set a valid path.
+'read_from_shared_library=False' and with 'shared_library' set to a valid path.
 All kernels generated during the run are put into the cache and at the end
 compiled into the shared library. Then, the same script can be run from the compute nodes, with
-'readFromSharedLibrary=True', such that kernels are taken from the library instead of compiling them.
+'read_from_shared_library=True', such that kernels are taken from the library instead of compiling them.
 
 
-- **'readFromSharedLibrary'**: if true kernels are not compiled but assumed to be in the shared library
-- **'objectCache'**: path to a folder where intermediate files are stored
-- **'clearCacheOnStart'**: when true the cache is cleared on each start of a *pystencils* script
-- **'sharedLibrary'**: path to a shared library file, which is created if `readFromSharedLibrary=false`
+- **'read_from_shared_library'**: if true, kernels are not compiled but assumed to be in the shared library
+- **'object_cache'**: path to a folder where intermediate files are stored
+- **'clear_cache_on_start'**: when true the cache is cleared on each start of a *pystencils* script
+- **'shared_library'**: path to a shared library file, which is created if `read_from_shared_library=false`
 """
 from __future__ import print_function
 import os
@@ -71,14 +71,14 @@ import shutil
 import numpy as np
 from appdirs import user_config_dir, user_cache_dir
 from ctypes import cdll
-from pystencils.backends.cbackend import print_c, get_headers
+from pystencils.backends.cbackend import generate_c, get_headers
 from collections import OrderedDict, Mapping
-from pystencils.transformations import symbolNameToVariableName
+from pystencils.transformations import symbol_name_to_variable_name
 from pystencils.data_types import to_ctypes, get_base_type, StructType
 from pystencils.field import FieldType
 
 
-def makePythonFunction(kernelFunctionNode, argumentDict={}):
+def make_python_function(kernel_function_node, argument_dict={}):
     """
     Creates C code from the abstract syntax tree, compiles it and makes it accessible as Python function
 
@@ -86,23 +86,23 @@ def makePythonFunction(kernelFunctionNode, argumentDict={}):
         - numpy arrays for each field used in the kernel. The keyword argument name is the name of the field
         - all symbols which are not defined in the kernel itself are expected as parameters
 
-    :param kernelFunctionNode: the abstract syntax tree
-    :param argumentDict: parameters passed here are already fixed. Remaining parameters have to be passed to the
+    :param kernel_function_node: the abstract syntax tree
+    :param argument_dict: parameters passed here are already fixed. Remaining parameters have to be passed to the
                         returned kernel functor.
     :return: kernel functor
     """
     # build up list of CType arguments
-    func = compileAndLoad(kernelFunctionNode)
+    func = compile_and_load(kernel_function_node)
     func.restype = None
     try:
-        args = buildCTypeArgumentList(kernelFunctionNode.parameters, argumentDict)
+        args = build_ctypes_argument_list(kernel_function_node.parameters, argument_dict)
     except KeyError:
         # not all parameters specified yet
-        return makePythonFunctionIncompleteParams(kernelFunctionNode, argumentDict, func)
+        return make_python_function_incomplete_params(kernel_function_node, argument_dict, func)
     return lambda: func(*args)
 
 
-def setCompilerConfig(config):
+def set_compiler_config(config):
     """
     Override the configuration provided in config file
 
@@ -133,18 +133,18 @@ def setCompilerConfig(config):
     _config = config.copy()
 
 
-def _recursiveDictUpdate(d, u):
+def _recursive_dict_update(d, u):
     for k, v in u.items():
         if isinstance(v, Mapping):
-            r = _recursiveDictUpdate(d.get(k, {}), v)
+            r = _recursive_dict_update(d.get(k, {}), v)
             d[k] = r
         else:
             d[k] = u[k]
     return d
 
 
-def getConfigurationFilePath():
-    configPathInHome = os.path.join(user_config_dir('pystencils'), 'config.json')
+def get_configuration_file_path():
+    config_path_in_home = os.path.join(user_config_dir('pystencils'), 'config.json')
 
     # 1) Read path from environment variable if found
     if 'PYSTENCILS_CONFIG' in os.environ:
@@ -153,14 +153,14 @@ def getConfigurationFilePath():
     elif os.path.exists("pystencils.json"):
         return "pystencils.json", True
-    # 3) Try ~/.pystencils.json
+    # 3) Try config file in the user configuration directory
-    elif os.path.exists(configPathInHome):
-        return configPathInHome, True
+    elif os.path.exists(config_path_in_home):
+        return config_path_in_home, True
     else:
-        return configPathInHome, False
+        return config_path_in_home, False
 
 
-def createFolder(path, isFile):
-    if isFile:
+def create_folder(path, is_file):
+    if is_file:
         path = os.path.split(path)[0]
     try:
         os.makedirs(path)
@@ -168,265 +168,267 @@ def createFolder(path, isFile):
         pass
 
 
-def readConfig():
+def read_config():
     if platform.system().lower() == 'linux':
-        defaultCompilerConfig = OrderedDict([
+        default_compiler_config = OrderedDict([
             ('os', 'linux'),
             ('command', 'g++'),
             ('flags', '-Ofast -DNDEBUG -fPIC -march=native -fopenmp -std=c++11'),
-            ('restrictQualifier', '__restrict__')
+            ('restrict_qualifier', '__restrict__')
         ])
 
     elif platform.system().lower() == 'windows':
-        defaultCompilerConfig = OrderedDict([
+        default_compiler_config = OrderedDict([
             ('os', 'windows'),
-            ('msvcVersion', 'latest'),
+            ('msvc_version', 'latest'),
             ('arch', 'x64'),
             ('flags', '/Ox /fp:fast /openmp /arch:avx'),
-            ('restrictQualifier', '__restrict')
+            ('restrict_qualifier', '__restrict')
         ])
-    defaultCacheConfig = OrderedDict([
-        ('readFromSharedLibrary', False),
-        ('objectCache', os.path.join(user_cache_dir('pystencils'), 'objectcache')),
-        ('clearCacheOnStart', False),
-        ('sharedLibrary', os.path.join(user_cache_dir('pystencils'), 'cache.so')),
+    default_cache_config = OrderedDict([
+        ('read_from_shared_library', False),
+        ('object_cache', os.path.join(user_cache_dir('pystencils'), 'objectcache')),
+        ('clear_cache_on_start', False),
+        ('shared_library', os.path.join(user_cache_dir('pystencils'), 'cache.so')),
     ])
 
-    defaultConfig = OrderedDict([('compiler', defaultCompilerConfig),
-                                 ('cache', defaultCacheConfig)])
+    default_config = OrderedDict([('compiler', default_compiler_config),
+                                  ('cache', default_cache_config)])
 
-    configPath, configExists = getConfigurationFilePath()
-    config = defaultConfig.copy()
-    if configExists:
-        with open(configPath, 'r') as jsonConfigFile:
-            loadedConfig = json.load(jsonConfigFile)
-        config = _recursiveDictUpdate(config, loadedConfig)
+    config_path, config_exists = get_configuration_file_path()
+    config = default_config.copy()
+    if config_exists:
+        with open(config_path, 'r') as json_config_file:
+            loaded_config = json.load(json_config_file)
+        config = _recursive_dict_update(config, loaded_config)
     else:
-        createFolder(configPath, True)
-        json.dump(config, open(configPath, 'w'), indent=4)
+        create_folder(config_path, True)
+        json.dump(config, open(config_path, 'w'), indent=4)
 
-    config['cache']['sharedLibrary'] = os.path.expanduser(config['cache']['sharedLibrary']).format(pid=os.getpid())
-    config['cache']['objectCache'] = os.path.expanduser(config['cache']['objectCache']).format(pid=os.getpid())
+    config['cache']['shared_library'] = os.path.expanduser(config['cache']['shared_library']).format(pid=os.getpid())
+    config['cache']['object_cache'] = os.path.expanduser(config['cache']['object_cache']).format(pid=os.getpid())
 
-    if config['cache']['clearCacheOnStart']:
-        shutil.rmtree(config['cache']['objectCache'], ignore_errors=True)
+    if config['cache']['clear_cache_on_start']:
+        shutil.rmtree(config['cache']['object_cache'], ignore_errors=True)
 
-    createFolder(config['cache']['objectCache'], False)
-    createFolder(config['cache']['sharedLibrary'], True)
+    create_folder(config['cache']['object_cache'], False)
+    create_folder(config['cache']['shared_library'], True)
 
     if 'env' not in config['compiler']:
         config['compiler']['env'] = {}
 
     if config['compiler']['os'] == 'windows':
-        from pystencils.cpu.msvc_detection import getEnvironment
-        msvcEnv = getEnvironment(config['compiler']['msvcVersion'], config['compiler']['arch'])
-        config['compiler']['env'].update(msvcEnv)
+        from pystencils.cpu.msvc_detection import get_environment
+        msvc_env = get_environment(config['compiler']['msvc_version'], config['compiler']['arch'])
+        config['compiler']['env'].update(msvc_env)
 
     return config
 
 
-_config = readConfig()
+_config = read_config()
 
 
-def getCompilerConfig():
+def get_compiler_config():
     return _config['compiler']
 
 
-def getCacheConfig():
+def get_cache_config():
     return _config['cache']
 
 
-def hashToFunctionName(h):
+def hash_to_function_name(h):
     res = "func_%s" % (h,)
     return res.replace('-', 'm')
 
 
-def compileObjectCacheToSharedLibrary():
-    compilerConfig = getCompilerConfig()
-    cacheConfig = getCacheConfig()
+def compile_object_cache_to_shared_library():
+    compiler_config = get_compiler_config()
+    cache_config = get_cache_config()
 
-    sharedLibrary = cacheConfig['sharedLibrary']
-    if len(sharedLibrary) == 0 or cacheConfig['readFromSharedLibrary']:
+    shared_library = cache_config['shared_library']
+    if len(shared_library) == 0 or cache_config['read_from_shared_library']:
         return
 
-    configEnv = compilerConfig['env'] if 'env' in compilerConfig else {}
-    compileEnvironment = os.environ.copy()
-    compileEnvironment.update(configEnv)
+    config_env = compiler_config['env'] if 'env' in compiler_config else {}
+    compile_environment = os.environ.copy()
+    compile_environment.update(config_env)
 
     try:
-        if compilerConfig['os'] == 'windows':
-            allObjectFiles = glob.glob(os.path.join(cacheConfig['objectCache'], '*.obj'))
-            linkCmd = ['link.exe',  '/DLL', '/out:' + sharedLibrary]
+        if compiler_config['os'] == 'windows':
+            all_object_files = glob.glob(os.path.join(cache_config['object_cache'], '*.obj'))
+            link_cmd = ['link.exe',  '/DLL', '/out:' + shared_library]
         else:
-            allObjectFiles = glob.glob(os.path.join(cacheConfig['objectCache'], '*.o'))
-            linkCmd = [compilerConfig['command'], '-shared', '-o', sharedLibrary]
+            all_object_files = glob.glob(os.path.join(cache_config['object_cache'], '*.o'))
+            link_cmd = [compiler_config['command'], '-shared', '-o', shared_library]
 
-        linkCmd += allObjectFiles
-        if len(allObjectFiles) > 0:
-            runCompileStep(linkCmd)
+        link_cmd += all_object_files
+        if len(all_object_files) > 0:
+            run_compile_step(link_cmd)
     except subprocess.CalledProcessError as e:
         print(e.output)
         raise e
 
-atexit.register(compileObjectCacheToSharedLibrary)
 
+atexit.register(compile_object_cache_to_shared_library)
 
-def generateCode(ast, restrictQualifier, functionPrefix, targetFile):
+
+def generate_code(ast, restrict_qualifier, function_prefix, target_file):
     headers = get_headers(ast)
     headers.update(['<cmath>', '<cstdint>'])
 
-    with open(targetFile, 'w') as sourceFile:
-        code = print_c(ast)
+    with open(target_file, 'w') as source_file:
+        code = generate_c(ast)
-        includes = "\n".join(["#include %s" % (includeFile,) for includeFile in headers])
-        print(includes, file=sourceFile)
+        includes = "\n".join(["#include %s" % (include_file,) for include_file in headers])
+        print(includes, file=source_file)
-        print("#define RESTRICT %s" % (restrictQualifier,), file=sourceFile)
-        print("#define FUNC_PREFIX %s" % (functionPrefix,), file=sourceFile)
+        print("#define RESTRICT %s" % (restrict_qualifier,), file=source_file)
+        print("#define FUNC_PREFIX %s" % (function_prefix,), file=source_file)
-        print('extern "C" { ', file=sourceFile)
-        print(code, file=sourceFile)
-        print('}', file=sourceFile)
+        print('extern "C" { ', file=source_file)
+        print(code, file=source_file)
+        print('}', file=source_file)
 
 
-def runCompileStep(command):
-    compilerConfig = getCompilerConfig()
-    configEnv = compilerConfig['env'] if 'env' in compilerConfig else {}
-    compileEnvironment = os.environ.copy()
-    compileEnvironment.update(configEnv)
+def run_compile_step(command):
+    compiler_config = get_compiler_config()
+    config_env = compiler_config['env'] if 'env' in compiler_config else {}
+    compile_environment = os.environ.copy()
+    compile_environment.update(config_env)
 
     try:
-        shell = True if compilerConfig['os'].lower() == 'windows' else False
-        subprocess.check_output(command, env=compileEnvironment, stderr=subprocess.STDOUT, shell=shell)
+        shell = True if compiler_config['os'].lower() == 'windows' else False
+        subprocess.check_output(command, env=compile_environment, stderr=subprocess.STDOUT, shell=shell)
     except subprocess.CalledProcessError as e:
         print(" ".join(command))
         print(e.output.decode('utf8'))
         raise e
 
 
-def compileLinux(ast, codeHashStr, srcFile, libFile):
-    cacheConfig = getCacheConfig()
-    compilerConfig = getCompilerConfig()
+def compile_linux(ast, code_hash_str, src_file, lib_file):
+    cache_config = get_cache_config()
+    compiler_config = get_compiler_config()
 
-    objectFile = os.path.join(cacheConfig['objectCache'], codeHashStr + '.o')
+    object_file = os.path.join(cache_config['object_cache'], code_hash_str + '.o')
     # Compilation
-    if not os.path.exists(objectFile):
-        generateCode(ast, compilerConfig['restrictQualifier'], '', srcFile)
-        compileCmd = [compilerConfig['command'], '-c'] + compilerConfig['flags'].split()
-        compileCmd += ['-o', objectFile, srcFile]
-        runCompileStep(compileCmd)
+    if not os.path.exists(object_file):
+        generate_code(ast, compiler_config['restrict_qualifier'], '', src_file)
+        compile_cmd = [compiler_config['command'], '-c'] + compiler_config['flags'].split()
+        compile_cmd += ['-o', object_file, src_file]
+        run_compile_step(compile_cmd)
 
     # Linking
-    runCompileStep([compilerConfig['command'], '-shared', objectFile, '-o', libFile] + compilerConfig['flags'].split())
+    run_compile_step([compiler_config['command'], '-shared', object_file, '-o', lib_file] +
+                     compiler_config['flags'].split())
 
 
-def compileWindows(ast, codeHashStr, srcFile, libFile):
-    cacheConfig = getCacheConfig()
-    compilerConfig = getCompilerConfig()
+def compile_windows(ast, code_hash_str, src_file, lib_file):
+    cache_config = get_cache_config()
+    compiler_config = get_compiler_config()
 
-    objectFile = os.path.join(cacheConfig['objectCache'], codeHashStr + '.obj')
+    object_file = os.path.join(cache_config['object_cache'], code_hash_str + '.obj')
     # Compilation
-    if not os.path.exists(objectFile):
-        generateCode(ast, compilerConfig['restrictQualifier'],
-                     '__declspec(dllexport)', srcFile)
+    if not os.path.exists(object_file):
+        generate_code(ast, compiler_config['restrict_qualifier'],
+                      '__declspec(dllexport)', src_file)
 
-        # /c compiles only, /EHsc turns of exception handling in c code
+        # /c compiles only, /EHsc turns off exception handling in C code
-        compileCmd = ['cl.exe', '/c', '/EHsc'] + compilerConfig['flags'].split()
-        compileCmd += [srcFile, '/Fo' + objectFile]
-        runCompileStep(compileCmd)
+        compile_cmd = ['cl.exe', '/c', '/EHsc'] + compiler_config['flags'].split()
+        compile_cmd += [src_file, '/Fo' + object_file]
+        run_compile_step(compile_cmd)
 
     # Linking
-    runCompileStep(['link.exe', '/DLL', '/out:' + libFile, objectFile])
+    run_compile_step(['link.exe', '/DLL', '/out:' + lib_file, object_file])
 
 
-def compileAndLoad(ast):
-    cacheConfig = getCacheConfig()
+def compile_and_load(ast):
+    cache_config = get_cache_config()
 
-    codeHashStr = hashlib.sha256(print_c(ast).encode()).hexdigest()
-    ast.functionName = hashToFunctionName(codeHashStr)
+    code_hash_str = hashlib.sha256(generate_c(ast).encode()).hexdigest()
+    ast.function_name = hash_to_function_name(code_hash_str)
 
-    srcFile = os.path.join(cacheConfig['objectCache'], codeHashStr + ".cpp")
+    src_file = os.path.join(cache_config['object_cache'], code_hash_str + ".cpp")
 
-    if cacheConfig['readFromSharedLibrary']:
-        return cdll.LoadLibrary(cacheConfig['sharedLibrary'])[ast.functionName]
+    if cache_config['read_from_shared_library']:
+        return cdll.LoadLibrary(cache_config['shared_library'])[ast.function_name]
     else:
-        if getCompilerConfig()['os'].lower() == 'windows':
-            libFile = os.path.join(cacheConfig['objectCache'], codeHashStr + ".dll")
-            if not os.path.exists(libFile):
-                compileWindows(ast, codeHashStr, srcFile, libFile)
+        if get_compiler_config()['os'].lower() == 'windows':
+            lib_file = os.path.join(cache_config['object_cache'], code_hash_str + ".dll")
+            if not os.path.exists(lib_file):
+                compile_windows(ast, code_hash_str, src_file, lib_file)
         else:
-            libFile = os.path.join(cacheConfig['objectCache'], codeHashStr + ".so")
-            if not os.path.exists(libFile):
-                compileLinux(ast, codeHashStr, srcFile, libFile)
-        return cdll.LoadLibrary(libFile)[ast.functionName]
+            lib_file = os.path.join(cache_config['object_cache'], code_hash_str + ".so")
+            if not os.path.exists(lib_file):
+                compile_linux(ast, code_hash_str, src_file, lib_file)
+        return cdll.LoadLibrary(lib_file)[ast.function_name]
 
 
-def buildCTypeArgumentList(parameterSpecification, argumentDict):
-    argumentDict = {symbolNameToVariableName(k): v for k, v in argumentDict.items()}
-    ctArguments = []
-    arrayShapes = set()
-    indexArrShapes = set()
+def build_ctypes_argument_list(parameter_specification, argument_dict):
+    argument_dict = {symbol_name_to_variable_name(k): v for k, v in argument_dict.items()}
+    ct_arguments = []
+    array_shapes = set()
+    index_arr_shapes = set()
 
-    for arg in parameterSpecification:
+    for arg in parameter_specification:
         if arg.isFieldArgument:
             try:
-                fieldArr = argumentDict[arg.fieldName]
+                field_arr = argument_dict[arg.field_name]
             except KeyError:
-                raise KeyError("Missing field parameter for kernel call " + arg.fieldName)
+                raise KeyError("Missing field parameter for kernel call " + arg.field_name)
 
-            symbolicField = arg.field
+            symbolic_field = arg.field
             if arg.isFieldPtrArgument:
-                ctArguments.append(fieldArr.ctypes.data_as(to_ctypes(arg.dtype)))
-                if symbolicField.hasFixedShape:
-                    symbolicFieldShape = tuple(int(i) for i in symbolicField.shape)
-                    if isinstance(symbolicField.dtype, StructType):
-                        symbolicFieldShape = symbolicFieldShape[:-1]
-                    if symbolicFieldShape != fieldArr.shape:
+                ct_arguments.append(field_arr.ctypes.data_as(to_ctypes(arg.dtype)))
+                if symbolic_field.has_fixed_shape:
+                    symbolic_field_shape = tuple(int(i) for i in symbolic_field.shape)
+                    if isinstance(symbolic_field.dtype, StructType):
+                        symbolic_field_shape = symbolic_field_shape[:-1]
+                    if symbolic_field_shape != field_arr.shape:
                         raise ValueError("Passed array '%s' has shape %s which does not match expected shape %s" %
-                                         (arg.fieldName, str(fieldArr.shape), str(symbolicField.shape)))
-                if symbolicField.hasFixedShape:
-                    symbolicFieldStrides = tuple(int(i) * fieldArr.itemsize for i in symbolicField.strides)
-                    if isinstance(symbolicField.dtype, StructType):
-                        symbolicFieldStrides = symbolicFieldStrides[:-1]
-                    if symbolicFieldStrides != fieldArr.strides:
+                                         (arg.field_name, str(field_arr.shape), str(symbolic_field.shape)))
+                if symbolic_field.has_fixed_shape:
+                    symbolic_field_strides = tuple(int(i) * field_arr.itemsize for i in symbolic_field.strides)
+                    if isinstance(symbolic_field.dtype, StructType):
+                        symbolic_field_strides = symbolic_field_strides[:-1]
+                    if symbolic_field_strides != field_arr.strides:
                         raise ValueError("Passed array '%s' has strides %s which does not match expected strides %s" %
-                                         (arg.fieldName, str(fieldArr.strides), str(symbolicFieldStrides)))
+                                         (arg.field_name, str(field_arr.strides), str(symbolic_field_strides)))
 
-                if FieldType.isIndexed(symbolicField):
-                    indexArrShapes.add(fieldArr.shape[:symbolicField.spatialDimensions])
-                elif not FieldType.isBuffer(symbolicField):
-                    arrayShapes.add(fieldArr.shape[:symbolicField.spatialDimensions])
+                if FieldType.is_indexed(symbolic_field):
+                    index_arr_shapes.add(field_arr.shape[:symbolic_field.spatial_dimensions])
+                elif not FieldType.is_buffer(symbolic_field):
+                    array_shapes.add(field_arr.shape[:symbolic_field.spatial_dimensions])
 
             elif arg.isFieldShapeArgument:
-                dataType = to_ctypes(get_base_type(arg.dtype))
-                ctArguments.append(fieldArr.ctypes.shape_as(dataType))
+                data_type = to_ctypes(get_base_type(arg.dtype))
+                ct_arguments.append(field_arr.ctypes.shape_as(data_type))
             elif arg.isFieldStrideArgument:
-                dataType = to_ctypes(get_base_type(arg.dtype))
-                strides = fieldArr.ctypes.strides_as(dataType)
-                for i in range(len(fieldArr.shape)):
-                    assert strides[i] % fieldArr.itemsize == 0
-                    strides[i] //= fieldArr.itemsize
-                ctArguments.append(strides)
+                data_type = to_ctypes(get_base_type(arg.dtype))
+                strides = field_arr.ctypes.strides_as(data_type)
+                for i in range(len(field_arr.shape)):
+                    assert strides[i] % field_arr.itemsize == 0
+                    strides[i] //= field_arr.itemsize
+                ct_arguments.append(strides)
             else:
                 assert False
         else:
             try:
-                param = argumentDict[arg.name]
+                param = argument_dict[arg.name]
             except KeyError:
                 raise KeyError("Missing parameter for kernel call " + arg.name)
-            expectedType = to_ctypes(arg.dtype)
-            ctArguments.append(expectedType(param))
+            expected_type = to_ctypes(arg.dtype)
+            ct_arguments.append(expected_type(param))
 
-    if len(arrayShapes) > 1:
-        raise ValueError("All passed arrays have to have the same size " + str(arrayShapes))
-    if len(indexArrShapes) > 1:
-        raise ValueError("All passed index arrays have to have the same size " + str(arrayShapes))
+    if len(array_shapes) > 1:
+        raise ValueError("All passed arrays have to have the same size " + str(array_shapes))
+    if len(index_arr_shapes) > 1:
+        raise ValueError("All passed index arrays have to have the same size " + str(array_shapes))
 
-    return ctArguments
+    return ct_arguments
 
 
-def makePythonFunctionIncompleteParams(kernelFunctionNode, argumentDict, func):
-    parameters = kernelFunctionNode.parameters
+def make_python_function_incomplete_params(kernel_function_node, argument_dict, func):
+    parameters = kernel_function_node.parameters
 
     cache = {}
-    cacheValues = []
+    cache_values = []
 
     def wrapper(**kwargs):
         key = hash(tuple((k, v.ctypes.data, v.strides, v.shape) if isinstance(v, np.ndarray) else (k, id(v))
@@ -435,12 +437,12 @@ def makePythonFunctionIncompleteParams(kernelFunctionNode, argumentDict, func):
             args = cache[key]
             func(*args)
         except KeyError:
-            fullArguments = argumentDict.copy()
-            fullArguments.update(kwargs)
-            args = buildCTypeArgumentList(parameters, fullArguments)
+            full_arguments = argument_dict.copy()
+            full_arguments.update(kwargs)
+            args = build_ctypes_argument_list(parameters, full_arguments)
             cache[key] = args
-            cacheValues.append(kwargs)  # keep objects alive such that ids remain unique
+            cache_values.append(kwargs)  # keep objects alive such that ids remain unique
             func(*args)
-    wrapper.ast = kernelFunctionNode
-    wrapper.parameters = kernelFunctionNode.parameters
+    wrapper.ast = kernel_function_node
+    wrapper.parameters = kernel_function_node.parameters
     return wrapper
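To make the renamed configuration keys from the module docstring above concrete, the default configuration written out by read_config() would now look roughly like this (shown as a Python dict; the cache paths are what user_cache_dir('pystencils') resolves to on a typical Linux system):

    example_config = {
        'compiler': {
            'os': 'linux',
            'command': 'g++',
            'flags': '-Ofast -DNDEBUG -fPIC -march=native -fopenmp -std=c++11',
            'restrict_qualifier': '__restrict__',
        },
        'cache': {
            'read_from_shared_library': False,
            'object_cache': '~/.cache/pystencils/objectcache',
            'clear_cache_on_start': False,
            'shared_library': '~/.cache/pystencils/cache.so',
        },
    }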
diff --git a/cpu/kernelcreation.py b/cpu/kernelcreation.py
index ce37a08876e4a6a359dff01d4a01c3a3858995c6..efd1b3892e3c117447b285e6e5cc30a734cadaf7 100644
--- a/cpu/kernelcreation.py
+++ b/cpu/kernelcreation.py
@@ -1,72 +1,79 @@
 import sympy as sp
 from functools import partial
 from pystencils.astnodes import SympyAssignment, Block, LoopOverCoordinate, KernelFunction
-from pystencils.transformations import resolveBufferAccesses, resolveFieldAccesses, makeLoopOverDomain, \
-    typeAllEquations, getOptimalLoopOrdering, parseBasePointerInfo, moveConstantsBeforeLoop, splitInnerLoop, \
-    substituteArrayAccessesWithConstants
+from pystencils.transformations import resolve_buffer_accesses, resolve_field_accesses, make_loop_over_domain, \
+    type_all_equations, get_optimal_loop_ordering, parse_base_pointer_info, move_constants_before_loop, split_inner_loop, \
+    substitute_array_accesses_with_constants
 from pystencils.data_types import TypedSymbol, BasicType, StructType, create_type
 from pystencils.field import Field, FieldType
 import pystencils.astnodes as ast
-from pystencils.cpu.cpujit import makePythonFunction
+from pystencils.cpu.cpujit import make_python_function
+from pystencils.assignment import Assignment
+from typing import List, Union
 
+AssignmentOrAstNodeList = List[Union[Assignment, ast.Node]]
 
-def createKernel(listOfEquations, functionName="kernel", typeForSymbol='double', splitGroups=(),
-                 iterationSlice=None, ghostLayers=None):
+
+def create_kernel(assignments: AssignmentOrAstNodeList, function_name: str = "kernel", type_info='double',
+                  split_groups=(), iteration_slice=None, ghost_layers=None) -> KernelFunction:
     """
     Creates an abstract syntax tree for a kernel function, by taking a list of update rules.
 
     Loops are created according to the field accesses in the equations.
 
-    :param listOfEquations: list of sympy equations, containing accesses to :class:`pystencils.field.Field`.
-           Defining the update rules of the kernel
-    :param functionName: name of the generated function - only important if generated code is written out
-    :param typeForSymbol: a map from symbol name to a C type specifier. If not specified all symbols are assumed to
-           be of type 'double' except symbols which occur on the left hand side of equations where the
-           right hand side is a sympy Boolean which are assumed to be 'bool' .
-    :param splitGroups: Specification on how to split up inner loop into multiple loops. For details see
-           transformation :func:`pystencils.transformation.splitInnerLoop`
-    :param iterationSlice: if not None, iteration is done only over this slice of the field
-    :param ghostLayers: a sequence of pairs for each coordinate with lower and upper nr of ghost layers
-                        if None, the number of ghost layers is determined automatically and assumed to be equal for a
-                        all dimensions
-
-    :return: :class:`pystencils.ast.KernelFunction` node
+    Args:
+        assignments: list of sympy equations, containing accesses to :class:`pystencils.field.Field`,
+                     defining the update rules of the kernel
+        function_name: name of the generated function - only important if generated code is written out
+        type_info: a map from symbol name to a C type specifier. If not specified all symbols are assumed to
+                   be of type 'double' except symbols which occur on the left hand side of equations where the
+                   right hand side is a sympy Boolean, which are assumed to be 'bool'.
+        split_groups: specification of how to split up the inner loop into multiple loops. For details see
+                      transformation :func:`pystencils.transformation.split_inner_loop`
+        iteration_slice: if not None, iteration is done only over this slice of the field
+        ghost_layers: a sequence of pairs for each coordinate with lower and upper nr of ghost layers.
+                      If None, the number of ghost layers is determined automatically and assumed to be equal
+                      for all dimensions
+
+    Returns:
+        AST node representing a function, that can be printed as C or CUDA code
     """
 
     def type_symbol(term):
         if isinstance(term, Field.Access) or isinstance(term, TypedSymbol):
             return term
         elif isinstance(term, sp.Symbol):
-            if not hasattr(typeForSymbol, '__getitem__'):
-                return TypedSymbol(term.name, create_type(typeForSymbol))
+            if not hasattr(type_info, '__getitem__'):
+                return TypedSymbol(term.name, create_type(type_info))
             else:
-                return TypedSymbol(term.name, typeForSymbol[term.name])
+                return TypedSymbol(term.name, type_info[term.name])
         else:
             raise ValueError("Term has to be field access or symbol")
 
-    fields_read, fields_written, assignments = typeAllEquations(listOfEquations, typeForSymbol)
+    fields_read, fields_written, assignments = type_all_equations(assignments, type_info)
     all_fields = fields_read.union(fields_written)
     read_only_fields = set([f.name for f in fields_read - fields_written])
 
-    buffers = set([f for f in all_fields if FieldType.isBuffer(f)])
+    buffers = set([f for f in all_fields if FieldType.is_buffer(f)])
     fields_without_buffers = all_fields - buffers
 
     body = ast.Block(assignments)
-    loop_order = getOptimalLoopOrdering(fields_without_buffers)
-    code, loop_strides, loop_vars = makeLoopOverDomain(body, functionName, iterationSlice=iterationSlice,
-                                                       ghostLayers=ghostLayers, loopOrder=loop_order)
+    loop_order = get_optimal_loop_ordering(fields_without_buffers)
+    code, loop_strides, loop_vars = make_loop_over_domain(body, function_name, iteration_slice=iteration_slice,
+                                                          ghost_layers=ghost_layers, loop_order=loop_order)
     code.target = 'cpu'
 
-    if splitGroups:
-        typed_split_groups = [[type_symbol(s) for s in splitGroup] for splitGroup in splitGroups]
-        splitInnerLoop(code, typed_split_groups)
+    if split_groups:
+        typed_split_groups = [[type_symbol(s) for s in split_group] for split_group in split_groups]
+        split_inner_loop(code, typed_split_groups)
 
-    base_pointer_info = [['spatialInner0'], ['spatialInner1']] if len(loop_order) >= 2 else [['spatialInner0']]
-    base_pointer_infos = {field.name: parseBasePointerInfo(base_pointer_info, loop_order, field)
-                          for field in fields_without_buffers}
+    base_pointer_spec = [['spatialInner0'], ['spatialInner1']] if len(loop_order) >= 2 else [['spatialInner0']]
+    base_pointer_info = {field.name: parse_base_pointer_info(base_pointer_spec, loop_order, field)
+                         for field in fields_without_buffers}
 
-    buffer_base_pointer_infos = {field.name: parseBasePointerInfo([['spatialInner0']], [0], field) for field in buffers}
-    base_pointer_infos.update(buffer_base_pointer_infos)
+    buffer_base_pointer_info = {field.name: parse_base_pointer_info([['spatialInner0']], [0], field)
+                                for field in buffers}
+    base_pointer_info.update(buffer_base_pointer_info)
 
     base_buffer_index = loop_vars[0]
     stride = 1
@@ -75,116 +82,119 @@ def createKernel(listOfEquations, functionName="kernel", typeForSymbol='double',
         stride *= int(cur_stride) if isinstance(cur_stride, float) else cur_stride
         base_buffer_index += var * stride
 
-    resolveBufferAccesses(code, base_buffer_index, read_only_fields)
-    resolveFieldAccesses(code, read_only_fields, field_to_base_pointer_info=base_pointer_infos)
-    substituteArrayAccessesWithConstants(code)
-    moveConstantsBeforeLoop(code)
-    code.compile = partial(makePythonFunction, code)
+    resolve_buffer_accesses(code, base_buffer_index, read_only_fields)
+    resolve_field_accesses(code, read_only_fields, field_to_base_pointer_info=base_pointer_info)
+    substitute_array_accesses_with_constants(code)
+    move_constants_before_loop(code)
+    code.compile = partial(make_python_function, code)
     return code
 
 
-def createIndexedKernel(listOfEquations, indexFields, functionName="kernel", typeForSymbol=None,
-                        coordinateNames=('x', 'y', 'z')):
+def create_indexed_kernel(assignments: AssignmentOrAstNodeList, index_fields, function_name="kernel",
+                          type_info=None, coordinate_names=('x', 'y', 'z')) -> KernelFunction:
     """
-    Similar to :func:`createKernel`, but here not all cells of a field are updated but only cells with
+    Similar to :func:`create_kernel`, but here not all cells of a field are updated but only cells with
     coordinates which are stored in an index field. This traversal method can e.g. be used for boundary handling.
 
-    The coordinates are stored in a separated indexField, which is a one dimensional array with struct data type.
+    The coordinates are stored in a separate index field, which is a one dimensional array with struct data type.
-    This struct has to contain fields named 'x', 'y' and for 3D fields ('z'). These names are configurable with the
-    'coordinateNames' parameter. The struct can have also other fields that can be read and written in the kernel, for
-    example boundary parameters.
+    This struct has to contain fields named 'x', 'y' and for 3D fields ('z'). These names are configurable with the
+    'coordinate_names' parameter. The struct can also contain other fields that can be read and written in the kernel,
+    for example boundary parameters.
 
-    :param listOfEquations: list of update equations or AST nodes
-    :param indexFields: list of index fields, i.e. 1D fields with struct data type
-    :param typeForSymbol: see documentation of :func:`createKernel`
-    :param functionName: see documentation of :func:`createKernel`
-    :param coordinateNames: name of the coordinate fields in the struct data type
-    :return: abstract syntax tree
+    Args:
+        assignments: list of assignments
+        index_fields: list of index fields, i.e. 1D fields with struct data type
+        type_info: see documentation of :func:`create_kernel`
+        function_name: see documentation of :func:`create_kernel`
+        coordinate_names: name of the coordinate fields in the struct data type
     """
-    fieldsRead, fieldsWritten, assignments = typeAllEquations(listOfEquations, typeForSymbol)
-    allFields = fieldsRead.union(fieldsWritten)
-
-    for indexField in indexFields:
-        indexField.fieldType = FieldType.INDEXED
-        assert FieldType.isIndexed(indexField)
-        assert indexField.spatialDimensions == 1, "Index fields have to be 1D"
-
-    nonIndexFields = [f for f in allFields if f not in indexFields]
-    spatialCoordinates = {f.spatialDimensions for f in nonIndexFields}
-    assert len(spatialCoordinates) == 1, "Non-index fields do not have the same number of spatial coordinates"
-    spatialCoordinates = list(spatialCoordinates)[0]
-
-    def getCoordinateSymbolAssignment(name):
-        for indexField in indexFields:
-            assert isinstance(indexField.dtype, StructType), "Index fields have to have a struct datatype"
-            dataType = indexField.dtype
-            if dataType.has_element(name):
-                rhs = indexField[0](name)
-                lhs = TypedSymbol(name, BasicType(dataType.get_element_type(name)))
+    fields_read, fields_written, assignments = type_all_equations(assignments, type_info)
+    all_fields = fields_read.union(fields_written)
+
+    for index_field in index_fields:
+        index_field.fieldType = FieldType.INDEXED
+        assert FieldType.is_indexed(index_field)
+        assert index_field.spatial_dimensions == 1, "Index fields have to be 1D"
+
+    non_index_fields = [f for f in all_fields if f not in index_fields]
+    spatial_coordinates = {f.spatial_dimensions for f in non_index_fields}
+    assert len(spatial_coordinates) == 1, "Non-index fields do not have the same number of spatial coordinates"
+    spatial_coordinates = list(spatial_coordinates)[0]
+
+    def get_coordinate_symbol_assignment(name):
+        for idx_field in index_fields:
+            assert isinstance(idx_field.dtype, StructType), "Index fields have to have a struct data type"
+            data_type = idx_field.dtype
+            if data_type.has_element(name):
+                rhs = idx_field[0](name)
+                lhs = TypedSymbol(name, BasicType(data_type.get_element_type(name)))
                 return SympyAssignment(lhs, rhs)
         raise ValueError("Index %s not found in any of the passed index fields" % (name,))
 
-    coordinateSymbolAssignments = [getCoordinateSymbolAssignment(n) for n in coordinateNames[:spatialCoordinates]]
-    coordinateTypedSymbols = [eq.lhs for eq in coordinateSymbolAssignments]
-    assignments = coordinateSymbolAssignments + assignments
+    coordinate_symbol_assignments = [get_coordinate_symbol_assignment(n)
+                                     for n in coordinate_names[:spatial_coordinates]]
+    coordinate_typed_symbols = [eq.lhs for eq in coordinate_symbol_assignments]
+    assignments = coordinate_symbol_assignments + assignments
 
     # make 1D loop over index fields
-    loopBody = Block([])
-    loopNode = LoopOverCoordinate(loopBody, coordinate_to_loop_over=0, start=0, stop=indexFields[0].shape[0])
+    loop_body = Block([])
+    loop_node = LoopOverCoordinate(loop_body, coordinate_to_loop_over=0, start=0, stop=index_fields[0].shape[0])
 
     for assignment in assignments:
-        loopBody.append(assignment)
+        loop_body.append(assignment)
 
-    functionBody = Block([loopNode])
-    ast = KernelFunction(functionBody, backend="cpu", function_name=functionName)
+    function_body = Block([loop_node])
+    ast_node = KernelFunction(function_body, backend="cpu", function_name=function_name)
 
-    fixedCoordinateMapping = {f.name: coordinateTypedSymbols for f in nonIndexFields}
-    resolveFieldAccesses(ast, set(['indexField']), field_to_fixed_coordinates=fixedCoordinateMapping)
-    substituteArrayAccessesWithConstants(ast)
-    moveConstantsBeforeLoop(ast)
-    ast.compile = partial(makePythonFunction, ast)
-    return ast
+    fixed_coordinate_mapping = {f.name: coordinate_typed_symbols for f in non_index_fields}
 
+    read_only_fields = set([f.name for f in fields_read - fields_written])
+    resolve_field_accesses(ast_node, read_only_fields, field_to_fixed_coordinates=fixed_coordinate_mapping)
+    substitute_array_accesses_with_constants(ast_node)
+    move_constants_before_loop(ast_node)
+    ast_node.compile = partial(make_python_function, ast_node)
+    return ast_node
 
-def addOpenMP(astNode, schedule="static", numThreads=True):
-    """
-    Parallelizes the outer loop with OpenMP
 
-    :param astNode: abstract syntax tree created e.g. by :func:`createKernel`
-    :param schedule: OpenMP scheduling policy e.g. 'static' or 'dynamic'
-    :param numThreads: explicitly specify number of threads
+def add_openmp(ast_node, schedule="static", num_threads=True):
+    """Parallelize the outer loop with OpenMP.
+
+    Args:
+        ast_node: abstract syntax tree created e.g. by :func:`create_kernel`
+        schedule: OpenMP scheduling policy e.g. 'static' or 'dynamic'
+        num_threads: explicitly specify number of threads
     """
-    if not numThreads:
+    if not num_threads:
         return
 
-    assert type(astNode) is ast.KernelFunction
-    body = astNode.body
-    threadsClause = "" if numThreads and isinstance(numThreads,bool) else " num_threads(%s)" % (numThreads,)
-    wrapperBlock = ast.PragmaBlock('#pragma omp parallel' + threadsClause, body.take_child_nodes())
-    body.append(wrapperBlock)
+    assert type(ast_node) is ast.KernelFunction
+    body = ast_node.body
+    threads_clause = "" if num_threads and isinstance(num_threads, bool) else " num_threads(%s)" % (num_threads,)
+    wrapper_block = ast.PragmaBlock('#pragma omp parallel' + threads_clause, body.take_child_nodes())
+    body.append(wrapper_block)
 
-    outerLoops = [l for l in body.atoms(ast.LoopOverCoordinate) if l.is_outermost_loop]
-    assert outerLoops, "No outer loop found"
-    assert len(outerLoops) <= 1, "More than one outer loop found. Which one should be parallelized?"
-    loopToParallelize = outerLoops[0]
+    outer_loops = [loop for loop in body.atoms(ast.LoopOverCoordinate) if loop.is_outermost_loop]
+    assert outer_loops, "No outer loop found"
+    assert len(outer_loops) <= 1, "More than one outer loop found. Which one should be parallelized?"
+    loop_to_parallelize = outer_loops[0]
     try:
-        loopRange = int(loopToParallelize.stop - loopToParallelize.start)
+        loop_range = int(loop_to_parallelize.stop - loop_to_parallelize.start)
     except TypeError:
-        loopRange = None
+        loop_range = None
 
-    if numThreads is None:
+    if num_threads is None:
         import multiprocessing
-        numThreads = multiprocessing.cpu_count()
+        num_threads = multiprocessing.cpu_count()
 
-    if loopRange is not None and loopRange < numThreads:
-        containedLoops = [l for l in loopToParallelize.body.args if isinstance(l, LoopOverCoordinate)]
-        if len(containedLoops) == 1:
-            containedLoop = containedLoops[0]
+    if loop_range is not None and loop_range < num_threads:
+        contained_loops = [loop for loop in loop_to_parallelize.body.args if isinstance(loop, LoopOverCoordinate)]
+        if len(contained_loops) == 1:
+            contained_loop = contained_loops[0]
             try:
-                containedLoopRange = int(containedLoop.stop - containedLoop.start)
-                if containedLoopRange > loopRange:
-                    loopToParallelize = containedLoop
+                contained_loop_range = int(contained_loop.stop - contained_loop.start)
+                if contained_loop_range > loop_range:
+                    loop_to_parallelize = contained_loop
             except TypeError:
                 pass
 
-    loopToParallelize.prefixLines.append("#pragma omp for schedule(%s)" % (schedule,))
+    loop_to_parallelize.prefixLines.append("#pragma omp for schedule(%s)" % (schedule,))
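Putting the renamed CPU pipeline together, an end-to-end sketch (the 20x20 arrays and the Jacobi-style update are illustrative; Field.create_from_numpy_array is assumed to be this revision's array-based field constructor):

    import numpy as np
    from pystencils import Assignment, Field
    from pystencils.cpu import add_openmp, create_kernel

    src_arr = np.zeros((20, 20))
    dst_arr = np.zeros_like(src_arr)
    src = Field.create_from_numpy_array('src', src_arr)
    dst = Field.create_from_numpy_array('dst', dst_arr)

    update = Assignment(dst[0, 0], (src[1, 0] + src[-1, 0] + src[0, 1] + src[0, -1]) / 4)
    ast_node = create_kernel([update])
    add_openmp(ast_node, schedule='static')   # wraps the outer loop in an OpenMP parallel region
    kernel = ast_node.compile()               # compiles via make_python_function
    kernel(src=src_arr, dst=dst_arr)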
diff --git a/cpu/msvc_detection.py b/cpu/msvc_detection.py
index b14541096494d2ee2b4b64f0e7f488df69367e58..7c45d484f97a5569305caf937e41be3e15dda1d5 100644
--- a/cpu/msvc_detection.py
+++ b/cpu/msvc_detection.py
@@ -2,37 +2,39 @@ import subprocess
 import os
 
 
-def getEnvironment(versionSpecifier, arch='x64'):
-    """
-    Returns an environment dictionary, for activating the Visual Studio compiler
-    :param versionSpecifier: either a version number, year number, 'auto' or 'latest' for automatic detection of latest
-                             installed version or 'setuptools' for setuptools-based detection
-    :param arch: x86 or x64
+def get_environment(version_specifier, arch='x64'):
+    """Returns an environment dictionary, for activating the Visual Studio compiler.
+
+    Args:
+        version_specifier: either a version number, year number, 'auto' or 'latest' for automatic detection of latest
+                          installed version or 'setuptools' for setuptools-based detection
+        arch: x86 or x64
     """
-    if versionSpecifier == 'setuptools':
-        return getEnvironmentFromSetupTools(arch)
-    elif '\\' in versionSpecifier:
-        vcVarsPath = findVcVarsAllViaFilesystemSearch(versionSpecifier)
-        return getEnvironmentFromVcVarsFile(vcVarsPath, arch)
+    if version_specifier == 'setuptools':
+        return get_environment_from_setup_tools(arch)
+    elif '\\' in version_specifier:
+        vc_vars_path = find_vc_vars_all_via_filesystem_search(version_specifier)
+        return get_environment_from_vc_vars_file(vc_vars_path, arch)
     else:
         try:
-            if versionSpecifier in ('auto', 'latest'):
-                versionNr = findLatestMsvcVersionUsingEnvironmentVariables()
+            if version_specifier in ('auto', 'latest'):
+                version_nr = find_latest_msvc_version_using_environment_variables()
             else:
-                versionNr = normalizeMsvcVersion(versionSpecifier)
-            vcVarsPath = getVcVarsPathViaEnvironmentVariable(versionNr)
+                version_nr = normalize_msvc_version(version_specifier)
+            vc_vars_path = get_vc_vars_path_via_environment_variable(version_nr)
         except ValueError:
-            vcVarsPath = findVcVarsAllViaFilesystemSearch("C:\\Program Files (x86)\\Microsoft Visual Studio")
-            if vcVarsPath is None:
-                vcVarsPath = findVcVarsAllViaFilesystemSearch("C:\\Program Files\\Microsoft Visual Studio")
-            if vcVarsPath is None:
+            vc_vars_path = find_vc_vars_all_via_filesystem_search("C:\\Program Files (x86)\\Microsoft Visual Studio")
+            if vc_vars_path is None:
+                vc_vars_path = find_vc_vars_all_via_filesystem_search("C:\\Program Files\\Microsoft Visual Studio")
+            if vc_vars_path is None:
                 raise ValueError("Visual Studio not found. Write path to VS folder in pystencils config")
 
-        return getEnvironmentFromVcVarsFile(vcVarsPath, arch)
+        return get_environment_from_vc_vars_file(vc_vars_path, arch)
 
 
-def findLatestMsvcVersionUsingEnvironmentVariables():
+def find_latest_msvc_version_using_environment_variables():
     import re
+    # noinspection SpellCheckingInspection
-    regex = re.compile('VS(\d\d)\dCOMNTOOLS')
+    regex = re.compile(r'VS(\d\d)\dCOMNTOOLS')
     versions = []
     for key, value in os.environ.items():
@@ -45,7 +47,7 @@ def findLatestMsvcVersionUsingEnvironmentVariables():
     return versions[-1]
 
 
-def normalizeMsvcVersion(version):
+def normalize_msvc_version(version):
     """
     Takes version specifiers in the following form:
         - year: 2012, 2013, 2015, either as int or string
@@ -67,9 +69,9 @@ def normalizeMsvcVersion(version):
         return version
 
 
-def getEnvironmentFromVcVarsFile(vcVarsFile, arch):
+def get_environment_from_vc_vars_file(vc_vars_file, arch):
     out = subprocess.check_output(
-        'cmd /u /c "{}" {} && set'.format(vcVarsFile, arch),
+        'cmd /u /c "{}" {} && set'.format(vc_vars_file, arch),
         stderr=subprocess.STDOUT,
     ).decode('utf-16le', errors='replace')
 
@@ -77,23 +79,24 @@ def getEnvironmentFromVcVarsFile(vcVarsFile, arch):
     return env
 
 
-def getVcVarsPathViaEnvironmentVariable(versionNr):
-    environmentVarName = 'VS%d0COMNTOOLS' % (versionNr,)
-    vcPath = os.environ[environmentVarName]
-    path = os.path.join(vcPath, '..', '..', 'VC', 'vcvarsall.bat')
+def get_vc_vars_path_via_environment_variable(version_nr):
+    # noinspection SpellCheckingInspection
+    environment_var_name = 'VS%d0COMNTOOLS' % (version_nr,)
+    vc_path = os.environ[environment_var_name]
+    path = os.path.join(vc_path, '..', '..', 'VC', 'vcvarsall.bat')
     return os.path.abspath(path)
 
 
-def getEnvironmentFromSetupTools(arch):
+def get_environment_from_setup_tools(arch):
     from setuptools.msvc import msvc14_get_vc_env
-    msvcEnv = msvc14_get_vc_env(arch)
-    return {k.upper(): v for k, v in msvcEnv.items()}
+    msvc_env = msvc14_get_vc_env(arch)
+    return {k.upper(): v for k, v in msvc_env.items()}
 
 
-def findVcVarsAllViaFilesystemSearch(basePath):
+def find_vc_vars_all_via_filesystem_search(base_path):
     matches = []
-    for root, dirnames, filenames in os.walk(basePath):
-        for filename in filenames:
+    for root, dir_names, file_names in os.walk(base_path):
+        for filename in file_names:
             if filename == 'vcvarsall.bat':
                 matches.append(os.path.join(root, filename))
 
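
Reviewer note: a minimal usage sketch of the renamed MSVC helpers above, assuming a
Windows host with Visual Studio 2015 installed (so that the VS140COMNTOOLS environment
variable exists); the cl.exe invocation is illustrative only:

    import subprocess

    # version_nr=14 corresponds to VS 2015, i.e. the VS140COMNTOOLS variable
    vc_vars_path = get_vc_vars_path_via_environment_variable(14)
    env = get_environment_from_vc_vars_file(vc_vars_path, 'x64')
    # the imported environment lets subprocess locate cl.exe
    subprocess.check_call(['cl.exe', '/nologo', 'kernel.c'], env=env)
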
diff --git a/data_types.py b/data_types.py
index c92875374da01b9de08ee31e9aa7b7e0d399d6ce..9999408d0acd647616f2d695b91ad74743e8185a 100644
--- a/data_types.py
+++ b/data_types.py
@@ -9,11 +9,11 @@ except ImportError as e:
 from sympy.core.cache import cacheit
 
 from pystencils.cache import memorycache
-from pystencils.utils import allEqual
+from pystencils.utils import all_equal
 
 
-# to work in conditions of sp.Piecewise castFunc has to be of type Relational as well
+# to work in conditions of sp.Piecewise, cast_func has to be of type Relational as well
-class castFunc(sp.Function, sp.Rel):
+class cast_func(sp.Function, sp.Rel):
     @property
     def canonical(self):
         if hasattr(self.args[0], 'canonical'):
@@ -26,7 +26,7 @@ class castFunc(sp.Function, sp.Rel):
         return self.args[0].is_commutative
 
 
-class pointerArithmeticFunc(sp.Function, sp.Rel):
+class pointer_arithmetic_func(sp.Function, sp.Rel):
 
     @property
     def canonical(self):
@@ -251,7 +251,7 @@ def collate_types(types):
 
     # peel of vector types, if at least one vector type occurred the result will also be the vector type
     vector_type = [t for t in types if type(t) is VectorType]
-    if not allEqual(t.width for t in vector_type):
+    if not all_equal(t.width for t in vector_type):
         raise ValueError("Collation failed because of vector types with different width")
     types = [peel_off_type(t, VectorType) for t in types]
 
@@ -280,7 +280,7 @@ def get_type_of_expression(expr):
         return expr.dtype
     elif isinstance(expr, sp.Symbol):
         raise ValueError("All symbols inside this expression have to be typed!")
-    elif hasattr(expr, 'func') and expr.func == castFunc:
+    elif hasattr(expr, 'func') and expr.func == cast_func:
         return expr.args[1]
     elif hasattr(expr, 'func') and expr.func == sp.Piecewise:
         collated_result_type = collate_types(tuple(get_type_of_expression(a[0]) for a in expr.args))
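
For context on the cast_func rename, a short sketch of how the typed cast interacts
with get_type_of_expression (this assumes a create_type helper in pystencils.data_types,
which is not part of this hunk):

    import sympy as sp

    x = sp.Symbol('x')
    casted = cast_func(x, create_type('float32'))
    # get_type_of_expression reads the target type from args[1], see the hunk above
    assert get_type_of_expression(casted) == create_type('float32')
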
diff --git a/datahandling/__init__.py b/datahandling/__init__.py
index 719573055a38b39f2be02f90ead49be2262e814b..519e9b2fd218fd3eba1117e28668257e42c728b1 100644
--- a/datahandling/__init__.py
+++ b/datahandling/__init__.py
@@ -1,20 +1,21 @@
 from pystencils.datahandling.serial_datahandling import SerialDataHandling
 
 try:
-    import waLBerla
-    if waLBerla.cpp_available:
+    # noinspection PyPep8Naming
+    import waLBerla as wlb
+    if wlb.cpp_available:
         from pystencils.datahandling.parallel_datahandling import ParallelDataHandling
     else:
-        waLBerla = None
+        wlb = None
 except ImportError:
-    waLBerla = None
+    wlb = None
     ParallelDataHandling = None
 
 
-def createDataHandling(parallel, domainSize, periodicity, defaultLayout='SoA', defaultGhostLayers=1):
+def create_data_handling(parallel, domain_size, periodicity, default_layout='SoA', default_ghost_layers=1):
     if parallel:
-        if waLBerla is None:
-            raise ValueError("Cannot create parallel data handling because waLBerla module is not available")
+        if wlb is None:
+            raise ValueError("Cannot create parallel data handling because walberla module is not available")
 
         if periodicity is False or periodicity is None:
             periodicity = (0, 0, 0)
@@ -25,15 +26,16 @@ def createDataHandling(parallel, domainSize, periodicity, defaultLayout='SoA', d
             if len(periodicity) == 2:
                 periodicity += (1,)
 
-        if len(domainSize) == 2:
+        if len(domain_size) == 2:
             dim = 2
-            domainSize = (domainSize[0], domainSize[1], 1)
+            domain_size = (domain_size[0], domain_size[1], 1)
         else:
             dim = 3
 
-        blockStorage = waLBerla.createUniformBlockGrid(cells=domainSize, periodic=periodicity)
-        return ParallelDataHandling(blocks=blockStorage, dim=dim,
-                                    defaultLayout=defaultLayout, defaultGhostLayers=defaultGhostLayers)
+        # noinspection PyArgumentList
+        block_storage = wlb.createUniformBlockGrid(cells=domain_size, periodic=periodicity)
+        return ParallelDataHandling(blocks=block_storage, dim=dim,
+                                    default_layout=default_layout, default_ghost_layers=default_ghost_layers)
     else:
-        return SerialDataHandling(domainSize, periodicity=periodicity,
-                                  defaultLayout=defaultLayout, defaultGhostLayers=defaultGhostLayers)
+        return SerialDataHandling(domain_size, periodicity=periodicity,
+                                  default_layout=default_layout, default_ghost_layers=default_ghost_layers)
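
A quick sketch of the renamed factory in use; the serial path needs no walberla.
Note that for parallel=True a 2D domain_size is padded with a z extent of 1:

    from pystencils.datahandling import create_data_handling

    dh = create_data_handling(parallel=False, domain_size=(30, 30),
                              periodicity=(True, True))
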
diff --git a/datahandling/datahandling_interface.py b/datahandling/datahandling_interface.py
index 849b9d6e9da90f768db431baff4abf6879490fc4..c0b54b39373348cc34c84550ed195bde928a1e27 100644
--- a/datahandling/datahandling_interface.py
+++ b/datahandling/datahandling_interface.py
@@ -1,12 +1,14 @@
 import numpy as np
 from abc import ABC, abstractmethod
+from typing import Optional, Callable, Sequence, Iterable, Tuple, Dict
+from pystencils.field import Field
 
 
 class DataHandling(ABC):
     """
     Manages the storage of arrays and maps them to a symbolic field.
     Two versions are available: a simple, pure Python implementation for single node
-    simulations :py:class:SerialDataHandling and a distributed version using waLBerla in :py:class:ParallelDataHandling
+    simulations :py:class:SerialDataHandling and a distributed version using walberla in :py:class:ParallelDataHandling
 
     Keep in mind that the data can be distributed, so use the 'access' method whenever possible and avoid the
-    'gather' function that has collects (parts of the) distributed data on a single process.
+    'gather' function that collects (parts of the) distributed data on a single process.
@@ -16,240 +18,341 @@ class DataHandling(ABC):
 
     @property
     @abstractmethod
-    def dim(self):
+    def dim(self) -> int:
         """Dimension of the domain, either 2 or 3"""
 
     @property
     @abstractmethod
-    def shape(self):
-        """Shape of outer bounding box"""
+    def shape(self) -> Tuple[int, ...]:
+        """Shape of outer bounding box."""
 
     @property
     @abstractmethod
-    def periodicity(self):
-        """Returns tuple of booleans for x,y,(z) directions with True if domain is periodic in that direction"""
+    def periodicity(self) -> Tuple[bool, ...]:
+        """Returns tuple of booleans for x,y,(z) directions with True if domain is periodic in that direction."""
 
     @abstractmethod
-    def addArray(self, name, fSize=1, dtype=np.float64, latexName=None, ghostLayers=None, layout=None, cpu=True, gpu=None):
-        """
-        Adds a (possibly distributed) array to the handling that can be accessed using the given name.
+    def add_array(self, name: str, values_per_cell: int = 1, dtype=np.float64,
+                  latex_name: Optional[str] = None, ghost_layers: Optional[int] = None, layout: Optional[str] = None,
+                  cpu: bool = True, gpu: Optional[bool] = None) -> Field:
+        """Adds a (possibly distributed) array to the handling that can be accessed using the given name.
+
         For each array a symbolic field is available via the 'fields' dictionary
 
-        :param name: unique name that is used to access the field later
-        :param fSize: shape of the dim+1 coordinate. DataHandling supports zero or one index dimensions, i.e. scalar
-                      fields and vector fields. This parameter gives the shape of the index dimensions. The default
-                      value of 1 means no index dimension
-        :param dtype: data type of the array as numpy data type
-        :param latexName: optional, name of the symbolic field, if not given 'name' is used
-        :param ghostLayers: number of ghost layers - if not specified a default value specified in the constructor
-                            is used
-        :param layout: memory layout of array, either structure of arrays 'SoA' or array of structures 'AoS'.
-                       this is only important if fSize > 1
-        :param cpu: allocate field on the CPU
-        :param gpu: allocate field on the GPU, if None, a GPU field is allocated if defaultTarget is 'gpu'
+        Args:
+            name: unique name that is used to access the field later
+            values_per_cell: shape of the dim+1 coordinate. DataHandling supports zero or one index dimensions,
+                             i.e. scalar fields and vector fields. This parameter gives the shape of the index
+                             dimensions. The default value of 1 means no index dimension is created.
+            dtype: data type of the array as numpy data type
+            latex_name: optional, name of the symbolic field, if not given 'name' is used
+            ghost_layers: number of ghost layers - if not given, a default value specified in the constructor
+                          is used
+            layout: memory layout of array, either structure of arrays 'SoA' or array of structures 'AoS'.
+                    this is only important if values_per_cell > 1
+            cpu: allocate field on the CPU
+            gpu: allocate field on the GPU, if None, a GPU field is allocated if default_target is 'gpu'
+
+        Returns:
+            pystencils field that can be used to formulate symbolic kernels
         """
 
     @abstractmethod
-    def hasData(self, name):
-        """
-        Returns true if a field or custom data element with this name was added
-        """
+    def has_data(self, name):
+        """Returns true if a field or custom data element with this name was added."""
 
     @abstractmethod
-    def addArrayLike(self, name, nameOfTemplateField, latexName=None, cpu=True, gpu=None):
+    def add_array_like(self, name, name_of_template_field, latex_name=None, cpu=True, gpu=None):
         """
-        Adds an array with the same parameters (number of ghost layers, fSize, dtype) as existing array
-        :param name: name of new array
-        :param nameOfTemplateField: name of array that is used as template
-        :param latexName: see 'add' method
-        :param cpu: see 'add' method
-        :param gpu: see 'add' method
+        Adds an array with the same parameters (number of ghost layers, values_per_cell, dtype) as existing array.
+
+        Args:
+            name: name of new array
+            name_of_template_field: name of array that is used as template
+            latex_name: see 'add' method
+            cpu: see 'add' method
+            gpu: see 'add' method
         """
 
     @abstractmethod
-    def addCustomData(self, name, cpuCreationFunction,
-                      gpuCreationFunction=None, cpuToGpuTransferFunc=None, gpuToCpuTransferFunc=None):
-        """
-        Adds custom (non-array) data to domain
-        :param name: name to access data
-        :param cpuCreationFunction: function returning a new instance of the data that should be stored
-        :param gpuCreationFunction: optional, function returning a new instance, stored on GPU
-        :param cpuToGpuTransferFunc: function that transfers cpu to gpu version, getting two parameters (gpuInstance, cpuInstance)
-        :param gpuToCpuTransferFunc: function that transfers gpu to cpu version, getting two parameters (gpuInstance, cpuInstance)
-        :return:
+    def add_custom_data(self, name: str, cpu_creation_function,
+                        gpu_creation_function=None, cpu_to_gpu_transfer_func=None, gpu_to_cpu_transfer_func=None):
+        """Adds custom (non-array) data to domain.
+
+        Args:
+            name: name to access data
+            cpu_creation_function: function returning a new instance of the data that should be stored
+            gpu_creation_function: optional, function returning a new instance, stored on GPU
+            cpu_to_gpu_transfer_func: function that transfers the cpu to the gpu version,
+                                      called with two parameters (gpu_instance, cpu_instance)
+            gpu_to_cpu_transfer_func: function that transfers the gpu to the cpu version,
+                                      called with two parameters (gpu_instance, cpu_instance)
         """
 
-    def addCustomClass(self, name, classObj, cpu=True, gpu=False):
-        self.addCustomData(name,
-                           cpuCreationFunction=classObj if cpu else None,
-                           gpuCreationFunction=classObj if gpu else None,
-                           cpuToGpuTransferFunc=classObj.toGpu if cpu and gpu and hasattr(classObj, 'toGpu') else None,
-                           gpuToCpuTransferFunc=classObj.toCpu if cpu and gpu and hasattr(classObj, 'toCpu') else None)
+    def add_custom_class(self, name: str, class_obj, cpu: bool = True, gpu: bool = False):
+        """Adds non-array data by passing a class object with optional 'to_gpu' and 'to_cpu' member functions."""
+        cpu_to_gpu_transfer_func = class_obj.to_gpu if cpu and gpu and hasattr(class_obj, 'to_gpu') else None
+        gpu_to_cpu_transfer_func = class_obj.to_cpu if cpu and gpu and hasattr(class_obj, 'to_cpu') else None
+        self.add_custom_data(name,
+                             cpu_creation_function=class_obj if cpu else None,
+                             gpu_creation_function=class_obj if gpu else None,
+                             cpu_to_gpu_transfer_func=cpu_to_gpu_transfer_func,
+                             gpu_to_cpu_transfer_func=gpu_to_cpu_transfer_func)
 
     @property
     @abstractmethod
-    def fields(self):
-        """Dictionary mapping data name to symbolic pystencils field - use this to create pystencils kernels"""
+    def fields(self) -> Dict[str, Field]:
+        """Dictionary mapping data name to symbolic pystencils field - use this to create pystencils kernels."""
 
     @property
     @abstractmethod
-    def arrayNames(self):
-        """Tuple of all array names"""
+    def array_names(self) -> Sequence[str]:
+        """Sequence of all array names."""
 
     @property
     @abstractmethod
-    def customDataNames(self):
-        """Tuple of all custom data names"""
+    def custom_data_names(self) -> Sequence[str]:
+        """Sequence of all custom data names."""
 
     @abstractmethod
-    def ghostLayersOfField(self, name):
-        """Returns the number of ghost layers for a specific field/array"""
+    def ghost_layers_of_field(self, name: str) -> int:
+        """Returns the number of ghost layers for a specific field/array."""
 
     @abstractmethod
-    def fSize(self, name):
-        """Returns fSize of array"""
+    def values_per_cell(self, name: str) -> int:
+        """Returns values_per_cell of array."""
 
     @abstractmethod
-    def iterate(self, sliceObj=None, gpu=False, ghostLayers=None, innerGhostLayers=True):
-        """
-        Iterate over local part of potentially distributed data structure.
-        """
+    def iterate(self, slice_obj=None, gpu=False, ghost_layers=None,
+                inner_ghost_layers=True) -> Iterable['Block']:
+        """Iterate over local part of potentially distributed data structure."""
 
     @abstractmethod
-    def gatherArray(self, name, sliceObj=None, allGather=False, ghostLayers=False):
+    def gather_array(self, name, slice_obj=None, all_gather=False, ghost_layers=False) -> Optional[np.ndarray]:
         """
         Gathers part of the domain on a local process. Whenever possible use 'access' instead, since this method copies
         the distributed data to a single process which is inefficient and may exhaust the available memory
 
-        :param name: name of the array to gather
-        :param sliceObj: slice expression of the rectangular sub-part that should be gathered
-        :param allGather: if False only the root process receives the result, if True all processes
-        :param ghostLayers: number of outer ghost layers to include (only available for serial data handlings)
-        :return: gathered field that does not include any ghost layers, or None if gathered on another process
+        Args:
+            name: name of the array to gather
+            slice_obj: slice expression of the rectangular sub-part that should be gathered
+            all_gather: if False, only the root process receives the result; if True, all processes do
+            ghost_layers: number of outer ghost layers to include (only available for the serial data handling)
+
+        Returns:
+            gathered field that does not include any ghost layers, or None if gathered on another process
         """
 
     @abstractmethod
-    def runKernel(self, kernelFunc, *args, **kwargs):
-        """
-        Runs a compiled pystencils kernel using the arrays stored in the DataHandling class for all array parameters
-        Additional passed arguments are directly passed to the kernel function and override possible parameters from
-        the DataHandling
+    def run_kernel(self, kernel_function, *args, **kwargs) -> None:
+        """Runs a compiled pystencils kernel.
+
+        Uses the arrays stored in the DataHandling class for all array parameters. Additional arguments are passed
+        directly to the kernel function and override possible parameters from the DataHandling.
         """
 
     # ------------------------------- CPU/GPU transfer -----------------------------------------------------------------
 
     @abstractmethod
-    def toCpu(self, name):
+    def to_cpu(self, name):
         """Copies GPU data of array with specified name to CPU.
-        Works only if 'cpu=True' and 'gpu=True' has been used in 'add' method"""
+        Works only if 'cpu=True' and 'gpu=True' has been used in 'add' method."""
 
     @abstractmethod
-    def toGpu(self, name):
-        """Copies GPU data of array with specified name to GPU.
-        Works only if 'cpu=True' and 'gpu=True' has been used in 'add' method"""
+    def to_gpu(self, name):
+        """Copies CPU data of array with specified name to GPU.
+        Works only if 'cpu=True' and 'gpu=True' has been used in 'add' method."""
 
     @abstractmethod
-    def allToCpu(self, name):
-        """Copies data from GPU to CPU for all arrays that have a CPU and a GPU representation"""
+    def all_to_cpu(self):
+        """Copies data from GPU to CPU for all arrays that have a CPU and a GPU representation."""
 
     @abstractmethod
-    def allToGpu(self, name):
-        """Copies data from CPU to GPU for all arrays that have a CPU and a GPU representation"""
+    def all_to_gpu(self):
+        """Copies data from CPU to GPU for all arrays that have a CPU and a GPU representation."""
 
     @abstractmethod
-    def isOnGpu(self, name):
-        """Checks if this data was also allocated on the GPU - does not check if this data item is in synced"""
+    def is_on_gpu(self, name):
+        """Checks if this data was also allocated on the GPU - does not check if this data item is in synced."""
 
     @abstractmethod
-    def vtkWriter(self, fileName, dataNames, ghostLayers=False):
-        """VTK output for one or multiple arrays
-        :param fileName: base file name without extension for the VTK output
-        :param dataNames: list of array names that should be included in the vtk output
-        :param ghostLayers: true if ghost layer information should be written out as well
-        :return: a function that can be called with an integer time step to write the current state
-                i.e vtkWriter('someFile', ['velocity', 'density']) (1)
+    def create_vtk_writer(self, file_name, data_names, ghost_layers=False) -> Callable[[int], None]:
+        """VTK output for one or multiple arrays.
+
+        Args:
+            file_name: base file name without extension for the VTK output
+            data_names: list of array names that should be included in the vtk output
+            ghost_layers: true if ghost layer information should be written out as well
+
+        Returns:
+            a function that can be called with an integer time step to write the current state
+            e.g. create_vtk_writer('some_file', ['velocity', 'density'])(1)
         """
+
     @abstractmethod
-    def vtkWriterFlags(self, fileName, dataName, masksToName, ghostLayers=False):
-        """VTK output for an unsigned integer field, where bits are intepreted as flags
-        :param fileName: see vtkWriter
-        :param dataName: name of an array with uint type
-        :param masksToName: dictionary mapping integer masks to a name in the output
-        :param ghostLayers: see vtkWriter
-        :returns: functor that can be called with time step
+    def create_vtk_writer_for_flag_array(self, file_name, data_name, masks_to_name,
+                                         ghost_layers=False) -> Callable[[int], None]:
+        """VTK output for an unsigned integer field, where bits are interpreted as flags.
+
+        Args:
+            file_name: see create_vtk_writer
+            data_name: name of an array with uint type
+            masks_to_name: dictionary mapping integer masks to a name in the output
+            ghost_layers: see create_vtk_writer
+
+        Returns:
+            functor that can be called with time step
          """
 
     # ------------------------------- Communication --------------------------------------------------------------------
 
     @abstractmethod
-    def synchronizationFunction(self, names, stencil, target, **kwargs):
-        """
-        Synchronizes ghost layers for distributed arrays - for serial scenario this has to be called
-        for correct periodicity handling
-        :param names: what data to synchronize: name of array or sequence of names
-        :param stencil: stencil as string defining which neighbors are synchronized e.g. 'D2Q9', 'D3Q19'
-                        if None, a full synchronization (i.e. D2Q9 or D3Q27) is done
-        :param target: either 'cpu' or 'gpu
-        :param kwargs: implementation specific, optional optimization parameters for communication
-        :return: function object to run the communication
+    def synchronization_function(self, names, stencil, target, **kwargs) -> Callable[[], None]:
+        """Synchronizes ghost layers for distributed arrays.
+
+        For the serial scenario, this has to be called for correct periodicity handling.
+
+        Args:
+            names: what data to synchronize: name of array or sequence of names
+            stencil: stencil as string defining which neighbors are synchronized, e.g. 'D2Q9' or 'D3Q19';
+                     if None, a full synchronization (i.e. D2Q9 or D3Q27) is done
+            target: either 'cpu' or 'gpu'
+            kwargs: implementation specific, optional optimization parameters for communication
+
+        Returns:
+            function object to run the communication
         """
 
-    def reduceFloatSequence(self, sequence, operation, allReduce=False):
-        """Takes a sequence of floating point values on each process and reduces it element wise to all
-        processes (allReduce=True) or only to the root process (allReduce=False).
+    def reduce_float_sequence(self, sequence, operation, all_reduce=False) -> np.ndarray:
+        """Takes a sequence of floating point values on each process and reduces it element-wise.
+
+        If all_reduce, all processes get the result, otherwise only the root process.
         Possible operations are 'sum', 'min', 'max'
         """
 
-    def reduceIntSequence(self, sequence, operation, allReduce=False):
-        """See function reduceFloatSequence - this is the same for integers"""
-
-    def fill(self, arrayName, val, fValue=None, sliceObj=None, ghostLayers=False, innerGhostLayers=False):
-        if ghostLayers is True:
-            ghostLayers = self.ghostLayersOfField(arrayName)
-        if innerGhostLayers is True:
-            ghostLayers = self.ghostLayersOfField(arrayName)
-        if fValue is not None and self.fSize(arrayName) < 2:
-            raise ValueError("fValue parameter only valid for fields with fSize > 1")
-        for b in self.iterate(sliceObj, ghostLayers=ghostLayers, innerGhostLayers=innerGhostLayers):
-            if fValue is not None:
-                b[arrayName][..., fValue].fill(val)
+    def reduce_int_sequence(self, sequence, operation, all_reduce=False) -> np.ndarray:
+        """See function reduce_float_sequence - this is the same for integers."""
+
+    # ------------------------------- Data access and modification -----------------------------------------------------
+
+    def fill(self, array_name: str, val, value_idx: Optional[int] = None,
+             slice_obj=None, ghost_layers=False, inner_ghost_layers=False) -> None:
+        """Sets all cells to the same value.
+
+        Args:
+            array_name: name of the array that should be modified
+            val: value to set the array to
+            value_idx: If an array stores multiple values per cell, this index chooses which of these values to fill.
+                       If None, all values are set
+            slice_obj: if passed, only the defined slice is filled
+            ghost_layers: True if the outer ghost layers should also be filled
+            inner_ghost_layers: True if the inner ghost layers should be filled. Inner ghost layers occur only in
+                                parallel setups for distributed memory communication.
+        """
+        if ghost_layers is True:
+            ghost_layers = self.ghost_layers_of_field(array_name)
+        if inner_ghost_layers is True:
+            inner_ghost_layers = self.ghost_layers_of_field(array_name)
+        if value_idx is not None and self.values_per_cell(array_name) < 2:
+            raise ValueError("value_idx parameter only valid for fields with values_per_cell > 1")
+        for b in self.iterate(slice_obj, ghost_layers=ghost_layers, inner_ghost_layers=inner_ghost_layers):
+            if value_idx is not None:
+                b[array_name][..., value_idx].fill(val)
             else:
-                b[arrayName].fill(val)
+                b[array_name].fill(val)
+
+    def min(self, array_name, slice_obj=None, ghost_layers=False, inner_ghost_layers=False, reduce=True):
+        """Returns the minimum value inside the domain or slice of the domain.
 
-    def min(self, arrayName, sliceObj=None, ghostLayers=False, innerGhostLayers=False, reduce=True):
+        For meaning of arguments see documentation of :func:`DataHandling.fill`.
+
+        Returns:
+            the minimum of the locally stored domain part if reduce is False, otherwise the global minimum on the
+            root process and None on all other processes
+        """
         result = None
-        if ghostLayers is True:
-            ghostLayers = self.ghostLayersOfField(arrayName)
-        if innerGhostLayers is True:
-            ghostLayers = self.ghostLayersOfField(arrayName)
-        for b in self.iterate(sliceObj, ghostLayers=ghostLayers, innerGhostLayers=innerGhostLayers):
-            m = np.min(b[arrayName])
+        if ghost_layers is True:
+            ghost_layers = self.ghost_layers_of_field(array_name)
+        if inner_ghost_layers is True:
+            inner_ghost_layers = self.ghost_layers_of_field(array_name)
+        for b in self.iterate(slice_obj, ghost_layers=ghost_layers, inner_ghost_layers=inner_ghost_layers):
+            m = np.min(b[array_name])
-            result = m if result is None else np.min(result, m)
+            result = m if result is None else np.minimum(result, m)
-        return self.reduceFloatSequence([result], 'min')[0] if reduce else result
+        return self.reduce_float_sequence([result], 'min')[0] if reduce else result
+
+    def max(self, array_name, slice_obj=None, ghost_layers=False, inner_ghost_layers=False, reduce=True):
+        """Returns the maximum value inside the domain or slice of the domain.
 
-    def max(self, arrayName, sliceObj=None, ghostLayers=False, innerGhostLayers=False, reduce=True):
+        For argument description see :func:`DataHandling.min`
+        """
         result = None
-        if ghostLayers is True:
-            ghostLayers = self.ghostLayersOfField(arrayName)
-        if innerGhostLayers is True:
-            ghostLayers = self.ghostLayersOfField(arrayName)
-        for b in self.iterate(sliceObj, ghostLayers=ghostLayers, innerGhostLayers=innerGhostLayers):
-            m = np.max(b[arrayName])
+        if ghost_layers is True:
+            ghost_layers = self.ghost_layers_of_field(array_name)
+        if inner_ghost_layers is True:
+            inner_ghost_layers = self.ghost_layers_of_field(array_name)
+        for b in self.iterate(slice_obj, ghost_layers=ghost_layers, inner_ghost_layers=inner_ghost_layers):
+            m = np.max(b[array_name])
-            result = m if result is None else np.max(result, m)
+            result = m if result is None else np.maximum(result, m)
-        return self.reduceFloatSequence([result], 'max')[0] if reduce else result
+        return self.reduce_float_sequence([result], 'max')[0] if reduce else result
 
     def __str__(self):
         result = ""
 
-        firstColumnWidth = max(len("Name"), max(len(a) for a in self.arrayNames))
-        rowFormat = "{:>%d}|{:>21}|{:>21}\n" % (firstColumnWidth,)
-        separatorLine = "-" * (firstColumnWidth + 21 + 21 + 2) + "\n"
-        result += rowFormat.format("Name", "Inner (min/max)", "WithGl (min/max)")
-        result += separatorLine
-        for arrName in sorted(self.arrayNames):
-            innerMinMax = (self.min(arrName, ghostLayers=False), self.max(arrName, ghostLayers=False))
-            withGlMinMax = (self.min(arrName, ghostLayers=True), self.max(arrName, ghostLayers=True))
-            innerMinMax = "({0[0]:3.3g},{0[1]:3.3g})".format(innerMinMax)
-            withGlMinMax = "({0[0]:3.3g},{0[1]:3.3g})".format(withGlMinMax)
-            result += rowFormat.format(arrName, innerMinMax, withGlMinMax)
+        first_column_width = max(len("Name"), max(len(a) for a in self.array_names))
+        row_format = "{:>%d}|{:>21}|{:>21}\n" % (first_column_width,)
+        separator_line = "-" * (first_column_width + 21 + 21 + 2) + "\n"
+        result += row_format.format("Name", "Inner (min/max)", "WithGl (min/max)")
+        result += separator_line
+        for arr_name in sorted(self.array_names):
+            inner_min_max = (self.min(arr_name, ghost_layers=False), self.max(arr_name, ghost_layers=False))
+            with_gl_min_max = (self.min(arr_name, ghost_layers=True), self.max(arr_name, ghost_layers=True))
+            inner_min_max = "({0[0]:3.3g},{0[1]:3.3g})".format(inner_min_max)
+            with_gl_min_max = "({0[0]:3.3g},{0[1]:3.3g})".format(with_gl_min_max)
+            result += row_format.format(arr_name, inner_min_max, with_gl_min_max)
         return result
+
+
+class Block:
+    """Represents locally stored part of domain.
+
+    Instances of this class are returned by DataHandling.iterate, do not create manually!
+    """
+
+    def __init__(self, offset, local_slice):
+        self._offset = offset
+        self._localSlice = local_slice
+
+    @property
+    def offset(self):
+        """Offset of the current block in global coordinates (where lower ghost layers have negative indices)."""
+        return self._offset
+
+    @property
+    def cell_index_arrays(self):
+        """Global coordinate mesh-grid of cell coordinates.
+
+        Cell indices start at 0 at the first inner cell; lower ghost layers have negative indices.
+        """
+        mesh_grid_params = [offset + np.arange(width, dtype=np.int32)
+                            for offset, width in zip(self.offset, self.shape)]
+        return np.meshgrid(*mesh_grid_params, indexing='ij', copy=False)
+
+    @property
+    def midpoint_arrays(self):
+        """Global coordinate mesh-grid of cell midpoints which are shifted by 0.5 compared to cell indices."""
+        mesh_grid_params = [offset + 0.5 + np.arange(width, dtype=float)
+                            for offset, width in zip(self.offset, self.shape)]
+        return np.meshgrid(*mesh_grid_params, indexing='ij', copy=False)
+
+    @property
+    def shape(self):
+        """Shape of the fields (potentially including ghost layers)."""
+        return tuple(s.stop - s.start for s in self._localSlice[:len(self._offset)])
+
+    @property
+    def global_slice(self):
+        """Slice in global coordinates."""
+        return tuple(slice(off, off+size) for off, size in zip(self._offset, self.shape))
+
+    def __getitem__(self, data_name: str) -> np.ndarray:
+        raise NotImplementedError()
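
To make the renamed interface concrete, a sketch of a typical round trip through the
new snake_case API (serial case; 'phi' is an arbitrary example name, and the sketch
assumes the serial implementation follows the interface above):

    import numpy as np
    from pystencils.datahandling import create_data_handling

    dh = create_data_handling(parallel=False, domain_size=(20, 20), periodicity=False)
    phi_field = dh.add_array('phi', values_per_cell=1, ghost_layers=1)  # symbolic Field
    dh.fill('phi', 0.0, ghost_layers=True)
    for block in dh.iterate(ghost_layers=False):    # yields Block objects, see above
        x, y = block.midpoint_arrays                # global cell-midpoint coordinates
        block['phi'][...] = np.sin(x) * np.cos(y)
    print(dh.min('phi'), dh.max('phi'))
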
diff --git a/datahandling/parallel_datahandling.py b/datahandling/parallel_datahandling.py
index 88c3c837e4bc61c0009ebfc04f913a2f61fb67d9..bd8ccc97a61f3ab52694ab82b24d1a0f1a1b35b6 100644
--- a/datahandling/parallel_datahandling.py
+++ b/datahandling/parallel_datahandling.py
@@ -1,8 +1,9 @@
 import numpy as np
 from pystencils import Field
 from pystencils.datahandling.datahandling_interface import DataHandling
-from pystencils.parallel.blockiteration import slicedBlockIteration, blockIteration
+from pystencils.parallel.blockiteration import sliced_block_iteration, block_iteration
 from pystencils.utils import DotDict
+# noinspection PyPep8Naming
 import waLBerla as wlb
 import warnings
 
@@ -11,34 +12,34 @@ class ParallelDataHandling(DataHandling):
     GPU_DATA_PREFIX = "gpu_"
     VTK_COUNTER = 0
 
-    def __init__(self, blocks, defaultGhostLayers=1, defaultLayout='SoA', dim=3, defaultTarget='cpu'):
+    def __init__(self, blocks, default_ghost_layers=1, default_layout='SoA', dim=3, default_target='cpu'):
         """
-        Creates data handling based on waLBerla block storage
+        Creates data handling based on walberla block storage
 
-        :param blocks: waLBerla block storage
-        :param defaultGhostLayers: nr of ghost layers used if not specified in add() method
-        :param defaultLayout: layout used if no layout is given to add() method
+        :param blocks: walberla block storage
+        :param default_ghost_layers: nr of ghost layers used if not specified in add() method
+        :param default_layout: layout used if no layout is given to add() method
         :param dim: dimension of scenario,
-                    waLBerla always uses three dimensions, so if dim=2 the extend of the
+                    walberla always uses three dimensions, so if dim=2 the extent of the
                     z coordinate of blocks has to be 1
-        :param defaultTarget: either 'cpu' or 'gpu' . If set to 'gpu' for each array also a GPU version is allocated
-                              if not overwritten in addArray, and synchronization functions are for the GPU by default
+        :param default_target: either 'cpu' or 'gpu'. If set to 'gpu', a GPU version is allocated for each array
+                              unless overridden in add_array, and synchronization functions are for the GPU by default
         """
         super(ParallelDataHandling, self).__init__()
         assert dim in (2, 3)
         self.blocks = blocks
-        self.defaultGhostLayers = defaultGhostLayers
-        self.defaultLayout = defaultLayout
+        self.default_ghost_layers = default_ghost_layers
+        self.default_layout = default_layout
         self._fields = DotDict()  # maps name to symbolic pystencils field
-        self._fieldNameToCpuDataName = {}
-        self._fieldNameToGpuDataName = {}
+        self._field_name_to_cpu_data_name = {}
+        self._field_name_to_gpu_data_name = {}
         self.dataNames = set()
         self._dim = dim
         self._fieldInformation = {}
-        self._cpuGpuPairs = []
-        self._customDataTransferFunctions = {}
-        self._customDataNames = []
-        self._reduceMap = {
+        self._cpu_gpu_pairs = []
+        self._custom_data_transfer_functions = {}
+        self._custom_data_names = []
+        self._reduce_map = {
             'sum': wlb.mpi.SUM,
             'min': wlb.mpi.MIN,
             'max': wlb.mpi.MAX,
@@ -46,7 +47,7 @@ class ParallelDataHandling(DataHandling):
 
         if self._dim == 2:
             assert self.blocks.getDomainCellBB().size[2] == 1
-        self.defaultTarget = defaultTarget
+        self.defaultTarget = default_target
 
     @property
     def dim(self):
@@ -64,33 +65,33 @@ class ParallelDataHandling(DataHandling):
     def fields(self):
         return self._fields
 
-    def ghostLayersOfField(self, name):
-        return self._fieldInformation[name]['ghostLayers']
+    def ghost_layers_of_field(self, name):
+        return self._fieldInformation[name]['ghost_layers']
 
-    def fSize(self, name):
-        return self._fieldInformation[name]['fSize']
+    def values_per_cell(self, name):
+        return self._fieldInformation[name]['values_per_cell']
 
-    def addCustomData(self, name, cpuCreationFunction,
-                      gpuCreationFunction=None, cpuToGpuTransferFunc=None, gpuToCpuTransferFunc=None):
-        if cpuCreationFunction and gpuCreationFunction:
-            if cpuToGpuTransferFunc is None or gpuToCpuTransferFunc is None:
+    def add_custom_data(self, name, cpu_creation_function,
+                        gpu_creation_function=None, cpu_to_gpu_transfer_func=None, gpu_to_cpu_transfer_func=None):
+        if cpu_creation_function and gpu_creation_function:
+            if cpu_to_gpu_transfer_func is None or gpu_to_cpu_transfer_func is None:
                 raise ValueError("For GPU data, both transfer functions have to be specified")
-            self._customDataTransferFunctions[name] = (cpuToGpuTransferFunc, gpuToCpuTransferFunc)
-
-        if cpuCreationFunction:
-            self.blocks.addBlockData(name, cpuCreationFunction)
-        if gpuCreationFunction:
-            self.blocks.addBlockData(self.GPU_DATA_PREFIX + name, gpuCreationFunction)
-        self._customDataNames.append(name)
-
-    def addArray(self, name, fSize=1, dtype=np.float64, latexName=None, ghostLayers=None,
-                 layout=None, cpu=True, gpu=None):
-        if ghostLayers is None:
-            ghostLayers = self.defaultGhostLayers
+            self._custom_data_transfer_functions[name] = (cpu_to_gpu_transfer_func, gpu_to_cpu_transfer_func)
+
+        if cpu_creation_function:
+            self.blocks.addBlockData(name, cpu_creation_function)
+        if gpu_creation_function:
+            self.blocks.addBlockData(self.GPU_DATA_PREFIX + name, gpu_creation_function)
+        self._custom_data_names.append(name)
+
+    def add_array(self, name, values_per_cell=1, dtype=np.float64, latex_name=None, ghost_layers=None,
+                  layout=None, cpu=True, gpu=None):
+        if ghost_layers is None:
+            ghost_layers = self.default_ghost_layers
         if gpu is None:
             gpu = self.defaultTarget == 'gpu'
         if layout is None:
-            layout = self.defaultLayout
+            layout = self.default_layout
         if len(self.blocks) == 0:
             raise ValueError("Data handling expects that each process has at least one block")
         if hasattr(dtype, 'type'):
@@ -98,54 +99,55 @@ class ParallelDataHandling(DataHandling):
         if name in self.blocks[0] or self.GPU_DATA_PREFIX + name in self.blocks[0]:
             raise ValueError("Data with this name has already been added")
 
-        self._fieldInformation[name] = {'ghostLayers': ghostLayers,
-                                        'fSize': fSize,
+        self._fieldInformation[name] = {'ghost_layers': ghost_layers,
+                                        'values_per_cell': values_per_cell,
                                         'layout': layout,
                                         'dtype': dtype}
 
-        layoutMap = {'fzyx': wlb.field.Layout.fzyx, 'zyxf': wlb.field.Layout.zyxf,
-                     'f': wlb.field.Layout.fzyx,
-                     'SoA': wlb.field.Layout.fzyx,  'AoS': wlb.field.Layout.zyxf}
+        layout_map = {'fzyx': wlb.field.Layout.fzyx, 'zyxf': wlb.field.Layout.zyxf,
+                      'f': wlb.field.Layout.fzyx,
+                      'SoA': wlb.field.Layout.fzyx, 'AoS': wlb.field.Layout.zyxf}
 
         if cpu:
-            wlb.field.addToStorage(self.blocks, name, dtype, fSize=fSize, layout=layoutMap[layout],
-                                   ghostLayers=ghostLayers)
+            wlb.field.addToStorage(self.blocks, name, dtype, fSize=values_per_cell, layout=layout_map[layout],
+                                   ghostLayers=ghost_layers)
         if gpu:
-            wlb.cuda.addGpuFieldToStorage(self.blocks, self.GPU_DATA_PREFIX+name, dtype, fSize=fSize,
-                                          usePitchedMem=False, ghostLayers=ghostLayers, layout=layoutMap[layout])
+            wlb.cuda.addGpuFieldToStorage(self.blocks, self.GPU_DATA_PREFIX + name, dtype, fSize=values_per_cell,
+                                          usePitchedMem=False, ghostLayers=ghost_layers, layout=layout_map[layout])
 
         if cpu and gpu:
-            self._cpuGpuPairs.append((name, self.GPU_DATA_PREFIX + name))
+            self._cpu_gpu_pairs.append((name, self.GPU_DATA_PREFIX + name))
 
-        blockBB = self.blocks.getBlockCellBB(self.blocks[0])
-        shape = tuple(s + 2 * ghostLayers for s in blockBB.size[:self.dim])
-        indexDimensions = 1 if fSize > 1 else 0
-        if indexDimensions == 1:
-            shape += (fSize, )
+        block_bb = self.blocks.getBlockCellBB(self.blocks[0])
+        shape = tuple(s + 2 * ghost_layers for s in block_bb.size[:self.dim])
+        index_dimensions = 1 if values_per_cell > 1 else 0
+        if index_dimensions == 1:
+            shape += (values_per_cell,)
 
         assert all(f.name != name for f in self.fields.values()), "Symbolic field with this name already exists"
 
-        self.fields[name] = Field.createGeneric(name, self.dim, dtype, indexDimensions, layout,
-                                                indexShape=(fSize,) if indexDimensions > 0 else None)
-        self.fields[name].latexName = latexName
-        self._fieldNameToCpuDataName[name] = name
+        self.fields[name] = Field.create_generic(name, self.dim, dtype, index_dimensions, layout,
+                                                 index_shape=(values_per_cell,) if index_dimensions > 0 else None)
+        self.fields[name].latex_name = latex_name
+        self._field_name_to_cpu_data_name[name] = name
         if gpu:
-            self._fieldNameToGpuDataName[name] = self.GPU_DATA_PREFIX + name
+            self._field_name_to_gpu_data_name[name] = self.GPU_DATA_PREFIX + name
         return self.fields[name]
 
-    def hasData(self, name):
+    def has_data(self, name):
         return name in self._fields
 
     @property
-    def arrayNames(self):
+    def array_names(self):
         return tuple(self.fields.keys())
 
     @property
-    def customDataNames(self):
-        return tuple(self._customDataNames)
+    def custom_data_names(self):
+        return tuple(self._custom_data_names)
 
-    def addArrayLike(self, name, nameOfTemplateField, latexName=None, cpu=True, gpu=None):
-        return self.addArray(name, latexName=latexName, cpu=cpu, gpu=gpu, **self._fieldInformation[nameOfTemplateField])
+    def add_array_like(self, name, name_of_template_field, latex_name=None, cpu=True, gpu=None):
+        return self.add_array(name, latex_name=latex_name, cpu=cpu, gpu=gpu,
+                              **self._fieldInformation[name_of_template_field])
 
     def swap(self, name1, name2, gpu=False):
         if gpu:
@@ -154,117 +156,118 @@ class ParallelDataHandling(DataHandling):
         for block in self.blocks:
             block[name1].swapDataPointers(block[name2])
 
-    def iterate(self, sliceObj=None, gpu=False, ghostLayers=True, innerGhostLayers=True):
-        if ghostLayers is True:
-            ghostLayers = self.defaultGhostLayers
-        elif ghostLayers is False:
-            ghostLayers = 0
-        elif isinstance(ghostLayers, str):
-            ghostLayers = self.ghostLayersOfField(ghostLayers)
-
-        if innerGhostLayers is True:
-            innerGhostLayers = self.defaultGhostLayers
-        elif innerGhostLayers is False:
-            innerGhostLayers = 0
-        elif isinstance(ghostLayers, str):
-            ghostLayers = self.ghostLayersOfField(ghostLayers)
+    def iterate(self, slice_obj=None, gpu=False, ghost_layers=True, inner_ghost_layers=True):
+        if ghost_layers is True:
+            ghost_layers = self.default_ghost_layers
+        elif ghost_layers is False:
+            ghost_layers = 0
+        elif isinstance(ghost_layers, str):
+            ghost_layers = self.ghost_layers_of_field(ghost_layers)
+
+        if inner_ghost_layers is True:
+            inner_ghost_layers = self.default_ghost_layers
+        elif inner_ghost_layers is False:
+            inner_ghost_layers = 0
+        elif isinstance(inner_ghost_layers, str):
+            inner_ghost_layers = self.ghost_layers_of_field(inner_ghost_layers)
 
         prefix = self.GPU_DATA_PREFIX if gpu else ""
-        if sliceObj is not None:
-            yield from slicedBlockIteration(self.blocks, sliceObj, innerGhostLayers, ghostLayers,
-                                            self.dim, prefix)
+        if slice_obj is not None:
+            yield from sliced_block_iteration(self.blocks, slice_obj, inner_ghost_layers, ghost_layers,
+                                              self.dim, prefix)
         else:
-            yield from blockIteration(self.blocks, ghostLayers, self.dim, prefix)
+            yield from block_iteration(self.blocks, ghost_layers, self.dim, prefix)
 
-    def gatherArray(self, name, sliceObj=None, allGather=False, ghostLayers=False):
-        if ghostLayers is not False:
-            warnings.warn("gatherArray with ghost layers is only supported in serial datahandling. "
+    def gather_array(self, name, slice_obj=None, all_gather=False, ghost_layers=False):
+        if ghost_layers is not False:
+            warnings.warn("gather_array with ghost layers is only supported in serial data handling. "
                           "Array without ghost layers is returned")
 
-        if sliceObj is None:
-            sliceObj = tuple([slice(None, None, None)] * self.dim)
+        if slice_obj is None:
+            slice_obj = tuple([slice(None, None, None)] * self.dim)
         if self.dim == 2:
-            sliceObj = sliceObj[:2] + (0.5,) + sliceObj[2:]
+            slice_obj = slice_obj[:2] + (0.5,) + slice_obj[2:]
 
-        lastElement = sliceObj[3:]
+        last_element = slice_obj[3:]
 
-        array = wlb.field.gatherField(self.blocks, name, sliceObj[:3], allGather)
+        array = wlb.field.gatherField(self.blocks, name, slice_obj[:3], all_gather)
         if array is None:
             return None
 
         if self.dim == 2:
             array = array[:, :, 0]
-        if lastElement and self.fields[name].indexDimensions > 0:
-            array = array[..., lastElement[0]]
-        if self.fields[name].indexDimensions == 0:
+        if last_element and self.fields[name].index_dimensions > 0:
+            array = array[..., last_element[0]]
+        if self.fields[name].index_dimensions == 0:
             array = array[..., 0]
 
         return array
 
-    def _normalizeArrShape(self, arr, indexDimensions):
-        if indexDimensions == 0:
+    def _normalize_arr_shape(self, arr, index_dimensions):
+        if index_dimensions == 0:
             arr = arr[..., 0]
         if self.dim == 2:
             arr = arr[:, :, 0]
         return arr
 
-    def runKernel(self, kernelFunc, *args, **kwargs):
-        if kernelFunc.ast.backend == 'gpucuda':
-            nameMap = self._fieldNameToGpuDataName
-            toArray = wlb.cuda.toGpuArray
+    def run_kernel(self, kernel_function, *args, **kwargs):
+        if kernel_function.ast.backend == 'gpucuda':
+            name_map = self._field_name_to_gpu_data_name
+            to_array = wlb.cuda.toGpuArray
         else:
-            nameMap = self._fieldNameToCpuDataName
-            toArray = wlb.field.toArray
-        dataUsedInKernel = [(nameMap[p.fieldName], self.fields[p.fieldName])
-                            for p in kernelFunc.parameters if p.isFieldPtrArgument and p.fieldName not in kwargs]
+            name_map = self._field_name_to_cpu_data_name
+            to_array = wlb.field.toArray
+        data_used_in_kernel = [(name_map[p.field_name], self.fields[p.field_name])
+                               for p in kernel_function.parameters if
+                               p.isFieldPtrArgument and p.field_name not in kwargs]
         for block in self.blocks:
-            fieldArgs = {}
-            for dataName, f in dataUsedInKernel:
-                arr = toArray(block[dataName], withGhostLayers=[True, True, self.dim == 3])
-                arr = self._normalizeArrShape(arr, f.indexDimensions)
-                fieldArgs[f.name] = arr
-            fieldArgs.update(kwargs)
-            kernelFunc(*args, **fieldArgs)
-
-    def toCpu(self, name):
-        if name in self._customDataTransferFunctions:
-            transferFunc = self._customDataTransferFunctions[name][1]
+            field_args = {}
+            for data_name, f in data_used_in_kernel:
+                arr = to_array(block[data_name], withGhostLayers=[True, True, self.dim == 3])
+                arr = self._normalize_arr_shape(arr, f.index_dimensions)
+                field_args[f.name] = arr
+            field_args.update(kwargs)
+            kernel_function(*args, **field_args)
+
+    def to_cpu(self, name):
+        if name in self._custom_data_transfer_functions:
+            transfer_func = self._custom_data_transfer_functions[name][1]
             for block in self.blocks:
-                transferFunc(block[self.GPU_DATA_PREFIX + name], block[name])
+                transfer_func(block[self.GPU_DATA_PREFIX + name], block[name])
         else:
             wlb.cuda.copyFieldToCpu(self.blocks, self.GPU_DATA_PREFIX + name, name)
 
-    def toGpu(self, name):
-        if name in self._customDataTransferFunctions:
-            transferFunc = self._customDataTransferFunctions[name][0]
+    def to_gpu(self, name):
+        if name in self._custom_data_transfer_functions:
+            transfer_func = self._custom_data_transfer_functions[name][0]
             for block in self.blocks:
-                transferFunc(block[self.GPU_DATA_PREFIX + name], block[name])
+                transfer_func(block[self.GPU_DATA_PREFIX + name], block[name])
         else:
             print("trying to transfer ", self.GPU_DATA_PREFIX + name)
             wlb.cuda.copyFieldToGpu(self.blocks, self.GPU_DATA_PREFIX + name, name)
 
-    def isOnGpu(self, name):
-        return (name, self.GPU_DATA_PREFIX + name) in self._cpuGpuPairs
+    def is_on_gpu(self, name):
+        return (name, self.GPU_DATA_PREFIX + name) in self._cpu_gpu_pairs
 
-    def allToCpu(self):
-        for cpuName, gpuName in self._cpuGpuPairs:
-            wlb.cuda.copyFieldToCpu(self.blocks, gpuName, cpuName)
+    def all_to_cpu(self):
+        for cpu_name, gpu_name in self._cpu_gpu_pairs:
+            wlb.cuda.copyFieldToCpu(self.blocks, gpu_name, cpu_name)
-        for name in self._customDataTransferFunctions.keys():
-            self.toCpu(name)
+        for name in self._custom_data_transfer_functions.keys():
+            self.to_cpu(name)
 
-    def allToGpu(self):
-        for cpuName, gpuName in self._cpuGpuPairs:
-            wlb.cuda.copyFieldToGpu(self.blocks, gpuName, cpuName)
+    def all_to_gpu(self):
+        for cpu_name, gpu_name in self._cpu_gpu_pairs:
+            wlb.cuda.copyFieldToGpu(self.blocks, gpu_name, cpu_name)
-        for name in self._customDataTransferFunctions.keys():
-            self.toGpu(name)
+        for name in self._custom_data_transfer_functions.keys():
+            self.to_gpu(name)
 
-    def synchronizationFunctionCPU(self, names, stencil=None, buffered=True, **kwargs):
-        return self.synchronizationFunction(names, stencil, 'cpu',  buffered,)
+    def synchronization_function_cpu(self, names, stencil=None, buffered=True, **_):
+        return self.synchronization_function(names, stencil, 'cpu', buffered)
 
-    def synchronizationFunctionGPU(self, names, stencil=None, buffered=True, **kwargs):
-        return self.synchronizationFunction(names, stencil, 'gpu', buffered)
+    def synchronization_function_gpu(self, names, stencil=None, buffered=True, **_):
+        return self.synchronization_function(names, stencil, 'gpu', buffered)
 
-    def synchronizationFunction(self, names, stencil=None, target='cpu', buffered=True):
+    def synchronization_function(self, names, stencil=None, target='cpu', buffered=True):
         if target is None:
             target = self.defaultTarget
 
@@ -274,52 +277,52 @@ class ParallelDataHandling(DataHandling):
         if not hasattr(names, '__len__') or type(names) is str:
             names = [names]
 
-        createScheme = wlb.createUniformBufferedScheme if buffered else wlb.createUniformDirectScheme
+        create_scheme = wlb.createUniformBufferedScheme if buffered else wlb.createUniformDirectScheme
         if target == 'cpu':
-            createPacking = wlb.field.createPackInfo if buffered else wlb.field.createMPIDatatypeInfo
-        elif target == 'gpu':
-            createPacking = wlb.cuda.createPackInfo if buffered else wlb.cuda.createMPIDatatypeInfo
+            create_packing = wlb.field.createPackInfo if buffered else wlb.field.createMPIDatatypeInfo
+        else:
+            assert target == 'gpu'
+            create_packing = wlb.cuda.createPackInfo if buffered else wlb.cuda.createMPIDatatypeInfo
             names = [self.GPU_DATA_PREFIX + name for name in names]
 
-        syncFunction = createScheme(self.blocks, stencil)
+        sync_function = create_scheme(self.blocks, stencil)
         for name in names:
-            syncFunction.addDataToCommunicate(createPacking(self.blocks, name))
+            sync_function.addDataToCommunicate(create_packing(self.blocks, name))
 
-        return syncFunction
+        return sync_function
 
-    def reduceFloatSequence(self, sequence, operation, allReduce=False):
-        if allReduce:
-            return np.array(wlb.mpi.allreduceReal(sequence, self._reduceMap[operation.lower()]))
+    def reduce_float_sequence(self, sequence, operation, all_reduce=False):
+        if all_reduce:
+            return np.array(wlb.mpi.allreduceReal(sequence, self._reduce_map[operation.lower()]))
         else:
-            return np.array(wlb.mpi.reduceReal(sequence, self._reduceMap[operation.lower()]))
+            return np.array(wlb.mpi.reduceReal(sequence, self._reduce_map[operation.lower()]))
 
-    def reduceIntSequence(self, sequence, operation, allReduce=False):
-        if allReduce:
-            return np.array(wlb.mpi.allreduceInt(sequence, self._reduceMap[operation.lower()]))
+    def reduce_int_sequence(self, sequence, operation, all_reduce=False):
+        if all_reduce:
+            return np.array(wlb.mpi.allreduceInt(sequence, self._reduce_map[operation.lower()]))
         else:
-            return np.array(wlb.mpi.reduceInt(sequence, self._reduceMap[operation.lower()]))
-
-    def vtkWriter(self, fileName, dataNames, ghostLayers=False):
-        if ghostLayers is False:
-            ghostLayers = 0
-        if ghostLayers is True:
-            ghostLayers = min(self.ghostLayersOfField(n) for n in dataNames)
-        fileName = "%s_%02d" % (fileName, ParallelDataHandling.VTK_COUNTER)
+            return np.array(wlb.mpi.reduceInt(sequence, self._reduce_map[operation.lower()]))
+
+    def create_vtk_writer(self, file_name, data_names, ghost_layers=False):
+        if ghost_layers is False:
+            ghost_layers = 0
+        if ghost_layers is True:
+            ghost_layers = min(self.ghost_layers_of_field(n) for n in data_names)
+        file_name = "%s_%02d" % (file_name, ParallelDataHandling.VTK_COUNTER)
         ParallelDataHandling.VTK_COUNTER += 1
-        output = wlb.vtk.makeOutput(self.blocks, fileName, ghostLayers=ghostLayers)
-        for n in dataNames:
+        output = wlb.vtk.makeOutput(self.blocks, file_name, ghostLayers=ghost_layers)
+        for n in data_names:
             output.addCellDataWriter(wlb.field.createVTKWriter(self.blocks, n))
         return output
 
-    def vtkWriterFlags(self, fileName, dataName, masksToName, ghostLayers=False):
-        if ghostLayers is False:
-            ghostLayers = 0
-        if ghostLayers is True:
-            ghostLayers = self.ghostLayersOfField(dataName)
+    def create_vtk_writer_for_flag_array(self, file_name, data_name, masks_to_name, ghost_layers=False):
+        if ghost_layers is False:
+            ghost_layers = 0
+        if ghost_layers is True:
+            ghost_layers = self.ghost_layers_of_field(data_name)
 
-        output = wlb.vtk.makeOutput(self.blocks, fileName, ghostLayers=ghostLayers)
-        for mask, name in masksToName.items():
-            w = wlb.field.createBinarizationVTKWriter(self.blocks, dataName, mask, name)
+        output = wlb.vtk.makeOutput(self.blocks, file_name, ghostLayers=ghost_layers)
+        for mask, name in masks_to_name.items():
+            w = wlb.field.createBinarizationVTKWriter(self.blocks, data_name, mask, name)
             output.addCellDataWriter(w)
         return output
-
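For orientation, a short hedged sketch of how the renamed parallel reduction and synchronization API above is used; `dh`, `'src'` and `local_max` are placeholder names, and the scheme returned by the synchronization function is assumed to be callable as in waLBerla:

    # hedged sketch, not part of this patch: 'dh' is a ParallelDataHandling
    # instance, 'src' a previously added array, local_max a per-process value
    sync = dh.synchronization_function(['src'])
    sync()  # exchange ghost layers via the wlb communication scheme

    # operation names such as 'sum', 'min', 'max' are looked up case-insensitively
    # in dh._reduce_map and forwarded to wlb.mpi.reduceReal / allreduceReal
    global_max = dh.reduce_float_sequence([local_max], 'max', all_reduce=True)[0]
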
diff --git a/datahandling/serial_datahandling.py b/datahandling/serial_datahandling.py
index 4b91b10ffad37bb2333125e3dd8def4bb0cd17d0..c53bf26ef44009ecc547675c2fc29c218b09f4f1 100644
--- a/datahandling/serial_datahandling.py
+++ b/datahandling/serial_datahandling.py
@@ -1,12 +1,11 @@
 import itertools
-
+from typing import Sequence, Union
 import numpy as np
-
 from pystencils import Field
 from pystencils.datahandling.datahandling_interface import DataHandling
-from pystencils.field import layoutStringToTuple, spatialLayoutStringToTuple, createNumpyArrayWithLayout
+from pystencils.field import layout_string_to_tuple, spatial_layout_string_to_tuple, create_numpy_array_with_layout
 from pystencils.parallel.blockiteration import SerialBlock
-from pystencils.slicing import normalizeSlice, removeGhostLayers
+from pystencils.slicing import normalize_slice, remove_ghost_layers
 from pystencils.utils import DotDict
 
 try:
@@ -18,26 +17,28 @@ except ImportError:
 
 class SerialDataHandling(DataHandling):
 
-    def __init__(self, domainSize, defaultGhostLayers=1, defaultLayout='SoA', periodicity=False,  defaultTarget='cpu'):
+    def __init__(self, domain_size: Sequence[int], default_ghost_layers: int = 1, default_layout: str = 'SoA',
+                 periodicity: Union[bool, Sequence[bool]] = False, default_target: str = 'cpu') -> None:
         """
-        Creates a data handling for single node simulations
-
-        :param domainSize: size of the spatial domain as tuple
-        :param defaultGhostLayers: nr of ghost layers used if not specified in add() method
-        :param defaultLayout: layout used if no layout is given to add() method
-        :param defaultTarget: either 'cpu' or 'gpu' . If set to 'gpu' for each array also a GPU version is allocated
-                              if not overwritten in addArray, and synchronization functions are for the GPU by default
+        Creates a data handling for single node simulations.
+
+        Args:
+            domain_size: size of the spatial domain as tuple
+            default_ghost_layers: default number of ghost layers used, if not overridden in the add_array() method
+            default_layout: default layout used, if not overridden in the add_array() method
+            periodicity: either a single bool for all dimensions or one bool per spatial dimension
+            default_target: either 'cpu' or 'gpu'. If set to 'gpu', a GPU version of each array is allocated as
+                            well, unless overridden in add_array(), and synchronization functions target the GPU
+                            by default
         """
         super(SerialDataHandling, self).__init__()
-        self._domainSize = tuple(domainSize)
-        self.defaultGhostLayers = defaultGhostLayers
-        self.defaultLayout = defaultLayout
+        self._domainSize = tuple(domain_size)
+        self.defaultGhostLayers = default_ghost_layers
+        self.defaultLayout = default_layout
         self._fields = DotDict()
-        self.cpuArrays = DotDict()
-        self.gpuArrays = DotDict()
-        self.customDataCpu = DotDict()
-        self.customDataGpu = DotDict()
-        self._customDataTransferFunctions = {}
+        self.cpu_arrays = DotDict()
+        self.gpu_arrays = DotDict()
+        self.custom_data_cpu = DotDict()
+        self.custom_data_gpu = DotDict()
+        self._custom_data_transfer_functions = {}
 
         if periodicity is None or periodicity is False:
             periodicity = [False] * self.dim
@@ -45,8 +46,8 @@ class SerialDataHandling(DataHandling):
             periodicity = [True] * self.dim
 
         self._periodicity = periodicity
-        self._fieldInformation = {}
-        self.defaultTarget = defaultTarget
+        self._field_information = {}
+        self.defaultTarget = default_target
 
     @property
     def dim(self):
@@ -64,128 +65,131 @@ class SerialDataHandling(DataHandling):
     def fields(self):
         return self._fields
 
-    def ghostLayersOfField(self, name):
-        return self._fieldInformation[name]['ghostLayers']
+    def ghost_layers_of_field(self, name):
+        return self._field_information[name]['ghost_layers']
 
-    def fSize(self, name):
-        return self._fieldInformation[name]['fSize']
+    def values_per_cell(self, name):
+        return self._field_information[name]['values_per_cell']
 
-    def addArray(self, name, fSize=1, dtype=np.float64, latexName=None, ghostLayers=None, layout=None,
-                 cpu=True, gpu=None, alignment=False):
-        if ghostLayers is None:
-            ghostLayers = self.defaultGhostLayers
+    def add_array(self, name, values_per_cell=1, dtype=np.float64, latex_name=None, ghost_layers=None, layout=None,
+                  cpu=True, gpu=None, alignment=False):
+        if ghost_layers is None:
+            ghost_layers = self.defaultGhostLayers
         if layout is None:
             layout = self.defaultLayout
         if gpu is None:
             gpu = self.defaultTarget == 'gpu'
 
         kwargs = {
-            'shape': tuple(s + 2 * ghostLayers for s in self._domainSize),
+            'shape': tuple(s + 2 * ghost_layers for s in self._domainSize),
             'dtype': dtype,
         }
-        self._fieldInformation[name] = {
-            'ghostLayers': ghostLayers,
-            'fSize': fSize,
+        self._field_information[name] = {
+            'ghost_layers': ghost_layers,
+            'values_per_cell': values_per_cell,
             'layout': layout,
             'dtype': dtype,
         }
 
-        if fSize > 1:
-            kwargs['shape'] = kwargs['shape'] + (fSize,)
-            indexDimensions = 1
-            layoutTuple = layoutStringToTuple(layout, self.dim+1)
+        if values_per_cell > 1:
+            kwargs['shape'] = kwargs['shape'] + (values_per_cell,)
+            index_dimensions = 1
+            layout_tuple = layout_string_to_tuple(layout, self.dim + 1)
         else:
-            indexDimensions = 0
-            layoutTuple = spatialLayoutStringToTuple(layout, self.dim)
+            index_dimensions = 0
+            layout_tuple = spatial_layout_string_to_tuple(layout, self.dim)
 
-        # cpuArr is always created - since there is no createPycudaArrayWithLayout()
-        byteOffset = ghostLayers * np.dtype(dtype).itemsize
-        cpuArr = createNumpyArrayWithLayout(layout=layoutTuple, alignment=alignment, byteOffset=byteOffset, **kwargs)
-        cpuArr.fill(np.inf)
+        # cpu_arr is always created - since there is no createPycudaArrayWithLayout()
+        byte_offset = ghost_layers * np.dtype(dtype).itemsize
+        cpu_arr = create_numpy_array_with_layout(layout=layout_tuple, alignment=alignment,
+                                                 byte_offset=byte_offset, **kwargs)
+        cpu_arr.fill(np.inf)
 
         if alignment and gpu:
             raise NotImplementedError("Alignment for GPU fields not supported")
 
         if cpu:
-            if name in self.cpuArrays:
+            if name in self.cpu_arrays:
                 raise ValueError("CPU Field with this name already exists")
-            self.cpuArrays[name] = cpuArr
+            self.cpu_arrays[name] = cpu_arr
         if gpu:
-            if name in self.gpuArrays:
+            if name in self.gpu_arrays:
                 raise ValueError("GPU Field with this name already exists")
-            self.gpuArrays[name] = gpuarray.to_gpu(cpuArr)
+            self.gpu_arrays[name] = gpuarray.to_gpu(cpu_arr)
 
         assert all(f.name != name for f in self.fields.values()), "Symbolic field with this name already exists"
-        self.fields[name] = Field.createFromNumpyArray(name, cpuArr, indexDimensions=indexDimensions)
-        self.fields[name].latexName = latexName
+        self.fields[name] = Field.create_from_numpy_array(name, cpu_arr, index_dimensions=index_dimensions)
+        self.fields[name].latex_name = latex_name
         return self.fields[name]
 
-    def addCustomData(self, name, cpuCreationFunction,
-                      gpuCreationFunction=None, cpuToGpuTransferFunc=None, gpuToCpuTransferFunc=None):
+    def add_custom_data(self, name, cpu_creation_function,
+                        gpu_creation_function=None, cpu_to_gpu_transfer_func=None, gpu_to_cpu_transfer_func=None):
 
-        if cpuCreationFunction and gpuCreationFunction:
-            if cpuToGpuTransferFunc is None or gpuToCpuTransferFunc is None:
+        if cpu_creation_function and gpu_creation_function:
+            if cpu_to_gpu_transfer_func is None or gpu_to_cpu_transfer_func is None:
                 raise ValueError("For GPU data, both transfer functions have to be specified")
-            self._customDataTransferFunctions[name] = (cpuToGpuTransferFunc, gpuToCpuTransferFunc)
+            self._custom_data_transfer_functions[name] = (cpu_to_gpu_transfer_func, gpu_to_cpu_transfer_func)
 
-        assert name not in self.customDataCpu
-        if cpuCreationFunction:
-            assert name not in self.cpuArrays
-            self.customDataCpu[name] = cpuCreationFunction()
+        assert name not in self.custom_data_cpu
+        if cpu_creation_function:
+            assert name not in self.cpu_arrays
+            self.custom_data_cpu[name] = cpu_creation_function()
 
-        if gpuCreationFunction:
-            assert name not in self.gpuArrays
-            self.customDataGpu[name] = gpuCreationFunction()
+        if gpu_creation_function:
+            assert name not in self.gpu_arrays
+            self.custom_data_gpu[name] = gpu_creation_function()
 
-    def hasData(self, name):
+    def has_data(self, name):
         return name in self.fields
 
-    def addArrayLike(self, name, nameOfTemplateField, latexName=None, cpu=True, gpu=None):
-        return self.addArray(name, latexName=latexName, cpu=cpu, gpu=gpu, **self._fieldInformation[nameOfTemplateField])
-
-    def iterate(self, sliceObj=None, gpu=False, ghostLayers=True, innerGhostLayers=True):
-        if ghostLayers is True:
-            ghostLayers = self.defaultGhostLayers
-        elif ghostLayers is False:
-            ghostLayers = 0
-        elif isinstance(ghostLayers, str):
-            ghostLayers = self.ghostLayersOfField(ghostLayers)
-
-        if sliceObj is None:
-            sliceObj = (slice(None, None, None),) * self.dim
-        sliceObj = normalizeSlice(sliceObj, tuple(s + 2 * ghostLayers for s in self._domainSize))
-        sliceObj = tuple(s if type(s) is slice else slice(s, s+1, None) for s in sliceObj)
-
-        arrays = self.gpuArrays if gpu else self.cpuArrays
-        customDataDict = self.customDataGpu if gpu else self.customDataCpu
-        iterDict = customDataDict.copy()
+    def add_array_like(self, name, name_of_template_field, latex_name=None, cpu=True, gpu=None):
+        return self.add_array(name, latex_name=latex_name, cpu=cpu, gpu=gpu,
+                              **self._field_information[name_of_template_field])
+
+    def iterate(self, slice_obj=None, gpu=False, ghost_layers=True, inner_ghost_layers=True):
+        if ghost_layers is True:
+            ghost_layers = self.defaultGhostLayers
+        elif ghost_layers is False:
+            ghost_layers = 0
+        elif isinstance(ghost_layers, str):
+            ghost_layers = self.ghost_layers_of_field(ghost_layers)
+
+        if slice_obj is None:
+            slice_obj = (slice(None, None, None),) * self.dim
+        slice_obj = normalize_slice(slice_obj, tuple(s + 2 * ghost_layers for s in self._domainSize))
+        slice_obj = tuple(s if type(s) is slice else slice(s, s + 1, None) for s in slice_obj)
+
+        arrays = self.gpu_arrays if gpu else self.cpu_arrays
+        custom_data_dict = self.custom_data_gpu if gpu else self.custom_data_cpu
+        iter_dict = custom_data_dict.copy()
         for name, arr in arrays.items():
-            fieldGls = self._fieldInformation[name]['ghostLayers']
-            if fieldGls < ghostLayers:
+            field_gls = self._field_information[name]['ghost_layers']
+            if field_gls < ghost_layers:
                 continue
-            arr = removeGhostLayers(arr, indexDimensions=len(arr.shape) - self.dim, ghostLayers=fieldGls-ghostLayers)
-            iterDict[name] = arr
-
-        offset = tuple(s.start - ghostLayers for s in sliceObj)
-        yield SerialBlock(iterDict, offset, sliceObj)
-
-    def gatherArray(self, name, sliceObj=None, ghostLayers=False, **kwargs):
-        glToRemove = self._fieldInformation[name]['ghostLayers']
-        if isinstance(ghostLayers, int):
-            glToRemove -= ghostLayers
-        if ghostLayers is True:
-            glToRemove = 0
-        arr = self.cpuArrays[name]
-        indDimensions = self.fields[name].indexDimensions
-        spatialDimensions = self.fields[name].spatialDimensions
-
-        arr = removeGhostLayers(arr, indexDimensions=indDimensions, ghostLayers=glToRemove)
-
-        if sliceObj is not None:
-            normalizedSlice = normalizeSlice(sliceObj[:spatialDimensions], arr.shape[:spatialDimensions])
-            normalizedSlice = tuple(s if type(s) is slice else slice(s, s + 1, None) for s in normalizedSlice)
-            normalizedSlice += sliceObj[spatialDimensions:]
-            arr = arr[normalizedSlice]
+            arr = remove_ghost_layers(arr, index_dimensions=len(arr.shape) - self.dim,
+                                      ghost_layers=field_gls - ghost_layers)
+            iter_dict[name] = arr
+
+        offset = tuple(s.start - ghost_layers for s in slice_obj)
+        yield SerialBlock(iter_dict, offset, slice_obj)
+
+    def gather_array(self, name, slice_obj=None, ghost_layers=False, **kwargs):
+        gl_to_remove = self._field_information[name]['ghost_layers']
+        if ghost_layers is True:
+            gl_to_remove = 0
+        elif isinstance(ghost_layers, int):
+            gl_to_remove -= ghost_layers
+        arr = self.cpu_arrays[name]
+        ind_dimensions = self.fields[name].index_dimensions
+        spatial_dimensions = self.fields[name].spatial_dimensions
+
+        arr = remove_ghost_layers(arr, index_dimensions=ind_dimensions, ghost_layers=gl_to_remove)
+
+        if slice_obj is not None:
+            normalized_slice = normalize_slice(slice_obj[:spatial_dimensions], arr.shape[:spatial_dimensions])
+            normalized_slice = tuple(s if type(s) is slice else slice(s, s + 1, None) for s in normalized_slice)
+            normalized_slice += slice_obj[spatial_dimensions:]
+            arr = arr[normalized_slice]
         else:
             arr = arr.view()
         arr.flags.writeable = False
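The gather_array semantics just above, as a hedged example (`dh` and `'src'` are placeholders):

    # hedged sketch: gather_array strips ghost layers by default and
    # returns a read-only view of the CPU array
    interior = dh.gather_array('src')
    part = dh.gather_array('src', slice_obj=(slice(0, 8), slice(None)))
    assert not part.flags.writeable
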
@@ -193,57 +197,58 @@ class SerialDataHandling(DataHandling):
 
     def swap(self, name1, name2, gpu=False):
         if not gpu:
-            self.cpuArrays[name1], self.cpuArrays[name2] = self.cpuArrays[name2], self.cpuArrays[name1]
+            self.cpu_arrays[name1], self.cpu_arrays[name2] = self.cpu_arrays[name2], self.cpu_arrays[name1]
         else:
-            self.gpuArrays[name1], self.gpuArrays[name2] = self.gpuArrays[name2], self.gpuArrays[name1]
-
-    def allToCpu(self):
-        for name in (self.cpuArrays.keys() & self.gpuArrays.keys()) | self._customDataTransferFunctions.keys():
-            self.toCpu(name)
-
-    def allToGpu(self):
-        for name in (self.cpuArrays.keys() & self.gpuArrays.keys()) | self._customDataTransferFunctions.keys():
-            self.toGpu(name)
-
-    def runKernel(self, kernelFunc, *args, **kwargs):
-        dataUsedInKernel = [p.fieldName
-                            for p in kernelFunc.parameters if p.isFieldPtrArgument and p.fieldName not in kwargs]
-        arrays = self.gpuArrays if kernelFunc.ast.backend == 'gpucuda' else self.cpuArrays
-        arrayParams = {name: arrays[name] for name in dataUsedInKernel}
-        arrayParams.update(kwargs)
-        kernelFunc(*args, **arrayParams)
-
-    def toCpu(self, name):
-        if name in self._customDataTransferFunctions:
-            transferFunc = self._customDataTransferFunctions[name][1]
-            transferFunc(self.customDataGpu[name], self.customDataCpu[name])
+            self.gpu_arrays[name1], self.gpu_arrays[name2] = self.gpu_arrays[name2], self.gpu_arrays[name1]
+
+    def all_to_cpu(self):
+        for name in (self.cpu_arrays.keys() & self.gpu_arrays.keys()) | self._custom_data_transfer_functions.keys():
+            self.to_cpu(name)
+
+    def all_to_gpu(self):
+        for name in (self.cpu_arrays.keys() & self.gpu_arrays.keys()) | self._custom_data_transfer_functions.keys():
+            self.to_gpu(name)
+
+    def run_kernel(self, kernel_function, *args, **kwargs):
+        data_used_in_kernel = [p.field_name for p in kernel_function.parameters
+                               if p.isFieldPtrArgument and p.field_name not in kwargs]
+        arrays = self.gpu_arrays if kernel_function.ast.backend == 'gpucuda' else self.cpu_arrays
+        array_params = {name: arrays[name] for name in data_used_in_kernel}
+        array_params.update(kwargs)
+        kernel_function(*args, **array_params)
+
+    def to_cpu(self, name):
+        if name in self._custom_data_transfer_functions:
+            transfer_func = self._custom_data_transfer_functions[name][1]
+            transfer_func(self.custom_data_gpu[name], self.custom_data_cpu[name])
         else:
-            self.gpuArrays[name].get(self.cpuArrays[name])
+            self.gpu_arrays[name].get(self.cpu_arrays[name])
 
-    def toGpu(self, name):
-        if name in self._customDataTransferFunctions:
-            transferFunc = self._customDataTransferFunctions[name][0]
-            transferFunc(self.customDataGpu[name], self.customDataCpu[name])
+    def to_gpu(self, name):
+        if name in self._custom_data_transfer_functions:
+            transfer_func = self._custom_data_transfer_functions[name][0]
+            transfer_func(self.custom_data_gpu[name], self.custom_data_cpu[name])
         else:
-            self.gpuArrays[name].set(self.cpuArrays[name])
+            self.gpu_arrays[name].set(self.cpu_arrays[name])
 
-    def isOnGpu(self, name):
-        return name in self.gpuArrays
+    def is_on_gpu(self, name):
+        return name in self.gpu_arrays
 
-    def synchronizationFunctionCPU(self, names, stencilName=None, **kwargs):
-        return self.synchronizationFunction(names, stencilName, 'cpu')
+    def synchronization_function_cpu(self, names, stencil_name=None, **_):
+        return self.synchronization_function(names, stencil_name, 'cpu')
 
-    def synchronizationFunctionGPU(self, names, stencilName=None, **kwargs):
-        return self.synchronizationFunction(names, stencilName, 'gpu')
+    def synchronization_function_gpu(self, names, stencil_name=None, **_):
+        return self.synchronization_function(names, stencil_name, 'gpu')
 
-    def synchronizationFunction(self, names, stencil=None, target=None):
+    def synchronization_function(self, names, stencil=None, target=None, **_):
         if target is None:
             target = self.defaultTarget
         assert target in ('cpu', 'gpu')
         if not hasattr(names, '__len__') or type(names) is str:
             names = [names]
 
-        filteredStencil = []
+        filtered_stencil = []
         neighbors = [-1, 0, 1]
 
         if (stencil is None and self.dim == 2) or (stencil is not None and stencil.startswith('D2')):
@@ -254,105 +259,102 @@ class SerialDataHandling(DataHandling):
             raise ValueError("Invalid stencil")
 
         for direction in directions:
-            useDirection = True
+            use_direction = True
             if direction == (0, 0) or direction == (0, 0, 0):
-                useDirection = False
+                use_direction = False
             for component, periodicity in zip(direction, self._periodicity):
                 if not periodicity and component != 0:
-                    useDirection = False
-            if useDirection:
-                filteredStencil.append(direction)
+                    use_direction = False
+            if use_direction:
+                filtered_stencil.append(direction)
 
-        resultFunctors = []
+        result = []
         for name in names:
-            gls = self._fieldInformation[name]['ghostLayers']
-            if len(filteredStencil) > 0:
+            gls = self._field_information[name]['ghost_layers']
+            if len(filtered_stencil) > 0:
                 if target == 'cpu':
-                    from pystencils.slicing import getPeriodicBoundaryFunctor
-                    resultFunctors.append(getPeriodicBoundaryFunctor(filteredStencil, ghostLayers=gls))
+                    from pystencils.slicing import get_periodic_boundary_functor
+                    result.append(get_periodic_boundary_functor(filtered_stencil, ghost_layers=gls))
                 else:
-                    from pystencils.gpucuda.periodicity import getPeriodicBoundaryFunctor
-                    resultFunctors.append(getPeriodicBoundaryFunctor(filteredStencil, self._domainSize,
-                                                                     indexDimensions=self.fields[name].indexDimensions,
-                                                                     indexDimShape=self._fieldInformation[name]['fSize'],
-                                                                     dtype=self.fields[name].dtype.numpy_dtype,
-                                                                     ghostLayers=gls))
+                    from pystencils.gpucuda.periodicity import get_periodic_boundary_functor
+                    values_per_cell = self._field_information[name]['values_per_cell']
+                    result.append(get_periodic_boundary_functor(filtered_stencil, self._domainSize,
+                                                                index_dimensions=self.fields[name].index_dimensions,
+                                                                index_dim_shape=values_per_cell,
+                                                                dtype=self.fields[name].dtype.numpy_dtype,
+                                                                ghost_layers=gls))
 
         if target == 'cpu':
-            def resultFunctor():
-                for name, func in zip(names, resultFunctors):
-                    func(pdfs=self.cpuArrays[name])
+            def result_functor():
+                for arr_name, func in zip(names, result):
+                    func(pdfs=self.cpu_arrays[arr_name])
         else:
-            def resultFunctor():
-                for name, func in zip(names, resultFunctors):
-                    func(pdfs=self.gpuArrays[name])
+            def result_functor():
+                for arr_name, func in zip(names, result):
+                    func(pdfs=self.gpu_arrays[arr_name])
 
-        return resultFunctor
+        return result_functor
 
     @property
-    def arrayNames(self):
+    def array_names(self):
         return tuple(self.fields.keys())
 
     @property
-    def customDataNames(self):
-        return tuple(self.customDataCpu.keys())
+    def custom_data_names(self):
+        return tuple(self.custom_data_cpu.keys())
 
-    @staticmethod
-    def reduceFloatSequence(sequence, operation, allReduce=False):
+    def reduce_float_sequence(self, sequence, operation, all_reduce=False) -> np.ndarray:
         return np.array(sequence)
 
-    @staticmethod
-    def reduceIntSequence(sequence):
+    def reduce_int_sequence(self, sequence, operation, all_reduce=False) -> np.ndarray:
         return np.array(sequence)
 
-    def vtkWriter(self, fileName, dataNames, ghostLayers=False):
-        from pystencils.vtk import imageToVTK
+    def create_vtk_writer(self, file_name, data_names, ghost_layers=False):
+        from pystencils.vtk import image_to_vtk
 
         def writer(step):
-            fullFileName = "%s_%08d" % (fileName, step,)
-            cellData = {}
-            for name in dataNames:
-                field = self._getFieldWithGivenNumberOfGhostLayers(name, ghostLayers)
+            full_file_name = "%s_%08d" % (file_name, step,)
+            cell_data = {}
+            for name in data_names:
+                field = self._get_field_with_given_number_of_ghost_layers(name, ghost_layers)
                 if self.dim == 2:
-                    cellData[name] = field[:, :, np.newaxis]
+                    # append a dummy axis so the shape checks below also cover 2D fields
+                    field = field[:, :, np.newaxis]
                 if len(field.shape) == 3:
-                    cellData[name] = np.ascontiguousarray(field)
+                    cell_data[name] = np.ascontiguousarray(field)
                 elif len(field.shape) == 4:
-                    fSize = field.shape[-1]
-                    if fSize == self.dim:
-                        field = [np.ascontiguousarray(field[..., i]) for i in range(fSize)]
+                    values_per_cell = field.shape[-1]
+                    if values_per_cell == self.dim:
+                        field = [np.ascontiguousarray(field[..., i]) for i in range(values_per_cell)]
                         if len(field) == 2:
                             field.append(np.zeros_like(field[0]))
-                        cellData[name] = tuple(field)
+                        cell_data[name] = tuple(field)
                     else:
-                        for i in range(fSize):
-                            cellData["%s[%d]" % (name, i)] = np.ascontiguousarray(field[..., i])
+                        for i in range(values_per_cell):
+                            cell_data["%s[%d]" % (name, i)] = np.ascontiguousarray(field[..., i])
                 else:
                     assert False
-            imageToVTK(fullFileName, cellData=cellData)
+            image_to_vtk(full_file_name, cell_data=cell_data)
         return writer
 
-    def vtkWriterFlags(self, fileName, dataName, masksToName, ghostLayers=False):
-        from pystencils.vtk import imageToVTK
+    def create_vtk_writer_for_flag_array(self, file_name, data_name, masks_to_name, ghost_layers=False):
+        from pystencils.vtk import image_to_vtk
 
         def writer(step):
-            fullFileName = "%s_%08d" % (fileName, step,)
-            field = self._getFieldWithGivenNumberOfGhostLayers(dataName, ghostLayers)
+            full_file_name = "%s_%08d" % (file_name, step,)
+            field = self._get_field_with_given_number_of_ghost_layers(data_name, ghost_layers)
             if self.dim == 2:
                 field = field[:, :, np.newaxis]
-            cellData = {name: np.ascontiguousarray(np.bitwise_and(field, mask) > 0, dtype=np.uint8)
-                        for mask, name in masksToName.items()}
-            imageToVTK(fullFileName, cellData=cellData)
+            cell_data = {name: np.ascontiguousarray(np.bitwise_and(field, mask) > 0, dtype=np.uint8)
+                         for mask, name in masks_to_name.items()}
+            image_to_vtk(full_file_name, cell_data=cell_data)
 
         return writer
 
-    def _getFieldWithGivenNumberOfGhostLayers(self, name, ghostLayers):
-        actualGhostLayers = self.ghostLayersOfField(name)
-        if ghostLayers is True:
-            ghostLayers = actualGhostLayers
-
-        glToRemove = actualGhostLayers - ghostLayers
-        indDims = 1 if self._fieldInformation[name]['fSize'] > 1 else 0
-        return removeGhostLayers(self.cpuArrays[name], indDims, glToRemove)
-
+    def _get_field_with_given_number_of_ghost_layers(self, name, ghost_layers):
+        actual_ghost_layers = self.ghost_layers_of_field(name)
+        if ghost_layers is True:
+            ghost_layers = actual_ghost_layers
 
+        gl_to_remove = actual_ghost_layers - ghost_layers
+        ind_dims = 1 if self._field_information[name]['values_per_cell'] > 1 else 0
+        return remove_ghost_layers(self.cpu_arrays[name], ind_dims, gl_to_remove)
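Taken together, a hedged end-to-end sketch of the renamed SerialDataHandling API (domain size, array names and the fill value are made up for illustration):

    import numpy as np
    from pystencils.datahandling.serial_datahandling import SerialDataHandling

    # hedged sketch, not part of this patch
    dh = SerialDataHandling(domain_size=(32, 32), default_ghost_layers=1, periodicity=True)
    dh.add_array('src', values_per_cell=1, dtype=np.float64)
    dh.add_array_like('dst', 'src')                # inherits ghost layers, layout, dtype

    for block in dh.iterate(ghost_layers=False):   # iterate over the interior only
        block['src'].fill(0.0)

    sync = dh.synchronization_function(['src'])    # periodic ghost-layer exchange
    sync()
    dh.swap('src', 'dst')
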
diff --git a/derivative.py b/derivative.py
index c2dadd8e4e540d62f66b5c33393a366bbdc3f246..0716c79734d29c62db88aac5a77e647b6aff584a 100644
--- a/derivative.py
+++ b/derivative.py
@@ -3,7 +3,7 @@ from collections import namedtuple, defaultdict
 from pystencils.sympyextensions import normalize_product, prod
 
 
-def defaultDiffSortKey(d):
+def default_diff_sort_key(d):
     return str(d.superscript), str(d.target)
 
 
@@ -18,35 +18,35 @@ class Diff(sp.Expr):
     is_Rational = False
     _diff_wrt = True
 
-    def __new__(cls, argument, target=-1, superscript=-1, **kwargs):
+    def __new__(cls, argument, target=-1, superscript=-1):
         if argument == 0:
             return sp.Rational(0, 1)
-        return sp.Expr.__new__(cls, argument.expand(), sp.sympify(target), sp.sympify(superscript), **kwargs)
+        return sp.Expr.__new__(cls, argument.expand(), sp.sympify(target), sp.sympify(superscript))
 
     @property
     def is_commutative(self):
-        anyNonCommutative = any(not s.is_commutative for s in self.atoms(sp.Symbol))
-        if anyNonCommutative:
-            return False
-        else:
-            return True
+        return not any(not s.is_commutative for s in self.atoms(sp.Symbol))
 
-    def getArgRecursive(self):
+    def get_arg_recursive(self):
         """Returns the argument the derivative acts on, for nested derivatives the inner argument is returned"""
         if not isinstance(self.arg, Diff):
             return self.arg
         else:
-            return self.arg.getArgRecursive()
+            return self.arg.get_arg_recursive()
 
-    def changeArgRecursive(self, newArg):
-        """Returns a Diff node with the given 'newArg' instead of the current argument. For nested derivatives
-        a new nested derivative is returned where the inner Diff has the 'newArg'"""
+    def change_arg_recursive(self, new_arg):
+        """Returns a Diff node with the given 'new_arg' instead of the current argument. For nested derivatives
+        a new nested derivative is returned where the inner Diff has the 'new_arg'"""
         if not isinstance(self.arg, Diff):
-            return Diff(newArg, self.target, self.superscript)
+            return Diff(new_arg, self.target, self.superscript)
         else:
-            return Diff(self.arg.changeArgRecursive(newArg), self.target, self.superscript)
+            return Diff(self.arg.change_arg_recursive(new_arg), self.target, self.superscript)
 
-    def splitLinear(self, functions):
+    def split_linear(self, functions):
         """
         Applies linearity property of Diff: i.e.  'Diff(c*a+b)' is transformed to 'c * Diff(a) + Diff(b)'
         The parameter functions is a list of all symbols that are considered functions, not constants.
@@ -83,10 +83,10 @@ class Diff(sp.Expr):
 
     @property
     def superscript(self):
-        """Superscript, used as the Chapman Enskog order index"""
+        """Superscript, for example used as the Chapman-Enskog order index"""
         return self.args[2]
 
-    def _latex(self, printer, *args):
+    def _latex(self, printer, *_):
         result = "{\partial"
         if self.superscript >= 0:
             result += "^{(%s)}" % (self.superscript,)
@@ -122,8 +122,8 @@ class DiffOperator(sp.Expr):
     is_number = False
     is_Rational = False
 
-    def __new__(cls, target=-1, superscript=-1, **kwargs):
-        return sp.Expr.__new__(cls, sp.sympify(target), sp.sympify(superscript), **kwargs)
+    def __new__(cls, target=-1, superscript=-1):
+        return sp.Expr.__new__(cls, sp.sympify(target), sp.sympify(superscript))
 
     @property
     def target(self):
@@ -133,7 +133,7 @@ class DiffOperator(sp.Expr):
     def superscript(self):
         return self.args[1]
 
-    def _latex(self, printer, *args):
+    def _latex(self, *_):
         result = "{\partial"
         if self.superscript >= 0:
             result += "^{(%s)}" % (self.superscript,)
@@ -143,19 +143,19 @@ class DiffOperator(sp.Expr):
         return result
 
     @staticmethod
-    def apply(expr, argument, applyToConstants=True):
+    def apply(expr, argument, apply_to_constants=True):
         """
         Returns a new expression where each 'DiffOperator' is replaced by a 'Diff' node.
         Multiplications of 'DiffOperator's are interpreted as nested application of differentiation:
         i.e. DiffOperator('x')*DiffOperator('x') is a second derivative replaced by Diff(Diff(arg, x), x)
         """
-        def handleMul(mul):
+        def handle_mul(mul):
             args = normalize_product(mul)
             diffs = [a for a in args if isinstance(a, DiffOperator)]
             if len(diffs) == 0:
-                return mul * argument if applyToConstants else mul
+                return mul * argument if apply_to_constants else mul
             rest = [a for a in args if not isinstance(a, DiffOperator)]
-            diffs.sort(key=defaultDiffSortKey)
+            diffs.sort(key=default_diff_sort_key)
             result = argument
             for d in reversed(diffs):
                 result = Diff(result, target=d.target, superscript=d.superscript)
@@ -163,16 +163,16 @@ class DiffOperator(sp.Expr):
 
         expr = expr.expand()
         if expr.func == sp.Mul or expr.func == sp.Pow:
-            return handleMul(expr)
+            return handle_mul(expr)
         elif expr.func == sp.Add:
-            return expr.func(*[handleMul(a) for a in expr.args])
+            return expr.func(*[handle_mul(a) for a in expr.args])
         else:
-            return expr * argument if applyToConstants else expr
+            return expr * argument if apply_to_constants else expr
 
 # ----------------------------------------------------------------------------------------------------------------------
 
 
-def derivativeTerms(expr):
+def derivative_terms(expr):
     """
     Returns a set of all derivatives in an expression.
     This is different from `expr.atoms(Diff)` when nested derivatives are in the expression,
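The nesting rule documented in DiffOperator.apply above, as a hedged example (the module path pystencils.derivative is an assumption):

    import sympy as sp
    from pystencils.derivative import DiffOperator

    # hedged sketch: multiplying operators nests the differentiation on application
    f = sp.Symbol('f')
    dx = DiffOperator(target='x')
    DiffOperator.apply(dx * dx, f)  # -> Diff(Diff(f, x), x)
    DiffOperator.apply(2 * dx, f)   # -> 2 * Diff(f, x), constants stay in front
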
@@ -190,12 +190,12 @@ def derivativeTerms(expr):
     return result
 
 
-def collectDerivatives(expr):
-    """Rewrites expression into a sum of distinct derivatives with prefactors"""
-    return expr.collect(derivativeTerms(expr))
+def collect_derivatives(expr):
+    """Rewrites expression into a sum of distinct derivatives with pre-factors"""
+    return expr.collect(derivative_terms(expr))
 
 
-def createNestedDiff(*args, arg=None):
+def create_nested_diff(*args, arg=None):
     """Shortcut to create nested derivatives"""
     assert arg is not None
     args = sorted(args, reverse=True)
@@ -205,9 +205,9 @@ def createNestedDiff(*args, arg=None):
     return res
 
 
-def expandUsingLinearity(expr, functions=None, constants=None):
+def expand_using_linearity(expr, functions=None, constants=None):
     """
-    Expands all derivative nodes by applying Diff.splitLinear
+    Expands all derivative nodes by applying Diff.split_linear
     :param expr: expression containing derivatives
     :param functions: sequence of symbols that are considered functions and cannot be pulled before the derivative.
                       if None, all symbols are viewed as functions
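A hedged example of the linearity expansion described above (symbols are placeholders):

    import sympy as sp
    from pystencils.derivative import Diff, expand_using_linearity

    a, b, c = sp.symbols('a b c')
    # hedged sketch: with only a and b declared as functions,
    # the constant c is pulled out of the derivative
    expand_using_linearity(Diff(c * a + b, target='x'), functions={a, b})
    # -> c*Diff(a, x) + Diff(b, x)
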
@@ -219,25 +219,25 @@ def expandUsingLinearity(expr, functions=None, constants=None):
             functions.difference_update(constants)
 
     if isinstance(expr, Diff):
-        arg = expandUsingLinearity(expr.arg, functions)
+        arg = expand_using_linearity(expr.arg, functions)
         if hasattr(arg, 'func') and arg.func == sp.Add:
             result = 0
             for a in arg.args:
-                result += Diff(a, target=expr.target, superscript=expr.superscript).splitLinear(functions)
+                result += Diff(a, target=expr.target, superscript=expr.superscript).split_linear(functions)
             return result
         else:
             diff = Diff(arg, target=expr.target, superscript=expr.superscript)
             if diff == 0:
                 return 0
             else:
-                return diff.splitLinear(functions)
+                return diff.split_linear(functions)
     else:
-        newArgs = [expandUsingLinearity(e, functions) for e in expr.args]
-        result = sp.expand(expr.func(*newArgs) if newArgs else expr)
+        new_args = [expand_using_linearity(e, functions) for e in expr.args]
+        result = sp.expand(expr.func(*new_args) if new_args else expr)
         return result
 
 
-def fullDiffExpand(expr, functions=None, constants=None):
+def full_diff_expand(expr, functions=None, constants=None):
     if functions is None:
         functions = expr.atoms(sp.Symbol)
         if constants is not None:
@@ -248,26 +248,26 @@ def fullDiffExpand(expr, functions=None, constants=None):
 
         if e.func == Diff:
             result = 0
-            diffArgs = {'target': e.target, 'superscript': e.superscript}
-            diffInner = e.args[0]
-            diffInner = visit(diffInner)
-            for term in diffInner.args if diffInner.func == sp.Add else [diffInner]:
-                independentTerms = 1
-                dependentTerms = []
+            diff_args = {'target': e.target, 'superscript': e.superscript}
+            diff_inner = e.args[0]
+            diff_inner = visit(diff_inner)
+            for term in diff_inner.args if diff_inner.func == sp.Add else [diff_inner]:
+                independent_terms = 1
+                dependent_terms = []
                 for factor in normalize_product(term):
                     if factor in functions or isinstance(factor, Diff):
-                        dependentTerms.append(factor)
+                        dependent_terms.append(factor)
                     else:
-                        independentTerms *= factor
-                for i in range(len(dependentTerms)):
-                    dependentTerm = dependentTerms[i]
-                    otherDependentTerms = dependentTerms[:i] + dependentTerms[i+1:]
-                    processedDiff = normalizeDiffOrder(Diff(dependentTerm, **diffArgs))
-                    result += independentTerms * prod(otherDependentTerms) * processedDiff
+                        independent_terms *= factor
+                for i in range(len(dependent_terms)):
+                    dependent_term = dependent_terms[i]
+                    other_dependent_terms = dependent_terms[:i] + dependent_terms[i+1:]
+                    processed_diff = normalize_diff_order(Diff(dependent_term, **diff_args))
+                    result += independent_terms * prod(other_dependent_terms) * processed_diff
             return result
         else:
-            newArgs = [visit(arg) for arg in e.args]
-            return e.func(*newArgs) if newArgs else e
+            new_args = [visit(arg) for arg in e.args]
+            return e.func(*new_args) if new_args else e
 
     if isinstance(expr, sp.Matrix):
         return expr.applyfunc(visit)
@@ -275,176 +275,179 @@ def fullDiffExpand(expr, functions=None, constants=None):
         return visit(expr)
 
 
-def normalizeDiffOrder(expression, functions=None, constants=None, sortKey=defaultDiffSortKey):
+def normalize_diff_order(expression, functions=None, constants=None, sort_key=default_diff_sort_key):
     """Assumes order of differentiation can be exchanged. Changes the order of nested Diffs to a standard order defined
-    by the sorting key 'sortKey' such that the derivative terms can be further simplified """
+    by the sorting key 'sort_key' such that the derivative terms can be further simplified."""
     def visit(expr):
         if isinstance(expr, Diff):
             nodes = [expr]
             while isinstance(nodes[-1].arg, Diff):
                 nodes.append(nodes[-1].arg)
 
-            processedArg = visit(nodes[-1].arg)
-            nodes.sort(key=sortKey)
+            processed_arg = visit(nodes[-1].arg)
+            nodes.sort(key=sort_key)
 
-            result = processedArg
+            result = processed_arg
             for d in reversed(nodes):
                 result = Diff(result, target=d.target, superscript=d.superscript)
             return result
         else:
-            newArgs = [visit(e) for e in expr.args]
-            return expr.func(*newArgs) if newArgs else expr
+            new_args = [visit(e) for e in expr.args]
+            return expr.func(*new_args) if new_args else expr
 
-    expression = expandUsingLinearity(expression.expand(), functions, constants).expand()
+    expression = expand_using_linearity(expression.expand(), functions, constants).expand()
     return visit(expression)
 
 
-def expandUsingProductRule(expr):
+def expand_using_product_rule(expr):
     """Fully expands all derivatives by applying product rule"""
     if isinstance(expr, Diff):
-        arg = expandUsingProductRule(expr.args[0])
+        arg = expand_using_product_rule(expr.args[0])
         if arg.func == sp.Add:
-            newArgs = [Diff(e, target=expr.target, superscript=expr.superscript)
-                       for e in arg.args]
-            return sp.Add(*newArgs)
+            new_args = [Diff(e, target=expr.target, superscript=expr.superscript)
+                        for e in arg.args]
+            return sp.Add(*new_args)
         if arg.func not in (sp.Mul, sp.Pow):
             return Diff(arg, target=expr.target, superscript=expr.superscript)
         else:
-            prodList = normalize_product(arg)
+            prod_list = normalize_product(arg)
             result = 0
-            for i in range(len(prodList)):
-                preFactor = prod(prodList[j] for j in range(len(prodList)) if i != j)
-                result += preFactor * Diff(prodList[i], target=expr.target, superscript=expr.superscript)
+            for i in range(len(prod_list)):
+                pre_factor = prod(prod_list[j] for j in range(len(prod_list)) if i != j)
+                result += pre_factor * Diff(prod_list[i], target=expr.target, superscript=expr.superscript)
             return result
     else:
-        newArgs = [expandUsingProductRule(e) for e in expr.args]
-        return expr.func(*newArgs) if newArgs else expr
+        new_args = [expand_using_product_rule(e) for e in expr.args]
+        return expr.func(*new_args) if new_args else expr
 
 
-def combineUsingProductRule(expr):
+def combine_using_product_rule(expr):
     """Inverse product rule"""
 
-    def exprToDiffDecomposition(expr):
+    def expr_to_diff_decomposition(expression):
         """Decomposes a sp.Add node containing CeDiffs into:
-        diffDict: maps (target, superscript) -> [ (preFactor, argument), ... ]
-        i.e.  a partial(b) ( a is prefactor, b is argument)
+        diff_dict: maps (target, superscript) -> [ (pre_factor, argument), ... ]
+        i.e.  a partial(b) ( a is pre-factor, b is argument)
             in case of partial(a) partial(b) two entries are created  (0.5 partial(a), b), (0.5 partial(b), a)
         """
         DiffInfo = namedtuple("DiffInfo", ["target", "superscript"])
 
         class DiffSplit:
-            def __init__(self, preFactor, argument):
-                self.preFactor = preFactor
+            def __init__(self, fac, argument):
+                self.pre_factor = fac
                 self.argument = argument
 
             def __repr__(self):
-                return str((self.preFactor, self.argument))
+                return str((self.pre_factor, self.argument))
 
-        assert isinstance(expr, sp.Add)
-        diffDict = defaultdict(list)
+        assert isinstance(expression, sp.Add)
+        diff_dict = defaultdict(list)
         rest = 0
-        for term in expr.args:
+        for term in expression.args:
             if isinstance(term, Diff):
-                diffDict[DiffInfo(term.target, term.superscript)].append(DiffSplit(1, term.arg))
+                diff_dict[DiffInfo(term.target, term.superscript)].append(DiffSplit(1, term.arg))
             else:
-                mulArgs = normalize_product(term)
-                diffs = [d for d in mulArgs if isinstance(d, Diff)]
-                factor = prod(d for d in mulArgs if not isinstance(d, Diff))
+                mul_args = normalize_product(term)
+                diffs = [d for d in mul_args if isinstance(d, Diff)]
+                factor = prod(d for d in mul_args if not isinstance(d, Diff))
                 if len(diffs) == 0:
                     rest += factor
                 else:
                     for i, diff in enumerate(diffs):
-                        allButCurrent = [d for j, d in enumerate(diffs) if i != j]
-                        preFactor = factor * prod(allButCurrent) * sp.Rational(1, len(diffs))
-                        diffDict[DiffInfo(diff.target, diff.superscript)].append(DiffSplit(preFactor, diff.arg))
+                        all_but_current = [d for j, d in enumerate(diffs) if i != j]
+                        pre_factor = factor * prod(all_but_current) * sp.Rational(1, len(diffs))
+                        diff_dict[DiffInfo(diff.target, diff.superscript)].append(DiffSplit(pre_factor, diff.arg))
 
-        return diffDict, rest
+        return diff_dict, rest
 
-    def matchDiffSplits(own, other):
-        ownFac = own.preFactor / other.argument
-        otherFac = other.preFactor / own.argument
-
-        if sp.count_ops(ownFac) > sp.count_ops(own.preFactor) or sp.count_ops(otherFac) > sp.count_ops(other.preFactor):
+    def match_diff_splits(own, other):
+        own_fac = own.pre_factor / other.argument
+        other_fac = other.pre_factor / own.argument
+        count = sp.count_ops
+        if count(own_fac) > count(own.pre_factor) or count(other_fac) > count(other.pre_factor):
             return None
 
-        newOtherFactor = ownFac - otherFac
-        return newOtherFactor
+        new_other_factor = own_fac - other_fac
+        return new_other_factor
 
-    def processDiffList(diffList, label, superscript):
-        if len(diffList) == 0:
+    def process_diff_list(diff_list, label, superscript):
+        if len(diff_list) == 0:
             return 0
-        elif len(diffList) == 1:
-            return diffList[0].preFactor * Diff(diffList[0].argument, label, superscript)
+        elif len(diff_list) == 1:
+            return diff_list[0].pre_factor * Diff(diff_list[0].argument, label, superscript)
 
         result = 0
         matches = []
-        for i in range(1, len(diffList)):
-            matchResult = matchDiffSplits(diffList[i], diffList[0])
-            if matchResult is not None:
-                matches.append((i, matchResult))
+        for i in range(1, len(diff_list)):
+            match_result = match_diff_splits(diff_list[i], diff_list[0])
+            if match_result is not None:
+                matches.append((i, match_result))
 
         if len(matches) == 0:
-            result += diffList[0].preFactor * Diff(diffList[0].argument, label, superscript)
+            result += diff_list[0].pre_factor * Diff(diff_list[0].argument, label, superscript)
         else:
-            otherIdx, matchResult = sorted(matches, key=lambda e: sp.count_ops(e[1]))[0]
-            newArgument = diffList[0].argument * diffList[otherIdx].argument
-            result += (diffList[0].preFactor / diffList[otherIdx].argument) * Diff(newArgument, label, superscript)
-            if matchResult == 0:
-                del diffList[otherIdx]
+            other_idx, match_result = sorted(matches, key=lambda e: sp.count_ops(e[1]))[0]
+            new_argument = diff_list[0].argument * diff_list[other_idx].argument
+            result += (diff_list[0].pre_factor / diff_list[other_idx].argument) * Diff(new_argument, label, superscript)
+            if match_result == 0:
+                del diff_list[other_idx]
             else:
-                diffList[otherIdx].preFactor = matchResult * diffList[0].argument
-        result += processDiffList(diffList[1:], label, superscript)
+                diff_list[other_idx].pre_factor = match_result * diff_list[0].argument
+        result += process_diff_list(diff_list[1:], label, superscript)
         return result
 
-    expr = expr.expand()
-    if isinstance(expr, sp.Add):
-        diffDict, rest = exprToDiffDecomposition(expr)
-        for (label, superscript), diffList in diffDict.items():
-            rest += processDiffList(diffList, label, superscript)
-        return rest
-    else:
-        newArgs = [combineUsingProductRule(e) for e in expr.args]
-        return expr.func(*newArgs) if newArgs else expr
+    def combine(expression):
+        expression = expression.expand()
+        if isinstance(expression, sp.Add):
+            diff_dict, rest = expr_to_diff_decomposition(expression)
+            for (label, superscript), diff_list in diff_dict.items():
+                rest += process_diff_list(diff_list, label, superscript)
+            return rest
+        else:
+            new_args = [combine_using_product_rule(e) for e in expression.args]
+            return expression.func(*new_args) if new_args else expression
 
+    return combine(expr)
 
-def replaceDiff(expr, replacementDict):
-    """replacementDict: maps variable (target) to a new Differential operator"""
+
+def replace_diff(expr, replacement_dict):
+    """replacement_dict: maps variable (target) to a new Differential operator"""
 
     def visit(e):
         if isinstance(e, Diff):
-            if e.target in replacementDict:
-                return DiffOperator.apply(replacementDict[e.target], visit(e.arg))
-        newArgs = [visit(arg) for arg in e.args]
-        return e.func(*newArgs) if newArgs else e
+            if e.target in replacement_dict:
+                return DiffOperator.apply(replacement_dict[e.target], visit(e.arg))
+        new_args = [visit(arg) for arg in e.args]
+        return e.func(*new_args) if new_args else e
 
     return visit(expr)
 
 
-def zeroDiffs(expr, label):
+def zero_diffs(expr, label):
     """Replaces all differentials with the given target by 0"""
     def visit(e):
         if isinstance(e, Diff):
             if e.target == label:
                 return 0
-        newArgs = [visit(arg) for arg in e.args]
-        return e.func(*newArgs) if newArgs else e
+        new_args = [visit(arg) for arg in e.args]
+        return e.func(*new_args) if new_args else e
     return visit(expr)
 
 
-def evaluateDiffs(expr, var=None):
+def evaluate_diffs(expr, var=None):
     """Replaces Diff nodes by sp.diff , the free variable is either the target (if var=None) otherwise
     the specified var"""
     if isinstance(expr, Diff):
         if var is None:
             var = expr.target
-        return sp.diff(evaluateDiffs(expr.arg, var), var)
+        return sp.diff(evaluate_diffs(expr.arg, var), var)
     else:
-        newArgs = [evaluateDiffs(arg, var) for arg in expr.args]
-        return expr.func(*newArgs) if newArgs else expr
+        new_args = [evaluate_diffs(arg, var) for arg in expr.args]
+        return expr.func(*new_args) if new_args else expr
 
 
-def functionalDerivative(functional, v):
-    """
+def functional_derivative(functional, v):
+    r"""
     Computes the functional derivative of functional with respect to v using the Euler-Lagrange equation
 
     .. math ::
@@ -458,18 +461,18 @@ def functionalDerivative(functional, v):
       of the derivative terms.
     """
     diffs = functional.atoms(Diff)
-    bulkSubstitutions = {d: sp.Dummy() for d in diffs}
-    bulkSubstitutionsInverse = {v: k for k, v in bulkSubstitutions.items()}
-    nonDiffPart = functional.subs(bulkSubstitutions)
-    partialF_partialV = sp.diff(nonDiffPart, v).subs(bulkSubstitutionsInverse)
+    bulk_substitutions = {d: sp.Dummy() for d in diffs}
+    bulk_substitutions_inverse = {v: k for k, v in bulk_substitutions.items()}
+    non_diff_part = functional.subs(bulk_substitutions)
+    partial_f_partial_v = sp.diff(non_diff_part, v).subs(bulk_substitutions_inverse)
 
-    gradientPart = 0
-    for diffObj in diffs:
-        if diffObj.args[0] != v:
+    gradient_part = 0
+    for diff_obj in diffs:
+        if diff_obj.args[0] != v:
             continue
         dummy = sp.Dummy()
-        partialF_partialGradV = functional.subs(diffObj, dummy).diff(dummy).subs(dummy, diffObj)
-        gradientPart += Diff(partialF_partialGradV, target=diffObj.target, superscript=diffObj.superscript)
+        partial_f_partial_grad_v = functional.subs(diff_obj, dummy).diff(dummy).subs(dummy, diff_obj)
+        gradient_part += Diff(partial_f_partial_grad_v, target=diff_obj.target, superscript=diff_obj.superscript)
 
-    result = partialF_partialV - gradientPart
+    result = partial_f_partial_v - gradient_part
     return result
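To see the Euler-Lagrange machinery at the end of this file in action, a hedged sketch on a Landau-type free energy (the symbols and the module path are assumptions, not part of this patch):

    import sympy as sp
    from pystencils.derivative import Diff, functional_derivative

    c, kappa = sp.symbols('c kappa')
    grad_c = Diff(c, target='x')
    # double-well bulk term plus square-gradient interface term
    free_energy = c**4 / 4 - c**2 / 2 + kappa / 2 * grad_c**2
    mu = functional_derivative(free_energy, c)
    # -> c**3 - c - Diff(kappa*Diff(c, x), x): the bulk derivative minus the
    #    derivative of the gradient-dependent part
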
diff --git a/display_utils.py b/display_utils.py
index 30341e2345ec99a721600551385941445cdc9be2..35da77adb9781980443a91249c2cd5161d23c06d 100644
--- a/display_utils.py
+++ b/display_utils.py
@@ -37,18 +37,18 @@ def show_code(ast: KernelFunction):
 
     Can either be displayed as HTML in Jupyter notebooks or printed as a normal string.
     """
-    from pystencils.cpu import print_c
+    from pystencils.cpu import generate_c
 
     class CodeDisplay:
         def __init__(self, ast_input):
             self.ast = ast_input
 
         def _repr_html_(self):
-            return highlight_cpp(print_c(self.ast)).__html__()
+            return highlight_cpp(generate_c(self.ast)).__html__()
 
         def __str__(self):
-            return print_c(self.ast)
+            return generate_c(self.ast)
 
         def __repr__(self):
-            return print_c(self.ast)
+            return generate_c(self.ast)
     return CodeDisplay(ast)
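A hedged usage note for the display helper above; `create_kernel` and the `assignments` list are assumed to exist as in the package's top-level API:

    from pystencils import create_kernel, show_code

    # hedged sketch: 'assignments' is any list of pystencils Assignments
    ast = create_kernel(assignments)
    print(show_code(ast))  # __str__ / __repr__ fall back to generate_c
    show_code(ast)         # in Jupyter, _repr_html_ renders highlighted C code
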
diff --git a/field.py b/field.py
index 6085a314f9c576bc99b1ab040f22955eca7b240b..4e6fb3acd8f9f6e3c3dc9e2b2fd52a500e861f9f 100644
--- a/field.py
+++ b/field.py
@@ -1,5 +1,6 @@
 from enum import Enum
 from itertools import chain
+from typing import Tuple, Sequence, Optional, List
 import numpy as np
 import sympy as sp
 from sympy.core.cache import cacheit
@@ -21,17 +22,17 @@ class FieldType(Enum):
     BUFFER = 2
 
     @staticmethod
-    def isGeneric(field):
+    def is_generic(field):
         assert isinstance(field, Field)
         return field.fieldType == FieldType.GENERIC
 
     @staticmethod
-    def isIndexed(field):
+    def is_indexed(field):
         assert isinstance(field, Field)
         return field.fieldType == FieldType.INDEXED
 
     @staticmethod
-    def isBuffer(field):
+    def is_buffer(field):
         assert isinstance(field, Field)
         return field.fieldType == FieldType.BUFFER
 
@@ -48,13 +49,13 @@ class Field(object):
         1. create a kernel with fixed loop sizes i.e. the shape of the array is already known. This is usually the
-           case if just-in-time compilation directly from Python is done. (see :func:`Field.createFromNumpyArray`)
+           case if just-in-time compilation directly from Python is done. (see :func:`Field.create_from_numpy_array`)
         2. create a more general kernel that works for variable array sizes. This can be used to create kernels
-           beforehand for a library. (see :func:`Field.createGeneric`)
+           beforehand for a library. (see :func:`Field.create_generic`)
 
     Dimensions:
         A field has spatial and index dimensions, where the spatial dimensions come first.
         The interpretation is that the field has multiple cells in (usually) two or three dimensional space which are
-        looped over. Additionally  N values are stored per cell. In this case spatialDimensions is two or three,
-        and indexDimensions equals N. If you want to store a matrix on each point in a two dimensional grid, there
+        looped over. Additionally, N values are stored per cell. In this case spatial_dimensions is two or three,
+        and index_dimensions equals N. If you want to store a matrix at each point in a two-dimensional grid, there
         are four dimensions, two spatial and two index dimensions: ``len(arr.shape) == spatialDims + indexDims``
 
     Indexing:
@@ -64,13 +65,13 @@ class Field(object):
 
     Example without index dimensions:
         >>> a = np.zeros([10, 10])
-        >>> f = Field.createFromNumpyArray("f", a, indexDimensions=0)
+        >>> f = Field.create_from_numpy_array("f", a, index_dimensions=0)
         >>> jacobi = ( f[-1,0] + f[1,0] + f[0,-1] + f[0,1] ) / 4
 
     Example with index dimensions: LBM D2Q9 stream pull
         >>> stencil = np.array([[0,0], [0,1], [0,-1]])
-        >>> src = Field.createGeneric("src", spatialDimensions=2, indexDimensions=1)
-        >>> dst = Field.createGeneric("dst", spatialDimensions=2, indexDimensions=1)
+        >>> src = Field.create_generic("src", spatial_dimensions=2, index_dimensions=1)
+        >>> dst = Field.create_generic("dst", spatial_dimensions=2, index_dimensions=1)
         >>> for i, offset in enumerate(stencil):
         ...     Assignment(dst[0,0](i), src[-offset](i))
         Assignment(dst_C^0, src_C^0)
@@ -79,128 +80,136 @@ class Field(object):
     """
 
     @staticmethod
-    def createGeneric(fieldName, spatialDimensions, dtype=np.float64, indexDimensions=0, layout='numpy',
-                      indexShape=None, fieldType=FieldType.GENERIC):
+    def create_generic(field_name, spatial_dimensions, dtype=np.float64, index_dimensions=0, layout='numpy',
+                       index_shape=None, field_type=FieldType.GENERIC) -> 'Field':
         """
         Creates a generic field where the field size is not fixed i.e. can be called with arrays of different sizes
 
-        :param fieldName: symbolic name for the field
-        :param dtype: numpy data type of the array the kernel is called with later
-        :param spatialDimensions: see documentation of Field
-        :param indexDimensions: see documentation of Field
-        :param layout: tuple specifying the loop ordering of the spatial dimensions e.g. (2, 1, 0 ) means that
-                       the outer loop loops over dimension 2, the second outer over dimension 1, and the inner loop
-                       over dimension 0. Also allowed: the strings 'numpy' (0,1,..d) or 'reverseNumpy' (d, ..., 1, 0)
-        :param indexShape: optional shape of the index dimensions i.e. maximum values allowed for each index dimension,
-                           has to be a list or tuple
+        Args:
+            field_name: symbolic name for the field
+            dtype: numpy data type of the array the kernel is called with later
+            spatial_dimensions: see documentation of Field
+            index_dimensions: see documentation of Field
+            layout: tuple specifying the loop ordering of the spatial dimensions, e.g. (2, 1, 0) means that
+                    the outermost loop runs over dimension 2, the next one over dimension 1, and the innermost
+                    loop over dimension 0. Also allowed: the strings 'numpy' (0, 1, ..., d) or
+                    'reverseNumpy' (d, ..., 1, 0)
+            index_shape: optional shape of the index dimensions, i.e. maximum values allowed for each index
+                        dimension; has to be a list or tuple
+            field_type: besides the normal GENERIC fields, there are INDEXED fields that store indices of the domain
+                        that should be iterated over, and BUFFER fields that are used to generate
+                        communication packing/unpacking kernels
         """
         if isinstance(layout, str):
-            layout = spatialLayoutStringToTuple(layout, dim=spatialDimensions)
-        shapeSymbol = IndexedBase(TypedSymbol(Field.SHAPE_PREFIX + fieldName, Field.SHAPE_DTYPE), shape=(1,))
-        strideSymbol = IndexedBase(TypedSymbol(Field.STRIDE_PREFIX + fieldName, Field.STRIDE_DTYPE), shape=(1,))
-        totalDimensions = spatialDimensions + indexDimensions
-        if indexShape is None or len(indexShape) == 0:
-            shape = tuple([shapeSymbol[i] for i in range(totalDimensions)])
+            layout = spatial_layout_string_to_tuple(layout, dim=spatial_dimensions)
+        shape_symbol = IndexedBase(TypedSymbol(Field.SHAPE_PREFIX + field_name, Field.SHAPE_DTYPE), shape=(1,))
+        stride_symbol = IndexedBase(TypedSymbol(Field.STRIDE_PREFIX + field_name, Field.STRIDE_DTYPE), shape=(1,))
+        total_dimensions = spatial_dimensions + index_dimensions
+        if index_shape is None or len(index_shape) == 0:
+            shape = tuple([shape_symbol[i] for i in range(total_dimensions)])
         else:
-            shape = tuple([shapeSymbol[i] for i in range(spatialDimensions)] + list(indexShape))
+            shape = tuple([shape_symbol[i] for i in range(spatial_dimensions)] + list(index_shape))
 
-        strides = tuple([strideSymbol[i] for i in range(totalDimensions)])
+        strides = tuple([stride_symbol[i] for i in range(total_dimensions)])
 
-        npDataType = np.dtype(dtype)
-        if npDataType.fields is not None:
-            if indexDimensions != 0:
+        np_data_type = np.dtype(dtype)
+        if np_data_type.fields is not None:
+            if index_dimensions != 0:
                 raise ValueError("Structured arrays/fields are not allowed to have an index dimension")
             shape += (1,)
             strides += (1,)
 
-        return Field(fieldName, fieldType, dtype, layout, shape, strides)
+        return Field(field_name, field_type, dtype, layout, shape, strides)
 
     @staticmethod
-    def createFromNumpyArray(fieldName, npArray, indexDimensions=0):
-        """
-        Creates a field based on the layout, data type, and shape of a given numpy array.
+    def create_from_numpy_array(field_name: str, array: np.ndarray, index_dimensions: int = 0) -> 'Field':
+        """Creates a field based on the layout, data type, and shape of a given numpy array.
+
-        Kernels created for these kind of fields can only be called with arrays of the same layout, shape and type.
+        Kernels created for this kind of field can only be called with arrays of the same layout, shape, and type.
-        :param fieldName: symbolic name for the field
-        :param npArray: numpy array
-        :param indexDimensions: see documentation of Field
+
+        Args:
+            field_name: symbolic name for the field
+            array: numpy array
+            index_dimensions: see documentation of Field
         """
-        spatialDimensions = len(npArray.shape) - indexDimensions
-        if spatialDimensions < 1:
+        spatial_dimensions = len(array.shape) - index_dimensions
+        if spatial_dimensions < 1:
             raise ValueError("Too many index dimensions. At least one spatial dimension required")
 
-        fullLayout = getLayoutOfArray(npArray)
-        spatialLayout = tuple([i for i in fullLayout if i < spatialDimensions])
-        assert len(spatialLayout) == spatialDimensions
+        full_layout = get_layout_of_array(array)
+        spatial_layout = tuple([i for i in full_layout if i < spatial_dimensions])
+        assert len(spatial_layout) == spatial_dimensions
 
-        strides = tuple([s // np.dtype(npArray.dtype).itemsize for s in npArray.strides])
-        shape = tuple(int(s) for s in npArray.shape)
+        strides = tuple([s // np.dtype(array.dtype).itemsize for s in array.strides])
+        shape = tuple(int(s) for s in array.shape)
 
-        npDataType = np.dtype(npArray.dtype)
-        if npDataType.fields is not None:
-            if indexDimensions != 0:
+        numpy_dtype = np.dtype(array.dtype)
+        if numpy_dtype.fields is not None:
+            if index_dimensions != 0:
                 raise ValueError("Structured arrays/fields are not allowed to have an index dimension")
             shape += (1,)
             strides += (1,)
 
-        return Field(fieldName, FieldType.GENERIC, npArray.dtype, spatialLayout, shape, strides)
+        return Field(field_name, FieldType.GENERIC, array.dtype, spatial_layout, shape, strides)
 
     @staticmethod
-    def createFixedSize(fieldName, shape, indexDimensions=0, dtype=np.float64, layout='numpy', strides=None):
+    def create_fixed_size(field_name: str, shape: Tuple[int, ...], index_dimensions: int = 0,
+                          dtype=np.float64, layout: str = 'numpy', strides: Optional[Sequence[int]] = None) -> 'Field':
         """
         Creates a field with fixed sizes i.e. can be called only with arrays of the same size and layout
 
-        :param fieldName: symbolic name for the field
-        :param shape: overall shape of the array
-        :param indexDimensions: how many of the trailing dimensions are interpreted as index (as opposed to spatial)
-        :param dtype: numpy data type of the array the kernel is called with later
-        :param layout: full layout of array, not only spatial dimensions
-        :param strides: strides in bytes or None to automatically compute them from shape (assuming no padding)
+        Args:
+            field_name: symbolic name for the field
+            shape: overall shape of the array
+            index_dimensions: how many of the trailing dimensions are interpreted as index (as opposed to spatial)
+            dtype: numpy data type of the array the kernel is called with later
+            layout: full layout of array, not only spatial dimensions
+            strides: strides in bytes or None to automatically compute them from shape (assuming no padding)
         """
-        spatialDimensions = len(shape) - indexDimensions
-        assert spatialDimensions >= 1
+        spatial_dimensions = len(shape) - index_dimensions
+        assert spatial_dimensions >= 1
 
         if isinstance(layout, str):
-            layout = layoutStringToTuple(layout, spatialDimensions + indexDimensions)
+            layout = layout_string_to_tuple(layout, spatial_dimensions + index_dimensions)
 
         shape = tuple(int(s) for s in shape)
         if strides is None:
-            strides = computeStrides(shape, layout)
+            strides = compute_strides(shape, layout)
         else:
             assert len(strides) == len(shape)
             strides = tuple([s // np.dtype(dtype).itemsize for s in strides])
 
-        npDataType = np.dtype(dtype)
-        if npDataType.fields is not None:
-            if indexDimensions != 0:
+        numpy_dtype = np.dtype(dtype)
+        if numpy_dtype.fields is not None:
+            if index_dimensions != 0:
                 raise ValueError("Structured arrays/fields are not allowed to have an index dimension")
             shape += (1,)
             strides += (1,)
 
-        spatialLayout = list(layout)
-        for i in range(spatialDimensions, len(layout)):
-            spatialLayout.remove(i)
-        return Field(fieldName, FieldType.GENERIC, dtype, tuple(spatialLayout), shape, strides)
+        spatial_layout = list(layout)
+        for i in range(spatial_dimensions, len(layout)):
+            spatial_layout.remove(i)
+        return Field(field_name, FieldType.GENERIC, dtype, tuple(spatial_layout), shape, strides)
 
-    def __init__(self, fieldName, fieldType, dtype, layout, shape, strides):
+    def __init__(self, field_name, field_type, dtype, layout, shape, strides):
         """Do not use directly. Use static create* methods"""
-        self._fieldName = fieldName
-        assert isinstance(fieldType, FieldType)
-        self.fieldType = fieldType
+        self._fieldName = field_name
+        assert isinstance(field_type, FieldType)
+        self.fieldType = field_type
         self._dtype = create_type(dtype)
-        self._layout = normalizeLayout(layout)
+        self._layout = normalize_layout(layout)
         self.shape = shape
         self.strides = strides
-        self.latexName = None
+        self.latex_name: Optional[str] = None
 
-    def newFieldWithDifferentName(self, newName):
-        return Field(newName, self.fieldType, self._dtype, self._layout, self.shape, self.strides)
+    def new_field_with_different_name(self, new_name):
+        return Field(new_name, self.fieldType, self._dtype, self._layout, self.shape, self.strides)
 
     @property
-    def spatialDimensions(self):
+    def spatial_dimensions(self) -> int:
         return len(self._layout)
 
     @property
-    def indexDimensions(self):
+    def index_dimensions(self) -> int:
         return len(self.shape) - len(self._layout)
 
     @property
@@ -208,36 +217,32 @@ class Field(object):
         return self._layout
 
     @property
-    def name(self):
+    def name(self) -> str:
         return self._fieldName
 
     @property
-    def spatialShape(self):
-        return self.shape[:self.spatialDimensions]
-
-    @property
-    def indexShape(self):
-        return self.shape[self.spatialDimensions:]
+    def spatial_shape(self) -> Tuple[int, ...]:
+        return self.shape[:self.spatial_dimensions]
 
     @property
-    def hasFixedShape(self):
+    def has_fixed_shape(self):
         return is_integer_sequence(self.shape)
 
     @property
-    def indexShape(self):
-        return self.shape[self.spatialDimensions:]
+    def index_shape(self):
+        return self.shape[self.spatial_dimensions:]
 
     @property
-    def hasFixedIndexShape(self):
-        return is_integer_sequence(self.indexShape)
+    def has_fixed_index_shape(self):
+        return is_integer_sequence(self.index_shape)
 
     @property
-    def spatialStrides(self):
-        return self.strides[:self.spatialDimensions]
+    def spatial_strides(self):
+        return self.strides[:self.spatial_dimensions]
 
     @property
-    def indexStrides(self):
-        return self.strides[self.spatialDimensions:]
+    def index_strides(self):
+        return self.strides[self.spatial_dimensions:]
 
     @property
     def dtype(self):
@@ -246,56 +251,55 @@ class Field(object):
     def __repr__(self):
         return self._fieldName
 
-    def neighbor(self, coordId, offset):
-        offsetList = [0] * self.spatialDimensions
-        offsetList[coordId] = offset
-        return Field.Access(self, tuple(offsetList))
+    def neighbor(self, coord_id, offset):
+        offset_list = [0] * self.spatial_dimensions
+        offset_list[coord_id] = offset
+        return Field.Access(self, tuple(offset_list))
 
     def neighbors(self, stencil):
         return [self.__getitem__(s) for s in stencil]
 
     @property
-    def vecCenter(self):
-        indexShape = self.indexShape
-        if len(indexShape) == 0:
+    def center_vector(self):
+        index_shape = self.index_shape
+        if len(index_shape) == 0:
             return self.center
-        elif len(indexShape) == 1:
-            return sp.Matrix([self(i) for i in range(indexShape[0])])
-        elif len(indexShape) == 2:
+        elif len(index_shape) == 1:
+            return sp.Matrix([self(i) for i in range(index_shape[0])])
+        elif len(index_shape) == 2:
             def cb(*args):
                 r = self.__call__(*args)
                 return r
-            return sp.Matrix(*indexShape, cb)
+            return sp.Matrix(*index_shape, cb)
 
     @property
     def center(self):
-        center = tuple([0] * self.spatialDimensions)
+        center = tuple([0] * self.spatial_dimensions)
         return Field.Access(self, center)
 
     def __getitem__(self, offset):
         if type(offset) is np.ndarray:
             offset = tuple(offset)
         if type(offset) is str:
-            offset = tuple(directionStringToOffset(offset, self.spatialDimensions))
+            offset = tuple(direction_string_to_offset(offset, self.spatial_dimensions))
         if type(offset) is not tuple:
             offset = (offset,)
-        if len(offset) != self.spatialDimensions:
+        if len(offset) != self.spatial_dimensions:
             raise ValueError("Wrong number of spatial indices: "
-                             "Got %d, expected %d" % (len(offset), self.spatialDimensions))
+                             "Got %d, expected %d" % (len(offset), self.spatial_dimensions))
         return Field.Access(self, offset)
 
     def __call__(self, *args, **kwargs):
-        center = tuple([0]*self.spatialDimensions)
+        center = tuple([0] * self.spatial_dimensions)
         return Field.Access(self, center)(*args, **kwargs)
 
     def __hash__(self):
         return hash((self._layout, self.shape, self.strides, self._dtype, self.fieldType, self._fieldName))
 
     def __eq__(self, other):
-        selfTuple = (self.shape, self.strides, self.name, self.dtype, self.fieldType)
-        otherTuple = (other.shape, other.strides, other.name, other.dtype, other.fieldType)
-        return selfTuple == otherTuple
-
+        self_tuple = (self.shape, self.strides, self.name, self.dtype, self.fieldType)
+        other_tuple = (other.shape, other.strides, other.name, other.dtype, other.fieldType)
+        return self_tuple == other_tuple
 
     PREFIX = "f"
     STRIDE_PREFIX = PREFIX + "stride_"
@@ -304,41 +308,42 @@ class Field(object):
     SHAPE_DTYPE = create_composite_type_from_string("const int *")
     DATA_PREFIX = PREFIX + "d_"
 
+    # noinspection PyAttributeOutsideInit,PyUnresolvedReferences
     class Access(sp.Symbol):
         def __new__(cls, name, *args, **kwargs):
             obj = Field.Access.__xnew_cached_(cls, name, *args, **kwargs)
             return obj
 
         def __new_stage2__(self, field, offsets=(0, 0, 0), idx=None):
-            fieldName = field.name
-            offsetsAndIndex = chain(offsets, idx) if idx is not None else offsets
-            constantOffsets = not any([isinstance(o, sp.Basic) and not o.is_Integer for o in offsetsAndIndex])
+            field_name = field.name
+            offsets_and_index = tuple(chain(offsets, idx)) if idx is not None else tuple(offsets)
+            constant_offsets = not any([isinstance(o, sp.Basic) and not o.is_Integer for o in offsets_and_index])
 
             if not idx:
-                idx = tuple([0] * field.indexDimensions)
+                idx = tuple([0] * field.index_dimensions)
 
-            if constantOffsets:
-                offsetName = offsetToDirectionString(offsets)
-                if field.indexDimensions == 0:
+            if constant_offsets:
+                offset_name = offset_to_direction_string(offsets)
+                if field.index_dimensions == 0:
                     superscript = None
-                elif field.indexDimensions == 1:
+                elif field.index_dimensions == 1:
                     superscript = str(idx[0])
                 else:
-                    idxStr = ",".join([str(e) for e in idx])
-                    superscript = idxStr
-                if field.hasFixedIndexShape and not isinstance(field.dtype, StructType):
-                    for i, bound in zip(idx, field.indexShape):
+                    idx_str = ",".join([str(e) for e in idx])
+                    superscript = idx_str
+                if field.has_fixed_index_shape and not isinstance(field.dtype, StructType):
+                    for i, bound in zip(idx, field.index_shape):
                         if i >= bound:
                             raise ValueError("Field index out of bounds")
             else:
-                offsetName = "%0.10X" % (abs(hash(tuple(offsetsAndIndex))))
+                offset_name = "%0.10X" % (abs(hash(tuple(offsets_and_index))))
                 superscript = None
 
-            symbolName = "%s_%s" % (fieldName, offsetName)
+            symbol_name = "%s_%s" % (field_name, offset_name)
             if superscript is not None:
-                symbolName += "^" + superscript
+                symbol_name += "^" + superscript
 
-            obj = super(Field.Access, self).__xnew__(self, symbolName)
+            obj = super(Field.Access, self).__xnew__(self, symbol_name)
             obj._field = field
             obj._offsets = []
             for o in offsets:
@@ -346,7 +351,7 @@ class Field(object):
                     obj._offsets.append(o)
                 else:
                     obj._offsets.append(int(o))
-            obj._offsetName = offsetName
+            obj._offsetName = offset_name
             obj._superscript = superscript
             obj._index = idx
 
@@ -355,21 +360,23 @@ class Field(object):
         def __getnewargs__(self):
             return self.field, self.offsets, self.index
 
+        # noinspection SpellCheckingInspection
         __xnew__ = staticmethod(__new_stage2__)
+        # noinspection SpellCheckingInspection
         __xnew_cached_ = staticmethod(cacheit(__new_stage2__))
 
         def __call__(self, *idx):
-            if self._index != tuple([0]*self.field.indexDimensions):
+            if self._index != tuple([0] * self.field.index_dimensions):
                 raise ValueError("Indexing an already indexed Field.Access")
 
             idx = tuple(idx)
 
-            if self.field.indexDimensions == 0 and idx == (0,):
+            if self.field.index_dimensions == 0 and idx == (0,):
                 idx = ()
 
-            if len(idx) != self.field.indexDimensions:
+            if len(idx) != self.field.index_dimensions:
                 raise ValueError("Wrong number of indices: "
-                                 "Got %d, expected %d" % (len(idx), self.field.indexDimensions))
+                                 "Got %d, expected %d" % (len(idx), self.field.index_dimensions))
             return Field.Access(self.field, self._offsets, idx)
 
         def __getitem__(self, *idx):
@@ -393,110 +400,97 @@ class Field(object):
             self._offsets = value
 
         @property
-        def requiredGhostLayers(self):
+        def required_ghost_layers(self):
             return int(np.max(np.abs(self._offsets)))
 
         @property
-        def nrOfCoordinates(self):
+        def nr_of_coordinates(self):
             return len(self._offsets)
 
         @property
-        def offsetName(self):
+        def offset_name(self) -> str:
             return self._offsetName
 
-        def _latex(self, arg):
-            n = self._field.latexName if self._field.latexName else self._field.name
-            if self._superscript:
-                return "{{%s}_{%s}^{%s}}" % (n, self._offsetName, self._superscript)
-            else:
-                return "{{%s}_{%s}}" % (n, self._offsetName)
-
         @property
         def index(self):
             return self._index
 
-        def getNeighbor(self, *offsets):
+        def get_neighbor(self, *offsets) -> 'Field.Access':
             return Field.Access(self.field, offsets, self.index)
 
-        def neighbor(self, coordId, offset):
-            offsetList = list(self.offsets)
-            offsetList[coordId] += offset
-            return Field.Access(self.field, tuple(offsetList), self.index)
+        def neighbor(self, coord_id: int, offset: int) -> 'Field.Access':
+            offset_list = list(self.offsets)
+            offset_list[coord_id] += offset
+            return Field.Access(self.field, tuple(offset_list), self.index)
 
-        def getShifted(self, *shift):
+        def get_shifted(self, *shift) -> 'Field.Access':
             return Field.Access(self.field, tuple(a + b for a, b in zip(shift, self.offsets)), self.index)
 
         def _hashable_content(self):
-            superClassContents = list(super(Field.Access, self)._hashable_content())
-            t = tuple(superClassContents + [hash(self._field), self._index] + self._offsets)
+            super_class_contents = list(super(Field.Access, self)._hashable_content())
+            t = tuple(super_class_contents + [hash(self._field), self._index] + self._offsets)
             return t
 
-
-def extractCommonSubexpressions(equations):
-    """
-    Uses sympy to find common subexpressions in equations and returns
-    them in a topologically sorted order, ready for evaluation.
-    Usually called before list of equations is passed to :func:`createKernel`
-    """
-    replacements, new_eq = sp.cse(equations)
-    # Workaround for older sympy versions: here subexpressions (temporary = True) are extracted
-    # which leads to problems in Piecewise functions which have to a default case indicated by True
-    symbols_equal_to_true = {r[0]: True for r in replacements if r[1] is sp.true}
-
-    replacement_eqs = [Assignment(*r) for r in replacements if r[1] is not sp.true]
-    equations = replacement_eqs + new_eq
-    topologically_sorted_pairs = sp.cse_main.reps_toposort([[e.lhs, e.rhs] for e in equations])
-    equations = [Assignment(a[0], a[1].subs(symbols_equal_to_true)) for a in topologically_sorted_pairs]
-    return equations
+        def _latex(self, _):
+            n = self._field.latex_name if self._field.latex_name else self._field.name
+            if self._superscript:
+                return "{{%s}_{%s}^{%s}}" % (n, self._offsetName, self._superscript)
+            else:
+                return "{{%s}_{%s}}" % (n, self._offsetName)
 
 
-def getLayoutFromStrides(strides, indexDimensionIds=[]):
+def get_layout_from_strides(strides: Sequence[int], index_dimension_ids: Optional[List[int]] = None):
+    index_dimension_ids = [] if index_dimension_ids is None else index_dimension_ids
     coordinates = list(range(len(strides)))
-    relevant_strides = [stride for i, stride in enumerate(strides) if i not in indexDimensionIds]
+    relevant_strides = [stride for i, stride in enumerate(strides) if i not in index_dimension_ids]
     result = [x for (y, x) in sorted(zip(relevant_strides, coordinates), key=lambda pair: pair[0], reverse=True)]
-    return normalizeLayout(result)
+    return normalize_layout(result)
 
 
-def getLayoutOfArray(arr, indexDimensionIds=[]):
-    """
-    Returns a list indicating the memory layout (linearization order) of the numpy array.
-    Example:
-    >>> getLayoutOfArray(np.zeros([3,3,3]))
-    (0, 1, 2)
+def get_layout_of_array(arr: np.ndarray, index_dimension_ids: Optional[List[int]] = None):
+    """ Returns a list indicating the memory layout (linearization order) of the numpy array.
+
+    Examples:
+        >>> get_layout_of_array(np.zeros([3,3,3]))
+        (0, 1, 2)
 
     In this example the loop over the zeroth coordinate should be the outermost loop,
     followed by the first and second. Elements arr[x,y,0] and arr[x,y,1] are adjacent in memory.
     Normally constructed numpy arrays have this order, however by stride tricks or other frameworks, arrays
     with different memory layout can be created.
 
-    The indexDimensionIds parameter leaves specifies which coordinates should not be
+    The index_dimension_ids parameter specifies which coordinates are index dimensions and thus excluded from the layout.
     """
-    return getLayoutFromStrides(arr.strides, indexDimensionIds)
+    index_dimension_ids = [] if index_dimension_ids is None else index_dimension_ids
+    return get_layout_from_strides(arr.strides, index_dimension_ids)
 
 
-def createNumpyArrayWithLayout(shape, layout, alignment=False, byteOffset=0, **kwargs):
-    """
-    Creates a numpy array with
-    :param shape: shape of the resulting array
-    :param layout: layout as tuple, where the coordinates are ordered from slow to fast
-    :param alignment: number of bytes to align the beginning and the innermost coordinate to, or False for no alignment
-    :param byteOffset: only used when alignment is specified, align not beginning but address at this offset
-                       mostly used to align first inner cell, not ghost cells
-    >>> res = createNumpyArrayWithLayout(shape=(2, 3, 4, 5), layout=(3, 2, 0, 1))
-    >>> res.shape
-    (2, 3, 4, 5)
-    >>> getLayoutOfArray(res)
-    (3, 2, 0, 1)
+def create_numpy_array_with_layout(shape, layout, alignment=False, byte_offset=0, **kwargs):
+    """Creates numpy array with given memory layout.
+
+    Args:
+        shape: shape of the resulting array
+        layout: layout as tuple, where the coordinates are ordered from slow to fast
+        alignment: number of bytes to align the beginning and the innermost coordinate to, or False for no alignment
+        byte_offset: only used when alignment is specified, align not beginning but address at this offset
+                     mostly used to align first inner cell, not ghost cells
+
+    Example:
+        >>> res = create_numpy_array_with_layout(shape=(2, 3, 4, 5), layout=(3, 2, 0, 1))
+        >>> res.shape
+        (2, 3, 4, 5)
+        >>> get_layout_of_array(res)
+        (3, 2, 0, 1)
     """
     assert set(layout) == set(range(len(shape))), "Wrong layout descriptor"
-    currentLayout = list(range(len(shape)))
+    cur_layout = list(range(len(shape)))
     swaps = []
     for i in range(len(layout)):
-        if currentLayout[i] != layout[i]:
-            indexToSwapWith = currentLayout.index(layout[i])
-            swaps.append((i, indexToSwapWith))
-            currentLayout[i], currentLayout[indexToSwapWith] = currentLayout[indexToSwapWith], currentLayout[i]
-    assert tuple(currentLayout) == tuple(layout)
+        if cur_layout[i] != layout[i]:
+            index_to_swap_with = cur_layout.index(layout[i])
+            swaps.append((i, index_to_swap_with))
+            cur_layout[i], cur_layout[index_to_swap_with] = cur_layout[index_to_swap_with], cur_layout[i]
+    assert tuple(cur_layout) == tuple(layout)
 
     shape = list(shape)
     for a, b in swaps:
@@ -507,57 +501,61 @@ def createNumpyArrayWithLayout(shape, layout, alignment=False, byteOffset=0, **k
     else:
         if alignment is True:
             alignment = 8 * 4
-        res = aligned_empty(shape, alignment, byteOffset=byteOffset, **kwargs)
+        res = aligned_empty(shape, alignment, byte_offset=byte_offset, **kwargs)
 
     for a, b in reversed(swaps):
         res = res.swapaxes(a, b)
     return res
 
 
-def spatialLayoutStringToTuple(layoutStr, dim):
-    if layoutStr in ('fzyx', 'zyxf'):
+def spatial_layout_string_to_tuple(layout_str: str, dim: int) -> Tuple[int, ...]:
+    if layout_str in ('fzyx', 'zyxf'):
         assert dim <= 3
         return tuple(reversed(range(dim)))
 
-    if layoutStr in ('fzyx', 'f', 'reverseNumpy', 'SoA'):
+    if layout_str in ('f', 'reverseNumpy', 'SoA'):
         return tuple(reversed(range(dim)))
-    elif layoutStr in ('c', 'numpy', 'AoS'):
+    elif layout_str in ('c', 'numpy', 'AoS'):
         return tuple(range(dim))
-    raise ValueError("Unknown layout descriptor " + layoutStr)
+    raise ValueError("Unknown layout descriptor " + layout_str)
 
 
-def layoutStringToTuple(layoutStr, dim):
-    layoutStr = layoutStr.lower()
-    if layoutStr == 'fzyx' or layoutStr == 'soa':
+def layout_string_to_tuple(layout_str, dim):
+    layout_str = layout_str.lower()
+    if layout_str == 'fzyx' or layout_str == 'soa':
         assert dim <= 4
         return tuple(reversed(range(dim)))
-    elif layoutStr == 'zyxf' or layoutStr == 'aos':
+    elif layout_str == 'zyxf' or layout_str == 'aos':
         assert dim <= 4
         return tuple(reversed(range(dim - 1))) + (dim-1,)
-    elif layoutStr == 'f' or layoutStr == 'reversenumpy':
+    elif layout_str == 'f' or layout_str == 'reversenumpy':
         return tuple(reversed(range(dim)))
-    elif layoutStr == 'c' or layoutStr == 'numpy':
+    elif layout_str == 'c' or layout_str == 'numpy':
         return tuple(range(dim))
-    raise ValueError("Unknown layout descriptor " + layoutStr)
+    raise ValueError("Unknown layout descriptor " + layout_str)
 
 
-def normalizeLayout(layout):
+def normalize_layout(layout):
     """Takes a layout tuple and subtracts the minimum from all entries"""
-    minEntry = min(layout)
-    return tuple(i - minEntry for i in layout)
+    min_entry = min(layout)
+    return tuple(i - min_entry for i in layout)
 
 
-def computeStrides(shape, layout):
+def compute_strides(shape, layout):
     """
     Computes strides assuming no padding exists
-    :param shape: shape (size) of array
-    :param layout: layout specification as tuple
-    :return: strides in elements, not in bytes
+
+    Args:
+        shape: shape (size) of array
+        layout: layout specification as tuple
+
+    Returns:
+        strides in elements, not in bytes
     """
-    N = len(shape)
-    assert len(layout) == N
-    assert len(set(layout)) == N
-    strides = [0] * N
+    dim = len(shape)
+    assert len(layout) == dim
+    assert len(set(layout)) == dim
+    strides = [0] * dim
     product = 1
     for j in reversed(layout):
         strides[j] = product
@@ -565,72 +563,78 @@ def computeStrides(shape, layout):
     return tuple(strides)
 
 
-def offsetComponentToDirectionString(coordinateId, value):
-    """
-    Translates numerical offset to string notation.
+def offset_component_to_direction_string(coordinate_id: int, value: int) -> str:
+    """Translates numerical offset to string notation.
+
-    x offsets are labeled with east 'E' and 'W',
-    y offsets with north 'N' and 'S' and
-    z offsets with top 'T' and bottom 'B'
+    x offsets are labeled with east 'E' and west 'W',
+    y offsets with north 'N' and south 'S', and
+    z offsets with top 'T' and bottom 'B'.
     If the absolute value of the offset is bigger than 1, this number is prefixed.
-    :param coordinateId: integer 0, 1 or 2 standing for x,y and z
-    :param value: integer offset
 
-    Example:
-    >>> offsetComponentToDirectionString(0, 1)
-    'E'
-    >>> offsetComponentToDirectionString(1, 2)
-    '2N'
+    Args:
+        coordinate_id: integer 0, 1 or 2 standing for x,y and z
+        value: integer offset
+
+    Examples:
+        >>> offset_component_to_direction_string(0, 1)
+        'E'
+        >>> offset_component_to_direction_string(1, 2)
+        '2N'
     """
-    nameComponents = (('W', 'E'),  # west, east
-                      ('S', 'N'),  # south, north
-                      ('B', 'T'),  # bottom, top
-                      )
+    name_components = (('W', 'E'),  # west, east
+                       ('S', 'N'),  # south, north
+                       ('B', 'T'),  # bottom, top
+                       )
     if value == 0:
         result = ""
     elif value < 0:
-        result = nameComponents[coordinateId][0]
+        result = name_components[coordinate_id][0]
     else:
-        result = nameComponents[coordinateId][1]
+        result = name_components[coordinate_id][1]
     if abs(value) > 1:
         result = "%d%s" % (abs(value), result)
     return result
 
 
-def offsetToDirectionString(offsetTuple):
+def offset_to_direction_string(offsets: Sequence[int]) -> str:
     """
     Translates numerical offset to string notation.
-    For details see :func:`offsetComponentToDirectionString`
-    :param offsetTuple: 3-tuple with x,y,z offset
-
-    Example:
-    >>> offsetToDirectionString([1, -1, 0])
-    'SE'
-    >>> offsetToDirectionString(([-3, 0, -2]))
-    '2B3W'
+    For details see :func:`offset_component_to_direction_string`
+    Args:
+        offsets: 3-tuple with x,y,z offset
+
+    Examples:
+        >>> offset_to_direction_string([1, -1, 0])
+        'SE'
+        >>> offset_to_direction_string(([-3, 0, -2]))
+        '2B3W'
     """
     names = ["", "", ""]
-    for i in range(len(offsetTuple)):
-        names[i] = offsetComponentToDirectionString(i, offsetTuple[i])
+    for i in range(len(offsets)):
+        names[i] = offset_component_to_direction_string(i, offsets[i])
     name = "".join(reversed(names))
     if name == "":
         name = "C"
     return name
 
 
-def directionStringToOffset(directionStr, dim=3):
+def direction_string_to_offset(direction: str, dim: int = 3):
     """
-    Reverse mapping of :func:`offsetToDirectionString`
+    Reverse mapping of :func:`offset_to_direction_string`
-    :param directionStr: string representation of offset
-    :param dim: dimension of offset, i.e the length of the returned list
-
-    >>> directionStringToOffset('NW', dim=3)
-    array([-1,  1,  0])
-    >>> directionStringToOffset('NW', dim=2)
-    array([-1,  1])
-    >>> directionStringToOffset(offsetToDirectionString([3,-2,1]))
-    array([ 3, -2,  1])
+
+    Args:
+        direction: string representation of offset
+        dim: dimension of offset, i.e. the length of the returned list
+
+    Examples:
+        >>> direction_string_to_offset('NW', dim=3)
+        array([-1,  1,  0])
+        >>> direction_string_to_offset('NW', dim=2)
+        array([-1,  1])
+        >>> direction_string_to_offset(offset_to_direction_string((3,-2,1)))
+        array([ 3, -2,  1])
     """
-    offsetMap = {
+    offset_dict = {
         'C': np.array([0, 0, 0]),
 
         'W': np.array([-1, 0, 0]),
@@ -644,23 +648,15 @@ def directionStringToOffset(directionStr, dim=3):
     }
     offset = np.array([0, 0, 0])
 
-    while len(directionStr) > 0:
+    while len(direction) > 0:
         factor = 1
-        firstNonDigit = 0
-        while directionStr[firstNonDigit].isdigit():
-            firstNonDigit += 1
-        if firstNonDigit > 0:
-            factor = int(directionStr[:firstNonDigit])
-            directionStr = directionStr[firstNonDigit:]
-        curOffset = offsetMap[directionStr[0]]
-        offset += factor * curOffset
-        directionStr = directionStr[1:]
+        first_non_digit = 0
+        while direction[first_non_digit].isdigit():
+            first_non_digit += 1
+        if first_non_digit > 0:
+            factor = int(direction[:first_non_digit])
+            direction = direction[first_non_digit:]
+        cur_offset = offset_dict[direction[0]]
+        offset += factor * cur_offset
+        direction = direction[1:]
     return offset[:dim]
-
-
-if __name__ == '__main__':
-    f = Field.createGeneric('f', spatialDimensions=2, indexShape=(2,4))
-    f(2, 0)
-    fa = f[0, 1](4) ** 2
-    print(fa)
-    print(sp.latex(fa))
\ No newline at end of file
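Taken together, the renamed field interface reads as below; a short sketch based only on the functions shown above (direction-string access and the layout helpers mirror the doctests):

    import numpy as np
    from pystencils import Field
    from pystencils.field import (create_numpy_array_with_layout,
                                  get_layout_of_array, direction_string_to_offset)

    # generic 2D field with one index dimension, as in the stream-pull doctest
    src = Field.create_generic("src", spatial_dimensions=2, index_dimensions=1)
    east = src["E"](0)                # direction-string access: eastern neighbor, index 0
    shifted = east.get_shifted(0, 1)  # renamed from getShifted; shifts the offset by (0, 1)

    # renamed layout helpers
    res = create_numpy_array_with_layout(shape=(2, 3, 4, 5), layout=(3, 2, 0, 1))
    assert get_layout_of_array(res) == (3, 2, 0, 1)
    assert tuple(direction_string_to_offset('NW', dim=2)) == (-1, 1)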
diff --git a/finitedifferences.py b/finitedifferences.py
index 189e414a85d5cb7d316b246c8301f67949fc880d..db7e4c8c8cd5f9a5f72ace0029c68489934f2d81 100644
--- a/finitedifferences.py
+++ b/finitedifferences.py
@@ -23,112 +23,118 @@ def grad(var, dim=3):
         return [sp.Symbol("%s^Delta^%d" % (var.name, i)) for i in range(dim)]
 
 
-def discretizeCenter(term, symbolsToFieldDict, dx, dim=3):
+def discretize_center(term, symbols_to_field_dict, dx, dim=3):
     """
     Expects term that contains given symbols and gradient components of these symbols and replaces them
     by field accesses. Gradients are replaced by centralized approximations:
     ``(upper neighbor - lower neighbor ) / ( 2*dx)``
     :param term: term where symbols and gradient(symbol) should be replaced
-    :param symbolsToFieldDict: mapping of symbols to Field
+    :param symbols_to_field_dict: mapping of symbols to Field
     :param dx: width and height of one cell
     :param dim: dimension
 
     Example:
       >>> x = sp.Symbol("x")
-      >>> gradx = grad(x, dim=3)
-      >>> term = x * gradx[0]
+      >>> grad_x = grad(x, dim=3)
+      >>> term = x * grad_x[0]
       >>> term
       x*x^Delta^0
-      >>> f = Field.createGeneric('f', spatialDimensions=3)
-      >>> discretizeCenter(term, { x: f }, dx=1, dim=3)
+      >>> f = Field.create_generic('f', spatial_dimensions=3)
+      >>> discretize_center(term, {x: f}, dx=1, dim=3)
       f_C*(f_E/2 - f_W/2)
     """
     substitutions = {}
-    for symbols, field in symbolsToFieldDict.items():
+    for symbols, field in symbols_to_field_dict.items():
         if not hasattr(symbols, "__getitem__"):
             symbols = [symbols]
         g = grad(symbols, dim)
         substitutions.update({symbol: field(i) for i, symbol in enumerate(symbols)})
         for d in range(dim):
-            up, down = __upDownOffsets(d, dim)
+            up, down = __up_down_offsets(d, dim)
             substitutions.update({g[d][i]: (field[up](i) - field[down](i)) / dx / 2 for i in range(len(symbols))})
     return term.subs(substitutions)
 
 
-def discretizeStaggered(term, symbolsToFieldDict, coordinate, coordinateOffset, dx, dim=3):
+def discretize_staggered(term, symbols_to_field_dict, coordinate, coordinate_offset, dx, dim=3):
     """
     Expects term that contains given symbols and gradient components of these symbols and replaces them
-    by field accesses. Gradients in coordinate direction  are replaced by staggered version at cell boundary.
+    by field accesses. Gradients in the coordinate direction are replaced by a staggered version at the cell boundary.
     Symbols themselves and gradients in other directions are replaced by interpolated version at cell face.
 
-    :param term: input term where symbols and gradients are replaced
-    :param symbolsToFieldDict: mapping of symbols to Field
-    :param coordinate: id for coordinate (0 for x, 1 for y, ... ) defining cell boundary.
-                       Only gradients in this direction are replaced e.g. if symbol^Delta^coordinate
-    :param coordinateOffset: either +1 or -1 for upper or lower face in coordinate direction
-    :param dx: width and height of one cell
-    :param dim: dimension
-
-    Example: Discretizing at right/east face of cell i.e. coordinate=0, offset=1)
+    Args:
+        term: input term where symbols and gradients are replaced
+        symbols_to_field_dict: mapping of symbols to Field
+        coordinate: id for coordinate (0 for x, 1 for y, ... ) defining cell boundary.
+                    Only gradients in this direction are replaced, i.e. terms of the form symbol^Delta^coordinate
+        coordinate_offset: either +1 or -1 for upper or lower face in coordinate direction
+        dx: width and height of one cell
+        dim: dimension
+
+    Examples:
+      Discretizing at the right/east face of a cell (i.e. coordinate=0, coordinate_offset=1):
       >>> x, dx = sp.symbols("x dx")
-      >>> gradx = grad(x, dim=3)
-      >>> term = x * gradx[0]
+      >>> grad_x = grad(x, dim=3)
+      >>> term = x * grad_x[0]
       >>> term
       x*x^Delta^0
-      >>> f = Field.createGeneric('f', spatialDimensions=3)
-      >>> discretizeStaggered(term, symbolsToFieldDict={ x: f}, dx=dx, coordinate=0, coordinateOffset=1, dim=3)
+      >>> f = Field.create_generic('f', spatial_dimensions=3)
+      >>> discretize_staggered(term, symbols_to_field_dict={x: f}, dx=dx, coordinate=0, coordinate_offset=1, dim=3)
       (-f_C + f_E)*(f_C/2 + f_E/2)/dx
     """
-    assert coordinateOffset == 1 or coordinateOffset == -1
+    assert coordinate_offset == 1 or coordinate_offset == -1
     assert 0 <= coordinate < dim
 
     substitutions = {}
-    for symbols, field in symbolsToFieldDict.items():
+    for symbols, field in symbols_to_field_dict.items():
         if not hasattr(symbols, "__getitem__"):
             symbols = [symbols]
 
         offset = [0] * dim
-        offset[coordinate] = coordinateOffset
+        offset[coordinate] = coordinate_offset
         offset = np.array(offset, dtype=np.int)
 
         gradient = grad(symbols)[coordinate]
         substitutions.update({s: (field[offset](i) + field(i)) / 2 for i, s in enumerate(symbols)})
-        substitutions.update({g: (field[offset](i) - field(i)) / dx * coordinateOffset for i, g in enumerate(gradient)})
+        substitutions.update({g: (field[offset](i) - field(i)) / dx * coordinate_offset
+                              for i, g in enumerate(gradient)})
         for d in range(dim):
             if d == coordinate:
                 continue
-            up, down = __upDownOffsets(d, dim)
+            up, down = __up_down_offsets(d, dim)
             for i, s in enumerate(symbols):
-                centerGrad = (field[up](i) - field[down](i)) / (2 * dx)
-                neighborGrad = (field[up+offset](i) - field[down+offset](i)) / (2 * dx)
-                substitutions[grad(s)[d]] = (centerGrad + neighborGrad) / 2
+                center_grad = (field[up](i) - field[down](i)) / (2 * dx)
+                neighbor_grad = (field[up+offset](i) - field[down+offset](i)) / (2 * dx)
+                substitutions[grad(s)[d]] = (center_grad + neighbor_grad) / 2
 
     return fast_subs(term, substitutions)
 
 
-def discretizeDivergence(vectorTerm, symbolsToFieldDict, dx):
+def discretize_divergence(vector_term, symbols_to_field_dict, dx):
     """
     Computes discrete divergence of symbolic vector
-    :param vectorTerm: sequence of terms, interpreted as vector
-    :param symbolsToFieldDict: mapping of symbols to Field
-    :param dx: length of a cell
 
-    Example: Laplace stencil
+    Args:
+        vector_term: sequence of terms, interpreted as vector
+        symbols_to_field_dict: mapping of symbols to Field
+        dx: length of a cell
+
+    Examples:
+        Laplace stencil
         >>> x, dx = sp.symbols("x dx")
-        >>> gradX = grad(x, dim=3)
-        >>> f = Field.createGeneric('f', spatialDimensions=3)
-        >>> sp.simplify(discretizeDivergence(gradX, {x : f}, dx))
+        >>> grad_x = grad(x, dim=3)
+        >>> f = Field.create_generic('f', spatial_dimensions=3)
+        >>> sp.simplify(discretize_divergence(grad_x, {x: f}, dx))
         (f_B - 6*f_C + f_E + f_N + f_S + f_T + f_W)/dx**2
     """
-    dim = len(vectorTerm)
+    dim = len(vector_term)
     result = 0
     for d in range(dim):
         for offset in [-1, 1]:
-            result += offset * discretizeStaggered(vectorTerm[d], symbolsToFieldDict, d, offset, dx, dim)
+            result += offset * discretize_staggered(vector_term[d], symbols_to_field_dict, d, offset, dx, dim)
     return result / dx
 
 
-def __upDownOffsets(d, dim):
+def __up_down_offsets(d, dim):
     coord = [0] * dim
     coord[d] = 1
     up = np.array(coord, dtype=np.int)
@@ -153,49 +159,49 @@ class Advection(sp.Function):
             return self.args[1]
 
     @property
-    def scalarIndex(self):
+    def scalar_index(self):
         return None if len(self.args) <= 2 else int(self.args[2])
 
     @property
     def dim(self):
-        return self.scalar.spatialDimensions
+        return self.scalar.spatial_dimensions
 
     def _latex(self, printer):
-        nameSuffix = "_%s" % self.scalarIndex if self.scalarIndex is not None else ""
+        name_suffix = "_%s" % self.scalar_index if self.scalar_index is not None else ""
         if isinstance(self.vector, Field):
             return r"\nabla \cdot(%s %s)" % (printer.doprint(sp.Symbol(self.vector.name)),
-                                             printer.doprint(sp.Symbol(self.scalar.name+nameSuffix)))
+                                             printer.doprint(sp.Symbol(self.scalar.name+name_suffix)))
         else:
-            args = [r"\partial_%d(%s %s)" % (i, printer.doprint(sp.Symbol(self.scalar.name+nameSuffix)),
+            args = [r"\partial_%d(%s %s)" % (i, printer.doprint(sp.Symbol(self.scalar.name+name_suffix)),
                                              printer.doprint(self.vector[i]))
                     for i in range(self.dim)]
             return " + ".join(args)
 
     # --- Interface for discretization strategy
 
-    def velocityFieldAtOffset(self, offsetDim, offsetValue, index):
+    def velocity_field_at_offset(self, offset_dim, offset_value, index):
         v = self.vector
         if isinstance(v, Field):
-            assert v.indexDimensions == 1
-            return v.neighbor(offsetDim, offsetValue)(index)
+            assert v.index_dimensions == 1
+            return v.neighbor(offset_dim, offset_value)(index)
         else:
             return v[index]
 
-    def advectedScalarAtOffset(self, offsetDim, offsetValue):
-        idx = 0 if self.scalarIndex is None else int(self.scalarIndex)
-        return self.scalar.neighbor(offsetDim, offsetValue)(idx)
+    def advected_scalar_at_offset(self, offset_dim, offset_value):
+        idx = 0 if self.scalar_index is None else int(self.scalar_index)
+        return self.scalar.neighbor(offset_dim, offset_value)(idx)
 
 
-def advection(advectedScalar, velocityField, idx=None):
-    """Advection term: divergence( velocityField * advectedScalar )"""
-    if isinstance(advectedScalar, Field):
-        firstArg = advectedScalar.center
-    elif isinstance(advectedScalar, Field.Access):
-        firstArg = advectedScalar
+def advection(advected_scalar, velocity_field, idx=None):
+    """Advection term: divergence( velocity_field * advected_scalar )"""
+    if isinstance(advected_scalar, Field):
+        first_arg = advected_scalar.center
+    elif isinstance(advected_scalar, Field.Access):
+        first_arg = advected_scalar
     else:
         raise ValueError("Advected scalar has to be a pystencils Field or Field.Access")
 
-    args = [firstArg, velocityField if not isinstance(velocityField, Field) else velocityField.center]
+    args = [first_arg, velocity_field if not isinstance(velocity_field, Field) else velocity_field.center]
     if idx is not None:
         args.append(idx)
     return Advection(*args)
@@ -208,49 +214,50 @@ class Diffusion(sp.Function):
         return self.args[0].field
 
     @property
-    def diffusionCoeff(self):
+    def diffusion_coeff(self):
         if isinstance(self.args[1], Field.Access):
             return self.args[1].field
         else:
             return self.args[1]
 
     @property
-    def scalarIndex(self):
+    def scalar_index(self):
         return None if len(self.args) <= 2 else int(self.args[2])
 
     @property
     def dim(self):
-        return self.scalar.spatialDimensions
+        return self.scalar.spatial_dimensions
 
     def _latex(self, printer):
-        nameSuffix = "_%s" % self.scalarIndex if self.scalarIndex is not None else ""
-        diffCoeff = sp.Symbol(self.diffusionCoeff.name) if isinstance(self.diffusionCoeff, Field) else self.diffusionCoeff
-        return r"div(%s \nabla %s)" % (printer.doprint(diffCoeff),
-                                       printer.doprint(sp.Symbol(self.scalar.name+nameSuffix)))
+        name_suffix = "_%s" % self.scalar_index if self.scalar_index is not None else ""
+        coeff = self.diffusion_coeff
+        diff_coeff = sp.Symbol(coeff.name) if isinstance(coeff, Field) else coeff
+        return r"div(%s \nabla %s)" % (printer.doprint(diff_coeff),
+                                       printer.doprint(sp.Symbol(self.scalar.name+name_suffix)))
 
     # --- Interface for discretization strategy
 
-    def diffusionScalarAtOffset(self, offsetDim, offsetValue):
-        idx = 0 if self.scalarIndex is None else self.scalarIndex
-        return self.scalar.neighbor(offsetDim, offsetValue)(idx)
+    def diffusion_scalar_at_offset(self, offset_dim, offset_value):
+        idx = 0 if self.scalar_index is None else self.scalar_index
+        return self.scalar.neighbor(offset_dim, offset_value)(idx)
 
-    def diffusionCoefficientAtOffset(self, offsetDim, offsetValue):
-        d = self.diffusionCoeff
+    def diffusion_coefficient_at_offset(self, offset_dim, offset_value):
+        d = self.diffusion_coeff
         if isinstance(d, Field):
-            return d.neighbor(offsetDim, offsetValue)
+            return d.neighbor(offset_dim, offset_value)
         else:
             return d
 
 
-def diffusion(scalar, diffusionCoeff, idx=None):
+def diffusion(scalar, diffusion_coeff, idx=None):
     if isinstance(scalar, Field):
-        firstArg = scalar.center
+        first_arg = scalar.center
     elif isinstance(scalar, Field.Access):
-        firstArg = scalar
+        first_arg = scalar
     else:
         raise ValueError("Diffused scalar has to be a pystencils Field or Field.Access")
 
-    args = [firstArg, diffusionCoeff if not isinstance(diffusionCoeff, Field) else diffusionCoeff.center]
+    args = [first_arg, diffusion_coeff if not isinstance(diffusion_coeff, Field) else diffusion_coeff.center]
     if idx is not None:
         args.append(idx)
     return Diffusion(*args)
@@ -259,18 +266,18 @@ def diffusion(scalar, diffusionCoeff, idx=None):
 class Transient(sp.Function):
     @property
     def scalar(self):
-        if self.scalarIndex is None:
+        if self.scalar_index is None:
             return self.args[0].field.center
         else:
-            return self.args[0].field(self.scalarIndex)
+            return self.args[0].field(self.scalar_index)
 
     @property
-    def scalarIndex(self):
+    def scalar_index(self):
         return None if len(self.args) <= 1 else int(self.args[1])
 
     def _latex(self, printer):
-        nameSuffix = "_%s" % self.scalarIndex if self.scalarIndex is not None else ""
-        return r"\partial_t %s" % (printer.doprint(sp.Symbol(self.scalar.name+nameSuffix)),)
+        name_suffix = "_%s" % self.scalar_index if self.scalar_index is not None else ""
+        return r"\partial_t %s" % (printer.doprint(sp.Symbol(self.scalar.name+name_suffix)),)
 
 
 def transient(scalar, idx=None):
@@ -291,32 +298,32 @@ class Discretization2ndOrder:
         self.dt = dt
 
     @staticmethod
-    def __diffOrder(e):
+    def __diff_order(e):
         if not isinstance(e, Diff):
             return 0
         else:
-            return 1 + Discretization2ndOrder.__diffOrder(e.args[0])
+            return 1 + Discretization2ndOrder.__diff_order(e.args[0])
 
     def _discretize_diffusion(self, expr):
         result = 0
         for c in range(expr.dim):
-            firstDiffs = [offset *
-                          (expr.diffusionScalarAtOffset(c, offset) * expr.diffusionCoefficientAtOffset(c, offset) -
-                           expr.diffusionScalarAtOffset(0, 0) * expr.diffusionCoefficientAtOffset(0, 0))
-                          for offset in [-1, 1]]
-            result += firstDiffs[1] - firstDiffs[0]
+            first_diffs = [offset *
+                           (expr.diffusion_scalar_at_offset(c, offset) * expr.diffusion_coefficient_at_offset(c, offset)
+                            - expr.diffusion_scalar_at_offset(0, 0) * expr.diffusion_coefficient_at_offset(0, 0))
+                           for offset in [-1, 1]]
+            result += first_diffs[1] - first_diffs[0]
         return result / (self.dx**2)
 
     def _discretize_advection(self, expr):
         result = 0
         for c in range(expr.dim):
-            interpolated = [(expr.advectedScalarAtOffset(c, offset) * expr.velocityFieldAtOffset(c, offset, c) +
-                             expr.advectedScalarAtOffset(c, 0) * expr.velocityFieldAtOffset(c, 0, c)) / 2
+            interpolated = [(expr.advected_scalar_at_offset(c, offset) * expr.velocity_field_at_offset(c, offset, c) +
+                             expr.advected_scalar_at_offset(c, 0) * expr.velocity_field_at_offset(c, 0, c)) / 2
                             for offset in [-1, 1]]
             result += interpolated[1] - interpolated[0]
         return result / self.dx
 
-    def _discretizeSpatial(self, e):
+    def _discretize_spatial(self, e):
         if isinstance(e, Diffusion):
             return self._discretize_diffusion(e)
         elif isinstance(e, Advection):
@@ -324,11 +331,11 @@ class Discretization2ndOrder:
         elif isinstance(e, Diff):
             return self._discretize_diff(e)
         else:
-            newArgs = [self._discretizeSpatial(a) for a in e.args]
-            return e.func(*newArgs) if newArgs else e
+            new_args = [self._discretize_spatial(a) for a in e.args]
+            return e.func(*new_args) if new_args else e
 
     def _discretize_diff(self, e):
-        order = self.__diffOrder(e)
+        order = self.__diff_order(e)
         if order == 1:
             fa = e.args[0]
             index = e.target
@@ -340,7 +347,7 @@ class Discretization2ndOrder:
                 result = (-2 * fa + fa.neighbor(indices[0], -1) + fa.neighbor(indices[0], +1))
             elif indices[0] == indices[1]:
                 result = 0
-                for d in range(fa.field.spatialDimensions):
+                for d in range(fa.field.spatial_dimensions):
                     result += (-2 * fa + fa.neighbor(d, -1) + fa.neighbor(d, +1))
             else:
                 assert all(i >= 0 for i in indices)
@@ -359,18 +366,18 @@ class Discretization2ndOrder:
             return expr.copy(main_assignments=[e for e in expr.main_assignments],
                              subexpressions=[e for e in expr.subexpressions])
 
-        transientTerms = expr.atoms(Transient)
-        if len(transientTerms) == 0:
-            return self._discretizeSpatial(expr)
-        elif len(transientTerms) == 1:
-            transientTerm = transientTerms.pop()
-            solveResult = sp.solve(expr, transientTerm)
-            if len(solveResult) != 1:
-                raise ValueError("Could not solve for transient term" + str(solveResult))
-            rhs = solveResult.pop()
+        transient_terms = expr.atoms(Transient)
+        if len(transient_terms) == 0:
+            return self._discretize_spatial(expr)
+        elif len(transient_terms) == 1:
+            transient_term = transient_terms.pop()
+            solve_result = sp.solve(expr, transient_term)
+            if len(solve_result) != 1:
+                raise ValueError("Could not solve for transient term" + str(solve_result))
+            rhs = solve_result.pop()
             # explicit euler
-            return transientTerm.scalar + self.dt * self._discretizeSpatial(rhs)
+            return transient_term.scalar + self.dt * self._discretize_spatial(rhs)
         else:
-            print(transientTerms)
+            print(transient_terms)
             raise NotImplementedError("Cannot discretize expression with more than one transient term")
 
diff --git a/gpucuda/__init__.py b/gpucuda/__init__.py
index 0a8845b9fd573eca51453b61e0d6c5f3196c2757..a2786d2777dcd889ec2a3089d17eb5302e36f64d 100644
--- a/gpucuda/__init__.py
+++ b/gpucuda/__init__.py
@@ -1,2 +1,2 @@
-from pystencils.gpucuda.kernelcreation import createCUDAKernel, createdIndexedCUDAKernel
-from pystencils.gpucuda.cudajit import makePythonFunction
+from pystencils.gpucuda.kernelcreation import create_cuda_kernel, created_indexed_cuda_kernel
+from pystencils.gpucuda.cudajit import make_python_function
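A hedged sketch of the renamed GPU entry points; create_cuda_kernel's signature is not part of this hunk, so the assignment-list argument is an assumption, and the keyword call on the returned functor follows the wrapper(**kwargs) interface in cudajit below:

    import numpy as np
    import pycuda.autoinit              # noqa: F401 (initializes the CUDA context)
    import pycuda.gpuarray as gpuarray
    from pystencils import Field, Assignment
    from pystencils.gpucuda import create_cuda_kernel, make_python_function

    src_arr = np.random.rand(16, 16)
    dst_arr = np.zeros_like(src_arr)
    src = Field.create_from_numpy_array("src", src_arr)
    dst = Field.create_from_numpy_array("dst", dst_arr)

    jacobi = Assignment(dst[0, 0], (src[1, 0] + src[-1, 0] + src[0, 1] + src[0, -1]) / 4)
    ast = create_cuda_kernel([jacobi])   # assumed signature
    kernel = make_python_function(ast)

    # assumption: device buffers are passed as pycuda gpuarrays keyed by field name
    kernel(src=gpuarray.to_gpu(src_arr), dst=gpuarray.to_gpu(dst_arr))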
diff --git a/gpucuda/cudajit.py b/gpucuda/cudajit.py
index c9eb9a45bf1f81fd0a338a1fc1e98f523710217f..1553ecbfbafc6622a24928f7db590a6a524ee96f 100644
--- a/gpucuda/cudajit.py
+++ b/gpucuda/cudajit.py
@@ -1,142 +1,148 @@
 import numpy as np
-from pystencils.backends.cbackend import print_c
-from pystencils.transformations import symbolNameToVariableName
+from pystencils.backends.cbackend import generate_c
+from pystencils.transformations import symbol_name_to_variable_name
 from pystencils.data_types import StructType, get_base_type
 from pystencils.field import FieldType
 
 
-def makePythonFunction(kernelFunctionNode, argumentDict={}):
+def make_python_function(kernel_function_node, argument_dict=None):
     """
     Creates a kernel function from an abstract syntax tree which
-    was created e.g. by :func:`pystencils.gpucuda.createCUDAKernel`
-    or :func:`pystencils.gpucuda.createdIndexedCUDAKernel`
+    was created e.g. by :func:`pystencils.gpucuda.create_cuda_kernel`
+    or :func:`pystencils.gpucuda.created_indexed_cuda_kernel`
 
-    :param kernelFunctionNode: the abstract syntax tree
-    :param argumentDict: parameters passed here are already fixed. Remaining parameters have to be passed to the
-                        returned kernel functor.
-    :return: kernel functor
+    Args:
+        kernel_function_node: the abstract syntax tree
+        argument_dict: parameters passed here are already fixed. Remaining parameters have to be passed to the
+                       returned kernel functor.
+
+    Returns:
+        compiled kernel as Python function
     """
     import pycuda.autoinit
     from pycuda.compiler import SourceModule
 
+    if argument_dict is None:
+        argument_dict = {}
+
     code = "#include <cstdint>\n"
     code += "#define FUNC_PREFIX __global__\n"
     code += "#define RESTRICT __restrict__\n\n"
-    code += str(print_c(kernelFunctionNode))
+    code += str(generate_c(kernel_function_node))
 
     mod = SourceModule(code, options=["-w", "-std=c++11"])
-    func = mod.get_function(kernelFunctionNode.functionName)
+    func = mod.get_function(kernel_function_node.function_name)
 
-    parameters = kernelFunctionNode.parameters
+    parameters = kernel_function_node.parameters
 
     cache = {}
-    cacheValues = []
+    cache_values = []
 
     def wrapper(**kwargs):
         key = hash(tuple((k, v.ctypes.data, v.strides, v.shape) if isinstance(v, np.ndarray) else (k, id(v))
                          for k, v in kwargs.items()))
         try:
-            args, dictWithBlockAndThreadNumbers = cache[key]
-            func(*args, **dictWithBlockAndThreadNumbers)
+            args, block_and_thread_numbers = cache[key]
+            func(*args, **block_and_thread_numbers)
         except KeyError:
-            fullArguments = argumentDict.copy()
-            fullArguments.update(kwargs)
-            shape = _checkArguments(parameters, fullArguments)
-
-            indexing = kernelFunctionNode.indexing
-            dictWithBlockAndThreadNumbers = indexing.getCallParameters(shape)
-            dictWithBlockAndThreadNumbers['block'] = tuple(int(i) for i in dictWithBlockAndThreadNumbers['block'])
-            dictWithBlockAndThreadNumbers['grid'] = tuple(int(i) for i in dictWithBlockAndThreadNumbers['grid'])
-
-            args = _buildNumpyArgumentList(parameters, fullArguments)
-            cache[key] = (args, dictWithBlockAndThreadNumbers)
-            cacheValues.append(kwargs)  # keep objects alive such that ids remain unique
-            func(*args, **dictWithBlockAndThreadNumbers)
+            full_arguments = argument_dict.copy()
+            full_arguments.update(kwargs)
+            shape = _check_arguments(parameters, full_arguments)
+
+            indexing = kernel_function_node.indexing
+            block_and_thread_numbers = indexing.call_parameters(shape)
+            block_and_thread_numbers['block'] = tuple(int(i) for i in block_and_thread_numbers['block'])
+            block_and_thread_numbers['grid'] = tuple(int(i) for i in block_and_thread_numbers['grid'])
+
+            args = _build_numpy_argument_list(parameters, full_arguments)
+            cache[key] = (args, block_and_thread_numbers)
+            cache_values.append(kwargs)  # keep objects alive such that ids remain unique
+            func(*args, **block_and_thread_numbers)
         #cuda.Context.synchronize() # useful for debugging, to get errors right after kernel was called
-    wrapper.ast = kernelFunctionNode
-    wrapper.parameters = kernelFunctionNode.parameters
+    wrapper.ast = kernel_function_node
+    wrapper.parameters = kernel_function_node.parameters
     return wrapper
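A hedged usage sketch (requires pycuda and a CUDA device; `ast` is assumed to be a kernel AST built with create_cuda_kernel, and the keyword names must match the kernel's fields and symbols, here `src`, `dst` and `omega`):

import numpy as np
import pycuda.gpuarray as gpuarray
from pystencils.gpucuda.cudajit import make_python_function

kernel = make_python_function(ast, argument_dict={'omega': 1.9})  # omega fixed once
src_gpu = gpuarray.to_gpu(np.zeros((32, 32)))
dst_gpu = gpuarray.to_gpu(np.zeros((32, 32)))
kernel(src=src_gpu, dst=dst_gpu)  # remaining parameters are passed per call

Repeated calls with the same arrays hit the cache above and skip the shape and stride checks.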
 
 
-def _buildNumpyArgumentList(parameters, argumentDict):
+def _build_numpy_argument_list(parameters, argument_dict):
     import pycuda.driver as cuda
 
-    argumentDict = {symbolNameToVariableName(k): v for k, v in argumentDict.items()}
+    argument_dict = {symbol_name_to_variable_name(k): v for k, v in argument_dict.items()}
     result = []
     for arg in parameters:
         if arg.isFieldArgument:
-            field = argumentDict[arg.fieldName]
+            field = argument_dict[arg.field_name]
             if arg.isFieldPtrArgument:
-                actualType = field.dtype
-                expectedType = arg.dtype.base_type.numpy_dtype
-                if expectedType != actualType:
+                actual_type = field.dtype
+                expected_type = arg.dtype.base_type.numpy_dtype
+                if expected_type != actual_type:
                     raise ValueError("Data type mismatch for field '%s'. Expected '%s' got '%s'." %
-                                     (arg.fieldName, expectedType, actualType))
+                                     (arg.field_name, expected_type, actual_type))
                 result.append(field)
             elif arg.isFieldStrideArgument:
                 dtype = get_base_type(arg.dtype).numpy_dtype
-                strideArr = np.array(field.strides, dtype=dtype) // field.dtype.itemsize
-                result.append(cuda.In(strideArr))
+                stride_arr = np.array(field.strides, dtype=dtype) // field.dtype.itemsize
+                result.append(cuda.In(stride_arr))
             elif arg.isFieldShapeArgument:
                 dtype = get_base_type(arg.dtype).numpy_dtype
-                shapeArr = np.array(field.shape, dtype=dtype)
-                result.append(cuda.In(shapeArr))
+                shape_arr = np.array(field.shape, dtype=dtype)
+                result.append(cuda.In(shape_arr))
             else:
                 assert False
         else:
-            param = argumentDict[arg.name]
-            expectedType = arg.dtype.numpy_dtype
-            result.append(expectedType.type(param))
+            param = argument_dict[arg.name]
+            expected_type = arg.dtype.numpy_dtype
+            result.append(expected_type.type(param))
     assert len(result) == len(parameters)
     return result
 
 
-def _checkArguments(parameterSpecification, argumentDict):
+def _check_arguments(parameter_specification, argument_dict):
     """
     Checks if parameters passed to kernel match the description in the AST function node.
-    If not it raises a ValueError, on success it returns the array shape that determines the CUDA blocks and threads
+    If not, it raises a ValueError; on success it returns the array shape that determines the CUDA blocks and threads.
     """
-    argumentDict = {symbolNameToVariableName(k): v for k, v in argumentDict.items()}
-    arrayShapes = set()
-    indexArrShapes = set()
-    for arg in parameterSpecification:
+    argument_dict = {symbol_name_to_variable_name(k): v for k, v in argument_dict.items()}
+    array_shapes = set()
+    index_arr_shapes = set()
+    for arg in parameter_specification:
         if arg.isFieldArgument:
             try:
-                fieldArr = argumentDict[arg.fieldName]
+                field_arr = argument_dict[arg.field_name]
             except KeyError:
-                raise KeyError("Missing field parameter for kernel call " + arg.fieldName)
+                raise KeyError("Missing field parameter for kernel call " + arg.field_name)
 
-            symbolicField = arg.field
+            symbolic_field = arg.field
             if arg.isFieldPtrArgument:
-                if symbolicField.hasFixedShape:
-                    symbolicFieldShape = tuple(int(i) for i in symbolicField.shape)
-                    if isinstance(symbolicField.dtype, StructType):
-                        symbolicFieldShape = symbolicFieldShape[:-1]
-                    if symbolicFieldShape != fieldArr.shape:
+                if symbolic_field.has_fixed_shape:
+                    symbolic_field_shape = tuple(int(i) for i in symbolic_field.shape)
+                    if isinstance(symbolic_field.dtype, StructType):
+                        symbolic_field_shape = symbolic_field_shape[:-1]
+                    if symbolic_field_shape != field_arr.shape:
                         raise ValueError("Passed array '%s' has shape %s which does not match expected shape %s" %
-                                         (arg.fieldName, str(fieldArr.shape), str(symbolicField.shape)))
-                if symbolicField.hasFixedShape:
-                    symbolicFieldStrides = tuple(int(i) * fieldArr.dtype.itemsize for i in symbolicField.strides)
-                    if isinstance(symbolicField.dtype, StructType):
-                        symbolicFieldStrides = symbolicFieldStrides[:-1]
-                    if symbolicFieldStrides != fieldArr.strides:
+                                         (arg.field_name, str(field_arr.shape), str(symbolic_field_shape)))
+                if symbolic_field.has_fixed_shape:
+                    symbolic_field_strides = tuple(int(i) * field_arr.dtype.itemsize for i in symbolic_field.strides)
+                    if isinstance(symbolic_field.dtype, StructType):
+                        symbolic_field_strides = symbolic_field_strides[:-1]
+                    if symbolic_field_strides != field_arr.strides:
                         raise ValueError("Passed array '%s' has strides %s which does not match expected strides %s" %
-                                         (arg.fieldName, str(fieldArr.strides), str(symbolicFieldStrides)))
+                                         (arg.field_name, str(field_arr.strides), str(symbolic_field_strides)))
 
-                if FieldType.isIndexed(symbolicField):
-                    indexArrShapes.add(fieldArr.shape[:symbolicField.spatialDimensions])
-                elif not FieldType.isBuffer(symbolicField):
-                    arrayShapes.add(fieldArr.shape[:symbolicField.spatialDimensions])
+                if FieldType.is_indexed(symbolic_field):
+                    index_arr_shapes.add(field_arr.shape[:symbolic_field.spatial_dimensions])
+                elif not FieldType.is_buffer(symbolic_field):
+                    array_shapes.add(field_arr.shape[:symbolic_field.spatial_dimensions])
 
-    if len(arrayShapes) > 1:
-        raise ValueError("All passed arrays have to have the same size " + str(arrayShapes))
-    if len(indexArrShapes) > 1:
-        raise ValueError("All passed index arrays have to have the same size " + str(arrayShapes))
+    if len(array_shapes) > 1:
+        raise ValueError("All passed arrays have to have the same size " + str(array_shapes))
+    if len(index_arr_shapes) > 1:
+        raise ValueError("All passed index arrays have to have the same size " + str(array_shapes))
 
-    if len(indexArrShapes) > 0:
-        return list(indexArrShapes)[0]
+    if len(index_arr_shapes) > 0:
+        return list(index_arr_shapes)[0]
     else:
-        return list(arrayShapes)[0]
+        return list(array_shapes)[0]
 
 
 
diff --git a/gpucuda/indexing.py b/gpucuda/indexing.py
index e699c7657573478d94592f6be656d1b304662eab..cc76344e8dd34591193996cc17113b0157806189 100644
--- a/gpucuda/indexing.py
+++ b/gpucuda/indexing.py
@@ -1,13 +1,14 @@
 import abc
+from typing import Tuple
 
 import sympy as sp
 
 from pystencils.astnodes import Conditional, Block
-from pystencils.slicing import normalizeSlice
+from pystencils.slicing import normalize_slice
 from pystencils.data_types import TypedSymbol, create_type
 from functools import partial
 
-AUTO_BLOCKSIZE_LIMITING = True
+AUTO_BLOCK_SIZE_LIMITING = True
 
 BLOCK_IDX = [TypedSymbol("blockIdx." + coord, create_type("int")) for coord in ('x', 'y', 'z')]
 THREAD_IDX = [TypedSymbol("threadIdx." + coord, create_type("int")) for coord in ('x', 'y', 'z')]
@@ -15,7 +16,7 @@ BLOCK_DIM = [TypedSymbol("blockDim." + coord, create_type("int")) for coord in (
 GRID_DIM = [TypedSymbol("gridDim." + coord, create_type("int")) for coord in ('x', 'y', 'z')]
 
 
-class AbstractIndexing(abc.ABCMeta('ABC', (object,), {})):
+class AbstractIndexing(abc.ABC):
     """
     Abstract base class for all Indexing classes. An Indexing class defines how a multidimensional
     field is mapped to CUDA's block and grid system. It calculates indices based on CUDA's thread and block indices
@@ -23,32 +24,33 @@ class AbstractIndexing(abc.ABCMeta('ABC', (object,), {})):
     a pystencils field, a slice to iterate over, and further optional parameters that must have default values.
     """
 
-    @abc.abstractproperty
+    @property
+    @abc.abstractmethod
     def coordinates(self):
         """Returns a sequence of coordinate expressions for (x,y,z) depending on symbolic CUDA block and thread indices.
-        These symbolic indices can be obtained with the method `indexVariables` """
+        These symbolic indices can be obtained with the property `index_variables` """
 
     @property
-    def indexVariables(self):
+    def index_variables(self):
         """Sympy symbols for CUDA's block and thread indices, and block and grid dimensions. """
         return BLOCK_IDX + THREAD_IDX + BLOCK_DIM + GRID_DIM
 
     @abc.abstractmethod
-    def getCallParameters(self, arrShape):
+    def call_parameters(self, arr_shape):
         """
         Determine grid and block size for kernel call
-        :param arrShape: the numeric (not symbolic) shape of the array
+        :param arr_shape: the numeric (not symbolic) shape of the array
-        :return: dict with keys 'blocks' and 'threads' with tuple values for number of (x,y,z) threads and blocks
-                 the kernel should be started with
+        :return: dict with keys 'block' and 'grid', each holding a tuple with the number of (x, y, z) threads and
+                 blocks the kernel should be started with
         """
 
     @abc.abstractmethod
-    def guard(self, kernelContent, arrShape):
+    def guard(self, kernel_content, arr_shape):
         """
         In some indexing schemes not all threads of a block execute the kernel content.
         This function can return a Conditional ast node, defining this execution guard.
-        :param kernelContent: the actual kernel contents which can e.g. be put into the Conditional node as true block
-        :param arrShape: the numeric or symbolic shape of the field
+        :param kernel_content: the actual kernel contents which can e.g. be put into the Conditional node as true block
+        :param arr_shape: the numeric or symbolic shape of the field
         :return: ast node, which is put inside the kernel function
         """
 
@@ -57,70 +59,71 @@ class AbstractIndexing(abc.ABCMeta('ABC', (object,), {})):
 
 
 class BlockIndexing(AbstractIndexing):
-    """Generic indexing scheme that maps sub-blocks of an array to CUDA blocks."""
+    """Generic indexing scheme that maps sub-blocks of an array to CUDA blocks.
 
-    def __init__(self, field, iterationSlice=None,
-                 blockSize=(256, 8, 1), permuteBlockSizeDependentOnLayout=True):
-        """
-        Creates
-        :param field: pystencils field (common to all Indexing classes)
-        :param iterationSlice: slice that defines rectangular subarea which is iterated over
-        :param permuteBlockSizeDependentOnLayout: if True the blockSize is permuted such that the fastest coordinate
-                                                  gets the largest amount of threads
-        """
-        if field.spatialDimensions > 3:
+    Args:
+        field: pystencils field (common to all Indexing classes)
+        iteration_slice: slice that defines the rectangular subarea that is iterated over
+        block_size: size of a CUDA block, i.e. the number of threads per block in (x, y, z)
+        permute_block_size_dependent_on_layout: if True the block_size is permuted such that the fastest coordinate
+                                                gets the largest amount of threads
+    """
+
+    def __init__(self, field, iteration_slice=None,
+                 block_size=(256, 8, 1), permute_block_size_dependent_on_layout=True):
+        if field.spatial_dimensions > 3:
             raise NotImplementedError("This indexing scheme supports at most 3 spatial dimensions")
 
-        if permuteBlockSizeDependentOnLayout:
-            blockSize = self.permuteBlockSizeAccordingToLayout(blockSize, field.layout)
+        if permute_block_size_dependent_on_layout:
+            block_size = self.permute_block_size_according_to_layout(block_size, field.layout)
 
-        if AUTO_BLOCKSIZE_LIMITING:
-            blockSize = self.limitBlockSizeToDeviceMaximum(blockSize)
+        if AUTO_BLOCK_SIZE_LIMITING:
+            block_size = self.limit_block_size_to_device_maximum(block_size)
             
-        self._blockSize = blockSize
-        self._iterationSlice = normalizeSlice(iterationSlice, field.spatialShape)
-        self._dim = field.spatialDimensions
-        self._symbolicShape = [e if isinstance(e, sp.Basic) else None for e in field.spatialShape]
+        self._blockSize = block_size
+        self._iterationSlice = normalize_slice(iteration_slice, field.spatial_shape)
+        self._dim = field.spatial_dimensions
+        self._symbolicShape = [e if isinstance(e, sp.Basic) else None for e in field.spatial_shape]
 
     @property
     def coordinates(self):
-        offsets = _getStartFromSlice(self._iterationSlice)
+        offsets = _get_start_from_slice(self._iterationSlice)
         coordinates = [blockIndex * bs + threadIdx + off
                        for blockIndex, bs, threadIdx, off in zip(BLOCK_IDX, self._blockSize, THREAD_IDX, offsets)]
 
         return coordinates[:self._dim]
 
-    def getCallParameters(self, arrShape):
-        substitutionDict = {sym: value for sym, value in zip(self._symbolicShape, arrShape) if sym is not None}
+    def call_parameters(self, arr_shape):
+        substitution_dict = {sym: value for sym, value in zip(self._symbolicShape, arr_shape) if sym is not None}
 
-        widths = [end - start for start, end in zip(_getStartFromSlice(self._iterationSlice),
-                                                    _getEndFromSlice(self._iterationSlice, arrShape))]
-        widths = sp.Matrix(widths).subs(substitutionDict)
+        widths = [end - start for start, end in zip(_get_start_from_slice(self._iterationSlice),
+                                                    _get_end_from_slice(self._iterationSlice, arr_shape))]
+        widths = sp.Matrix(widths).subs(substitution_dict)
 
-        grid = tuple(sp.ceiling(length / blockSize) for length, blockSize in zip(widths, self._blockSize))
-        extendBs = (1,) * (3 - len(self._blockSize))
-        extendGr = (1,) * (3 - len(grid))
+        grid: Tuple[int, ...] = tuple(sp.ceiling(length / blockSize)
+                                      for length, blockSize in zip(widths, self._blockSize))
+        extend_bs = (1,) * (3 - len(self._blockSize))
+        extend_gr = (1,) * (3 - len(grid))
 
-        return {'block': self._blockSize + extendBs,
-                'grid': grid + extendGr}
+        return {'block': self._blockSize + extend_bs,
+                'grid': grid + extend_gr}
 
-    def guard(self, kernelContent, arrShape):
-        arrShape = arrShape[:self._dim]
+    def guard(self, kernel_content, arr_shape):
+        arr_shape = arr_shape[:self._dim]
         conditions = [c < end
-                      for c, end in zip(self.coordinates, _getEndFromSlice(self._iterationSlice, arrShape))]
+                      for c, end in zip(self.coordinates, _get_end_from_slice(self._iterationSlice, arr_shape))]
         condition = conditions[0]
         for c in conditions[1:]:
             condition = sp.And(condition, c)
-        return Block([Conditional(condition, kernelContent)])
+        return Block([Conditional(condition, kernel_content)])
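A sketch of the resulting call parameters (sizes illustrative; constructing the indexing queries the device via pycuda while AUTO_BLOCK_SIZE_LIMITING is active):

from pystencils import Field
from pystencils.gpucuda.indexing import BlockIndexing

f = Field.create_generic("f", 2)                   # 2D field with symbolic shape
indexing = BlockIndexing(f, iteration_slice=[slice(0, 64), slice(0, 64)],
                         block_size=(16, 16, 1))
params = indexing.call_parameters((64, 64))
print(params['block'], params['grid'])             # e.g. (16, 16, 1) (4, 4, 1)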
 
     @staticmethod
-    def limitBlockSizeToDeviceMaximum(blockSize):
+    def limit_block_size_to_device_maximum(block_size):
         """
-        Changes blocksize according to match device limits according to the following rules:
+        Changes the block size to match device limits, applying the following rules:
         1) if the total amount of threads is too big for the current device, the biggest coordinate is divided by 2.
         2) next, if one component is still too big, the component which is too big is divided by 2 and the smallest
            component is multiplied by 2, such that the total amount of threads stays the same
-        Returns the altered blockSize
+        Returns the altered block_size
         """
         # Get device limits
         import pycuda.driver as cuda
@@ -129,10 +132,10 @@ class BlockIndexing(AbstractIndexing):
         da = cuda.device_attribute
         device = cuda.Context.get_device()
 
-        blockSize = list(blockSize)
-        maxThreads = device.get_attribute(da.MAX_THREADS_PER_BLOCK)
-        maxBlockSize = [device.get_attribute(a)
-                        for a in (da.MAX_BLOCK_DIM_X, da.MAX_BLOCK_DIM_Y, da.MAX_BLOCK_DIM_Z)]
+        block_size = list(block_size)
+        max_threads = device.get_attribute(da.MAX_THREADS_PER_BLOCK)
+        max_block_size = [device.get_attribute(a)
+                          for a in (da.MAX_BLOCK_DIM_X, da.MAX_BLOCK_DIM_Y, da.MAX_BLOCK_DIM_Z)]
 
         def prod(seq):
             result = 1
@@ -140,42 +143,42 @@ class BlockIndexing(AbstractIndexing):
                 result *= e
             return result
 
-        def getIndexOfTooBigElement(blockSize):
-            for i, bs in enumerate(blockSize):
-                if bs > maxBlockSize[i]:
+        def get_index_of_too_big_element():
+            for i, bs in enumerate(block_size):
+                if bs > max_block_size[i]:
                     return i
             return None
 
-        def getIndexOfTooSmallElement(blockSize):
-            for i, bs in enumerate(blockSize):
-                if bs // 2 <= maxBlockSize[i]:
+        def get_index_of_too_small_element():
+            for i, bs in enumerate(block_size):
+                if bs // 2 <= max_block_size[i]:
                     return i
             return None
 
         # Reduce the total number of threads if necessary
-        while prod(blockSize) > maxThreads:
-            itemToReduce = blockSize.index(max(blockSize))
-            for i, bs in enumerate(blockSize):
-                if bs > maxBlockSize[i]:
-                    itemToReduce = i
-            blockSize[itemToReduce] //= 2
+        while prod(block_size) > max_threads:
+            item_to_reduce = block_size.index(max(block_size))
+            for j, block_size_entry in enumerate(block_size):
+                if block_size_entry > max_block_size[j]:
+                    item_to_reduce = j
+            block_size[item_to_reduce] //= 2
 
         # Cap individual elements
-        tooBigElementIndex = getIndexOfTooBigElement(blockSize)
-        while tooBigElementIndex is not None:
-            tooSmallElementIndex = getIndexOfTooSmallElement(blockSize)
-            blockSize[tooSmallElementIndex] *= 2
-            blockSize[tooBigElementIndex] //= 2
-            tooBigElementIndex = getIndexOfTooBigElement(blockSize)
+        too_big_element_index = get_index_of_too_big_element()
+        while too_big_element_index is not None:
+            too_small_element_index = get_index_of_too_small_element()
+            block_size[too_small_element_index] *= 2
+            block_size[too_big_element_index] //= 2
+            too_big_element_index = get_index_of_too_big_element()
 
-        return tuple(blockSize)
+        return tuple(block_size)
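A worked example of the two rules, assuming device limits of MAX_THREADS_PER_BLOCK = 1024 and per-dimension maxima (1024, 1024, 64):

# rule 1: (256, 8, 1) requests 2048 threads > 1024, so the largest entry is halved:
assert 128 * 8 * 1 <= 1024                # -> (128, 8, 1)
# rule 2: (1, 1, 128) exceeds the z-limit of 64, so z is halved and the smallest
# entry is doubled, keeping the total thread count constant:
assert 2 * 1 * 64 == 1 * 1 * 128          # -> (2, 1, 64)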
 
     @staticmethod
-    def limitBlockSizeByRegisterRestriction(blockSize, requiredRegistersPerThread, device=None):
-        """Shrinks the blockSize if there are too many registers used per multiprocessor.
-        This is not done automatically, since the requiredRegistersPerThread are not known before compilation.
+    def limit_block_size_by_register_restriction(block_size, required_registers_per_thread, device=None):
+        """Shrinks the block_size if there are too many registers used per multiprocessor.
+        This is not done automatically, since the required_registers_per_thread are not known before compilation.
         They can be obtained by ``func.num_regs`` from a pycuda function.
-        :returns smaller blockSize if too many registers are used.
+        :returns: a smaller block_size if too many registers are used.
         """
         import pycuda.driver as cuda
         import pycuda.autoinit
@@ -183,32 +186,32 @@ class BlockIndexing(AbstractIndexing):
         da = cuda.device_attribute
         if device is None:
             device = cuda.Context.get_device()
-        availableRegistersPerMP = device.get_attribute(da.MAX_REGISTERS_PER_MULTIPROCESSOR)
+        available_registers_per_mp = device.get_attribute(da.MAX_REGISTERS_PER_MULTIPROCESSOR)
 
-        block = blockSize
+        block = list(block_size)  # mutable copy, entries may be halved below
 
         while True:
-            numThreads = 1
+            num_threads = 1
             for t in block:
-                numThreads *= t
-            requiredRegistersPerMT = numThreads * requiredRegistersPerThread
-            if requiredRegistersPerMT <= availableRegistersPerMP:
+                num_threads *= t
+            required_registers_per_mt = num_threads * required_registers_per_thread
+            if required_registers_per_mt <= available_registers_per_mp:
                 return block
             else:
-                largestGridEntryIdx = max(range(len(block)), key=lambda e: block[e])
-                assert block[largestGridEntryIdx] >= 2
-                block[largestGridEntryIdx] //= 2
+                largest_grid_entry_idx = max(range(len(block)), key=lambda e: block[e])
+                assert block[largest_grid_entry_idx] >= 2
+                block[largest_grid_entry_idx] //= 2
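For example, with an assumed 65536 registers per multiprocessor and a kernel needing 128 registers per thread (obtainable as func.num_regs, see the docstring):

# (256, 4, 1): 1024 threads * 128 registers = 131072 > 65536 -> halve the largest entry
# (128, 4, 1):  512 threads * 128 registers =  65536 <= 65536 -> returned
assert 512 * 128 <= 65536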
 
     @staticmethod
-    def permuteBlockSizeAccordingToLayout(blockSize, layout):
-        """Returns modified blockSize such that the fastest coordinate gets the biggest block dimension"""
-        sortedBlockSize = list(sorted(blockSize, reverse=True))
-        while len(sortedBlockSize) > len(layout):
-            sortedBlockSize[0] *= sortedBlockSize[-1]
-            sortedBlockSize = sortedBlockSize[:-1]
-
-        result = list(blockSize)
-        for l, bs in zip(reversed(layout), sortedBlockSize):
+    def permute_block_size_according_to_layout(block_size, layout):
+        """Returns modified block_size such that the fastest coordinate gets the biggest block dimension"""
+        sorted_block_size = list(sorted(block_size, reverse=True))
+        while len(sorted_block_size) > len(layout):
+            sorted_block_size[0] *= sorted_block_size[-1]
+            sorted_block_size = sorted_block_size[:-1]
+
+        result = list(block_size)
+        for l, bs in zip(reversed(layout), sorted_block_size):
             result[l] = bs
         return tuple(result[:len(layout)])
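For instance, for a 2D field with c-layout (0, 1), i.e. coordinate 1 fastest:

from pystencils.gpucuda.indexing import BlockIndexing

# the three entries are first reduced to two (32 and 8), then the largest one is
# assigned to the fastest coordinate:
print(BlockIndexing.permute_block_size_according_to_layout((8, 32, 1), (0, 1)))  # (8, 32)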
 
@@ -221,50 +224,50 @@ class LineIndexing(AbstractIndexing):
     maximum amount of threads allowed in a CUDA block (which depends on device).
     """
 
-    def __init__(self, field, iterationSlice=None):
-        availableIndices = [THREAD_IDX[0]] + BLOCK_IDX
-        if field.spatialDimensions > 4:
+    def __init__(self, field, iteration_slice=None):
+        available_indices = [THREAD_IDX[0]] + BLOCK_IDX
+        if field.spatial_dimensions > 4:
             raise NotImplementedError("This indexing scheme supports at most 4 spatial dimensions")
 
-        coordinates = availableIndices[:field.spatialDimensions]
+        coordinates = available_indices[:field.spatial_dimensions]
 
-        fastestCoordinate = field.layout[-1]
-        coordinates[0], coordinates[fastestCoordinate] = coordinates[fastestCoordinate], coordinates[0]
+        fastest_coordinate = field.layout[-1]
+        coordinates[0], coordinates[fastest_coordinate] = coordinates[fastest_coordinate], coordinates[0]
 
         self._coordinates = coordinates
-        self._iterationSlice = normalizeSlice(iterationSlice, field.spatialShape)
-        self._symbolicShape = [e if isinstance(e, sp.Basic) else None for e in field.spatialShape]
+        self._iterationSlice = normalize_slice(iteration_slice, field.spatial_shape)
+        self._symbolicShape = [e if isinstance(e, sp.Basic) else None for e in field.spatial_shape]
 
     @property
     def coordinates(self):
-        return [i + offset for i, offset in zip(self._coordinates, _getStartFromSlice(self._iterationSlice))]
+        return [i + offset for i, offset in zip(self._coordinates, _get_start_from_slice(self._iterationSlice))]
 
-    def getCallParameters(self, arrShape):
-        substitutionDict = {sym: value for sym, value in zip(self._symbolicShape, arrShape) if sym is not None}
+    def call_parameters(self, arr_shape):
+        substitution_dict = {sym: value for sym, value in zip(self._symbolicShape, arr_shape) if sym is not None}
 
-        widths = [end - start for start, end in zip(_getStartFromSlice(self._iterationSlice),
-                                                    _getEndFromSlice(self._iterationSlice, arrShape))]
-        widths = sp.Matrix(widths).subs(substitutionDict)
+        widths = [end - start for start, end in zip(_get_start_from_slice(self._iterationSlice),
+                                                    _get_end_from_slice(self._iterationSlice, arr_shape))]
+        widths = sp.Matrix(widths).subs(substitution_dict)
 
-        def getShapeOfCudaIdx(cudaIdx):
-            if cudaIdx not in self._coordinates:
+        def get_shape_of_cuda_idx(cuda_idx):
+            if cuda_idx not in self._coordinates:
                 return 1
             else:
-                idx = self._coordinates.index(cudaIdx)
+                idx = self._coordinates.index(cuda_idx)
                 return widths[idx]
 
-        return {'block': tuple([getShapeOfCudaIdx(idx) for idx in THREAD_IDX]),
-                'grid': tuple([getShapeOfCudaIdx(idx) for idx in BLOCK_IDX])}
+        return {'block': tuple([get_shape_of_cuda_idx(idx) for idx in THREAD_IDX]),
+                'grid': tuple([get_shape_of_cuda_idx(idx) for idx in BLOCK_IDX])}
 
-    def guard(self, kernelContent, arrShape):
-        return kernelContent
+    def guard(self, kernel_content, arr_shape):
+        return kernel_content
 
 
 # -------------------------------------- Helper functions --------------------------------------------------------------
 
-def _getStartFromSlice(iterationSlice):
+def _get_start_from_slice(iteration_slice):
     res = []
-    for sliceComponent in iterationSlice:
+    for sliceComponent in iteration_slice:
         if type(sliceComponent) is slice:
             res.append(sliceComponent.start if sliceComponent.start is not None else 0)
         else:
@@ -273,10 +276,10 @@ def _getStartFromSlice(iterationSlice):
     return res
 
 
-def _getEndFromSlice(iterationSlice, arrShape):
-    iterSlice = normalizeSlice(iterationSlice, arrShape)
+def _get_end_from_slice(iteration_slice, arr_shape):
+    iter_slice = normalize_slice(iteration_slice, arr_shape)
     res = []
-    for sliceComponent in iterSlice:
+    for sliceComponent in iter_slice:
         if type(sliceComponent) is slice:
             res.append(sliceComponent.stop)
         else:
@@ -285,16 +288,16 @@ def _getEndFromSlice(iterationSlice, arrShape):
     return res
 
 
-def indexingCreatorFromParams(gpuIndexing, gpuIndexingParams):
-    if isinstance(gpuIndexing, str):
-        if gpuIndexing == 'block':
-            indexingCreator = BlockIndexing
-        elif gpuIndexing == 'line':
-            indexingCreator = LineIndexing
+def indexing_creator_from_params(gpu_indexing, gpu_indexing_params):
+    if isinstance(gpu_indexing, str):
+        if gpu_indexing == 'block':
+            indexing_creator = BlockIndexing
+        elif gpu_indexing == 'line':
+            indexing_creator = LineIndexing
         else:
-            raise ValueError("Unknown GPU indexing %s. Valid values are 'block' and 'line'" % (gpuIndexing,))
-        if gpuIndexingParams:
-            indexingCreator = partial(indexingCreator, **gpuIndexingParams)
-        return indexingCreator
+            raise ValueError("Unknown GPU indexing %s. Valid values are 'block' and 'line'" % (gpu_indexing,))
+        if gpu_indexing_params:
+            indexing_creator = partial(indexing_creator, **gpu_indexing_params)
+        return indexing_creator
     else:
-        return gpuIndexing
+        return gpu_indexing
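A small dispatch sketch (parameter values illustrative):

from pystencils.gpucuda.indexing import indexing_creator_from_params

creator = indexing_creator_from_params('block', {'block_size': (32, 4, 1)})
# creator is functools.partial(BlockIndexing, block_size=(32, 4, 1)) and can be
# passed on as the indexing_creator argument of create_cuda_kernel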
diff --git a/gpucuda/kernelcreation.py b/gpucuda/kernelcreation.py
index 2ef07df5108d6afdd90fa036383474c7fcd06857..1c72101c556379aed12c73e4172788a24cc2d2e6 100644
--- a/gpucuda/kernelcreation.py
+++ b/gpucuda/kernelcreation.py
@@ -1,140 +1,142 @@
 from functools import partial
 
 from pystencils.gpucuda.indexing import BlockIndexing
-from pystencils.transformations import resolveFieldAccesses, typeAllEquations, parseBasePointerInfo, getCommonShape, \
-    substituteArrayAccessesWithConstants, resolveBufferAccesses
+from pystencils.transformations import resolve_field_accesses, type_all_equations, parse_base_pointer_info, get_common_shape, \
+    substitute_array_accesses_with_constants, resolve_buffer_accesses
 from pystencils.astnodes import Block, KernelFunction, SympyAssignment, LoopOverCoordinate
 from pystencils.data_types import TypedSymbol, BasicType, StructType
 from pystencils import Field, FieldType
-from pystencils.gpucuda.cudajit import makePythonFunction
+from pystencils.gpucuda.cudajit import make_python_function
 
 
-def createCUDAKernel(listOfEquations, functionName="kernel", typeForSymbol=None, indexingCreator=BlockIndexing,
-                     iterationSlice=None, ghostLayers=None):
-    fieldsRead, fieldsWritten, assignments = typeAllEquations(listOfEquations, typeForSymbol)
-    allFields = fieldsRead.union(fieldsWritten)
-    readOnlyFields = set([f.name for f in fieldsRead - fieldsWritten])
+def create_cuda_kernel(assignments, function_name="kernel", type_info=None, indexing_creator=BlockIndexing,
+                       iteration_slice=None, ghost_layers=None):
+    fields_read, fields_written, assignments = type_all_equations(assignments, type_info)
+    all_fields = fields_read.union(fields_written)
+    read_only_fields = set([f.name for f in fields_read - fields_written])
 
-    buffers = set([f for f in allFields if FieldType.isBuffer(f)])
-    fieldsWithoutBuffers = allFields - buffers
+    buffers = set([f for f in all_fields if FieldType.is_buffer(f)])
+    fields_without_buffers = all_fields - buffers
 
-    fieldAccesses = set()
-    numBufferAccesses = 0
-    for eq in listOfEquations:
-        fieldAccesses.update(eq.atoms(Field.Access))
+    field_accesses = set()
+    num_buffer_accesses = 0
+    for eq in assignments:
+        field_accesses.update(eq.atoms(Field.Access))
 
-        numBufferAccesses += sum([1 for access in eq.atoms(Field.Access) if FieldType.isBuffer(access.field)])
+        num_buffer_accesses += sum([1 for access in eq.atoms(Field.Access) if FieldType.is_buffer(access.field)])
 
-    commonShape = getCommonShape(fieldsWithoutBuffers)
+    common_shape = get_common_shape(fields_without_buffers)
 
-    if iterationSlice is None:
+    if iteration_slice is None:
         # determine iteration slice from ghost layers
-        if ghostLayers is None:
+        if ghost_layers is None:
             # determine required number of ghost layers from field access
-            requiredGhostLayers = max([fa.requiredGhostLayers for fa in fieldAccesses])
-            ghostLayers = [(requiredGhostLayers, requiredGhostLayers)] * len(commonShape)
-        iterationSlice = []
-        if isinstance(ghostLayers, int):
-            for i in range(len(commonShape)):
-                iterationSlice.append(slice(ghostLayers, -ghostLayers if ghostLayers > 0 else None))
+            required_ghost_layers = max([fa.required_ghost_layers for fa in field_accesses])
+            ghost_layers = [(required_ghost_layers, required_ghost_layers)] * len(common_shape)
+        iteration_slice = []
+        if isinstance(ghost_layers, int):
+            for i in range(len(common_shape)):
+                iteration_slice.append(slice(ghost_layers, -ghost_layers if ghost_layers > 0 else None))
         else:
-            for i in range(len(commonShape)):
-                iterationSlice.append(slice(ghostLayers[i][0], -ghostLayers[i][1] if ghostLayers[i][1] > 0 else None))
+            for i in range(len(common_shape)):
+                iteration_slice.append(slice(ghost_layers[i][0], -ghost_layers[i][1] if ghost_layers[i][1] > 0 else None))
 
-    indexing = indexingCreator(field=list(fieldsWithoutBuffers)[0], iterationSlice=iterationSlice)
+    indexing = indexing_creator(field=list(fields_without_buffers)[0], iteration_slice=iteration_slice)
 
     block = Block(assignments)
-    block = indexing.guard(block, commonShape)
-    ast = KernelFunction(block, function_name=functionName, ghost_layers=ghostLayers, backend='gpucuda')
-    ast.globalVariables.update(indexing.indexVariables)
+    block = indexing.guard(block, common_shape)
+    ast = KernelFunction(block, function_name=function_name, ghost_layers=ghost_layers, backend='gpucuda')
+    ast.global_variables.update(indexing.index_variables)
 
-    coordMapping = indexing.coordinates
-    basePointerInfo = [['spatialInner0']]
-    basePointerInfos = {f.name: parseBasePointerInfo(basePointerInfo, [2, 1, 0], f) for f in allFields}
+    coord_mapping = indexing.coordinates
+    base_pointer_info = [['spatialInner0']]
+    base_pointer_infos = {f.name: parse_base_pointer_info(base_pointer_info, [2, 1, 0], f) for f in all_fields}
 
-    coordMapping = {f.name: coordMapping for f in allFields}
+    coord_mapping = {f.name: coord_mapping for f in all_fields}
 
-    loopVars = [numBufferAccesses * i for i in indexing.coordinates]
-    loopStrides = list(fieldsWithoutBuffers)[0].shape
+    loop_vars = [num_buffer_accesses * i for i in indexing.coordinates]
+    loop_strides = list(fields_without_buffers)[0].shape
 
-    baseBufferIndex = loopVars[0]
+    base_buffer_index = loop_vars[0]
     stride = 1
-    for idx, var in enumerate(loopVars[1:]):
-        stride *= loopStrides[idx]
-        baseBufferIndex += var * stride
+    for idx, var in enumerate(loop_vars[1:]):
+        stride *= loop_strides[idx]
+        base_buffer_index += var * stride
 
-    resolveBufferAccesses(ast, baseBufferIndex, readOnlyFields)
-    resolveFieldAccesses(ast, readOnlyFields, field_to_base_pointer_info=basePointerInfos,
-                         field_to_fixed_coordinates=coordMapping)
+    resolve_buffer_accesses(ast, base_buffer_index, read_only_fields)
+    resolve_field_accesses(ast, read_only_fields, field_to_base_pointer_info=base_pointer_infos,
+                           field_to_fixed_coordinates=coord_mapping)
 
-    substituteArrayAccessesWithConstants(ast)
+    substitute_array_accesses_with_constants(ast)
 
     # add the function which determines #blocks and #threads as additional member to KernelFunction node
     # this is used by the jit
 
     # If loop counter symbols have been explicitly used in the update equations (e.g. for built in periodicity),
     # they are defined here
-    undefinedLoopCounters = {LoopOverCoordinate.is_loop_counter_symbol(s): s for s in ast.body.undefined_symbols
-                             if LoopOverCoordinate.is_loop_counter_symbol(s) is not None}
-    for i, loopCounter in undefinedLoopCounters.items():
+    undefined_loop_counters = {LoopOverCoordinate.is_loop_counter_symbol(s): s for s in ast.body.undefined_symbols
+                               if LoopOverCoordinate.is_loop_counter_symbol(s) is not None}
+    for i, loop_counter in undefined_loop_counters.items():
-        ast.body.insert_front(SympyAssignment(loopCounter, indexing.coordinates[i]))
+        ast.body.insert_front(SympyAssignment(loop_counter, indexing.coordinates[i]))
 
     ast.indexing = indexing
-    ast.compile = partial(makePythonFunction, ast)
+    ast.compile = partial(make_python_function, ast)
     return ast
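An end-to-end sketch (assumes pycuda and a CUDA device; the fields and the averaging update rule are illustrative):

import numpy as np
import pycuda.gpuarray as gpuarray
from pystencils import Assignment, Field
from pystencils.gpucuda.kernelcreation import create_cuda_kernel

src, dst = Field.create_generic("src", 2), Field.create_generic("dst", 2)
update = [Assignment(dst[0, 0], (src[1, 0] + src[-1, 0] + src[0, 1] + src[0, -1]) / 4)]

ast = create_cuda_kernel(update)
kernel = ast.compile()                      # partial(make_python_function, ast)

src_gpu = gpuarray.to_gpu(np.random.rand(32, 32))
dst_gpu = gpuarray.to_gpu(np.zeros((32, 32)))
kernel(src=src_gpu, dst=dst_gpu)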
 
 
-def createdIndexedCUDAKernel(listOfEquations, indexFields, functionName="kernel", typeForSymbol=None,
-                             coordinateNames=('x', 'y', 'z'), indexingCreator=BlockIndexing):
-    fieldsRead, fieldsWritten, assignments = typeAllEquations(listOfEquations, typeForSymbol)
-    allFields = fieldsRead.union(fieldsWritten)
-    readOnlyFields = set([f.name for f in fieldsRead - fieldsWritten])
+def created_indexed_cuda_kernel(assignments, index_fields, function_name="kernel", type_info=None,
+                                coordinate_names=('x', 'y', 'z'), indexing_creator=BlockIndexing):
+    fields_read, fields_written, assignments = type_all_equations(assignments, type_info)
+    all_fields = fields_read.union(fields_written)
+    read_only_fields = set([f.name for f in fields_read - fields_written])
 
-    for indexField in indexFields:
+    for indexField in index_fields:
         indexField.fieldType = FieldType.INDEXED
-        assert FieldType.isIndexed(indexField)
-        assert indexField.spatialDimensions == 1, "Index fields have to be 1D"
-
-    nonIndexFields = [f for f in allFields if f not in indexFields]
-    spatialCoordinates = {f.spatialDimensions for f in nonIndexFields}
-    assert len(spatialCoordinates) == 1, "Non-index fields do not have the same number of spatial coordinates"
-    spatialCoordinates = list(spatialCoordinates)[0]
-
-    def getCoordinateSymbolAssignment(name):
-        for indexField in indexFields:
-            assert isinstance(indexField.dtype, StructType), "Index fields have to have a struct datatype"
-            dataType = indexField.dtype
-            if dataType.has_element(name):
-                rhs = indexField[0](name)
-                lhs = TypedSymbol(name, BasicType(dataType.get_element_type(name)))
+        assert FieldType.is_indexed(indexField)
+        assert indexField.spatial_dimensions == 1, "Index fields have to be 1D"
+
+    non_index_fields = [f for f in all_fields if f not in index_fields]
+    spatial_coordinates = {f.spatial_dimensions for f in non_index_fields}
+    assert len(spatial_coordinates) == 1, "Non-index fields do not have the same number of spatial coordinates"
+    spatial_coordinates = list(spatial_coordinates)[0]
+
+    def get_coordinate_symbol_assignment(name):
+        for index_field in index_fields:
+            assert isinstance(index_field.dtype, StructType), "Index fields have to have a struct data type"
+            data_type = index_field.dtype
+            if data_type.has_element(name):
+                rhs = index_field[0](name)
+                lhs = TypedSymbol(name, BasicType(data_type.get_element_type(name)))
                 return SympyAssignment(lhs, rhs)
         raise ValueError("Index %s not found in any of the passed index fields" % (name,))
 
-    coordinateSymbolAssignments = [getCoordinateSymbolAssignment(n) for n in coordinateNames[:spatialCoordinates]]
-    coordinateTypedSymbols = [eq.lhs for eq in coordinateSymbolAssignments]
+    coordinate_symbol_assignments = [get_coordinate_symbol_assignment(n)
+                                     for n in coordinate_names[:spatial_coordinates]]
+    coordinate_typed_symbols = [eq.lhs for eq in coordinate_symbol_assignments]
 
-    idxField = list(indexFields)[0]
-    indexing = indexingCreator(field=idxField, iterationSlice=[slice(None, None, None)] * len(idxField.spatialShape))
+    idx_field = list(index_fields)[0]
+    indexing = indexing_creator(field=idx_field,
+                                iteration_slice=[slice(None, None, None)] * len(idx_field.spatial_shape))
 
-    functionBody = Block(coordinateSymbolAssignments + assignments)
-    functionBody = indexing.guard(functionBody, getCommonShape(indexFields))
-    ast = KernelFunction(functionBody, function_name=functionName, backend='gpucuda')
-    ast.globalVariables.update(indexing.indexVariables)
+    function_body = Block(coordinate_symbol_assignments + assignments)
+    function_body = indexing.guard(function_body, get_common_shape(index_fields))
+    ast = KernelFunction(function_body, function_name=function_name, backend='gpucuda')
+    ast.global_variables.update(indexing.index_variables)
 
-    coordMapping = indexing.coordinates
-    basePointerInfo = [['spatialInner0']]
-    basePointerInfos = {f.name: parseBasePointerInfo(basePointerInfo, [2, 1, 0], f) for f in allFields}
+    coord_mapping = indexing.coordinates
+    base_pointer_info = [['spatialInner0']]
+    base_pointer_infos = {f.name: parse_base_pointer_info(base_pointer_info, [2, 1, 0], f) for f in all_fields}
 
-    coordMapping = {f.name: coordMapping for f in indexFields}
-    coordMapping.update({f.name: coordinateTypedSymbols for f in nonIndexFields})
-    resolveFieldAccesses(ast, readOnlyFields, field_to_fixed_coordinates=coordMapping,
-                         field_to_base_pointer_info=basePointerInfos)
-    substituteArrayAccessesWithConstants(ast)
+    coord_mapping = {f.name: coord_mapping for f in index_fields}
+    coord_mapping.update({f.name: coordinate_typed_symbols for f in non_index_fields})
+    resolve_field_accesses(ast, read_only_fields, field_to_fixed_coordinates=coord_mapping,
+                           field_to_base_pointer_info=base_pointer_infos)
+    substitute_array_accesses_with_constants(ast)
 
     # add the function which determines #blocks and #threads as additional member to KernelFunction node
     # this is used by the jit
     ast.indexing = indexing
-    ast.compile = partial(makePythonFunction, ast)
+    ast.compile = partial(make_python_function, ast)
     return ast
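A hedged sketch of the expected inputs, reusing the Field.create_generic signature seen in periodicity.py below; the struct member names must match coordinate_names:

import numpy as np
from pystencils import Assignment, Field
from pystencils.gpucuda.kernelcreation import created_indexed_cuda_kernel

cell_idx = np.dtype([('x', np.int32), ('y', np.int32)])
idx = Field.create_generic("idx", 1, dtype=cell_idx)   # 1D index field, struct dtype
f = Field.create_generic("f", 2)

# write a constant at every (x, y) cell listed in the index field
ast = created_indexed_cuda_kernel([Assignment(f[0, 0], 42.0)], [idx])
kernel = ast.compile()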
 
 
diff --git a/gpucuda/periodicity.py b/gpucuda/periodicity.py
index 5009fce894b41b75bd9b4845b1d7f96fa536e256..ea234de90a7146860bf0e50a1d5cffcef41e48c2 100644
--- a/gpucuda/periodicity.py
+++ b/gpucuda/periodicity.py
@@ -1,41 +1,42 @@
 import sympy as sp
 import numpy as np
 from pystencils import Field, Assignment
-from pystencils.slicing import normalizeSlice, getPeriodicBoundarySrcDstSlices
-from pystencils.gpucuda import makePythonFunction
-from pystencils.gpucuda.kernelcreation import createCUDAKernel
+from pystencils.slicing import normalize_slice, get_periodic_boundary_src_dst_slices
+from pystencils.gpucuda import make_python_function
+from pystencils.gpucuda.kernelcreation import create_cuda_kernel
 
 
-def createCopyKernel(domainSize, fromSlice, toSlice, indexDimensions=0, indexDimShape=1, dtype=np.float64):
+def create_copy_kernel(domain_size, from_slice, to_slice, index_dimensions=0, index_dim_shape=1, dtype=np.float64):
     """Copies a rectangular part of an array to another non-overlapping part"""
-    if indexDimensions not in (0, 1):
+    if index_dimensions not in (0, 1):
         raise NotImplementedError("Works only for one or zero index coordinates")
 
-    f = Field.createGeneric("pdfs", len(domainSize), indexDimensions=indexDimensions, dtype=dtype)
-    normalizedFromSlice = normalizeSlice(fromSlice, f.spatialShape)
-    normalizedToSlice = normalizeSlice(toSlice, f.spatialShape)
+    f = Field.create_generic("pdfs", len(domain_size), index_dimensions=index_dimensions, dtype=dtype)
+    normalized_from_slice = normalize_slice(from_slice, f.spatial_shape)
+    normalized_to_slice = normalize_slice(to_slice, f.spatial_shape)
 
-    offset = [s1.start - s2.start for s1, s2 in zip(normalizedFromSlice, normalizedToSlice)]
-    assert offset == [s1.stop - s2.stop for s1, s2 in zip(normalizedFromSlice, normalizedToSlice)], "Slices have to have same size"
+    offset = [s1.start - s2.start for s1, s2 in zip(normalized_from_slice, normalized_to_slice)]
+    assert offset == [s1.stop - s2.stop for s1, s2 in zip(normalized_from_slice, normalized_to_slice)], \
+        "Slices have to have same size"
 
-    updateEqs = []
-    for i in range(indexDimShape):
+    update_eqs = []
+    for i in range(index_dim_shape):
         eq = Assignment(f(i), f[tuple(offset)](i))
-        updateEqs.append(eq)
+        update_eqs.append(eq)
 
-    ast = createCUDAKernel(updateEqs, iterationSlice=toSlice)
-    return makePythonFunction(ast)
+    ast = create_cuda_kernel(update_eqs, iteration_slice=to_slice)
+    return make_python_function(ast)
 
 
-def getPeriodicBoundaryFunctor(stencil, domainSize, indexDimensions=0, indexDimShape=1, ghostLayers=1,
-                               thickness=None, dtype=float):
-    srcDstSliceTuples = getPeriodicBoundarySrcDstSlices(stencil, ghostLayers, thickness)
+def get_periodic_boundary_functor(stencil, domain_size, index_dimensions=0, index_dim_shape=1, ghost_layers=1,
+                                  thickness=None, dtype=float):
+    src_dst_slice_tuples = get_periodic_boundary_src_dst_slices(stencil, ghost_layers, thickness)
     kernels = []
-    indexDimensions = indexDimensions
-    for srcSlice, dstSlice in srcDstSliceTuples:
-        kernels.append(createCopyKernel(domainSize, srcSlice, dstSlice, indexDimensions, indexDimShape, dtype))
+    for src_slice, dst_slice in src_dst_slice_tuples:
+        kernels.append(create_copy_kernel(domain_size, src_slice, dst_slice, index_dimensions, index_dim_shape, dtype))
 
-    def functor(pdfs, **kwargs):
+    def functor(pdfs, **_):
         for kernel in kernels:
             kernel(pdfs=pdfs)
+
+    return functor
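A usage sketch (stencil and sizes illustrative; the array lives on the GPU):

import numpy as np
import pycuda.gpuarray as gpuarray
from pystencils.gpucuda.periodicity import get_periodic_boundary_functor

stencil = [(1, 0), (-1, 0), (0, 1), (0, -1)]
periodicity = get_periodic_boundary_functor(stencil, domain_size=(32, 32))
pdfs = gpuarray.to_gpu(np.zeros((34, 34)))   # 32x32 interior plus one ghost layer per side
periodicity(pdfs)                            # copies each boundary slice to the opposite side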
 
diff --git a/jupytersetup.py b/jupytersetup.py
index 068ef534aace3314f8c49847a8dbc72c6ed28f91..f478bf18e48e0cc2ecfd5f2f6563841f5235f038 100644
--- a/jupytersetup.py
+++ b/jupytersetup.py
@@ -5,8 +5,8 @@ from tempfile import NamedTemporaryFile
 import base64
 import sympy as sp
 
-__all__ = ['log_progress', 'makeImshowAnimation', 'makeSurfacePlotAnimation',
-           'disp', 'setDisplayMode']
+__all__ = ['log_progress', 'make_imshow_animation', 'make_surface_plot_animation',
+           'display_animation', 'set_display_mode']
 
 
 def log_progress(sequence, every=None, size=None, name='Items'):
@@ -73,32 +73,32 @@ VIDEO_TAG = """<video controls width="80%">
 </video>"""
 
 
-def __anim_to_html(anim, fps):
-    if not hasattr(anim, '_encoded_video'):
+def __animation_to_html(animation, fps):
+    if not hasattr(animation, 'encoded_video'):
         with NamedTemporaryFile(suffix='.mp4') as f:
-            anim.save(f.name, fps=fps, extra_args=['-vcodec', 'libx264', '-pix_fmt',
-                                                   'yuv420p', '-profile:v', 'baseline', '-level', '3.0'])
+            animation.save(f.name, fps=fps, extra_args=['-vcodec', 'libx264', '-pix_fmt',
+                                                        'yuv420p', '-profile:v', 'baseline', '-level', '3.0'])
             video = open(f.name, "rb").read()
-        anim._encoded_video = base64.b64encode(video).decode('ascii')
+        animation.encoded_video = base64.b64encode(video).decode('ascii')
 
-    return VIDEO_TAG.format(anim._encoded_video)
+    return VIDEO_TAG.format(animation.encoded_video)
 
 
-def makeImshowAnimation(grid, gridUpdateFunction, frames=90, **kwargs):
+def make_imshow_animation(grid, grid_update_function, frames=90, **_):
     from functools import partial
     fig = plt.figure()
     im = plt.imshow(grid, interpolation='none')
 
-    def updatefig(*args, **kwargs):
+    def update_figure(*_, **kwargs):
         image = kwargs['image']
-        image = gridUpdateFunction(image)
+        image = grid_update_function(image)
         im.set_array(image)
         return im,
 
-    return animation.FuncAnimation(fig, partial(updatefig, image=grid), frames=frames)
+    return animation.FuncAnimation(fig, partial(update_figure, image=grid), frames=frames)
 
 
-def makeSurfacePlotAnimation(runFunction, frames=90, interval=30):
+def make_surface_plot_animation(run_function, frames=90, interval=30):
     from mpl_toolkits.mplot3d import Axes3D
     import matplotlib.animation as animation
     import matplotlib.pyplot as plt
@@ -106,26 +106,26 @@ def makeSurfacePlotAnimation(runFunction, frames=90, interval=30):
 
     fig = plt.figure()
     ax = fig.add_subplot(111, projection='3d')
-    X, Y, data = runFunction(1)
-    ax.plot_surface(X, Y, data, rstride=2, cstride=2, color='b', cmap=cm.coolwarm,)
+    x, y, data = run_function(1)
+    ax.plot_surface(x, y, data, rstride=2, cstride=2, color='b', cmap=cm.coolwarm,)
     ax.set_zlim(-1.0, 1.0)
 
-    def updatefig(*args):
-        X, Y, data = runFunction(1)
+    def update_figure(*_):
+        x_grid, y_grid, d = run_function(1)
         ax.clear()
-        plot = ax.plot_surface(X, Y, data, rstride=2, cstride=2, color='b', cmap=cm.coolwarm,)
+        plot = ax.plot_surface(x_grid, y_grid, d, rstride=2, cstride=2, color='b', cmap=cm.coolwarm,)
         ax.set_zlim(-1.0, 1.0)
         return plot,
 
-    return animation.FuncAnimation(fig, updatefig, interval=interval, frames=frames, blit=False)
+    return animation.FuncAnimation(fig, update_figure, interval=interval, frames=frames, blit=False)
 
 
 # -------   Version 1: Embed the animation as HTML5 video --------- ----------------------------------
 
-def displayAsHtmlVideo(anim, fps=30, show=True, **kwargs):
+def display_as_html_video(animation, fps=30, show=True, **_):
     try:
-        plt.close(anim._fig)
-        res = __anim_to_html(anim, fps)
+        plt.close(animation._fig)
+        res = __animation_to_html(animation, fps)
         if show:
             return HTML(res)
         else:
@@ -137,27 +137,26 @@ def displayAsHtmlVideo(anim, fps=30, show=True, **kwargs):
 # -------   Version 2: Animation is shown in extra matplotlib window ----------------------------------
 
 
-def displayInExtraWindow(animation, *args, **kwargs):
+def display_in_extra_window(*_, **__):
     fig = plt.gcf()
     try:
-      fig.canvas.manager.window.raise_()
+        fig.canvas.manager.window.raise_()
     except Exception:
-      pass
+        pass
     plt.show()
 
 
 # -------   Version 3: Animation is shown in images that are updated directly in website --------------
 
-def displayAsHtmlImage(animation, show=True, iterations=10000,  *args, **kwargs):
+def display_as_html_image(animation, show=True, iterations=10000, *args, **kwargs):
     from IPython import display
 
     try:
-        if show:
-            fig = plt.gcf()
         if show:
             animation._init_draw()
         for i in range(iterations):
             if show:
+                fig = plt.gcf()
                 display.display(fig)
             animation._step()
             if show:
@@ -168,11 +167,11 @@ def displayAsHtmlImage(animation, show=True, iterations=10000,  *args, **kwargs)
 
 # Dispatcher
 
-animation_display_mode = 'imageupdate'
+animation_display_mode = 'image_update'
 display_animation_func = None
 
 
-def disp(*args, **kwargs):
+def display_animation(*args, **kwargs):
     from IPython import get_ipython
     ipython = get_ipython()
     if not ipython:
@@ -183,7 +182,7 @@ def disp(*args, **kwargs):
     return display_animation_func(*args, **kwargs)
 
 
-def setDisplayMode(mode):
+def set_display_mode(mode):
     from IPython import get_ipython
     ipython = get_ipython()
     if not ipython:
@@ -193,25 +192,26 @@ def setDisplayMode(mode):
     animation_display_mode = mode
     if animation_display_mode == 'video':
         ipython.magic("matplotlib inline")
-        display_animation_func = displayAsHtmlVideo
+        display_animation_func = display_as_html_video
     elif animation_display_mode == 'window':
         ipython.magic("matplotlib qt")
-        display_animation_func = displayInExtraWindow
-    elif animation_display_mode == 'imageupdate':
+        display_animation_func = display_in_extra_window
+    elif animation_display_mode == 'image_update':
         ipython.magic("matplotlib inline")
-        display_animation_func = displayAsHtmlImage
+        display_animation_func = display_as_html_image
     else:
-        raise Exception("Unknown mode. Available modes 'imageupdate', 'video' and 'window' ")
+        raise Exception("Unknown mode. Available modes 'image_update', 'video' and 'window' ")
 
 
-def activateIPython():
+def activate_ipython():
     from IPython import get_ipython
     ipython = get_ipython()
     if ipython:
-        setDisplayMode('imageupdate')
+        set_display_mode('image_update')
         ipython.magic("config InlineBackend.rc = { }")
         ipython.magic("matplotlib inline")
         plt.rc('figure', figsize=(16, 6))
         sp.init_printing()
 
-activateIPython()
+
+activate_ipython()
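A minimal notebook usage sketch (the toy update function is illustrative):

import numpy as np
from pystencils.jupytersetup import make_imshow_animation

def shift_down(img):
    return np.roll(img, 1, axis=0)           # toy update: move the image one row down

anim = make_imshow_animation(np.random.rand(32, 32), shift_down, frames=10)
# inside Jupyter one would then switch the backend and show it, e.g.:
# set_display_mode('video'); display_animation(anim, fps=10)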
diff --git a/kerncraft_coupling/generate_benchmark.py b/kerncraft_coupling/generate_benchmark.py
index ac7339936d38cd0b51e1c3c5dc43ab95e9c58483..38699c4e6995aaa021fb032f519548674fb17de3 100644
--- a/kerncraft_coupling/generate_benchmark.py
+++ b/kerncraft_coupling/generate_benchmark.py
@@ -1,5 +1,5 @@
 from jinja2 import Template
-from pystencils.cpu import print_c
+from pystencils.cpu import generate_c
 from pystencils.sympyextensions import prod
 from pystencils.data_types import get_base_type
 
@@ -29,15 +29,15 @@ int main(int argc, char **argv)
   likwid_markerThreadInit();
   {%- endif %}
 
-  {%- for fieldName, dataType, size in fields %}
+  {%- for field_name, dataType, size in fields %}
   
-  // Initialization {{fieldName}} 
-  double * {{fieldName}} = aligned_malloc(sizeof({{dataType}}) * {{size}}, 32);
+  // Initialization {{field_name}} 
+  {{dataType}} * {{field_name}} = aligned_malloc(sizeof({{dataType}}) * {{size}}, 32);
   for (int i = 0; i < {{size}}; ++i)
-    {{fieldName}}[i] = 0.23;
+    {{field_name}}[i] = 0.23;
   
   if(var_false)
-    dummy({{fieldName}});   
+    dummy({{field_name}});   
          
   {%- endfor %}
   
@@ -63,8 +63,8 @@ int main(int argc, char **argv)
     {{kernelName}}({{callArgumentList}});
     
     // Dummy calls   
-    {%- for fieldName, dataType, size in fields %}
-    if(var_false) dummy({{fieldName}});      
+    {%- for field_name, dataType, size in fields %}
+    if(var_false) dummy({{field_name}});      
     {%- endfor %}
     {%- for constantName, dataType in constants %}
     if(var_false) dummy(&{{constantName}});
@@ -84,28 +84,28 @@ int main(int argc, char **argv)
 """)
 
 
-def generateBenchmark(ast, likwid=False):
-    accessedFields = {f.name: f for f in ast.fields_accessed}
+def generate_benchmark(ast, likwid=False):
+    accessed_fields = {f.name: f for f in ast.fields_accessed}
     constants = []
     fields = []
-    callParameters = []
+    call_parameters = []
     for p in ast.parameters:
         if not p.isFieldArgument:
             constants.append((p.name, str(p.dtype)))
-            callParameters.append(p.name)
+            call_parameters.append(p.name)
         else:
             assert p.isFieldPtrArgument, "Benchmark implemented only for kernels with fixed loop size"
-            field = accessedFields[p.fieldName]
+            field = accessed_fields[p.field_name]
             dtype = str(get_base_type(p.dtype))
-            fields.append((p.fieldName, dtype, prod(field.shape)))
-            callParameters.append(p.fieldName)
+            fields.append((p.field_name, dtype, prod(field.shape)))
+            call_parameters.append(p.field_name)
 
     args = {
         'likwid': likwid,
-        'kernelCode': print_c(ast),
-        'kernelName': ast.functionName,
+        'kernelCode': generate_c(ast),
+        'kernelName': ast.function_name,
         'fields': fields,
         'constants': constants,
-        'callArgumentList': ",".join(callParameters),
+        'callArgumentList': ",".join(call_parameters),
     }
     return benchmarkTemplate.render(**args)
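
To make the contract of generate_benchmark concrete, a minimal driver sketch (not part
of the patch). Field.create_from_numpy_array is assumed to be the snake_case constructor
in this version; fixed-size fields keep all kernel arguments plain field pointers, as
the assertion above requires:

    import numpy as np
    from pystencils import Field, Assignment, create_kernel
    from pystencils.kerncraft_coupling.generate_benchmark import generate_benchmark

    src = Field.create_from_numpy_array('src', np.zeros((64, 64)))  # constructor name assumed
    dst = Field.create_from_numpy_array('dst', np.zeros((64, 64)))

    # simple four-point stencil as the kernel under test
    update = Assignment(dst[0, 0], (src[1, 0] + src[-1, 0] + src[0, 1] + src[0, -1]) / 4)
    ast = create_kernel([update], target='cpu')

    with open('benchmark.c', 'w') as f:
        f.write(generate_benchmark(ast, likwid=False))  # renders a complete main()
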
diff --git a/kerncraft_coupling/kerncraft_interface.py b/kerncraft_coupling/kerncraft_interface.py
index a583869b2a6fa6c4c7534484c18b53ac8d61c633..8860a8b6f32d08726901fc548433904ecf009223 100644
--- a/kerncraft_coupling/kerncraft_interface.py
+++ b/kerncraft_coupling/kerncraft_interface.py
@@ -9,9 +9,9 @@ import kerncraft.kernel
 from kerncraft.machinemodel import MachineModel
 from kerncraft.models import ECM, Benchmark
 from kerncraft.iaca import iaca_analyse_instrumented_binary, iaca_instrumentation
-from pystencils.kerncraft_coupling.generate_benchmark import generateBenchmark
+from pystencils.kerncraft_coupling.generate_benchmark import generate_benchmark
 from pystencils.astnodes import LoopOverCoordinate, SympyAssignment, ResolvedFieldAccess
-from pystencils.field import getLayoutFromStrides
+from pystencils.field import get_layout_from_strides
 from pystencils.sympyextensions import count_operations_in_ast
 from pystencils.utils import DotDict
 
@@ -30,44 +30,44 @@ class PyStencilsKerncraftKernel(kerncraft.kernel.Kernel):
         self.temporaryDir = TemporaryDirectory()
 
         # Loops
-        innerLoops = [l for l in ast.atoms(LoopOverCoordinate) if l.is_innermost_loop]
-        if len(innerLoops) == 0:
+        inner_loops = [loop for loop in ast.atoms(LoopOverCoordinate) if loop.is_innermost_loop]
+        if len(inner_loops) == 0:
             raise ValueError("No loop found in pystencils AST")
-        elif len(innerLoops) > 1:
+        elif len(inner_loops) > 1:
             raise ValueError("pystencils AST contains multiple inner loops - only one can be analyzed")
         else:
-            innerLoop = innerLoops[0]
+            inner_loop = inner_loops[0]
 
         self._loop_stack = []
-        curNode = innerLoop
-        while curNode is not None:
-            if isinstance(curNode, LoopOverCoordinate):
-                loopCounterSym = curNode.loop_counter_symbol
-                loopInfo = (loopCounterSym.name, curNode.start, curNode.stop, curNode.step)
+        cur_node = inner_loop
+        while cur_node is not None:
+            if isinstance(cur_node, LoopOverCoordinate):
+                loopCounterSym = cur_node.loop_counter_symbol
+                loopInfo = (loopCounterSym.name, cur_node.start, cur_node.stop, cur_node.step)
                 self._loop_stack.append(loopInfo)
-            curNode = curNode.parent
+            cur_node = cur_node.parent
         self._loop_stack = list(reversed(self._loop_stack))
 
         # Data sources & destinations
         self.sources = defaultdict(list)
         self.destinations = defaultdict(list)
 
-        reads, writes = searchResolvedFieldAccessesInAst(innerLoop)
+        reads, writes = search_resolved_field_accesses_in_ast(inner_loop)
         for accesses, targetDict in [(reads, self.sources), (writes, self.destinations)]:
             for fa in accesses:
                 coord = [sp.Symbol(LoopOverCoordinate.get_loop_counter_name(i), positive=True, integer=True) + off
                          for i, off in enumerate(fa.offsets)]
                 coord += list(fa.idxCoordinateValues)
-                layout = getLayoutFromStrides(fa.field.strides)
-                permutedCoord = [coord[i] for i in layout]
-                targetDict[fa.field.name].append(permutedCoord)
+                layout = get_layout_from_strides(fa.field.strides)
+                permuted_coord = [coord[i] for i in layout]
+                targetDict[fa.field.name].append(permuted_coord)
 
         # Variables (arrays)
-        fieldsAccessed = ast.fields_accessed
-        for field in fieldsAccessed:
-            layout = getLayoutFromStrides(field.strides)
-            permutedShape = list(field.shape[i] for i in layout)
-            self.set_variable(field.name, str(field.dtype), tuple(permutedShape))
+        fields_accessed = ast.fields_accessed
+        for field in fields_accessed:
+            layout = get_layout_from_strides(field.strides)
+            permuted_shape = list(field.shape[i] for i in layout)
+            self.set_variable(field.name, str(field.dtype), tuple(permuted_shape))
 
         for param in ast.parameters:
             if not param.isFieldArgument:
@@ -78,11 +78,11 @@ class PyStencilsKerncraftKernel(kerncraft.kernel.Kernel):
         self.datatype = list(self.variables.values())[0][0]
 
         # flops
-        operationCount = count_operations_in_ast(innerLoop)
+        operation_count = count_operations_in_ast(inner_loop)
         self._flops = {
-            '+': operationCount['adds'],
-            '*': operationCount['muls'],
-            '/': operationCount['divs'],
+            '+': operation_count['adds'],
+            '*': operation_count['muls'],
+            '/': operation_count['divs'],
         }
         for k in [k for k, v in self._flops.items() if v == 0]:
             del self._flops[k]
@@ -93,57 +93,57 @@ class PyStencilsKerncraftKernel(kerncraft.kernel.Kernel):
         compiler, compiler_args = self._machine.get_compiler()
         if '-std=c99' not in compiler_args:
             compiler_args += ['-std=c99']
-        headerPath = kerncraft.get_header_path()
+        header_path = kerncraft.get_header_path()
     
-        compilerCmd = [compiler] + compiler_args + ['-I' + headerPath]
+        compiler_cmd = [compiler] + compiler_args + ['-I' + header_path]
     
-        srcFile = os.path.join(self.temporaryDir.name, "source.c")
-        asmFile = os.path.join(self.temporaryDir.name, "source.s")
-        iacaAsmFile = os.path.join(self.temporaryDir.name, "source.iaca.s")
-        dummySrcFile = os.path.join(headerPath, "dummy.c")
-        dummyAsmFile = os.path.join(self.temporaryDir.name, "dummy.s")
-        binaryFile = os.path.join(self.temporaryDir.name, "binary")
+        src_file = os.path.join(self.temporaryDir.name, "source.c")
+        asm_file = os.path.join(self.temporaryDir.name, "source.s")
+        iaca_asm_file = os.path.join(self.temporaryDir.name, "source.iaca.s")
+        dummy_src_file = os.path.join(header_path, "dummy.c")
+        dummy_asm_file = os.path.join(self.temporaryDir.name, "dummy.s")
+        binary_file = os.path.join(self.temporaryDir.name, "binary")
 
         # write source code to file
-        with open(srcFile, 'w') as f:
-            f.write(generateBenchmark(self.ast, likwid=False))
+        with open(src_file, 'w') as f:
+            f.write(generate_benchmark(self.ast, likwid=False))
 
         # compile to asm files
-        subprocess.check_output(compilerCmd + [srcFile,      '-S', '-o', asmFile])
-        subprocess.check_output(compilerCmd + [dummySrcFile, '-S', '-o', dummyAsmFile])
+        subprocess.check_output(compiler_cmd + [src_file,      '-S', '-o', asm_file])
+        subprocess.check_output(compiler_cmd + [dummy_src_file, '-S', '-o', dummy_asm_file])
 
-        with open(asmFile) as read, open(iacaAsmFile, 'w') as write:
-            instrumentedAsmBlock = iaca_instrumentation(read, write)
+        with open(asm_file) as read, open(iaca_asm_file, 'w') as write:
+            instrumented_asm_block = iaca_instrumentation(read, write)
 
         # assemble asm files to executable
-        subprocess.check_output(compilerCmd + [iacaAsmFile, dummyAsmFile, '-o', binaryFile])
+        subprocess.check_output(compiler_cmd + [iaca_asm_file, dummy_asm_file, '-o', binary_file])
 
-        result = iaca_analyse_instrumented_binary(binaryFile, micro_architecture)
+        result = iaca_analyse_instrumented_binary(binary_file, micro_architecture)
     
-        return result, instrumentedAsmBlock
+        return result, instrumented_asm_block
 
     def build(self, lflags=None, verbose=False):
         compiler, compiler_args = self._machine.get_compiler()
         if '-std=c99' not in compiler_args:
             compiler_args.append('-std=c99')
-        headerPath = kerncraft.get_header_path()
+        header_path = kerncraft.get_header_path()
 
         cmd = [compiler] + compiler_args + [
             '-I' + os.path.join(self.LIKWID_BASE, 'include'),
             '-L' + os.path.join(self.LIKWID_BASE, 'lib'),
-            '-I' + headerPath,
+            '-I' + header_path,
             '-Wl,-rpath=' + os.path.join(self.LIKWID_BASE, 'lib'),
         ]
 
-        dummySrcFile = os.path.join(headerPath, 'dummy.c')
-        srcFile = os.path.join(self.temporaryDir.name, "source_likwid.c")
-        binFile = os.path.join(self.temporaryDir.name, "benchmark")
+        dummy_src_file = os.path.join(header_path, 'dummy.c')
+        src_file = os.path.join(self.temporaryDir.name, "source_likwid.c")
+        bin_file = os.path.join(self.temporaryDir.name, "benchmark")
 
-        with open(srcFile, 'w') as f:
-            f.write(generateBenchmark(self.ast, likwid=True))
+        with open(src_file, 'w') as f:
+            f.write(generate_benchmark(self.ast, likwid=True))
 
-        subprocess.check_output(cmd + [srcFile, dummySrcFile, '-pthread', '-llikwid', '-o', binFile])
-        return binFile
+        subprocess.check_output(cmd + [src_file, dummy_src_file, '-pthread', '-llikwid', '-o', bin_file])
+        return bin_file
 
 
 class KerncraftParameters(DotDict):
@@ -160,7 +160,7 @@ class KerncraftParameters(DotDict):
 # ------------------------------------------- Helper functions ---------------------------------------------------------
 
 
-def searchResolvedFieldAccessesInAst(ast):
+def search_resolved_field_accesses_in_ast(ast):
     def visit(node, reads, writes):
         if not isinstance(node, SympyAssignment):
             for a in node.args:
@@ -170,7 +170,7 @@ def searchResolvedFieldAccessesInAst(ast):
         for expr, accesses in [(node.lhs, writes), (node.rhs, reads)]:
             accesses.update(expr.atoms(ResolvedFieldAccess))
 
-    readAccesses = set()
-    writeAccesses = set()
-    visit(ast, readAccesses, writeAccesses)
-    return readAccesses, writeAccesses
\ No newline at end of file
+    read_accesses = set()
+    write_accesses = set()
+    visit(ast, read_accesses, write_accesses)
+    return read_accesses, write_accesses
\ No newline at end of file
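
A sketch of how the class above is typically plugged into kerncraft (not part of the
patch). The constructor signature PyStencilsKerncraftKernel(ast, machine) is an
assumption, since it is not shown in this hunk, and 'my_machine.yml' is a placeholder
for a kerncraft machine description:

    from kerncraft.machinemodel import MachineModel
    from kerncraft.models import ECM
    from pystencils.kerncraft_coupling.kerncraft_interface import (PyStencilsKerncraftKernel,
                                                                   KerncraftParameters)

    machine = MachineModel(path_to_yaml='my_machine.yml')  # placeholder machine file
    kernel = PyStencilsKerncraftKernel(ast, machine)       # 'ast' as produced by create_kernel
    model = ECM(kernel, machine, KerncraftParameters())    # KerncraftParameters stands in for CLI args
    model.analyze()
    model.report()
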
diff --git a/kernelcreation.py b/kernelcreation.py
index c5d6b988d3b9705eb20254fc4f6e67832fee0153..171de3fa45718d6207381db54d6adcfff6742897 100644
--- a/kernelcreation.py
+++ b/kernelcreation.py
@@ -1,29 +1,29 @@
 from pystencils.assignment_collection import AssignmentCollection
-from pystencils.gpucuda.indexing import indexingCreatorFromParams
+from pystencils.gpucuda.indexing import indexing_creator_from_params
 
 
-def createKernel(equations, target='cpu', dataType="double", iterationSlice=None, ghostLayers=None,
-                 cpuOpenMP=False, cpuVectorizeInfo=None,
-                 gpuIndexing='block', gpuIndexingParams={}):
+def create_kernel(equations, target='cpu', data_type="double", iteration_slice=None, ghost_layers=None,
+                  cpu_openmp=False, cpu_vectorize_info=None,
+                  gpu_indexing='block', gpu_indexing_params=None):
     """
     Creates abstract syntax tree (AST) of kernel, using a list of update equations.
     :param equations: either be a plain list of equations or a AssignmentCollection object
     :param target: 'cpu', 'llvm' or 'gpu'
-    :param dataType: data type used for all untyped symbols (i.e. non-fields), can also be a dict from symbol name
+    :param data_type: data type used for all untyped symbols (i.e. non-fields), can also be a dict from symbol name
                      to type
-    :param iterationSlice: rectangular subset to iterate over, if not specified the complete non-ghost layer part of the
+    :param iteration_slice: rectangular subset to iterate over, if not specified the complete non-ghost layer part of the
                            field is iterated over
-    :param ghostLayers: if left to default, the number of necessary ghost layers is determined automatically
+    :param ghost_layers: if left to default, the number of necessary ghost layers is determined automatically
                         a single integer specifies the ghost layer count at all borders, can also be a sequence of
                         pairs [(xLowerGl, xUpperGl), .... ]
 
     CPU specific Parameters:
-    :param cpuOpenMP: True or number of threads for OpenMP parallelization, False for no OpenMP
-    :param cpuVectorizeInfo: pair of instruction set name ('sse, 'avx', 'avx512') and data type ('float', 'double')
+    :param cpu_openmp: True or number of threads for OpenMP parallelization, False for no OpenMP
+    :param cpu_vectorize_info: pair of instruction set name ('sse', 'avx', 'avx512') and data type ('float', 'double')
 
     GPU specific Parameters
-    :param gpuIndexing: either 'block' or 'line' , or custom indexing class (see gpucuda/indexing.py)
-    :param gpuIndexingParams: dict with indexing parameters (constructor parameters of indexing class)
+    :param gpu_indexing: either 'block' or 'line', or a custom indexing class (see gpucuda/indexing.py)
+    :param gpu_indexing_params: dict with indexing parameters (constructor parameters of indexing class)
                               e.g. for 'block' one can specify {'blockSize': (20, 20, 10) }
 
     :return: abstract syntax tree object, that can either be printed as source code or can be compiled with
@@ -31,74 +31,74 @@ def createKernel(equations, target='cpu', dataType="double", iterationSlice=None
     """
 
     # ----  Normalizing parameters
-    splitGroups = ()
+    split_groups = ()
     if isinstance(equations, AssignmentCollection):
-        if 'splitGroups' in equations.simplification_hints:
-            splitGroups = equations.simplification_hints['splitGroups']
+        if 'split_groups' in equations.simplification_hints:
+            split_groups = equations.simplification_hints['split_groups']
         equations = equations.all_assignments
 
     # ----  Creating ast
     if target == 'cpu':
-        from pystencils.cpu import createKernel
-        from pystencils.cpu import addOpenMP
-        ast = createKernel(equations, typeForSymbol=dataType, splitGroups=splitGroups,
-                           iterationSlice=iterationSlice, ghostLayers=ghostLayers)
-        if cpuOpenMP:
-            addOpenMP(ast, numThreads=cpuOpenMP)
-        if cpuVectorizeInfo:
+        from pystencils.cpu import create_kernel
+        from pystencils.cpu import add_openmp
+        ast = create_kernel(equations, type_info=data_type, split_groups=split_groups,
+                            iteration_slice=iteration_slice, ghost_layers=ghost_layers)
+        if cpu_openmp:
+            add_openmp(ast, num_threads=cpu_openmp)
+        if cpu_vectorize_info:
             import pystencils.backends.simd_instruction_sets as vec
             from pystencils.vectorization import vectorize
-            vecParams = cpuVectorizeInfo
-            vec.selectedInstructionSet = vec.x86_vector_instruction_set(instruction_set=vecParams[0], data_type=vecParams[1])
+            vec_params = cpu_vectorize_info
+            vec.selectedInstructionSet = vec.x86_vector_instruction_set(instruction_set=vec_params[0],
+                                                                        data_type=vec_params[1])
             vectorize(ast)
         return ast
     elif target == 'llvm':
-        from pystencils.llvm import createKernel
-        ast = createKernel(equations, typeForSymbol=dataType, splitGroups=splitGroups,
-                           iterationSlice=iterationSlice, ghostLayers=ghostLayers)
+        from pystencils.llvm import create_kernel
+        ast = create_kernel(equations, type_info=data_type, split_groups=split_groups,
+                            iteration_slice=iteration_slice, ghost_layers=ghost_layers)
         return ast
     elif target == 'gpu':
-        from pystencils.gpucuda import createCUDAKernel
-        ast = createCUDAKernel(equations, typeForSymbol=dataType,
-                               indexingCreator=indexingCreatorFromParams(gpuIndexing, gpuIndexingParams),
-                               iterationSlice=iterationSlice, ghostLayers=ghostLayers)
+        from pystencils.gpucuda import create_cuda_kernel
+        ast = create_cuda_kernel(equations, type_info=data_type,
+                                 indexing_creator=indexing_creator_from_params(gpu_indexing, gpu_indexing_params or {}),
+                                 iteration_slice=iteration_slice, ghost_layers=ghost_layers)
         return ast
     else:
         raise ValueError("Unknown target %s. Has to be one of 'cpu', 'gpu' or 'llvm' " % (target,))
 
 
-def createIndexedKernel(equations, indexFields, target='cpu', dataType="double", coordinateNames=('x', 'y', 'z'),
-                        cpuOpenMP=True,
-                        gpuIndexing='block', gpuIndexingParams={}):
+def create_indexed_kernel(assignments, index_fields, target='cpu', data_type="double", coordinate_names=('x', 'y', 'z'),
+                          cpu_openmp=True, gpu_indexing='block', gpu_indexing_params=None):
     """
-    Similar to :func:`createKernel`, but here not all cells of a field are updated but only cells with
+    Similar to :func:`create_kernel`, but here not all cells of a field are updated but only cells with
     coordinates which are stored in an index field. This traversal method can e.g. be used for boundary handling.
 
     The coordinates are stored in a separated indexField, which is a one dimensional array with struct data type.
     This struct has to contain fields named 'x', 'y' and for 3D fields ('z'). These names are configurable with the
-    'coordinateNames' parameter. The struct can have also other fields that can be read and written in the kernel, for
+    'coordinate_names' parameter. The struct can also have other fields that can be read and written in the kernel, for
     example boundary parameters.
 
-    indexFields: list of index fields, i.e. 1D fields with struct data type
-    coordinateNames: name of the coordinate fields in the struct data type
+    index_fields: list of index fields, i.e. 1D fields with struct data type
+    coordinate_names: name of the coordinate fields in the struct data type
     """
 
-    if isinstance(equations, AssignmentCollection):
-        equations = equations.all_assignments
+    if isinstance(assignments, AssignmentCollection):
+        assignments = assignments.all_assignments
     if target == 'cpu':
-        from pystencils.cpu import createIndexedKernel
-        from pystencils.cpu import addOpenMP
-        ast = createIndexedKernel(equations, indexFields=indexFields, typeForSymbol=dataType,
-                                  coordinateNames=coordinateNames)
-        if cpuOpenMP:
-            addOpenMP(ast, numThreads=cpuOpenMP)
+        from pystencils.cpu import create_indexed_kernel
+        from pystencils.cpu import add_openmp
+        ast = create_indexed_kernel(assignments, index_fields=index_fields, type_info=data_type,
+                                    coordinate_names=coordinate_names)
+        if cpu_openmp:
+            add_openmp(ast, num_threads=cpu_openmp)
         return ast
     elif target == 'llvm':
         raise NotImplementedError("Indexed kernels are not yet supported in LLVM backend")
     elif target == 'gpu':
-        from pystencils.gpucuda import createdIndexedCUDAKernel
-        ast = createdIndexedCUDAKernel(equations, indexFields, typeForSymbol=dataType, coordinateNames=coordinateNames,
-                                       indexingCreator=indexingCreatorFromParams(gpuIndexing, gpuIndexingParams))
+        from pystencils.gpucuda import created_indexed_cuda_kernel
+        ast = created_indexed_cuda_kernel(assignments, index_fields, type_info=data_type, coordinate_names=coordinate_names,
+                                          indexing_creator=indexing_creator_from_params(gpu_indexing, gpu_indexing_params or {}))
         return ast
     else:
         raise ValueError("Unknown target %s. Has to be either 'cpu' or 'gpu'" % (target,))
diff --git a/kernelstep.py b/kernelstep.py
deleted file mode 100644
index 75c1bf5b6b1fdb02d1989d9192108a3d669f6528..0000000000000000000000000000000000000000
--- a/kernelstep.py
+++ /dev/null
@@ -1,8 +0,0 @@
-
-
-class KernelStep:
-
-    def __init__(self, dataHandling, updateRule):
-        # fields to sync before
-        # GPU sync fields
-        pass
diff --git a/llvm/__init__.py b/llvm/__init__.py
index 77fdd2da619c0a715ca8551811a6f59eb7b746ba..2bbac481b94d41a734c2ae2de7728e4435c1c332 100644
--- a/llvm/__init__.py
+++ b/llvm/__init__.py
@@ -1,3 +1,3 @@
-from .kernelcreation import createKernel, createIndexedKernel
-from .llvmjit import compileLLVM, generate_and_jit, Jit, makePythonFunction
-from .llvm import generateLLVM
+from .kernelcreation import create_kernel, create_indexed_kernel
+from .llvmjit import compile_llvm, generate_and_jit, Jit, make_python_function
+from .llvm import generate_llvm
diff --git a/llvm/kernelcreation.py b/llvm/kernelcreation.py
index c47ed21eb220f6ada761a84ddd68e4b76b8dc15f..c403a8810fc140ceba95211f8d9bd1417c376e16 100644
--- a/llvm/kernelcreation.py
+++ b/llvm/kernelcreation.py
@@ -1,102 +1,104 @@
 from pystencils.astnodes import SympyAssignment, Block, LoopOverCoordinate, KernelFunction
-from pystencils.transformations import resolveFieldAccesses, resolveBufferAccesses, \
-    typeAllEquations, moveConstantsBeforeLoop, insertCasts
+from pystencils.transformations import resolve_field_accesses, \
+    type_all_equations, move_constants_before_loop, insert_casts
 from pystencils.data_types import TypedSymbol, BasicType, StructType
-from pystencils.field import Field, FieldType
+from pystencils.field import FieldType
 from functools import partial
-from pystencils.llvm.llvmjit import makePythonFunction
+from pystencils.llvm.llvmjit import make_python_function
 
 
-def createKernel(listOfEquations, functionName="kernel", typeForSymbol=None, splitGroups=(),
-                 iterationSlice=None, ghostLayers=None):
+def create_kernel(assignments, function_name="kernel", type_info=None, split_groups=(),
+                  iteration_slice=None, ghost_layers=None):
     """
     Creates an abstract syntax tree for a kernel function, by taking a list of update rules.
 
     Loops are created according to the field accesses in the equations.
 
-    :param listOfEquations: list of sympy equations, containing accesses to :class:`pystencils.field.Field`.
+    :param assignments: list of sympy equations, containing accesses to :class:`pystencils.field.Field`.
            Defining the update rules of the kernel
-    :param functionName: name of the generated function - only important if generated code is written out
-    :param typeForSymbol: a map from symbol name to a C type specifier. If not specified all symbols are assumed to
+    :param function_name: name of the generated function - only important if generated code is written out
+    :param type_info: a map from symbol name to a C type specifier. If not specified all symbols are assumed to
            be of type 'double' except symbols which occur on the left hand side of equations where the
            right hand side is a sympy Boolean which are assumed to be 'bool' .
-    :param splitGroups: Specification on how to split up inner loop into multiple loops. For details see
-           transformation :func:`pystencils.transformation.splitInnerLoop`
-    :param iterationSlice: if not None, iteration is done only over this slice of the field
-    :param ghostLayers: a sequence of pairs for each coordinate with lower and upper nr of ghost layers
+    :param split_groups: Specification on how to split up inner loop into multiple loops. For details see
+           transformation :func:`pystencils.transformations.split_inner_loop`
+    :param iteration_slice: if not None, iteration is done only over this slice of the field
+    :param ghost_layers: a sequence of pairs for each coordinate with lower and upper nr of ghost layers
                         if None, the number of ghost layers is determined automatically and assumed to be equal for a
                         all dimensions
 
     :return: :class:`pystencils.ast.KernelFunction` node
     """
-    from pystencils.cpu import createKernel
-    code = createKernel(listOfEquations, functionName, typeForSymbol, splitGroups, iterationSlice, ghostLayers)
-    code = insertCasts(code)
-    code.compile = partial(makePythonFunction, code)
+    from pystencils.cpu import create_kernel
+    code = create_kernel(assignments, function_name, type_info, split_groups, iteration_slice, ghost_layers)
+    code = insert_casts(code)
+    code.compile = partial(make_python_function, code)
     return code
 
 
-def createIndexedKernel(listOfEquations, indexFields, functionName="kernel", typeForSymbol=None,
-                        coordinateNames=('x', 'y', 'z')):
+def create_indexed_kernel(assignments, index_fields, function_name="kernel", type_info=None,
+                          coordinate_names=('x', 'y', 'z')):
     """
-    Similar to :func:`createKernel`, but here not all cells of a field are updated but only cells with
+    Similar to :func:`create_kernel`, but here not all cells of a field are updated but only cells with
     coordinates which are stored in an index field. This traversal method can e.g. be used for boundary handling.
 
-    The coordinates are stored in a separated indexField, which is a one dimensional array with struct data type.
+    The coordinates are stored in a separate index_field, which is a one-dimensional array with struct data type.
     This struct has to contain fields named 'x', 'y' and for 3D fields ('z'). These names are configurable with the
-    'coordinateNames' parameter. The struct can have also other fields that can be read and written in the kernel, for
+    'coordinate_names' parameter. The struct can also have other fields that can be read and written in the kernel, for
     example boundary parameters.
 
-    :param listOfEquations: list of update equations or AST nodes
-    :param indexFields: list of index fields, i.e. 1D fields with struct data type
-    :param typeForSymbol: see documentation of :func:`createKernel`
-    :param functionName: see documentation of :func:`createKernel`
-    :param coordinateNames: name of the coordinate fields in the struct data type
+    :param assignments: list of update equations or AST nodes
+    :param index_fields: list of index fields, i.e. 1D fields with struct data type
+    :param type_info: see documentation of :func:`create_kernel`
+    :param function_name: see documentation of :func:`create_kernel`
+    :param coordinate_names: name of the coordinate fields in the struct data type
     :return: abstract syntax tree
     """
-    fieldsRead, fieldsWritten, assignments = typeAllEquations(listOfEquations, typeForSymbol)
-    allFields = fieldsRead.union(fieldsWritten)
+    fields_read, fields_written, assignments = type_all_equations(assignments, type_info)
+    all_fields = fields_read.union(fields_written)
 
-    for indexField in indexFields:
+    for indexField in index_fields:
         indexField.fieldType = FieldType.INDEXED
-        assert FieldType.isIndexed(indexField)
-        assert indexField.spatialDimensions == 1, "Index fields have to be 1D"
-
-    nonIndexFields = [f for f in allFields if f not in indexFields]
-    spatialCoordinates = {f.spatialDimensions for f in nonIndexFields}
-    assert len(spatialCoordinates) == 1, "Non-index fields do not have the same number of spatial coordinates"
-    spatialCoordinates = list(spatialCoordinates)[0]
-
-    def getCoordinateSymbolAssignment(name):
-        for indexField in indexFields:
-            assert isinstance(indexField.dtype, StructType), "Index fields have to have a struct datatype"
-            dataType = indexField.dtype
-            if dataType.has_element(name):
-                rhs = indexField[0](name)
-                lhs = TypedSymbol(name, BasicType(dataType.get_element_type(name)))
+        assert FieldType.is_indexed(indexField)
+        assert indexField.spatial_dimensions == 1, "Index fields have to be 1D"
+
+    non_index_fields = [f for f in all_fields if f not in index_fields]
+    spatial_coordinates = {f.spatial_dimensions for f in non_index_fields}
+    assert len(spatial_coordinates) == 1, "Non-index fields do not have the same number of spatial coordinates"
+    spatial_coordinates = list(spatial_coordinates)[0]
+
+    def get_coordinate_symbol_assignment(name):
+        for index_field in index_fields:
+            assert isinstance(index_field.dtype, StructType), "Index fields have to have a struct datatype"
+            data_type = index_field.dtype
+            if data_type.has_element(name):
+                rhs = index_field[0](name)
+                lhs = TypedSymbol(name, BasicType(data_type.get_element_type(name)))
                 return SympyAssignment(lhs, rhs)
         raise ValueError("Index %s not found in any of the passed index fields" % (name,))
 
-    coordinateSymbolAssignments = [getCoordinateSymbolAssignment(n) for n in coordinateNames[:spatialCoordinates]]
-    coordinateTypedSymbols = [eq.lhs for eq in coordinateSymbolAssignments]
-    assignments = coordinateSymbolAssignments + assignments
+    coordinate_symbol_assignments = [get_coordinate_symbol_assignment(n)
+                                     for n in coordinate_names[:spatial_coordinates]]
+    coordinate_typed_symbols = [eq.lhs for eq in coordinate_symbol_assignments]
+    assignments = coordinate_symbol_assignments + assignments
 
     # make 1D loop over index fields
-    loopBody = Block([])
-    loopNode = LoopOverCoordinate(loopBody, coordinate_to_loop_over=0, start=0, stop=indexFields[0].shape[0])
+    loop_body = Block([])
+    loop_node = LoopOverCoordinate(loop_body, coordinate_to_loop_over=0, start=0, stop=index_fields[0].shape[0])
 
     for assignment in assignments:
-        loopBody.append(assignment)
+        loop_body.append(assignment)
 
-    functionBody = Block([loopNode])
-    ast = KernelFunction(functionBody, None, functionName, backend='llvm')
+    function_body = Block([loop_node])
+    ast = KernelFunction(function_body, None, function_name, backend='llvm')
 
-    fixedCoordinateMapping = {f.name: coordinateTypedSymbols for f in nonIndexFields}
-    resolveFieldAccesses(ast, set(['indexField']), field_to_fixed_coordinates=fixedCoordinateMapping)
-    moveConstantsBeforeLoop(ast)
+    read_only_fields = set([f.name for f in fields_read - fields_written])
+    fixed_coordinate_mapping = {f.name: coordinate_typed_symbols for f in non_index_fields}
+    resolve_field_accesses(ast, read_only_fields, field_to_fixed_coordinates=fixed_coordinate_mapping)
+    move_constants_before_loop(ast)
 
     desympy_ast(ast)
     insert_casts(ast)
-    ast.compile = partial(makePythonFunction, ast)
+    ast.compile = partial(make_python_function, ast)
 
     return ast
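
A short sketch of the LLVM path end to end (not part of the patch; field constructor
and calling convention assumed as in the earlier examples). The compile attribute is
the functools.partial(make_python_function, ...) binding set above:

    import numpy as np
    from pystencils import Field, Assignment, create_kernel

    src_arr, dst_arr = np.zeros((16, 16)), np.zeros((16, 16))
    src = Field.create_from_numpy_array('src', src_arr)   # constructor name assumed
    dst = Field.create_from_numpy_array('dst', dst_arr)

    ast = create_kernel([Assignment(dst[0, 0], 2 * src[0, 0])], target='llvm')
    kernel = ast.compile()            # -> make_python_function(ast) via partial
    kernel(src=src_arr, dst=dst_arr)  # field arrays passed by name (convention assumed)
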
diff --git a/llvm/llvm.py b/llvm/llvm.py
index 4a0d8d97fb6acc7f5d3ef6eb0482e9e847205db0..f85f97ac6b1d420f732ea0d0e26546983827e946 100644
--- a/llvm/llvm.py
+++ b/llvm/llvm.py
@@ -1,29 +1,24 @@
-import llvmlite.ir as ir
 import functools
-
+from sympy import S, Indexed
 from sympy.printing.printer import Printer
-from sympy import S
-# S is numbers?
-
+import llvmlite.ir as ir
+from pystencils.assignment import Assignment
 from pystencils.llvm.control_flow import Loop
 from pystencils.data_types import create_type, to_llvm_type, get_type_of_expression, collate_types, \
     create_composite_type_from_string
-from sympy import Indexed
-from pystencils.assignment import Assignment
 
 
-def generateLLVM(ast_node, module=None, builder=None):
-    """
-    Prints the ast as llvm code
-    """
+def generate_llvm(ast_node, module=None, builder=None):
+    """Prints the ast as llvm code."""
     if module is None:
         module = ir.Module()
     if builder is None:
         builder = ir.IRBuilder()
     printer = LLVMPrinter(module, builder)
-    return printer._print(ast_node)  # TODO use doprint() instead???
+    return printer._print(ast_node)
 
 
+# noinspection PyPep8Naming
 class LLVMPrinter(Printer):
     """Convert expressions to LLVM IR"""
 
@@ -158,27 +153,27 @@ class LLVMPrinter(Printer):
             comparison = self.builder.icmp_signed
         return comparison(cmpop, self._print(expr.lhs), self._print(expr.rhs))
 
-    def _print_KernelFunction(self, function):
+    def _print_KernelFunction(self, func):
         # KernelFunction does not posses a return type
         return_type = self.void
         parameter_type = []
-        for parameter in function.parameters:
+        for parameter in func.parameters:
             parameter_type.append(to_llvm_type(parameter.dtype))
         func_type = ir.FunctionType(return_type, tuple(parameter_type))
-        name = function.functionName
+        name = func.function_name
         fn = ir.Function(self.module, func_type, name)
         self.ext_fn[name] = fn
 
         # set proper names to arguments
         for i, arg in enumerate(fn.args):
-            arg.name = function.parameters[i].name
-            self.func_arg_map[function.parameters[i].name] = arg
+            arg.name = func.parameters[i].name
+            self.func_arg_map[func.parameters[i].name] = arg
 
         # func.attributes.add("inlinehint")
         # func.attributes.add("argmemonly")
         block = fn.append_basic_block(name="entry")
         self.builder = ir.IRBuilder(block)  # TODO use goto_block instead
-        self._print(function.body)
+        self._print(func.body)
         self.builder.ret_void()
         self.fn = fn
         return fn
@@ -205,23 +200,23 @@ class LLVMPrinter(Printer):
         self.func_arg_map[assignment.lhs.name] = expr
         return expr
 
-    def _print_castFunc(self, conversion):
+    def _print_cast_func(self, conversion):
         node = self._print(conversion.args[0])
         to_dtype = get_type_of_expression(conversion)
         from_dtype = get_type_of_expression(conversion.args[0])
         # (From, to)
         decision = {
-            (create_composite_type_from_string("int"), create_composite_type_from_string("double")): functools.partial(
-                self.builder.sitofp, node, self.fp_type),
-            (create_composite_type_from_string("double"), create_composite_type_from_string("int")): functools.partial(
-                self.builder.fptosi, node, self.integer),
-            (create_composite_type_from_string("double *"), create_composite_type_from_string("int")): functools.partial(
-                self.builder.ptrtoint, node, self.integer),
-            (create_composite_type_from_string("int"), create_composite_type_from_string("double *")): functools.partial(self.builder.inttoptr, node,
-                                                                                                                         self.fp_pointer),
-            (create_composite_type_from_string("double * restrict"), create_composite_type_from_string("int")): functools.partial(
-                self.builder.ptrtoint, node,
-                self.integer),
+            (create_composite_type_from_string("int"),
+             create_composite_type_from_string("double")): functools.partial(self.builder.sitofp, node, self.fp_type),
+            (create_composite_type_from_string("double"),
+             create_composite_type_from_string("int")): functools.partial(self.builder.fptosi, node, self.integer),
+            (create_composite_type_from_string("double *"),
+             create_composite_type_from_string("int")): functools.partial(self.builder.ptrtoint, node, self.integer),
+            (create_composite_type_from_string("int"),
+             create_composite_type_from_string("double *")): functools.partial(self.builder.inttoptr,
+                                                                               node, self.fp_pointer),
+            (create_composite_type_from_string("double * restrict"),
+             create_composite_type_from_string("int")): functools.partial(self.builder.ptrtoint, node, self.integer),
             (create_composite_type_from_string("int"),
              create_composite_type_from_string("double * restrict")): functools.partial(self.builder.inttoptr, node,
                                                                                         self.fp_pointer),
@@ -229,8 +224,8 @@ class LLVMPrinter(Printer):
              create_composite_type_from_string("int")): functools.partial(self.builder.ptrtoint, node,
                                                                           self.integer),
             (create_composite_type_from_string("int"),
-             create_composite_type_from_string("double * restrict const")): functools.partial(self.builder.inttoptr, node,
-                                                                                              self.fp_pointer),
+             create_composite_type_from_string("double * restrict const")): functools.partial(self.builder.inttoptr,
+                                                                                              node, self.fp_pointer),
         }
         # TODO float, TEST: const, restrict
         # TODO bitcast, addrspacecast
@@ -242,7 +237,7 @@ class LLVMPrinter(Printer):
         # print((from_dtype, to_dtype))
         return decision[(from_dtype, to_dtype)]()
 
-    def _print_pointerArithmeticFunc(self, pointer):
+    def _print_pointer_arithmetic_func(self, pointer):
         ptr = self._print(pointer.args[0])
         index = self._print(pointer.args[1])
         return self.builder.gep(ptr, [index])
@@ -302,7 +297,7 @@ class LLVMPrinter(Printer):
             self.ext_fn[name] = fn
         return self.builder.call(fn, [e0], name)
 
-    def emptyPrinter(self, expr):
+    def emptyPrinter(self, expr):  # keep camelCase: sympy's Printer calls self.emptyPrinter as fallback
         try:
             import inspect
             mro = inspect.getmro(expr)
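
The entry point generate_llvm simply drives LLVMPrinter over an AST node; a sketch of
inspecting its output (not part of the patch; 'ast' stands for a kernel AST as built
by pystencils.llvm.create_kernel):

    from pystencils.llvm import generate_llvm

    fn = generate_llvm(ast)   # for a KernelFunction this returns the emitted ir.Function
    print(fn.module)          # textual LLVM IR of the whole module
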
diff --git a/llvm/llvmjit.py b/llvm/llvmjit.py
index 7625eec5786b1b0e6127d0265ab7f92c031f65c7..56649be9e3d9c31de23bfa731e587958bfcbec13 100644
--- a/llvm/llvmjit.py
+++ b/llvm/llvmjit.py
@@ -7,31 +7,31 @@ import shutil
 
 from pystencils.data_types import create_composite_type_from_string
 from ..data_types import to_ctypes, ctypes_from_llvm
-from .llvm import generateLLVM
-from ..cpu.cpujit import buildCTypeArgumentList, makePythonFunctionIncompleteParams
+from .llvm import generate_llvm
+from ..cpu.cpujit import build_ctypes_argument_list, make_python_function_incomplete_params
 
 
 def generate_and_jit(ast):
-    gen = generateLLVM(ast)
+    gen = generate_llvm(ast)
     if isinstance(gen, ir.Module):
-        return compileLLVM(gen)
+        return compile_llvm(gen)
     else:
-        return compileLLVM(gen.module)
+        return compile_llvm(gen.module)
 
 
-def makePythonFunction(ast, argumentDict={}, func=None):
+def make_python_function(ast, argument_dict=None, func=None):
     if func is None:
         jit = generate_and_jit(ast)
-        func = jit.get_function_ptr(ast.functionName)
+        func = jit.get_function_ptr(ast.function_name)
     try:
-        args = buildCTypeArgumentList(ast.parameters, argumentDict)
+        args = build_ctypes_argument_list(ast.parameters, argument_dict or {})
     except KeyError:
         # not all parameters specified yet
-        return makePythonFunctionIncompleteParams(ast, argumentDict, func)
+        return make_python_function_incomplete_params(ast, argument_dict or {}, func)
     return lambda: func(*args)
 
 
-def compileLLVM(module):
+def compile_llvm(module):
     jit = Jit()
     jit.parse(module)
     jit.optimize()
@@ -51,7 +51,8 @@ class Jit(object):
         self.target = llvm.Target.from_default_triple()
         self.cpu = llvm.get_host_cpu_name()
         self.cpu_features = llvm.get_host_cpu_features()
-        self.target_machine = self.target.create_target_machine(cpu=self.cpu, features=self.cpu_features.flatten(), opt=2)
+        self.target_machine = self.target.create_target_machine(cpu=self.cpu, features=self.cpu_features.flatten(),
+                                                                opt=2)
         llvm.check_jit_execution()
         self.ee = llvm.create_mcjit_compiler(self.llvmmod, self.target_machine)
         self.ee.finalize_object()
@@ -124,18 +125,18 @@ class Jit(object):
 
     def compile(self):
         fptr = {}
-        for function in self.module.functions:
-            if not function.is_declaration:
+        for func in self.module.functions:
+            if not func.is_declaration:
                 return_type = None
-                if function.ftype.return_type != ir.VoidType():
-                    return_type = to_ctypes(create_composite_type_from_string(str(function.ftype.return_type)))
-                args = [ctypes_from_llvm(arg) for arg in function.ftype.args]
-                function_address = self.ee.get_function_address(function.name)
-                fptr[function.name] = ct.CFUNCTYPE(return_type, *args)(function_address)
+                if func.ftype.return_type != ir.VoidType():
+                    return_type = to_ctypes(create_composite_type_from_string(str(func.ftype.return_type)))
+                args = [ctypes_from_llvm(arg) for arg in func.ftype.args]
+                function_address = self.ee.get_function_address(func.name)
+                fptr[func.name] = ct.CFUNCTYPE(return_type, *args)(function_address)
         self.fptr = fptr
 
-    def __call__(self, function, *args, **kwargs):
-        target_function = next(f for f in self.module.functions if f.name == function)
+    def __call__(self, func_name, *args, **kwargs):
+        target_function = next(f for f in self.module.functions if f.name == func_name)
         arg_types = [ctypes_from_llvm(arg.type) for arg in target_function.args]
 
         transformed_args = []
@@ -145,7 +146,7 @@ class Jit(object):
             else:
                 transformed_args.append(arg)
 
-        self.fptr[function](*transformed_args)
+        self.fptr[func](*transformed_args)
 
     def print_functions(self):
         for f in self.module.functions:
@@ -155,6 +156,3 @@ class Jit(object):
         fptr = self.fptr[name]
         fptr.jit = self
         return fptr
-
-
-
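
Since Jit is easiest to understand in isolation, a self-contained sketch using a
hand-built llvmlite module (not part of the patch; it assumes Jit.parse accepts an
ir.Module, as compile_llvm above suggests):

    import llvmlite.ir as ir
    from pystencils.llvm.llvmjit import compile_llvm

    # hand-built module with one function: double twice(double x) { return x + x; }
    module = ir.Module()
    fnty = ir.FunctionType(ir.DoubleType(), [ir.DoubleType()])
    fn = ir.Function(module, fnty, name='twice')
    builder = ir.IRBuilder(fn.append_basic_block('entry'))
    builder.ret(builder.fadd(fn.args[0], fn.args[0]))

    jit = compile_llvm(module)              # parse -> optimize -> compile (see Jit above)
    twice = jit.get_function_ptr('twice')   # ctypes wrapper built in Jit.compile
    assert twice(21.0) == 42.0
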
diff --git a/parallel/blockiteration.py b/parallel/blockiteration.py
index 110f7a6738ed724725c70a76dde46ae28d99b9bd..12ef85f29653623fb88e627fadfcd4753403bd31 100644
--- a/parallel/blockiteration.py
+++ b/parallel/blockiteration.py
@@ -1,151 +1,124 @@
 """
-This module contains function that simplify the iteration over waLBerlas distributed data structure.
+This module contains functions that simplify the iteration over walberla's distributed data structure.
 These function simplify the iteration over rectangular slices, managing the mapping between block local coordinates and
 global coordinates.
 """
 import numpy as np
-from pystencils.slicing import normalizeSlice
+
+from pystencils.datahandling.datahandling_interface import Block
+from pystencils.slicing import normalize_slice
 try:
+    # noinspection PyPep8Naming
     import waLBerla as wlb
 except ImportError:
     wlb = None
 
 
-def blockIteration(blocks, ghostLayers, dim=3, accessPrefix=''):
-    """
-    Iterator that simplifies the access to field data by automatically converting from waLBerla fields to
+def block_iteration(blocks, ghost_layers, dim=3, access_prefix=''):
+    """Simple iteration over parallel walberla domain.
+
+    Iterator that simplifies the access to field data by automatically converting from walberla fields to
     numpy arrays
-    :param blocks: waLBerla block data structure
-    :param ghostLayers: how many ghost layers to include (outer and inner)
-    :param dim: waLBerlas block data structure is 3D - 2D domains can be done by setting zSize=1
-                if dim=2 is set here, the third coordinate of the returned fields is accessed at z=0 automatically
-    :param accessPrefix: see documentation of slicedBlockIteration
+
+    Args:
+        blocks: walberla block data structure
+        ghost_layers: how many ghost layers to include (outer and inner)
+        dim: walberla's block data structure is 3D; 2D domains can be handled by setting zSize=1
+             if dim=2 is set here, the third coordinate of the returned fields is accessed at z=0 automatically
+        access_prefix: see documentation of sliced_block_iteration
     """
     for block in blocks:
-        cellInterval = blocks.getBlockCellBB(block)
-        cellInterval.expand(ghostLayers)
-        localSlice = [slice(0, w, None) for w in cellInterval.size]
+        cell_interval = blocks.getBlockCellBB(block)
+        cell_interval.expand(ghost_layers)
+        local_slice = [slice(0, w, None) for w in cell_interval.size]
         if dim == 2:
-            localSlice[2] = ghostLayers
-        yield ParallelBlock(block, cellInterval.min[:dim], tuple(localSlice), ghostLayers, accessPrefix)
-
-
-def slicedBlockIteration(blocks, sliceObj=None, innerGhostLayers=1, outerGhostLayers=1, dim=3, accessPrefix=''):
-    """
-    Iterates of all blocks that have an intersection with the given slice object.
-    For these blocks a Block object is yielded
-    
-    :param blocks: waLBerla block data structure
-    :param sliceObj: a slice (i.e. rectangular sub-region), can be created with makeSlice[]
-    :param innerGhostLayers: how many ghost layers are included in the local slice and the optional index arrays
-    :param outerGhostLayers: slices can have relative coordinates e.g. makeSlice[0.2, :, :]
-                             when computing absolute values, the domain size is needed. This parameter
-                             specifies how many ghost layers are taken into account for this operation.
-    :param dim: set to 2 for pseudo 2D simulation (i.e. where z coordinate of blocks has extent 1)
-                the arrays returned when indexing the block
-    :param accessPrefix: when accessing block data, this prefix is prepended to the access name
-                         mostly used to switch between CPU and GPU field access (gpu fields are added with a
-                         certain prefix 'gpu_')
-    Example: assume no slice is given, then sliceNormalizationGhostLayers effectively sets how much ghost layers
-    at the border of the domain are included. The innerGhostLayers parameter specifies how many inner ghost layers are
-    included
+            local_slice[2] = ghost_layers
+        yield ParallelBlock(block, cell_interval.min[:dim], tuple(local_slice), ghost_layers, access_prefix)
+
+
+def sliced_block_iteration(blocks, slice_obj=None, inner_ghost_layers=1, outer_ghost_layers=1, dim=3, access_prefix=''):
+    """Iterates of all blocks that have an intersection with the given slice object.
+
+    For each intersecting block a Block object is yielded.
+
+    Args:
+        blocks: walberla block data structure
+        slice_obj: a slice (i.e. rectangular sub-region), can be created with makeSlice[]
+        inner_ghost_layers: how many ghost layers are included in the local slice and the optional index arrays
+        outer_ghost_layers: slices can have relative coordinates e.g. makeSlice[0.2, :, :]
+                          when computing absolute values, the domain size is needed. This parameter
+                          specifies how many ghost layers are taken into account for this operation.
+        dim: set to 2 for pseudo 2D simulation (i.e. where z coordinate of blocks has extent 1);
+             the z coordinate is then dropped from the arrays returned when indexing the block
+        access_prefix: when accessing block data, this prefix is prepended to the access name
+                      mostly used to switch between CPU and GPU field access (gpu fields are added with a
+                      certain prefix 'gpu_')
+
+    Example:
+        If no slice is given, outer_ghost_layers effectively sets how many ghost layers at the
+        border of the domain are included. The inner_ghost_layers parameter specifies how many
+        inner ghost layers are included.
     """
-    if sliceObj is None:
-        sliceObj = tuple([slice(None, None, None)] * dim)
+    if slice_obj is None:
+        slice_obj = tuple([slice(None, None, None)] * dim)
     if dim == 2:
-        sliceObj += (innerGhostLayers, )
+        slice_obj += (inner_ghost_layers,)
 
-    domainCellBB = blocks.getDomainCellBB()
-    domainExtent = [s + 2 * outerGhostLayers for s in domainCellBB.size]
-    sliceObj = normalizeSlice(sliceObj, domainExtent)
-    targetCellBB = wlb.CellInterval.fromSlice(sliceObj)
-    targetCellBB.shift(*[a - outerGhostLayers for a in domainCellBB.min])
+    domain_cell_bb = blocks.getDomainCellBB()
+    domain_extent = [s + 2 * outer_ghost_layers for s in domain_cell_bb.size]
+    slice_obj = normalize_slice(slice_obj, domain_extent)
+    target_cell_bb = wlb.CellInterval.fromSlice(slice_obj)
+    target_cell_bb.shift(*[a - outer_ghost_layers for a in domain_cell_bb.min])
 
     for block in blocks:
-        intersection = blocks.getBlockCellBB(block).getExpanded(innerGhostLayers)
-        intersection.intersect(targetCellBB)
+        intersection = blocks.getBlockCellBB(block).getExpanded(inner_ghost_layers)
+        intersection.intersect(target_cell_bb)
         if intersection.empty():
             continue
 
-        localTargetBB = blocks.transformGlobalToLocal(block, intersection)
-        localTargetBB.shift(innerGhostLayers, innerGhostLayers, innerGhostLayers)
-        localSlice = localTargetBB.toSlice(False)
+        local_target_bb = blocks.transformGlobalToLocal(block, intersection)
+        local_target_bb.shift(inner_ghost_layers, inner_ghost_layers, inner_ghost_layers)
+        local_slice = local_target_bb.toSlice(False)
         if dim == 2:
-            localSlice = (localSlice[0], localSlice[1], innerGhostLayers)
-        yield ParallelBlock(block, intersection.min[:dim], localSlice, innerGhostLayers, accessPrefix)
-
-
-class Block:
-    def __init__(self, offset, localSlice):
-        self._offset = offset
-        self._localSlice = localSlice
-
-    @property
-    def offset(self):
-        """Offset of the current block in global coordinates (where lower ghost layers have negative indices)"""
-        return self._offset
-
-    @property
-    def cellIndexArrays(self):
-        """Global coordinate meshgrid of cell coordinates. Cell indices start at 0 at the first inner cell,
-        lower ghost layers have negative indices"""
-        meshGridParams = [offset + np.arange(width, dtype=np.int32)
-                          for offset, width in zip(self.offset, self.shape)]
-        return np.meshgrid(*meshGridParams, indexing='ij', copy=False)
-
-    @property
-    def midpointArrays(self):
-        """Global coordinate meshgrid of cell midpoints which are shifted by 0.5 compared to cell indices"""
-        meshGridParams = [offset + 0.5 + np.arange(width, dtype=float)
-                          for offset, width in zip(self.offset, self.shape)]
-        return np.meshgrid(*meshGridParams, indexing='ij', copy=False)
-
-    @property
-    def shape(self):
-        """Shape of the fields (potentially including ghost layers)"""
-        return tuple(s.stop - s.start for s in self._localSlice[:len(self._offset)])
-
-    @property
-    def globalSlice(self):
-        """Slice in global coordinates"""
-        return tuple(slice(off, off+size) for off, size in zip(self._offset, self.shape))
+            local_slice = (local_slice[0], local_slice[1], inner_ghost_layers)
+        yield ParallelBlock(block, intersection.min[:dim], local_slice, inner_ghost_layers, access_prefix)
+
 
 # ----------------------------- Implementation details -----------------------------------------------------------------
 
 
 class SerialBlock(Block):
-    """Simple mockup block that is used if SerialDataHandling."""
-    def __init__(self, fieldDict, offset, localSlice):
-        super(SerialBlock, self).__init__(offset, localSlice)
-        self._fieldDict = fieldDict
+    """Simple mock-up block that is used for SerialDataHandling."""
+    def __init__(self, field_dict, offset, local_slice):
+        super(SerialBlock, self).__init__(offset, local_slice)
+        self._field_dict = field_dict
 
-    def __getitem__(self, dataName):
-        result = self._fieldDict[dataName]
+    def __getitem__(self, data_name):
+        result = self._field_dict[data_name]
         if isinstance(result, np.ndarray):
             result = result[self._localSlice]
         return result
 
 
 class ParallelBlock(Block):
-    def __init__(self, block, offset, localSlice, innerGhostLayers, namePrefix):
-        super(ParallelBlock, self).__init__(offset, localSlice)
+    def __init__(self, block, offset, local_slice, inner_ghost_layers, name_prefix):
+        super(ParallelBlock, self).__init__(offset, local_slice)
         self._block = block
-        self._gls = innerGhostLayers
-        self._namePrefix = namePrefix
+        self._gls = inner_ghost_layers
+        self._name_prefix = name_prefix
 
-    def __getitem__(self, dataName):
-        result = self._block[self._namePrefix + dataName]
-        typeName = type(result).__name__
-        if typeName == 'GhostLayerField':
+    def __getitem__(self, data_name):
+        result = self._block[self._name_prefix + data_name]
+        type_name = type(result).__name__
+        if type_name == 'GhostLayerField':
             result = wlb.field.toArray(result, withGhostLayers=self._gls)
-            result = self._normalizeArrayShape(result)
-        elif typeName == 'GpuField':
+            result = self._normalize_array_shape(result)
+        elif type_name == 'GpuField':
             result = wlb.cuda.toGpuArray(result, withGhostLayers=self._gls)
-            result = self._normalizeArrayShape(result)
+            result = self._normalize_array_shape(result)
         return result
 
-    def _normalizeArrayShape(self, arr):
+    def _normalize_array_shape(self, arr):
         if arr.shape[-1] == 1:
             arr = arr[..., 0]
         return arr[self._localSlice]
-
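
A usage sketch for the iterators above (not part of the patch). It needs a running
waLBerla setup; createUniformBlockGrid, addToStorage and the midpoint_arrays property
of the imported Block base class are assumed names here:

    import waLBerla as wlb
    from pystencils.parallel.blockiteration import block_iteration

    # hypothetical distributed domain with one velocity field
    blocks = wlb.createUniformBlockGrid(cells=(64, 64, 64), periodic=(1, 1, 1))
    wlb.field.addToStorage(blocks, 'vel', float, fSize=3, ghostLayers=1)

    for block in block_iteration(blocks, ghost_layers=1):
        vel = block['vel']               # numpy view of the walberla field (see ParallelBlock)
        x, y, z = block.midpoint_arrays  # global cell midpoints (property name assumed)
        vel[..., 0] = 0.1 * x            # initialize with a global-coordinate profile
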
diff --git a/plot2d.py b/plot2d.py
index 9fc234ab7dfe3c36a34340c6a9db8e90fc97739b..6fc0729dc3829a4f95827cbb0d4512f5ffeac2f1 100644
--- a/plot2d.py
+++ b/plot2d.py
@@ -1,7 +1,7 @@
 from matplotlib.pyplot import *
 
 
-def vectorField(field, step=2, **kwargs):
+def vector_field(field, step=2, **kwargs):
     """
     Plot given vector field as quiver (arrow) plot.
 
@@ -10,13 +10,13 @@ def vectorField(field, step=2, **kwargs):
     :param step: plots only every step-th cell
     :param kwargs: keyword arguments passed to :func:`matplotlib.pyplot.quiver`
     """
-    veln = field.swapaxes(0, 1)
-    res = quiver(veln[::step, ::step, 0], veln[::step, ::step, 1], **kwargs)
+    vel_n = field.swapaxes(0, 1)
+    res = quiver(vel_n[::step, ::step, 0], vel_n[::step, ::step, 1], **kwargs)
     axis('equal')
     return res
 
 
-def vectorFieldMagnitude(field, **kwargs):
+def vector_field_magnitude(field, **kwargs):
     """
     Plots the magnitude of a vector field as colormap
     :param field: numpy array with 3 dimensions, first two are spatial x,y coordinate, the last
@@ -27,122 +27,121 @@ def vectorFieldMagnitude(field, **kwargs):
     norm = norm(field, axis=2, ord=2)
     if hasattr(field, 'mask'):
         norm = np.ma.masked_array(norm, mask=field.mask[:, :, 0])
-    return scalarField(norm, **kwargs)
+    return scalar_field(norm, **kwargs)
 
 
-def scalarField(field, **kwargs):
+def scalar_field(field, **kwargs):
     """
     Plots field values as colormap
 
     :param field: two dimensional numpy array
     :param kwargs: keyword arguments passed to :func:`matplotlib.pyplot.imshow`
     """
-    import numpy as np
-    field = np.swapaxes(field, 0, 1)
+    import numpy
+    field = numpy.swapaxes(field, 0, 1)
     res = imshow(field, origin='lower', **kwargs)
     axis('equal')
     return res
 
 
-def scalarFieldAlphaValue(field, color, clip=False, **kwargs):
-    import numpy as np
+def scalar_field_alpha_value(field, color, clip=False, **kwargs):
+    import numpy
     import matplotlib
-    field = np.swapaxes(field, 0, 1)
+    field = numpy.swapaxes(field, 0, 1)
     color = matplotlib.colors.to_rgba(color)
 
-    fieldToPlot = np.empty(field.shape + (4,))
+    field_to_plot = numpy.empty(field.shape + (4,))
     for i in range(3):
-        fieldToPlot[:, :, i] = color[i]
+        field_to_plot[:, :, i] = color[i]
 
     if clip:
-        normalizedField = field.copy()
-        normalizedField[normalizedField<0] = 0
-        normalizedField[normalizedField>1] = 1
+        normalized_field = field.copy()
+        normalized_field[normalized_field < 0] = 0
+        normalized_field[normalized_field > 1] = 1
     else:
-        min, max = np.min(field), np.max(field)
-        normalizedField = (field - min) / (max - min)
-    fieldToPlot[:, :, 3] = normalizedField
+        minimum, maximum = numpy.min(field), numpy.max(field)
+        normalized_field = (field - minimum) / (maximum - minimum)
+    field_to_plot[:, :, 3] = normalized_field
 
-    res = imshow(fieldToPlot, origin='lower', **kwargs)
+    res = imshow(field_to_plot, origin='lower', **kwargs)
     axis('equal')
     return res
 
 
-def scalarFieldContour(field, **kwargs):
+def scalar_field_contour(field, **kwargs):
     field = np.swapaxes(field, 0, 1)
     res = contour(field, **kwargs)
     axis('equal')
     return res
 
 
-def multipleScalarFields(field, **kwargs):
-    subPlots = field.shape[-1]
-    for i in range(subPlots):
-        subplot(1, subPlots, i + 1)
+def multiple_scalar_fields(field, **_):
+    sub_plots = field.shape[-1]
+    for i in range(sub_plots):
+        subplot(1, sub_plots, i + 1)
         title(str(i))
-        scalarField(field[..., i])
+        scalar_field(field[..., i])
         colorbar()
 
 
-def sympyFunction(f, var, bounds, **kwargs):
+def sympy_function(f, var, bounds, **kwargs):
     import sympy as sp
-    xArr = np.linspace(bounds[0], bounds[1], 101)
-    yArr = sp.lambdify(var, f)(xArr)
-    plot(xArr, yArr, **kwargs)
+    x_arr = np.linspace(bounds[0], bounds[1], 101)
+    y_arr = sp.lambdify(var, f)(x_arr)
+    plot(x_arr, y_arr, **kwargs)
 
 # ------------------------------------------- Animations ---------------------------------------------------------------
 
 
-def vectorFieldAnimation(runFunction, step=2, rescale=True, plotSetupFunction=lambda: None,
-                         plotUpdateFunction=lambda: None, interval=30, frames=180, **kwargs):
+def vector_field_animation(run_function, step=2, rescale=True, plot_setup_function=lambda: None,
+                           plot_update_function=lambda: None, interval=30, frames=180, **kwargs):
     import matplotlib.animation as animation
     from numpy.linalg import norm
 
     fig = gcf()
     im = None
-    field = runFunction()
+    field = run_function()
     if rescale:
-        maxNorm = np.max(norm(field, axis=2, ord=2))
-        field = field / maxNorm
+        max_norm = np.max(norm(field, axis=2, ord=2))
+        field = field / max_norm
         if 'scale' not in kwargs:
             kwargs['scale'] = 1.0
 
-    quiverPlot = vectorField(field, step=step, **kwargs)
-    plotSetupFunction()
+    quiver_plot = vector_field(field, step=step, **kwargs)
+    plot_setup_function()
 
-    def updatefig(*args):
-        f = runFunction()
+    def update_figure(*_):
+        f = run_function()
         f = np.swapaxes(f, 0, 1)
         if rescale:
-            maxNorm = np.max(norm(f, axis=2, ord=2))
-            f = f / maxNorm
+            f = f / np.max(norm(f, axis=2, ord=2))
         u, v = f[::step, ::step, 0], f[::step, ::step, 1]
-        quiverPlot.set_UVC(u, v)
-        plotUpdateFunction()
+        quiver_plot.set_UVC(u, v)
+        plot_update_function()
         return im,
 
-    return animation.FuncAnimation(fig, updatefig, interval=interval, frames=frames)
+    return animation.FuncAnimation(fig, update_figure, interval=interval, frames=frames)
 
 
-def vectorFieldMagnitudeAnimation(runFunction, plotSetupFunction=lambda: None,
-                                  plotUpdateFunction=lambda: None, interval=30, frames=180, **kwargs):
+def vector_field_magnitude_animation(run_function, plot_setup_function=lambda: None,
+                                     plot_update_function=lambda: None, interval=30, frames=180, **kwargs):
     import matplotlib.animation as animation
     from numpy.linalg import norm
 
     fig = gcf()
     im = None
-    field = runFunction()
-    im = vectorFieldMagnitude(field, **kwargs)
-    plotSetupFunction()
+    field = run_function()
+    im = vector_field_magnitude(field, **kwargs)
+    plot_setup_function()
 
-    def updatefig(*args):
-        f = runFunction()
+    def update_figure(*_):
+        f = run_function()
         normed = norm(f, axis=2, ord=2)
         if hasattr(f, 'mask'):
             normed = np.ma.masked_array(normed, mask=f.mask[:, :, 0])
         normed = np.swapaxes(normed, 0, 1)
         im.set_array(normed)
-        plotUpdateFunction()
+        plot_update_function()
         return im,
 
-    return animation.FuncAnimation(fig, updatefig, interval=interval, frames=frames)
\ No newline at end of file
+    return animation.FuncAnimation(fig, update_figure, interval=interval, frames=frames)
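
A quick usage sketch for the renamed plotting helpers, assuming the module is importable as
pystencils.plot2d and matplotlib is available (the synthetic rotation field below is made up
for illustration):

    import numpy as np
    import pystencils.plot2d as plt2d

    # Rigid-body rotation around the domain center as a test vector field.
    x, y = np.meshgrid(np.arange(32), np.arange(32), indexing='ij')
    field = np.empty((32, 32, 2))
    field[..., 0] = -(y - 16.0)
    field[..., 1] = x - 16.0

    plt2d.vector_field(field, step=2)     # quiver plot of every 2nd cell
    plt2d.figure()
    plt2d.vector_field_magnitude(field)   # colormap of the vector norm
    plt2d.show()                          # re-exported from matplotlib.pyplot
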
diff --git a/qtgui.py b/qtgui.py
index 451c2b40d649187055353346a67b5b37821fda5d..2ce9cfa59ad8d399c2242cb770a6cfb695282130 100644
--- a/qtgui.py
+++ b/qtgui.py
@@ -3,7 +3,7 @@ from PyQt5.QtWidgets import QWidget, QApplication, QTreeWidget, QTreeWidgetItem,
 from pystencils.astnodes import Block, LoopOverCoordinate, KernelFunction
 
 
-def debugGUI(ast):
+def debug_gui(ast):
     app = QApplication.instance()
     if app is None:
         app = QApplication(sys.argv)
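
A hypothetical sketch of opening the renamed debug GUI on a generated kernel AST; it assumes PyQt5
is installed and that create_kernel accepts a list of assignments (field setup as in the doctests
elsewhere in this change):

    import pystencils as ps
    from pystencils.qtgui import debug_gui

    f = ps.Field.create_generic('f', spatial_dimensions=2)
    g = f.new_field_with_different_name('g')
    update = ps.Assignment(g[0, 0], (f[0, 1] + f[1, 0] + f[0, -1] + f[-1, 0]) / 4)

    ast = ps.create_kernel([update])
    debug_gui(ast)  # blocks until the Qt tree-view window is closed
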
diff --git a/runhelper/db.py b/runhelper/db.py
index 4f37ebf8e5c21a2697153f771e612ce05684df88..9a55d182a22981abc569fa3b056be9ff56fc476b 100644
--- a/runhelper/db.py
+++ b/runhelper/db.py
@@ -2,7 +2,7 @@ import time
 import socket
 from typing import Dict, Sequence, Iterator
 import blitzdb
-from pystencils.cpu.cpujit import getCompilerConfig
+from pystencils.cpu.cpujit import get_compiler_config
 
 
 class Database:
@@ -139,7 +139,7 @@ class Database:
         return {
             'timestamp': time.mktime(time.gmtime()),
             'hostname': socket.gethostname(),
-            'cpuCompilerConfig': getCompilerConfig(),
+            'cpuCompilerConfig': get_compiler_config(),
         }
 
 
diff --git a/slicing.py b/slicing.py
index 200cb03054364ce6b760ba3cb3e9d169754a377b..9d3e432e6cc05dfea6a837bba5ca94b26c2e824a 100644
--- a/slicing.py
+++ b/slicing.py
@@ -1,23 +1,24 @@
 import sympy as sp
-import numpy as np
-from pystencils.field import createNumpyArrayWithLayout, getLayoutOfArray
+from pystencils.field import create_numpy_array_with_layout, get_layout_of_array
 
 
 class SliceMaker(object):
     def __getitem__(self, item):
         return item
+
+
 makeSlice = SliceMaker()
 
 
 class SlicedGetter(object):
-    def __init__(self, functionReturningArray):
-        self._functionReturningArray = functionReturningArray
+    def __init__(self, function_returning_array):
+        self._functionReturningArray = function_returning_array
 
     def __getitem__(self, item):
         return self._functionReturningArray(item)
 
 
-def normalizeSlice(slices, sizes):
+def normalize_slice(slices, sizes):
     """Converts slices with floating point and/or negative entries to integer slices"""
 
     if len(slices) != len(sizes):
@@ -38,50 +39,50 @@ def normalizeSlice(slices, sizes):
         assert (type(s) is slice)
 
         if s.start is None:
-            newStart = 0
+            new_start = 0
         elif type(s.start) is float:
-            newStart = int(s.start * size)
+            new_start = int(s.start * size)
         elif not isinstance(s.start, sp.Basic) and s.start < 0:
-            newStart = size + s.start
+            new_start = size + s.start
         else:
-            newStart = s.start
+            new_start = s.start
 
         if s.stop is None:
-            newStop = size
+            new_stop = size
         elif type(s.stop) is float:
-            newStop = int(s.stop * size)
+            new_stop = int(s.stop * size)
         elif not isinstance(s.stop, sp.Basic) and s.stop < 0:
-            newStop = size + s.stop
+            new_stop = size + s.stop
         else:
-            newStop = s.stop
+            new_stop = s.stop
 
-        result.append(slice(newStart, newStop, s.step if s.step is not None else 1))
+        result.append(slice(new_start, new_stop, s.step if s.step is not None else 1))
 
     return tuple(result)
 
 
-def shiftSlice(slices, offset):
-    def shiftSliceComponent(sliceComp, shiftOffset):
-        if sliceComp is None:
+def shift_slice(slices, offset):
+    def shift_slice_component(slice_comp, shift_offset):
+        if slice_comp is None:
             return None
-        elif isinstance(sliceComp, int):
-            return sliceComp + shiftOffset
-        elif isinstance(sliceComp, float):
-            return sliceComp  # relative entries are not shifted
-        elif isinstance(sliceComp, slice):
-            return slice(shiftSliceComponent(sliceComp.start, shiftOffset),
-                         shiftSliceComponent(sliceComp.stop, shiftOffset),
-                         sliceComp.step)
+        elif isinstance(slice_comp, int):
+            return slice_comp + shift_offset
+        elif isinstance(slice_comp, float):
+            return slice_comp  # relative entries are not shifted
+        elif isinstance(slice_comp, slice):
+            return slice(shift_slice_component(slice_comp.start, shift_offset),
+                         shift_slice_component(slice_comp.stop, shift_offset),
+                         slice_comp.step)
         else:
             raise ValueError()
 
     if hasattr(offset, '__len__'):
-        return [shiftSliceComponent(k, off) for k, off in zip(slices, offset)]
+        return [shift_slice_component(k, off) for k, off in zip(slices, offset)]
     else:
-        return [shiftSliceComponent(k, offset) for k in slices]
+        return [shift_slice_component(k, offset) for k in slices]
 
 
-def sliceFromDirection(directionName, dim, normalOffset=0, tangentialOffset=0):
+def slice_from_direction(direction_name, dim, normal_offset=0, tangential_offset=0):
     """
     Create a slice from a direction named by the compass scheme:
         e.g. 'N' for north returns the same as makeSlice[:, -1]
@@ -91,77 +92,77 @@ def sliceFromDirection(directionName, dim, normalOffset=0, tangentialOffset=0):
             - z: B, T (bottom, top)
     Combinations like north-east 'NE' are also allowed
 
-    :param directionName: name of direction as explained above
+    :param direction_name: name of direction as explained above
     :param dim: dimension of the returned slice (should be 2 or 3)
-    :param normalOffset: the offset in 'normal' direction: e.g. sliceFromDirection('N',2, normalOffset=2)
+    :param normal_offset: the offset in 'normal' direction: e.g. slice_from_direction('N', 2, normal_offset=2)
                          would return makeSlice[:, -3]
-    :param tangentialOffset: offset in the other directions: e.g. sliceFromDirection('N',2, tangentialOffset=2)
+    :param tangential_offset: offset in the other directions: e.g. slice_from_direction('N', 2, tangential_offset=2)
                          would return makeSlice[2:-2, -1]
     """
-    if tangentialOffset == 0:
+    if tangential_offset == 0:
         result = [slice(None, None, None)] * dim
     else:
-        result = [slice(tangentialOffset, -tangentialOffset, None)] * dim
+        result = [slice(tangential_offset, -tangential_offset, None)] * dim
 
-    normalSliceHigh, normalSliceLow = -1-normalOffset, normalOffset
+    normal_slice_high, normal_slice_low = -1 - normal_offset, normal_offset
 
     for dimIdx, (lowName, highName) in enumerate([('W', 'E'), ('S', 'N'), ('B', 'T')]):
-        if lowName in directionName:
-            assert highName not in directionName, "Invalid direction name"
-            result[dimIdx] = normalSliceLow
-        if highName in directionName:
-            assert lowName not in directionName, "Invalid direction name"
-            result[dimIdx] = normalSliceHigh
+        if lowName in direction_name:
+            assert highName not in direction_name, "Invalid direction name"
+            result[dimIdx] = normal_slice_low
+        if highName in direction_name:
+            assert lowName not in direction_name, "Invalid direction name"
+            result[dimIdx] = normal_slice_high
     return tuple(result)
 
 
-def removeGhostLayers(arr, indexDimensions=0, ghostLayers=1):
-    if ghostLayers <= 0:
+def remove_ghost_layers(arr, index_dimensions=0, ghost_layers=1):
+    if ghost_layers <= 0:
         return arr
     dimensions = len(arr.shape)
-    spatialDimensions = dimensions - indexDimensions
-    indexing = [slice(ghostLayers, -ghostLayers, None), ] * spatialDimensions
-    indexing += [slice(None, None, None)] * indexDimensions
+    spatial_dimensions = dimensions - index_dimensions
+    indexing = [slice(ghost_layers, -ghost_layers, None), ] * spatial_dimensions
+    indexing += [slice(None, None, None)] * index_dimensions
     return arr[indexing]
 
 
-def addGhostLayers(arr, indexDimensions=0, ghostLayers=1, layout=None):
+def add_ghost_layers(arr, index_dimensions=0, ghost_layers=1, layout=None):
     dimensions = len(arr.shape)
-    spatialDimensions = dimensions - indexDimensions
-    newShape = [e + 2 * ghostLayers for e in arr.shape[:spatialDimensions]] + list(arr.shape[spatialDimensions:])
+    spatial_dimensions = dimensions - index_dimensions
+    new_shape = [e + 2 * ghost_layers for e in arr.shape[:spatial_dimensions]] + list(arr.shape[spatial_dimensions:])
     if layout is None:
-        layout = getLayoutOfArray(arr)
-    result = createNumpyArrayWithLayout(newShape, layout)
+        layout = get_layout_of_array(arr)
+    result = create_numpy_array_with_layout(new_shape, layout)
     result.fill(0.0)
-    indexing = [slice(ghostLayers, -ghostLayers, None), ] * spatialDimensions
-    indexing += [slice(None, None, None)] * indexDimensions
+    indexing = [slice(ghost_layers, -ghost_layers, None), ] * spatial_dimensions
+    indexing += [slice(None, None, None)] * index_dimensions
     result[indexing] = arr
     return result
 
 
-def getSliceBeforeGhostLayer(direction, ghostLayers=1, thickness=None, fullSlice=False):
+def get_slice_before_ghost_layer(direction, ghost_layers=1, thickness=None, full_slice=False):
     """
     Returns slicing expression for region before ghost layer
     :param direction: tuple specifying direction of slice
-    :param ghostLayers: number of ghost layers
+    :param ghost_layers: number of ghost layers
     :param thickness: thickness of the slice, defaults to number of ghost layers
-    :param fullSlice:  if true also the ghost cells in directions orthogonal to direction are contained in the
+    :param full_slice: if True, the ghost cells in directions orthogonal to the given direction are also part of the
                       returned slice. Example (d=W): if full_slice is set, the ghost layers in N-S and T-B
                       are included as well; otherwise only inner cells are returned
     """
     if not thickness:
-        thickness = ghostLayers
-    fullSliceInc = ghostLayers if not fullSlice else 0
+        thickness = ghost_layers
+    full_slice_inc = ghost_layers if not full_slice else 0
     slices = []
     for dirComponent in direction:
         if dirComponent == -1:
-            s = slice(ghostLayers, thickness + ghostLayers)
+            s = slice(ghost_layers, thickness + ghost_layers)
         elif dirComponent == 0:
-            end = -fullSliceInc
-            s = slice(fullSliceInc, end if end != 0 else None)
+            end = -full_slice_inc
+            s = slice(full_slice_inc, end if end != 0 else None)
         elif dirComponent == 1:
-            start = -thickness - ghostLayers
-            end = -ghostLayers
+            start = -thickness - ghost_layers
+            end = -ghost_layers
             s = slice(start if start != 0 else None, end if end != 0 else None)
         else:
             raise ValueError("Invalid direction: only -1, 0, 1 components are allowed")
@@ -169,25 +170,25 @@ def getSliceBeforeGhostLayer(direction, ghostLayers=1, thickness=None, fullSlice
     return tuple(slices)
 
 
-def getGhostRegionSlice(direction, ghostLayers=1, thickness=None, fullSlice=False):
+def get_ghost_region_slice(direction, ghost_layers=1, thickness=None, full_slice=False):
     """
-    Returns slice of ghost region. For parameters see :func:`getSliceBeforeGhostLayer`
+    Returns slice of ghost region. For parameters see :func:`get_slice_before_ghost_layer`
     """
     if not thickness:
-        thickness = ghostLayers
+        thickness = ghost_layers
     assert thickness > 0
-    assert thickness <= ghostLayers
-    fullSliceInc = ghostLayers if not fullSlice else 0
+    assert thickness <= ghost_layers
+    full_slice_inc = ghost_layers if not full_slice else 0
     slices = []
     for dirComponent in direction:
         if dirComponent == -1:
-            s = slice(ghostLayers - thickness, ghostLayers)
+            s = slice(ghost_layers - thickness, ghost_layers)
         elif dirComponent == 0:
-            end = -fullSliceInc
-            s = slice(fullSliceInc, end if end != 0 else None)
+            end = -full_slice_inc
+            s = slice(full_slice_inc, end if end != 0 else None)
         elif dirComponent == 1:
-            start = -ghostLayers
-            end = - ghostLayers + thickness
+            start = -ghost_layers
+            end = - ghost_layers + thickness
             s = slice(start if start != 0 else None, end if end != 0 else None)
         else:
             raise ValueError("Invalid direction: only -1, 0, 1 components are allowed")
@@ -195,52 +196,43 @@ def getGhostRegionSlice(direction, ghostLayers=1, thickness=None, fullSlice=Fals
     return tuple(slices)
 
 
-def getPeriodicBoundarySrcDstSlices(stencil, ghostLayers=1, thickness=None):
-    srcDstSliceTuples = []
+def get_periodic_boundary_src_dst_slices(stencil, ghost_layers=1, thickness=None):
+    src_dst_slice_tuples = []
 
     for d in stencil:
         if sum([abs(e) for e in d]) == 0:
             continue
-        invDir = (-e for e in d)
-        src = getSliceBeforeGhostLayer(invDir, ghostLayers, thickness=thickness, fullSlice=False)
-        dst = getGhostRegionSlice(d, ghostLayers, thickness=thickness, fullSlice=False)
-        srcDstSliceTuples.append((src, dst))
-    return srcDstSliceTuples
+        inv_dir = (-e for e in d)
+        src = get_slice_before_ghost_layer(inv_dir, ghost_layers, thickness=thickness, full_slice=False)
+        dst = get_ghost_region_slice(d, ghost_layers, thickness=thickness, full_slice=False)
+        src_dst_slice_tuples.append((src, dst))
+    return src_dst_slice_tuples
 
 
-def getPeriodicBoundaryFunctor(stencil, ghostLayers=1, thickness=None):
+def get_periodic_boundary_functor(stencil, ghost_layers=1, thickness=None):
     """
     Returns a function that applies periodic boundary conditions
     :param stencil: sequence of directions e.g. ( [0,1], [0,-1] ) for y periodicity
-    :param ghostLayers: how many ghost layers the array has
+    :param ghost_layers: how many ghost layers the array has
     :param thickness: how many of the ghost layers to copy, None means 'all'
     :return: function that takes a single array and applies the periodic copy operation
     """
-    srcDstSliceTuples = getPeriodicBoundarySrcDstSlices(stencil, ghostLayers, thickness)
+    src_dst_slice_tuples = get_periodic_boundary_src_dst_slices(stencil, ghost_layers, thickness)
 
-    def functor(pdfs, **kwargs):
-        for srcSlice, dstSlice in srcDstSliceTuples:
+    def functor(pdfs, **_):
+        for srcSlice, dstSlice in src_dst_slice_tuples:
             pdfs[dstSlice] = pdfs[srcSlice]
 
     return functor
 
 
-def sliceIntersection(slice1, slice2):
+def slice_intersection(slice1, slice2):
     slice1 = [s if not isinstance(s, int) else slice(s, s + 1, None) for s in slice1]
     slice2 = [s if not isinstance(s, int) else slice(s, s + 1, None) for s in slice2]
 
-    newMin = [max(s1.start, s2.start) for s1, s2 in zip(slice1, slice2)]
-    newMax = [min(s1.stop,  s2.stop)  for s1, s2 in zip(slice1, slice2)]
-    if any(maxP - minP < 0 for minP, maxP in zip(newMin, newMax)):
+    new_min = [max(s1.start, s2.start) for s1, s2 in zip(slice1, slice2)]
+    new_max = [min(s1.stop, s2.stop) for s1, s2 in zip(slice1, slice2)]
+    if any(max_p - min_p < 0 for min_p, max_p in zip(new_min, new_max)):
         return None
 
-    return [slice(minP, maxP, None) for minP, maxP in zip(newMin, newMax)]
-
-
-    #min_.x() = std::max(xMin(), other.xMin());
-    #min_.y() = std::max(yMin(), other.yMin());
-    #min_.z() = std::max(zMin(), other.zMin());
-
-    #max_.x() = std::min(xMax(), other.xMax());
-    #max_.y() = std::min(yMax(), other.yMax());
-    #max_.z() = std::min(zMax(), other.zMax());
+    return [slice(min_p, max_p, None) for min_p, max_p in zip(new_min, new_max)]
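
A short sketch exercising the renamed slicing helpers; the array shape and stencil are made up for
illustration, and the expected results follow from the definitions above:

    import numpy as np
    from pystencils.slicing import (makeSlice, normalize_slice,
                                    slice_from_direction,
                                    get_periodic_boundary_functor)

    # Float entries are relative, negative entries count from the end.
    print(normalize_slice(makeSlice[0.25:0.75, -2:], sizes=(20, 10)))
    # -> (slice(5, 15, 1), slice(8, 10, 1))

    print(slice_from_direction('N', 2))  # -> (slice(None, None, None), -1)

    # Periodicity in y for an array with one ghost layer on each side:
    periodic = get_periodic_boundary_functor([(0, 1), (0, -1)], ghost_layers=1)
    arr = np.arange(36, dtype=np.float64).reshape(6, 6)
    periodic(arr)
    assert np.all(arr[1:-1, -1] == arr[1:-1, 1])   # ghost column <- opposite inner column
    assert np.all(arr[1:-1, 0] == arr[1:-1, -2])
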
diff --git a/sympyextensions.py b/sympyextensions.py
index c59df3c160a65bd7bc84292d771f888c0435e651..b4a9b6dba020c1233f48f94b3c6d882544ba4abd 100644
--- a/sympyextensions.py
+++ b/sympyextensions.py
@@ -13,7 +13,7 @@ from pystencils.assignment import Assignment
 T = TypeVar('T')
 
 
-def prod(seq: Sequence[T]) -> T:
+def prod(seq: Iterable[T]) -> T:
     """Takes a sequence and returns the product of all elements"""
     return reduce(operator.mul, seq, 1)
 
@@ -102,7 +102,7 @@ def symmetric_product(*args, with_diagonal: bool = True) -> Iterable:
             yield tuple(a[i] for a, i in zip(args, idx))
 
 
-def fast_subs(expression: T, substitutions: Dict[sp.Expr, sp.Expr],
+def fast_subs(expression: T, substitutions: Dict,
               skip: Optional[Callable[[sp.Expr], bool]] = None) -> T:
     """Similar to sympy subs function.
 
@@ -558,8 +558,8 @@ def assignments_from_python_function(func, **kwargs):
         
     >>> def my_kernel(s):
     ...     from pystencils import Field
-    ...     f = Field.createGeneric('f', spatialDimensions=2, indexDimensions=0)
-    ...     g = f.newFieldWithDifferentName('g')
+    ...     f = Field.create_generic('f', spatial_dimensions=2, index_dimensions=0)
+    ...     g = f.new_field_with_different_name('g')
     ...     
     ...     s.neighbors @= f[0,1] + f[1,0]
     ...     g[0,0]      @= s.neighbors + f[0,0]
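
For the two helpers whose signatures were loosened above, a minimal sketch of the intended use;
the printed results assume fast_subs behaves like sympy's subs for plain expressions, as its
docstring states:

    import sympy as sp
    from pystencils.sympyextensions import prod, fast_subs

    x, y = sp.symbols('x y')
    print(prod([x, y, 2]))                  # 2*x*y  (now accepts any iterable)
    print(fast_subs(x**2 + y, {x: y + 1}))  # y + (y + 1)**2
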
diff --git a/timeloop.py b/timeloop.py
index c5c88a04f44445e029faf3d6563760b5b535ae6f..9a562d27e1c6baec6867b1c8919f8e464f35e5c1 100644
--- a/timeloop.py
+++ b/timeloop.py
@@ -9,79 +9,79 @@ class TimeLoop:
         self._functionNames = []
         self.timeStepsRun = 0
 
-    def addStep(self, stepObj):
-        if hasattr(stepObj, 'preRun'):
-            self.addPreRunFunction(stepObj.preRun)
-        if hasattr(stepObj, 'postRun'):
-            self.addPostRunFunction(stepObj.postRun)
-        self.add(stepObj.timeStep, stepObj.name)
-
-    def add(self, timeStepFunction, name=None):
+    def add_step(self, step_obj):
+        if hasattr(step_obj, 'pre_run'):
+            self.add_pre_run_function(step_obj.pre_run)
+        if hasattr(step_obj, 'post_run'):
+            self.add_post_run_function(step_obj.post_run)
+        self.add(step_obj.time_step, step_obj.name)
+
+    def add(self, time_step_function, name=None):
         if name is None:
-            name = str(timeStepFunction)
-        self._timeStepFunctions.append(timeStepFunction)
+            name = str(time_step_function)
+        self._timeStepFunctions.append(time_step_function)
         self._functionNames.append(name)
 
-    def addKernel(self, dataHandling, kernelFunc, name=None):
-        self.add(lambda: dataHandling.runKernel(kernelFunc), name)
+    def add_kernel(self, data_handling, kernel_func, name=None):
+        self.add(lambda: data_handling.run_kernel(kernel_func), name)
 
-    def addPreRunFunction(self, f):
+    def add_pre_run_function(self, f):
         self._preRunFunctions.append(f)
 
-    def addPostRunFunction(self, f):
+    def add_post_run_function(self, f):
         self._postRunFunctions.append(f)
 
-    def run(self, timeSteps=1):
-        self.preRun()
+    def run(self, time_steps=1):
+        self.pre_run()
 
         try:
-            for i in range(timeSteps):
-                self.timeStep()
+            for i in range(time_steps):
+                self.time_step()
         except KeyboardInterrupt:
             pass
 
-        self.postRun()
+        self.post_run()
 
-    def benchmarkRun(self, timeSteps=0, initTimeSteps=0):
-        self.preRun()
-        for i in range(initTimeSteps):
-            self.timeStep()
+    def benchmark_run(self, time_steps=0, init_time_steps=0):
+        self.pre_run()
+        for i in range(init_time_steps):
+            self.time_step()
 
         start = time.perf_counter()
-        for i in range(timeSteps):
-            self.timeStep()
+        for i in range(time_steps):
+            self.time_step()
         end = time.perf_counter()
-        self.postRun()
+        self.post_run()
 
-        timeForOneIteration = (end - start) / timeSteps
-        return timeForOneIteration
+        time_for_one_iteration = (end - start) / time_steps
+        return time_for_one_iteration
 
-    def benchmark(self, timeForBenchmark=5, initTimeSteps=10, numberOfTimeStepsForEstimation=20):
+    def benchmark(self, time_for_benchmark=5, init_time_steps=10, number_of_time_steps_for_estimation=20):
         """
         Returns the time in seconds for one time step
 
-        :param timeForBenchmark: number of seconds benchmark should take
-        :param initTimeSteps: number of time steps run initially for warm up, to get arrays into cache etc
-        :param numberOfTimeStepsForEstimation: time steps run before real benchmarks, to determine number of time steps
+    :param time_for_benchmark: number of seconds the benchmark should take
+    :param init_time_steps: number of time steps run initially for warm-up, to get arrays into cache etc.
+    :param number_of_time_steps_for_estimation: time steps run before the real benchmark, to determine the number
                                               of time steps that approximately take 'time_for_benchmark'
         """
         # Run a few time steps to get a first estimate
-        durationOfTimeStep = self.benchmarkRun(numberOfTimeStepsForEstimation, initTimeSteps)
+        duration_of_time_step = self.benchmark_run(number_of_time_steps_for_estimation, init_time_steps)
 
-        # Run for approximately 'timeForBenchmark' seconds
-        timeSteps = int(timeForBenchmark / durationOfTimeStep)
-        timeSteps = max(timeSteps, 20)
-        return self.benchmarkRun(timeSteps, initTimeSteps)
+        # Run for approximately 'time_for_benchmark' seconds
+        time_steps = int(time_for_benchmark / duration_of_time_step)
+        time_steps = max(time_steps, 20)
+        return self.benchmark_run(time_steps, init_time_steps)
 
-    def preRun(self):
+    def pre_run(self):
         for f in self._preRunFunctions:
             f()
 
-    def postRun(self):
+    def post_run(self):
         for f in self._postRunFunctions:
             f()
 
-    def timeStep(self):
+    def time_step(self):
         for f in self._timeStepFunctions:
             f()
         self.timeStepsRun += 1
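
A minimal sketch of the renamed TimeLoop interface, assuming the constructor takes no arguments;
the dummy step is made up, real use registers compiled kernels via add_kernel:

    from pystencils.timeloop import TimeLoop

    state = {'calls': 0}

    def dummy_step():
        state['calls'] += 1

    loop = TimeLoop()
    loop.add(dummy_step, name='dummy')
    loop.add_pre_run_function(lambda: print('starting'))
    loop.run(time_steps=100)
    print(state['calls'], loop.timeStepsRun)  # 100 100

    # Warms up, estimates a step count, then times roughly 0.5 s worth of steps:
    seconds_per_step = loop.benchmark(time_for_benchmark=0.5)
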
diff --git a/transformations/stage2.py b/transformations/stage2.py
index dd539c3601fbf03998cb2e15f5ded962b3770858..6d47400e2543d986b2335744f6c9b9f887d72bc2 100644
--- a/transformations/stage2.py
+++ b/transformations/stage2.py
@@ -1,53 +1,53 @@
 import sympy as sp
-from pystencils.data_types import PointerType, get_type_of_expression, collate_types, castFunc, pointerArithmeticFunc
+from pystencils.data_types import PointerType, get_type_of_expression, collate_types, cast_func, pointer_arithmetic_func
 import pystencils.astnodes as ast
 
 
-def insertCasts(node):
+def insert_casts(node):
     """
     Checks the types and inserts casts and pointer arithmetic where necessary
     :param node: the head node of the ast
     :return: modified ast
     """
-    def cast(zippedArgsTypes, target):
+    def cast(zipped_args_types, target_dtype):
         """
         Adds casts to the arguments if their type differs from the target type
-        :param zippedArgsTypes: a zipped list of args and types
-        :param target: The target data type
+        :param zipped_args_types: a zipped list of args and types
+        :param target_dtype: The target data type
         :return: args with possible casts
         """
         casted_args = []
-        for arg, dataType in zippedArgsTypes:
-            if dataType.numpy_dtype != target.numpy_dtype:  # ignoring const
-                casted_args.append(castFunc(arg, target))
+        for argument, dataType in zipped_args_types:
+            if dataType.numpy_dtype != target_dtype.numpy_dtype:  # ignoring const
+                casted_args.append(cast_func(argument, target_dtype))
             else:
-                casted_args.append(arg)
+                casted_args.append(argument)
         return casted_args
 
-    def pointerArithmetic(args):
+    def pointer_arithmetic(expr_args):
         """
         Creates a valid pointer arithmetic function
-        :param args: Arguments of the add expression
-        :return: pointerArithmeticFunc
+        :param expr_args: Arguments of the add expression
+        :return: pointer_arithmetic_func
         """
         pointer = None
-        newArgs = []
-        for arg, dataType in args:
+        new_args = []
+        for arg, dataType in expr_args:
             if dataType.func is PointerType:
                 assert pointer is None
                 pointer = arg
-        for arg, dataType in args:
+        for arg, dataType in expr_args:
             if arg != pointer:
                 assert dataType.is_int() or dataType.is_uint()
-                newArgs.append(arg)
-        newArgs = sp.Add(*newArgs) if len(newArgs) > 0 else newArgs
-        return pointerArithmeticFunc(pointer, newArgs)
+                new_args.append(arg)
+        new_args = sp.Add(*new_args) if len(new_args) > 0 else new_args
+        return pointer_arithmetic_func(pointer, new_args)
 
     if isinstance(node, sp.AtomicExpr):
         return node
     args = []
     for arg in node.args:
-        args.append(insertCasts(arg))
+        args.append(insert_casts(arg))
     # TODO indexed, LoopOverCoordinate
     if node.func in (sp.Add, sp.Mul, sp.Or, sp.And, sp.Pow, sp.Eq, sp.Ne, sp.Lt, sp.Le, sp.Gt, sp.Ge):
         # TODO optimize pow, don't cast integer to double
@@ -57,7 +57,7 @@ def insertCasts(node):
         zipped = list(zip(args, types))
         if target.func is PointerType:
             assert node.func is sp.Add
-            return pointerArithmetic(zipped)
+            return pointer_arithmetic(zipped)
         else:
             return node.func(*cast(zipped, target))
     elif node.func is ast.SympyAssignment:
@@ -79,11 +79,11 @@ def insertCasts(node):
             node.replace(oldArg, newArg)
         return node
     elif node.func is sp.Piecewise:
-        exprs = [expr for (expr, _) in args]
-        types = [get_type_of_expression(expr) for expr in exprs]
+        expressions = [expr for (expr, _) in args]
+        types = [get_type_of_expression(expr) for expr in expressions]
         target = collate_types(types)
-        zipped = list(zip(exprs, types))
-        casted_exprs = cast(zipped, target)
-        args = [arg.func(*[expr, arg.cond]) for (arg, expr) in zip(args, casted_exprs)]
+        zipped = list(zip(expressions, types))
+        casted_expressions = cast(zipped, target)
+        args = [arg.func(*[expr, arg.cond]) for (arg, expr) in zip(args, casted_expressions)]
 
     return node.func(*args)
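
The promotion rule behind insert_casts is collate_types: every argument whose dtype differs from
the collated target is wrapped in cast_func. A tiny sketch of that rule in isolation, assuming
create_type and collate_types behave as they are used above:

    import sympy as sp
    from pystencils.data_types import create_type, collate_types, cast_func

    int_t = create_type('int32')
    double_t = create_type('double')
    target = collate_types([int_t, double_t])  # expected: the wider type (double)

    i = sp.Symbol('i')
    # Mirrors the cast() helper above: wrap args whose dtype differs from target.
    arg = cast_func(i, target) if int_t.numpy_dtype != target.numpy_dtype else i
    print(target, arg)
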
diff --git a/transformations/transformations.py b/transformations/transformations.py
index 0fb6d0164d1178e3a4312e9d127e7697af05f7b5..b26e019702cdc3ab2de19724ca7e54538a773fa3 100644
--- a/transformations/transformations.py
+++ b/transformations/transformations.py
@@ -8,165 +8,164 @@ from sympy.logic.boolalg import Boolean
 from sympy.tensor import IndexedBase
 
 from pystencils.assignment import Assignment
-from pystencils.field import Field, FieldType, offsetComponentToDirectionString
-from pystencils.data_types import TypedSymbol, create_type, PointerType, StructType, get_base_type, castFunc
-from pystencils.slicing import normalizeSlice
+from pystencils.field import Field, FieldType, offset_component_to_direction_string
+from pystencils.data_types import TypedSymbol, create_type, PointerType, StructType, get_base_type, cast_func
+from pystencils.slicing import normalize_slice
 import pystencils.astnodes as ast
 
 
-def filteredTreeIteration(node, nodeType):
+def filtered_tree_iteration(node, node_type):
     for arg in node.args:
-        if isinstance(arg, nodeType):
+        if isinstance(arg, node_type):
             yield arg
-        yield from filteredTreeIteration(arg, nodeType)
+        yield from filtered_tree_iteration(arg, node_type)
 
 
-def getCommonShape(fieldSet):
+def get_common_shape(field_set):
     """Takes a set of pystencils Fields and returns their common spatial shape if it exists. Otherwise
     ValueError is raised"""
-    nrOfFixedShapedFields = 0
-    for f in fieldSet:
-        if f.hasFixedShape:
-            nrOfFixedShapedFields += 1
-
-    if nrOfFixedShapedFields > 0 and nrOfFixedShapedFields != len(fieldSet):
-        fixedFieldNames = ",".join([f.name for f in fieldSet if f.hasFixedShape])
-        varFieldNames = ",".join([f.name for f in fieldSet if not f.hasFixedShape])
+    nr_of_fixed_shaped_fields = 0
+    for f in field_set:
+        if f.has_fixed_shape:
+            nr_of_fixed_shaped_fields += 1
+
+    if nr_of_fixed_shaped_fields > 0 and nr_of_fixed_shaped_fields != len(field_set):
+        fixed_field_names = ",".join([f.name for f in field_set if f.has_fixed_shape])
+        var_field_names = ",".join([f.name for f in field_set if not f.has_fixed_shape])
         msg = "Mixing fixed-shaped and variable-shape fields in a single kernel is not possible\n"
-        msg += "Variable shaped: %s \nFixed shaped:    %s" % (varFieldNames, fixedFieldNames)
+        msg += "Variable shaped: %s \nFixed shaped:    %s" % (var_field_names, fixed_field_names)
         raise ValueError(msg)
 
-    shapeSet = set([f.spatialShape for f in fieldSet])
-    if nrOfFixedShapedFields == len(fieldSet):
-        if len(shapeSet) != 1:
-            raise ValueError("Differently sized field accesses in loop body: " + str(shapeSet))
+    shape_set = set([f.spatial_shape for f in field_set])
+    if nr_of_fixed_shaped_fields == len(field_set):
+        if len(shape_set) != 1:
+            raise ValueError("Differently sized field accesses in loop body: " + str(shape_set))
 
-    shape = list(sorted(shapeSet, key=lambda e: str(e[0])))[0]
+    shape = list(sorted(shape_set, key=lambda e: str(e[0])))[0]
     return shape
 
 
-def makeLoopOverDomain(body, functionName, iterationSlice=None, ghostLayers=None, loopOrder=None):
-    """
-    Uses :class:`pystencils.field.Field.Access` to create (multiple) loops around given AST.
-
-    :param body: list of nodes
-    :param functionName: name of generated C function
-    :param iterationSlice: if not None, iteration is done only over this slice of the field
-    :param ghostLayers: a sequence of pairs for each coordinate with lower and upper nr of ghost layers
-                if None, the number of ghost layers is determined automatically and assumed to be equal for a
-                all dimensions
-    :param loopOrder: loop ordering from outer to inner loop (optimal ordering is same as layout)
-    :return: :class:`LoopOverCoordinate` instance with nested loops, ordered according to field layouts
+def make_loop_over_domain(body, function_name, iteration_slice=None, ghost_layers=None, loop_order=None):
+    """Uses :class:`pystencils.field.Field.Access` to create (multiple) loops around given AST.
+
+    Args:
+        body: list of nodes
+        function_name: name of generated C function
+        iteration_slice: if not None, iteration is done only over this slice of the field
+        ghost_layers: a sequence of pairs for each coordinate with the lower and upper number of ghost layers;
+             if None, the number of ghost layers is determined automatically and assumed to be equal for
+             all dimensions
+        loop_order: loop ordering from outer to inner loop (optimal ordering is same as layout)
+
+    Returns:
+        :class:`LoopOverCoordinate` instance with nested loops, ordered according to field layouts
     """
     # find correct ordering by inspecting participating FieldAccesses
-    fieldAccesses = body.atoms(Field.Access)
-    # exclude accesses to buffers from fieldList, because buffers are treated separately
-    fieldList = [e.field for e in fieldAccesses if not FieldType.isBuffer(e.field)]
-    fields = set(fieldList)
-    numBufferAccesses = len(fieldAccesses) - len(fieldList)
-
-    if loopOrder is None:
-        loopOrder = getOptimalLoopOrdering(fields)
-
-    shape = getCommonShape(list(fields))
-
-    if iterationSlice is not None:
-        iterationSlice = normalizeSlice(iterationSlice, shape)
-
-    if ghostLayers is None:
-        requiredGhostLayers = max([fa.requiredGhostLayers for fa in fieldAccesses])
-        ghostLayers = [(requiredGhostLayers, requiredGhostLayers)] * len(loopOrder)
-    if isinstance(ghostLayers, int):
-        ghostLayers = [(ghostLayers, ghostLayers)] * len(loopOrder)
-
-    def getLoopStride(begin, end, step):
-        return (end - begin) / step
-
-    loopStrides = []
-    loopVars = []
-    currentBody = body
-    lastLoop = None
-    for i, loopCoordinate in enumerate(reversed(loopOrder)):
-        if iterationSlice is None:
-            begin = ghostLayers[loopCoordinate][0]
-            end = shape[loopCoordinate] - ghostLayers[loopCoordinate][1]
-            newLoop = ast.LoopOverCoordinate(currentBody, loopCoordinate, begin, end, 1)
-            lastLoop = newLoop
-            currentBody = ast.Block([lastLoop])
-            loopStrides.append(getLoopStride(begin, end, 1))
-            loopVars.append(newLoop.loop_counter_symbol)
+    field_accesses = body.atoms(Field.Access)
+    # exclude accesses to buffers from field_list, because buffers are treated separately
+    field_list = [e.field for e in field_accesses if not FieldType.is_buffer(e.field)]
+    fields = set(field_list)
+    num_buffer_accesses = len(field_accesses) - len(field_list)
+
+    if loop_order is None:
+        loop_order = get_optimal_loop_ordering(fields)
+
+    shape = get_common_shape(list(fields))
+
+    if iteration_slice is not None:
+        iteration_slice = normalize_slice(iteration_slice, shape)
+
+    if ghost_layers is None:
+        required_ghost_layers = max([fa.required_ghost_layers for fa in field_accesses])
+        ghost_layers = [(required_ghost_layers, required_ghost_layers)] * len(loop_order)
+    if isinstance(ghost_layers, int):
+        ghost_layers = [(ghost_layers, ghost_layers)] * len(loop_order)
+
+    def get_loop_stride(loop_begin, loop_end, step):
+        return (loop_end - loop_begin) / step
+
+    loop_strides = []
+    loop_vars = []
+    current_body = body
+    for i, loopCoordinate in enumerate(reversed(loop_order)):
+        if iteration_slice is None:
+            begin = ghost_layers[loopCoordinate][0]
+            end = shape[loopCoordinate] - ghost_layers[loopCoordinate][1]
+            new_loop = ast.LoopOverCoordinate(current_body, loopCoordinate, begin, end, 1)
+            current_body = ast.Block([new_loop])
+            loop_strides.append(get_loop_stride(begin, end, 1))
+            loop_vars.append(new_loop.loop_counter_symbol)
         else:
-            sliceComponent = iterationSlice[loopCoordinate]
-            if type(sliceComponent) is slice:
-                sc = sliceComponent
-                newLoop = ast.LoopOverCoordinate(currentBody, loopCoordinate, sc.start, sc.stop, sc.step)
-                lastLoop = newLoop
-                currentBody = ast.Block([lastLoop])
-                loopStrides.append(getLoopStride(sc.start, sc.stop, sc.step))
-                loopVars.append(newLoop.loop_counter_symbol)
+            slice_component = iteration_slice[loopCoordinate]
+            if type(slice_component) is slice:
+                sc = slice_component
+                new_loop = ast.LoopOverCoordinate(current_body, loopCoordinate, sc.start, sc.stop, sc.step)
+                current_body = ast.Block([new_loop])
+                loop_strides.append(get_loop_stride(sc.start, sc.stop, sc.step))
+                loop_vars.append(new_loop.loop_counter_symbol)
             else:
                 assignment = ast.SympyAssignment(ast.LoopOverCoordinate.get_loop_counter_symbol(loopCoordinate),
-                                                 sp.sympify(sliceComponent))
-                currentBody.insert_front(assignment)
+                                                 sp.sympify(slice_component))
+                current_body.insert_front(assignment)
 
-    loopVars = [numBufferAccesses * var for var in loopVars]
-    astNode = ast.KernelFunction(currentBody, ghost_layers=ghostLayers, function_name=functionName, backend='cpu')
-    return (astNode, loopStrides, loopVars)
+    loop_vars = [num_buffer_accesses * var for var in loop_vars]
+    ast_node = ast.KernelFunction(current_body, ghost_layers=ghost_layers, function_name=function_name, backend='cpu')
+    return ast_node, loop_strides, loop_vars
 
 
-def createIntermediateBasePointer(fieldAccess, coordinates, previousPtr):
+def create_intermediate_base_pointer(field_access, coordinates, previous_ptr):
     r"""
     Addressing an element in a structured array is done with :math:`ptr\left[ \sum_i c_i \cdot s_i \right]`
     where :math:`c_i` is the coordinate value and :math:`s_i` the stride of a coordinate.
     The sum can be split into multiple parts, such that parts of it can be moved in front of the loops.
     This function creates such an access for coordinates :math:`i \in \mbox{coordinates}`.
     Returns a new typed symbol, where the name encodes which coordinates have been resolved.
-    :param fieldAccess: instance of :class:`pystencils.field.Field.Access` which provides strides and offsets
+    :param field_access: instance of :class:`pystencils.field.Field.Access` which provides strides and offsets
     :param coordinates: mapping of coordinate ids to its value, where stride*value is calculated
-    :param previousPtr: the pointer which is dereferenced
+    :param previous_ptr: the pointer which is dereferenced
     :return: tuple with the new pointer symbol and the calculated offset
 
     Example:
-        >>> field = Field.createGeneric('myfield', spatialDimensions=2, indexDimensions=1)
+        >>> field = Field.create_generic('myfield', spatial_dimensions=2, index_dimensions=1)
         >>> x, y = sp.symbols("x y")
         >>> prevPointer = TypedSymbol("ptr", "double")
-        >>> createIntermediateBasePointer(field[1,-2](5), {0: x}, prevPointer)
+        >>> create_intermediate_base_pointer(field[1,-2](5), {0: x}, prevPointer)
         (ptr_E, x*fstride_myfield[0] + fstride_myfield[0])
-        >>> createIntermediateBasePointer(field[1,-2](5), {0: x, 1 : y }, prevPointer)
+        >>> create_intermediate_base_pointer(field[1,-2](5), {0: x, 1 : y }, prevPointer)
         (ptr_E_2S, x*fstride_myfield[0] + y*fstride_myfield[1] + fstride_myfield[0] - 2*fstride_myfield[1])
     """
-    field = fieldAccess.field
+    field = field_access.field
     offset = 0
     name = ""
-    listToHash = []
+    list_to_hash = []
     for coordinateId, coordinateValue in coordinates.items():
         offset += field.strides[coordinateId] * coordinateValue
 
-        if coordinateId < field.spatialDimensions:
-            offset += field.strides[coordinateId] * fieldAccess.offsets[coordinateId]
-            if type(fieldAccess.offsets[coordinateId]) is int:
-                offsetComp = offsetComponentToDirectionString(coordinateId, fieldAccess.offsets[coordinateId])
+        if coordinateId < field.spatial_dimensions:
+            offset += field.strides[coordinateId] * field_access.offsets[coordinateId]
+            if type(field_access.offsets[coordinateId]) is int:
+                offset_comp = offset_component_to_direction_string(coordinateId, field_access.offsets[coordinateId])
                 name += "_"
-                name += offsetComp if offsetComp else "C"
+                name += offset_comp if offset_comp else "C"
             else:
-                listToHash.append(fieldAccess.offsets[coordinateId])
+                list_to_hash.append(field_access.offsets[coordinateId])
         else:
             if type(coordinateValue) is int:
                 name += "_%d" % (coordinateValue,)
             else:
-                listToHash.append(coordinateValue)
+                list_to_hash.append(coordinateValue)
 
-    if len(listToHash) > 0:
-        name += "%0.6X" % (abs(hash(tuple(listToHash))))
+    if len(list_to_hash) > 0:
+        name += "%0.6X" % (abs(hash(tuple(list_to_hash))))
 
-    newPtr = TypedSymbol(previousPtr.name + name, previousPtr.dtype)
+    new_ptr = TypedSymbol(previous_ptr.name + name, previous_ptr.dtype)
 
-    return newPtr, offset
+    return new_ptr, offset
 
 
-def parseBasePointerInfo(basePointerSpecification, loopOrder, field):
+def parse_base_pointer_info(base_pointer_specification, loop_order, field):
     """
-    Creates base pointer specification for :func:`resolveFieldAccesses` function.
+    Creates a base pointer specification for the :func:`resolve_field_accesses` function.
 
     Specification of how many and which intermediate pointers are created for a field access.
     For example [(0,), (2, 3)] creates one base pointer for coordinates 2 and 3 and writes the offset for coordinate
@@ -180,164 +179,168 @@ def parseBasePointerInfo(basePointerSpecification, loopOrder, field):
         - "index<int>": index coordinate
         - "<int>": specifying directly the coordinate
 
-    :param basePointerSpecification: nested list with above specifications
-    :param loopOrder: list with ordering of loops from outer to inner
-    :param field:
-    :return: list of tuples that can be passed to :func:`resolveFieldAccesses`
+    Args:
+        base_pointer_specification: nested list with above specifications
+        loop_order: list with ordering of loops from outer to inner
+        field:
+
+    Returns:
+        list of tuples that can be passed to :func:`resolve_field_accesses`
     """
     result = []
-    specifiedCoordinates = set()
-    loopOrder = list(reversed(loopOrder))
-    for specGroup in basePointerSpecification:
-        newGroup = []
-
-        def addNewElement(i):
-            if i >= field.spatialDimensions + field.indexDimensions:
-                raise ValueError("Coordinate %d does not exist" % (i,))
-            newGroup.append(i)
-            if i in specifiedCoordinates:
-                raise ValueError("Coordinate %d specified two times" % (i,))
-            specifiedCoordinates.add(i)
+    specified_coordinates = set()
+    loop_order = list(reversed(loop_order))
+    for specGroup in base_pointer_specification:
+        new_group = []
+
+        def add_new_element(elem):
+            if elem >= field.spatial_dimensions + field.index_dimensions:
+                raise ValueError("Coordinate %d does not exist" % (elem,))
+            new_group.append(elem)
+            if elem in specified_coordinates:
+                raise ValueError("Coordinate %d specified two times" % (elem,))
+            specified_coordinates.add(elem)
         for element in specGroup:
             if type(element) is int:
-                addNewElement(element)
+                add_new_element(element)
             elif element.startswith("spatial"):
                 element = element[len("spatial"):]
                 if element.startswith("Inner"):
                     index = int(element[len("Inner"):])
-                    addNewElement(loopOrder[index])
+                    add_new_element(loop_order[index])
                 elif element.startswith("Outer"):
                     index = int(element[len("Outer"):])
-                    addNewElement(loopOrder[-index])
+                    add_new_element(loop_order[-index])
                 elif element == "all":
-                    for i in range(field.spatialDimensions):
-                        addNewElement(i)
+                    for i in range(field.spatial_dimensions):
+                        add_new_element(i)
                 else:
                     raise ValueError("Could not parse " + element)
             elif element.startswith("index"):
                 index = int(element[len("index"):])
-                addNewElement(field.spatialDimensions + index)
+                add_new_element(field.spatial_dimensions + index)
             else:
                 raise ValueError("Unknown specification %s" % (element,))
 
-        result.append(newGroup)
+        result.append(new_group)
 
-    allCoordinates = set(range(field.spatialDimensions + field.indexDimensions))
-    rest = allCoordinates - specifiedCoordinates
+    all_coordinates = set(range(field.spatial_dimensions + field.index_dimensions))
+    rest = all_coordinates - specified_coordinates
     if rest:
         result.append(list(rest))
 
     return result
 
 
-def substituteArrayAccessesWithConstants(astNode):
-    """Substitutes all instances of Indexed (array acceses) that are not field accesses with constants.
+def substitute_array_accesses_with_constants(ast_node):
+    """Substitutes all instances of Indexed (array accesses) that are not field accesses with constants.
     Benchmarks showed that using an array access as a loop bound or in pointer computations causes some compilers
     to apply fewer optimizations.
     This transformation should run after field accesses have been resolved (since they introduce array accesses) and
     before constants are moved in front of the loops.
     """
 
-    def handleSympyExpression(expr, parentBlock):
+    def handle_sympy_expression(expr, parent_block):
         """Returns sympy expression where array accesses have been replaced with constants, together with a list
         of assignments that define these constants"""
         if not isinstance(expr, sp.Expr):
             return expr
 
         # get all indexed expressions that are not field accesses
-        indexedExprs = [e for e in expr.atoms(sp.Indexed) if not isinstance(e, ast.ResolvedFieldAccess)]
+        indexed_expressions = [e for e in expr.atoms(sp.Indexed) if not isinstance(e, ast.ResolvedFieldAccess)]
 
         # special case: if the right-hand side is a single indexed expression, nothing has to be done
-        if len(indexedExprs) == 1 and expr == indexedExprs[0]:
+        if len(indexed_expressions) == 1 and expr == indexed_expressions[0]:
             return expr
 
-        constantsDefinitions = []
-        constantSubstitutions = {}
-        for indexedExpr in indexedExprs:
+        constants_definitions = []
+        constant_substitutions = {}
+        for indexedExpr in indexed_expressions:
             base, idx = indexedExpr.args
-            typedSymbol = base.args[0]
-            baseType = deepcopy(get_base_type(typedSymbol.dtype))
-            baseType.const = False
-            constantReplacingIndexed = TypedSymbol(typedSymbol.name + str(idx), baseType)
-            constantsDefinitions.append(ast.SympyAssignment(constantReplacingIndexed, indexedExpr))
-            constantSubstitutions[indexedExpr] = constantReplacingIndexed
-        constantsDefinitions.sort(key=lambda e: e.lhs.name)
-
-        alreadyDefined = parentBlock.symbols_defined
-        for newAssignment in constantsDefinitions:
-            if newAssignment.lhs not in alreadyDefined:
-                parentBlock.insert_before(newAssignment, astNode)
-
-        return expr.subs(constantSubstitutions)
-
-    if isinstance(astNode, ast.SympyAssignment):
-        astNode.rhs = handleSympyExpression(astNode.rhs, astNode.parent)
-        astNode.lhs = handleSympyExpression(astNode.lhs, astNode.parent)
-    elif isinstance(astNode, ast.LoopOverCoordinate):
-        astNode.start = handleSympyExpression(astNode.start, astNode.parent)
-        astNode.stop = handleSympyExpression(astNode.stop, astNode.parent)
-        astNode.step = handleSympyExpression(astNode.step, astNode.parent)
-        substituteArrayAccessesWithConstants(astNode.body)
+            typed_symbol = base.args[0]
+            base_type = deepcopy(get_base_type(typed_symbol.dtype))
+            base_type.const = False
+            constant_replacing_indexed = TypedSymbol(typed_symbol.name + str(idx), base_type)
+            constants_definitions.append(ast.SympyAssignment(constant_replacing_indexed, indexedExpr))
+            constant_substitutions[indexedExpr] = constant_replacing_indexed
+        constants_definitions.sort(key=lambda e: e.lhs.name)
+
+        already_defined = parent_block.symbols_defined
+        for new_assignment in constants_definitions:
+            if new_assignment.lhs not in already_defined:
+                parent_block.insert_before(new_assignment, ast_node)
+
+        return expr.subs(constant_substitutions)
+
+    if isinstance(ast_node, ast.SympyAssignment):
+        ast_node.rhs = handle_sympy_expression(ast_node.rhs, ast_node.parent)
+        ast_node.lhs = handle_sympy_expression(ast_node.lhs, ast_node.parent)
+    elif isinstance(ast_node, ast.LoopOverCoordinate):
+        ast_node.start = handle_sympy_expression(ast_node.start, ast_node.parent)
+        ast_node.stop = handle_sympy_expression(ast_node.stop, ast_node.parent)
+        ast_node.step = handle_sympy_expression(ast_node.step, ast_node.parent)
+        substitute_array_accesses_with_constants(ast_node.body)
     else:
-        for a in astNode.args:
-            substituteArrayAccessesWithConstants(a)
+        for a in ast_node.args:
+            substitute_array_accesses_with_constants(a)
 
 
-def resolveBufferAccesses(astNode, baseBufferIndex, readOnlyFieldNames=set()):
-    def visitSympyExpr(expr, enclosingBlock, sympyAssignment):
+def resolve_buffer_accesses(ast_node, base_buffer_index, read_only_field_names=set()):
+    def visit_sympy_expr(expr, enclosing_block, sympy_assignment):
         if isinstance(expr, Field.Access):
-            fieldAccess = expr
+            field_access = expr
 
             # Do not apply transformation if field is not a buffer
-            if not FieldType.isBuffer(fieldAccess.field):
+            if not FieldType.is_buffer(field_access.field):
                 return expr
 
-            buffer = fieldAccess.field
+            buffer = field_access.field
 
-            dtype = PointerType(buffer.dtype, const=buffer.name in readOnlyFieldNames, restrict=True)
-            fieldPtr = TypedSymbol("%s%s" % (Field.DATA_PREFIX, symbolNameToVariableName(buffer.name)), dtype)
+            dtype = PointerType(buffer.dtype, const=buffer.name in read_only_field_names, restrict=True)
+            field_ptr = TypedSymbol("%s%s" % (Field.DATA_PREFIX, symbol_name_to_variable_name(buffer.name)), dtype)
 
-            bufferIndex = baseBufferIndex
-            if len(fieldAccess.index) > 1:
+            buffer_index = base_buffer_index
+            if len(field_access.index) > 1:
                 raise RuntimeError('Only indexing dimensions up to 1 are currently supported in buffers!')
 
-            if len(fieldAccess.index) > 0:
-                cellIndex = fieldAccess.index[0]
-                bufferIndex += cellIndex
+            if len(field_access.index) > 0:
+                cell_index = field_access.index[0]
+                buffer_index += cell_index
 
-            result = ast.ResolvedFieldAccess(fieldPtr, bufferIndex, fieldAccess.field, fieldAccess.offsets,
-                                             fieldAccess.index)
+            result = ast.ResolvedFieldAccess(field_ptr, buffer_index, field_access.field, field_access.offsets,
+                                             field_access.index)
 
-            return visitSympyExpr(result, enclosingBlock, sympyAssignment)
+            return visit_sympy_expr(result, enclosing_block, sympy_assignment)
         else:
             if isinstance(expr, ast.ResolvedFieldAccess):
                 return expr
 
-            newArgs = [visitSympyExpr(e, enclosingBlock, sympyAssignment) for e in expr.args]
+            new_args = [visit_sympy_expr(e, enclosing_block, sympy_assignment) for e in expr.args]
             kwargs = {'evaluate': False} if type(expr) in (sp.Add, sp.Mul, sp.Piecewise) else {}
-            return expr.func(*newArgs, **kwargs) if newArgs else expr
-
-    def visitNode(subAst):
-        if isinstance(subAst, ast.SympyAssignment):
-            enclosingBlock = subAst.parent
-            assert type(enclosingBlock) is ast.Block
-            subAst.lhs = visitSympyExpr(subAst.lhs, enclosingBlock, subAst)
-            subAst.rhs = visitSympyExpr(subAst.rhs, enclosingBlock, subAst)
+            return expr.func(*new_args, **kwargs) if new_args else expr
+
+    def visit_node(sub_ast):
+        if isinstance(sub_ast, ast.SympyAssignment):
+            enclosing_block = sub_ast.parent
+            assert type(enclosing_block) is ast.Block
+            sub_ast.lhs = visit_sympy_expr(sub_ast.lhs, enclosing_block, sub_ast)
+            sub_ast.rhs = visit_sympy_expr(sub_ast.rhs, enclosing_block, sub_ast)
         else:
-            for i, a in enumerate(subAst.args):
-                visitNode(a)
+            for a in sub_ast.args:
+                visit_node(a)
 
-    return visitNode(astNode)
+    return visit_node(ast_node)
 
 
-def resolveFieldAccesses(astNode, readOnlyFieldNames=set(), field_to_base_pointer_info={}, field_to_fixed_coordinates={}):
+def resolve_field_accesses(ast_node, read_only_field_names=set(),
+                           field_to_base_pointer_info={}, field_to_fixed_coordinates={}):
     """
     Substitutes :class:`pystencils.field.Field.Access` nodes by array indexing
 
-    :param astNode: the AST root
-    :param readOnlyFieldNames: set of field names which are considered read-only
+    :param ast_node: the AST root
+    :param read_only_field_names: set of field names which are considered read-only
     :param field_to_base_pointer_info: a list of tuples indicating which intermediate base pointers should be created
-                                   for details see :func:`parseBasePointerInfo`
+                                   for details see :func:`parse_base_pointer_info`
     :param field_to_fixed_coordinates: map of field name to a tuple of coordinate symbols. Instead of using the loop
                                     counters to index the field these symbols are used as coordinates
     :return: transformed AST
@@ -353,37 +356,37 @@ def resolveFieldAccesses(astNode, readOnlyFieldNames=set(), field_to_base_pointe
             if field.name in field_to_base_pointer_info:
                 base_pointer_info = field_to_base_pointer_info[field.name]
             else:
-                base_pointer_info = [list(range(field.indexDimensions + field.spatialDimensions))]
+                base_pointer_info = [list(range(field.index_dimensions + field.spatial_dimensions))]
 
-            dtype = PointerType(field.dtype, const=field.name in readOnlyFieldNames, restrict=True)
-            field_ptr = TypedSymbol("%s%s" % (Field.DATA_PREFIX, symbolNameToVariableName(field.name)), dtype)
+            dtype = PointerType(field.dtype, const=field.name in read_only_field_names, restrict=True)
+            field_ptr = TypedSymbol("%s%s" % (Field.DATA_PREFIX, symbol_name_to_variable_name(field.name)), dtype)
 
-            def create_coordinate_dict(group):
-                coord_dict = {}
-                for e in group:
-                    if e < field.spatialDimensions:
+            def create_coordinate_dict(group_param):
+                coordinates = {}
+                for e in group_param:
+                    if e < field.spatial_dimensions:
                         if field.name in field_to_fixed_coordinates:
-                            coord_dict[e] = field_to_fixed_coordinates[field.name][e]
+                            coordinates[e] = field_to_fixed_coordinates[field.name][e]
                         else:
                             ctr_name = ast.LoopOverCoordinate.LOOP_COUNTER_NAME_PREFIX
-                            coord_dict[e] = TypedSymbol("%s_%d" % (ctr_name, e), 'int')
-                        coord_dict[e] *= field.dtype.item_size
+                            coordinates[e] = TypedSymbol("%s_%d" % (ctr_name, e), 'int')
+                        coordinates[e] *= field.dtype.item_size
                     else:
                         if isinstance(field.dtype, StructType):
-                            assert field.indexDimensions == 1
+                            assert field.index_dimensions == 1
                             accessed_field_name = field_access.index[0]
                             assert isinstance(accessed_field_name, str)
-                            coord_dict[e] = field.dtype.get_element_offset(accessed_field_name)
+                            coordinates[e] = field.dtype.get_element_offset(accessed_field_name)
                         else:
-                            coord_dict[e] = field_access.index[e - field.spatialDimensions]
+                            coordinates[e] = field_access.index[e - field.spatial_dimensions]
 
-                return coord_dict
+                return coordinates
 
             last_pointer = field_ptr
 
             for group in reversed(base_pointer_info[1:]):
                 coord_dict = create_coordinate_dict(group)
-                new_ptr, offset = createIntermediateBasePointer(field_access, coord_dict, last_pointer)
+                new_ptr, offset = create_intermediate_base_pointer(field_access, coord_dict, last_pointer)
                 if new_ptr not in enclosing_block.symbols_defined:
                     new_assignment = ast.SympyAssignment(new_ptr, last_pointer + offset, is_const=False)
                     enclosing_block.insert_before(new_assignment, sympy_assignment)
@@ -391,13 +394,13 @@ def resolveFieldAccesses(astNode, readOnlyFieldNames=set(), field_to_base_pointe
 
             coord_dict = create_coordinate_dict(base_pointer_info[0])
 
-            _, offset = createIntermediateBasePointer(field_access, coord_dict, last_pointer)
+            _, offset = create_intermediate_base_pointer(field_access, coord_dict, last_pointer)
             result = ast.ResolvedFieldAccess(last_pointer, offset, field_access.field,
                                              field_access.offsets, field_access.index)
 
             if isinstance(get_base_type(field_access.field.dtype), StructType):
                 new_type = field_access.field.dtype.get_element_type(field_access.index[0])
-                result = castFunc(result, new_type)
+                result = cast_func(result, new_type)
 
             return visit_sympy_expr(result, enclosing_block, sympy_assignment)
         else:
@@ -418,17 +421,17 @@ def resolveFieldAccesses(astNode, readOnlyFieldNames=set(), field_to_base_pointe
             for i, a in enumerate(sub_ast.args):
                 visit_node(a)
 
-    return visit_node(astNode)
+    return visit_node(ast_node)
 
 
-def moveConstantsBeforeLoop(astNode):
+def move_constants_before_loop(ast_node):
     """
     Moves :class:`pystencils.ast.SympyAssignment` nodes out of loop body if they are iteration independent.
-    Call this after creating the loop structure with :func:`makeLoopOverDomain`
-    :param astNode:
+    Call this after creating the loop structure with :func:`make_loop_over_domain`
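+    For example, an assignment ``a = 3.5`` inside the loop body, whose right-hand side does not depend on
+    any loop counter, is moved in front of the outermost loop.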
+    :param ast_node:
     :return:
     """
-    def findBlockToMoveTo(node):
+    def find_block_to_move_to(node):
         """
         Traverses parents of node as long as the symbols are independent and returns a (parent) block
         the assignment can be safely moved to
@@ -438,159 +441,159 @@ def moveConstantsBeforeLoop(astNode):
         assert isinstance(node, ast.SympyAssignment)
         assert isinstance(node.parent, ast.Block)
 
-        lastBlock = node.parent
-        lastBlockChild = node
+        last_block = node.parent
+        last_block_child = node
         element = node.parent
-        prevElement = node
+        prev_element = node
         while element:
             if isinstance(element, ast.Block):
-                lastBlock = element
-                lastBlockChild = prevElement
+                last_block = element
+                last_block_child = prev_element
 
             if isinstance(element, ast.Conditional):
-                criticalSymbols = element.conditionExpr.atoms(sp.Symbol)
+                critical_symbols = element.conditionExpr.atoms(sp.Symbol)
             else:
-                criticalSymbols = element.symbols_defined
-            if node.undefined_symbols.intersection(criticalSymbols):
+                critical_symbols = element.symbols_defined
+            if node.undefined_symbols.intersection(critical_symbols):
                 break
-            prevElement = element
+            prev_element = element
             element = element.parent
-        return lastBlock, lastBlockChild
+        return last_block, last_block_child
 
-    def checkIfAssignmentAlreadyInBlock(assignment, targetBlock):
-        for arg in targetBlock.args:
+    def check_if_assignment_already_in_block(assignment, target_block):
+        for arg in target_block.args:
             if type(arg) is not ast.SympyAssignment:
                 continue
             if arg.lhs == assignment.lhs:
                 return arg
         return None
 
-    def getBlocks(node, resultList):
+    def get_blocks(node, result_list):
         if isinstance(node, ast.Block):
-            resultList.insert(0, node)
+            result_list.insert(0, node)
         if isinstance(node, ast.Node):
             for a in node.args:
-                getBlocks(a, resultList)
+                get_blocks(a, result_list)
 
-    allBlocks = []
-    getBlocks(astNode, allBlocks)
-    for block in allBlocks:
+    all_blocks = []
+    get_blocks(ast_node, all_blocks)
+    for block in all_blocks:
         children = block.take_child_nodes()
         for child in children:
             if not isinstance(child, ast.SympyAssignment):
                 block.append(child)
             else:
-                target, childToInsertBefore = findBlockToMoveTo(child)
+                target, child_to_insert_before = find_block_to_move_to(child)
                 if target == block:     # movement not possible
                     target.append(child)
                 else:
-                    existingAssignment = checkIfAssignmentAlreadyInBlock(child, target)
-                    if not existingAssignment:
-                        target.insert_before(child, childToInsertBefore)
+                    existing_assignment = check_if_assignment_already_in_block(child, target)
+                    if not existing_assignment:
+                        target.insert_before(child, child_to_insert_before)
                     else:
-                        assert existingAssignment.rhs == child.rhs, "Symbol with same name exists already"
+                        assert existing_assignment.rhs == child.rhs, "Symbol with same name exists already"
 
 
-def splitInnerLoop(astNode, symbolGroups):
+def split_inner_loop(ast_node: ast.Node, symbol_groups):
     """
-    Splits inner loop into multiple loops to minimize the amount of simultaneous load/store streams
+    Splits the inner loop into multiple loops to minimize the number of simultaneous load/store streams
 
-    :param astNode: AST root
-    :param symbolGroups: sequence of symbol sequences: for each symbol sequence a new inner loop is created which
-         updates these symbols and their dependent symbols. Symbols which are in none of the symbolGroups and which
-         no symbol in a symbol group depends on, are not updated!
-    :return: transformed AST
+    Args:
+        ast_node: AST root
+        symbol_groups: sequence of symbol sequences: for each symbol sequence a new inner loop is created which
+                       updates these symbols and their dependent symbols. Symbols that are in none of the symbol_groups
+                       and on which no symbol in a symbol group depends are not updated!
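+                       Symbols used across the resulting loops are buffered in temporary arrays whose length
+                       is the inner loop's stop value; these are allocated in front of the outermost loop and
+                       freed after it.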
     """
-    allLoops = astNode.atoms(ast.LoopOverCoordinate)
-    innerLoop = [l for l in allLoops if l.is_innermost_loop]
-    assert len(innerLoop) == 1, "Error in AST: multiple innermost loops. Was split transformation already called?"
-    innerLoop = innerLoop[0]
-    assert type(innerLoop.body) is ast.Block
-    outerLoop = [l for l in allLoops if l.is_outermost_loop]
-    assert len(outerLoop) == 1, "Error in AST, multiple outermost loops."
-    outerLoop = outerLoop[0]
-
-    symbolsWithTemporaryArray = OrderedDict()
-    assignmentMap = OrderedDict((a.lhs, a) for a in innerLoop.body.args)
-
-    assignmentGroups = []
-    for symbolGroup in symbolGroups:
+    all_loops = ast_node.atoms(ast.LoopOverCoordinate)
+    inner_loop = [l for l in all_loops if l.is_innermost_loop]
+    assert len(inner_loop) == 1, "Error in AST: multiple innermost loops. Was split transformation already called?"
+    inner_loop = inner_loop[0]
+    assert type(inner_loop.body) is ast.Block
+    outer_loop = [l for l in all_loops if l.is_outermost_loop]
+    assert len(outer_loop) == 1, "Error in AST, multiple outermost loops."
+    outer_loop = outer_loop[0]
+
+    symbols_with_temporary_array = OrderedDict()
+    assignment_map = OrderedDict((a.lhs, a) for a in inner_loop.body.args)
+
+    assignment_groups = []
+    for symbol_group in symbol_groups:
         # get all dependent symbols
-        symbolsToProcess = list(symbolGroup)
-        symbolsResolved = set()
-        while symbolsToProcess:
-            s = symbolsToProcess.pop()
-            if s in symbolsResolved:
+        symbols_to_process = list(symbol_group)
+        symbols_resolved = set()
+        while symbols_to_process:
+            s = symbols_to_process.pop()
+            if s in symbols_resolved:
                 continue
 
-            if s in assignmentMap:  # if there is no assignment inside the loop body it is independent already
-                for newSymbol in assignmentMap[s].rhs.atoms(sp.Symbol):
-                    if type(newSymbol) is not Field.Access and newSymbol not in symbolsWithTemporaryArray:
-                        symbolsToProcess.append(newSymbol)
-            symbolsResolved.add(s)
+            if s in assignment_map:  # if there is no assignment inside the loop body it is independent already
+                for new_symbol in assignment_map[s].rhs.atoms(sp.Symbol):
+                    if type(new_symbol) is not Field.Access and new_symbol not in symbols_with_temporary_array:
+                        symbols_to_process.append(new_symbol)
+            symbols_resolved.add(s)
 
-        for symbol in symbolGroup:
+        for symbol in symbol_group:
             if type(symbol) is not Field.Access:
                 assert type(symbol) is TypedSymbol
-                newTs = TypedSymbol(symbol.name, PointerType(symbol.dtype))
-                symbolsWithTemporaryArray[symbol] = IndexedBase(newTs, shape=(1,))[innerLoop.loop_counter_symbol]
+                new_ts = TypedSymbol(symbol.name, PointerType(symbol.dtype))
+                symbols_with_temporary_array[symbol] = IndexedBase(new_ts, shape=(1,))[inner_loop.loop_counter_symbol]
 
-        assignmentGroup = []
-        for assignment in innerLoop.body.args:
-            if assignment.lhs in symbolsResolved:
-                newRhs = assignment.rhs.subs(symbolsWithTemporaryArray.items())
+        assignment_group = []
+        for assignment in inner_loop.body.args:
+            if assignment.lhs in symbols_resolved:
+                new_rhs = assignment.rhs.subs(symbols_with_temporary_array.items())
-                if type(assignment.lhs) is not Field.Access and assignment.lhs in symbolGroup:
+                if type(assignment.lhs) is not Field.Access and assignment.lhs in symbol_group:
                     assert type(assignment.lhs) is TypedSymbol
-                    newTs = TypedSymbol(assignment.lhs.name, PointerType(assignment.lhs.dtype))
-                    newLhs = IndexedBase(newTs, shape=(1,))[innerLoop.loop_counter_symbol]
+                    new_ts = TypedSymbol(assignment.lhs.name, PointerType(assignment.lhs.dtype))
+                    new_lhs = IndexedBase(new_ts, shape=(1,))[inner_loop.loop_counter_symbol]
                 else:
-                    newLhs = assignment.lhs
-                assignmentGroup.append(ast.SympyAssignment(newLhs, newRhs))
-        assignmentGroups.append(assignmentGroup)
+                    new_lhs = assignment.lhs
+                assignment_group.append(ast.SympyAssignment(new_lhs, new_rhs))
+        assignment_groups.append(assignment_group)
 
-    newLoops = [innerLoop.new_loop_with_different_body(ast.Block(group)) for group in assignmentGroups]
-    innerLoop.parent.replace(innerLoop, ast.Block(newLoops))
+    new_loops = [inner_loop.new_loop_with_different_body(ast.Block(group)) for group in assignment_groups]
+    inner_loop.parent.replace(inner_loop, ast.Block(new_loops))
 
-    for tmpArray in symbolsWithTemporaryArray:
-        tmpArrayPointer = TypedSymbol(tmpArray.name, PointerType(tmpArray.dtype))
-        outerLoop.parent.insert_front(ast.TemporaryMemoryAllocation(tmpArrayPointer, innerLoop.stop))
-        outerLoop.parent.append(ast.TemporaryMemoryFree(tmpArrayPointer))
+    for tmp_array in symbols_with_temporary_array:
+        tmp_array_pointer = TypedSymbol(tmp_array.name, PointerType(tmp_array.dtype))
+        outer_loop.parent.insert_front(ast.TemporaryMemoryAllocation(tmp_array_pointer, inner_loop.stop))
+        outer_loop.parent.append(ast.TemporaryMemoryFree(tmp_array_pointer))
 
 
-def cutLoop(loopNode, cuttingPoints):
+def cut_loop(loop_node, cutting_points):
     """Cuts loop at given cutting points, that means one loop is transformed into len(cuttingPoints)+1 new loops
     that range from  oldBegin to cuttingPoint[1], ..., cuttingPoint[-1] to oldEnd"""
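+
+    For example, cutting a loop over [0, 10) at the points (3, 7) yields loops over [0, 3), [3, 7)
+    and [7, 10); sub-ranges of length one are unrolled by substituting the loop counter directly.
+    """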
-    if loopNode.step != 1:
+    if loop_node.step != 1:
         raise NotImplementedError("Can only split loops that have a step of 1")
-    newLoops = []
-    newStart = loopNode.start
-    cuttingPoints = list(cuttingPoints) + [loopNode.stop]
-    for newEnd in cuttingPoints:
-        if newEnd - newStart == 1:
-            newBody = deepcopy(loopNode.body)
-            newBody.subs({loopNode.loop_counter_symbol: newStart})
-            newLoops.append(newBody)
+    new_loops = []
+    new_start = loop_node.start
+    cutting_points = list(cutting_points) + [loop_node.stop]
+    for new_end in cutting_points:
+        if new_end - new_start == 1:
+            new_body = deepcopy(loop_node.body)
+            new_body.subs({loop_node.loop_counter_symbol: new_start})
+            new_loops.append(new_body)
         else:
-            newLoop = ast.LoopOverCoordinate(deepcopy(loopNode.body), loopNode.coordinateToLoopOver,
-                                             newStart, newEnd, loopNode.step)
-            newLoops.append(newLoop)
-        newStart = newEnd
-    loopNode.parent.replace(loopNode, newLoops)
+            new_loop = ast.LoopOverCoordinate(deepcopy(loop_node.body), loop_node.coordinateToLoopOver,
+                                              new_start, new_end, loop_node.step)
+            new_loops.append(new_loop)
+        new_start = new_end
+    loop_node.parent.replace(loop_node, new_loops)
 
 
-def isConditionNecessary(condition, preCondition, symbol):
+def is_condition_necessary(condition, pre_condition, symbol):
     """
-    Determines if a logical condition of a single variable is already contained in a stronger preCondition
-    so if from preCondition follows that condition is always true, then this condition is not necessary
+    Determines if a logical condition of a single variable is already contained in a stronger pre_condition,
+    i.e. if pre_condition implies that the condition is always true, the condition is not necessary.
     :param condition: sympy relational of one variable
-    :param preCondition: logical expression that is known to be true
+    :param pre_condition: logical expression that is known to be true
     :param symbol: the single symbol of interest
-    :return: returns  not (preCondition => condition) where "=>" is logical implication
+    :return: not (pre_condition => condition), where "=>" is logical implication
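+             e.g. the condition ``x < 10`` follows from the pre_condition ``x < 5``, so it is not
+             necessary and False is returned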
     """
     from sympy.solvers.inequalities import reduce_rational_inequalities
     from sympy.logic.boolalg import to_dnf
 
-    def toDnfList(expr):
+    def to_dnf_list(expr):
         result = to_dnf(expr)
         if isinstance(result, sp.Or):
-            return [orTerm.args for orTerm in result.args]
+            return [or_term.args for or_term in result.args]
@@ -599,12 +602,12 @@ def isConditionNecessary(condition, preCondition, symbol):
         else:
             return result
 
-    t1 = reduce_rational_inequalities(toDnfList(sp.And(condition, preCondition)), symbol)
-    t2 = reduce_rational_inequalities(toDnfList(preCondition), symbol)
+    t1 = reduce_rational_inequalities(to_dnf_list(sp.And(condition, pre_condition)), symbol)
+    t2 = reduce_rational_inequalities(to_dnf_list(pre_condition), symbol)
     return t1 != t2
 
 
-def simplifyBooleanExpression(expr, singleVariableRanges):
+def simplify_boolean_expression(expr, single_variable_ranges):
     """Simplification of boolean expression using known ranges of variables
     The singleVariableRanges parameter is a dict mapping a variable name to a sympy logical expression that
     contains only this variable and defines a range for it. For example with a being a symbol
@@ -620,8 +623,8 @@ def simplifyBooleanExpression(expr, singleVariableRanges):
             symbols = e.atoms(sp.Symbol)
             if len(symbols) == 1:
                 symbol = symbols.pop()
-                if symbol in singleVariableRanges:
-                    if not isConditionNecessary(e, singleVariableRanges[symbol], symbol):
+                if symbol in single_variable_ranges:
+                    if not is_condition_necessary(e, single_variable_ranges[symbol], symbol):
                         return sp.true
             return e
         else:
@@ -631,127 +634,127 @@ def simplifyBooleanExpression(expr, singleVariableRanges):
     return visit(expr)
 
 
-def simplifyConditionals(node, loopConditionals={}):
+def simplify_conditionals(node, loop_conditionals={}):
     """Simplifies/Removes conditions inside loops that depend on the loop counter."""
     if isinstance(node, ast.LoopOverCoordinate):
-        ctrSym = node.loop_counter_symbol
-        loopConditionals[ctrSym] = sp.And(ctrSym >= node.start, ctrSym < node.stop)
-        simplifyConditionals(node.body)
-        del loopConditionals[ctrSym]
+        ctr_sym = node.loop_counter_symbol
+        loop_conditionals[ctr_sym] = sp.And(ctr_sym >= node.start, ctr_sym < node.stop)
+        simplify_conditionals(node.body)
+        del loop_conditionals[ctr_sym]
     elif isinstance(node, ast.Conditional):
-        node.conditionExpr = simplifyBooleanExpression(node.conditionExpr, loopConditionals)
-        simplifyConditionals(node.trueBlock)
+        node.conditionExpr = simplify_boolean_expression(node.conditionExpr, loop_conditionals)
+        simplify_conditionals(node.trueBlock)
         if node.falseBlock:
-            simplifyConditionals(node.falseBlock)
+            simplify_conditionals(node.falseBlock)
         if node.conditionExpr == sp.true:
             node.parent.replace(node, [node.trueBlock])
         if node.conditionExpr == sp.false:
             node.parent.replace(node, [node.falseBlock] if node.falseBlock else [])
     elif isinstance(node, ast.Block):
         for a in list(node.args):
-            simplifyConditionals(a)
+            simplify_conditionals(a)
     elif isinstance(node, ast.SympyAssignment):
         return node
     else:
         raise ValueError("Can not handle node", type(node))
 
 
-def cleanupBlocks(node):
+def cleanup_blocks(node):
     """Curly Brace Removal: Removes empty blocks, and replaces blocks with a single child by its child """
     if isinstance(node, ast.SympyAssignment):
         return
     elif isinstance(node, ast.Block):
         for a in list(node.args):
-            cleanupBlocks(a)
+            cleanup_blocks(a)
         if len(node.args) <= 1 and isinstance(node.parent, ast.Block):
             node.parent.replace(node, node.args)
             return
     else:
         for a in node.args:
-            cleanupBlocks(a)
+            cleanup_blocks(a)
 
 
-def symbolNameToVariableName(symbolName):
+def symbol_name_to_variable_name(symbol_name):
     """Replaces characters which are allowed in sympy symbol names but not in C/C++ variable names"""
-    return symbolName.replace("^", "_")
+    return symbol_name.replace("^", "_")
 
 
-def typeAllEquations(eqs, typeForSymbol):
+def type_all_equations(eqs, type_for_symbol):
     """
     Traverses AST and replaces every :class:`sympy.Symbol` by a :class:`pystencils.typedsymbol.TypedSymbol`.
     Additionally returns sets of all fields which are read/written
 
     :param eqs: list of equations
-    :param typeForSymbol: dict mapping symbol names to types. Types are strings of C types like 'int' or 'double'
-    :return: ``fieldsRead, fieldsWritten, typedEquations`` set of read fields, set of written fields, list of equations
+    :param type_for_symbol: dict mapping symbol names to types. Types are strings of C types like 'int' or 'double'
+    :return: ``fields_read, fields_written, typed_equations``: set of read fields, set of written fields, list of equations
                where symbols have been replaced by typed symbols
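+               e.g. a plain ``sp.Symbol('a')`` is replaced by ``TypedSymbol('a', type_for_symbol['a'])``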
     """
-    if isinstance(typeForSymbol, str) or not hasattr(typeForSymbol, '__getitem__'):
-        typeForSymbol = typingFromSympyInspection(eqs, typeForSymbol)
+    if isinstance(type_for_symbol, str) or not hasattr(type_for_symbol, '__getitem__'):
+        type_for_symbol = typing_from_sympy_inspection(eqs, type_for_symbol)
 
-    fieldsWritten = set()
-    fieldsRead = set()
+    fields_written = set()
+    fields_read = set()
 
-    def processRhs(term):
+    def process_rhs(term):
         """Replaces Symbols by:
             - TypedSymbol if symbol is not a field access
         """
         if isinstance(term, Field.Access):
-            fieldsRead.add(term.field)
+            fields_read.add(term.field)
             return term
         elif isinstance(term, TypedSymbol):
             return term
         elif isinstance(term, sp.Symbol):
-            return TypedSymbol(symbolNameToVariableName(term.name), typeForSymbol[term.name])
+            return TypedSymbol(symbol_name_to_variable_name(term.name), type_for_symbol[term.name])
         else:
-            newArgs = [processRhs(arg) for arg in term.args]
-            return term.func(*newArgs) if newArgs else term
+            new_args = [process_rhs(arg) for arg in term.args]
+            return term.func(*new_args) if new_args else term
 
-    def processLhs(term):
+    def process_lhs(term):
         """Replaces symbol by TypedSymbol and adds field to fieldsWriten"""
         if isinstance(term, Field.Access):
-            fieldsWritten.add(term.field)
+            fields_written.add(term.field)
             return term
         elif isinstance(term, TypedSymbol):
             return term
         elif isinstance(term, sp.Symbol):
-            return TypedSymbol(term.name, typeForSymbol[term.name])
+            return TypedSymbol(term.name, type_for_symbol[term.name])
         else:
             assert False, "Expected a symbol as left-hand-side"
 
-    def visit(object):
-        if isinstance(object, list) or isinstance(object, tuple):
-            return [visit(e) for e in object]
-        if isinstance(object, sp.Eq) or isinstance(object, ast.SympyAssignment) or isinstance(object, Assignment):
-            newLhs = processLhs(object.lhs)
-            newRhs = processRhs(object.rhs)
-            return ast.SympyAssignment(newLhs, newRhs)
-        elif isinstance(object, ast.Conditional):
-            falseBlock = None if object.falseBlock is None else visit(object.falseBlock)
-            return ast.Conditional(processRhs(object.conditionExpr),
-                                   true_block=visit(object.trueBlock), false_block=falseBlock)
-        elif isinstance(object, ast.Block):
-            return ast.Block([visit(e) for e in object.args])
+    def visit(obj):
+        if isinstance(obj, list) or isinstance(obj, tuple):
+            return [visit(e) for e in obj]
+        if isinstance(obj, sp.Eq) or isinstance(obj, ast.SympyAssignment) or isinstance(obj, Assignment):
+            new_lhs = process_lhs(obj.lhs)
+            new_rhs = process_rhs(obj.rhs)
+            return ast.SympyAssignment(new_lhs, new_rhs)
+        elif isinstance(obj, ast.Conditional):
+            false_block = None if obj.falseBlock is None else visit(obj.falseBlock)
+            return ast.Conditional(process_rhs(obj.conditionExpr),
+                                   true_block=visit(obj.trueBlock), false_block=false_block)
+        elif isinstance(obj, ast.Block):
+            return ast.Block([visit(e) for e in obj.args])
         else:
-            return object
+            return obj
 
-    typedEquations = visit(eqs)
+    typed_equations = visit(eqs)
 
-    return fieldsRead, fieldsWritten, typedEquations
+    return fields_read, fields_written, typed_equations
 
 
 # --------------------------------------- Helper Functions -------------------------------------------------------------
 
 
-def typingFromSympyInspection(eqs, defaultType="double"):
+def typing_from_sympy_inspection(eqs, default_type="double"):
     """
     Creates a default symbol name to type mapping.
-    If a sympy Boolean is assigned to a symbol it is assumed to be 'bool' otherwise the default type, usually ('double')
+    If a sympy Boolean is assigned to a symbol it is assumed to be 'bool', otherwise the default type (usually 'double')
     :param eqs: list of equations
-    :param defaultType: the type for non-boolean symbols
+    :param default_type: the type for non-boolean symbols
     :return: dictionary, mapping symbol name to type
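+             e.g. for an equation ``c = a < b`` the symbol c is mapped to 'bool'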
     """
-    result = defaultdict(lambda: defaultType)
+    result = defaultdict(lambda: default_type)
     for eq in eqs:
         if isinstance(eq, ast.Node):
             continue
@@ -762,19 +765,19 @@ def typingFromSympyInspection(eqs, defaultType="double"):
     return result
 
 
-def getNextParentOfType(node, parentType):
+def get_next_parent_of_type(node, parent_type):
     """
-    Traverses the AST nodes parents until a parent of given type was found. If no such parent is found, None is returned
+    Traverses the AST node's parents until a parent of the given type is found. If no such parent exists, None is returned.
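+    e.g. ``get_next_parent_of_type(node, ast.LoopOverCoordinate)`` returns the innermost loop enclosing node,
+    or None if node is not inside any loop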
     """
     parent = node.parent
     while parent is not None:
-        if isinstance(parent, parentType):
+        if isinstance(parent, parent_type):
             return parent
         parent = parent.parent
     return None
 
 
-def getOptimalLoopOrdering(fields):
+def get_optimal_loop_ordering(fields):
     """
     Determines the optimal loop order for a given set of fields.
-    If the fields have different memory layout or different sizes an exception is thrown.
+    If the fields have different memory layouts or different sizes, an exception is thrown.
@@ -782,11 +785,11 @@ def getOptimalLoopOrdering(fields):
     :return: list of coordinate ids, where the first list entry should be the outermost loop
     """
     assert len(fields) > 0
-    refField = next(iter(fields))
+    ref_field = next(iter(fields))
     for field in fields:
-        if field.spatialDimensions != refField.spatialDimensions:
+        if field.spatial_dimensions != ref_field.spatial_dimensions:
             raise ValueError("All fields have to have the same number of spatial dimensions. Spatial field dimensions: "
-                             + str({f.name: f.spatialShape for f in fields}))
+                             + str({f.name: f.spatial_shape for f in fields}))
 
     layouts = set([field.layout for field in fields])
     if len(layouts) > 1:
@@ -796,15 +799,15 @@ def getOptimalLoopOrdering(fields):
     return list(layout)
 
 
-def getLoopHierarchy(astNode):
+def get_loop_hierarchy(ast_node):
     """Determines the loop structure around a given AST node.
-    :param astNode: the AST node
+    :param ast_node: the AST node
-    :return: list of coordinate ids, where the first list entry is the innermost loop
+    :return: coordinate ids, ordered from the outermost to the innermost loop
     """
     result = []
-    node = astNode
+    node = ast_node
     while node is not None:
-        node = getNextParentOfType(node, ast.LoopOverCoordinate)
+        node = get_next_parent_of_type(node, ast.LoopOverCoordinate)
         if node:
             result.append(node.coordinateToLoopOver)
     return reversed(result)
@@ -827,52 +830,6 @@ def get_type(node):
-        raise NotImplemented('Not yet supported: %s %s' % (node, type(node)))
+        raise NotImplementedError('Not yet supported: %s %s' % (node, type(node)))
 
 
-def insert_casts(node):
-    """
-    Inserts casts and dtype whpere needed
-    :param node: ast which should be traversed
-    :return: node
-    """
-    def conversion(args):
-        target = args[0]
-        if isinstance(target.dtype, PointerType):
-            # Pointer arithmetic
-            for arg in args[1:]:
-                # Check validness
-                if not arg.dtype.is_int() and not arg.dtype.is_uint():
-                    raise ValueError("Impossible pointer arithmetic", target, arg)
-            pointer = ast.PointerArithmetic(ast.Add(args[1:]), target)
-            return [pointer]
-
-        else:
-            for i in range(len(args)):
-                if args[i].dtype != target.dtype:
-                    args[i] = ast.Conversion(args[i], target.dtype, node)
-            return args
-
-    for arg in node.args:
-        insert_casts(arg)
-    if isinstance(node, ast.Indexed):
-        #TODO revmove this
-        pass
-    elif isinstance(node, ast.Expr):
-        #print(node, node.args)
-        #print([type(arg) for arg in node.args])
-        #print([arg.dtype for arg in node.args])
-        args = sorted((arg for arg in node.args), key=attrgetter('dtype'))
-        target = args[0]
-        node.args = conversion(args)
-        node.dtype = target.dtype
-        #print(node.dtype)
-        #print(node)
-    elif isinstance(node, ast.SympyAssignment):
-        if node.lhs.dtype != node.rhs.dtype:
-            node.replace(node.rhs, ast.Conversion(node.rhs, node.lhs.dtype))
-    elif isinstance(node, ast.LoopOverCoordinate):
-        pass
-    return node
-
-
 def desympy_ast(node):
     """
-    Remove Sympy Expressions, which have more then one argument.
+    Removes sympy expressions which have more than one argument.
@@ -896,29 +853,8 @@ def desympy_ast(node):
             node.replace(arg, ast.Indexed(arg.args, arg.base, node))
         elif isinstance(arg,  sp.tensor.IndexedBase):
             node.replace(arg, arg.label)
-        #elif isinstance(arg, sp.containers.Tuple):
-        #
         else:
-            #print('Not transforming:', type(arg), arg)
             pass
     for arg in node.args:
         desympy_ast(arg)
     return node
-
-
-def check_dtype(node):
-    if isinstance(node, ast.KernelFunction):
-        pass
-    elif isinstance(node, ast.Block):
-        pass
-    elif isinstance(node, ast.LoopOverCoordinate):
-        pass
-    elif isinstance(node, ast.SympyAssignment):
-        pass
-    else:
-        #print(node)
-        #print(node.dtype)
-        pass
-    for arg in node.args:
-        check_dtype(arg)
-
diff --git a/utils.py b/utils.py
index 2ea6b7dd934971cf56a8800b59657abeb572dd0b..439cc944e91a1018656d01895b30d5c9ac4ff09b 100644
--- a/utils.py
+++ b/utils.py
@@ -6,7 +6,7 @@ class DotDict(dict):
     __delattr__ = dict.__delitem__
 
 
-def allEqual(iterator):
+def all_equal(iterator):
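+    """Returns True if all elements of the iterable compare equal.
+
+    >>> all_equal([2, 2, 2])
+    True
+    >>> all_equal([2, 3])
+    False
+    """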
     iterator = iter(iterator)
     try:
         first = next(iterator)
diff --git a/vectorization.py b/vectorization.py
index 6cb9501a6d00c38b9c2e902968c6c2bd143b8c7d..9311ecb7e4022f31409e4eb3e80a215a9a3defd1 100644
--- a/vectorization.py
+++ b/vectorization.py
@@ -1,8 +1,8 @@
 import sympy as sp
 import warnings
 from pystencils.sympyextensions import fast_subs
-from pystencils.transformations import filteredTreeIteration
-from pystencils.data_types import TypedSymbol, VectorType, get_type_of_expression, castFunc, collate_types, PointerType
+from pystencils.transformations import filtered_tree_iteration
+from pystencils.data_types import TypedSymbol, VectorType, get_type_of_expression, cast_func, collate_types, PointerType
 import pystencils.astnodes as ast
 
 
@@ -44,7 +44,7 @@ def vectorize_inner_loops_and_adapt_load_stores(ast_node, vector_width=4):
                     break
                 typed_symbol = base.label
                 assert type(typed_symbol.dtype) is PointerType, f"Type of access is {typed_symbol.dtype}, {indexed}"
-                substitutions[indexed] = castFunc(indexed, VectorType(typed_symbol.dtype.base_type, vector_width))
+                substitutions[indexed] = cast_func(indexed, VectorType(typed_symbol.dtype.base_type, vector_width))
         if not successful:
             warnings.warn("Could not vectorize loop because of non-consecutive memory access")
             continue
@@ -57,7 +57,7 @@ def insert_vector_casts(ast_node):
     """Inserts necessary casts from scalar values to vector values."""
 
     def visit_expr(expr):
-        if expr.func in (sp.Add, sp.Mul) or (isinstance(expr, sp.Rel) and not expr.func == castFunc) or \
+        if expr.func in (sp.Add, sp.Mul) or (isinstance(expr, sp.Rel) and expr.func != cast_func) or \
                 isinstance(expr, sp.boolalg.BooleanFunction):
             new_args = [visit_expr(a) for a in expr.args]
             arg_types = [get_type_of_expression(a) for a in new_args]
@@ -65,7 +65,7 @@ def insert_vector_casts(ast_node):
                 return expr
             else:
                 target_type = collate_types(arg_types)
-                casted_args = [castFunc(a, target_type) if t != target_type else a
+                casted_args = [cast_func(a, target_type) if t != target_type else a
                                for a, t in zip(new_args, arg_types)]
                 return expr.func(*casted_args)
         elif expr.func is sp.Pow:
@@ -82,10 +82,10 @@ def insert_vector_casts(ast_node):
             if type(condition_target_type) is VectorType and type(result_target_type) is not VectorType:
                 result_target_type = VectorType(result_target_type, width=condition_target_type.width)
 
-            casted_results = [castFunc(a, result_target_type) if t != result_target_type else a
+            casted_results = [cast_func(a, result_target_type) if t != result_target_type else a
                               for a, t in zip(new_results, types_of_results)]
 
-            casted_conditions = [castFunc(a, condition_target_type)
+            casted_conditions = [cast_func(a, condition_target_type)
                                  if t != condition_target_type and a is not True else a
                                  for a, t in zip(new_conditions, types_of_conditions)]
 
@@ -94,7 +94,7 @@ def insert_vector_casts(ast_node):
             return expr
 
     substitution_dict = {}
-    for assignment in filteredTreeIteration(ast_node, ast.SympyAssignment):
+    for assignment in filtered_tree_iteration(ast_node, ast.SympyAssignment):
         subs_expr = fast_subs(assignment.rhs, substitution_dict, skip=lambda e: isinstance(e, ast.ResolvedFieldAccess))
         assignment.rhs = visit_expr(subs_expr)
         rhs_type = get_type_of_expression(assignment.rhs)
@@ -105,8 +105,8 @@ def insert_vector_casts(ast_node):
                 new_lhs = TypedSymbol(assignment.lhs.name, new_lhs_type)
                 substitution_dict[assignment.lhs] = new_lhs
                 assignment.lhs = new_lhs
-        elif assignment.lhs.func == castFunc:
+        elif assignment.lhs.func == cast_func:
             lhs_type = assignment.lhs.args[1]
             if type(lhs_type) is VectorType and type(rhs_type) is not VectorType:
-                assignment.rhs = castFunc(assignment.rhs, lhs_type)
+                assignment.rhs = cast_func(assignment.rhs, lhs_type)
 
diff --git a/vtk.py b/vtk.py
index caf9ca1aa411e065856315f196f51af8d7d2801c..da18c7934b5110cff27027a6b34770a3fc7a219d 100644
--- a/vtk.py
+++ b/vtk.py
@@ -2,7 +2,7 @@ from pyevtk.vtk import VtkFile, VtkImageData
 from pyevtk.hl import _addDataToFile, _appendDataToFile
 
 
-def imageToVTK(path, cellData, origin=(0.0, 0.0, 0.0), spacing=(1.0, 1.0, 1.0)):
+def image_to_vtk(path, cell_data, origin=(0.0, 0.0, 0.0), spacing=(1.0, 1.0, 1.0)):
     """
     Writes numpy data to VTK
 
@@ -10,33 +10,34 @@ def imageToVTK(path, cellData, origin=(0.0, 0.0, 0.0), spacing=(1.0, 1.0, 1.0)):
 
-    Patched version of same pyevtk function that also supports vector-valued data
+    Patched version of the same pyevtk function that also supports vector-valued data
 
-    :param path: path with file name, without file ending (.vtk) where data should be stored
-    :param cellData: dictionary, mapping name of the data to a 3D numpy array, or to a 3-tuple of 3D numpy arrays
-                     in case of vector-valued data
-    :param origin: 3-tuple describing the origin of the field in 3D
-    :param spacing: 3-tuple describing the grid spacing in x,y, z direction
-    :returns path to file that was written
-
-
-    Example:
-
-    >>> from tempfile import TemporaryDirectory
-    >>> import os
-    >>> import numpy as np
-    >>> with TemporaryDirectory() as tmp_dir:
-    ...     path = os.path.join(tmp_dir, 'out')
-    ...     size = (20, 20, 20)
-    ...     resFile = imageToVTK(path, cellData = {'someScalarField': np.zeros(size),
-    ...                                            'someVectorField': (np.zeros(size), np.ones(size), np.zeros(size))
-    ...                                           })
+    Args:
+        path: path with file name, without file ending (.vtk) where data should be stored
+        cell_data: dictionary, mapping name of the data to a 3D numpy array, or to a 3-tuple of 3D numpy arrays
+                   in case of vector-valued data
+        origin: 3-tuple describing the origin of the field in 3D
+        spacing: 3-tuple describing the grid spacing in x, y, z direction
+
+    Returns:
+        path to file that was written
+
+    Examples:
+        >>> from tempfile import TemporaryDirectory
+        >>> import os
+        >>> import numpy as np
+        >>> with TemporaryDirectory() as tmp_dir:
+        ...     path = os.path.join(tmp_dir, 'out')
+        ...     size = (20, 20, 20)
+        ...     res_file = image_to_vtk(path, cell_data={'someScalarField': np.zeros(size),
+        ...                                              'someVectorField': (np.ones(size), np.ones(size), np.ones(size))
+        ...                                              })
     """
 
     # Extract dimensions
     start = (0, 0, 0)
     end = None
 
-    keys = list(cellData.keys())
-    data = cellData[keys[0]]
+    keys = list(cell_data.keys())
+    data = cell_data[keys[0]]
     if hasattr(data, 'shape'):
         end = data.shape
     elif isinstance(data, tuple):
@@ -49,9 +50,9 @@ def imageToVTK(path, cellData, origin=(0.0, 0.0, 0.0), spacing=(1.0, 1.0, 1.0)):
     w = VtkFile(path, VtkImageData)
     w.openGrid(start=start, end=end, origin=origin, spacing=spacing)
     w.openPiece(start=start, end=end)
-    _addDataToFile(w, cellData, pointData=None)
+    _addDataToFile(w, cell_data, pointData=None)
     w.closePiece()
     w.closeGrid()
-    _appendDataToFile(w, cellData, pointData=None)
+    _appendDataToFile(w, cell_data, pointData=None)
     w.save()
     return w.getFileName()