diff --git a/alignedarray.py b/alignedarray.py
index 46d4dbacfa7865e7f5af8f971b6b645ce81a3186..65807042ed6cccf6ec62751c118b818a972bfdfa 100644
--- a/alignedarray.py
+++ b/alignedarray.py
@@ -5,9 +5,9 @@ def aligned_empty(shape, byte_alignment=32, dtype=np.float64, byte_offset=0, ord
     """
     Creates an aligned empty numpy array
     :param shape: size of the array
-    :param byte_alignment: alignment in bytes, for the start address of the array holds (a % byteAlignment) == 0
+    :param byte_alignment: alignment in bytes; for the start address a of the array, (a % byte_alignment) == 0 holds
     :param dtype: numpy data type
-    :param byte_offset: offset in bytes for position that should be aligned i.e. (a+byte_offset) % byteAlignment == 0
+    :param byte_offset: offset in bytes of the position that should be aligned, i.e. (a + byte_offset) % byte_alignment == 0;
                        typically used to align first inner cell instead of ghost layer
     :param order: storage linearization order
     :param align_inner_coordinate: if True, the start of the innermost coordinate lines are aligned as well
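
The docstring above describes the classic over-allocate-and-slice trick; a minimal self-contained sketch (the helper name is illustrative, not the library implementation):

```python
import numpy as np

def aligned_empty_sketch(shape, byte_alignment=32, dtype=np.float64, byte_offset=0):
    # Over-allocate a raw byte buffer, then slice it so that
    # (start_address + byte_offset) % byte_alignment == 0 holds.
    dtype = np.dtype(dtype)
    n_bytes = int(np.prod(shape)) * dtype.itemsize
    buffer = np.empty(n_bytes + byte_alignment, dtype=np.uint8)
    shift = (-(buffer.ctypes.data + byte_offset)) % byte_alignment
    aligned = buffer[shift:shift + n_bytes].view(dtype).reshape(shape)
    assert (aligned.ctypes.data + byte_offset) % byte_alignment == 0
    return aligned

arr = aligned_empty_sketch((16, 16), byte_alignment=32, byte_offset=8)
```
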
diff --git a/assignment_collection/assignment_collection.py b/assignment_collection/assignment_collection.py
index 2907cb5fea3f77ebf971ea706096da6fe38dcbd9..01e7d5a037744323d7fdaba77a3e31146f2aa6e0 100644
--- a/assignment_collection/assignment_collection.py
+++ b/assignment_collection/assignment_collection.py
@@ -111,7 +111,7 @@ class AssignmentCollection:
     def dependent_symbols(self, symbols: Iterable[sp.Symbol]) -> Set[sp.Symbol]:
         """Returns all symbols that depend on one of the passed symbols.
 
-        A symbol 'a' depends on a symbol 'b', if there is an assignment 'a <- someExpression(b)' i.e. when
+        A symbol 'a' depends on a symbol 'b' if there is an assignment 'a <- some_expression(b)', i.e. when
         'b' is required to compute 'a'.
         """
 
@@ -217,18 +217,18 @@ class AssignmentCollection:
         substitution_dict = {}
 
         processed_other_subexpression_equations = []
-        for otherSubexpressionEq in other.subexpressions:
-            if otherSubexpressionEq.lhs in own_subexpression_symbols:
-                if otherSubexpressionEq.rhs == own_subexpression_symbols[otherSubexpressionEq.lhs]:
+        for other_subexpression_eq in other.subexpressions:
+            if other_subexpression_eq.lhs in own_subexpression_symbols:
+                if other_subexpression_eq.rhs == own_subexpression_symbols[other_subexpression_eq.lhs]:
                     continue  # exact the same subexpression equation exists already
                 else:
                     # different definition - a new name has to be introduced
                     new_lhs = next(self.subexpression_symbol_generator)
-                    new_eq = Assignment(new_lhs, fast_subs(otherSubexpressionEq.rhs, substitution_dict))
+                    new_eq = Assignment(new_lhs, fast_subs(other_subexpression_eq.rhs, substitution_dict))
                     processed_other_subexpression_equations.append(new_eq)
-                    substitution_dict[otherSubexpressionEq.lhs] = new_lhs
+                    substitution_dict[other_subexpression_eq.lhs] = new_lhs
             else:
-                processed_other_subexpression_equations.append(fast_subs(otherSubexpressionEq, substitution_dict))
+                processed_other_subexpression_equations.append(fast_subs(other_subexpression_eq, substitution_dict))
 
         processed_other_main_assignments = [fast_subs(eq, substitution_dict) for eq in other.main_assignments]
         return self.copy(self.main_assignments + processed_other_main_assignments,
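
The collision handling above is easiest to follow on a toy example. A hedged sketch with plain sympy (symbol names are illustrative): a conflicting redefinition of xi_0 gets the fresh name xi_1, and the substitution is propagated to later right-hand sides:

```python
import sympy as sp

a, xi_0, xi_1, y = sp.symbols('a xi_0 xi_1 y')
own = {xi_0: a + 1}                     # our definition: xi_0 <- a + 1
other = [(xi_0, a + 2), (y, xi_0 * 3)]  # conflicting redefinition, then a use of it
substitution_dict = {}
processed = []
for lhs, rhs in other:
    if lhs in own and own[lhs] != rhs:
        new_lhs = xi_1  # next(self.subexpression_symbol_generator) in the real code
        processed.append((new_lhs, rhs.subs(substitution_dict)))
        substitution_dict[lhs] = new_lhs
    else:
        processed.append((lhs, rhs.subs(substitution_dict)))
print(processed)  # [(xi_1, a + 2), (y, 3*xi_1)]
```
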
diff --git a/assignment_collection/simplifications.py b/assignment_collection/simplifications.py
index 5c8a13f98cef1ecd1f3a838752d9a214a969fb7d..a635e8b33ab8d8438cea1d55e8cf8f8c86c00b6f 100644
--- a/assignment_collection/simplifications.py
+++ b/assignment_collection/simplifications.py
@@ -50,10 +50,10 @@ def apply_on_all_subexpressions(ac: AssignmentCollection,
 def subexpression_substitution_in_existing_subexpressions(ac: AssignmentCollection) -> AssignmentCollection:
     """Goes through the subexpressions list and replaces the term in the following subexpressions."""
     result = []
-    for outerCtr, s in enumerate(ac.subexpressions):
+    for outer_ctr, s in enumerate(ac.subexpressions):
         new_rhs = s.rhs
-        for innerCtr in range(outerCtr):
-            sub_expr = ac.subexpressions[innerCtr]
+        for inner_ctr in range(outer_ctr):
+            sub_expr = ac.subexpressions[inner_ctr]
             new_rhs = subs_additive(new_rhs, sub_expr.lhs, sub_expr.rhs, required_match_replacement=1.0)
             new_rhs = new_rhs.subs(sub_expr.rhs, sub_expr.lhs)
         result.append(Assignment(s.lhs, new_rhs))
@@ -66,8 +66,8 @@ def subexpression_substitution_in_main_assignments(ac: AssignmentCollection) ->
     result = []
     for s in ac.main_assignments:
         new_rhs = s.rhs
-        for subExpr in ac.subexpressions:
-            new_rhs = subs_additive(new_rhs, subExpr.lhs, subExpr.rhs, required_match_replacement=1.0)
+        for sub_expr in ac.subexpressions:
+            new_rhs = subs_additive(new_rhs, sub_expr.lhs, sub_expr.rhs, required_match_replacement=1.0)
         result.append(Assignment(s.lhs, new_rhs))
     return ac.copy(result)
 
@@ -91,5 +91,5 @@ def add_subexpressions_for_divisions(ac: AssignmentCollection) -> AssignmentColl
         search_divisors(eq.rhs)
 
     new_symbol_gen = ac.subexpression_symbol_generator
-    substitutions = {divisor: newSymbol for newSymbol, divisor in zip(new_symbol_gen, divisors)}
+    substitutions = {divisor: new_symbol for new_symbol, divisor in zip(new_symbol_gen, divisors)}
     return ac.new_with_substitutions(substitutions, True)
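
What add_subexpressions_for_divisions buys is visible on a two-line sympy example: pulling the shared divisor into one subexpression trades two divisions for one division plus two multiplications (xi_0 stands in for the generated subexpression symbol):

```python
import sympy as sp

x, y, xi_0 = sp.symbols('x y xi_0')
expr = y / x + (y + 1) / x            # two divisions by x
substituted = expr.subs(1 / x, xi_0)  # xi_0 <- 1/x, computed once
print(substituted)                    # xi_0*y + xi_0*(y + 1)
```
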
diff --git a/astnodes.py b/astnodes.py
index 67fa8910cd5f65cefaa73fe3c532c39e7e2aac6f..54eaafffb76450898213275b0f8be3abd23db796 100644
--- a/astnodes.py
+++ b/astnodes.py
@@ -64,7 +64,7 @@ class Conditional(Node):
         super(Conditional, self).__init__(parent=None)
 
         assert condition_expr.is_Boolean or condition_expr.is_Relational
-        self.conditionExpr = condition_expr
+        self.condition_expr = condition_expr
 
         def handle_child(c):
             if c is None:
@@ -74,20 +74,20 @@ class Conditional(Node):
             c.parent = self
             return c
 
-        self.trueBlock = handle_child(true_block)
-        self.falseBlock = handle_child(false_block)
+        self.true_block = handle_child(true_block)
+        self.false_block = handle_child(false_block)
 
     def subs(self, *args, **kwargs):
-        self.trueBlock.subs(*args, **kwargs)
-        if self.falseBlock:
-            self.falseBlock.subs(*args, **kwargs)
-        self.conditionExpr = self.conditionExpr.subs(*args, **kwargs)
+        self.true_block.subs(*args, **kwargs)
+        if self.false_block:
+            self.false_block.subs(*args, **kwargs)
+        self.condition_expr = self.condition_expr.subs(*args, **kwargs)
 
     @property
     def args(self):
-        result = [self.conditionExpr, self.trueBlock]
-        if self.falseBlock:
-            result.append(self.falseBlock)
+        result = [self.condition_expr, self.true_block]
+        if self.false_block:
+            result.append(self.false_block)
         return result
 
     @property
@@ -96,17 +96,17 @@ class Conditional(Node):
 
     @property
     def undefined_symbols(self):
-        result = self.trueBlock.undefined_symbols
-        if self.falseBlock:
-            result.update(self.falseBlock.undefined_symbols)
-        result.update(self.conditionExpr.atoms(sp.Symbol))
+        result = self.true_block.undefined_symbols
+        if self.false_block:
+            result.update(self.false_block.undefined_symbols)
+        result.update(self.condition_expr.atoms(sp.Symbol))
         return result
 
     def __str__(self):
-        return 'if:({!s}) '.format(self.conditionExpr)
+        return 'if:({!s}) '.format(self.condition_expr)
 
     def __repr__(self):
-        return 'if:({!r}) '.format(self.conditionExpr)
+        return 'if:({!r}) '.format(self.condition_expr)
 
 
 class KernelFunction(Node):
@@ -116,39 +116,39 @@ class KernelFunction(Node):
             from pystencils.transformations import symbol_name_to_variable_name
             self.name = name
             self.dtype = dtype
-            self.isFieldPtrArgument = False
-            self.isFieldShapeArgument = False
-            self.isFieldStrideArgument = False
-            self.isFieldArgument = False
+            self.is_field_ptr_argument = False
+            self.is_field_shape_argument = False
+            self.is_field_stride_argument = False
+            self.is_field_argument = False
             self.field_name = ""
             self.coordinate = None
             self.symbol = symbol
 
             if name.startswith(Field.DATA_PREFIX):
-                self.isFieldPtrArgument = True
-                self.isFieldArgument = True
+                self.is_field_ptr_argument = True
+                self.is_field_argument = True
                 self.field_name = name[len(Field.DATA_PREFIX):]
             elif name.startswith(Field.SHAPE_PREFIX):
-                self.isFieldShapeArgument = True
-                self.isFieldArgument = True
+                self.is_field_shape_argument = True
+                self.is_field_argument = True
                 self.field_name = name[len(Field.SHAPE_PREFIX):]
             elif name.startswith(Field.STRIDE_PREFIX):
-                self.isFieldStrideArgument = True
-                self.isFieldArgument = True
+                self.is_field_stride_argument = True
+                self.is_field_argument = True
                 self.field_name = name[len(Field.STRIDE_PREFIX):]
 
             self.field = None
-            if self.isFieldArgument:
+            if self.is_field_argument:
                 field_map = {symbol_name_to_variable_name(f.name): f for f in kernel_function_node.fields_accessed}
                 self.field = field_map[self.field_name]
 
         def __lt__(self, other):
             def score(l):
-                if l.isFieldPtrArgument:
+                if l.is_field_ptr_argument:
                     return -4
-                elif l.isFieldShapeArgument:
+                elif l.is_field_shape_argument:
                     return -3
-                elif l.isFieldStrideArgument:
+                elif l.is_field_stride_argument:
                     return -2
                 return 0
 
@@ -298,12 +298,12 @@ class Block(Node):
 class PragmaBlock(Block):
     def __init__(self, pragma_line, nodes):
         super(PragmaBlock, self).__init__(nodes)
-        self.pragmaLine = pragma_line
+        self.pragma_line = pragma_line
         for n in nodes:
             n.parent = self
 
     def __repr__(self):
-        return self.pragmaLine
+        return self.pragma_line
 
 
 class LoopOverCoordinate(Node):
@@ -313,16 +313,16 @@ class LoopOverCoordinate(Node):
         super(LoopOverCoordinate, self).__init__(parent=None)
         self.body = body
         body.parent = self
-        self.coordinateToLoopOver = coordinate_to_loop_over
+        self.coordinate_to_loop_over = coordinate_to_loop_over
         self.start = start
         self.stop = stop
         self.step = step
         self.body.parent = self
-        self.prefixLines = []
+        self.prefix_lines = []
 
     def new_loop_with_different_body(self, new_body):
-        result = LoopOverCoordinate(new_body, self.coordinateToLoopOver, self.start, self.stop, self.step)
-        result.prefixLines = [l for l in self.prefixLines]
+        result = LoopOverCoordinate(new_body, self.coordinate_to_loop_over, self.start, self.stop, self.step)
+        result.prefix_lines = [l for l in self.prefix_lines]
         return result
 
     def subs(self, *args, **kwargs):
@@ -359,9 +359,9 @@ class LoopOverCoordinate(Node):
     @property
     def undefined_symbols(self):
         result = self.body.undefined_symbols
-        for possibleSymbol in [self.start, self.stop, self.step]:
-            if isinstance(possibleSymbol, Node) or isinstance(possibleSymbol, sp.Basic):
-                result.update(possibleSymbol.atoms(sp.Symbol))
+        for possible_symbol in [self.start, self.stop, self.step]:
+            if isinstance(possible_symbol, Node) or isinstance(possible_symbol, sp.Basic):
+                result.update(possible_symbol.atoms(sp.Symbol))
         return result - {self.loop_counter_symbol}
 
     @staticmethod
@@ -370,7 +370,7 @@ class LoopOverCoordinate(Node):
 
     @property
     def loop_counter_name(self):
-        return LoopOverCoordinate.get_loop_counter_name(self.coordinateToLoopOver)
+        return LoopOverCoordinate.get_loop_counter_name(self.coordinate_to_loop_over)
 
     @staticmethod
     def is_loop_counter_symbol(symbol):
@@ -388,7 +388,7 @@ class LoopOverCoordinate(Node):
 
     @property
     def loop_counter_symbol(self):
-        return LoopOverCoordinate.get_loop_counter_symbol(self.coordinateToLoopOver)
+        return LoopOverCoordinate.get_loop_counter_symbol(self.coordinate_to_loop_over)
 
     @property
     def is_outermost_loop(self):
@@ -414,25 +414,25 @@ class LoopOverCoordinate(Node):
 class SympyAssignment(Node):
     def __init__(self, lhs_symbol, rhs_expr, is_const=True):
         super(SympyAssignment, self).__init__(parent=None)
-        self._lhsSymbol = lhs_symbol
+        self._lhs_symbol = lhs_symbol
         self.rhs = rhs_expr
-        self._isDeclaration = True
-        is_cast = self._lhsSymbol.func == cast_func
-        if isinstance(self._lhsSymbol, Field.Access) or isinstance(self._lhsSymbol, ResolvedFieldAccess) or is_cast:
-            self._isDeclaration = False
-        self._isConst = is_const
+        self._is_declaration = True
+        is_cast = self._lhs_symbol.func == cast_func
+        if isinstance(self._lhs_symbol, Field.Access) or isinstance(self._lhs_symbol, ResolvedFieldAccess) or is_cast:
+            self._is_declaration = False
+        self._is_const = is_const
 
     @property
     def lhs(self):
-        return self._lhsSymbol
+        return self._lhs_symbol
 
     @lhs.setter
     def lhs(self, new_value):
-        self._lhsSymbol = new_value
-        self._isDeclaration = True
-        is_cast = self._lhsSymbol.func == cast_func
-        if isinstance(self._lhsSymbol, Field.Access) or isinstance(self._lhsSymbol, sp.Indexed) or is_cast:
-            self._isDeclaration = False
+        self._lhs_symbol = new_value
+        self._is_declaration = True
+        is_cast = self._lhs_symbol.func == cast_func
+        if isinstance(self._lhs_symbol, Field.Access) or isinstance(self._lhs_symbol, sp.Indexed) or is_cast:
+            self._is_declaration = False
 
     def subs(self, *args, **kwargs):
         self.lhs = fast_subs(self.lhs, *args, **kwargs)
@@ -440,13 +440,13 @@ class SympyAssignment(Node):
 
     @property
     def args(self):
-        return [self._lhsSymbol, self.rhs]
+        return [self._lhs_symbol, self.rhs]
 
     @property
     def symbols_defined(self):
-        if not self._isDeclaration:
+        if not self._is_declaration:
             return set()
-        return {self._lhsSymbol}
+        return {self._lhs_symbol}
 
     @property
     def undefined_symbols(self):
@@ -458,16 +458,16 @@ class SympyAssignment(Node):
                 for i in range(len(symbol.offsets)):
                     loop_counters.add(LoopOverCoordinate.get_loop_counter_symbol(i))
         result.update(loop_counters)
-        result.update(self._lhsSymbol.atoms(sp.Symbol))
+        result.update(self._lhs_symbol.atoms(sp.Symbol))
         return result
 
     @property
     def is_declaration(self):
-        return self._isDeclaration
+        return self._is_declaration
 
     @property
     def is_const(self):
-        return self._isConst
+        return self._is_const
 
     def replace(self, child, replacement):
         if child == self.lhs:
@@ -495,24 +495,24 @@ class ResolvedFieldAccess(sp.Indexed):
         obj = super(ResolvedFieldAccess, cls).__new__(cls, base, linearized_index)
         obj.field = field
         obj.offsets = offsets
-        obj.idxCoordinateValues = idx_coordinate_values
+        obj.idx_coordinate_values = idx_coordinate_values
         return obj
 
     def _eval_subs(self, old, new):
         return ResolvedFieldAccess(self.args[0],
                                    self.args[1].subs(old, new),
-                                   self.field, self.offsets, self.idxCoordinateValues)
+                                   self.field, self.offsets, self.idx_coordinate_values)
 
     def fast_subs(self, substitutions):
         if self in substitutions:
             return substitutions[self]
         return ResolvedFieldAccess(self.args[0].subs(substitutions),
                                    self.args[1].subs(substitutions),
-                                   self.field, self.offsets, self.idxCoordinateValues)
+                                   self.field, self.offsets, self.idx_coordinate_values)
 
     def _hashable_content(self):
         super_class_contents = super(ResolvedFieldAccess, self)._hashable_content()
-        return super_class_contents + tuple(self.offsets) + (repr(self.idxCoordinateValues), hash(self.field))
+        return super_class_contents + tuple(self.offsets) + (repr(self.idx_coordinate_values), hash(self.field))
 
     @property
     def typed_symbol(self):
@@ -523,7 +523,7 @@ class ResolvedFieldAccess(sp.Indexed):
         return "%s (%s)" % (top, self.typed_symbol.dtype)
 
     def __getnewargs__(self):
-        return self.base, self.indices[0], self.field, self.offsets, self.idxCoordinateValues
+        return self.base, self.indices[0], self.field, self.offsets, self.idx_coordinate_values
 
 
 class TemporaryMemoryAllocation(Node):
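
A hedged usage sketch for the renamed Conditional attributes, assuming the constructor signature Conditional(condition_expr, true_block, false_block=None) implied by the handle_child calls above:

```python
import sympy as sp
from pystencils.astnodes import Block, Conditional, SympyAssignment

x, y = sp.symbols('x y')
cond = Conditional(sp.Lt(x, 0), Block([SympyAssignment(y, x + 1)]))
print(cond.condition_expr)       # x < 0          (formerly cond.conditionExpr)
print(cond.true_block)           # the Block above (formerly cond.trueBlock)
assert cond.false_block is None  # formerly cond.falseBlock
```
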
diff --git a/backends/__init__.py b/backends/__init__.py
index 981703e589d8187bd3280c037863a056fe0c7d00..6e03c03838000454479ccbdb0bdad4351df8d8d6 100644
--- a/backends/__init__.py
+++ b/backends/__init__.py
@@ -2,6 +2,6 @@ from .cbackend import generate_c
 
 try:
     from .dot import print_dot
-    from .llvm import generateLLVM
+    from .llvm import generate_llvm
 except ImportError:
     pass
diff --git a/backends/cbackend.py b/backends/cbackend.py
index 64c863fb41d2a63badf1cf234b01cd1b9d28efa2..998d86d54cc9f43fb4cbb53d6fe9ffabbf09d081 100644
--- a/backends/cbackend.py
+++ b/backends/cbackend.py
@@ -11,7 +11,7 @@ except ImportError:
 from pystencils.bitoperations import bitwise_xor, bit_shift_right, bit_shift_left, bitwise_and, bitwise_or
 from pystencils.astnodes import Node, ResolvedFieldAccess, SympyAssignment
 from pystencils.data_types import create_type, PointerType, get_type_of_expression, VectorType, cast_func
-from pystencils.backends.simd_instruction_sets import selectedInstructionSet
+from pystencils.backends.simd_instruction_sets import selected_instruction_set
 
 __all__ = ['generate_c', 'CustomCppCode', 'PrintNode', 'get_headers']
 
@@ -36,7 +36,7 @@ def generate_c(ast_node: Node, signature_only: bool = False, use_float_constants
         double = create_type('double')
         use_float_constants = double not in field_types
 
-    vector_is = selectedInstructionSet['double']
+    vector_is = selected_instruction_set['double']
     printer = CBackend(constants_as_floats=use_float_constants, signature_only=signature_only,
                        vector_instruction_set=vector_is)
     return printer(ast_node)
@@ -50,7 +50,7 @@ def get_headers(ast_node: Node) -> Set[str]:
         headers.update(ast_node.headers)
     elif isinstance(ast_node, SympyAssignment):
         if type(get_type_of_expression(ast_node.rhs)) is VectorType:
-            headers.update(selectedInstructionSet['double']['headers'])
+            headers.update(selected_instruction_set['double']['headers'])
 
     for a in ast_node.args:
         if isinstance(a, Node):
@@ -104,23 +104,23 @@ class CBackend:
     def __init__(self, constants_as_floats=False, sympy_printer=None,
                  signature_only=False, vector_instruction_set=None):
         if sympy_printer is None:
-            self.sympyPrinter = CustomSympyPrinter(constants_as_floats)
+            self.sympy_printer = CustomSympyPrinter(constants_as_floats)
             if vector_instruction_set is not None:
-                self.sympyPrinter = VectorizedCustomSympyPrinter(vector_instruction_set, constants_as_floats)
+                self.sympy_printer = VectorizedCustomSympyPrinter(vector_instruction_set, constants_as_floats)
             else:
-                self.sympyPrinter = CustomSympyPrinter(constants_as_floats)
+                self.sympy_printer = CustomSympyPrinter(constants_as_floats)
         else:
-            self.sympyPrinter = sympy_printer
+            self.sympy_printer = sympy_printer
 
         self._vectorInstructionSet = vector_instruction_set
         self._indent = "   "
         self._signatureOnly = signature_only
 
     def __call__(self, node):
-        prev_is = VectorType.instructionSet
-        VectorType.instructionSet = self._vectorInstructionSet
+        prev_is = VectorType.instruction_set
+        VectorType.instruction_set = self._vectorInstructionSet
         result = str(self._print(node))
-        VectorType.instructionSet = prev_is
+        VectorType.instruction_set = prev_is
         return result
 
     def _print(self, node):
@@ -144,49 +144,49 @@ class CBackend:
         return "{\n%s\n}" % (self._indent + self._indent.join(block_contents.splitlines(True)))
 
     def _print_PragmaBlock(self, node):
-        return "%s\n%s" % (node.pragmaLine, self._print_Block(node))
+        return "%s\n%s" % (node.pragma_line, self._print_Block(node))
 
     def _print_LoopOverCoordinate(self, node):
         counter_symbol = node.loop_counter_name
-        start = "int %s = %s" % (counter_symbol, self.sympyPrinter.doprint(node.start))
-        condition = "%s < %s" % (counter_symbol, self.sympyPrinter.doprint(node.stop))
-        update = "%s += %s" % (counter_symbol, self.sympyPrinter.doprint(node.step),)
-        loopStr = "for (%s; %s; %s)" % (start, condition, update)
+        start = "int %s = %s" % (counter_symbol, self.sympy_printer.doprint(node.start))
+        condition = "%s < %s" % (counter_symbol, self.sympy_printer.doprint(node.stop))
+        update = "%s += %s" % (counter_symbol, self.sympy_printer.doprint(node.step),)
+        loop_str = "for (%s; %s; %s)" % (start, condition, update)
 
-        prefix = "\n".join(node.prefixLines)
+        prefix = "\n".join(node.prefix_lines)
         if prefix:
             prefix += "\n"
-        return "%s%s\n%s" % (prefix, loopStr, self._print(node.body))
+        return "%s%s\n%s" % (prefix, loop_str, self._print(node.body))
 
     def _print_SympyAssignment(self, node):
         if node.is_declaration:
             data_type = "const " + str(node.lhs.dtype) + " " if node.is_const else str(node.lhs.dtype) + " "
-            return "%s %s = %s;" % (data_type, self.sympyPrinter.doprint(node.lhs), self.sympyPrinter.doprint(node.rhs))
+            return "%s %s = %s;" % (data_type, self.sympy_printer.doprint(node.lhs), self.sympy_printer.doprint(node.rhs))
         else:
             lhs_type = get_type_of_expression(node.lhs)
             if type(lhs_type) is VectorType and node.lhs.func == cast_func:
-                return self._vectorInstructionSet['storeU'].format("&" + self.sympyPrinter.doprint(node.lhs.args[0]),
-                                                                   self.sympyPrinter.doprint(node.rhs)) + ';'
+                return self._vectorInstructionSet['storeU'].format("&" + self.sympy_printer.doprint(node.lhs.args[0]),
+                                                                   self.sympy_printer.doprint(node.rhs)) + ';'
             else:
-                return "%s = %s;" % (self.sympyPrinter.doprint(node.lhs), self.sympyPrinter.doprint(node.rhs))
+                return "%s = %s;" % (self.sympy_printer.doprint(node.lhs), self.sympy_printer.doprint(node.rhs))
 
     def _print_TemporaryMemoryAllocation(self, node):
-        return "%s %s = new %s[%s];" % (node.symbol.dtype, self.sympyPrinter.doprint(node.symbol.name),
-                                        node.symbol.dtype.base_type, self.sympyPrinter.doprint(node.size))
+        return "%s %s = new %s[%s];" % (node.symbol.dtype, self.sympy_printer.doprint(node.symbol.name),
+                                        node.symbol.dtype.base_type, self.sympy_printer.doprint(node.size))
 
     def _print_TemporaryMemoryFree(self, node):
-        return "delete [] %s;" % (self.sympyPrinter.doprint(node.symbol.name),)
+        return "delete [] %s;" % (self.sympy_printer.doprint(node.symbol.name),)
 
     @staticmethod
     def _print_CustomCppCode(node):
         return node.code
 
     def _print_Conditional(self, node):
-        condition_expr = self.sympyPrinter.doprint(node.conditionExpr)
-        true_block = self._print_Block(node.trueBlock)
+        condition_expr = self.sympy_printer.doprint(node.condition_expr)
+        true_block = self._print_Block(node.true_block)
         result = "if (%s)\n%s " % (condition_expr, true_block)
-        if node.falseBlock:
-            false_block = self._print_Block(node.falseBlock)
+        if node.false_block:
+            false_block = self._print_Block(node.false_block)
             result += "else " + false_block
         return result
 
@@ -253,14 +253,14 @@ class VectorizedCustomSympyPrinter(CustomSympyPrinter):
 
     def __init__(self, instruction_set, constants_as_floats=False):
         super(VectorizedCustomSympyPrinter, self).__init__(constants_as_floats)
-        self.instructionSet = instruction_set
+        self.instruction_set = instruction_set
 
     def _scalarFallback(self, func_name, expr, *args, **kwargs):
         expr_type = get_type_of_expression(expr)
         if type(expr_type) is not VectorType:
             return getattr(super(VectorizedCustomSympyPrinter, self), func_name)(expr, *args, **kwargs)
         else:
-            assert self.instructionSet['width'] == expr_type.width
+            assert self.instruction_set['width'] == expr_type.width
             return None
 
     def _print_Function(self, expr):
@@ -268,9 +268,9 @@ class VectorizedCustomSympyPrinter(CustomSympyPrinter):
             arg, data_type = expr.args
             if type(data_type) is VectorType:
                 if type(arg) is ResolvedFieldAccess:
-                    return self.instructionSet['loadU'].format("& " + self._print(arg))
+                    return self.instruction_set['loadU'].format("& " + self._print(arg))
                 else:
-                    return self.instructionSet['makeVec'].format(self._print(arg))
+                    return self.instruction_set['makeVec'].format(self._print(arg))
 
         return super(VectorizedCustomSympyPrinter, self)._print_Function(expr)
 
@@ -283,7 +283,7 @@ class VectorizedCustomSympyPrinter(CustomSympyPrinter):
         assert len(arg_strings) > 0
         result = arg_strings[0]
         for item in arg_strings[1:]:
-            result = self.instructionSet['&'].format(result, item)
+            result = self.instruction_set['&'].format(result, item)
         return result
 
     def _print_Or(self, expr):
@@ -295,7 +295,7 @@ class VectorizedCustomSympyPrinter(CustomSympyPrinter):
         assert len(arg_strings) > 0
         result = arg_strings[0]
         for item in arg_strings[1:]:
-            result = self.instructionSet['|'].format(result, item)
+            result = self.instruction_set['|'].format(result, item)
         return result
 
     def _print_Add(self, expr, order=None):
@@ -320,7 +320,7 @@ class VectorizedCustomSympyPrinter(CustomSympyPrinter):
         assert len(summands) >= 2
         processed = summands[0].term
         for summand in summands[1:]:
-            func = self.instructionSet['-'] if summand.sign == -1 else self.instructionSet['+']
+            func = self.instruction_set['-'] if summand.sign == -1 else self.instruction_set['+']
             processed = func.format(processed, summand.term)
         return processed
 
@@ -333,10 +333,10 @@ class VectorizedCustomSympyPrinter(CustomSympyPrinter):
             return "(" + self._print(sp.Mul(*[expr.base] * expr.exp, evaluate=False)) + ")"
         else:
             if expr.exp == -1:
-                one = self.instructionSet['makeVec'].format(1.0)
-                return self.instructionSet['/'].format(one, self._print(expr.base))
+                one = self.instruction_set['makeVec'].format(1.0)
+                return self.instruction_set['/'].format(one, self._print(expr.base))
             elif expr.exp == 0.5:
-                return self.instructionSet['sqrt'].format(self._print(expr.base))
+                return self.instruction_set['sqrt'].format(self._print(expr.base))
             else:
                 raise ValueError("Generic exponential not supported")
 
@@ -369,26 +369,26 @@ class VectorizedCustomSympyPrinter(CustomSympyPrinter):
                 a.append(item)
 
         a = a or [S.One]
-        # a = a or [castFunc(S.One, VectorType(createTypeFromString("double"), exprType.width))]
+        # a = a or [cast_func(S.One, VectorType(create_type_from_string("double"), expr_type.width))]
 
         a_str = [self._print(x) for x in a]
         b_str = [self._print(x) for x in b]
 
         result = a_str[0]
         for item in a_str[1:]:
-            result = self.instructionSet['*'].format(result, item)
+            result = self.instruction_set['*'].format(result, item)
 
         if len(b) > 0:
             denominator_str = b_str[0]
             for item in b_str[1:]:
-                denominator_str = self.instructionSet['*'].format(denominator_str, item)
-            result = self.instructionSet['/'].format(result, denominator_str)
+                denominator_str = self.instruction_set['*'].format(denominator_str, item)
+            result = self.instruction_set['/'].format(result, denominator_str)
 
         if inside_add:
             return sign, result
         else:
             if sign < 0:
-                return self.instructionSet['*'].format(self._print(S.NegativeOne), result)
+                return self.instruction_set['*'].format(self._print(S.NegativeOne), result)
             else:
                 return result
 
@@ -396,13 +396,13 @@ class VectorizedCustomSympyPrinter(CustomSympyPrinter):
         result = self._scalarFallback('_print_Relational', expr)
         if result:
             return result
-        return self.instructionSet[expr.rel_op].format(self._print(expr.lhs), self._print(expr.rhs))
+        return self.instruction_set[expr.rel_op].format(self._print(expr.lhs), self._print(expr.rhs))
 
     def _print_Equality(self, expr):
         result = self._scalarFallback('_print_Equality', expr)
         if result:
             return result
-        return self.instructionSet['=='].format(self._print(expr.lhs), self._print(expr.rhs))
+        return self.instruction_set['=='].format(self._print(expr.lhs), self._print(expr.rhs))
 
     def _print_Piecewise(self, expr):
         result = self._scalarFallback('_print_Piecewise', expr)
@@ -419,7 +419,7 @@ class VectorizedCustomSympyPrinter(CustomSympyPrinter):
                              "some condition.")
 
         result = self._print(expr.args[-1][0])
-        for trueExpr, condition in reversed(expr.args[:-1]):
+        for true_expr, condition in reversed(expr.args[:-1]):
             # noinspection SpellCheckingInspection
-            result = self.instructionSet['blendv'].format(result, self._print(trueExpr), self._print(condition))
+            result = self.instruction_set['blendv'].format(result, self._print(true_expr), self._print(condition))
         return result
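
The pattern used throughout the vectorized printer is a left fold of the printed operands through a format string from instruction_set. A standalone sketch (the AVX entry is an assumed example, not read from the module):

```python
# Assumed AVX-style entry; the real dict is built by x86_vector_instruction_set.
instruction_set = {'+': '_mm256_add_pd({0}, {1})'}

def fold_add(operands):
    # Mirrors _print_Add: fold the printed operands pairwise through the intrinsic.
    result = operands[0]
    for item in operands[1:]:
        result = instruction_set['+'].format(result, item)
    return result

print(fold_add(['a', 'b', 'c']))
# _mm256_add_pd(_mm256_add_pd(a, b), c)
```
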
diff --git a/backends/dot.py b/backends/dot.py
index d74b94f2603ddfaf974d5705cdaf1f6da449b1e3..dccc6ac5dc369325c83040433bbc2dc19be3d20e 100644
--- a/backends/dot.py
+++ b/backends/dot.py
@@ -10,18 +10,18 @@ class DotPrinter(Printer):
     """
     def __init__(self, node_to_str_function, full, **kwargs):
         super(DotPrinter, self).__init__()
-        self._nodeToStrFunction = node_to_str_function
+        self._node_to_str_function = node_to_str_function
         self.full = full
         self.dot = Digraph(**kwargs)
         self.dot.quote_edge = lang.quote
 
     def _print_KernelFunction(self, func):
-        self.dot.node(str(id(func)), style='filled', fillcolor='#a056db', label=self._nodeToStrFunction(func))
+        self.dot.node(str(id(func)), style='filled', fillcolor='#a056db', label=self._node_to_str_function(func))
         self._print(func.body)
         self.dot.edge(str(id(func)), str(id(func.body)))
 
     def _print_LoopOverCoordinate(self, loop):
-        self.dot.node(str(id(loop)), style='filled', fillcolor='#3498db', label=self._nodeToStrFunction(loop))
+        self.dot.node(str(id(loop)), style='filled', fillcolor='#3498db', label=self._node_to_str_function(loop))
         self._print(loop.body)
         self.dot.edge(str(id(loop)), str(id(loop.body)))
 
@@ -35,7 +35,7 @@ class DotPrinter(Printer):
 
     def _print_SympyAssignment(self, assignment):
         self.dot.node(str(id(assignment)), style='filled', fillcolor='#56db7f',
-                      label=self._nodeToStrFunction(assignment))
+                      label=self._node_to_str_function(assignment))
         if self.full:
             for node in assignment.args:
                 self._print(node)
@@ -43,16 +43,16 @@ class DotPrinter(Printer):
                 self.dot.edge(str(id(assignment)), str(id(node)))
 
     def _print_Conditional(self, expr):
-        self.dot.node(str(id(expr)), style='filled', fillcolor='#56bd7f', label=self._nodeToStrFunction(expr))
-        self._print(expr.trueBlock)
-        self.dot.edge(str(id(expr)), str(id(expr.trueBlock)))
-        if expr.falseBlock:
-            self._print(expr.falseBlock)
-            self.dot.edge(str(id(expr)), str(id(expr.falseBlock)))
+        self.dot.node(str(id(expr)), style='filled', fillcolor='#56bd7f', label=self._node_to_str_function(expr))
+        self._print(expr.true_block)
+        self.dot.edge(str(id(expr)), str(id(expr.true_block)))
+        if expr.false_block:
+            self._print(expr.false_block)
+            self.dot.edge(str(id(expr)), str(id(expr.false_block)))
 
     def empty_printer(self, expr):
         if self.full:
-            self.dot.node(str(id(expr)), label=self._nodeToStrFunction(expr))
+            self.dot.node(str(id(expr)), label=self._node_to_str_function(expr))
             for node in expr.args:
                 self._print(node)
             for node in expr.args:
@@ -68,10 +68,10 @@ class DotPrinter(Printer):
 def __shortened(node):
     from pystencils.astnodes import LoopOverCoordinate, KernelFunction, SympyAssignment, Block, Conditional
     if isinstance(node, LoopOverCoordinate):
-        return "Loop over dim %d" % (node.coordinateToLoopOver,)
+        return "Loop over dim %d" % (node.coordinate_to_loop_over,)
     elif isinstance(node, KernelFunction):
         params = [f.name for f in node.fields_accessed]
-        params += [p.name for p in node.parameters if not p.isFieldArgument]
+        params += [p.name for p in node.parameters if not p.is_field_argument]
         return "Func: %s (%s)" % (node.function_name, ",".join(params))
     elif isinstance(node, SympyAssignment):
         return repr(node.lhs)
diff --git a/backends/simd_instruction_sets.py b/backends/simd_instruction_sets.py
index 0949214545ccd0e76756e4882db147624ff8a371..11887299559bb81e0b878df794e135bd815e4e98 100644
--- a/backends/simd_instruction_sets.py
+++ b/backends/simd_instruction_sets.py
@@ -56,7 +56,7 @@ def x86_vector_instruction_set(data_type='double', instruction_set='avx'):
     result = {}
     pre = prefix[instruction_set]
     suf = suffix[data_type]
-    for intrinsicId, function_shortcut in base_names.items():
+    for intrinsic_id, function_shortcut in base_names.items():
         function_shortcut = function_shortcut.strip()
         name = function_shortcut[:function_shortcut.index('[')]
         args = function_shortcut[function_shortcut.index('[') + 1: -1]
@@ -70,7 +70,7 @@ def x86_vector_instruction_set(data_type='double', instruction_set='avx'):
             else:
                 arg_string += arg + ","
         arg_string = arg_string[:-1] + ")"
-        result[intrinsicId] = pre + "_" + name + "_" + suf + arg_string
+        result[intrinsic_id] = pre + "_" + name + "_" + suf + arg_string
 
     result['width'] = width[(data_type, instruction_set)]
     result['dataTypePrefix'] = {
@@ -88,7 +88,7 @@ def x86_vector_instruction_set(data_type='double', instruction_set='avx'):
     return result
 
 
-selectedInstructionSet = {
+selected_instruction_set = {
     'float': x86_vector_instruction_set('float', 'avx'),
     'double': x86_vector_instruction_set('double', 'avx'),
 }
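
For reference, the mangling loop above turns a shortcut such as 'add[0, 1]' into an intrinsic format string. A hedged reconstruction of a single iteration (the isdigit test stands in for the digit check whose branch lies outside this hunk):

```python
function_shortcut = 'add[0, 1]'   # an assumed base_names entry
pre, suf = '_mm256', 'pd'         # avx prefix, double suffix

name = function_shortcut[:function_shortcut.index('[')]
args = function_shortcut[function_shortcut.index('[') + 1:-1]
arg_string = "("
for arg in args.split(","):
    arg = arg.strip()
    # digits become positional placeholders, everything else passes through
    arg_string += ("{" + arg + "}," if arg.isdigit() else arg + ",")
arg_string = arg_string[:-1] + ")"
print(pre + "_" + name + "_" + suf + arg_string)  # _mm256_add_pd({0},{1})
```
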
diff --git a/boundaries/boundaryconditions.py b/boundaries/boundaryconditions.py
index 87d953aa51e03d7f390ccd12056b6c20c0f4d623..563940b30324271ce805d0656cee102ce1ee7a99 100644
--- a/boundaries/boundaryconditions.py
+++ b/boundaries/boundaryconditions.py
@@ -18,7 +18,7 @@ class Boundary(object):
             field: pystencils field where boundary condition should be applied.
                    The current cell is cell next to the boundary, which is influenced by the boundary
                    cell i.e. has a link from the boundary cell to itself.
-            direction_symbol: a sympy symbol that can be used as index to the pdfField. It describes
+            direction_symbol: a sympy symbol that can be used as an index into the pdf_field. It describes
                               the direction pointing from the fluid to the boundary cell
             index_field: the boundary index field that can be used to retrieve and update boundary data
         """
@@ -27,8 +27,8 @@ class Boundary(object):
     @property
     def additional_data(self) -> Tuple[str, Any]:
         """Return a list of (name, type) tuples for additional data items required in this boundary
-        These data items can either be initialized in separate kernel see additionalDataKernelInit or by
-        Python callbacks - see additionalDataCallback """
+        These data items can either be initialized in a separate kernel (see additional_data_kernel_init) or by
+        Python callbacks (see additional_data_callback). """
         return []
 
     @property
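
A hypothetical subclass sketch of the additional_data contract; the class and item names are illustrative, and it assumes create_type (used elsewhere in this patch) yields objects exposing numpy_dtype as required by numpy_data_type_for_boundary_object:

```python
from pystencils.boundaries.boundaryconditions import Boundary
from pystencils.data_types import create_type

class VelocityBoundarySketch(Boundary):
    """Hypothetical boundary storing one extra double per boundary link."""
    @property
    def additional_data(self):
        return [('wall_velocity', create_type('double'))]
```
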
diff --git a/boundaries/boundaryhandling.py b/boundaries/boundaryhandling.py
index d0e669bf6f8413eb2d1905b53a03c4f3fbc7ee39..885b1b350ec77a6d6f1453e32e514bc5da295e53 100644
--- a/boundaries/boundaryhandling.py
+++ b/boundaries/boundaryhandling.py
@@ -3,7 +3,7 @@ import sympy as sp
 from pystencils.assignment import Assignment
 from pystencils import Field, TypedSymbol, create_indexed_kernel
 from pystencils.backends.cbackend import CustomCppCode
-from pystencils.boundaries.createindexlist import numpyDataTypeForBoundaryObject, createBoundaryIndexArray
+from pystencils.boundaries.createindexlist import numpy_data_type_for_boundary_object, create_boundary_index_array
 from pystencils.cache import memorycache
 from pystencils.data_types import create_type
 
@@ -69,14 +69,14 @@ class BoundaryHandling:
 
     @property
     def boundary_objects(self):
-        return tuple(self._boundaryObjectToName.keys())
+        return tuple(self._boundary_object_to_name.keys())
 
     @property
     def flag_array_name(self):
         return self.flag_interface.flag_field_name
 
     def get_boundary_name_to_flag_dict(self):
-        result = {bObj.name: bInfo.flag for bObj, bInfo in self._boundary_object_to_boundary_info.items()}
+        result = {b_obj.name: b_info.flag for b_obj, b_info in self._boundary_object_to_boundary_info.items()}
         result['domain'] = self.flag_interface.domain_flag
         return result
 
@@ -103,8 +103,8 @@ class BoundaryHandling:
         :param boundary_obj: instance of a boundary object that should be set
         :param slice_obj: a slice object (can be created with make_slice[]) that selects a part of the domain where
                           the boundary should be set. If none, the complete domain is selected which makes only sense
-                          if a maskCallback is passed. The slice can have ':' placeholders, which are interpreted
-                          depending on the 'includeGhostLayers' parameter i.e. if it is True, the slice extends
+                          if a mask_callback is passed. The slice can have ':' placeholders, which are interpreted
+                          depending on the 'inner_ghost_layers' parameter, i.e. if it is True, the slice extends
                           into the ghost layers
         :param mask_callback: callback function getting x,y (z) parameters of the cell midpoints and returning a
                              boolean mask with True entries where boundary cells should be set.
@@ -155,23 +155,23 @@ class BoundaryHandling:
         else:
             ff_ghost_layers = self._data_handling.ghost_layers_of_field(self.flag_interface.flag_field_name)
             for b in self._data_handling.iterate(ghost_layers=ff_ghost_layers):
-                for bObj, setter in b[self._index_array_name].boundaryObjectToDataSetter.items():
-                    self._boundary_data_initialization(bObj, setter, **kwargs)
+                for b_obj, setter in b[self._index_array_name].boundary_object_to_data_setter.items():
+                    self._boundary_data_initialization(b_obj, setter, **kwargs)
 
     def __call__(self, **kwargs):
         if self._dirty:
             self.prepare()
 
         for b in self._data_handling.iterate(gpu=self._target == 'gpu'):
-            for bObj, idxArr in b[self._index_array_name].boundary_object_to_index_list.items():
+            for b_obj, idx_arr in b[self._index_array_name].boundary_object_to_index_list.items():
                 kwargs[self._field_name] = b[self._field_name]
-                kwargs['indexField'] = idxArr
+                kwargs['indexField'] = idx_arr
                 data_used_in_kernel = (p.field_name
-                                       for p in self._boundary_object_to_boundary_info[bObj].kernel.parameters
-                                       if p.isFieldPtrArgument and p.field_name not in kwargs)
+                                       for p in self._boundary_object_to_boundary_info[b_obj].kernel.parameters
+                                       if p.is_field_ptr_argument and p.field_name not in kwargs)
                 kwargs.update({name: b[name] for name in data_used_in_kernel})
 
-                self._boundary_object_to_boundary_info[bObj].kernel(**kwargs)
+                self._boundary_object_to_boundary_info[b_obj].kernel(**kwargs)
 
     def geometry_to_vtk(self, file_name='geometry', boundaries='all', ghost_layers=False):
         """
@@ -204,7 +204,7 @@ class BoundaryHandling:
     def _add_boundary(self, boundary_obj, flag=None):
         if boundary_obj not in self._boundary_object_to_boundary_info:
             symbolic_index_field = Field.create_generic('indexField', spatial_dimensions=1,
-                                                        dtype=numpyDataTypeForBoundaryObject(boundary_obj, self.dim))
+                                                        dtype=numpy_data_type_for_boundary_object(boundary_obj, self.dim))
             ast = self._create_boundary_kernel(self._data_handling.fields[self._field_name],
                                                symbolic_index_field, boundary_obj)
             if flag is None:
@@ -225,16 +225,17 @@ class BoundaryHandling:
             pdf_arr = b[self._field_name]
             index_array_bd = b[self._index_array_name]
             index_array_bd.clear()
-            for bInfo in self._boundary_object_to_boundary_info.values():
-                idxArr = createBoundaryIndexArray(flag_arr, self.stencil, bInfo.flag, self.flag_interface.domain_flag,
-                                                  bInfo.boundaryObject, ff_ghost_layers)
-                if idxArr.size == 0:
+            for b_info in self._boundary_object_to_boundary_info.values():
+                idx_arr = create_boundary_index_array(flag_arr, self.stencil, b_info.flag,
+                                                      self.flag_interface.domain_flag, b_info.boundary_object,
+                                                      ff_ghost_layers)
+                if idx_arr.size == 0:
                     continue
 
-                boundary_data_setter = BoundaryDataSetter(idxArr, b.offset, self.stencil, ff_ghost_layers, pdf_arr)
-                index_array_bd.boundary_object_to_index_list[bInfo.boundaryObject] = idxArr
-                index_array_bd.boundaryObjectToDataSetter[bInfo.boundaryObject] = boundary_data_setter
-                self._boundary_data_initialization(bInfo.boundaryObject, boundary_data_setter)
+                boundary_data_setter = BoundaryDataSetter(idx_arr, b.offset, self.stencil, ff_ghost_layers, pdf_arr)
+                index_array_bd.boundary_object_to_index_list[b_info.boundary_object] = idx_arr
+                index_array_bd.boundary_object_to_data_setter[b_info.boundary_object] = boundary_data_setter
+                self._boundary_data_initialization(b_info.boundary_object, boundary_data_setter)
 
     def _boundary_data_initialization(self, boundary_obj, boundary_data_setter, **kwargs):
         if boundary_obj.additional_data_init_callback:
@@ -244,42 +245,42 @@ class BoundaryHandling:
 
     class BoundaryInfo(object):
         def __init__(self, boundary_obj, flag, kernel):
-            self.boundaryObject = boundary_obj
+            self.boundary_object = boundary_obj
             self.flag = flag
             self.kernel = kernel
 
     class IndexFieldBlockData:
         def __init__(self, *args, **kwargs):
             self.boundary_object_to_index_list = {}
-            self.boundaryObjectToDataSetter = {}
+            self.boundary_object_to_data_setter = {}
 
         def clear(self):
             self.boundary_object_to_index_list.clear()
-            self.boundaryObjectToDataSetter.clear()
+            self.boundary_object_to_data_setter.clear()
 
         @staticmethod
         def to_cpu(gpu_version, cpu_version):
             gpu_version = gpu_version.boundary_object_to_index_list
             cpu_version = cpu_version.boundary_object_to_index_list
-            for obj, cpuArr in cpu_version.values():
-                gpu_version[obj].get(cpuArr)
+            for obj, cpu_arr in cpu_version.items():
+                gpu_version[obj].get(cpu_arr)
 
         @staticmethod
         def to_gpu(gpu_version, cpu_version):
             from pycuda import gpuarray
             gpu_version = gpu_version.boundary_object_to_index_list
             cpu_version = cpu_version.boundary_object_to_index_list
-            for obj, cpuArr in cpu_version.items():
+            for obj, cpu_arr in cpu_version.items():
                 if obj not in gpu_version:
-                    gpu_version[obj] = gpuarray.to_gpu(cpuArr)
+                    gpu_version[obj] = gpuarray.to_gpu(cpu_arr)
                 else:
-                    gpu_version[obj].set(cpuArr)
+                    gpu_version[obj].set(cpu_arr)
 
 
 class BoundaryDataSetter:
 
     def __init__(self, index_array, offset, stencil, ghost_layers, pdf_array):
-        self.indexArray = index_array
+        self.index_array = index_array
         self.offset = offset
         self.stencil = np.array(stencil)
         self.pdf_array = pdf_array.view()
@@ -288,17 +289,17 @@ class BoundaryDataSetter:
         arr_field_names = index_array.dtype.names
         self.dim = 3 if 'z' in arr_field_names else 2
         assert 'x' in arr_field_names and 'y' in arr_field_names and 'dir' in arr_field_names, str(arr_field_names)
-        self.boundary_data_names = set(self.indexArray.dtype.names) - set(['x', 'y', 'z', 'dir'])
+        self.boundary_data_names = set(self.index_array.dtype.names) - set(['x', 'y', 'z', 'dir'])
         self.coord_map = {0: 'x', 1: 'y', 2: 'z'}
         self.ghost_layers = ghost_layers
 
     def non_boundary_cell_positions(self, coord):
         assert coord < self.dim
-        return self.indexArray[self.coord_map[coord]] + self.offset[coord] - self.ghost_layers + 0.5
+        return self.index_array[self.coord_map[coord]] + self.offset[coord] - self.ghost_layers + 0.5
 
     @memorycache()
     def link_offsets(self):
-        return self.stencil[self.indexArray['dir']]
+        return self.stencil[self.index_array['dir']]
 
     @memorycache()
     def link_positions(self, coord):
@@ -311,12 +312,12 @@ class BoundaryDataSetter:
     def __setitem__(self, key, value):
         if key not in self.boundary_data_names:
             raise KeyError("Invalid boundary data name %s. Allowed are %s" % (key, self.boundary_data_names))
-        self.indexArray[key] = value
+        self.index_array[key] = value
 
     def __getitem__(self, item):
         if item not in self.boundary_data_names:
             raise KeyError("Invalid boundary data name %s. Allowed are %s" % (item, self.boundary_data_names))
-        return self.indexArray[item]
+        return self.index_array[item]
 
 
 class BoundaryOffsetInfo(CustomCppCode):
@@ -365,5 +366,5 @@ def create_boundary_kernel(field, index_field, stencil, boundary_functor, target
     index_arr_dtype = index_field.dtype.numpy_dtype
     dir_symbol = TypedSymbol("dir", index_arr_dtype.fields['dir'][0])
     elements += [Assignment(dir_symbol, index_field[0]('dir'))]
-    elements += boundary_functor(field, directionSymbol=dir_symbol, indexField=index_field)
+    elements += boundary_functor(field, direction_symbol=dir_symbol, index_field=index_field)
     return create_indexed_kernel(elements, [index_field], target=target, cpu_openmp=openmp)
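
The coordinate arithmetic in BoundaryDataSetter.non_boundary_cell_positions deserves a numeric illustration: with one ghost layer and a block at offset 0, the cell with index 3 lies at domain midpoint 2.5:

```python
import numpy as np

index_x = np.array([3], dtype=np.int32)       # the 'x' column of the index array
offset, ghost_layers = 0, 1
print(index_x + offset - ghost_layers + 0.5)  # [2.5]
```
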
diff --git a/boundaries/createindexlist.py b/boundaries/createindexlist.py
index d1419786268fb968de773abd00f03219e4d52e30..ed417c07a0ebd18916d9f830db04c671e0bf971e 100644
--- a/boundaries/createindexlist.py
+++ b/boundaries/createindexlist.py
@@ -8,72 +8,72 @@ try:
     pyximport.install()
     from pystencils.boundaries.createindexlistcython import create_boundary_index_list_2d, create_boundary_index_list_3d
 
-    cythonFuncsAvailable = True
+    cython_funcs_available = True
 except Exception:
-    cythonFuncsAvailable = False
-    createBoundaryIndexList2D = None
-    createBoundaryIndexList3D = None
+    cython_funcs_available = False
+    create_boundary_index_list_2d = None
+    create_boundary_index_list_3d = None
 
-boundaryIndexArrayCoordinateNames = ["x", "y", "z"]
-directionMemberName = "dir"
+boundary_index_array_coordinate_names = ["x", "y", "z"]
+direction_member_name = "dir"
 
 
-def numpyDataTypeForBoundaryObject(boundaryObject, dim):
-    coordinateNames = boundaryIndexArrayCoordinateNames[:dim]
-    return np.dtype([(name, np.int32) for name in coordinateNames] +
-                    [(directionMemberName, np.int32)] +
-                    [(i[0], i[1].numpy_dtype) for i in boundaryObject.additional_data], align=True)
+def numpy_data_type_for_boundary_object(boundary_object, dim):
+    coordinate_names = boundary_index_array_coordinate_names[:dim]
+    return np.dtype([(name, np.int32) for name in coordinate_names] +
+                    [(direction_member_name, np.int32)] +
+                    [(i[0], i[1].numpy_dtype) for i in boundary_object.additional_data], align=True)
 
 
-def _createBoundaryIndexListPython(flagFieldArr, nrOfGhostLayers, boundaryMask, fluidMask, stencil):
-    coordinateNames = boundaryIndexArrayCoordinateNames[:len(flagFieldArr.shape)]
-    indexArrDtype = np.dtype([(name, np.int32) for name in coordinateNames] + [(directionMemberName, np.int32)])
+def _create_boundary_index_list_python(flag_field_arr, nr_of_ghost_layers, boundary_mask, fluid_mask, stencil):
+    coordinate_names = boundary_index_array_coordinate_names[:len(flag_field_arr.shape)]
+    index_arr_dtype = np.dtype([(name, np.int32) for name in coordinate_names] + [(direction_member_name, np.int32)])
 
     result = []
-    gl = nrOfGhostLayers
-    for cell in itertools.product(*reversed([range(gl, i-gl) for i in flagFieldArr.shape])):
+    gl = nr_of_ghost_layers
+    for cell in itertools.product(*reversed([range(gl, i-gl) for i in flag_field_arr.shape])):
         cell = cell[::-1]
-        if not flagFieldArr[cell] & fluidMask:
+        if not flag_field_arr[cell] & fluid_mask:
             continue
-        for dirIdx, direction in enumerate(stencil):
-            neighborCell = tuple([cell_i + dir_i for cell_i, dir_i in zip(cell, direction)])
-            if flagFieldArr[neighborCell] & boundaryMask:
-                result.append(cell + (dirIdx,))
+        for dir_idx, direction in enumerate(stencil):
+            neighbor_cell = tuple([cell_i + dir_i for cell_i, dir_i in zip(cell, direction)])
+            if flag_field_arr[neighbor_cell] & boundary_mask:
+                result.append(cell + (dir_idx,))
 
-    return np.array(result, dtype=indexArrDtype)
+    return np.array(result, dtype=index_arr_dtype)
 
 
-def createBoundaryIndexList(flagField, stencil, boundaryMask, fluidMask, nrOfGhostLayers=1):
-    dim = len(flagField.shape)
-    coordinateNames = boundaryIndexArrayCoordinateNames[:dim]
-    indexArrDtype = np.dtype([(name, np.int32) for name in coordinateNames] + [(directionMemberName, np.int32)])
+def create_boundary_index_list(flag_field, stencil, boundary_mask, fluid_mask, nr_of_ghost_layers=1):
+    dim = len(flag_field.shape)
+    coordinate_names = boundary_index_array_coordinate_names[:dim]
+    index_arr_dtype = np.dtype([(name, np.int32) for name in coordinate_names] + [(direction_member_name, np.int32)])
 
-    if cythonFuncsAvailable:
+    if cython_funcs_available:
         stencil = np.array(stencil, dtype=np.int32)
         if dim == 2:
-            idxList = create_boundary_index_list_2d(flagField, nrOfGhostLayers, boundaryMask, fluidMask, stencil)
+            idx_list = create_boundary_index_list_2d(flag_field, nr_of_ghost_layers, boundary_mask, fluid_mask, stencil)
         elif dim == 3:
-            idxList = create_boundary_index_list_3d(flagField, nrOfGhostLayers, boundaryMask, fluidMask, stencil)
+            idx_list = create_boundary_index_list_3d(flag_field, nr_of_ghost_layers, boundary_mask, fluid_mask, stencil)
         else:
             raise ValueError("Flag field has to be a 2 or 3 dimensional numpy array")
-        return np.array(idxList, dtype=indexArrDtype)
+        return np.array(idx_list, dtype=index_arr_dtype)
     else:
-        if flagField.size > 1e6:
+        if flag_field.size > 1e6:
             warnings.warn("Boundary setup may take very long! Consider installing cython to speed it up")
-        return _createBoundaryIndexListPython(flagField, nrOfGhostLayers, boundaryMask, fluidMask, stencil)
+        return _create_boundary_index_list_python(flag_field, nr_of_ghost_layers, boundary_mask, fluid_mask, stencil)
 
 
-def createBoundaryIndexArray(flagField, stencil, boundaryMask, fluidMask, boundaryObject, nrOfGhostLayers=1):
-    idxArray = createBoundaryIndexList(flagField, stencil, boundaryMask, fluidMask, nrOfGhostLayers)
-    dim = len(flagField.shape)
+def create_boundary_index_array(flag_field, stencil, boundary_mask, fluid_mask, boundary_object, nr_of_ghost_layers=1):
+    idx_array = create_boundary_index_list(flag_field, stencil, boundary_mask, fluid_mask, nr_of_ghost_layers)
+    dim = len(flag_field.shape)
 
-    if boundaryObject.additional_data:
-        coordinateNames = boundaryIndexArrayCoordinateNames[:dim]
-        indexArrDtype = numpyDataTypeForBoundaryObject(boundaryObject, dim)
-        extendedIdxField = np.empty(len(idxArray), dtype=indexArrDtype)
-        for prop in coordinateNames + ['dir']:
-            extendedIdxField[prop] = idxArray[prop]
+    if boundary_object.additional_data:
+        coordinate_names = boundary_index_array_coordinate_names[:dim]
+        index_arr_dtype = numpy_data_type_for_boundary_object(boundary_object, dim)
+        extended_idx_field = np.empty(len(idx_array), dtype=index_arr_dtype)
+        for prop in coordinate_names + ['dir']:
+            extended_idx_field[prop] = idx_array[prop]
 
-        idxArray = extendedIdxField
+        idx_array = extended_idx_field
 
-    return idxArray
+    return idx_array
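
The structured dtype built above can be inspected directly; for dim=2 without additional data, each boundary link is one (x, y, dir) record:

```python
import numpy as np

index_arr_dtype = np.dtype([('x', np.int32), ('y', np.int32), ('dir', np.int32)])
idx_list = np.array([(5, 3, 1), (5, 4, 1)], dtype=index_arr_dtype)
print(idx_list['x'])    # [5 5]  -> cell coordinates
print(idx_list['dir'])  # [1 1]  -> index into the stencil array
```
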
diff --git a/boundaries/inkernel.py b/boundaries/inkernel.py
index 90ba36b208972dd934551bd4a02d4f48d8828f24..82cab6384f762ed220686ca2e1d92bc0865c2ad0 100644
--- a/boundaries/inkernel.py
+++ b/boundaries/inkernel.py
@@ -5,16 +5,16 @@ from pystencils.boundaries.boundaryhandling import FlagInterface
 from pystencils.data_types import create_type
 
 
-def add_neumann_boundary(eqs, fields, flag_field, boundary_flag="neumannFlag", inverse_flag=False):
+def add_neumann_boundary(eqs, fields, flag_field, boundary_flag="neumann_flag", inverse_flag=False):
     """
     Replaces all neighbor accesses by flag-field-guarded accesses.
     If the flag in the neighboring cell is set, the center value is used instead.
     :param eqs: list of equations containing field accesses to direct neighbors
     :param fields: fields for which the Neumann boundary should be applied
     :param flag_field: integer field marking boundary cells
-    :param boundary_flag: if flag field has value 'boundaryFlag' (no bit operations yet)
+    :param boundary_flag: if the flag field has the value 'boundary_flag' (no bit operations yet),
                           the cell is assumed to be a boundary cell
-    :param inverse_flag: if true, boundary cells are where flag field has not the value of boundaryFlag
+    :param inverse_flag: if True, boundary cells are those where the flag field does not have the value of boundary_flag
     :return: list of equations with guarded field accesses
     """
     if not hasattr(fields, "__len__"):
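
A minimal usage sketch for add_neumann_boundary, assuming the usual top-level pystencils imports; the field names and the averaging equation are illustrative only:

    import numpy as np
    from pystencils import Field, Assignment
    from pystencils.boundaries.inkernel import add_neumann_boundary

    f = Field.create_generic('f', spatial_dimensions=2)
    flag = Field.create_generic('flag', spatial_dimensions=2, dtype=np.uint32)

    eqs = [Assignment(f[0, 0], (f[1, 0] + f[-1, 0]) / 2)]
    # the neighbor accesses f[1, 0] and f[-1, 0] become flag-guarded accesses
    guarded_eqs = add_neumann_boundary(eqs, fields=[f], flag_field=flag)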
diff --git a/cache.py b/cache.py
index e10a211efa5556f0aaffd7f1740ea9cb0bbac45a..77526d4d660c52db7e7f5cd0140570e407b1cb9e 100644
--- a/cache.py
+++ b/cache.py
@@ -9,10 +9,10 @@ try:
     from joblib import Memory
     from appdirs import user_cache_dir
     if 'PYSTENCILS_CACHE_DIR' in os.environ:
-        cacheDir = os.environ['PYSTENCILS_CACHE_DIR']
+        cache_dir = os.environ['PYSTENCILS_CACHE_DIR']
     else:
-        cacheDir = user_cache_dir('pystencils')
-    disk_cache = Memory(cachedir=cacheDir, verbose=False).cache
+        cache_dir = user_cache_dir('pystencils')
+    disk_cache = Memory(cachedir=cache_dir, verbose=False).cache
     disk_cache_no_fallback = disk_cache
 except ImportError:
     # fallback to in-memory caching if joblib is not available
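
Since the lookup happens at import time, the cache location has to be chosen before pystencils.cache is first imported; a sketch (the path is illustrative):

    import os
    os.environ['PYSTENCILS_CACHE_DIR'] = '/tmp/pystencils_cache'  # must be set before the first import

    import pystencils.cache  # now uses cache_dir from the environment instead of user_cache_dir('pystencils')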
diff --git a/cpu/cpujit.py b/cpu/cpujit.py
index 42d5caa10aac83b3b6e71582e722da62955f6646..fcfcc816f516eca68ed57b114c1546c2d437c09f 100644
--- a/cpu/cpujit.py
+++ b/cpu/cpujit.py
@@ -189,8 +189,8 @@ def read_config():
     config_path, config_exists = get_configuration_file_path()
     config = default_config.copy()
     if config_exists:
-        with open(config_path, 'r') as jsonConfigFile:
-            loaded_config = json.load(jsonConfigFile)
+        with open(config_path, 'r') as json_config_file:
+            loaded_config = json.load(json_config_file)
         config = recursive_dict_update(config, loaded_config)
     else:
         create_folder(config_path, True)
@@ -267,15 +267,15 @@ def generate_code(ast, restrict_qualifier, function_prefix, target_file):
     headers = get_headers(ast)
     headers.update(['<cmath>', '<cstdint>'])
 
-    with open(target_file, 'w') as sourceFile:
+    with open(target_file, 'w') as source_file:
         code = generate_c(ast)
-        includes = "\n".join(["#include %s" % (includeFile,) for includeFile in headers])
-        print(includes, file=sourceFile)
-        print("#define RESTRICT %s" % (restrict_qualifier,), file=sourceFile)
-        print("#define FUNC_PREFIX %s" % (function_prefix,), file=sourceFile)
-        print('extern "C" { ', file=sourceFile)
-        print(code, file=sourceFile)
-        print('}', file=sourceFile)
+        includes = "\n".join(["#include %s" % (include_file,) for include_file in headers])
+        print(includes, file=source_file)
+        print("#define RESTRICT %s" % (restrict_qualifier,), file=source_file)
+        print("#define FUNC_PREFIX %s" % (function_prefix,), file=source_file)
+        print('extern "C" { ', file=source_file)
+        print(code, file=source_file)
+        print('}', file=source_file)
 
 
 def run_compile_step(command):
@@ -341,14 +341,14 @@ def compile_and_load(ast):
         return cdll.LoadLibrary(cache_config['shared_library'])[ast.function_name]
     else:
         if get_compiler_config()['os'].lower() == 'windows':
-            libFile = os.path.join(cache_config['object_cache'], code_hash_str + ".dll")
-            if not os.path.exists(libFile):
-                compile_windows(ast, code_hash_str, src_file, libFile)
+            lib_file = os.path.join(cache_config['object_cache'], code_hash_str + ".dll")
+            if not os.path.exists(lib_file):
+                compile_windows(ast, code_hash_str, src_file, lib_file)
         else:
-            libFile = os.path.join(cache_config['object_cache'], code_hash_str + ".so")
-            if not os.path.exists(libFile):
-                compile_linux(ast, code_hash_str, src_file, libFile)
-        return cdll.LoadLibrary(libFile)[ast.function_name]
+            lib_file = os.path.join(cache_config['object_cache'], code_hash_str + ".so")
+            if not os.path.exists(lib_file):
+                compile_linux(ast, code_hash_str, src_file, lib_file)
+        return cdll.LoadLibrary(lib_file)[ast.function_name]
 
 
 def build_ctypes_argument_list(parameter_specification, argument_dict):
@@ -358,14 +358,14 @@ def build_ctypes_argument_list(parameter_specification, argument_dict):
     index_arr_shapes = set()
 
     for arg in parameter_specification:
-        if arg.isFieldArgument:
+        if arg.is_field_argument:
             try:
                 field_arr = argument_dict[arg.field_name]
             except KeyError:
                 raise KeyError("Missing field parameter for kernel call " + arg.field_name)
 
             symbolic_field = arg.field
-            if arg.isFieldPtrArgument:
+            if arg.is_field_ptr_argument:
                 ct_arguments.append(field_arr.ctypes.data_as(to_ctypes(arg.dtype)))
                 if symbolic_field.has_fixed_shape:
                     symbolic_field_shape = tuple(int(i) for i in symbolic_field.shape)
@@ -387,10 +387,10 @@ def build_ctypes_argument_list(parameter_specification, argument_dict):
                 elif not FieldType.is_buffer(symbolic_field):
                     array_shapes.add(field_arr.shape[:symbolic_field.spatial_dimensions])
 
-            elif arg.isFieldShapeArgument:
+            elif arg.is_field_shape_argument:
                 data_type = to_ctypes(get_base_type(arg.dtype))
                 ct_arguments.append(field_arr.ctypes.shape_as(data_type))
-            elif arg.isFieldStrideArgument:
+            elif arg.is_field_stride_argument:
                 data_type = to_ctypes(get_base_type(arg.dtype))
                 strides = field_arr.ctypes.strides_as(data_type)
                 for i in range(len(field_arr.shape)):
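
For context, a hedged sketch of how this marshalling is typically exercised end to end; it assumes the create_kernel/compile workflow from elsewhere in this diff, and the array contents are made up:

    import numpy as np
    import pystencils as ps

    arr = np.zeros((10, 10))
    f = ps.Field.create_from_numpy_array('f', arr)
    ast = ps.create_kernel([ps.Assignment(f[0, 0], f[1, 0] + 1)])

    kernel = ast.compile()  # compile_and_load() caches the shared library under the code hash
    kernel(f=arr)           # build_ctypes_argument_list() expands 'arr' into pointer/shape/stride arguments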
diff --git a/cpu/kernelcreation.py b/cpu/kernelcreation.py
index efd1b3892e3c117447b285e6e5cc30a734cadaf7..8a5e4e18cd5603dc7ce5ff743cce816cad575e00 100644
--- a/cpu/kernelcreation.py
+++ b/cpu/kernelcreation.py
@@ -64,7 +64,7 @@ def create_kernel(assignments: AssignmentOrAstNodeList, function_name: str = "ke
     code.target = 'cpu'
 
     if split_groups:
-        typed_split_groups = [[type_symbol(s) for s in splitGroup] for splitGroup in split_groups]
+        typed_split_groups = [[type_symbol(s) for s in split_group] for split_group in split_groups]
         split_inner_loop(code, typed_split_groups)
 
     base_pointer_spec = [['spatialInner0'], ['spatialInner1']] if len(loop_order) >= 2 else [['spatialInner0']]
@@ -96,9 +96,9 @@ def create_indexed_kernel(assignments: AssignmentOrAstNodeList, index_fields, fu
     Similar to :func:`create_kernel`, but instead of updating all cells of a field, only those cells are updated whose
     coordinates are stored in an index field. This traversal method can be used, e.g., for boundary handling.
 
-    The coordinates are stored in a separate indexField, which is a one dimensional array with struct data type.
+    The coordinates are stored in a separate index_field, which is a one-dimensional array with struct data type.
     This struct has to contain fields named 'x', 'y' and, for 3D fields, 'z'. These names are configurable with the
-    'coordinateNames' parameter. The struct can have also other fields that can be read and written in the kernel, for
+    'coordinate_names' parameter. The struct can also have other fields that can be read and written in the kernel, for
     example boundary parameters.
 
     Args:
@@ -112,7 +112,7 @@ def create_indexed_kernel(assignments: AssignmentOrAstNodeList, index_fields, fu
     all_fields = fields_read.union(fields_written)
 
     for index_field in index_fields:
-        index_field.fieldType = FieldType.INDEXED
+        index_field.field_type = FieldType.INDEXED
         assert FieldType.is_indexed(index_field)
         assert index_field.spatial_dimensions == 1, "Index fields have to be 1D"
 
@@ -197,4 +197,4 @@ def add_openmp(ast_node, schedule="static", num_threads=True):
             except TypeError:
                 pass
 
-    loop_to_parallelize.prefixLines.append("#pragma omp for schedule(%s)" % (schedule,))
+    loop_to_parallelize.prefix_lines.append("#pragma omp for schedule(%s)" % (schedule,))
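
A sketch of the index-field convention used by create_indexed_kernel; the struct member names 'x'/'y' follow the docstring above, the coordinates are invented, and it is assumed that create_from_numpy_array accepts the structured array and that both functions are exported at package level:

    import numpy as np
    import pystencils as ps

    arr = np.zeros((8, 8))
    f = ps.Field.create_from_numpy_array('f', arr)

    # one struct entry per cell to update; members 'x', 'y' (and 'z' in 3D)
    idx_arr = np.array([(1, 1), (2, 5)], dtype=np.dtype([('x', np.int32), ('y', np.int32)]))
    idx_field = ps.Field.create_from_numpy_array('idx', idx_arr)

    ast = ps.create_indexed_kernel([ps.Assignment(f[0, 0], 42.0)], [idx_field])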
diff --git a/data_types.py b/data_types.py
index 282bb87be12435be5c30e30f966b2c6008a9f4b7..d12065f2f190dea89bc3e5b5c283b1ad931ff3e3 100644
--- a/data_types.py
+++ b/data_types.py
@@ -12,7 +12,7 @@ from pystencils.cache import memorycache
 from pystencils.utils import all_equal
 
 
-# to work in conditions of sp.Piecewise castFunc has to be of type Relational as well
+# to work in conditions of sp.Piecewise, cast_func has to be of type Relational as well
 class cast_func(sp.Function, sp.Rel):
     @property
     def canonical(self):
@@ -27,7 +27,6 @@ class cast_func(sp.Function, sp.Rel):
 
 
 class pointer_arithmetic_func(sp.Function, sp.Rel):
-
     @property
     def canonical(self):
         if hasattr(self.args[0], 'canonical'):
@@ -395,7 +394,7 @@ class BasicType(Type):
 
 
 class VectorType(Type):
-    instructionSet = None
+    instruction_set = None
 
     def __init__(self, base_type, width=4):
         self._base_type = base_type
@@ -416,17 +415,17 @@ class VectorType(Type):
             return (self.base_type, self.width) == (other.base_type, other.width)
 
     def __str__(self):
-        if self.instructionSet is None:
+        if self.instruction_set is None:
             return "%s[%d]" % (self.base_type, self.width)
         else:
             if self.base_type == create_type("int64"):
-                return self.instructionSet['int']
+                return self.instruction_set['int']
             elif self.base_type == create_type("float64"):
-                return self.instructionSet['double']
+                return self.instruction_set['double']
             elif self.base_type == create_type("float32"):
-                return self.instructionSet['float']
+                return self.instruction_set['float']
             elif self.base_type == create_type("bool"):
-                return self.instructionSet['bool']
+                return self.instruction_set['bool']
             else:
                 raise NotImplementedError()
 
diff --git a/datahandling/datahandling_interface.py b/datahandling/datahandling_interface.py
index c0b54b39373348cc34c84550ed195bde928a1e27..d48912795704309c0e2193330903a8d86d4032f2 100644
--- a/datahandling/datahandling_interface.py
+++ b/datahandling/datahandling_interface.py
@@ -51,7 +51,7 @@ class DataHandling(ABC):
             layout: memory layout of array, either structure of arrays 'SoA' or array of structures 'AoS'.
                     This is only important if values_per_cell > 1.
             cpu: allocate field on the CPU
-            gpu: allocate field on the GPU, if None, a GPU field is allocated if defaultTarget is 'gpu'
+            gpu: allocate field on the GPU; if None, a GPU field is allocated only if default_target is 'gpu'
 
         Returns:
             pystencils field, that can be used to formulate symbolic kernels
@@ -84,9 +84,9 @@ class DataHandling(ABC):
             cpu_creation_function: function returning a new instance of the data that should be stored
             gpu_creation_function: optional, function returning a new instance, stored on GPU
             cpu_to_gpu_transfer_func: function that transfers cpu to gpu version,
-                                      getting two parameters (gpuInstance, cpuInstance)
+                                      getting two parameters (gpu_instance, cpu_instance)
             gpu_to_cpu_transfer_func: function that transfers gpu to cpu version, getting two parameters
-                                      (gpuInstance, cpuInstance)
+                                      (gpu_instance, cpu_instance)
         """
 
     def add_custom_class(self, name: str, class_obj, cpu: bool = True, gpu: bool = False):
@@ -186,7 +186,7 @@ class DataHandling(ABC):
 
         Returns:
             a function that can be called with an integer time step to write the current state
-            i.e create_vtk_writer('someFile', ['velocity', 'density']) (1)
+            i.e. create_vtk_writer('some_file', ['velocity', 'density'])(1)
         """
 
     @abstractmethod
@@ -303,12 +303,12 @@ class DataHandling(ABC):
         separator_line = "-" * (first_column_width + 21 + 21 + 2) + "\n"
         result += row_format.format("Name", "Inner (min/max)", "WithGl (min/max)")
         result += separator_line
-        for arrName in sorted(self.array_names):
-            inner_min_max = (self.min(arrName, ghost_layers=False), self.max(arrName, ghost_layers=False))
-            with_gl_min_max = (self.min(arrName, ghost_layers=True), self.max(arrName, ghost_layers=True))
+        for arr_name in sorted(self.array_names):
+            inner_min_max = (self.min(arr_name, ghost_layers=False), self.max(arr_name, ghost_layers=False))
+            with_gl_min_max = (self.min(arr_name, ghost_layers=True), self.max(arr_name, ghost_layers=True))
             inner_min_max = "({0[0]:3.3g},{0[1]:3.3g})".format(inner_min_max)
             with_gl_min_max = "({0[0]:3.3g},{0[1]:3.3g})".format(with_gl_min_max)
-            result += row_format.format(arrName, inner_min_max, with_gl_min_max)
+            result += row_format.format(arr_name, inner_min_max, with_gl_min_max)
         return result
 
 
diff --git a/datahandling/parallel_datahandling.py b/datahandling/parallel_datahandling.py
index bd8ccc97a61f3ab52694ab82b24d1a0f1a1b35b6..e023c6076d64b83dfdd4e465fda28cbe407f57fa 100644
--- a/datahandling/parallel_datahandling.py
+++ b/datahandling/parallel_datahandling.py
@@ -33,7 +33,7 @@ class ParallelDataHandling(DataHandling):
         self._fields = DotDict()  # maps name to symbolic pystencils field
         self._field_name_to_cpu_data_name = {}
         self._field_name_to_gpu_data_name = {}
-        self.dataNames = set()
+        self.data_names = set()
         self._dim = dim
         self._fieldInformation = {}
         self._cpu_gpu_pairs = []
@@ -47,7 +47,7 @@ class ParallelDataHandling(DataHandling):
 
         if self._dim == 2:
             assert self.blocks.getDomainCellBB().size[2] == 1
-        self.defaultTarget = default_target
+        self.default_target = default_target
 
     @property
     def dim(self):
@@ -89,7 +89,7 @@ class ParallelDataHandling(DataHandling):
         if ghost_layers is None:
             ghost_layers = self.default_ghost_layers
         if gpu is None:
-            gpu = self.defaultTarget == 'gpu'
+            gpu = self.default_target == 'gpu'
         if layout is None:
             layout = self.default_layout
         if len(self.blocks) == 0:
@@ -219,11 +219,11 @@ class ParallelDataHandling(DataHandling):
             to_array = wlb.field.toArray
         data_used_in_kernel = [(name_map[p.field_name], self.fields[p.field_name])
                                for p in kernel_function.parameters if
-                               p.isFieldPtrArgument and p.field_name not in kwargs]
+                               p.is_field_ptr_argument and p.field_name not in kwargs]
         for block in self.blocks:
             field_args = {}
-            for dataName, f in data_used_in_kernel:
-                arr = to_array(block[dataName], withGhostLayers=[True, True, self.dim == 3])
+            for data_name, f in data_used_in_kernel:
+                arr = to_array(block[data_name], withGhostLayers=[True, True, self.dim == 3])
                 arr = self._normalize_arr_shape(arr, f.index_dimensions)
                 field_args[f.name] = arr
             field_args.update(kwargs)
@@ -250,14 +250,14 @@ class ParallelDataHandling(DataHandling):
         return (name, self.GPU_DATA_PREFIX + name) in self._cpu_gpu_pairs
 
     def all_to_cpu(self):
-        for cpuName, gpuName in self._cpu_gpu_pairs:
-            wlb.cuda.copyFieldToCpu(self.blocks, gpuName, cpuName)
+        for cpu_name, gpu_name in self._cpu_gpu_pairs:
+            wlb.cuda.copyFieldToCpu(self.blocks, gpu_name, cpu_name)
         for name in self._custom_data_transfer_functions.keys():
             self.to_cpu(name)
 
     def all_to_gpu(self):
-        for cpuName, gpuName in self._cpu_gpu_pairs:
-            wlb.cuda.copyFieldToGpu(self.blocks, gpuName, cpuName)
+        for cpu_name, gpu_name in self._cpu_gpu_pairs:
+            wlb.cuda.copyFieldToGpu(self.blocks, gpu_name, cpu_name)
         for name in self._custom_data_transfer_functions.keys():
             self.to_gpu(name)
 
@@ -269,7 +269,7 @@ class ParallelDataHandling(DataHandling):
 
     def synchronization_function(self, names, stencil=None, target='cpu', buffered=True):
         if target is None:
-            target = self.defaultTarget
+            target = self.default_target
 
         if stencil is None:
             stencil = 'D3Q27' if self.dim == 3 else 'D2Q9'
diff --git a/datahandling/serial_datahandling.py b/datahandling/serial_datahandling.py
index d803d13cbff8aa8a4235d0fe3a4b6a7cc641fd57..7e9adeafaa5f9855a12cc8948498f469c995c083 100644
--- a/datahandling/serial_datahandling.py
+++ b/datahandling/serial_datahandling.py
@@ -31,8 +31,8 @@ class SerialDataHandling(DataHandling):
         """
         super(SerialDataHandling, self).__init__()
         self._domainSize = tuple(domain_size)
-        self.defaultGhostLayers = default_ghost_layers
-        self.defaultLayout = default_layout
+        self.default_ghost_layers = default_ghost_layers
+        self.default_layout = default_layout
         self._fields = DotDict()
         self.cpu_arrays = DotDict()
         self.gpu_arrays = DotDict()
@@ -47,7 +47,7 @@ class SerialDataHandling(DataHandling):
 
         self._periodicity = periodicity
         self._field_information = {}
-        self.defaultTarget = default_target
+        self.default_target = default_target
 
     @property
     def dim(self):
@@ -74,11 +74,11 @@ class SerialDataHandling(DataHandling):
     def add_array(self, name, values_per_cell=1, dtype=np.float64, latex_name=None, ghost_layers=None, layout=None,
                   cpu=True, gpu=None, alignment=False):
         if ghost_layers is None:
-            ghost_layers = self.defaultGhostLayers
+            ghost_layers = self.default_ghost_layers
         if layout is None:
-            layout = self.defaultLayout
+            layout = self.default_layout
         if gpu is None:
-            gpu = self.defaultTarget == 'gpu'
+            gpu = self.default_target == 'gpu'
 
         kwargs = {
             'shape': tuple(s + 2 * ghost_layers for s in self._domainSize),
@@ -99,7 +99,7 @@ class SerialDataHandling(DataHandling):
             index_dimensions = 0
             layout_tuple = spatial_layout_string_to_tuple(layout, self.dim)
 
-        # cpu_arr is always created - since there is no createPycudaArrayWithLayout()
+        # cpu_arr is always created, since there is no create_pycuda_array_with_layout()
         byte_offset = ghost_layers * np.dtype(dtype).itemsize
         cpu_arr = create_numpy_array_with_layout(layout=layout_tuple, alignment=alignment,
                                                  byte_offset=byte_offset, **kwargs)
@@ -148,7 +148,7 @@ class SerialDataHandling(DataHandling):
 
     def iterate(self, slice_obj=None, gpu=False, ghost_layers=True, inner_ghost_layers=True):
         if ghost_layers is True:
-            ghost_layers = self.defaultGhostLayers
+            ghost_layers = self.default_ghost_layers
         elif ghost_layers is False:
             ghost_layers = 0
         elif isinstance(ghost_layers, str):
@@ -212,7 +212,7 @@ class SerialDataHandling(DataHandling):
     def run_kernel(self, kernel_function, *args, **kwargs):
         data_used_in_kernel = [p.field_name
                                for p in kernel_function.parameters if
-                               p.isFieldPtrArgument and p.field_name not in kwargs]
+                               p.is_field_ptr_argument and p.field_name not in kwargs]
         arrays = self.gpu_arrays if kernel_function.ast.backend == 'gpucuda' else self.cpu_arrays
         array_params = {name: arrays[name] for name in data_used_in_kernel}
         array_params.update(kwargs)
@@ -243,7 +243,7 @@ class SerialDataHandling(DataHandling):
 
     def synchronization_function(self, names, stencil=None, target=None, **_):
         if target is None:
-            target = self.defaultTarget
+            target = self.default_target
         assert target in ('cpu', 'gpu')
         if not hasattr(names, '__len__') or type(names) is str:
             names = [names]
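
Putting the renamed defaults together, a usage sketch; keyword names follow this file, the sizes are illustrative, and it is assumed that synchronization_function returns a callable as in the parallel implementation:

    import numpy as np
    from pystencils.datahandling.serial_datahandling import SerialDataHandling

    dh = SerialDataHandling(domain_size=(32, 32), default_ghost_layers=1,
                            default_layout='SoA', default_target='cpu')
    vel = dh.add_array('vel', values_per_cell=2, dtype=np.float64)  # picks up the defaults set above
    sync = dh.synchronization_function(['vel'])
    sync()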
diff --git a/derivative.py b/derivative.py
index 0716c79734d29c62db88aac5a77e647b6aff584a..2dbcae4519ad217cdac8ec5f275ba1e2603c92fb 100644
--- a/derivative.py
+++ b/derivative.py
@@ -400,8 +400,8 @@ def combine_using_product_rule(expr):
         expression = expression.expand()
         if isinstance(expression, sp.Add):
             diff_dict, rest = expr_to_diff_decomposition(expression)
-            for (label, superscript), diffList in diff_dict.items():
-                rest += process_diff_list(diffList, label, superscript)
+            for (label, superscript), diff_list in diff_dict.items():
+                rest += process_diff_list(diff_list, label, superscript)
             return rest
         else:
             new_args = [combine_using_product_rule(e) for e in expression.args]
@@ -467,12 +467,12 @@ def functional_derivative(functional, v):
     partial_f_partial_v = sp.diff(non_diff_part, v).subs(bulk_substitutions_inverse)
 
     gradient_part = 0
-    for diffObj in diffs:
-        if diffObj.args[0] != v:
+    for diff_obj in diffs:
+        if diff_obj.args[0] != v:
             continue
         dummy = sp.Dummy()
-        partial_f_partial_grad_v = functional.subs(diffObj, dummy).diff(dummy).subs(dummy, diffObj)
-        gradient_part += Diff(partial_f_partial_grad_v, target=diffObj.target, superscript=diffObj.superscript)
+        partial_f_partial_grad_v = functional.subs(diff_obj, dummy).diff(dummy).subs(dummy, diff_obj)
+        gradient_part += Diff(partial_f_partial_grad_v, target=diff_obj.target, superscript=diff_obj.superscript)
 
     result = partial_f_partial_v - gradient_part
     return result
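
In standard variational notation, the value assembled above (partial_f_partial_v minus gradient_part) is the Euler-Lagrange form of the functional derivative, with f the integrand of the functional F[v]:

    \frac{\delta F}{\delta v} = \frac{\partial f}{\partial v} - \sum_i \partial_i \left( \frac{\partial f}{\partial (\partial_i v)} \right)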
diff --git a/field.py b/field.py
index b81b047dd7528841fe2bc881ec12dd33911f4366..7c61db7bc1654d30438221dabf94ea03a2b8c379 100644
--- a/field.py
+++ b/field.py
@@ -24,17 +24,17 @@ class FieldType(Enum):
     @staticmethod
     def is_generic(field):
         assert isinstance(field, Field)
-        return field.fieldType == FieldType.GENERIC
+        return field.field_type == FieldType.GENERIC
 
     @staticmethod
     def is_indexed(field):
         assert isinstance(field, Field)
-        return field.fieldType == FieldType.INDEXED
+        return field.field_type == FieldType.INDEXED
 
     @staticmethod
     def is_buffer(field):
         assert isinstance(field, Field)
-        return field.fieldType == FieldType.BUFFER
+        return field.field_type == FieldType.BUFFER
 
 
 class Field(object):
@@ -47,7 +47,7 @@ class Field(object):
         To create a field use one of the static create* members. There are two options:
 
         1. create a kernel with fixed loop sizes, i.e. the shape of the array is already known. This is usually the
-           case if just-in-time compilation directly from Python is done. (see :func:`Field.createFromNumpyArray`)
+           case if just-in-time compilation directly from Python is done (see :func:`Field.create_from_numpy_array`).
         2. create a more general kernel that works for variable array sizes. This can be used to create kernels
            beforehand for a library (see :func:`Field.create_generic`).
 
@@ -56,11 +56,11 @@ class Field(object):
         The interpretation is that the field has multiple cells in (usually) two or three dimensional space which are
         looped over. Additionally  N values are stored per cell. In this case spatial_dimensions is two or three,
         and index_dimensions equals N. If you want to store a matrix on each point in a two dimensional grid, there
-        are four dimensions, two spatial and two index dimensions: ``len(arr.shape) == spatialDims + indexDims``
+        are four dimensions, two spatial and two index dimensions: ``len(arr.shape) == spatial_dims + index_dims``
 
     Indexing:
         When accessing (indexing) a field the result is a FieldAccess which is derived from sympy Symbol.
-        First specify the spatial offsets in [], then in case indexDimension>0 the indices in ()
+        First specify the spatial offsets in [], then in case index_dimension>0 the indices in ()
         e.g. ``f[-1,0,0](7)``
 
     Example without index dimensions:
@@ -92,7 +92,7 @@ class Field(object):
             index_dimensions: see documentation of Field
             layout: tuple specifying the loop ordering of the spatial dimensions e.g. (2, 1, 0 ) means that
                     the outer loop loops over dimension 2, the second outer over dimension 1, and the inner loop
-                    over dimension 0. Also allowed: the strings 'numpy' (0,1,..d) or 'reverseNumpy' (d, ..., 1, 0)
+                    over dimension 0. Also allowed: the strings 'numpy' (0, 1, ..., d) or 'reverse_numpy' (d, ..., 1, 0)
             index_shape: optional shape of the index dimensions i.e. maximum values allowed for each index dimension,
                         has to be a list or tuple
             field_type: besides the normal GENERIC fields, there are INDEXED fields that store indices of the domain
@@ -194,7 +194,7 @@ class Field(object):
         """Do not use directly. Use static create* methods"""
         self._fieldName = field_name
         assert isinstance(field_type, FieldType)
-        self.fieldType = field_type
+        self.field_type = field_type
         self._dtype = create_type(dtype)
         self._layout = normalize_layout(layout)
         self.shape = shape
@@ -202,7 +202,7 @@ class Field(object):
         self.latex_name: Optional[str] = None
 
     def new_field_with_different_name(self, new_name):
-        return Field(new_name, self.fieldType, self._dtype, self._layout, self.shape, self.strides)
+        return Field(new_name, self.field_type, self._dtype, self._layout, self.shape, self.strides)
 
     @property
     def spatial_dimensions(self) -> int:
@@ -294,11 +294,11 @@ class Field(object):
         return Field.Access(self, center)(*args, **kwargs)
 
     def __hash__(self):
-        return hash((self._layout, self.shape, self.strides, self._dtype, self.fieldType, self._fieldName))
+        return hash((self._layout, self.shape, self.strides, self._dtype, self.field_type, self._fieldName))
 
     def __eq__(self, other):
-        self_tuple = (self.shape, self.strides, self.name, self.dtype, self.fieldType)
-        other_tuple = (other.shape, other.strides, other.name, other.dtype, other.fieldType)
+        self_tuple = (self.shape, self.strides, self.name, self.dtype, self.field_type)
+        other_tuple = (other.shape, other.strides, other.name, other.dtype, other.field_type)
         return self_tuple == other_tuple
 
     PREFIX = "f"
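
A short sketch of the accessor behaviour described in the class docstring; the compass-style rendering (f_W for the west neighbor) matches the naming visible in the finitedifferences.py doctest later in this diff:

    from pystencils import Field

    f = Field.create_generic('f', spatial_dimensions=2, index_dimensions=1, index_shape=(3,))
    center = f(0)       # center access, index component 0
    west = f[-1, 0](2)  # spatial offset in [], index component in ()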
@@ -513,7 +513,7 @@ def spatial_layout_string_to_tuple(layout_str: str, dim: int) -> Tuple[int, ...]
         assert dim <= 3
         return tuple(reversed(range(dim)))
 
-    if layout_str in ('fzyx', 'f', 'reverseNumpy', 'SoA'):
+    if layout_str in ('fzyx', 'f', 'reverse_numpy', 'SoA'):
         return tuple(reversed(range(dim)))
     elif layout_str in ('c', 'numpy', 'AoS'):
         return tuple(range(dim))
@@ -528,7 +528,7 @@ def layout_string_to_tuple(layout_str, dim):
     elif layout_str == 'zyxf' or layout_str == 'aos':
         assert dim <= 4
         return tuple(reversed(range(dim - 1))) + (dim-1,)
-    elif layout_str == 'f' or layout_str == 'reversenumpy':
+    elif layout_str == 'f' or layout_str == 'reverse_numpy':
         return tuple(reversed(range(dim)))
     elif layout_str == 'c' or layout_str == 'numpy':
         return tuple(range(dim))
@@ -620,7 +620,7 @@ def offset_to_direction_string(offsets: Sequence[int]) -> str:
 
 def direction_string_to_offset(direction: str, dim: int = 3):
     """
-    Reverse mapping of :func:`offsetToDirectionString`
+    Reverse mapping of :func:`offset_to_direction_string`
 
     Args:
         direction: string representation of offset
diff --git a/finitedifferences.py b/finitedifferences.py
index db7e4c8c8cd5f9a5f72ace0029c68489934f2d81..db9a28b333ec7e2f8c23a24b8a1c5cc5a6568994 100644
--- a/finitedifferences.py
+++ b/finitedifferences.py
@@ -121,9 +121,9 @@ def discretize_divergence(vector_term, symbols_to_field_dict, dx):
     Examples:
         Laplace stencil
         >>> x, dx = sp.symbols("x dx")
-        >>> gradX = grad(x, dim=3)
+        >>> grad_x = grad(x, dim=3)
         >>> f = Field.create_generic('f', spatial_dimensions=3)
-        >>> sp.simplify(discretize_divergence(gradX, {x : f}, dx))
+        >>> sp.simplify(discretize_divergence(grad_x, {x : f}, dx))
         (f_B - 6*f_C + f_E + f_N + f_S + f_T + f_W)/dx**2
     """
     dim = len(vector_term)
diff --git a/gpucuda/cudajit.py b/gpucuda/cudajit.py
index 1553ecbfbafc6622a24928f7db590a6a524ee96f..a2a7c960e89134df3e24033683764c0841c86fdd 100644
--- a/gpucuda/cudajit.py
+++ b/gpucuda/cudajit.py
@@ -70,20 +70,20 @@ def _build_numpy_argument_list(parameters, argument_dict):
     argument_dict = {symbol_name_to_variable_name(k): v for k, v in argument_dict.items()}
     result = []
     for arg in parameters:
-        if arg.isFieldArgument:
+        if arg.is_field_argument:
             field = argument_dict[arg.field_name]
-            if arg.isFieldPtrArgument:
+            if arg.is_field_ptr_argument:
                 actual_type = field.dtype
                 expected_type = arg.dtype.base_type.numpy_dtype
                 if expected_type != actual_type:
                     raise ValueError("Data type mismatch for field '%s'. Expected '%s' got '%s'." %
                                      (arg.field_name, expected_type, actual_type))
                 result.append(field)
-            elif arg.isFieldStrideArgument:
+            elif arg.is_field_stride_argument:
                 dtype = get_base_type(arg.dtype).numpy_dtype
                 stride_arr = np.array(field.strides, dtype=dtype) // field.dtype.itemsize
                 result.append(cuda.In(stride_arr))
-            elif arg.isFieldShapeArgument:
+            elif arg.is_field_shape_argument:
                 dtype = get_base_type(arg.dtype).numpy_dtype
                 shape_arr = np.array(field.shape, dtype=dtype)
                 result.append(cuda.In(shape_arr))
@@ -106,14 +106,14 @@ def _check_arguments(parameter_specification, argument_dict):
     array_shapes = set()
     index_arr_shapes = set()
     for arg in parameter_specification:
-        if arg.isFieldArgument:
+        if arg.is_field_argument:
             try:
                 field_arr = argument_dict[arg.field_name]
             except KeyError:
                 raise KeyError("Missing field parameter for kernel call " + arg.field_name)
 
             symbolic_field = arg.field
-            if arg.isFieldPtrArgument:
+            if arg.is_field_ptr_argument:
                 if symbolic_field.has_fixed_shape:
                     symbolic_field_shape = tuple(int(i) for i in symbolic_field.shape)
                     if isinstance(symbolic_field.dtype, StructType):
diff --git a/gpucuda/indexing.py b/gpucuda/indexing.py
index cc76344e8dd34591193996cc17113b0157806189..705c9fe6771f118b698b5037d5acfb5614861a89 100644
--- a/gpucuda/indexing.py
+++ b/gpucuda/indexing.py
@@ -87,8 +87,8 @@ class BlockIndexing(AbstractIndexing):
     @property
     def coordinates(self):
         offsets = _get_start_from_slice(self._iterationSlice)
-        coordinates = [blockIndex * bs + threadIdx + off
-                       for blockIndex, bs, threadIdx, off in zip(BLOCK_IDX, self._blockSize, THREAD_IDX, offsets)]
+        coordinates = [block_index * bs + thread_idx + off
+                       for block_index, bs, thread_idx, off in zip(BLOCK_IDX, self._blockSize, THREAD_IDX, offsets)]
 
         return coordinates[:self._dim]
 
@@ -99,8 +99,8 @@ class BlockIndexing(AbstractIndexing):
                                                     _get_end_from_slice(self._iterationSlice, arr_shape))]
         widths = sp.Matrix(widths).subs(substitution_dict)
 
-        grid: Tuple[int, ...] = tuple(sp.ceiling(length / blockSize)
-                                      for length, blockSize in zip(widths, self._blockSize))
+        grid: Tuple[int, ...] = tuple(sp.ceiling(length / block_size)
+                                      for length, block_size in zip(widths, self._blockSize))
         extend_bs = (1,) * (3 - len(self._blockSize))
         extend_gr = (1,) * (3 - len(grid))
 
@@ -219,7 +219,7 @@ class BlockIndexing(AbstractIndexing):
 class LineIndexing(AbstractIndexing):
     """
     Indexing scheme that assigns the innermost 'line', i.e. the elements which are adjacent in memory, to a 1D CUDA block.
-    The fastest coordinate is indexed with threadIdx.x, the remaining coordinates are mapped to blockIdx.{x,y,z}
+    The fastest coordinate is indexed with threadIdx.x; the remaining coordinates are mapped to blockIdx.{x,y,z}.
     This indexing scheme supports up to 4 spatial dimensions, where the innermost dimension is not larger than the
     maximum number of threads allowed in a CUDA block (which depends on the device).
     """
@@ -267,24 +267,24 @@ class LineIndexing(AbstractIndexing):
 
 def _get_start_from_slice(iteration_slice):
     res = []
-    for sliceComponent in iteration_slice:
-        if type(sliceComponent) is slice:
-            res.append(sliceComponent.start if sliceComponent.start is not None else 0)
+    for slice_component in iteration_slice:
+        if type(slice_component) is slice:
+            res.append(slice_component.start if slice_component.start is not None else 0)
         else:
-            assert isinstance(sliceComponent, int)
-            res.append(sliceComponent)
+            assert isinstance(slice_component, int)
+            res.append(slice_component)
     return res
 
 
 def _get_end_from_slice(iteration_slice, arr_shape):
     iter_slice = normalize_slice(iteration_slice, arr_shape)
     res = []
-    for sliceComponent in iter_slice:
-        if type(sliceComponent) is slice:
-            res.append(sliceComponent.stop)
+    for slice_component in iter_slice:
+        if type(slice_component) is slice:
+            res.append(slice_component.stop)
         else:
-            assert isinstance(sliceComponent, int)
-            res.append(sliceComponent + 1)
+            assert isinstance(slice_component, int)
+            res.append(slice_component + 1)
     return res
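
What the two helpers return, traced on a small example derived directly from the branches above:

    from pystencils.gpucuda.indexing import _get_start_from_slice, _get_end_from_slice

    iteration_slice = [slice(2, 10), 4]                    # a slice for x, a fixed index for y
    print(_get_start_from_slice(iteration_slice))          # [2, 4]
    print(_get_end_from_slice(iteration_slice, (16, 16)))  # [10, 5]: the int component is included via +1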
 
 
diff --git a/gpucuda/kernelcreation.py b/gpucuda/kernelcreation.py
index 1c72101c556379aed12c73e4172788a24cc2d2e6..773e68b106522dc57f6840e150c3c637ef4dafac 100644
--- a/gpucuda/kernelcreation.py
+++ b/gpucuda/kernelcreation.py
@@ -76,8 +76,8 @@ def create_cuda_kernel(assignments, function_name="kernel", type_info=None, inde
     # they are defined here
     undefined_loop_counters = {LoopOverCoordinate.is_loop_counter_symbol(s): s for s in ast.body.undefined_symbols
                                if LoopOverCoordinate.is_loop_counter_symbol(s) is not None}
-    for i, loopCounter in undefined_loop_counters.items():
-        ast.body.insert_front(SympyAssignment(loopCounter, indexing.coordinates[i]))
+    for i, loop_counter in undefined_loop_counters.items():
+        ast.body.insert_front(SympyAssignment(loop_counter, indexing.coordinates[i]))
 
     ast.indexing = indexing
     ast.compile = partial(make_python_function, ast)
@@ -90,10 +90,10 @@ def created_indexed_cuda_kernel(assignments, index_fields, function_name="kernel
     all_fields = fields_read.union(fields_written)
     read_only_fields = set([f.name for f in fields_read - fields_written])
 
-    for indexField in index_fields:
-        indexField.fieldType = FieldType.INDEXED
-        assert FieldType.is_indexed(indexField)
-        assert indexField.spatial_dimensions == 1, "Index fields have to be 1D"
+    for index_field in index_fields:
+        index_field.field_type = FieldType.INDEXED
+        assert FieldType.is_indexed(index_field)
+        assert index_field.spatial_dimensions == 1, "Index fields have to be 1D"
 
     non_index_fields = [f for f in all_fields if f not in index_fields]
     spatial_coordinates = {f.spatial_dimensions for f in non_index_fields}
diff --git a/gpucuda/periodicity.py b/gpucuda/periodicity.py
index ea234de90a7146860bf0e50a1d5cffcef41e48c2..5513088658af5ed716dfc4288522a73d5652d00e 100644
--- a/gpucuda/periodicity.py
+++ b/gpucuda/periodicity.py
@@ -33,8 +33,8 @@ def get_periodic_boundary_functor(stencil, domain_size, index_dimensions=0, inde
     src_dst_slice_tuples = get_periodic_boundary_src_dst_slices(stencil, ghost_layers, thickness)
     kernels = []
     index_dimensions = index_dimensions
-    for srcSlice, dstSlice in src_dst_slice_tuples:
-        kernels.append(create_copy_kernel(domain_size, srcSlice, dstSlice, index_dimensions, index_dim_shape, dtype))
+    for src_slice, dst_slice in src_dst_slice_tuples:
+        kernels.append(create_copy_kernel(domain_size, src_slice, dst_slice, index_dimensions, index_dim_shape, dtype))
 
     def functor(pdfs, **_):
         for kernel in kernels:
diff --git a/kerncraft_coupling/generate_benchmark.py b/kerncraft_coupling/generate_benchmark.py
index 38699c4e6995aaa021fb032f519548674fb17de3..3e03410765fb451abf19c870ca40cd69f0eb7071 100644
--- a/kerncraft_coupling/generate_benchmark.py
+++ b/kerncraft_coupling/generate_benchmark.py
@@ -3,7 +3,7 @@ from pystencils.cpu import generate_c
 from pystencils.sympyextensions import prod
 from pystencils.data_types import get_base_type
 
-benchmarkTemplate = Template("""
+benchmark_template = Template("""
 #include "kerncraft.h"
 #include <stdlib.h>
 #include <stdint.h>
@@ -90,11 +90,11 @@ def generate_benchmark(ast, likwid=False):
     fields = []
     call_parameters = []
     for p in ast.parameters:
-        if not p.isFieldArgument:
+        if not p.is_field_argument:
             constants.append((p.name, str(p.dtype)))
             call_parameters.append(p.name)
         else:
-            assert p.isFieldPtrArgument, "Benchmark implemented only for kernels with fixed loop size"
+            assert p.is_field_ptr_argument, "Benchmark implemented only for kernels with fixed loop size"
             field = accessed_fields[p.field_name]
             dtype = str(get_base_type(p.dtype))
             fields.append((p.field_name, dtype, prod(field.shape)))
@@ -108,4 +108,4 @@ def generate_benchmark(ast, likwid=False):
         'constants': constants,
         'callArgumentList': ",".join(call_parameters),
     }
-    return benchmarkTemplate.render(**args)
+    return benchmark_template.render(**args)
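
Usage sketch for the renamed template; ast stands for a pystencils kernel AST with fixed loop sizes, and the resulting C file is meant to be compiled against kerncraft's headers:

    from pystencils.kerncraft_coupling.generate_benchmark import generate_benchmark

    c_source = generate_benchmark(ast, likwid=False)  # ast: kernel AST, e.g. from create_kernel()
    with open('benchmark.c', 'w') as f:
        f.write(c_source)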
diff --git a/kerncraft_coupling/kerncraft_interface.py b/kerncraft_coupling/kerncraft_interface.py
index 8860a8b6f32d08726901fc548433904ecf009223..241cba4674cc7fbff40695b06a5b6af6ab220313 100644
--- a/kerncraft_coupling/kerncraft_interface.py
+++ b/kerncraft_coupling/kerncraft_interface.py
@@ -27,7 +27,7 @@ class PyStencilsKerncraftKernel(kerncraft.kernel.Kernel):
         super(PyStencilsKerncraftKernel, self).__init__(machine)
 
         self.ast = ast
-        self.temporaryDir = TemporaryDirectory()
+        self.temporary_dir = TemporaryDirectory()
 
         # Loops
         inner_loops = [l for l in ast.atoms(LoopOverCoordinate) if l.is_innermost_loop]
@@ -42,9 +42,9 @@ class PyStencilsKerncraftKernel(kerncraft.kernel.Kernel):
         cur_node = inner_loop
         while cur_node is not None:
             if isinstance(cur_node, LoopOverCoordinate):
-                loopCounterSym = cur_node.loop_counter_symbol
-                loopInfo = (loopCounterSym.name, cur_node.start, cur_node.stop, cur_node.step)
-                self._loop_stack.append(loopInfo)
+                loop_counter_sym = cur_node.loop_counter_symbol
+                loop_info = (loop_counter_sym.name, cur_node.start, cur_node.stop, cur_node.step)
+                self._loop_stack.append(loop_info)
             cur_node = cur_node.parent
         self._loop_stack = list(reversed(self._loop_stack))
 
@@ -53,14 +53,14 @@ class PyStencilsKerncraftKernel(kerncraft.kernel.Kernel):
         self.destinations = defaultdict(list)
 
         reads, writes = search_resolved_field_accesses_in_ast(inner_loop)
-        for accesses, targetDict in [(reads, self.sources), (writes, self.destinations)]:
+        for accesses, target_dict in [(reads, self.sources), (writes, self.destinations)]:
             for fa in accesses:
                 coord = [sp.Symbol(LoopOverCoordinate.get_loop_counter_name(i), positive=True, integer=True) + off
                          for i, off in enumerate(fa.offsets)]
-                coord += list(fa.idxCoordinateValues)
+                coord += list(fa.idx_coordinate_values)
                 layout = get_layout_from_strides(fa.field.strides)
                 permuted_coord = [coord[i] for i in layout]
-                targetDict[fa.field.name].append(permuted_coord)
+                target_dict[fa.field.name].append(permuted_coord)
 
         # Variables (arrays)
         fields_accessed = ast.fields_accessed
@@ -70,7 +70,7 @@ class PyStencilsKerncraftKernel(kerncraft.kernel.Kernel):
             self.set_variable(field.name, str(field.dtype), tuple(permuted_shape))
 
         for param in ast.parameters:
-            if not param.isFieldArgument:
+            if not param.is_field_argument:
                 self.set_variable(param.name, str(param.dtype), None)
                 self.sources[param.name] = [None]
 
@@ -97,12 +97,12 @@ class PyStencilsKerncraftKernel(kerncraft.kernel.Kernel):
     
         compiler_cmd = [compiler] + compiler_args + ['-I' + header_path]
     
-        src_file = os.path.join(self.temporaryDir.name, "source.c")
-        asm_file = os.path.join(self.temporaryDir.name, "source.s")
-        iaca_asm_file = os.path.join(self.temporaryDir.name, "source.iaca.s")
+        src_file = os.path.join(self.temporary_dir.name, "source.c")
+        asm_file = os.path.join(self.temporary_dir.name, "source.s")
+        iaca_asm_file = os.path.join(self.temporary_dir.name, "source.iaca.s")
         dummy_src_file = os.path.join(header_path, "dummy.c")
-        dummy_asm_file = os.path.join(self.temporaryDir.name, "dummy.s")
-        binary_file = os.path.join(self.temporaryDir.name, "binary")
+        dummy_asm_file = os.path.join(self.temporary_dir.name, "dummy.s")
+        binary_file = os.path.join(self.temporary_dir.name, "binary")
 
         # write source code to file
         with open(src_file, 'w') as f:
@@ -136,8 +136,8 @@ class PyStencilsKerncraftKernel(kerncraft.kernel.Kernel):
         ]
 
         dummy_src_file = os.path.join(header_path, 'dummy.c')
-        src_file = os.path.join(self.temporaryDir.name, "source_likwid.c")
-        bin_file = os.path.join(self.temporaryDir.name, "benchmark")
+        src_file = os.path.join(self.temporary_dir.name, "source_likwid.c")
+        bin_file = os.path.join(self.temporary_dir.name, "benchmark")
 
         with open(src_file, 'w') as f:
             f.write(generate_benchmark(self.ast, likwid=True))
diff --git a/kernelcreation.py b/kernelcreation.py
index 171de3fa45718d6207381db54d6adcfff6742897..25fe50088bf55022eab150d430953a4ef431ea8f 100644
--- a/kernelcreation.py
+++ b/kernelcreation.py
@@ -15,7 +15,7 @@ def create_kernel(equations, target='cpu', data_type="double", iteration_slice=N
                            field is iterated over
     :param ghost_layers: if left to default, the number of necessary ghost layers is determined automatically.
                         A single integer specifies the ghost layer count at all borders; it can also be a sequence of
-                        pairs [(xLowerGl, xUpperGl), .... ]
+                        pairs [(x_lower_gl, x_upper_gl), ...]
 
     CPU specific Parameters:
     :param cpu_openmp: True or number of threads for OpenMP parallelization, False for no OpenMP
@@ -24,7 +24,7 @@ def create_kernel(equations, target='cpu', data_type="double", iteration_slice=N
     GPU specific Parameters:
     :param gpu_indexing: either 'block' or 'line', or a custom indexing class (see gpucuda/indexing.py)
     :param gpu_indexing_params: dict with indexing parameters (constructor parameters of indexing class)
-                              e.g. for 'block' one can specify {'blockSize': (20, 20, 10) }
+                              e.g. for 'block' one can specify {'block_size': (20, 20, 10)}
 
     :return: abstract syntax tree object that can either be printed as source code or compiled
              through its compile() function
@@ -49,8 +49,8 @@ def create_kernel(equations, target='cpu', data_type="double", iteration_slice=N
             import pystencils.backends.simd_instruction_sets as vec
             from pystencils.vectorization import vectorize
             vec_params = cpu_vectorize_info
-            vec.selectedInstructionSet = vec.x86_vector_instruction_set(instruction_set=vec_params[0],
-                                                                        data_type=vec_params[1])
+            vec.selected_instruction_set = vec.x86_vector_instruction_set(instruction_set=vec_params[0],
+                                                                          data_type=vec_params[1])
             vectorize(ast)
         return ast
     elif target == 'llvm':
@@ -74,7 +74,7 @@ def create_indexed_kernel(assignments, index_fields, target='cpu', data_type="do
     Similar to :func:`create_kernel`, but instead of updating all cells of a field, only those cells are updated whose
     coordinates are stored in an index field. This traversal method can be used, e.g., for boundary handling.
 
-    The coordinates are stored in a separated indexField, which is a one dimensional array with struct data type.
+    The coordinates are stored in a separate index_field, which is a one-dimensional array with struct data type.
     This struct has to contain fields named 'x', 'y' and, for 3D fields, 'z'. These names are configurable with the
     'coordinate_names' parameter. The struct can also have other fields that can be read and written in the kernel, for
     example boundary parameters.
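
Tying the renamed parameters together, a hedged example of the GPU path; assignments stands for the equations described above, and the block size is taken from the docstring's own example:

    import pystencils as ps

    ast = ps.create_kernel(assignments, target='gpu',
                           gpu_indexing='block',
                           gpu_indexing_params={'block_size': (20, 20, 10)})
    kernel = ast.compile()  # see the :return: note above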
diff --git a/llvm/llvm.py b/llvm/llvm.py
index f85f97ac6b1d420f732ea0d0e26546983827e946..8f0a8b067258fb3968b9598ff5e950c7a7328b83 100644
--- a/llvm/llvm.py
+++ b/llvm/llvm.py
@@ -263,25 +263,25 @@ class LLVMPrinter(Printer):
                                       'whether to implement it. So far there is no '
                                       'use-case to test it.')
         else:
-            phiData = []
+            phi_data = []
             after_block = self.builder.append_basic_block()
             for (expr, condition) in piece.args:
                 if condition == True:  # Don't use 'is' use '=='!
-                    phiData.append((self._print(expr), self.builder.block))
+                    phi_data.append((self._print(expr), self.builder.block))
                     self.builder.branch(after_block)
                     self.builder.position_at_end(after_block)
                 else:
                     cond = self._print(condition)
-                    trueBlock = self.builder.append_basic_block()
-                    falseBlock = self.builder.append_basic_block()
-                    self.builder.cbranch(cond, trueBlock, falseBlock)
-                    self.builder.position_at_end(trueBlock)
-                    phiData.append((self._print(expr), trueBlock))
+                    true_block = self.builder.append_basic_block()
+                    false_block = self.builder.append_basic_block()
+                    self.builder.cbranch(cond, true_block, false_block)
+                    self.builder.position_at_end(true_block)
+                    phi_data.append((self._print(expr), true_block))
                     self.builder.branch(after_block)
-                    self.builder.position_at_end(falseBlock)
+                    self.builder.position_at_end(false_block)
 
             phi = self.builder.phi(to_llvm_type(get_type_of_expression(piece)))
-            for (val, block) in phiData:
+            for (val, block) in phi_data:
                 phi.add_incoming(val, block)
             return phi
 
diff --git a/parallel/blockiteration.py b/parallel/blockiteration.py
index b8c1b9e67b293b72ac9673d3954e753703545a79..1f7e0f43a73a780863b5fa95d9a808261b88738a 100644
--- a/parallel/blockiteration.py
+++ b/parallel/blockiteration.py
@@ -23,7 +23,7 @@ def block_iteration(blocks, ghost_layers, dim=3, access_prefix=''):
     Args:
         blocks: walberla block data structure
         ghost_layers: how many ghost layers to include (outer and inner)
-        dim: walberla's block data structure is 3D - 2D domains can be done by setting zSize=1
+        dim: walberla's block data structure is 3D; 2D domains can be handled by setting z_size=1
              if dim=2 is set here, the third coordinate of the returned fields is accessed at z=0 automatically
         access_prefix: see documentation of sliced_block_iteration
     """
@@ -55,8 +55,8 @@ def sliced_block_iteration(blocks, slice_obj=None, inner_ghost_layers=1, outer_g
                       certain prefix 'gpu_')
 
     Example:
-        assume no slice is given, then sliceNormalizationGhostLayers effectively sets how much ghost layers at the
-        border of the domain are included. The innerGhostLayers parameter specifies how many inner ghost layers are
+        Assume no slice is given; then slice_normalization_ghost_layers effectively sets how many ghost layers at the
+        border of the domain are included. The inner_ghost_layers parameter specifies how many inner ghost layers are
         included
     """
     if slice_obj is None:
diff --git a/runhelper/db.py b/runhelper/db.py
index 9a55d182a22981abc569fa3b056be9ff56fc476b..f6fdc69a34d132d02b0715b5070c45f695973867 100644
--- a/runhelper/db.py
+++ b/runhelper/db.py
@@ -103,7 +103,7 @@ class Database:
         return len(self.filter({'params': parameters})) > 0
 
     # Columns with these prefixes are not included in pandas result
-    pandasColumnsToIgnore = ['changedParams.', 'env.']
+    pandas_columns_to_ignore = ['changedParams.', 'env.']
 
     def to_pandas(self, parameter_query, remove_prefix=True, drop_constant_columns=False):
         """Queries for simulations with given parameters and returns them in a pandas data frame.
@@ -125,8 +125,8 @@ class Database:
         df = json_normalize(attributes)
         df.set_index('pk', inplace=True)
 
-        if self.pandasColumnsToIgnore:
-            remove_columns_by_prefix(df, self.pandasColumnsToIgnore, inplace=True)
+        if self.pandas_columns_to_ignore:
+            remove_columns_by_prefix(df, self.pandas_columns_to_ignore, inplace=True)
         if remove_prefix:
             remove_prefix_in_column_name(df, inplace=True)
         if drop_constant_columns:
@@ -159,10 +159,10 @@ def remove_columns_by_prefix(df, prefixes: Sequence[str], inplace: bool = False)
     if not inplace:
         df = df.copy()
 
-    for columnName in df.columns:
+    for column_name in df.columns:
         for prefix in prefixes:
-            if columnName.startswith(prefix):
-                del df[columnName]
+            if column_name.startswith(prefix):
+                del df[column_name]
     return df
 
 
@@ -176,10 +176,10 @@ def remove_prefix_in_column_name(df, inplace: bool = False):
         df = df.copy()
 
     new_column_names = []
-    for columnName in df.columns:
-        if '.' in columnName:
-            new_column_names.append(columnName[columnName.index('.') + 1:])
+    for column_name in df.columns:
+        if '.' in column_name:
+            new_column_names.append(column_name[column_name.index('.') + 1:])
         else:
-            new_column_names.append(columnName)
+            new_column_names.append(column_name)
     df.columns = new_column_names
     return df
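
The two frame helpers above, demonstrated on a toy frame with invented column names:

    import pandas as pd
    from pystencils.runhelper.db import remove_columns_by_prefix, remove_prefix_in_column_name

    df = pd.DataFrame({'params.domain_size': [32], 'env.hostname': ['node1']})
    df = remove_columns_by_prefix(df, ['env.'])  # drops 'env.hostname'
    df = remove_prefix_in_column_name(df)        # 'params.domain_size' -> 'domain_size'
    print(list(df.columns))                      # ['domain_size']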
diff --git a/runhelper/parameterstudy.py b/runhelper/parameterstudy.py
index 6a4089f1696a19784af3e4e07ef3399f65600e9d..302d7a51dcc01dc32cf76bc5afe8ee58a3895b2a 100644
--- a/runhelper/parameterstudy.py
+++ b/runhelper/parameterstudy.py
@@ -51,7 +51,7 @@ class ParameterStudy:
     to store the results in the database.
     """
 
-    Run = namedtuple("Run", ['parameterDict', 'weight'])
+    Run = namedtuple("Run", ['parameter_dict', 'weight'])
 
     def __init__(self, run_function: Callable[..., Dict], runs: Sequence = (), database_connector: str='./db') -> None:
         self.runs = list(runs)
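
A hedged end-to-end sketch of the Run/ParameterStudy pieces; the run_function body and its parameters are invented, while the method names are from this file:

    from pystencils.runhelper.parameterstudy import ParameterStudy

    def simulate(domain_size, omega):   # run_function: must return a result dict
        return {'residual': 0.01 * omega}

    study = ParameterStudy(simulate, database_connector='./db')
    study.runs.append(ParameterStudy.Run(parameter_dict={'domain_size': (32, 32), 'omega': 1.9},
                                         weight=1))
    study.run_scenarios_not_in_database()  # runs each pending scenario and stores its result dict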
@@ -93,9 +93,9 @@ class ParameterStudy:
         parameter_values = [e[1] for e in degrees_of_freedom]
 
         default_params_dict = {} if constant_parameters is None else constant_parameters
-        for valueTuple in itertools.product(*parameter_values):
+        for value_tuple in itertools.product(*parameter_values):
             params_dict = deepcopy(default_params_dict)
-            params_dict.update({name: value for name, value in zip(parameter_names, valueTuple)})
+            params_dict.update({name: value for name, value in zip(parameter_names, value_tuple)})
             params = DotDict(params_dict)
             if filter_function:
                 params = filter_function(params)
@@ -116,22 +116,22 @@ class ParameterStudy:
         parameter_update = {} if parameter_update is None else parameter_update
         own_runs = self._distribute_runs(self.runs, process, num_processes)
         for run in own_runs:
-            parameter_dict = run.parameterDict.copy()
+            parameter_dict = run.parameter_dict.copy()
             parameter_dict.update(parameter_update)
             result = self.run_function(**parameter_dict)
 
-            self.db.save(run.parameterDict, result, None, changed_params=parameter_update)
+            self.db.save(run.parameter_dict, result, None, changed_params=parameter_update)
 
     def run_scenarios_not_in_database(self, parameter_update: Optional[ParameterDict] = None) -> None:
         """Same as the run method, but runs only configurations for which no result is in the database yet."""
         parameter_update = {} if parameter_update is None else parameter_update
         filtered_runs = self._filter_already_simulated(self.runs)
         for run in filtered_runs:
-            parameter_dict = run.parameterDict.copy()
+            parameter_dict = run.parameter_dict.copy()
             parameter_dict.update(parameter_update)
             result = self.run_function(**parameter_dict)
 
-            self.db.save(run.parameterDict, result, changed_params=parameter_update)
+            self.db.save(run.parameter_dict, result, changed_params=parameter_update)
 
     def run_server(self, ip: str = "0.0.0.0", port: int = 8342):
         """Runs a server that supplies runner clients with scenarios to simulate and collects results from them.
@@ -145,17 +145,17 @@ class ParameterStudy:
 
         class ParameterStudyServer(BaseHTTPRequestHandler):
-            parameterStudy = self
+            parameter_study = self
-            allRuns = filtered_runs
+            all_runs = filtered_runs
             runs = filtered_runs.copy()
-            currentlyRunning = {}
-            finishedRuns = []
+            currently_running = {}
+            finished_runs = []
 
             def next_scenario(self, received_json_data):
                 client_name = received_json_data['client_name']
                 if len(self.runs) > 0:
-                    run_status = "%d/%d" % (len(self.finishedRuns), len(self.allRuns))
-                    work_status = "%d/%d" % (sum(r.weight for r in self.finishedRuns),
-                                             sum(r.weight for r in self.allRuns))
+                    run_status = "%d/%d" % (len(self.finished_runs), len(self.all_runs))
+                    work_status = "%d/%d" % (sum(r.weight for r in self.finished_runs),
+                                             sum(r.weight for r in self.all_runs))
                     format_args = {
                         'remaining': len(self.runs),
                         'time': datetime.datetime.now().strftime("%H:%M:%S"),
@@ -167,24 +167,24 @@ class ParameterStudy:
                     scenario = self.runs.pop(0)
                     print(" {time} {client_name} fetched scenario. Scenarios: {run_status}, Work: {work_status}"
                           .format(**format_args))
-                    self.currentlyRunning[client_name] = scenario
-                    return {'status': 'ok', 'params': scenario.parameterDict}
+                    self.currently_running[client_name] = scenario
+                    return {'status': 'ok', 'params': scenario.parameter_dict}
                 else:
                     return {'status': 'finished'}
 
             def result(self, received_json_data):
                 client_name = received_json_data['client_name']
-                run = self.currentlyRunning[client_name]
-                self.finishedRuns.append(run)
-                del self.currentlyRunning[client_name]
+                run = self.currently_running[client_name]
+                self.finished_runs.append(run)
+                del self.currently_running[client_name]
                 d = received_json_data
 
                 def hash_dict(dictionary):
                     import hashlib
                     return hashlib.sha1(json.dumps(dictionary, sort_keys=True).encode()).hexdigest()
 
-                assert hash_dict(d['params']) == hash_dict(run.parameterDict)
-                self.parameterStudy.db.save(run.parameterDict,
+                assert hash_dict(d['params']) == hash_dict(run.parameter_dict)
+                self.parameterStudy.db.save(run.parameter_dict,
                                             result=d['result'], env=d['env'], changed_params=d['changed_params'])
                 return {}
 
@@ -215,7 +215,7 @@ class ParameterStudy:
 
         print("Listening to connections on {}:{}. Scenarios to simulate: {}".format(ip, port, len(filtered_runs)))
         server = HTTPServer((ip, port), ParameterStudyServer)
-        while len(ParameterStudyServer.currentlyRunning) > 0 or len(ParameterStudyServer.runs) > 0:
+        while len(ParameterStudyServer.currently_running) > 0 or len(ParameterStudyServer.runs) > 0:
             server.handle_request()
         server.handle_request()
 
@@ -268,12 +268,12 @@ class ParameterStudy:
             self.run_server(a.host, a.port)
 
         def client(a):
-            self.run_client(a.client_name, a.host, a.port, json.loads(a.parameterOverride))
+            self.run_client(a.client_name, a.host, a.port, json.loads(a.parameter_override))
 
         def local(a):
             if a.database:
                 self.db = Database(a.database)
-            self.run_scenarios_not_in_database(json.loads(a.parameterOverride))
+            self.run_scenarios_not_in_database(json.loads(a.parameter_override))
 
         parser = ArgumentParser()
         subparsers = parser.add_subparsers()
@@ -281,7 +281,7 @@ class ParameterStudy:
         local_parser = subparsers.add_parser('local', aliases=['l'],
                                              help="Run scenarios locally which are not yet in database", )
         local_parser.add_argument("-d", "--database", type=str, default="")
-        local_parser.add_argument("-P", "--parameterOverride", type=str, default="{}",
+        local_parser.add_argument("-P", "--parameter_override", type=str, default="{}",
                                   help="JSON: the parameter dictionary is updated with these parameters. Use this to "
                                        "set host specific options like GPU call parameters. Enclose in \" ")
         local_parser.set_defaults(func=local)
@@ -299,7 +299,7 @@ class ParameterStudy:
         client_parser.add_argument("-H", "--host", type=str, default="localhost", help="Host or IP to connect to")
         client_parser.add_argument("-n", "--client_name", type=str, default="{hostname}_{pid}",
                                    help="Unique client name, you can use {hostname} and {pid} as placeholder")
-        client_parser.add_argument("-P", "--parameterOverride", type=str, default="{}",
+        client_parser.add_argument("-P", "--parameter_override", type=str, default="{}",
                                    help="JSON: the parameter dictionary is updated with these parameters. Use this to "
                                         "set host specific options like GPU call parameters. Enclose in \" ")
         client_parser.set_defaults(func=client)
@@ -312,7 +312,7 @@ class ParameterStudy:
 
     def _filter_already_simulated(self, all_runs):
         """Removes all runs from the given list, that are already in the database"""
-        return [r for r in all_runs if not self.db.was_already_simulated(r.parameterDict)]
+        return [r for r in all_runs if not self.db.was_already_simulated(r.parameter_dict)]
 
     @staticmethod
     def _distribute_runs(all_runs, process, num_processes):
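
Usage note for the renamed Run tuple: consumers that previously unpacked run.parameterDict must switch to run.parameter_dict. A minimal, self-contained sketch; the run_function body and its parameter names are illustrative, not part of the library:

    from collections import namedtuple

    Run = namedtuple("Run", ['parameter_dict', 'weight'])

    def run_function(domain_size=10, method='srt'):
        # illustrative stub: a real run_function returns a result dictionary
        return {'residual': 1.0 / domain_size}

    run = Run(parameter_dict={'domain_size': 20, 'method': 'srt'}, weight=1)
    result = run_function(**run.parameter_dict)  # access via the renamed field
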
diff --git a/slicing.py b/slicing.py
index 389d1ddd35ace9ad1bfe0c8fa6af76d9124e4da3..2ebd985ff100fb60bc05bb82084f37cb71197588 100644
--- a/slicing.py
+++ b/slicing.py
@@ -94,9 +94,9 @@ def slice_from_direction(direction_name, dim, normal_offset=0, tangential_offset
 
     :param direction_name: name of direction as explained above
     :param dim: dimension of the returned slice (should be 2 or 3)
-    :param normal_offset: the offset in 'normal' direction: e.g. slice_from_direction('N',2, normalOffset=2)
+    :param normal_offset: the offset in 'normal' direction: e.g. slice_from_direction('N', 2, normal_offset=2)
                          would return make_slice[:, -3]
-    :param tangential_offset: offset in the other directions: e.g. slice_from_direction('N',2, tangentialOffset=2)
+    :param tangential_offset: offset in the other directions: e.g. slice_from_direction('N', 2, tangential_offset=2)
                          would return make_slice[2:-2, -1]
     """
     if tangential_offset == 0:
@@ -106,13 +106,13 @@ def slice_from_direction(direction_name, dim, normal_offset=0, tangential_offset
 
     normal_slice_high, normal_slice_low = -1 - normal_offset, normal_offset
 
-    for dimIdx, (lowName, highName) in enumerate([('W', 'E'), ('S', 'N'), ('B', 'T')]):
-        if lowName in direction_name:
-            assert highName not in direction_name, "Invalid direction name"
-            result[dimIdx] = normal_slice_low
-        if highName in direction_name:
-            assert lowName not in direction_name, "Invalid direction name"
-            result[dimIdx] = normal_slice_high
+    for dim_idx, (low_name, high_name) in enumerate([('W', 'E'), ('S', 'N'), ('B', 'T')]):
+        if low_name in direction_name:
+            assert high_name not in direction_name, "Invalid direction name"
+            result[dim_idx] = normal_slice_low
+        if high_name in direction_name:
+            assert low_name not in direction_name, "Invalid direction name"
+            result[dim_idx] = normal_slice_high
     return tuple(result)
 
 
@@ -147,20 +147,20 @@ def get_slice_before_ghost_layer(direction, ghost_layers=1, thickness=None, full
     :param ghost_layers: number of ghost layers
     :param thickness: thickness of the slice, defaults to number of ghost layers
     :param full_slice:  if true also the ghost cells in directions orthogonal to direction are contained in the
-                       returned slice. Example (d=W ): if fullSlice then also the ghost layer in N-S and T-B
+                       returned slice. Example (d=W): if full_slice is set, the ghost layers in N-S and T-B
                        are included, otherwise only inner cells are returned
     """
     if not thickness:
         thickness = ghost_layers
     full_slice_inc = ghost_layers if not full_slice else 0
     slices = []
-    for dirComponent in direction:
-        if dirComponent == -1:
+    for dir_component in direction:
+        if dir_component == -1:
             s = slice(ghost_layers, thickness + ghost_layers)
-        elif dirComponent == 0:
+        elif dir_component == 0:
             end = -full_slice_inc
             s = slice(full_slice_inc, end if end != 0 else None)
-        elif dirComponent == 1:
+        elif dir_component == 1:
             start = -thickness - ghost_layers
             end = -ghost_layers
             s = slice(start if start != 0 else None, end if end != 0 else None)
@@ -180,13 +180,13 @@ def get_ghost_region_slice(direction, ghost_layers=1, thickness=None, full_slice
     assert thickness <= ghost_layers
     full_slice_inc = ghost_layers if not full_slice else 0
     slices = []
-    for dirComponent in direction:
-        if dirComponent == -1:
+    for dir_component in direction:
+        if dir_component == -1:
             s = slice(ghost_layers - thickness, ghost_layers)
-        elif dirComponent == 0:
+        elif dir_component == 0:
             end = -full_slice_inc
             s = slice(full_slice_inc, end if end != 0 else None)
-        elif dirComponent == 1:
+        elif dir_component == 1:
             start = -ghost_layers
             end = - ghost_layers + thickness
             s = slice(start if start != 0 else None, end if end != 0 else None)
@@ -220,8 +220,8 @@ def get_periodic_boundary_functor(stencil, ghost_layers=1, thickness=None):
     src_dst_slice_tuples = get_periodic_boundary_src_dst_slices(stencil, ghost_layers, thickness)
 
     def functor(pdfs, **_):
-        for srcSlice, dstSlice in src_dst_slice_tuples:
-            pdfs[dstSlice] = pdfs[srcSlice]
+        for src_slice, dst_slice in src_dst_slice_tuples:
+            pdfs[dst_slice] = pdfs[src_slice]
 
     return functor
 
@@ -232,7 +232,7 @@ def slice_intersection(slice1, slice2):
 
     new_min = [max(s1.start, s2.start) for s1, s2 in zip(slice1, slice2)]
     new_max = [min(s1.stop,  s2.stop) for s1, s2 in zip(slice1, slice2)]
-    if any(maxP - minP < 0 for minP, maxP in zip(new_min, new_max)):
+    if any(max_p - min_p < 0 for min_p, max_p in zip(new_min, new_max)):
         return None
 
-    return [slice(minP, maxP, None) for minP, maxP in zip(new_min, new_max)]
+    return [slice(min_p, max_p, None) for min_p, max_p in zip(new_min, new_max)]
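
The renamed keyword arguments can be checked against the return values documented in the slice_from_direction docstring; a small sketch, assuming the function is importable from the slicing module:

    import numpy as np
    from slicing import slice_from_direction  # import path is an assumption

    arr = np.zeros((6, 6))
    # Per the docstring, slice_from_direction('N', 2, tangential_offset=2)
    # is equivalent to make_slice[2:-2, -1]: the northern boundary layer
    # minus two cells on each tangential side.
    arr[slice_from_direction('N', 2, tangential_offset=2)] = 1.0
    assert arr[2:-2, -1].sum() == 2.0
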
diff --git a/sympyextensions.py b/sympyextensions.py
index b4a9b6dba020c1233f48f94b3c6d882544ba4abd..634a04429a0da910141760cdd70e770d907dd6b5 100644
--- a/sympyextensions.py
+++ b/sympyextensions.py
@@ -189,10 +189,10 @@ def subs_additive(expr: sp.Expr, replacement: sp.Expr, subexpression: sp.Expr,
 
     Args:
         expr: input expression
-        replacement: expression that is inserted for subExpression (if found)
+        replacement: expression that is inserted for subexpression (if found)
         subexpression: expression to replace
         required_match_replacement:
-             * if float: the percentage of terms of the subExpression that has to be matched in order to replace
+             * if float: the percentage of the subexpression's terms that have to be matched in order to replace
              * if integer: the total number of terms that have to be matched in order to replace
              * None: is equal to integer 1
              * if both match parameters are given, both restrictions have to be fulfilled (i.e. logical AND)
@@ -229,11 +229,11 @@ def subs_additive(expr: sp.Expr, replacement: sp.Expr, subexpression: sp.Expr,
                 # find common factor
                 factors = defaultdict(lambda: 0)
                 skips = 0
-                for commonSymbol in subexpression_coefficient_dict.keys():
-                    if commonSymbol not in expr_coefficients:
+                for common_symbol in subexpression_coefficient_dict.keys():
+                    if common_symbol not in expr_coefficients:
                         skips += 1
                         continue
-                    factor = expr_coefficients[commonSymbol] / subexpression_coefficient_dict[commonSymbol]
+                    factor = expr_coefficients[common_symbol] / subexpression_coefficient_dict[common_symbol]
                     factors[sp.simplify(factor)] += 1
 
                 common_factor = max(factors.items(), key=operator.itemgetter(1))[0]
@@ -344,9 +344,9 @@ def remove_higher_order_terms(expr: sp.Expr, symbols: Sequence[sp.Symbol], order
     if type(expr) != Add:
         return expr
 
-    for sumTerm in expr.args:
-        if velocity_factors_in_product(sumTerm) <= order:
-            result += sumTerm
+    for sum_term in expr.args:
+        if velocity_factors_in_product(sum_term) <= order:
+            result += sum_term
     return result
 
 
@@ -364,7 +364,7 @@ def complete_the_square(expr: sp.Expr, symbol_to_complete: sp.Symbol,
         (n, s + b/(2*a))
 
     Returns:
-        (replacedExpr, tuple to pass to subs, such that old expr comes out again)
+        (replaced_expr, tuple to pass to subs, such that old expr comes out again)
     """
     p = sp.Poly(expr, symbol_to_complete)
     coefficients = p.all_coeffs()
@@ -436,8 +436,8 @@ def count_operations(term: Union[sp.Expr, List[sp.Expr]],
     if isinstance(term, Sequence):
         for element in term:
             r = count_operations(element, only_type)
-            for operationName in result.keys():
-                result[operationName] += r[operationName]
+            for operation_name in result.keys():
+                result[operation_name] += r[operation_name]
         return result
     elif isinstance(term, Assignment):
         term = term.rhs
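
For subs_additive, a usage sketch with the snake_case parameter names; the import path and the exact result are assumptions, since the outcome depends on required_match_replacement:

    import sympy as sp
    from sympyextensions import subs_additive  # import path is an assumption

    x, y, z, k = sp.symbols('x y z k')
    expr = 2*x + 2*y + z
    # x + y occurs in expr with common factor 2; if matched, the expected
    # result is 2*k + z.
    print(subs_additive(expr, k, x + y))
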
diff --git a/timeloop.py b/timeloop.py
index 9a562d27e1c6baec6867b1c8919f8e464f35e5c1..cc7a2fe5440fbf63800a3a08e9336a912eaf2a79 100644
--- a/timeloop.py
+++ b/timeloop.py
@@ -7,7 +7,7 @@ class TimeLoop:
         self._postRunFunctions = []
         self._timeStepFunctions = []
         self._functionNames = []
-        self.timeStepsRun = 0
+        self.time_steps_run = 0
 
     def add_step(self, step_obj):
         if hasattr(step_obj, 'pre_run'):
@@ -62,8 +62,8 @@ class TimeLoop:
 
         :param time_for_benchmark: number of seconds the benchmark should take
         :param init_time_steps: number of time steps run initially for warm-up, to get arrays into cache etc.
-        :param number_of_time_steps_for_estimation: time steps run before real benchmarks, to determine number of time steps
-                                               that approximately take 'timeForBenchmark'
+        :param number_of_time_steps_for_estimation: time steps run before real benchmarks, to determine number of time
+                                                    steps that approximately take 'time_for_benchmark'
         """
         # Run a few time steps to get a first estimate
         duration_of_time_step = self.benchmark_run(number_of_time_steps_for_estimation, init_time_steps)
@@ -84,7 +84,7 @@ class TimeLoop:
     def time_step(self):
         for f in self._timeStepFunctions:
             f()
-        self.timeStepsRun += 1
+        self.time_steps_run += 1
 
 
 
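A quick sketch of the renamed counter, using only the time_step method and time_steps_run attribute shown above; the import path is an assumption:

    from timeloop import TimeLoop  # import path is an assumption

    tl = TimeLoop()
    for _ in range(5):
        tl.time_step()  # runs all registered step functions (none here)
    assert tl.time_steps_run == 5
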
diff --git a/transformations/stage2.py b/transformations/stage2.py
index 6d47400e2543d986b2335744f6c9b9f887d72bc2..f5664e1bfb430f7b1787b5a2939959037a5514cd 100644
--- a/transformations/stage2.py
+++ b/transformations/stage2.py
@@ -17,8 +17,8 @@ def insert_casts(node):
         :return: args with possible casts
         """
         casted_args = []
-        for argument, dataType in zipped_args_types:
-            if dataType.numpy_dtype != target_dtype.numpy_dtype:  # ignoring const
+        for argument, data_type in zipped_args_types:
+            if data_type.numpy_dtype != target_dtype.numpy_dtype:  # ignoring const
                 casted_args.append(cast_func(argument, target_dtype))
             else:
                 casted_args.append(argument)
@@ -32,13 +32,13 @@ def insert_casts(node):
         """
         pointer = None
         new_args = []
-        for arg, dataType in expr_args:
-            if dataType.func is PointerType:
+        for arg, data_type in expr_args:
+            if data_type.func is PointerType:
                 assert pointer is None
                 pointer = arg
-        for arg, dataType in expr_args:
+        for arg, data_type in expr_args:
             if arg != pointer:
-                assert dataType.is_int() or dataType.is_uint()
+                assert data_type.is_int() or data_type.is_uint()
                 new_args.append(arg)
         new_args = sp.Add(*new_args) if len(new_args) > 0 else new_args
         return pointer_arithmetic_func(pointer, new_args)
@@ -71,12 +71,12 @@ def insert_casts(node):
     elif node.func is ast.ResolvedFieldAccess:
         return node
     elif node.func is ast.Block:
-        for oldArg, newArg in zip(node.args, args):
-            node.replace(oldArg, newArg)
+        for old_arg, new_arg in zip(node.args, args):
+            node.replace(old_arg, new_arg)
         return node
     elif node.func is ast.LoopOverCoordinate:
-        for oldArg, newArg in zip(node.args, args):
-            node.replace(oldArg, newArg)
+        for old_arg, new_arg in zip(node.args, args):
+            node.replace(old_arg, new_arg)
         return node
     elif node.func is sp.Piecewise:
         expressions = [expr for (expr, _) in args]
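
The casting rule above (cast only arguments whose dtype differs from the target) can be mirrored with plain NumPy; the helper name cast_args below is mine, not the library's:

    import numpy as np

    def cast_args(args, target_dtype):
        # cast only arguments whose dtype differs from the target dtype
        return [np.asarray(a).astype(target_dtype)
                if np.asarray(a).dtype != target_dtype else a
                for a in args]

    mixed = [np.float32(1.0), np.float64(2.0)]
    print([a.dtype for a in cast_args(mixed, np.float64)])  # both float64
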
diff --git a/transformations/transformations.py b/transformations/transformations.py
index b0d406d78f6576ce6fc311f389b18c1f0a60e3df..9e0fb3f58b7139a2df7eff2ae5748dd5fb022d47 100644
--- a/transformations/transformations.py
+++ b/transformations/transformations.py
@@ -85,24 +85,24 @@ def make_loop_over_domain(body, function_name, iteration_slice=None, ghost_layer
     loop_strides = []
     loop_vars = []
     current_body = body
-    for i, loopCoordinate in enumerate(reversed(loop_order)):
+    for i, loop_coordinate in enumerate(reversed(loop_order)):
         if iteration_slice is None:
-            begin = ghost_layers[loopCoordinate][0]
-            end = shape[loopCoordinate] - ghost_layers[loopCoordinate][1]
-            new_loop = ast.LoopOverCoordinate(current_body, loopCoordinate, begin, end, 1)
+            begin = ghost_layers[loop_coordinate][0]
+            end = shape[loop_coordinate] - ghost_layers[loop_coordinate][1]
+            new_loop = ast.LoopOverCoordinate(current_body, loop_coordinate, begin, end, 1)
             current_body = ast.Block([new_loop])
             loop_strides.append(get_loop_stride(begin, end, 1))
             loop_vars.append(new_loop.loop_counter_symbol)
         else:
-            slice_component = iteration_slice[loopCoordinate]
+            slice_component = iteration_slice[loop_coordinate]
             if type(slice_component) is slice:
                 sc = slice_component
-                new_loop = ast.LoopOverCoordinate(current_body, loopCoordinate, sc.start, sc.stop, sc.step)
+                new_loop = ast.LoopOverCoordinate(current_body, loop_coordinate, sc.start, sc.stop, sc.step)
                 current_body = ast.Block([new_loop])
                 loop_strides.append(get_loop_stride(sc.start, sc.stop, sc.step))
                 loop_vars.append(new_loop.loop_counter_symbol)
             else:
-                assignment = ast.SympyAssignment(ast.LoopOverCoordinate.get_loop_counter_symbol(loopCoordinate),
+                assignment = ast.SympyAssignment(ast.LoopOverCoordinate.get_loop_counter_symbol(loop_coordinate),
                                                  sp.sympify(slice_component))
                 current_body.insert_front(assignment)
 
@@ -126,32 +126,32 @@ def create_intermediate_base_pointer(field_access, coordinates, previous_ptr):
     Example:
         >>> field = Field.create_generic('myfield', spatial_dimensions=2, index_dimensions=1)
         >>> x, y = sp.symbols("x y")
-        >>> prevPointer = TypedSymbol("ptr", "double")
-        >>> create_intermediate_base_pointer(field[1,-2](5), {0: x}, prevPointer)
+        >>> prev_pointer = TypedSymbol("ptr", "double")
+        >>> create_intermediate_base_pointer(field[1,-2](5), {0: x}, prev_pointer)
         (ptr_E, x*fstride_myfield[0] + fstride_myfield[0])
-        >>> create_intermediate_base_pointer(field[1,-2](5), {0: x, 1 : y }, prevPointer)
+        >>> create_intermediate_base_pointer(field[1,-2](5), {0: x, 1 : y }, prev_pointer)
         (ptr_E_2S, x*fstride_myfield[0] + y*fstride_myfield[1] + fstride_myfield[0] - 2*fstride_myfield[1])
     """
     field = field_access.field
     offset = 0
     name = ""
     list_to_hash = []
-    for coordinateId, coordinateValue in coordinates.items():
-        offset += field.strides[coordinateId] * coordinateValue
+    for coordinate_id, coordinate_value in coordinates.items():
+        offset += field.strides[coordinate_id] * coordinate_value
 
-        if coordinateId < field.spatial_dimensions:
-            offset += field.strides[coordinateId] * field_access.offsets[coordinateId]
-            if type(field_access.offsets[coordinateId]) is int:
-                offset_comp = offset_component_to_direction_string(coordinateId, field_access.offsets[coordinateId])
+        if coordinate_id < field.spatial_dimensions:
+            offset += field.strides[coordinate_id] * field_access.offsets[coordinate_id]
+            if type(field_access.offsets[coordinate_id]) is int:
+                offset_comp = offset_component_to_direction_string(coordinate_id, field_access.offsets[coordinate_id])
                 name += "_"
                 name += offset_comp if offset_comp else "C"
             else:
-                list_to_hash.append(field_access.offsets[coordinateId])
+                list_to_hash.append(field_access.offsets[coordinate_id])
         else:
-            if type(coordinateValue) is int:
-                name += "_%d" % (coordinateValue,)
+            if type(coordinate_value) is int:
+                name += "_%d" % (coordinate_value,)
             else:
-                list_to_hash.append(coordinateValue)
+                list_to_hash.append(coordinate_value)
 
     if len(list_to_hash) > 0:
         name += "%0.6X" % (abs(hash(tuple(list_to_hash))))
@@ -188,7 +188,7 @@ def parse_base_pointer_info(base_pointer_specification, loop_order, field):
     result = []
     specified_coordinates = set()
     loop_order = list(reversed(loop_order))
-    for specGroup in base_pointer_specification:
+    for spec_group in base_pointer_specification:
         new_group = []
 
         def add_new_element(elem):
@@ -198,7 +198,7 @@ def parse_base_pointer_info(base_pointer_specification, loop_order, field):
             if elem in specified_coordinates:
                 raise ValueError("Coordinate %d specified two times" % (elem,))
             specified_coordinates.add(elem)
-        for element in specGroup:
+        for element in spec_group:
             if type(element) is int:
                 add_new_element(element)
             elif element.startswith("spatial"):
@@ -253,20 +253,20 @@ def substitute_array_accesses_with_constants(ast_node):
 
         constants_definitions = []
         constant_substitutions = {}
-        for indexedExpr in indexed_expressions:
-            base, idx = indexedExpr.args
+        for indexed_expr in indexed_expressions:
+            base, idx = indexed_expr.args
             typed_symbol = base.args[0]
             base_type = deepcopy(get_base_type(typed_symbol.dtype))
             base_type.const = False
             constant_replacing_indexed = TypedSymbol(typed_symbol.name + str(idx), base_type)
-            constants_definitions.append(ast.SympyAssignment(constant_replacing_indexed, indexedExpr))
-            constant_substitutions[indexedExpr] = constant_replacing_indexed
+            constants_definitions.append(ast.SympyAssignment(constant_replacing_indexed, indexed_expr))
+            constant_substitutions[indexed_expr] = constant_replacing_indexed
         constants_definitions.sort(key=lambda e: e.lhs.name)
 
         already_defined = parent_block.symbols_defined
-        for newAssignment in constants_definitions:
-            if newAssignment.lhs not in already_defined:
-                parent_block.insert_before(newAssignment, ast_node)
+        for new_assignment in constants_definitions:
+            if new_assignment.lhs not in already_defined:
+                parent_block.insert_before(new_assignment, ast_node)
 
         return expr.subs(constant_substitutions)
 
@@ -449,7 +449,7 @@ def move_constants_before_loop(ast_node):
                 last_block_child = prev_element
 
             if isinstance(element, ast.Conditional):
-                critical_symbols = element.conditionExpr.atoms(sp.Symbol)
+                critical_symbols = element.condition_expr.atoms(sp.Symbol)
             else:
                 critical_symbols = element.symbols_defined
             if node.undefined_symbols.intersection(critical_symbols):
@@ -515,9 +515,9 @@ def split_inner_loop(ast_node: ast.Node, symbol_groups):
     assignment_map = OrderedDict((a.lhs, a) for a in inner_loop.body.args)
 
     assignment_groups = []
-    for symbolGroup in symbol_groups:
+    for symbol_group in symbol_groups:
         # get all dependent symbols
-        symbols_to_process = list(symbolGroup)
+        symbols_to_process = list(symbol_group)
         symbols_resolved = set()
         while symbols_to_process:
             s = symbols_to_process.pop()
@@ -525,12 +525,12 @@ def split_inner_loop(ast_node: ast.Node, symbol_groups):
                 continue
 
             if s in assignment_map:  # if there is no assignment inside the loop body it is independent already
-                for newSymbol in assignment_map[s].rhs.atoms(sp.Symbol):
-                    if type(newSymbol) is not Field.Access and newSymbol not in symbols_with_temporary_array:
-                        symbols_to_process.append(newSymbol)
+                for new_symbol in assignment_map[s].rhs.atoms(sp.Symbol):
+                    if type(new_symbol) is not Field.Access and new_symbol not in symbols_with_temporary_array:
+                        symbols_to_process.append(new_symbol)
             symbols_resolved.add(s)
 
-        for symbol in symbolGroup:
+        for symbol in symbol_group:
             if type(symbol) is not Field.Access:
                 assert type(symbol) is TypedSymbol
                 new_ts = TypedSymbol(symbol.name, PointerType(symbol.dtype))
@@ -540,7 +540,7 @@ def split_inner_loop(ast_node: ast.Node, symbol_groups):
         for assignment in inner_loop.body.args:
             if assignment.lhs in symbols_resolved:
                 new_rhs = assignment.rhs.subs(symbols_with_temporary_array.items())
-                if type(assignment.lhs) is not Field.Access and assignment.lhs in symbolGroup:
+                if type(assignment.lhs) is not Field.Access and assignment.lhs in symbol_group:
                     assert type(assignment.lhs) is TypedSymbol
                     new_ts = TypedSymbol(assignment.lhs.name, PointerType(assignment.lhs.dtype))
                     new_lhs = IndexedBase(new_ts, shape=(1,))[inner_loop.loop_counter_symbol]
@@ -552,30 +552,30 @@ def split_inner_loop(ast_node: ast.Node, symbol_groups):
     new_loops = [inner_loop.new_loop_with_different_body(ast.Block(group)) for group in assignment_groups]
     inner_loop.parent.replace(inner_loop, ast.Block(new_loops))
 
-    for tmpArray in symbols_with_temporary_array:
-        tmp_array_pointer = TypedSymbol(tmpArray.name, PointerType(tmpArray.dtype))
+    for tmp_array in symbols_with_temporary_array:
+        tmp_array_pointer = TypedSymbol(tmp_array.name, PointerType(tmp_array.dtype))
         outer_loop.parent.insert_front(ast.TemporaryMemoryAllocation(tmp_array_pointer, inner_loop.stop))
         outer_loop.parent.append(ast.TemporaryMemoryFree(tmp_array_pointer))
 
 
 def cut_loop(loop_node, cutting_points):
     """Cuts loop at given cutting points, that means one loop is transformed into len(cuttingPoints)+1 new loops
-    that range from  oldBegin to cuttingPoint[1], ..., cuttingPoint[-1] to oldEnd"""
+    that range from  old_begin to cutting_points[1], ..., cutting_points[-1] to old_end"""
     if loop_node.step != 1:
         raise NotImplementedError("Can only split loops that have a step of 1")
     new_loops = []
     new_start = loop_node.start
     cutting_points = list(cutting_points) + [loop_node.stop]
-    for newEnd in cutting_points:
-        if newEnd - new_start == 1:
+    for new_end in cutting_points:
+        if new_end - new_start == 1:
             new_body = deepcopy(loop_node.body)
             new_body.subs({loop_node.loop_counter_symbol: new_start})
             new_loops.append(new_body)
         else:
-            new_loop = ast.LoopOverCoordinate(deepcopy(loop_node.body), loop_node.coordinateToLoopOver,
-                                              new_start, newEnd, loop_node.step)
+            new_loop = ast.LoopOverCoordinate(deepcopy(loop_node.body), loop_node.coordinate_to_loop_over,
+                                              new_start, new_end, loop_node.step)
             new_loops.append(new_loop)
-        new_start = newEnd
+        new_start = new_end
     loop_node.parent.replace(loop_node, new_loops)
 
 
@@ -594,7 +594,7 @@ def is_condition_necessary(condition, pre_condition, symbol):
     def to_dnf_list(expr):
         result = to_dnf(expr)
         if isinstance(result, sp.Or):
-            return [orTerm.args for orTerm in result.args]
+            return [or_term.args for or_term in result.args]
         elif isinstance(result, sp.And):
             return [result.args]
         else:
@@ -626,8 +626,8 @@ def simplify_boolean_expression(expr, single_variable_ranges):
                         return sp.true
             return e
         else:
-            newArgs = [visit(a) for a in e.args]
-            return e.func(*newArgs) if newArgs else e
+            new_args = [visit(a) for a in e.args]
+            return e.func(*new_args) if new_args else e
 
     return visit(expr)
 
@@ -640,14 +640,14 @@ def simplify_conditionals(node, loop_conditionals={}):
         simplify_conditionals(node.body)
         del loop_conditionals[ctr_sym]
     elif isinstance(node, ast.Conditional):
-        node.conditionExpr = simplify_boolean_expression(node.conditionExpr, loop_conditionals)
-        simplify_conditionals(node.trueBlock)
-        if node.falseBlock:
-            simplify_conditionals(node.falseBlock)
-        if node.conditionExpr == sp.true:
-            node.parent.replace(node, [node.trueBlock])
-        if node.conditionExpr == sp.false:
-            node.parent.replace(node, [node.falseBlock] if node.falseBlock else [])
+        node.condition_expr = simplify_boolean_expression(node.condition_expr, loop_conditionals)
+        simplify_conditionals(node.true_block)
+        if node.false_block:
+            simplify_conditionals(node.false_block)
+        if node.condition_expr == sp.true:
+            node.parent.replace(node, [node.true_block])
+        if node.condition_expr == sp.false:
+            node.parent.replace(node, [node.false_block] if node.false_block else [])
     elif isinstance(node, ast.Block):
         for a in list(node.args):
             simplify_conditionals(a)
@@ -728,9 +728,9 @@ def type_all_equations(eqs, type_for_symbol):
             new_rhs = process_rhs(obj.rhs)
             return ast.SympyAssignment(new_lhs, new_rhs)
         elif isinstance(obj, ast.Conditional):
-            false_block = None if obj.falseBlock is None else visit(obj.falseBlock)
-            return ast.Conditional(process_rhs(obj.conditionExpr),
-                                   true_block=visit(obj.trueBlock), false_block=false_block)
+            false_block = None if obj.false_block is None else visit(obj.false_block)
+            return ast.Conditional(process_rhs(obj.condition_expr),
+                                   true_block=visit(obj.true_block), false_block=false_block)
         elif isinstance(obj, ast.Block):
             return ast.Block([visit(e) for e in obj.args])
         else:
@@ -807,5 +807,5 @@ def get_loop_hierarchy(ast_node):
     while node is not None:
         node = get_next_parent_of_type(node, ast.LoopOverCoordinate)
         if node:
-            result.append(node.coordinateToLoopOver)
+            result.append(node.coordinate_to_loop_over)
     return reversed(result)
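
The splitting rule in the cut_loop docstring can be mirrored on plain Python ranges; the function cut_range below is illustrative, not the library's API:

    def cut_range(start, stop, cutting_points):
        # one sub-range per segment: start..cp[0], cp[0]..cp[1], ..., cp[-1]..stop
        bounds = [start] + list(cutting_points) + [stop]
        return [range(b, e) for b, e in zip(bounds[:-1], bounds[1:])]

    assert cut_range(0, 10, [4, 7]) == [range(0, 4), range(4, 7), range(7, 10)]
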
diff --git a/vectorization.py b/vectorization.py
index 9311ecb7e4022f31409e4eb3e80a215a9a3defd1..d8220a36db8285bca56b55821a5573187c7f61d5 100644
--- a/vectorization.py
+++ b/vectorization.py
@@ -19,23 +19,23 @@ def vectorize_inner_loops_and_adapt_load_stores(ast_node, vector_width=4):
     """
     inner_loops = [n for n in ast_node.atoms(ast.LoopOverCoordinate) if n.is_innermost_loop]
 
-    for loopNode in inner_loops:
-        loop_range = loopNode.stop - loopNode.start
+    for loop_node in inner_loops:
+        loop_range = loop_node.stop - loop_node.start
 
         # Check restrictions
         if isinstance(loop_range, sp.Expr) and not loop_range.is_number:
             warnings.warn("Currently only loops with fixed ranges can be vectorized - skipping loop")
             continue
-        if loop_range % vector_width != 0 or loopNode.step != 1:
+        if loop_range % vector_width != 0 or loop_node.step != 1:
             warnings.warn("Currently only loops with loop bounds that are multiples "
                           "of vectorization width can be vectorized - skipping loop")
             continue
 
         # Find all array accesses (indexed) that depend on the loop counter as offset
-        loop_counter_symbol = ast.LoopOverCoordinate.get_loop_counter_symbol(loopNode.coordinateToLoopOver)
+        loop_counter_symbol = ast.LoopOverCoordinate.get_loop_counter_symbol(loop_node.coordinate_to_loop_over)
         substitutions = {}
         successful = True
-        for indexed in loopNode.atoms(sp.Indexed):
+        for indexed in loop_node.atoms(sp.Indexed):
             base, index = indexed.args
             if loop_counter_symbol in index.atoms(sp.Symbol):
                 loop_counter_is_offset = loop_counter_symbol not in (index - loop_counter_symbol).atoms()
@@ -49,8 +49,8 @@ def vectorize_inner_loops_and_adapt_load_stores(ast_node, vector_width=4):
             warnings.warn("Could not vectorize loop because of non-consecutive memory access")
             continue
 
-        loopNode.step = vector_width
-        loopNode.subs(substitutions)
+        loop_node.step = vector_width
+        loop_node.subs(substitutions)
 
 
 def insert_vector_casts(ast_node):
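
The vectorization restrictions checked above (fixed range, unit step, range divisible by the vector width) condense into a small predicate; the name is_vectorizable is illustrative:

    def is_vectorizable(start, stop, step, vector_width=4):
        loop_range = stop - start
        # mirrors the checks above: known integer range, unit step, divisible range
        return isinstance(loop_range, int) and step == 1 and loop_range % vector_width == 0

    assert is_vectorizable(0, 16, 1)
    assert not is_vectorizable(0, 18, 1)
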
diff --git a/vtk.py b/vtk.py
index da18c7934b5110cff27027a6b34770a3fc7a219d..c823e15ae5fadd1082265421826719e597756897 100644
--- a/vtk.py
+++ b/vtk.py
@@ -27,9 +27,9 @@ def image_to_vtk(path, cell_data, origin=(0.0, 0.0, 0.0), spacing=(1.0, 1.0, 1.0
         >>> with TemporaryDirectory() as tmp_dir:
         ...     path = os.path.join(tmp_dir, 'out')
         ...     size = (20, 20, 20)
-        ...     resFile = image_to_vtk(path, cell_data={'someScalarField': np.zeros(size),
-        ...                                             'someVectorField': (np.ones(size), np.ones(size), np.ones(size))
-        ...                                             })
+        ...     res_file = image_to_vtk(path, cell_data={'scalar': np.zeros(size),
+        ...                                              'vector': (np.ones(size), np.ones(size), np.ones(size))
+        ...                                              })
     """
 
     # Extract dimensions